diff -Nru check-mk-1.2.2p3/3ware_disks check-mk-1.2.6p12/3ware_disks --- check-mk-1.2.2p3/3ware_disks 2013-11-05 09:23:07.000000000 +0000 +++ check-mk-1.2.6p12/3ware_disks 2015-06-24 09:48:36.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. @@ -72,13 +72,17 @@ model = line[-1] infotext = "%s (unit: %s, size: %s,%s, type: %s, model: %s)" % \ (status, unit_type, size, size_type, disk_type, model) - if status == "OK": - return (0, "OK - disk status is " + infotext) + if status in [ "OK", "VERIFYING" ]: + return (0, "disk status is " + infotext) else: - return (2, "CRIT - disk status is " + infotext) - return (3, "UNKNOWN - disk %s not found in agent output" % item) + return (2, "disk status is " + infotext) + return (3, "disk %s not found in agent output" % item) # declare the check to Check_MK -check_info['3ware_disks'] = \ - (check_3ware_disks, "RAID 3ware disk %s", 0, inventory_3ware_disks) -checkgroup_of['3ware_disks'] = "raid_disk" + +check_info["3ware_disks"] = { + 'check_function': check_3ware_disks, + 'inventory_function': inventory_3ware_disks, + 'service_description': 'RAID 3ware disk %s', + 'group': 'raid_disk', +} diff -Nru check-mk-1.2.2p3/3ware_info check-mk-1.2.6p12/3ware_info --- check-mk-1.2.2p3/3ware_info 2013-11-05 09:23:07.000000000 +0000 +++ check-mk-1.2.6p12/3ware_info 2015-06-24 09:48:36.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. @@ -58,4 +58,9 @@ infotext = infotext+line+ ';' return (0, infotext) -check_info['3ware_info'] = (check_3ware_info, "RAID 3ware controller %s", 0, inventory_3ware_info) + +check_info["3ware_info"] = { + 'check_function': check_3ware_info, + 'inventory_function': inventory_3ware_info, + 'service_description': 'RAID 3ware controller %s', +} diff -Nru check-mk-1.2.2p3/3ware_units check-mk-1.2.6p12/3ware_units --- check-mk-1.2.2p3/3ware_units 2013-11-05 09:23:07.000000000 +0000 +++ check-mk-1.2.6p12/3ware_units 2015-06-24 09:48:36.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. 
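The 3ware_* hunks above all perform the same API migration: the positional check_info four-tuple (with what is, as used in these files, the has_perfdata flag in third position) plus the separate checkgroup_of table become a single dictionary per check. Condensed from the diff itself, the two declaration styles side by side:

    # legacy tuple: (check_function, service_description, has_perfdata, inventory_function)
    check_info['3ware_disks'] = \
        (check_3ware_disks, "RAID 3ware disk %s", 0, inventory_3ware_disks)
    checkgroup_of['3ware_disks'] = "raid_disk"

    # dict style used by 1.2.6; the 'group' key replaces the checkgroup_of entry
    check_info["3ware_disks"] = {
        'check_function':      check_3ware_disks,
        'inventory_function':  inventory_3ware_disks,
        'service_description': 'RAID 3ware disk %s',
        'group':               'raid_disk',
    }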
@@ -66,15 +66,19 @@ infotext = "%s (type: %s, size: %sGB%s)" % (status, unit_type, size, complete_txt) - if status == "OK": - return (0, "OK - unit status is " + infotext) - elif status in [ "INITIALIZING", "VERIFY-PAUSED", "VERIFYING", "REBUILDING" ]: - return (1, "WARN - unit status is " + infotext) + if status in [ "OK", "VERIFYING" ]: + return (0, "unit status is " + infotext) + elif status in [ "INITIALIZING", "VERIFY-PAUSED", "REBUILDING" ]: + return (1, "unit status is " + infotext) else: - return (2, "CRIT - unit status is " + infotext) - return (3, "UNKNOWN - unit %s not found in agent output" % item) + return (2, "unit status is " + infotext) + return (3, "unit %s not found in agent output" % item) # declare the check to Check_MK -check_info['3ware_units'] = \ - (check_3ware_units, "RAID 3ware unit %s", 0, inventory_3ware_units) -checkgroup_of['3ware_units'] = "raid" + +check_info["3ware_units"] = { + 'check_function': check_3ware_units, + 'inventory_function': inventory_3ware_units, + 'service_description': 'RAID 3ware unit %s', + 'group': 'raid', +} diff -Nru check-mk-1.2.2p3/aclocal.m4 check-mk-1.2.6p12/aclocal.m4 --- check-mk-1.2.2p3/aclocal.m4 2013-11-05 09:42:54.000000000 +0000 +++ check-mk-1.2.6p12/aclocal.m4 2015-09-21 11:01:30.000000000 +0000 @@ -1,7 +1,8 @@ -# generated automatically by aclocal 1.11.1 -*- Autoconf -*- +# generated automatically by aclocal 1.11.6 -*- Autoconf -*- # Copyright (C) 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004, -# 2005, 2006, 2007, 2008, 2009 Free Software Foundation, Inc. +# 2005, 2006, 2007, 2008, 2009, 2010, 2011 Free Software Foundation, +# Inc. # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. @@ -13,18 +14,21 @@ m4_ifndef([AC_AUTOCONF_VERSION], [m4_copy([m4_PACKAGE_VERSION], [AC_AUTOCONF_VERSION])])dnl -m4_if(m4_defn([AC_AUTOCONF_VERSION]), [2.67],, -[m4_warning([this file was generated for autoconf 2.67. +m4_if(m4_defn([AC_AUTOCONF_VERSION]), [2.69],, +[m4_warning([this file was generated for autoconf 2.69. You have another version of autoconf. It may work, but is not guaranteed to. If you have problems, you may need to regenerate the build system entirely. To do so, use the procedure documented by the package, typically `autoreconf'.])]) -# Copyright (C) 2002, 2003, 2005, 2006, 2007, 2008 Free Software Foundation, Inc. +# Copyright (C) 2002, 2003, 2005, 2006, 2007, 2008, 2011 Free Software +# Foundation, Inc. # # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. +# serial 1 + # AM_AUTOMAKE_VERSION(VERSION) # ---------------------------- # Automake X.Y traces this macro to ensure aclocal.m4 has been @@ -34,7 +38,7 @@ [am__api_version='1.11' dnl Some users find AM_AUTOMAKE_VERSION and mistake it for a way to dnl require some minimum version. Point them to the right macro. -m4_if([$1], [1.11.1], [], +m4_if([$1], [1.11.6], [], [AC_FATAL([Do not call $0, use AM_INIT_AUTOMAKE([$1]).])])dnl ]) @@ -50,19 +54,21 @@ # Call AM_AUTOMAKE_VERSION and AM_AUTOMAKE_VERSION so they can be traced. # This function is AC_REQUIREd by AM_INIT_AUTOMAKE. 
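A pattern from the 3ware hunks above that recurs throughout this diff: the hand-written "OK - ", "WARN - " and "CRIT - " prefixes disappear from the returned info texts, since from this version on the core apparently prepends the state name itself based on the returned code. For 3ware_units the status mapping also changes, with VERIFYING reclassified from WARN to OK. Distilled into a sketch:

    # state mapping after the patch, distilled from the 3ware_units hunk above
    def classify_3ware_unit(status):
        if status in ["OK", "VERIFYING"]:          # VERIFYING no longer warns
            return 0
        elif status in ["INITIALIZING", "VERIFY-PAUSED", "REBUILDING"]:
            return 1
        else:
            return 2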
AC_DEFUN([AM_SET_CURRENT_AUTOMAKE_VERSION], -[AM_AUTOMAKE_VERSION([1.11.1])dnl +[AM_AUTOMAKE_VERSION([1.11.6])dnl m4_ifndef([AC_AUTOCONF_VERSION], [m4_copy([m4_PACKAGE_VERSION], [AC_AUTOCONF_VERSION])])dnl _AM_AUTOCONF_VERSION(m4_defn([AC_AUTOCONF_VERSION]))]) # AM_AUX_DIR_EXPAND -*- Autoconf -*- -# Copyright (C) 2001, 2003, 2005 Free Software Foundation, Inc. +# Copyright (C) 2001, 2003, 2005, 2011 Free Software Foundation, Inc. # # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. +# serial 1 + # For projects using AC_CONFIG_AUX_DIR([foo]), Autoconf sets # $ac_aux_dir to `$srcdir/foo'. In other projects, it is set to # `$srcdir', `$srcdir/..', or `$srcdir/../..'. @@ -144,14 +150,14 @@ Usually this means the macro was only invoked conditionally.]]) fi])]) -# Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2009 -# Free Software Foundation, Inc. +# Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2009, +# 2010, 2011 Free Software Foundation, Inc. # # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. -# serial 10 +# serial 12 # There are a few dirty hacks below to avoid letting `AC_PROG_CC' be # written in clear, in which case automake, when reading aclocal.m4, @@ -191,6 +197,7 @@ # instance it was reported that on HP-UX the gcc test will end up # making a dummy file named `D' -- because `-MD' means `put the output # in D'. + rm -rf conftest.dir mkdir conftest.dir # Copy depcomp to subdir because otherwise we won't find it if we're # using a relative directory. @@ -255,7 +262,7 @@ break fi ;; - msvisualcpp | msvcmsys) + msvc7 | msvc7msys | msvisualcpp | msvcmsys) # This compiler won't grok `-c -o', but also, the minuso test has # not run yet. These depmodes are late enough in the game, and # so weak that their functioning should not be impacted. @@ -320,10 +327,13 @@ if test "x$enable_dependency_tracking" != xno; then am_depcomp="$ac_aux_dir/depcomp" AMDEPBACKSLASH='\' + am__nodep='_no' fi AM_CONDITIONAL([AMDEP], [test "x$enable_dependency_tracking" != xno]) AC_SUBST([AMDEPBACKSLASH])dnl _AM_SUBST_NOTMAKE([AMDEPBACKSLASH])dnl +AC_SUBST([am__nodep])dnl +_AM_SUBST_NOTMAKE([am__nodep])dnl ]) # Generate code to set up dependency tracking. -*- Autoconf -*- @@ -545,12 +555,15 @@ done echo "timestamp for $_am_arg" >`AS_DIRNAME(["$_am_arg"])`/stamp-h[]$_am_stamp_count]) -# Copyright (C) 2001, 2003, 2005, 2008 Free Software Foundation, Inc. +# Copyright (C) 2001, 2003, 2005, 2008, 2011 Free Software Foundation, +# Inc. # # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. +# serial 1 + # AM_PROG_INSTALL_SH # ------------------ # Define $install_sh. @@ -682,12 +695,15 @@ fi ]) -# Copyright (C) 2003, 2004, 2005, 2006 Free Software Foundation, Inc. +# Copyright (C) 2003, 2004, 2005, 2006, 2011 Free Software Foundation, +# Inc. # # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. +# serial 1 + # AM_PROG_MKDIR_P # --------------- # Check for `mkdir -p'. @@ -710,13 +726,14 @@ # Helper functions for option handling. 
-*- Autoconf -*- -# Copyright (C) 2001, 2002, 2003, 2005, 2008 Free Software Foundation, Inc. +# Copyright (C) 2001, 2002, 2003, 2005, 2008, 2010 Free Software +# Foundation, Inc. # # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. -# serial 4 +# serial 5 # _AM_MANGLE_OPTION(NAME) # ----------------------- @@ -724,13 +741,13 @@ [[_AM_OPTION_]m4_bpatsubst($1, [[^a-zA-Z0-9_]], [_])]) # _AM_SET_OPTION(NAME) -# ------------------------------ +# -------------------- # Set option NAME. Presently that only means defining a flag for this option. AC_DEFUN([_AM_SET_OPTION], [m4_define(_AM_MANGLE_OPTION([$1]), 1)]) # _AM_SET_OPTIONS(OPTIONS) -# ---------------------------------- +# ------------------------ # OPTIONS is a space-separated list of Automake options. AC_DEFUN([_AM_SET_OPTIONS], [m4_foreach_w([_AM_Option], [$1], [_AM_SET_OPTION(_AM_Option)])]) @@ -806,12 +823,14 @@ fi AC_MSG_RESULT(yes)]) -# Copyright (C) 2001, 2003, 2005 Free Software Foundation, Inc. +# Copyright (C) 2001, 2003, 2005, 2011 Free Software Foundation, Inc. # # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. +# serial 1 + # AM_PROG_INSTALL_STRIP # --------------------- # One issue with vendor `install' (even GNU) is that you can't @@ -834,13 +853,13 @@ INSTALL_STRIP_PROGRAM="\$(install_sh) -c -s" AC_SUBST([INSTALL_STRIP_PROGRAM])]) -# Copyright (C) 2006, 2008 Free Software Foundation, Inc. +# Copyright (C) 2006, 2008, 2010 Free Software Foundation, Inc. # # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. -# serial 2 +# serial 3 # _AM_SUBST_NOTMAKE(VARIABLE) # --------------------------- @@ -849,13 +868,13 @@ AC_DEFUN([_AM_SUBST_NOTMAKE]) # AM_SUBST_NOTMAKE(VARIABLE) -# --------------------------- +# -------------------------- # Public sister of _AM_SUBST_NOTMAKE. AC_DEFUN([AM_SUBST_NOTMAKE], [_AM_SUBST_NOTMAKE($@)]) # Check how to create a tarball. -*- Autoconf -*- -# Copyright (C) 2004, 2005 Free Software Foundation, Inc. +# Copyright (C) 2004, 2005, 2012 Free Software Foundation, Inc. # # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, @@ -877,10 +896,11 @@ # a tarball read from stdin. # $(am__untar) < result.tar AC_DEFUN([_AM_PROG_TAR], -[# Always define AMTAR for backward compatibility. -AM_MISSING_PROG([AMTAR], [tar]) +[# Always define AMTAR for backward compatibility. Yes, it's still used +# in the wild :-( We should find a proper way to deprecate it ... +AC_SUBST([AMTAR], ['$${TAR-tar}']) m4_if([$1], [v7], - [am__tar='${AMTAR} chof - "$$tardir"'; am__untar='${AMTAR} xf -'], + [am__tar='$${TAR-tar} chof - "$$tardir"' am__untar='$${TAR-tar} xf -'], [m4_case([$1], [ustar],, [pax],, [m4_fatal([Unknown tar format])]) AC_MSG_CHECKING([how to create a $1 tar archive]) diff -Nru check-mk-1.2.2p3/ad_replication check-mk-1.2.6p12/ad_replication --- check-mk-1.2.2p3/ad_replication 2013-11-05 09:23:07.000000000 +0000 +++ check-mk-1.2.6p12/ad_replication 2015-07-15 09:04:37.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . 
\ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. @@ -55,7 +55,13 @@ def inventory_ad_replication(info): inv = [] for line in parse_ad_replication_info(info): - entry = ('%s/%s' % (line[4], line[5]), 'ad_replication_default_params') + if len(line) == 11: + sourceSite = line[4] + sourceDC = line[5] + elif len(line) == 10: + sourceSite = line[3] + sourceDC = line[4] + entry = ('%s/%s' % (sourceSite, sourceDC), 'ad_replication_default_params') if line[0] == 'showrepl_INFO' and entry not in inv: inv.append(entry) return inv @@ -67,9 +73,14 @@ foundLine = False for l in parse_ad_replication_info(info): - (lineType, destSite, destDC, namingContext, sourceSite, sourceDC, - transport, numFailures, timeLastFailure, timeLastSuccess, - statusLastFailure ) = l + if len(l) == 11: + (lineType, destSite, destDC, namingContext, sourceSite, sourceDC, + transport, numFailures, timeLastFailure, timeLastSuccess, + statusLastFailure ) = l + elif len(l) == 10: + (lineType, destSite, namingContext, sourceSite, sourceDC, + transport, numFailures, timeLastFailure, timeLastSuccess, + statusLastFailure ) = l if lineType == 'showrepl_INFO' and sourceSite+'/'+sourceDC == item: foundLine = True @@ -81,34 +92,39 @@ if int(numFailures) > maxFailuresWarn: status = 1 - output += 'WARNING: %s/%s replication of context %s reached ' \ - ' the threshold of maxmimum failures (%s) (Last Success: %s, ' \ + output += '(!) %s/%s replication of context %s reached ' \ + ' the threshold of maximum failures (%s) (Last Success: %s, ' \ 'LastFailure: %s NumFailures: %s Status: %s), ' % \ - (sourceSite, sourceDC, namingContext, maxFailuresWarn, timeLastFailure, - timeLastSuccess, numFailures, statusLastFailure) + (sourceSite, sourceDC, namingContext, maxFailuresWarn, timeLastSuccess, + timeLastFailure, numFailures, statusLastFailure) if int(numFailures) > maxFailuresCrit: status = 2 - output += 'CRITICAL: %s/%s replication of context %s reached ' \ - ' the threshold of maxmimum failures (%s) (Last Success: %s, ' \ + output += '(!!) %s/%s replication of context %s reached ' \ + ' the threshold of maximum failures (%s) (Last Success: %s, ' \ 'LastFailure: %s NumFailures: %s Status: %s), ' % \ - (sourceSite, sourceDC, namingContext, maxFailuresCrit, timeLastFailure, - timeLastSuccess, numFailures, statusLastFailure) + (sourceSite, sourceDC, namingContext, maxFailuresCrit, timeLastSuccess, + timeLastFailure, numFailures, statusLastFailure) if timeLastFailure is not None and timeLastSuccess is not None \ and timeLastFailure > timeLastSuccess: status = 2 - output += 'CRITICAL: %s/%s replication of context %s failed ' \ + output += '(!!) 
%s/%s replication of context %s failed ' \ '(Last Success: %s, LastFailure: %s NumFailures: %s Status: %s), ' % \ - (sourceSite, sourceDC, namingContext, timeLastFailure, - timeLastSuccess, numFailures, statusLastFailure) + (sourceSite, sourceDC, namingContext, timeLastSuccess, + timeLastFailure, numFailures, statusLastFailure) if not foundLine: - return (3, 'UNKNOWN - Replication information for %s not found' % item) + return (3, 'Replication information for %s not found' % item) if status != 0: return (status, output.rstrip(', ')) else: - return (status, 'OK - All replications are OK.') + return (status, 'All replications are OK.') -check_info['ad_replication'] = (check_ad_replication, "AD Replication %s", 0, inventory_ad_replication) -checkgroup_of['ad_replication'] = "ad_replication" + +check_info["ad_replication"] = { + 'check_function': check_ad_replication, + 'inventory_function': inventory_ad_replication, + 'service_description': 'AD Replication %s', + 'group': 'ad_replication', +} diff -Nru check-mk-1.2.2p3/adva_fsp_current check-mk-1.2.6p12/adva_fsp_current --- check-mk-1.2.2p3/adva_fsp_current 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/adva_fsp_current 2015-09-21 10:59:54.000000000 +0000 @@ -0,0 +1,68 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. 
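The ad_replication hunks above teach the check two agent layouts: repadmin output with 11 fields (including destDC) and with 10 (destDC missing), picking sourceSite/sourceDC from indices 4/5 or 3/4 accordingly; they also swap the Last Success/Last Failure values into the order the message text announces, and replace the WARNING:/CRITICAL: words with the (!)/(!!) markers. A dict-based sketch of the same length dispatch (helper name hypothetical):

    def parse_showrepl_line(l):
        # the 11-column layout carries destDC, the 10-column layout omits it
        if len(l) == 11:
            keys = ('type', 'dest_site', 'dest_dc', 'context', 'source_site',
                    'source_dc', 'transport', 'failures', 'last_failure',
                    'last_success', 'status')
        elif len(l) == 10:
            keys = ('type', 'dest_site', 'context', 'source_site', 'source_dc',
                    'transport', 'failures', 'last_failure', 'last_success',
                    'status')
        else:
            return None
        return dict(zip(keys, l))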
+ +def inventory_adva_fsp_current(info): + inventory = [] + for line in info: + # Ignore non-connected sensors + if len(line) == 5 and line[4] != "" and line[2] != "": + inventory.append( (line[4], None) ) + return inventory + +def check_adva_fsp_current(item, _no_params, info): + for line in info: + if len(line) == 5 and line[4] == item: + current, high, power, descr = line[0:4] + current = float(current)/1000 + high = float(high)/1000 + + infotext = "%.3f A (limit at %.3f A) at %s" % (current, high, descr) + perfdata = [ ("current", current, "", str(high), ) ] + + if current <= 0: + return(3, "Invalid Sensor Data") + elif current >= high: + return (2, "%s" % infotext, perfdata) + else: + return (0, "%s" % infotext, perfdata) + + return (3, "Sensor %s not found in SNMP data" % item) + +check_info['adva_fsp_current'] = { + "inventory_function" : inventory_adva_fsp_current, + "check_function" : check_adva_fsp_current, + "service_description" : "Power Supply %s", + "has_perfdata" : True, + "snmp_info" : ( ".1.3.6.1.4.1.2544", [ + "1.11.2.4.2.2.1.1", # currentDiagnosticsAmpere + "1.11.2.4.2.2.1.2", # currentDiagnosticsUpperThres + "1.11.2.4.2.2.1.3", # currentDiagnosticsPsuOutputPower + "2.5.5.1.1.1", # inventoryUnitName + "2.5.5.2.1.5", # entityIndexAid + ]), + "snmp_scan_function" : lambda oid: + oid(".1.3.6.1.2.1.1.1.0") == "Fiber Service Platform F7", +} diff -Nru check-mk-1.2.2p3/adva_fsp_if check-mk-1.2.6p12/adva_fsp_if --- check-mk-1.2.2p3/adva_fsp_if 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/adva_fsp_if 2015-07-01 12:18:10.000000000 +0000 @@ -0,0 +1,135 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. 
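check_adva_fsp_current above divides the raw SNMP readings by 1000 (the ampere OID apparently delivers thousandths of an ampere) and emits perfdata tuples of the shape (name, value, warn, crit, min, max), where unused trailing fields may be omitted or left empty; here warn stays "" and crit is the device's own upper threshold:

    # perfdata construction as in the check above (raw values are strings from SNMP)
    def adva_current_perfdata(raw_value, raw_high):
        current = float(raw_value) / 1000        # e.g. "1500" -> 1.500 A
        high    = float(raw_high) / 1000
        return [("current", current, "", str(high))]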
+ +adva_fsp_if_inventory_porttypes = [ '1', '6', '56' ] +adva_fsp_if_inventory_portstates = [ '1' ] +adva_fsp_if_inventory_uses_description = True +adva_fsp_if_inventory_uses_alias = False + +adva_fsp_if_operstates = { + "1": ("up", 0 ), + "2": ("down", 2 ), + "3": ("testing",1), + "4": ("unknown",3), + "5": ("dormant",1), + "6": ("notPresent",2), + "7": ("lowerLayerDown",2), +} + +adva_fsp_if_adminstates = { + "1": ("up", 0 ), + "2": ("down", 2 ), + "3": ("testing",1), +} + +def inventory_adva_fsp_if(info): + inventory = [] + for line in info: + if line[2] in adva_fsp_if_inventory_porttypes and line[3] in adva_fsp_if_inventory_portstates: + if adva_fsp_if_inventory_uses_description and line[1] != "": + item = line[1] + elif adva_fsp_if_inventory_uses_alias and line[5] != "": + item = line[5] + else: + item = line[0] + inventory.append( (item, {}) ) + return inventory + +def adva_fsp_if_getindex(item,info): + for line in info: + if adva_fsp_if_inventory_uses_description: + if line[1] == item: + return line[0] + elif adva_fsp_if_inventory_uses_alias: + if line[5] == item: + return line[0] + else: + return item + +def check_adva_fsp_if(item, params, info): + index = adva_fsp_if_getindex(item, info) + for line in info: + if line[0] == index: + state = 0 + admintxt, adminstate = adva_fsp_if_adminstates[line[3]] + state = max(adminstate, state) + opertxt, operstate = adva_fsp_if_operstates[line[4]] + state = max(operstate, state) + if state == 2: + statesym = "(!!)" + elif state == 1: + statesym = "(!)" + else: + statesym = "" + infotext = "Admin/Operational State: %s/%s %s" % ( admintxt, opertxt, statesym ) + + perfdata = [] + for power, name in [ (line[6], "output"), + (line[7], "input" ) + ]: + try: + sym = "" + climits = "" + fpower = float(power)/10.0 + if params.get("limits_%s_power" % name): + lower, upper = params.get("limits_%s_power" % name) + climits = "%s:%s" % params.get("limits_%s_power" % name) + if fpower < lower or fpower > upper: + state = 2 + sym = "(!!)" + infotext += ", %s Power: %.1f%s" % (name.title(), fpower, sym ) + perfdata.append( ("%s_power" % name, "%.1fdBm" % fpower, "", climits ) ) + + except: + if not re.match("S", item): # if no service interface and no power parameter + infotext += ", %s Power: n.a. 
(!)" % name.title() + state = max(1,state) + + return(state, infotext, perfdata) + + return (3, "no such interface found") + + +check_info['adva_fsp_if'] = { + "inventory_function" : inventory_adva_fsp_if, + "check_function" : check_adva_fsp_if, + "service_description" : "Interface %s", + "has_perfdata" : True, + "group" : "adva_ifs", + "snmp_info" : ( ".1.3.6.1", + [ "2.1.2.2.1.1", # ifIndex + "2.1.2.2.1.2", # ifDescr + "2.1.2.2.1.3", # ifType + "2.1.2.2.1.7", # ifAdminStatus + "2.1.2.2.1.8", # ifOperStatus + "31.1.1.1.18", # ifAlias + "4.1.2544.1.11.2.4.3.5.1.4", # opticalIfDiagOutputPower + "4.1.2544.1.11.2.4.3.5.1.3", # opticalIfDiagInputPower + ], + ), + "snmp_scan_function" : lambda oid: + oid(".1.3.6.1.2.1.1.1.0") == "Fiber Service Platform F7", +} diff -Nru check-mk-1.2.2p3/adva_fsp_temp check-mk-1.2.6p12/adva_fsp_temp --- check-mk-1.2.2p3/adva_fsp_temp 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/adva_fsp_temp 2015-06-24 09:48:36.000000000 +0000 @@ -0,0 +1,146 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. 
+ +adva_fsp_temp_default_levels = { + "trend_range": None, # range in minutes, default is no trend computation + "trend_c": (5,10), # in degrees Celcius + "trend_timeleft": (240, 120), # in minutes +} + +def inventory_adva_fsp_temp(info): + inventory = [] + for line in info: + # Ignore unconnected sensors + if len(line) == 5 and line[0] != "" and line[4] != "" and int(line[0]) >= -2730: + inventory.append( (line[4], "adva_fsp_temp_default_levels") ) + return inventory + +def check_adva_fsp_temp(item, params, info): + for line in info: + if len(line) == 5 and line[4] == item: + temp, high, low, descr = line[0:4] + temp = float(temp)/10 + high = float(high)/10 + low = float(low)/10 + if low > -273: + infotext = "Temperature is %.1f °C (limits %.1f/%.1f °C) at %s" % (temp, low, high, descr) + perfdata = [ ("temp", temp, "", str(low)+":"+str(high), ) ] + else: + infotext = "Temperature is %.1f °C (crit at %.1f °C) at %s" % (temp, high, descr) + perfdata = [ ("temp", temp, "", high, ) ] + + if temp <= -2730: + return(3, "Invalid sensor data") + elif temp >= high or temp <= low: + state = 2 + else: + state = 0 + + if params.get("trend_range"): + problems = [] + try: + range = params["trend_range"] # in minutes + range_sec = range * 60.0 + this_time = time.time() + + # first compute current rate in C/s by computing delta since last check + rate = get_rate("temp.%s.delta" % item, this_time, temp, True) + + # average trend, initialize with zero, rate_avg is in C/s + rate_avg = get_average("temp.%s.trend" % item, + this_time, rate, range_sec / 60.0, True) #? + + # rate_avg is growth in C/s, trend is in C per trend range minutes + trend = float(rate_avg * range_sec) + sign = trend > 0 and "+" or "" + infotext += ", rate: %s%.2f °C/%g min" % (sign, trend, range) + + # apply levels for absolute growth in C / interval + trend_c = params.get("trend_c") + if trend_c: + wa, cr = trend_c + if trend >= cr: + problems.append("rising faster than %s °C/%g min(!!)" % ( cr, range )) + state = 2 + elif trend >= wa: + problems.append("rising faster than %s °C/%g min(!)" % ( wa, range )) + state = max(1, state) + else: + wa, cr = None, None + + # compute time until temperature limit is reached (only for positive trend, of course) + # The start value of minutes_left is negative. 
The pnp graph and the perfometer + # will interpret this as inifinite -> not growing + minutes_left = -1 + if trend > 0: + diff_to_high = high - temp + minutes_left = diff_to_high / trend * range + timeleft = params.get("trend_timeleft") + def format_minutes(minutes): + if minutes > 60: # hours + hours = minutes / 60 + minutes += - int(hours) * 60 + return "%dh %02dm" % (hours, minutes) + else: + return "%d minutes" % minutes + + if timeleft: + wa, cr = timeleft + if minutes_left <= cr: + state = 2 + problems.append("%s until temp limit reached(!!)" % format_minutes(minutes_left)) + elif minutes_left <= wa: + state = max(state, 1) + problems.append("%s until temp limit reached(!)" % format_minutes(minutes_left)) + + except MKCounterWrapped: + pass + + if problems: + infotext += " - %s" % ", ".join(problems) + + return (state, "%s" % infotext, perfdata) + + return (3, "Sensor %s not found in SNMP data" % item) + +check_info['adva_fsp_temp'] = { + "inventory_function" : inventory_adva_fsp_temp, + "check_function" : check_adva_fsp_temp, + "service_description" : "Temperature %s", + "has_perfdata" : True, + "default_levels_variable" : "adva_fsp_temp_default_levels", + "snmp_info" : ( ".1.3.6.1.4.1.2544", [ + "1.11.2.4.2.1.1.1", # moduleDiagnosticsTemp + "1.11.2.4.2.1.1.2", # moduleDiagnosticsUpperTempThres + "1.11.2.4.2.1.1.3", # moduleDiagnosticsLowerTempThres + "2.5.5.1.1.1", + "2.5.5.2.1.5", + #"2.5.5.1.1.10", + ]), + "snmp_scan_function" : lambda oid: + oid(".1.3.6.1.2.1.1.1.0") == "Fiber Service Platform F7", + "group" : "temperature_trends", +} diff -Nru check-mk-1.2.2p3/agent_activemq check-mk-1.2.6p12/agent_activemq --- check-mk-1.2.2p3/agent_activemq 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/agent_activemq 2015-06-24 09:48:36.000000000 +0000 @@ -0,0 +1,35 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. 
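The trend logic in adva_fsp_temp above proceeds in three unit conversions: get_rate yields the temperature change in °C per second, get_average smooths that rate, and multiplying by the configured range (in seconds) gives °C per observation window; the remaining headroom divided by that trend then gives the time until the limit is hit. Worked through with assumed numbers (not from any real device):

    range_min  = 30                # params["trend_range"] in minutes
    range_sec  = range_min * 60.0
    rate_avg   = 0.001             # smoothed rate in degrees C per second
    temp, high = 45.0, 60.0        # current reading and device limit

    trend = rate_avg * range_sec                       # 1.8 C per 30-minute window
    minutes_left = (high - temp) / trend * range_min   # 15 / 1.8 * 30 = 250 minutes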
+ +#( "Hostname", Port, PiggyMode) + +def agent_activemq_arguments(params, hostname, ipaddress): + if len(params[2]) == 0: + return "%s %s" % (params[0], params[1]) + else: + return "%s %s 1" % (params[0], params[1]) + +special_agent_info['activemq'] = agent_activemq_arguments diff -Nru check-mk-1.2.2p3/agent_allnet_ip_sensoric check-mk-1.2.6p12/agent_allnet_ip_sensoric --- check-mk-1.2.2p3/agent_allnet_ip_sensoric 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/agent_allnet_ip_sensoric 2015-06-24 09:48:36.000000000 +0000 @@ -0,0 +1,36 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. + +def agent_allnet_ip_sensoric_arguments(params, hostname, ipaddress): + args = '' + + if "timeout" in params: + args += ' --timeout %d' % params["timeout"] + + args += " " + quote_shell_string(ipaddress) + return args + +special_agent_info['allnet_ip_sensoric'] = agent_allnet_ip_sensoric_arguments diff -Nru check-mk-1.2.2p3/agent_emcvnx check-mk-1.2.6p12/agent_emcvnx --- check-mk-1.2.2p3/agent_emcvnx 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/agent_emcvnx 2015-06-24 09:48:36.000000000 +0000 @@ -0,0 +1,38 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. 
If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. + +def agent_emcvnx_arguments(params, hostname, ipaddress): + args = '' + if params["user"] != "": + args += " -u " + quote_shell_string(params["user"]) + if params["password"] != "": + args += " -p " + quote_shell_string(params["password"]) + args += " -i " + ",".join(params["infos"]) + + args += " " + quote_shell_string(ipaddress) + return args + +special_agent_info['emcvnx'] = agent_emcvnx_arguments diff -Nru check-mk-1.2.2p3/agent_fritzbox check-mk-1.2.6p12/agent_fritzbox --- check-mk-1.2.2p3/agent_fritzbox 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/agent_fritzbox 2015-06-24 09:48:36.000000000 +0000 @@ -0,0 +1,40 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. + +# { +# 'timeout': 10, +# } + +def agent_fritzbox_arguments(params, hostname, ipaddress): + args = '' + + if "timeout" in params: + args += ' --timeout %d' % params["timeout"] + + args += " " + quote_shell_string(ipaddress) + return args + +special_agent_info['fritzbox'] = agent_fritzbox_arguments diff -Nru check-mk-1.2.2p3/agent_hivemanager check-mk-1.2.6p12/agent_hivemanager --- check-mk-1.2.2p3/agent_hivemanager 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/agent_hivemanager 2015-06-24 09:48:36.000000000 +0000 @@ -0,0 +1,31 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. 
See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. + +def agent_hivemanager_arguments(params, hostname, ipaddress): + # User, Password + return "'%s' %s %s" % ( ipaddress, quote_shell_string(params[0]), quote_shell_string(params[1]) ) + +special_agent_info['hivemanager'] = agent_hivemanager_arguments diff -Nru check-mk-1.2.2p3/agent_ibmsvc check-mk-1.2.6p12/agent_ibmsvc --- check-mk-1.2.2p3/agent_ibmsvc 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/agent_ibmsvc 2015-06-24 09:48:36.000000000 +0000 @@ -0,0 +1,37 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. + +def agent_ibmsvc_arguments(params, hostname, ipaddress): + args = '' + args += " -u " + quote_shell_string(params["user"]) + if params["accept-any-hostkey"] == True: + args += " --accept-any-hostkey" + args += " -i " + ",".join(params["infos"]) + + args += " " + quote_shell_string(ipaddress) + return args + +special_agent_info['ibmsvc'] = agent_ibmsvc_arguments diff -Nru check-mk-1.2.2p3/agent_innovaphone check-mk-1.2.6p12/agent_innovaphone --- check-mk-1.2.2p3/agent_innovaphone 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/agent_innovaphone 2015-06-24 09:48:36.000000000 +0000 @@ -0,0 +1,31 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. 
check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. + +def agent_innovaphone_arguments(params, hostname, ipaddress): + # IP, User, Password + return "%s %s %s" % ( ipaddress, params[0], params[1] ) + +special_agent_info['innovaphone'] = agent_innovaphone_arguments diff -Nru check-mk-1.2.2p3/agent_netapp check-mk-1.2.6p12/agent_netapp --- check-mk-1.2.2p3/agent_netapp 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/agent_netapp 2015-06-24 09:48:36.000000000 +0000 @@ -0,0 +1,36 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. + +def agent_netapp_arguments(params, hostname, ipaddress): + args = '' + + args += " -u " + quote_shell_string(params["username"]) + args += " -s " + quote_shell_string(params["password"]) + + args += " " + quote_shell_string(ipaddress) + return args + +special_agent_info['netapp'] = agent_netapp_arguments diff -Nru check-mk-1.2.2p3/agent_random check-mk-1.2.6p12/agent_random --- check-mk-1.2.2p3/agent_random 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/agent_random 2015-06-24 09:48:36.000000000 +0000 @@ -0,0 +1,37 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. 
check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. + +# { +# 'tcp_port': 4711, +# 'secret': 'wef', +# 'infos': ['hostsystem', 'virtualmachine'], +# 'user': 'wefwef' +# } + +def agent_random_arguments(params, hostname, ipaddress): + return hostname + +special_agent_info['random'] = agent_random_arguments diff -Nru check-mk-1.2.2p3/agents/README.OpenVMS check-mk-1.2.6p12/agents/README.OpenVMS --- check-mk-1.2.2p3/agents/README.OpenVMS 2013-03-06 07:57:34.000000000 +0000 +++ check-mk-1.2.6p12/agents/README.OpenVMS 2014-12-11 10:15:03.000000000 +0000 @@ -55,7 +55,7 @@ ] -Now you can run Check_MK inventory as usually and should find a couple +Now you can run Check_MK service discovery as usually and should find a couple of new checks. Here is an example output from the VMS agent on a small virtualized test machine: diff -Nru check-mk-1.2.2p3/agent_simulator.py check-mk-1.2.6p12/agent_simulator.py --- check-mk-1.2.2p3/agent_simulator.py 2013-11-05 09:23:07.000000000 +0000 +++ check-mk-1.2.6p12/agent_simulator.py 2015-06-24 09:48:36.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. Binary files /tmp/tmpz1J_py/xquijR6Ryw/check-mk-1.2.2p3/agents.tar.gz and /tmp/tmpz1J_py/GXS8RRKTxu/check-mk-1.2.6p12/agents.tar.gz differ diff -Nru check-mk-1.2.2p3/agent_ucs_bladecenter check-mk-1.2.6p12/agent_ucs_bladecenter --- check-mk-1.2.2p3/agent_ucs_bladecenter 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/agent_ucs_bladecenter 2015-06-24 09:48:36.000000000 +0000 @@ -0,0 +1,36 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. 
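All agent_* files added in this series follow the same recipe: a small function maps the WATO rule parameters plus hostname/ipaddress to a command line for the special agent and is registered in special_agent_info; user-controlled strings go through quote_shell_string to stay shell-safe (agent_activemq and agent_innovaphone above notably skip that quoting). A generic sketch under a hypothetical agent name:

    def agent_example_arguments(params, hostname, ipaddress):
        args = ""
        args += " -u " + quote_shell_string(params["user"])
        if "timeout" in params:
            args += " --timeout %d" % params["timeout"]
        args += " " + quote_shell_string(ipaddress)
        return args

    special_agent_info['example'] = agent_example_arguments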
+ +def agent_ucsbladecenter_arguments(params, hostname, ipaddress): + args = '' + + args += " -u " + quote_shell_string(params["username"]) + args += " -p " + quote_shell_string(params["password"]) + + args += " " + quote_shell_string(ipaddress) + return args + +special_agent_info['ucs_bladecenter'] = agent_ucsbladecenter_arguments diff -Nru check-mk-1.2.2p3/agent_vsphere check-mk-1.2.6p12/agent_vsphere --- check-mk-1.2.2p3/agent_vsphere 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/agent_vsphere 2015-07-15 09:04:37.000000000 +0000 @@ -0,0 +1,80 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. + +# { +# 'tcp_port': 443, +# 'secret': 'wef', +# 'infos': ['hostsystem', 'virtualmachine'], +# 'user': 'wefwef' +# } + +def agent_vsphere_arguments(params, hostname, ipaddress): + args = '' + if "tcp_port" in params: + args += " -p %d" % params["tcp_port"] + + args += " -u " + quote_shell_string(params["user"]) + args += " -s " + quote_shell_string(params["secret"]) + args += " -i " + ",".join(params["infos"]) + + direct = params.get("direct", False) + + if direct == "agent": + args += ' --agent' + elif direct: + args += ' --direct --hostname ' + quote_shell_string(hostname) + + if params.get("skip_placeholder_vms", True): + args += " -P" + + if "spaces" in params: + args += ' --spaces %s' % params["spaces"] + + if "timeout" in params: + args += ' --timeout %d' % params["timeout"] + + if params.get("use_pysphere"): + args += ' --pysphere' + + if params.get("vm_pwr_display"): + args += ' --vm_pwr_display %s' % params.get("vm_pwr_display") + + if params.get("host_pwr_display"): + args += ' --host_pwr_display %s' % params.get("host_pwr_display") + + if "ssl" in params: + if params["ssl"] == False: + args += ' --no-cert-check ' + quote_shell_string(ipaddress) + elif params["ssl"] == True: + args += " " + quote_shell_string(hostname) + else: + args += " " + quote_shell_string(params["ssl"]) + else: # legacy mode + args += " " + quote_shell_string(ipaddress) + + return args + +special_agent_info['vsphere'] = agent_vsphere_arguments diff -Nru check-mk-1.2.2p3/aironet_clients check-mk-1.2.6p12/aironet_clients --- check-mk-1.2.2p3/aironet_clients 2013-11-05 09:23:07.000000000 +0000 +++ check-mk-1.2.6p12/aironet_clients 2015-06-24 09:48:36.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . 
\ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. @@ -47,10 +47,10 @@ info = [ line for line in info if line[0] != '' ] if len(info) == 0: - return (0, "OK - No clients currently logged in") + return (0, "No clients currently logged in") if item == "clients": - return (0, "OK - %d clients currently logged in" % len(info), + return (0, "%d clients currently logged in" % len(info), [("clients", len(info), None, None, 0, None)]) # item = "quality" or "strength" @@ -70,31 +70,32 @@ avg = sum([saveint(line[index]) for line in info]) / float(len(info)) warn, crit = params perfdata = [(item, avg, warn, crit, mmin, mmax)] - infotxt = " - signal %s at %.1f%s (warn/crit at %s%s/%s%s)" % \ + infotxt = "signal %s at %.1f%s (warn/crit at %s%s/%s%s)" % \ (item, avg, unit, warn, unit, crit, unit) if neg * avg <= neg * crit: - return (2, "CRIT" + infotxt, perfdata) + return (2, infotxt, perfdata) elif neg * avg <= neg * warn: - return (1, "WARN" + infotxt, perfdata) + return (1, infotxt, perfdata) else: - return (0, "OK" + infotxt, perfdata) + return (0, infotxt, perfdata) - - - -check_info['aironet_clients'] = ( check_aironet_clients, "Average client signal %s", 1, inventory_aironet_clients ) -snmp_info['aironet_clients'] = ( ".1.3.6.1.4.1.9.9.273.1.3.1.1", [ 3, 4 ]) - -# CISCO-DOT11-ASSOCIATION-MIB::cDot11ClientSignalStrength -# CISCO-DOT11-ASSOCIATION-MIB::cDot11ClientSigQuality - -snmp_scan_functions['aironet_clients'] \ - = lambda oid: oid(".1.3.6.1.2.1.1.2.0") in [ - ".1.3.6.1.4.1.9.1.525", - ".1.3.6.1.4.1.9.1.618", - ".1.3.6.1.4.1.9.1.685", - ".1.3.6.1.4.1.9.1.758", - ".1.3.6.1.4.1.9.1.1034", - ".1.3.6.1.4.1.9.1.1247", - ] +check_info["aironet_clients"] = { + 'check_function': check_aironet_clients, + 'inventory_function': inventory_aironet_clients, + 'service_description': 'Average client signal %s', + 'has_perfdata': True, + 'snmp_info': ('.1.3.6.1.4.1.9.9.273.1.3.1.1', [ + 3, # CISCO-DOT11-ASSOCIATION-MIB::cDot11ClientSignalStrength + 4, # CISCO-DOT11-ASSOCIATION-MIB::cDot11ClientSigQuality + ]), + 'snmp_scan_function': \ + lambda oid: oid(".1.3.6.1.2.1.1.2.0") in [ + ".1.3.6.1.4.1.9.1.525", + ".1.3.6.1.4.1.9.1.618", + ".1.3.6.1.4.1.9.1.685", + ".1.3.6.1.4.1.9.1.758", + ".1.3.6.1.4.1.9.1.1034", + ".1.3.6.1.4.1.9.1.1247", + ], +} diff -Nru check-mk-1.2.2p3/aironet_errors check-mk-1.2.6p12/aironet_errors --- check-mk-1.2.2p3/aironet_errors 2013-11-05 09:23:07.000000000 +0000 +++ check-mk-1.2.6p12/aironet_errors 2015-06-24 09:48:36.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. 
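check_aironet_clients above averages the per-client readings and then reuses a single pair of comparisons for both threshold directions: multiplying value and thresholds by neg (+1 or -1, chosen in the elided part of the check depending on the item) turns "bad when below" into "bad when above". Distilled:

    # neg = +1: bad when the average drops to or below the threshold
    # neg = -1: bad when the average rises to or above the threshold
    def classify_signal(avg, warn, crit, neg):
        if neg * avg <= neg * crit:
            return 2
        elif neg * avg <= neg * warn:
            return 1
        return 0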
@@ -34,31 +34,34 @@ if int(line[0]) == item: value = int(line[1]) this_time = time.time() - timedif, persec = get_counter("aironet_errors.%s" % item, this_time, value) + persec = get_rate("aironet_errors.%s" % item, this_time, value) warn, crit = params perfdata = [("errors", persec, None, None, warn, crit)] - infotxt = " - %.2f errors/s in last %d secs" % (persec, int(timedif)) + infotxt = "%.2f errors/s" % persec if persec >= crit: - return (2, "CRIT" + infotxt, perfdata) + return (2, infotxt, perfdata) elif persec >= warn: - return (1, "WARN" + infotxt, perfdata) + return (1, infotxt, perfdata) else: - return (0, "OK" + infotxt, perfdata) + return (0, infotxt, perfdata) + return (3, "No radio %s available" % item) - return (3, "UNKNOW - No radio %s available" % item) - -check_info['aironet_errors'] = ( check_aironet_errors, "MAC CRC errors radio %s", 1, inventory_aironet_errors ) -snmp_info['aironet_errors'] = ( ".1.3.6.1.4.1.9.9.272.1.2.1.1.1", [0, 2] ) -# CISCO-DOT11-IF-MIB::cd11IfRecFrameMacCrcErrors - -snmp_scan_functions['aironet_errors'] \ - = lambda oid: oid(".1.3.6.1.2.1.1.2.0") in [ - ".1.3.6.1.4.1.9.1.525", - ".1.3.6.1.4.1.9.1.618", - ".1.3.6.1.4.1.9.1.685", - ".1.3.6.1.4.1.9.1.758", - ".1.3.6.1.4.1.9.1.1034", - ".1.3.6.1.4.1.9.1.1247", - ] +check_info["aironet_errors"] = { + 'check_function': check_aironet_errors, + 'inventory_function': inventory_aironet_errors, + 'service_description': 'MAC CRC errors radio %s', + 'has_perfdata': True, + # CISCO-DOT11-IF-MIB::cd11IfRecFrameMacCrcErrors + 'snmp_info': ('.1.3.6.1.4.1.9.9.272.1.2.1.1.1', [0, 2]), + 'snmp_scan_function': \ + lambda oid: oid(".1.3.6.1.2.1.1.2.0") in [ + ".1.3.6.1.4.1.9.1.525", + ".1.3.6.1.4.1.9.1.618", + ".1.3.6.1.4.1.9.1.685", + ".1.3.6.1.4.1.9.1.758", + ".1.3.6.1.4.1.9.1.1034", + ".1.3.6.1.4.1.9.1.1247", + ], +} diff -Nru check-mk-1.2.2p3/aix_baselevel check-mk-1.2.6p12/aix_baselevel --- check-mk-1.2.2p3/aix_baselevel 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/aix_baselevel 2015-06-24 09:48:37.000000000 +0000 @@ -0,0 +1,39 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. 
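The aironet_errors hunk above also shows the counter API change: the old get_counter returned a (time_delta, rate) pair, while get_rate returns just the per-second rate and reports an unusable first sample or a counter wrap via the MKCounterWrapped exception, which is why the info text no longer mentions the sample interval. A simplified, self-contained stand-in for what the framework call does:

    _counters = {}

    def get_rate_sketch(name, this_time, value):
        # remember the last (time, value) pair and return the per-second delta;
        # the real get_rate() raises MKCounterWrapped instead of RuntimeError
        last = _counters.get(name)
        _counters[name] = (this_time, value)
        if last is None:
            raise RuntimeError("counter %s initialized" % name)
        last_time, last_value = last
        if this_time <= last_time:
            raise RuntimeError("counter %s: no time elapsed" % name)
        return (value - last_value) / (this_time - last_time)

    # usage mirrors the check above: persec = get_rate_sketch("aironet_errors.0", time.time(), value)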
+ + +def inv_aix_baselevel(info): + version = info[0][0] + inv_tree("software.os.").update({ + "version" : version, + "vendor" : "IBM", + "type" : "aix", + "name" : "IBM AIX %s" %version, + }) + +inv_info['aix_baselevel'] = { + "inv_function" : inv_aix_baselevel, +} diff -Nru check-mk-1.2.2p3/aix_diskiod check-mk-1.2.6p12/aix_diskiod --- check-mk-1.2.2p3/aix_diskiod 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/aix_diskiod 2015-06-24 09:48:36.000000000 +0000 @@ -0,0 +1,70 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. + +# <<>> +# hdisk3 0.9 237.0 9.1 1337054478 1628926522 +# hdisk5 0.9 237.1 8.8 1333731705 1633897629 +# hdisk7 0.9 256.2 10.1 1537047014 1669194644 +# hdisk6 0.9 236.6 9.1 1334163361 1626627852 +# hdisk2 0.9 237.6 9.1 1334458233 1639383130 +# hdisk9 0.8 239.4 9.3 1337740029 1658392394 +# hdisk8 0.9 238.3 8.9 1332262996 1649741796 +# hdisk4 0.9 237.4 8.8 1332426157 1638419364 +# hdisk13 0.5 238.1 8.3 394246756 2585031872 +# hdisk11 0.5 238.3 8.3 397601918 2584807275 + +# Columns means: +# 1. device +# 2. % tm_act +# 3. Kbps +# 4. tps +# 5. Kb_read -> Kilobytes read since system boot +# 6. Kb_wrtn -> Kilobytes written since system boot + +def aix_diskiod_convert(info): + converted = [] + for node_info, device, tm_act, kbps, tps, kb_read, kb_written in info: + converted.append((node_info, device, int(kb_read) * 1024, int(kb_written) * 1024)) + return converted + + +def inventory_aix_diskiod(info): + return inventory_diskstat_generic(aix_diskiod_convert(info)) + + +def check_aix_diskiod(item, params, info): + return check_diskstat_generic(item, params, time.time(), aix_diskiod_convert(info), mode='bytes') + +check_info["aix_diskiod"] = { + 'check_function': check_aix_diskiod, + 'inventory_function': inventory_aix_diskiod, + 'service_description': 'Disk IO %s', + 'has_perfdata': True, + 'node_info': True, + 'group': 'disk_io', + 'includes': [ "diskstat.include" ], +} + diff -Nru check-mk-1.2.2p3/aix_lvm check-mk-1.2.6p12/aix_lvm --- check-mk-1.2.2p3/aix_lvm 2013-11-05 09:23:07.000000000 +0000 +++ check-mk-1.2.6p12/aix_lvm 2015-09-16 14:25:30.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . 
\ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. @@ -104,7 +104,7 @@ # Get structured LVM info lvmconf = parse_aix_lvm(info) - if target_lv in lvmconf[target_vg].keys(): + if target_vg in lvmconf and target_lv in lvmconf[target_vg]: msgtxt = [] state = 0 @@ -137,10 +137,10 @@ msgtxt = "LV is open/syncd" else: msgtxt = ", ".join(msgtxt) - return (state, nagios_state_names[state] + " - " + msgtxt) + return (state, msgtxt) - return (3, "UNKNOWN - no such volume found") + return (3, "no such volume found") check_info['aix_lvm'] = { diff -Nru check-mk-1.2.2p3/aix_memory check-mk-1.2.6p12/aix_memory --- check-mk-1.2.2p3/aix_memory 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/aix_memory 2015-06-24 09:48:36.000000000 +0000 @@ -0,0 +1,94 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# tails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. + +# Example output from agent: +# <<<aix_memory>>> +# 32702464 memory pages +# 31736528 lruable pages +# 858141 free pages +# 4 memory pools +# 6821312 pinned pages +# 80.0 maxpin percentage +# 3.0 minperm percentage +# 90.0 maxperm percentage +# 8.8 numperm percentage +# 2808524 file pages +# 0.0 compressed percentage +# 0 compressed pages +# 8.8 numclient percentage +# 90.0 maxclient percentage +# 2808524 client pages +# 0 remote pageouts scheduled +# 354 pending disk I/Os blocked with no pbuf +# 860832 paging space I/Os blocked with no psbuf +# 2228 filesystem I/Os blocked with no fsbuf +# 508 client filesystem I/Os blocked with no fsbuf +# 1372 external pager filesystem I/Os blocked with no fsbuf +# 88.8 percentage of memory used for computational pages +# allocated = 8257536 blocks used = 1820821 blocks free = 6436715 blocks +# +# The first part is the output of vmstat -v, the last line is the output +# of swap -s and shows the swap space usage. + +# Parse AIX vmstat output into something compatible with the Linux +# output from /proc/meminfo. AIX speaks of 4k pages while Linux of kilobytes.
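# For instance, with the sample output above (pure arithmetic, 4 kB pages):
#   32702464 memory pages * 4 -> MemTotal = 130809856 kB (~124.7 GiB)
#   858141 free pages * 4 -> MemFree = 3432564 kB
#   allocated = 8257536 blocks * 4 -> SwapTotal = 33030144 kB
#   free = 6436715 blocks * 4 -> SwapFree = 25746860 kB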
+def parse_aix_memory(info): + parsed = {} + for line in info: + if line[0] == "allocated": # Swap space + parsed["SwapTotal"] = int(line[2]) * 4 + parsed["SwapFree"] = int(line[10]) * 4 + else: + varname = " ".join(line[1:]) + if varname == "memory pages": + parsed["MemTotal"] = int(line[0]) * 4 + elif varname == "free pages": + parsed["MemFree"] = int(line[0]) * 4 + elif varname == "file pages": + parsed["Cached"] = int(line[0]) * 4 + return parsed + + +def check_aix_memory(_no_item, params, info): + meminfo = parse_aix_memory(info) + return check_memory(params, meminfo) + + +def inventory_aix_memory(info): + meminfo = parse_aix_memory(info) + if "MemFree" in meminfo: + return [(None, {})] + + +check_info['aix_memory'] = { + "check_function" : check_aix_memory, + "inventory_function" : inventory_aix_memory, + "service_description" : "Memory used", + "has_perfdata" : True, + "group" : "memory", + "default_levels_variable" : "memory_default_levels", + "includes" : [ "mem.include" ], +} diff -Nru check-mk-1.2.2p3/aix_multipath check-mk-1.2.6p12/aix_multipath --- check-mk-1.2.2p3/aix_multipath 2013-11-05 09:23:07.000000000 +0000 +++ check-mk-1.2.6p12/aix_multipath 2015-06-24 09:48:36.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. @@ -69,7 +69,7 @@ else: message.append(path_message) - return (state, nagios_state_names[state] + " - " + ", ".join(message)) + return (state, ", ".join(message)) check_info["aix_multipath"] = { diff -Nru check-mk-1.2.2p3/aix_packages check-mk-1.2.6p12/aix_packages --- check-mk-1.2.2p3/aix_packages 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/aix_packages 2015-09-21 10:59:54.000000000 +0000 @@ -0,0 +1,74 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. 
+ +# Example output from agent: +# <<>> +# #Package Name:Fileset:Level:State:PTF Id:Fix State:Type:Description:Destination Dir.:Uninstaller:Message Catalog:Message Set:Message Number:Parent:Automatic:EFIX Locked:Install Path:Build Date +# EMC:EMC.CLARiiON.aix.rte:6.0.0.3: : :C: :EMC CLARiiON AIX Support Software: : : : : : :0:0:/: +# EMC:EMC.CLARiiON.fcp.rte:6.0.0.3: : :C: :EMC CLARiiON FCP Support Software: : : : : : :0:0:/: +# ICU4C.rte:ICU4C.rte:7.1.2.0: : :C: :International Components for Unicode : : : : : : :0:0:/:1241 +# Java5.sdk:Java5.sdk:5.0.0.500: : :C:F:Java SDK 32-bit: : : : : : :0:0:/: +# Java5_64.sdk:Java5_64.sdk:5.0.0.500: : :C:F:Java SDK 64-bit: : : : : : :0:0:/: +# Java6.sdk:Java6.sdk:6.0.0.375: : :C:F:Java SDK 32-bit: : : : : : :0:0:/: + + +def inv_aix_packages(info): + paclist = inv_tree("software.packages:") + if not info: + return + headers = info[0] + headers[0] = headers[0].lstrip("#") + for line in info[1:]: + row = dict(zip(headers, map(lambda x: x.strip(), line))) + + # AIX Type codes + # Type codes: + # F -- Installp Fileset + # P -- Product + # C -- Component + # T -- Feature + # R -- RPM Package + # E -- Interim Fix + + if row["Type"] == "R": + package_type = "rpm" + elif row["Type"]: + package_type = "aix_" + row["Type"].lower() + else: + package_type = "aix" + + entry = { + "name" : row["Package Name"], + "summary" : row["Description"], + "version" : row["Level"], + "package_type" : package_type, + } + paclist.append(entry) + + +inv_info['aix_packages'] = { + "inv_function" : inv_aix_packages, +} diff -Nru check-mk-1.2.2p3/aix_service_packs check-mk-1.2.6p12/aix_service_packs --- check-mk-1.2.2p3/aix_service_packs 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/aix_service_packs 2015-06-24 09:48:37.000000000 +0000 @@ -0,0 +1,34 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. 
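For the colon-separated package listing parsed by inv_aix_packages above, the header/zip step maps each record to a dict keyed by column name. A standalone sketch of that step, with a shortened field list and values taken from the first sample line (the full header row has all 18 columns):

    headers = ["#Package Name", "Fileset", "Level", "State",
               "PTF Id", "Fix State", "Type"]
    headers[0] = headers[0].lstrip("#")       # "#Package Name" -> "Package Name"
    line = ["EMC", "EMC.CLARiiON.aix.rte", "6.0.0.3", " ", " ", "C", " "]
    row = dict(zip(headers, [x.strip() for x in line]))   # same as map(strip)
    # row["Package Name"] == "EMC", row["Level"] == "6.0.0.3"
    # row["Type"] == "" -> package_type falls back to plain "aix"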
+ +def inv_aix_service_packs(info): + node = inv_tree("software.os.service_packs:") + for line in info: + node.append({ "name" : line[0]}) + +inv_info['aix_service_packs'] = { + "inv_function" : inv_aix_service_packs, +} diff -Nru check-mk-1.2.2p3/akcp_daisy_temp check-mk-1.2.6p12/akcp_daisy_temp --- check-mk-1.2.2p3/akcp_daisy_temp 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/akcp_daisy_temp 2015-09-21 10:59:54.000000000 +0000 @@ -0,0 +1,69 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. + +akcp_daisy_temp_defaultlevels = (28, 32) + + +def inventory_akcp_daisy_temp(info): + inventory = [] + for port, subport, name, temp in info: + # Ignore sensors that are found by the non-daisychaining-version of + # this check (akcp_sensor_temp) + if subport not in [ '-1', '0' ]: + inventory.append((name, 'akcp_daisy_temp_defaultlevels' )) + return inventory + + +def check_akcp_daisy_temp(item, params, info): + for port, subport, name, rawtemp in info: + if name == item: + temp = float(rawtemp) / 10 + return check_temperature(temp, params) + + return 3, "Sensor not found in SNMP data" + + +check_info["akcp_daisy_temp"] = { + 'check_function': check_akcp_daisy_temp, + 'inventory_function': inventory_akcp_daisy_temp, + 'service_description': 'Temperature %s', + 'has_perfdata': True, + 'snmp_info': ( + ".1.3.6.1.4.1.3854.1.2.2.1.19.33", + [ 1, 2, 3, 4, 5, 6, 7, 8 ], + [ + OID_END, # Port/Subport + "2.1.1", # subport + "2.1.2", # Sensor Name + "2.1.14", # Degree Celsius + ] + ), + 'snmp_scan_function': lambda oid: oid(".1.3.6.1.2.1.1.2.0") in \ + [ ".1.3.6.1.4.1.3854.1.2.2.1.1", ".1.3.6.1.4.1.3854.1" ] \ + and oid('.1.3.6.1.4.1.3854.1.2.2.1.19.*'), + 'group' : 'room_temperature', + 'includes' : [ 'temperature.include' ], +} diff -Nru check-mk-1.2.2p3/akcp_sensor_drycontact check-mk-1.2.6p12/akcp_sensor_drycontact --- check-mk-1.2.2p3/akcp_sensor_drycontact 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/akcp_sensor_drycontact 2015-09-21 10:59:54.000000000 +0000 @@ -0,0 +1,52 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . 
\ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. + +def inventory_akcp_sensor_drycontact(info): + return [ ( x[0], None ) for x in info if x[2] == "1" ] + +def check_akcp_sensor_drycontact(item, _no_params, info): + for line in info: + if item == line[0]: + state = saveint(line[1]) + if state == 2: + return 0, "Drycontact OK" + return 2, "Drycontact on Error" + return 3, "Drycontact not found anymore" + +check_info["akcp_sensor_drycontact"] = { + "check_function" : check_akcp_sensor_drycontact, + "inventory_function" : inventory_akcp_sensor_drycontact, + "service_description" : "Device %s", + "has_perfdata" : False, + 'snmp_scan_function' : lambda oid: oid(".1.3.6.1.2.1.1.2.0") in\ + [ ".1.3.6.1.4.1.3854.1.2.2.1.1", ".1.3.6.1.4.1.3854.1" ], + 'snmp_info' : ( ".1.3.6.1.4.1.3854.1.2.2.1.18.1" , [ + 1, #Sensor Name + 3, #Sensor Status (2 = OK, 4 = Not ok) + 5, #Sensor Online + ] ), +} + diff -Nru check-mk-1.2.2p3/akcp_sensor_humidity check-mk-1.2.6p12/akcp_sensor_humidity --- check-mk-1.2.2p3/akcp_sensor_humidity 2013-11-05 09:23:07.000000000 +0000 +++ check-mk-1.2.6p12/akcp_sensor_humidity 2015-09-21 10:59:54.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. @@ -25,7 +25,7 @@ # Boston, MA 02110-1301 USA. 
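In the drycontact check just added, the SNMP columns arrive as (name, status, online); a sensor is inventoried only while online is "1", and status "2" is the only healthy value. A hypothetical walk-through (the sensor name "Door contact" is illustrative):

    # info = [["Door contact", "2", "1"]]
    # inventory -> [("Door contact", None)]            (online == "1")
    # check("Door contact", None, info) -> (0, "Drycontact OK")
    # any status other than "2" -> (2, "Drycontact on Error")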
# +------------------------------------------------------------------+ -# | This file has been contributed by: | +# | The initial version of this file has been contributed by: | # | | # | Michael Nieporte | # +------------------------------------------------------------------+ @@ -36,7 +36,7 @@ inventory = [] for desc, humidity, status, online in info: if online == "1": - inventory.append( (desc, "", "akcp_sensor_humidity_defaultlevels") ) + inventory.append( (desc, "akcp_sensor_humidity_defaultlevels") ) return inventory def check_akcp_sensor_humidity(item, params, info): @@ -44,41 +44,44 @@ if desc == item: critlow, warnlow, warnhigh, crithigh = params if status == "7": - return (3, "CRIT - Sensor Error") + return (3, "Sensor Error") else: if online == "1": inthumidity = saveint(humidity) - infotext = "%s %%" % humidity + " (warn/crit below %.1f/%.1f or above %.1f/%.1f %%" % \ - (critlow, warnlow, warnhigh, crithigh) + ")" + infotext = "%s%% (warn/crit below %.1f/%.1f or above %.1f/%.1f %%)" % \ + ( humidity, warnlow, critlow, warnhigh, crithigh) perfdata = [ ( "humidity", humidity, critlow, warnlow, warnhigh, crithigh ) ] if inthumidity <= critlow or inthumidity >= crithigh: - return (2, "CRIT - Humidity is: " + infotext, perfdata ) + return 2, "Humidity is: " + infotext, perfdata elif inthumidity <= warnlow or inthumidity >= warnhigh: - return (1, "WARN - Humidity is: " + infotext, perfdata ) + return 1, "Humidity is: " + infotext, perfdata else: - return (0, "OK - Humidity is: " + infotext, perfdata ) + return 0, "Humidity is: " + infotext, perfdata else: - return (3, "UNKNOWN - Sensor is offline") - return (3, "UNKNOWN - Sensor not found") + return 3, "Sensor is offline" + return 3, "Sensor not found" -check_info['akcp_sensor_humidity'] = ( check_akcp_sensor_humidity, "Humidity Sensor - %s", 1, inventory_akcp_sensor_humidity) -snmp_info['akcp_sensor_humidity'] = ( - ".1.3.6.1.4.1.3854.1.2.2.1.17.1", [ - 1, #sensorProbeHumidityDescription - 3, #sensorProbeHumidityPercent - 4, #sensorProbeHumidityStatus - The current status of this Sensor - 5, #sensorProbeHumidityOnline - online(1), offline(2) - ], -) # possible values for sensorProbeHumidityStatus: # noStatus(1), normal(2), highWarning(3), highCritical(4), # lowWarning(5), lowCritical(6), sensorError(7) - -snmp_scan_functions['akcp_sensor_humidity'] = \ - lambda oid: "AKCP" in oid(".1.3.6.1.4.1.3854.1.1.6.0") and "sensorProbe" in oid(".1.3.6.1.4.1.3854.1.1.8.0") - -checkgroup_of['akcp_sensor_humidity'] = "akcp_humidity" +check_info["akcp_sensor_humidity"] = { + 'check_function': check_akcp_sensor_humidity, + 'inventory_function': inventory_akcp_sensor_humidity, + 'service_description': 'Humidity Sensor - %s', + 'has_perfdata': True, + 'snmp_info': ( + ".1.3.6.1.4.1.3854.1.2.2.1.17.1", [ + 1, #sensorProbeHumidityDescription + 3, #sensorProbeHumidityPercent + 4, #sensorProbeHumidityStatus - The current status of this Sensor + 5, #sensorProbeHumidityOnline - online(1), offline(2) + ], + ), + 'snmp_scan_function': lambda oid: oid(".1.3.6.1.2.1.1.2.0") in\ + [ ".1.3.6.1.4.1.3854.1.2.2.1.1", ".1.3.6.1.4.1.3854.1" ], + 'group': 'humidity', +} diff -Nru check-mk-1.2.2p3/akcp_sensor_temp check-mk-1.2.6p12/akcp_sensor_temp --- check-mk-1.2.2p3/akcp_sensor_temp 2013-11-05 09:23:07.000000000 +0000 +++ check-mk-1.2.6p12/akcp_sensor_temp 2015-09-21 10:59:54.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . 
\ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. @@ -31,66 +31,51 @@ # +------------------------------------------------------------------+ -akcp_sensor_temp_defaultlevels = (28, 32) +akcp_sensor_temp_defaultlevels = (32, 35) def inventory_akcp_sensor_temp(info): - inventory = [] - for desc, degree, status, online, degreetype, degreeraw in info: - if online == "1": - inventory.append( (desc, "akcp_sensor_temp_defaultlevels") ) - return inventory + return [ (x[0], "akcp_sensor_temp_defaultlevels" ) for x in info if x[3] == "1" ] def check_akcp_sensor_temp(item, params, info): for desc, degree, status, online, degreetype, degreeraw in info: if desc == item: - if degreetype == "0": - type = "F" - else: - type = "C" - warn, crit = params + if status == "7": + return 3, "Sensor Error" + elif online != "1": + return 3, "Sensor is offline" + if degreeraw != "": - temp = saveint(degreeraw) / 10.0 + temp = int(degreeraw) / 10.0 else: temp = float(degree) - if status == "7": - return (3, "UNKNOWN - Sensor Error") - else: - if online == "1": - # break temp down to celsius if it had been defined as Fahrenheit. - if type == "F": - temp = to_celsius(temp) - type = "C" - infotext = "%.1f " % temp + type + " (warn/crit at %.1f/%.1f " % (warn, crit) + type + ")" - perfdata = [ ( "temperature", temp, warn, crit ) ] - if temp >= crit: - return (2, "CRIT - Temperature is: " + infotext, perfdata) - elif temp >= warn: - return (1, "WARN - Temperature is: " + infotext, perfdata) - else: - return (0, "OK - Temperature is: " + infotext, perfdata ) - else: - return (3, "UNKNOWN - Sensor is offline") - return (3, "UNKNOWN - Sensor not found") - - -check_info['akcp_sensor_temp'] = ( check_akcp_sensor_temp, "Temperature %s", 1, inventory_akcp_sensor_temp) - -snmp_info['akcp_sensor_temp'] = ( - ".1.3.6.1.4.1.3854.1.2.2.1.16.1", [ - 1, #sensorProbeTempDescription - 3, #sensorProbeTempDegree - 4, #sensorProbeTempStatus - 5, #sensorProbeTempOnline - online(1), offline(2) - 12, #sensorProbeTempDegreeType - The degrees are in fahrenheit(0) or in celsius(1) - 14, #sensorProbeTempDegreeRaw - SP Only: The raw degree data from the temperature sensor, slightly higher resolution - ], -) + + # break temp down to celsius if it had been defined as Fahrenheit. 
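# A worked example of the conversion below, assuming the stock
# fahrenheit_to_celsius() helper computes (F - 32) * 5 / 9 and a
# hypothetical reading: degreeraw "986" with degreetype "0"
# -> temp = 98.6 F -> 37.0 C.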
+ if degreetype == "0": + temp = fahrenheit_to_celsius(temp) + + return check_temperature(temp, params) + # possible values for sensorProbeTempStatus: # noStatus(1), normal(2), highWarning(3), highCritical(4), # lowWarning(5), lowCritical(6), sensorError(7) - - -snmp_scan_functions['akcp_sensor_temp'] = \ - lambda oid: "AKCP" in oid(".1.3.6.1.4.1.3854.1.1.6.0") and "sensorProbe" in oid(".1.3.6.1.4.1.3854.1.1.8.0") -checkgroup_of['akcp_sensor_temp'] = "room_temperature" +check_info["akcp_sensor_temp"] = { + 'check_function': check_akcp_sensor_temp, + 'inventory_function': inventory_akcp_sensor_temp, + 'service_description': 'Temperature %s', + 'has_perfdata': True, + 'snmp_info': ( + ".1.3.6.1.4.1.3854.1.2.2.1.16.1", [ + 1, #sensorProbeTempDescription + 3, #sensorProbeTempDegree + 4, #sensorProbeTempStatus + 5, #sensorProbeTempOnline - online(1), offline(2) + 12, #sensorProbeTempDegreeType - The degrees are in fahrenheit(0) or in celsius(1) + 14, #sensorProbeTempDegreeRaw - SP Only: The raw degree data from the temperature sensor, slightly higher resolution + ], + ), + 'snmp_scan_function': lambda oid: oid(".1.3.6.1.2.1.1.2.0") in\ + [ ".1.3.6.1.4.1.3854.1.2.2.1.1", ".1.3.6.1.4.1.3854.1" ], + 'group' : 'room_temperature', + 'includes' : [ 'temperature.include' ], +} diff -Nru check-mk-1.2.2p3/alcatel_power check-mk-1.2.6p12/alcatel_power --- check-mk-1.2.2p3/alcatel_power 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/alcatel_power 2015-09-21 10:59:54.000000000 +0000 @@ -0,0 +1,52 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# tails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA.
+ +def inventory_alcatel_power(info): + return [ (x[0], None) for x in info if x[2] in ['0x35000001', '0x45000004', '0x45000002', '0x45000009']] + +def check_alcatel_power(item, _no_params, info): + for line in info: + if item == line[0]: + if line[2] not in ['0x35000001', '0x45000004', '0x45000002', '0x45000009']: + return 3, "No Power supply connected to this port" + if int(line[1]) != 1: + return 2, "Supply in error condition (%s)" % line[1] + return 0, "Supply status OK" + return 3, "Supply not found" + +check_info["alcatel_power"] = { + "check_function" : check_alcatel_power, + "inventory_function" : inventory_alcatel_power, + "service_description" : "Power Supply %s", + "has_perfdata" : False, + "snmp_scan_function" : lambda oid: oid(".1.3.6.1.2.1.1.2.0").startswith(".1.3.6.1.4.1.6486.800"), + "snmp_info" : (".1.3.6.1.4.1.6486.800.1.1.1.1.1.1.1", + [OID_END, + 2, # Error Status + 5 # Device Type (0x35000001 == Power Supply) + ]), +} + diff -Nru check-mk-1.2.2p3/allnet_ip_sensoric check-mk-1.2.6p12/allnet_ip_sensoric --- check-mk-1.2.2p3/allnet_ip_sensoric 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/allnet_ip_sensoric 2015-09-21 10:59:54.000000000 +0000 @@ -0,0 +1,288 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. + +# Example output from agent: + +# <<>> +# AgentOS:;ALL5000 +# <<>> +# sensor0.alarm0;0 +# sensor0.all4000_typ;0 +# sensor0.function;1 +# sensor0.limit_high;50.00 +# sensor0.limit_low;10.00 +# sensor0.maximum;28.56 +# sensor0.minimum;27.50 +# sensor0.name;Temperatur intern +# sensor0.value_float;27.50 +# sensor0.value_int;2750 +# sensor0.value_string;27.50 +# sensor1.alarm1;0 +# sensor1.all4000_typ;0 +# sensor1.function;3 +# sensor1.limit_high;50.00 +# sensor1.limit_low;-0.50 +# sensor1.maximum;0.00 +# sensor1.minimum;2048000.00 +# sensor1.name;ADC 0 +# sensor1.value_float;0.00 +# sensor1.value_int;0 +# sensor1.value_string;0.00 +# [...] 
+# sensor9.alarm9;1 +# sensor9.all4000_typ;101 +# sensor9.function;12 +# sensor9.limit_high;85.00 +# sensor9.limit_low;10.00 +# sensor9.maximum;100.00 +# sensor9.minimum;2048000.02 +# sensor9.name;USV Spannung +# sensor9.value_float;100.00 +# sensor9.value_int;100 +# sensor9.value_string;100 +# system.alarmcount;4 +# system.date;30.06.2014 +# system.devicename;all5000 +# system.devicetype;ALL5000 +# system.sys;114854 +# system.time;16:08:48 + +# parses agent output in a structure like: +# {'sensor0': {'alarm0': '0', +# 'all4000_typ': '0', +# 'function': '1', +# 'limit_high': '50.00', +# 'limit_low': '10.00', +# 'maximum': '28.56', +# 'minimum': '27.43', +# 'name': 'Temperatur intern', +# 'value_float': '27.50', +# 'value_int': '2750', +# 'value_string': '27.50'}, +# [...] +# 'system': {'alarmcount': '4', +# 'date': '30.06.2014', +# 'devicename': 'all5000', +# 'devicetype': 'ALL5000', +# 'sys': '116240', +# 'time': '16:57:50'}} + + +def allnet_ip_sensoric_parse(info): + parsed = {} + for key, value in info: + match = re.search('(\w+)\.(\w+)', key) + if match: + sensor = match.group(1) + field = match.group(2) + parsed.setdefault(sensor, {}) + parsed[sensor][field] = value + + return parsed + +def allnet_ip_sensoric_compose_item(sensor_id, sensor): + sensor_id = re.sub("sensor", "", sensor_id) + if "name" in sensor.keys(): + item = "%s Sensor %s" % (sensor["name"], sensor_id) + else: + item = "Sensor %s" % (sensor_id) + return item + +# .--el. tension---------------------------------------------------------. +# | _ _ _ | +# | ___| | | |_ ___ _ __ ___(_) ___ _ __ | +# | / _ \ | | __/ _ \ '_ \/ __| |/ _ \| '_ \ | +# | | __/ |_ | || __/ | | \__ \ | (_) | | | | | +# | \___|_(_) \__\___|_| |_|___/_|\___/|_| |_| | +# | | +# '----------------------------------------------------------------------' + +def inventory_allnet_ip_sensoric_tension(info): + parsed = allnet_ip_sensoric_parse(info) + inventory = [] + for sensor in parsed.keys(): + if "function" in parsed[sensor].keys() and parsed[sensor]["function"] == "12": + item = allnet_ip_sensoric_compose_item(sensor, parsed[sensor]) + #print "sensor: %s, item: %s" % (sensor, item) + inventory.append( (item, None) ) + return inventory + +def check_allnet_ip_sensoric_tension(item, _no_params, info): + parsed = allnet_ip_sensoric_parse(info) + sensor_id = "sensor" + re.sub(".+Sensor ", "", item) + + if sensor_id not in parsed.keys(): + return 3, "%s not found in agent output" % item + + value = float(parsed[sensor_id]["value_float"]) + + perfdata = [ ("tension", "%0.2f" % value, '', '', 0, 100) ] + + status = 0 + if value == 0: + status = 2 + + return status, "%s is at %0.2f" % (item, value), perfdata + +check_info["allnet_ip_sensoric.tension"] = { + "check_function" : check_allnet_ip_sensoric_tension, + "inventory_function" : inventory_allnet_ip_sensoric_tension, + "service_description" : "Electric Tension %s", + "has_perfdata" : True, +} + +#. +# .--temp----------------------------------------------------------------. 
+# | _ | +# | | |_ ___ _ __ ___ _ __ | +# | | __/ _ \ '_ ` _ \| '_ \ | +# | | || __/ | | | | | |_) | | +# | \__\___|_| |_| |_| .__/ | +# | |_| | +# '----------------------------------------------------------------------' + +allnet_ip_sensoric_temp_default_levels = (35, 40) + +def inventory_allnet_ip_sensoric_temp(info): + parsed = allnet_ip_sensoric_parse(info) + inventory = [] + for sensor in parsed.keys(): + if (parsed[sensor].get('function') and parsed[sensor]["function"] == "1") \ + or (parsed[sensor].get('unit') and parsed[sensor]['unit'] == '°C'): + item = allnet_ip_sensoric_compose_item(sensor, parsed[sensor]) + inventory.append( (item, "allnet_ip_sensoric_temp_default_levels") ) + return inventory + +def check_allnet_ip_sensoric_temp(item, params, info): + parsed = allnet_ip_sensoric_parse(info) + sensor_id = "sensor" + re.sub(".+Sensor ", "", item) + + if sensor_id not in parsed.keys(): + return 3, "Sensor not found in agent output" + + temp = float(parsed[sensor_id]["value_float"]) + + return check_temperature(temp, params) + +check_info["allnet_ip_sensoric.temp"] = { + "check_function" : check_allnet_ip_sensoric_temp, + "inventory_function" : inventory_allnet_ip_sensoric_temp, + "service_description" : "Temperature %s", + "has_perfdata" : True, + "group" : "hw_temperature", + "includes" : [ "temperature.include" ], +} + +#. +# .--humidity------------------------------------------------------------. +# | _ _ _ _ _ | +# | | |__ _ _ _ __ ___ (_) __| (_) |_ _ _ | +# | | '_ \| | | | '_ ` _ \| |/ _` | | __| | | | | +# | | | | | |_| | | | | | | | (_| | | |_| |_| | | +# | |_| |_|\__,_|_| |_| |_|_|\__,_|_|\__|\__, | | +# | |___/ | +# '----------------------------------------------------------------------' + +allnet_ip_sensoric_humidity_default_levels = ( 35, 40, 60, 65 ) + +def inventory_allnet_ip_sensoric_humidity(info): + parsed = allnet_ip_sensoric_parse(info) + inventory = [] + for sensor in parsed.keys(): + if "function" in parsed[sensor].keys() and parsed[sensor]["function"] == "2": + item = allnet_ip_sensoric_compose_item(sensor, parsed[sensor]) + inventory.append( (item, "allnet_ip_sensoric_humidity_default_levels") ) + return inventory + +def check_allnet_ip_sensoric_humidity(item, params, info): + critlow, warnlow, warnhigh, crithigh = params + parsed = allnet_ip_sensoric_parse(info) + sensor_id = "sensor" + re.sub(".+Sensor ", "", item) + + if sensor_id not in parsed.keys(): + return 3, "%s not found in agent output" % item + + humidity = float(parsed[sensor_id]["value_float"]) + + perfdata = [ ("humidity", humidity, critlow, warnlow, warnhigh, crithigh) ] + + if humidity <= critlow or humidity >= crithigh: + status = 2 + elif humidity <= warnlow or humidity >= warnhigh: + status = 1 + else: + status = 0 + + return status, "%s is %0.2f %%" % (item, humidity), perfdata + +check_info["allnet_ip_sensoric.humidity"] = { + "check_function" : check_allnet_ip_sensoric_humidity, + "inventory_function" : inventory_allnet_ip_sensoric_humidity, + "service_description" : "Humidity %s", + "has_perfdata" : True, + "group" : "humidity", +} + +#. +# .--pressure------------------------------------------------------------. 
+# | | +# | _ __ _ __ ___ ___ ___ _ _ _ __ ___ | +# | | '_ \| '__/ _ \/ __/ __| | | | '__/ _ \ | +# | | |_) | | | __/\__ \__ \ |_| | | | __/ | +# | | .__/|_| \___||___/___/\__,_|_| \___| | +# | |_| | +# '----------------------------------------------------------------------' + +def inventory_allnet_ip_sensoric_pressure(info): + parsed = allnet_ip_sensoric_parse(info) + inventory = [] + for sensor in parsed.keys(): + if "function" in parsed[sensor].keys() and parsed[sensor]["function"] == "16": + item = allnet_ip_sensoric_compose_item(sensor, parsed[sensor]) + inventory.append( (item, None) ) + return inventory + +def check_allnet_ip_sensoric_pressure(item, _no_params, info): + parsed = allnet_ip_sensoric_parse(info) + sensor_id = "sensor" + re.sub(".+Sensor ", "", item) + + if sensor_id not in parsed.keys(): + return 3, "%s not found in agent output" % item + + pressure = float(parsed[sensor_id]["value_float"]) / 1000 + + perfdata = [ ("pressure", str(pressure) + "bars", None, None, 0 ) ] + + return 0, "%s is %0.5f bars" % (item, pressure), perfdata + +check_info["allnet_ip_sensoric.pressure"] = { + "check_function" : check_allnet_ip_sensoric_pressure, + "inventory_function" : inventory_allnet_ip_sensoric_pressure, + "service_description" : "Pressure %s", + "has_perfdata" : True, +} + +#. diff -Nru check-mk-1.2.2p3/allnet_ip_sensoric.humidity check-mk-1.2.6p12/allnet_ip_sensoric.humidity --- check-mk-1.2.2p3/allnet_ip_sensoric.humidity 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/allnet_ip_sensoric.humidity 2015-06-24 09:48:36.000000000 +0000 @@ -0,0 +1,46 @@ +title: ALLNET IP Sensoric: Humidity Sensors +agents: allnet_ip_sensoric +catalog: hw/environment/allnet +license: GPL +distribution: check_mk +description: + Checks Humidity Sensors in ALLNET IP Sensoric devices. + + The check returns {WARN} or {CRIT} if the humidity in percent is higher + than the given upper levels or lower than the given lower levels. + Otherwise it returns {OK}. + +item: + If a name is reported for the sensor by the device: The name and the + sensor ID. Otherwise the sensor ID only. + +perfdata: + One value: The humidity in percent, together with its lower and upper + levels. + +inventory: + Creates one check for every Humidity Sensor (function = 2).
+ +examples: + # set the default lower levels to 30 and 40 % and + # the default upper levels to 70 and 75 % + allnet_ip_sensoric_humidity_default_levels = ( 30, 40, 70, 75 ) + + # Check humidity of named sensor on an ALLNET IP Sensoric called sen01 + # with default levels + checks += [ + ("sen01", "allnet_ip_sensoric.humidity", 'Luftfeuchte Rack Sensor 12', allnet_ip_sensoric_humidity_default_levels) + ] + + # or use individual levels for warn and crit + checks += [ + ("sen01", "allnet_ip_sensoric.humidity", 'Luftfeuchte Rack Sensor 12', (10, 20, 80, 90)) + ] + +[parameters] +parameters (int, int, int, int): lower CRIT, lower WARN, upper WARN and + upper CRIT levels for humidity in percent + +[configuration] +allnet_ip_sensoric_humidity_default_levels (int, int, int, int): The standard levels + preset to ( 35, 40, 60, 65 ) diff -Nru check-mk-1.2.2p3/allnet_ip_sensoric.pressure check-mk-1.2.6p12/allnet_ip_sensoric.pressure --- check-mk-1.2.2p3/allnet_ip_sensoric.pressure 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/allnet_ip_sensoric.pressure 2015-06-24 09:48:36.000000000 +0000 @@ -0,0 +1,19 @@ +title: ALLNET IP Sensoric: Pressure Sensors +agents: allnet_ip_sensoric +catalog: hw/environment/allnet +license: GPL +distribution: check_mk +description: + Reports values of Pressure Sensors in ALLNET IP Sensoric devices. + + This check is for creating performance data only and always returns {OK}. + +item: + If a name is reported for the sensor by the device: The name and the + sensor ID. Otherwise the sensor ID only. + +perfdata: + One value: The pressure in bars. + +inventory: + Creates one check for every Pressure Sensor (function = 16). diff -Nru check-mk-1.2.2p3/allnet_ip_sensoric.temp check-mk-1.2.6p12/allnet_ip_sensoric.temp --- check-mk-1.2.2p3/allnet_ip_sensoric.temp 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/allnet_ip_sensoric.temp 2015-06-24 09:48:36.000000000 +0000 @@ -0,0 +1,44 @@ +title: ALLNET IP Sensoric: Temperature Sensors +agents: allnet_ip_sensoric +catalog: hw/environment/allnet +license: GPL +distribution: check_mk +description: + Checks Temperature Sensors in ALLNET IP Sensoric devices. + + The check returns {WARN} or {CRIT} if the temperature in degrees Celsius is higher + than the given levels and {OK} otherwise. + +item: + If a name is reported for the sensor by the device: The name and the + sensor ID. Otherwise the sensor ID only. + +perfdata: + One value: The temperature in degrees Celsius, together with its minimum and + maximum and levels for warn and crit. + +inventory: + Creates one check for every Temperature Sensor (function = 1).
+ +examples: + # set default levels to 30 and 45 °C: + allnet_ip_sensoric_temp_default_levels = (30, 45) + + # Check temperature of named sensor on an ALLNET IP Sensoric called sen01 + # with default levels + checks += [ + ("sen01", "allnet_ip_sensoric.temp", 'Temperatur Rack Sensor 11', allnet_ip_sensoric_temp_default_levels) + ] + + # or use individual levels for warn and crit + checks += [ + ("sen01", "allnet_ip_sensoric.temp", 'Temperatur Rack Sensor 11', (40, 50)) + ] + +[parameters] +parameters (int, int): temperature levels in degrees Celsius for {WARN} and {CRIT} + +[configuration] +allnet_ip_sensoric_temp_default_levels (int, int): The standard levels + for {WARN} and {CRIT}, preset to (35, 40) + diff -Nru check-mk-1.2.2p3/allnet_ip_sensoric.tension check-mk-1.2.6p12/allnet_ip_sensoric.tension --- check-mk-1.2.2p3/allnet_ip_sensoric.tension 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/allnet_ip_sensoric.tension 2015-06-24 09:48:36.000000000 +0000 @@ -0,0 +1,19 @@ +title: ALLNET IP Sensoric: Electric Tension Sensors +agents: allnet_ip_sensoric +catalog: hw/environment/allnet +license: GPL +distribution: check_mk +description: + Checks Electric Tension Sensors in ALLNET IP Sensoric devices. + + Returns {CRIT} if the reported value is 0 and {OK} otherwise. + +item: + If a name is reported for the sensor by the device: The name and the + sensor ID. Otherwise the sensor ID only. + +perfdata: + Exactly one number: The value as reported by the device. + +inventory: + Creates one check for every Electric Tension Sensor (function = 12). diff -Nru check-mk-1.2.2p3/apache_status check-mk-1.2.6p12/apache_status --- check-mk-1.2.2p3/apache_status 2013-11-05 09:23:07.000000000 +0000 +++ check-mk-1.2.6p12/apache_status 2015-09-21 10:59:54.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. @@ -24,23 +24,38 @@ # to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, # Boston, MA 02110-1301 USA.
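The apache_status parser below splits each agent line into address, port, a label and a value. A minimal sketch of that step on one hypothetical line of agent output (values are illustrative):

    line = ["127.0.0.1", "80", "Total", "Accesses:", "12345"]
    address, port = line[:2]                     # "127.0.0.1", "80"
    label = (" ".join(line[2:-1])).rstrip(":")   # "Total Accesses"
    value = int(line[-1])                        # 12345, via the field table's convert-func
    item = "%s:%s" % (address, port)             # service item "127.0.0.1:80"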
-apache_status_fields = { - # key sort convert-func - 'Uptime': (0, int), - 'IdleWorkers': (5, int), - 'BusyWorkers': (6, int), - 'OpenSlots': (7, int), - 'TotalSlots': (8, int), - 'Total Accesses': (9, int), - 'CPULoad': (10, float), - 'Total kBytes': (11, float), - 'ReqPerSec': (12, float), - 'BytesPerReq': (13, float), - 'BytesPerSec': (14, float), - 'Scoreboard': (15, str), + +def apache_status_open_slots(value, params): + warn, crit = params + extra_perf = [ warn, crit ] + if value <= crit: + return 2, "(!!)", extra_perf + elif value <= warn: + return 1, "(!)", extra_perf + else: + return 0, "", extra_perf + +_apache_status_fields = { + # key sort convert-func param_function + 'Uptime': (0, int), + 'IdleWorkers': (5, int), + 'BusyWorkers': (6, int), + 'OpenSlots': (7, int, apache_status_open_slots), + 'TotalSlots': (8, int), + 'Total Accesses': (9, int), + 'CPULoad': (10, float), + 'Total kBytes': (11, float), + 'ReqPerSec': (12, float), + 'BytesPerReq': (13, float), + 'BytesPerSec': (14, float), + 'Scoreboard': (15, str), + 'ConnsTotal': (16, int), + 'ConnsAsyncWriting': (17, int), + 'ConnsAsyncKeepAlive': (18, int), + 'ConnsAsyncClosing': (19, int), } -apache_status_stats = { +_apache_status_stats = { 'Waiting': '_', 'StartingUp': 'S', 'ReadingRequest': 'R', @@ -57,8 +72,10 @@ data = {} for line in info: address, port = line[:2] + if len(line) != 4 and not (len(line) == 5 and line[2] == 'Total'): + continue # Skip unexpected lines label = (' '.join(line[2:-1])).rstrip(':') - value = apache_status_fields[label][1](line[-1]) + value = _apache_status_fields[label][1](line[-1]) item = '%s:%s' % (address, port) if item not in data: @@ -66,7 +83,7 @@ # Get statistics from scoreboard if label == 'Scoreboard': - for stat_label, key in apache_status_stats.items(): + for stat_label, key in _apache_status_stats.items(): data[item]['State_' + stat_label] = value.count(key) data[item]['OpenSlots'] = value.count('.') @@ -85,32 +102,47 @@ data = apache_status_parse(info) inv = [] for item in data.keys(): - inv.append((item, None)) + inv.append((item, {})) return inv -def check_apache_status(item, _no_params, info): +def check_apache_status(item, params, info): + if params == None: + params = {} + all_data = apache_status_parse(info) if item not in all_data: - return (3, 'UNKNOWN - Unable to find instance in agent output') + return (3, 'Unable to find instance in agent output') data_dict = all_data[item] + + this_time = int(time.time()) + + if "Total Accesses" in data_dict: + data_dict["ReqPerSec"] = get_rate("apache_status_%s_accesses" % item, this_time, + data_dict["Total Accesses"]) + del(data_dict["Total Accesses"]) + if "Total kBytes" in data_dict: + data_dict["BytesPerSec"] = get_rate("apache_status_%s_bytes" % item, this_time, + data_dict["Total kBytes"] * 1024) + del(data_dict["Total kBytes"]) + data = data_dict.items() - status = 0 - output = [] - perfdata = [] + worst_state = 0 + output = [] + perfdata = [] # Sort keys - data.sort(cmp = lambda x, y: cmp(apache_status_fields.get(x[0], (0, None))[0], - apache_status_fields.get(y[0], (0, None))[0])) + data.sort(cmp = lambda x, y: cmp(_apache_status_fields.get(x[0], (0, None))[0], + _apache_status_fields.get(y[0], (0, None))[0])) for key, value in data: - if key not in apache_status_fields.keys(): + if key not in _apache_status_fields.keys(): continue # Don't process the scoreboard data directly. 
Print states instead if key == 'Scoreboard': states = [] - for stat_label, key in apache_status_stats.items(): + for stat_label, key in _apache_status_stats.items(): val = data_dict.get('State_' + stat_label, 0) if val > 0: states.append('%s: %d' % (stat_label, val)) @@ -125,14 +157,21 @@ else: display_value = '%d' % value - output.append('%s: %s' % (key, display_value)) - perfdata.append((key.replace(' ', '_'), value)) + extra_info = "" + extra_perf = [] + if params.get(key) and len(_apache_status_fields[key]) > 2: + key_state, extra_info, extra_perf = _apache_status_fields[key][2](value, params.get(key)) + worst_state = max(key_state, worst_state) + + output.append('%s: %s%s' % (key, display_value, extra_info)) + perfdata.append(tuple([key.replace(' ', '_'), value] + extra_perf)) - return (status, '%s - %s' % (nagios_state_names[status], ', '.join(output)), perfdata) + return (worst_state, ', '.join(output), perfdata) check_info['apache_status'] = { "check_function" : check_apache_status, "inventory_function" : inventory_apache_status, "service_description" : "Apache %s Status", "has_perfdata" : True, + "group" : "apache_status" } diff -Nru check-mk-1.2.2p3/apache_status.cfg check-mk-1.2.6p12/apache_status.cfg --- check-mk-1.2.2p3/apache_status.cfg 2013-11-05 09:22:37.000000000 +0000 +++ check-mk-1.2.6p12/apache_status.cfg 1970-01-01 00:00:00.000000000 +0000 @@ -1,35 +0,0 @@ -# +------------------------------------------------------------------+ -# | ____ _ _ __ __ _ __ | -# | / ___| |__ ___ ___| | __ | \/ | |/ / | -# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | -# | | |___| | | | __/ (__| < | | | | . \ | -# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | -# | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | -# +------------------------------------------------------------------+ -# -# This file is part of Check_MK. -# The official homepage is at http://mathias-kettner.de/check_mk. -# -# check_mk is free software; you can redistribute it and/or modify it -# under the terms of the GNU General Public License as published by -# the Free Software Foundation in version 2. check_mk is distributed -# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- -# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A -# PARTICULAR PURPOSE. See the GNU General Public License for more de- -# ails. You should have received a copy of the GNU General Public -# License along with GNU Make; see the file COPYING. If not, write -# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, -# Boston, MA 02110-1301 USA. - -# Example for configuration for apache_status plugin -# Note: you need this file only if the autodetection fails -# or you do not want to contact all servers it detects - -# Note: Activate this only if the autodetection fails. -# servers = [ -# ('http', 'localhost', 80), -# ('https', 'localhost', 443), -# ('http', 'localhost', 8080), -# ] - diff -Nru check-mk-1.2.2p3/apc_ats_output check-mk-1.2.6p12/apc_ats_output --- check-mk-1.2.2p3/apc_ats_output 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/apc_ats_output 2015-09-21 10:59:54.000000000 +0000 @@ -0,0 +1,127 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . 
\ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# tails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. + +apc_ats_output_default_levels = { + "output_voltage_max" : ( 240, 250 ), + "load_perc_max" : ( 85, 95 ) + } + +def inventory_apc_ats_output(info): + return [ (x[0], "apc_ats_output_default_levels") for x in info ] + +def check_apc_ats_output(item, params, info): + for line in info: + if line[0] == item: + vac, watt, ampere, load_perc = map(saveint, line[1:]) + ampere = ampere/10 + state = 0 + messages = [] + perf = [] + + # Output Voltage Max + warn, crit = params.get('output_voltage_max', (None, None)) + perf.append(('volt', vac, warn, crit )) + base_msg = "%d Volt" % vac + if warn and crit: + levels = " is too high (warn/crit %d/%d)" % (warn, crit) + else: + levels = "" + if crit and vac > crit: + state = 2 + base_msg += levels + "(!!)" + elif warn and vac > warn: + state = 1 + base_msg += levels + "(!)" + + # Output Voltage Min + warn, crit = params.get('output_voltage_min', (None, None)) + if warn and crit: + levels = " is too low (warn/crit %d/%d)" % (warn, crit) + else: + levels = "" + if crit and vac < crit: + state = 2 + base_msg += levels + "(!!)" + elif warn and vac < warn: + state = max(state, 1) + base_msg += levels + "(!)" + + messages.append(base_msg) + + # Watt + perf.append(('watt', watt )) + messages.append("%d Watt" % watt) + + # Ampere and Percent Load + perf.append(("ampere", ampere)) + messages.append("%d Ampere" % ampere ) + + warn, crit = params.get('load_perc_max', (None, None)) + if warn and crit: + levels = " is too high (warn/crit %d/%d) " % (warn, crit) + else: + levels = "" + base_msg = "Load: %d%%" % load_perc + if crit and load_perc > crit: + state = 2 + base_msg += " " + levels + "(!!)" + elif warn and load_perc > warn: + state = max(state, 1) + base_msg += " " + levels + "(!)" + warn, crit = params.get('load_perc_min', (None, None)) + if warn and crit: + levels = " is too low (warn/crit %d/%d) " % (warn, crit) + else: + levels = "" + if crit and load_perc < crit: + state = 2 + base_msg += " " + levels + "(!!)" + elif warn and load_perc < warn: + state = max(state, 1) + base_msg += " " + levels + "(!)" + perf.append(( "load_perc", load_perc, warn, crit )) + messages.append(base_msg) + + return state, "Running with: " + ", ".join(messages), perf + return 3, "Power phase not found" + +check_info["apc_ats_output"] = { + "check_function" : check_apc_ats_output, + "group" : "apc_ats_output", + "inventory_function" : inventory_apc_ats_output, + "service_description" : "Phase %s output", + "has_perfdata" : True, + "snmp_scan_function" : lambda oid: ".1.3.6.1.4.1.318.1.3.11" in oid(".1.3.6.1.2.1.1.2.0"), + "snmp_info"
: ( ".1.3.6.1.4.1.318.1.1.8.5.4.3.1", [ + "1", # atsOutputPhaseTableIndex + #"2", # atsOutputPhaseIndex 1-3 = Phase 1-3, 4 = Neutral + "3", # atsOutputVoltage (VAC) + "13", # atsOutputPower (Watt) + "4", # atsOutputCurrent (0.1 AMPERE) + "10", # atsOutputPercentLoad + ] ), +} + diff -Nru check-mk-1.2.2p3/apc_ats_status check-mk-1.2.6p12/apc_ats_status --- check-mk-1.2.2p3/apc_ats_status 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/apc_ats_status 2015-06-24 09:48:36.000000000 +0000 @@ -0,0 +1,99 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. + +def inventory_apc_ats_status(info): + if len(info) == 1: + return [ (None, saveint(info[0][1]) ) ] + return [] + +def check_apc_ats_status(_no_item, source, info): + comstatus, selected_source, redundancy, overcurrent, ps5, ps24 = map(saveint, info[0]) + state = 0 + messages = [] + + #current source of power + sources = { 1 : "A", 2 : "B" } + if source != selected_source: + state = 2 + messages.append("Power source Changed from %s to %s(!!)" % \ + (sources[source], sources[selected_source])) + else: + messages.append("Power source %s selected" % sources[source]) + + #current communication status of the Automatic Transfer Switch. + if comstatus == 1: + state = max(1,state) + messages.append("Communication Status: never Discovered(!)") + elif comstatus == 3: + state = 2 + messages.append("Communication Status: lost(!!)") + + # current redundancy state of the ATS. + # Lost(1) indicates that the ATS is unable to switch over to the alternate power source + # if the current source fails. Redundant(2) indicates that the ATS will switch + # over to the alternate power source if the current source fails. + if redundancy != 2: + state = 2 + messages.append("redundancy lost(!!)") + else: + messages.append("Device fully redundant") + + # current state of the ATS. atsOverCurrent(1) indicates that the ATS has i + # exceeded the output current threshold and will not allow a switch + # over to the alternate power source if the current source fails. + # atsCurrentOK(2) indicates that the output current is below the output current threshold. 
+    if overcurrent == 1:
+        state = 2
+        messages.append("exceeded output current threshold(!!)")
+
+    # 5 Volt power supply
+    if ps5 != 2:
+        state = 2
+        messages.append("5V power supply failed(!!)")
+
+    # 24 Volt power supply
+    if ps24 != 2:
+        state = 2
+        messages.append("24V power supply failed(!!)")
+
+    return state, ", ".join(messages)
+
+check_info["apc_ats_status"] = {
+    "check_function"      : check_apc_ats_status,
+    "inventory_function"  : inventory_apc_ats_status,
+    "service_description" : "ATS Status",
+    "has_perfdata"        : False,
+    "snmp_scan_function"  : lambda oid: ".1.3.6.1.4.1.318.1.3.11" in oid(".1.3.6.1.2.1.1.2.0"),
+    "snmp_info"           : ( ".1.3.6.1.4.1.318.1.1.8.5.1", [
+                                "1.0", # atsStatusCommStatus
+                                "2.0", # atsStatusSelectedSource
+                                "3.0", # atsStatusRedundancyState
+                                "4.0", # atsStatusOverCurrentState
+                                "5.0", # atsStatus5VPowerSupply
+                                "6.0", # atsStatus24VPowerSupply
+                            ] ),
+}
+
diff -Nru check-mk-1.2.2p3/apc_humidity check-mk-1.2.6p12/apc_humidity
--- check-mk-1.2.2p3/apc_humidity	1970-01-01 00:00:00.000000000 +0000
+++ check-mk-1.2.6p12/apc_humidity	2015-09-21 10:59:54.000000000 +0000
@@ -0,0 +1,60 @@
+#!/usr/bin/python
+# -*- encoding: utf-8; py-indent-offset: 4 -*-
+# +------------------------------------------------------------------+
+# |             ____ _               _        __  __ _  __           |
+# |            / ___| |__   ___  ___| | __   |  \/  | |/ /           |
+# |           | |   | '_ \ / _ \/ __| |/ /   | |\/| | ' /            |
+# |           | |___| | | |  __/ (__|   <    | |  | | . \            |
+# |            \____|_| |_|\___|\___|_|\_\___|_|  |_|_|\_\           |
+# |                                                                  |
+# | Copyright Mathias Kettner 2014             mk@mathias-kettner.de |
+# +------------------------------------------------------------------+
+#
+# This file is part of Check_MK.
+# The official homepage is at http://mathias-kettner.de/check_mk.
+#
+# check_mk is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by
+# the Free Software Foundation in version 2. check_mk is distributed
+# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with-
+# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE. See the GNU General Public License for more de-
+# tails. You should have received a copy of the GNU General Public
+# License along with GNU Make; see the file COPYING. If not, write
+# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
+# Boston, MA 02110-1301 USA.
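+
+# A brief usage sketch (hypothetical agent data): each SNMP row is
+# [sensor_index, humidity_percent] and the params tuple is
+# (crit_low, warn_low, warn_high, crit_high); e.g.
+#   check_apc_humidity('1', (35, 40, 60, 65), [['1', '38']])
+# returns state 1 (WARN), since 38% lies at or below the warn-low level 40.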
+
+
+apc_humidity_default_levels = (35, 40, 60, 65)
+
+
+def inventory_apc_humidity(info):
+    return [ (line[0], "apc_humidity_default_levels") for line in info ]
+
+def check_apc_humidity(item, params, info):
+    for line in info:
+        if line[0] == item:
+            humidity = saveint(line[1])
+            critlow, warnlow, warnhigh, crithigh = params
+            infotext = "%s%% (warn/crit below %.1f/%.1f or above %.1f/%.1f %%)" % \
+                (humidity, warnlow, critlow, warnhigh, crithigh)
+            perfdata = [ ( "humidity", humidity, critlow, warnlow, warnhigh, crithigh ) ]
+            if humidity <= critlow or humidity >= crithigh:
+                return (2, "Humidity is: " + infotext, perfdata)
+            elif humidity <= warnlow or humidity >= warnhigh:
+                return (1, "Humidity is: " + infotext, perfdata)
+            else:
+                return (0, "Humidity is: " + infotext, perfdata)
+
+    return (3, "Sensor not found in SNMP data")
+
+
+check_info["apc_humidity"] = {
+    'check_function':      check_apc_humidity,
+    'inventory_function':  inventory_apc_humidity,
+    'service_description': 'Humidity %s',
+    'has_perfdata':        True,
+    'snmp_info':           ('.1.3.6.1.4.1.318.1.1.10.4.2.3.1', ['3', '6']),
+    'snmp_scan_function':  lambda oid: oid(".1.3.6.1.2.1.1.2.0").startswith(".1.3.6.1.4.1.318.1.3"),
+    'group':               'humidity',
+}
diff -Nru check-mk-1.2.2p3/apc_inputs check-mk-1.2.6p12/apc_inputs
--- check-mk-1.2.2p3/apc_inputs	1970-01-01 00:00:00.000000000 +0000
+++ check-mk-1.2.6p12/apc_inputs	2015-06-24 09:48:36.000000000 +0000
@@ -0,0 +1,80 @@
+#!/usr/bin/python
+# -*- encoding: utf-8; py-indent-offset: 4 -*-
+# +------------------------------------------------------------------+
+# |             ____ _               _        __  __ _  __           |
+# |            / ___| |__   ___  ___| | __   |  \/  | |/ /           |
+# |           | |   | '_ \ / _ \/ __| |/ /   | |\/| | ' /            |
+# |           | |___| | | |  __/ (__|   <    | |  | | . \            |
+# |            \____|_| |_|\___|\___|_|\_\___|_|  |_|_|\_\           |
+# |                                                                  |
+# | Copyright Mathias Kettner 2014             mk@mathias-kettner.de |
+# +------------------------------------------------------------------+
+#
+# This file is part of Check_MK.
+# The official homepage is at http://mathias-kettner.de/check_mk.
+#
+# check_mk is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by
+# the Free Software Foundation in version 2. check_mk is distributed
+# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with-
+# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE. See the GNU General Public License for more de-
+# tails. You should have received a copy of the GNU General Public
+# License along with GNU Make; see the file COPYING. If not, write
+# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
+# Boston, MA 02110-1301 USA.
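+
+# A brief usage sketch (hypothetical values): discovery remembers the state
+# seen at inventory time, so a contact discovered as closed ("1") that later
+# reports open ("2") is raised to at least WARN:
+#   check_apc_inputs('Door', {'state': '1'}, [['Door', 'Rack1', '2', '1']])
+# returns (1, 'State is normal, Port state changed from closed to open').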
+
+def inventory_apc_inputs(info):
+    if len(info) > 0:
+        inventory = []
+        for line in info:
+            if line[2] not in [ "3", "4" ]:
+                inventory.append((line[0], {"state" : line[2]}))
+        return inventory
+
+def check_apc_inputs(item, params, info):
+    states = {
+        '1' : "closed",
+        '2' : "open",
+        '3' : "disabled",
+        '4' : "not applicable",
+    }
+    alarm_states = {
+        '1' : "normal",
+        '2' : "warning",
+        '3' : "critical",
+        '4' : "not applicable",
+    }
+    for name, location, state, alarm_status in info:
+        if name == item:
+            if alarm_status in [ "2", "4" ]:
+                check_state = 1
+            elif alarm_status == "3":
+                check_state = 2
+            elif alarm_status == "1":
+                check_state = 0
+
+            messages = [ "State is %s" % alarm_states[alarm_status] ]
+
+            if params['state'] != state:
+                check_state = max(check_state, 1)
+                messages.append("Port state changed from %s to %s" % \
+                    (states[params['state']], states[state]))
+
+            return check_state, ", ".join(messages)
+
+
+check_info['apc_inputs'] = {
+    "inventory_function"  : inventory_apc_inputs,
+    "check_function"      : check_apc_inputs,
+    "service_description" : "Input %s",
+    "snmp_scan_function"  : lambda oid: oid(".1.3.6.1.2.1.1.2.0").startswith(".1.3.6.1.4.1.318.1.3"),
+    "snmp_info"           : ( ".1.3.6.1.4.1.318.1.1.25.2.2.1",
+                              [
+                                3, # Port Name
+                                4, # Location
+                                5, # CurrentState
+                                6, # AlarmStatus
+                              ] )
+}
+
diff -Nru check-mk-1.2.2p3/apc_inrow_airflow check-mk-1.2.6p12/apc_inrow_airflow
--- check-mk-1.2.2p3/apc_inrow_airflow	1970-01-01 00:00:00.000000000 +0000
+++ check-mk-1.2.6p12/apc_inrow_airflow	2015-09-21 10:59:54.000000000 +0000
@@ -0,0 +1,65 @@
+#!/usr/bin/python
+# -*- encoding: utf-8; py-indent-offset: 4 -*-
+# +------------------------------------------------------------------+
+# |             ____ _               _        __  __ _  __           |
+# |            / ___| |__   ___  ___| | __   |  \/  | |/ /           |
+# |           | |   | '_ \ / _ \/ __| |/ /   | |\/| | ' /            |
+# |           | |___| | | |  __/ (__|   <    | |  | | . \            |
+# |            \____|_| |_|\___|\___|_|\_\___|_|  |_|_|\_\           |
+# |                                                                  |
+# | Copyright Mathias Kettner 2014             mk@mathias-kettner.de |
+# +------------------------------------------------------------------+
+#
+# This file is part of Check_MK.
+# The official homepage is at http://mathias-kettner.de/check_mk.
+#
+# check_mk is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by
+# the Free Software Foundation in version 2. check_mk is distributed
+# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with-
+# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE. See the GNU General Public License for more de-
+# tails. You should have received a copy of the GNU General Public
+# License along with GNU Make; see the file COPYING. If not, write
+# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
+# Boston, MA 02110-1301 USA.
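+
+# A brief usage sketch (hypothetical raw value): the device reports the flow
+# in 1/100 l/s, so a raw "750" is parsed as 7.50 l/s, which is neither too
+# low (< 5.0) nor too high (> 10.0) with the defaults below:
+#   check_apc_inrow_airflow(None, {"level_low": (5.0, 2.0), "level_high": (10.0, 11.0)}, [['750']])
+# returns (0, 'Current: 7.50l/s ', [('flow', 7.5, 10.0, 11.0)]).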
+
+
+apc_inrow_airflow_default_levels = { "level_low" : ( 5.0, 2.0 ), "level_high" : ( 10.0, 11.0 ) }
+
+def inventory_apc_inrow_airflow(info):
+    return [ (None, "apc_inrow_airflow_default_levels") ]
+
+def check_apc_inrow_airflow(_no_item, params, info):
+    flow = saveint(info[0][0]) / 100.0
+    state = 0
+    message = ""
+
+    warn, crit = params['level_low']
+    if flow < crit:
+        state = 2
+        message = "too low"
+    elif flow < warn:
+        state = 1
+        message = "too low"
+
+    warn, crit = params['level_high']
+    if flow > crit:
+        state = 2
+        message = "too high"
+    elif flow > warn:
+        state = 1
+        message = "too high"
+
+    perf = [ ("flow", flow, warn, crit) ]
+    return state, "Current: %.2fl/s %s" % (flow, message), perf
+
+check_info["apc_inrow_airflow"] = {
+    "check_function"      : check_apc_inrow_airflow,
+    "inventory_function"  : inventory_apc_inrow_airflow,
+    "service_description" : "Airflow",
+    "snmp_info"           : (".1.3.6.1.4.1.318.1.1.13.3.2.2.2", [ "5" ]),
+    "snmp_scan_function"  : lambda oid: oid(".1.3.6.1.2.1.1.2.0") == ".1.3.6.1.4.1.318.1.3.14.5",
+    "group"               : "airflow",
+    "has_perfdata"        : True,
+}
diff -Nru check-mk-1.2.2p3/apc_inrow_fanspeed check-mk-1.2.6p12/apc_inrow_fanspeed
--- check-mk-1.2.2p3/apc_inrow_fanspeed	1970-01-01 00:00:00.000000000 +0000
+++ check-mk-1.2.6p12/apc_inrow_fanspeed	2015-09-21 10:59:54.000000000 +0000
@@ -0,0 +1,41 @@
+#!/usr/bin/python
+# -*- encoding: utf-8; py-indent-offset: 4 -*-
+# +------------------------------------------------------------------+
+# |             ____ _               _        __  __ _  __           |
+# |            / ___| |__   ___  ___| | __   |  \/  | |/ /           |
+# |           | |   | '_ \ / _ \/ __| |/ /   | |\/| | ' /            |
+# |           | |___| | | |  __/ (__|   <    | |  | | . \            |
+# |            \____|_| |_|\___|\___|_|\_\___|_|  |_|_|\_\           |
+# |                                                                  |
+# | Copyright Mathias Kettner 2014             mk@mathias-kettner.de |
+# +------------------------------------------------------------------+
+#
+# This file is part of Check_MK.
+# The official homepage is at http://mathias-kettner.de/check_mk.
+#
+# check_mk is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by
+# the Free Software Foundation in version 2. check_mk is distributed
+# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with-
+# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE. See the GNU General Public License for more de-
+# tails. You should have received a copy of the GNU General Public
+# License along with GNU Make; see the file COPYING. If not, write
+# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
+# Boston, MA 02110-1301 USA.
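+
+# A brief usage sketch (hypothetical raw value): the device reports the fan
+# speed in tenths of a percent, so
+#   check_apc_inrow_fanspeed(None, None, [['675']])
+# returns (0, 'Current: 67.50%', [('fanspeed', 67.5)]).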
+
+def inventory_apc_inrow_fanspeed(info):
+    return [ (None, None) ]
+
+def check_apc_inrow_fanspeed(_no_item, _no_params, info):
+    value = savefloat(info[0][0]) / 10
+    return 0, "Current: %.2f%%" % value, [ ("fanspeed", value) ]
+
+check_info["apc_inrow_fanspeed"] = {
+    "check_function"      : check_apc_inrow_fanspeed,
+    "inventory_function"  : inventory_apc_inrow_fanspeed,
+    "service_description" : "Fanspeed",
+    "snmp_info"           : ( ".1.3.6.1.4.1.318.1.1.13.3.2.2.2", [ "16" ] ),
+    "snmp_scan_function"  : lambda oid: oid(".1.3.6.1.2.1.1.2.0") == ".1.3.6.1.4.1.318.1.3.14.5",
+    "has_perfdata"        : True,
+}
diff -Nru check-mk-1.2.2p3/apc_inrow_temp check-mk-1.2.6p12/apc_inrow_temp
--- check-mk-1.2.2p3/apc_inrow_temp	1970-01-01 00:00:00.000000000 +0000
+++ check-mk-1.2.6p12/apc_inrow_temp	2015-09-21 10:59:54.000000000 +0000
@@ -0,0 +1,97 @@
+#!/usr/bin/python
+# -*- encoding: utf-8; py-indent-offset: 4 -*-
+# +------------------------------------------------------------------+
+# |             ____ _               _        __  __ _  __           |
+# |            / ___| |__   ___  ___| | __   |  \/  | |/ /           |
+# |           | |   | '_ \ / _ \/ __| |/ /   | |\/| | ' /            |
+# |           | |___| | | |  __/ (__|   <    | |  | | . \            |
+# |            \____|_| |_|\___|\___|_|\_\___|_|  |_|_|\_\           |
+# |                                                                  |
+# | Copyright Mathias Kettner 2014             mk@mathias-kettner.de |
+# +------------------------------------------------------------------+
+#
+# This file is part of Check_MK.
+# The official homepage is at http://mathias-kettner.de/check_mk.
+#
+# check_mk is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by
+# the Free Software Foundation in version 2. check_mk is distributed
+# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with-
+# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE. See the GNU General Public License for more de-
+# tails. You should have received a copy of the GNU General Public
+# License along with GNU Make; see the file COPYING. If not, write
+# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
+# Boston, MA 02110-1301 USA.
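+
+# A minimal sketch of the row-to-sensor mapping done by apc_inrow_temp_convert
+# below (hypothetical tenth-of-degree readings):
+#   apc_inrow_temp_convert([['221'], ['185'], ['243'], ['160'], ['170']])
+# yields {'Rack Inlet': 22.1, 'Supply Air': 18.5, 'Return Air': 24.3,
+#         'Entering Fluid': 16.0, 'Leaving Fluid': 17.0}.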
+
+# Structure of relevant MIB part (all are integers):
+# airIRRCUnitStatusOperateMode                     1.3.6.1.4.1.318.1.1.13.3.2.2.2.1.0
+# airIRRCUnitStatusCoolOutput                      1.3.6.1.4.1.318.1.1.13.3.2.2.2.2.0
+# airIRRCUnitStatusCoolDemand                      1.3.6.1.4.1.318.1.1.13.3.2.2.2.3.0
+# airIRRCUnitStatusAirFlowUS                       1.3.6.1.4.1.318.1.1.13.3.2.2.2.4.0
+# airIRRCUnitStatusAirFlowMetric                   1.3.6.1.4.1.318.1.1.13.3.2.2.2.5.0
+# airIRRCUnitStatusRackInletTempUS                 1.3.6.1.4.1.318.1.1.13.3.2.2.2.6.0
+# airIRRCUnitStatusRackInletTempMetric             1.3.6.1.4.1.318.1.1.13.3.2.2.2.7.0
+# airIRRCUnitStatusSupplyAirTempUS                 1.3.6.1.4.1.318.1.1.13.3.2.2.2.8.0
+# airIRRCUnitStatusSupplyAirTempMetric             1.3.6.1.4.1.318.1.1.13.3.2.2.2.9.0
+# airIRRCUnitStatusReturnAirTempUS                 1.3.6.1.4.1.318.1.1.13.3.2.2.2.10.0
+# airIRRCUnitStatusReturnAirTempMetric             1.3.6.1.4.1.318.1.1.13.3.2.2.2.11.0
+# airIRRCUnitStatusContainmtDPUS                   1.3.6.1.4.1.318.1.1.13.3.2.2.2.12.0
+# airIRRCUnitStatusContainmtDPMetric               1.3.6.1.4.1.318.1.1.13.3.2.2.2.13.0
+# airIRRCUnitStatusFilterDPUS                      1.3.6.1.4.1.318.1.1.13.3.2.2.2.14.0
+# airIRRCUnitStatusFilterDPMetric                  1.3.6.1.4.1.318.1.1.13.3.2.2.2.15.0
+# airIRRCUnitStatusFanSpeed                        1.3.6.1.4.1.318.1.1.13.3.2.2.2.16.0
+# airIRRCUnitStatusInputState                      1.3.6.1.4.1.318.1.1.13.3.2.2.2.17.0
+# airIRRCUnitStatusOutputState                     1.3.6.1.4.1.318.1.1.13.3.2.2.2.18.0
+# airIRRCUnitStatusActivePowerSource               1.3.6.1.4.1.318.1.1.13.3.2.2.2.19.0
+# airIRRCUnitStatusFluidValvePosition              1.3.6.1.4.1.318.1.1.13.3.2.2.2.20.0
+# airIRRCUnitStatusFluidFlowUS                     1.3.6.1.4.1.318.1.1.13.3.2.2.2.21.0
+# airIRRCUnitStatusFluidFlowMetric                 1.3.6.1.4.1.318.1.1.13.3.2.2.2.22.0
+# airIRRCUnitStatusEnteringFluidTemperatureUS      1.3.6.1.4.1.318.1.1.13.3.2.2.2.23.0
+# airIRRCUnitStatusEnteringFluidTemperatureMetric  1.3.6.1.4.1.318.1.1.13.3.2.2.2.24.0
+# airIRRCUnitStatusLeavingFluidTemperatureUS       1.3.6.1.4.1.318.1.1.13.3.2.2.2.25.0
+# airIRRCUnitStatusLeavingFluidTemperatureMetric   1.3.6.1.4.1.318.1.1.13.3.2.2.2.26.0
+
+apc_inrow_temp_default_levels = ( 30, 35 )
+
+def apc_inrow_temp_convert(info):
+    vars = [
+        "Rack Inlet",
+        "Supply Air",
+        "Return Air",
+        "Entering Fluid",
+        "Leaving Fluid",
+    ]
+    count = 0
+    data = {}
+    for name in vars:
+        # each row holds one value; "-1" means the sensor is not available
+        if len(info) > count and info[count][0] != "-1":
+            value = int(info[count][0])
+            data[name] = value / 10.0
+        count += 1
+    return data
+
+
+def inventory_apc_inrow_temp(info):
+    info = apc_inrow_temp_convert(info)
+    return [ (x, "apc_inrow_temp_default_levels") for x in info.keys() ]
+
+
+def check_apc_inrow_temp(item, params, info):
+    info = apc_inrow_temp_convert(info)
+    for sensor, value in info.items():
+        if sensor == item:
+            return check_temperature(value, params)
+
+
+check_info["apc_inrow_temp"] = {
+    "check_function"      : check_apc_inrow_temp,
+    "inventory_function"  : inventory_apc_inrow_temp,
+    "service_description" : "Temperature %s",
+    "group"               : "hw_temperature",
+    "snmp_info"           : (".1.3.6.1.4.1.318.1.1.13.3.2.2.2", [ 7, 9, 11, 24, 26 ]),
+    "snmp_scan_function"  : lambda oid: oid(".1.3.6.1.2.1.1.2.0") == ".1.3.6.1.4.1.318.1.3.14.5",
+    "has_perfdata"        : True,
+    "includes"            : [ "temperature.include" ],
+}
+
diff -Nru check-mk-1.2.2p3/apc_mod_pdu_modules check-mk-1.2.6p12/apc_mod_pdu_modules
--- check-mk-1.2.2p3/apc_mod_pdu_modules	1970-01-01 00:00:00.000000000 +0000
+++ check-mk-1.2.6p12/apc_mod_pdu_modules	2015-06-24 09:48:36.000000000 +0000
@@ -0,0 +1,68 @@
+#!/usr/bin/python
+# -*- encoding: utf-8; py-indent-offset: 4 -*-
+# +------------------------------------------------------------------+
+# |             ____ _               _        __  __ _  __           |
+# |            / ___| |__   ___  ___| | __   |  \/  | |/ /           |
+# |           | |   | '_ \ / _ \/ __| |/ /   | |\/| | ' /            |
+# |           | |___| | | |  __/ (__|   <    | |  | | . \            |
+# |            \____|_| |_|\___|\___|_|\_\___|_|  |_|_|\_\           |
+# |                                                                  |
+# | Copyright Mathias Kettner 2014             mk@mathias-kettner.de |
+# +------------------------------------------------------------------+
+#
+# This file is part of Check_MK.
+# The official homepage is at http://mathias-kettner.de/check_mk.
+#
+# check_mk is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by
+# the Free Software Foundation in version 2. check_mk is distributed
+# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with-
+# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE. See the GNU General Public License for more de-
+# tails. You should have received a copy of the GNU General Public
+# License along with GNU Make; see the file COPYING. If not, write
+# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
+# Boston, MA 02110-1301 USA.
+
+def inventory_apc_mod_pdu_modules(info):
+    return [ (x[0], None) for x in info if x[0] != '' ]
+
+def check_apc_mod_pdu_modules(item, _no_params, info):
+    apc_states = {
+        1 : "normal",
+        2 : "warning",
+        3 : "notPresent",
+        6 : "unknown",
+    }
+    for name, status, current_power in info:
+        if name == item:
+            status = saveint(status)
+            current_power = savefloat(current_power) / 100
+            message = "Status %s, current: %.2f kW" % \
+                (apc_states.get(status, "unknown"), current_power)
+
+            perf = [
+                ("current_power", current_power)
+            ]
+            if status == 2:
+                return 1, message, perf
+            if status in [ 3, 6 ]:
+                return 2, message, perf
+            if status == 1:
+                return 0, message, perf
+            return 3, message
+    return 3, "Module not found"
+
+check_info["apc_mod_pdu_modules"] = {
+    "check_function"      : check_apc_mod_pdu_modules,
+    "inventory_function"  : inventory_apc_mod_pdu_modules,
+    "service_description" : "Module %s",
+    "snmp_info"           : (".1.3.6.1.4.1.318.1.1.22.2.6.1", [
+                                4,  # isxModularDistModuleOutputName
+                                6,  # isxModularDistModuleOutputStatus
+                                20, # isxModularDistModuleOutputTotalPower
+                            ]),
+    "snmp_scan_function"  : lambda oid: oid(".1.3.6.1.2.1.1.2.0") == ".1.3.6.1.4.1.318.1.3.24.1",
+    "has_perfdata"        : True,
+}
+
diff -Nru check-mk-1.2.2p3/apc_powerswitch check-mk-1.2.6p12/apc_powerswitch
--- check-mk-1.2.2p3/apc_powerswitch	2013-11-05 09:23:07.000000000 +0000
+++ check-mk-1.2.6p12/apc_powerswitch	2015-06-24 09:48:36.000000000 +0000
@@ -7,7 +7,7 @@
 # |           | |___| | | |  __/ (__|   <    | |  | | . \            |
 # |            \____|_| |_|\___|\___|_|\_\___|_|  |_|_|\_\           |
 # |                                                                  |
-# | Copyright Mathias Kettner 2013             mk@mathias-kettner.de |
+# | Copyright Mathias Kettner 2014             mk@mathias-kettner.de |
 # +------------------------------------------------------------------+
 #
 # This file is part of Check_MK.
@@ -29,7 +29,7 @@ def inventory_apc_powerswitch(info): - return [(x[0],saveint(x[2])) for x in info] + return [ (x[0],saveint(x[2])) for x in info if x[2] == "1" ] def check_apc_powerswitch(item, params, info): state_name = { @@ -44,16 +44,14 @@ r_state = 1 if state in apc_powerswitch_critical_states: r_state = 2 - return(r_state, nagios_state_names[r_state] + '- Port %s has status %s' % (line[1], state_name[state])) - - return False - + return(r_state, 'Port %s has status %s' % (line[1], state_name[state])) + return 3, "Port not found" check_info['apc_powerswitch'] = { 'check_function' : check_apc_powerswitch, 'inventory_function' : inventory_apc_powerswitch, - 'snmp_scan_function' : lambda oid: oid(".1.3.6.1.2.1.1.2.0").startswith(".1.3.6.1.4.1.318.1.3.4.5"), + 'snmp_scan_function' : lambda oid: oid(".1.3.6.1.2.1.1.2.0").startswith(".1.3.6.1.4.1.318.1.3.4"), 'service_description' : 'Power Outlet Port %s', 'snmp_info' : ( '.1.3.6.1.4.1.318.1.1.12.3.5.1.1', [1, 2, 4] ), } diff -Nru check-mk-1.2.2p3/apc_rackpdu_power check-mk-1.2.6p12/apc_rackpdu_power --- check-mk-1.2.2p3/apc_rackpdu_power 2013-11-05 09:23:07.000000000 +0000 +++ check-mk-1.2.6p12/apc_rackpdu_power 2015-06-24 09:48:36.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. @@ -41,17 +41,22 @@ if host == item: perfdata = [ ( "amperage", amperage, max ) ] if amperage >= max: - return (2, "CRIT - Amperage: %f - Overload possible" % amperage, perfdata) + return (2, "Amperage: %f - Overload possible" % amperage, perfdata) else: - return (0, "OK - Amperage: %f" % amperage, perfdata) - return (3, "UNKNOWN - Something went wrong") + return (0, "Amperage: %f" % amperage, perfdata) + return (3, "Something went wrong") -check_info["apc_rackpdu_power"] = \ - (check_apc_rackpdu_power, "PDU %s", 1, inventory_apc_rackpdu_power) -snmp_info["apc_rackpdu_power"] = ( ".1.3.6.1.4.1.318.1.1.12", [ "1.1.0", "2.3.1.1.2.1", "2.2.1.1.3.1", "2.2.1.1.4.1" ] ) # Look for the APC management module and then dive into it's rackpdu tree -snmp_scan_functions["apc_rackpdu_power"] = lambda oid: \ - oid(".1.3.6.1.2.1.1.1.0").lower().startswith("apc web/snmp") \ - and oid(".1.3.6.1.4.1.318.1.1.12.1.1.0") + +check_info["apc_rackpdu_power"] = { + 'check_function': check_apc_rackpdu_power, + 'inventory_function': inventory_apc_rackpdu_power, + 'service_description': 'PDU %s', + 'has_perfdata': True, + 'snmp_info': ('.1.3.6.1.4.1.318.1.1.12', ['1.1.0', '2.3.1.1.2.1', '2.2.1.1.3.1', '2.2.1.1.4.1']), + 'snmp_scan_function': lambda oid: \ + oid(".1.3.6.1.2.1.1.1.0").lower().startswith("apc web/snmp") \ + and oid(".1.3.6.1.4.1.318.1.1.12.1.1.0"), +} diff -Nru check-mk-1.2.2p3/apc_symmetra check-mk-1.2.6p12/apc_symmetra --- check-mk-1.2.2p3/apc_symmetra 2013-11-05 09:23:07.000000000 +0000 +++ check-mk-1.2.6p12/apc_symmetra 2015-09-21 10:59:54.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. 
@@ -24,120 +24,185 @@
 # to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
 # Boston, MA 02110-1301 USA.
 
-# We use the following OIDs:
-# PowerNet-MIB::upsBasicBatteryStatus.0     .1.3.6.1.4.1.318.1.1.1.2.1.1.0
-# PowerNet-MIB::upsBasicOutputStatus.0      .1.3.6.1.4.1.318.1.1.1.4.1.1.0
-# PowerNet-MIB::upsAdvBatteryCapacity.0     .1.3.6.1.4.1.318.1.1.1.2.2.1.0
-# PowerNet-MIB::upsAdvBatteryTemperature.0  .1.3.6.1.4.1.318.1.1.1.2.2.2.0
-# PowerNet-MIB::upsAdvBatteryCurrent.0      .1.3.6.1.4.1.318.1.1.1.2.2.9.0
-# PowerNet-MIB::upsAdvOutputVoltage.0       .1.3.6.1.4.1.318.1.1.1.4.2.1.0
-# PowerNet-MIB::upsAdvOutputCurrent.0       .1.3.6.1.4.1.318.1.1.1.4.2.4.0
-
-# upsBasicBatteryStatus: unknown(1), batteryNormal(2), batteryLow(3)
-# upsBasicOutputStatus: unknown(1), onLine(2), onBattery(3), onSmartBoost(4),
-# timedSleeping(5), softwareBypass(6), off(7), rebooting(8), switchedBypass(9),
-# hardwareFailureBypass(10), sleepingUntilPowerReturn(11), onSmartTrim(12)
+# upsBasicStateOutputState:
+# The flags are numbered 1 to 64, read from left to right. The flags are defined as follows:
+# 1: Abnormal Condition Present, 2: On Battery, 3: Low Battery, 4: On Line
+# 5: Replace Battery, 6: Serial Communication Established, 7: AVR Boost Active
+# 8: AVR Trim Active, 9: Overload, 10: Runtime Calibration, 11: Batteries Discharged
+# 12: Manual Bypass, 13: Software Bypass, 14: In Bypass due to Internal Fault
+# 15: In Bypass due to Supply Failure, 16: In Bypass due to Fan Failure
+# 17: Sleeping on a Timer, 18: Sleeping until Utility Power Returns
+# 19: On, 20: Rebooting, 21: Battery Communication Lost, 22: Graceful Shutdown Initiated
+# 23: Smart Boost or Smart Trim Fault, 24: Bad Output Voltage, 25: Battery Charger Failure
+# 26: High Battery Temperature, 27: Warning Battery Temperature, 28: Critical Battery Temperature
+# 29: Self Test In Progress, 30: Low Battery / On Battery, 31: Graceful Shutdown Issued by Upstream Device
+# 32: Graceful Shutdown Issued by Downstream Device, 33: No Batteries Attached
+# 34: Synchronized Command is in Progress, 35: Synchronized Sleeping Command is in Progress
+# 36: Synchronized Rebooting Command is in Progress, 37: Inverter DC Imbalance
+# 38: Transfer Relay Failure, 39: Shutdown or Unable to Transfer, 40: Low Battery Shutdown
+# 41: Electronic Unit Fan Failure, 42: Main Relay Failure, 43: Bypass Relay Failure
+# 44: Temporary Bypass, 45: High Internal Temperature, 46: Battery Temperature Sensor Fault
+# 47: Input Out of Range for Bypass, 48: DC Bus Overvoltage, 49: PFC Failure
+# 50: Critical Hardware Fault, 51: Green Mode/ECO Mode, 52: Hot Standby
+# 53: Emergency Power Off (EPO) Activated, 54: Load Alarm Violation, 55: Bypass Phase Fault
+# 56: UPS Internal Communication Failure, 57-64: <Not Used>
+
+# old format:
+# apc_default_levels = ( 95, 40, 1, 220 )
+# Temperature default is now 60C: according to an APC technician a temperature up to 70C is possible
+factory_settings["apc_default_levels"] = {
+    "levels": ( 95, 60, 1, 220 )
+}
+
+def inventory_apc(info):
+    if len(info) > 0:
+        return [(None, "apc_default_levels")]
 
-apc_default_levels = ( 95, 40, 1, 220 )
 
 def check_apc(item, params, info):
-    BasicBatteryStatus, BasicOutputStatus, AdvBatteryCapacity, \
-    AdvBatteryTemperature, AdvBatteryCurrent, AdvOutputVoltage, \
-    AdvOutputCurrent = [ saveint(x) for x in info[0][:7] ]
-    RunTimeRemaining = int(info[0][7]) / 100
-
-    crit_capacity, crit_batt_temp, crit_batt_curr, crit_voltage = params
+    battery_status, output_status, battery_capacity, system_temp, battery_replace, \
+    num_batt_packs, battery_current, input_voltage, output_voltage, output_current, \
+    time_remaining, calib_result, output_load = [ saveint(x) for x in info[0][:13] ]
+    last_diag_date = info[0][13]
+
+    if info[0][14] != '':
+        output_state_bitmask = int(info[0][14], 2) # string contains a bitmask, convert to int
+    else:
+        output_state_bitmask = 0
+    # Flag 29 ("Self Test In Progress"), counted from the left of the 64
+    # flags, corresponds to bit 64 - 29 = 35 counted from the right.
+    self_test_in_progress = output_state_bitmask & 1 << 35 != 0
+
+    # convert old format tuple to dict
+    if type(params) is tuple:
+        params = { "levels": params }
+
+    # new format with up to 6 params in dict
+    alt_crit_capacity = None
+    crit_capacity, crit_sys_temp, crit_batt_curr, crit_voltage = params['levels']
+    if params.get("post_calibration_levels"):
+        # the last_diag_date is reported as %m/%d/%Y or %y
+        if last_diag_date != 'Unknown' and len(last_diag_date) in [8, 10]:
+            year_format = len(last_diag_date) == 8 and '%y' or '%Y'
+            last_ts = time.mktime(time.strptime(last_diag_date, '%m/%d/' + year_format))
+            diff_sec = time.time() - last_ts
 
-    single_states = []
+            allowed_delay_sec = 86400 + params['post_calibration_levels']['additional_time_span']
+            alt_crit_capacity = params['post_calibration_levels']['altcapacity']
 
     # 1. Check battery status
     status_text = { 1:"unknown", 2:"normal", 3:"low" }
-    infotxt = "Battery status %s" % (status_text.get(BasicBatteryStatus))
-    if BasicBatteryStatus != 2:
-        state = 2
-        infotxt += "(!!)"
-    else:
-        state = 0
-    single_states.append( (state, infotxt, None) )
+    if battery_status != 2:
+        yield 2, "Battery status: %s" % (status_text.get(battery_status))
 
-    # 2. Check basic output status
+    # 2. Check battery replacement status
+    if battery_replace == 2:
+        if num_batt_packs == 1:
+            yield 1, "one battery needs replacement"
+        elif num_batt_packs > 1:
+            yield 2, "%i batteries need replacement" % num_batt_packs
+    elif battery_status == 2:
+        yield 0, "Battery status: ok"
+
+    # 3. Check basic output status
     status_text = { 1:"unknown", 2:"online", 3:"on battery", 4:"on smart boost",
                     5:"timed sleeping", 6:"software bypass", 7:"off", 8:"rebooting",
                     9:"switched bypass", 10:"hardware failure bypass",
                     11:"sleeping until power return", 12:"on smart trim" }
-    infotxt = "output status %s" % (status_text.get(BasicOutputStatus))
-    if BasicOutputStatus not in [2, 4, 12]:
+    calib_text = { 1:"", 2:" (calibration invalid)", 3:" (calibration in progress)" }
+    stest_text = self_test_in_progress and " (self-test running)" or ""
+    infotxt = "output status: %s%s%s" % (status_text.get(output_status), calib_text.get(calib_result, ""), stest_text)
+    # during a calibration or self test the state is OK
+    if output_status not in [2, 4, 12] and calib_result != 3 and not self_test_in_progress:
         state = 2
-        infotxt += "(!!)"
     else:
         state = 0
-    single_states.append( (state, infotxt, None) )
+    yield state, infotxt
 
-    # 3. Check battery capacity
-    infotxt = "capacity %d%%" % AdvBatteryCapacity
-    if AdvBatteryCapacity <= crit_capacity:
-        state = 2
-        infotxt += "(!!)"
+    # 4. Check battery capacity
+    state = 0
+    infotxt = "capacity %d%% (crit at or below " % battery_capacity
+    if alt_crit_capacity != None and diff_sec < allowed_delay_sec:
+        infotxt += "%d%% in delay after calib.)" % alt_crit_capacity
+        if battery_capacity <= alt_crit_capacity:
+            state = 2
     else:
-        state = 0
-    single_states.append( (state, infotxt, ("capacity", AdvBatteryCapacity, "", crit_capacity, 0, 100)) )
+        infotxt += "%d%%)" % crit_capacity
+        if battery_capacity <= crit_capacity:
+            state = 2
+    yield state, infotxt, [("capacity", battery_capacity, None, crit_capacity, 0, 100)]
 
-    # 4. Check battery temperature
-    infotxt = "bat. temp. %dC" % AdvBatteryTemperature
-    if AdvBatteryTemperature >= crit_batt_temp:
+    # 5. Check system temperature
+    infotxt = "sys. temp. %d °C" % system_temp
+    if system_temp >= crit_sys_temp:
         state = 2
-        infotxt += "(!!)"
     else:
         state = 0
-    single_states.append( (state, infotxt, ("battemp", AdvBatteryTemperature, "", crit_batt_temp) ) )
+    yield state, infotxt, [("systemp", system_temp, None, crit_sys_temp)]
 
-    # 5. Check battery current
-    infotxt = "bat. curr. %dA" % AdvBatteryCurrent
-    if AdvBatteryCurrent >= crit_batt_curr:
-        state = 2
-        infotxt += "(!!)"
-    else:
+    # 6. Check battery current
+    infotxt = "bat. curr. %d A" % battery_current
+    if (alt_crit_capacity != None and diff_sec < allowed_delay_sec) or self_test_in_progress:
         state = 0
-    single_states.append( (state, infotxt, ("batcurr", AdvBatteryCurrent, "", crit_batt_curr, 0) ) )
+    elif battery_current >= crit_batt_curr:
+        state = 2
+    yield state, infotxt, [("batcurr", battery_current, None, crit_batt_curr, 0)]
+
+    # 6a. Simply show input voltage (no performance data)
+    yield 0, "input voltage %d V" % input_voltage
 
-    # 6. Check output voltage
-    infotxt = "output voltage %dV" % AdvOutputVoltage
-    if AdvOutputVoltage <= crit_voltage:
+    # 7. Check output voltage
+    infotxt = "output voltage %d V" % output_voltage
+    if output_voltage <= crit_voltage:
         state = 2
-        infotxt += "(!!)"
     else:
         state = 0
-    single_states.append( (state, infotxt, ("voltage", AdvOutputVoltage, "", crit_voltage, 0) ) )
+    yield state, infotxt, [("voltage", output_voltage, None, crit_voltage, 0)]
 
-    # 7. Simply add output current as perfdata
-    single_states.append( (0, "output current %dA" % AdvOutputCurrent, ("current", AdvOutputCurrent)) )
+    # 8. Simply add output current as perfdata
+    yield 0, "output current %d A" % output_current, [("current", output_current)]
 
-    # 8. run time remaining
-    # RunTimeRemaining == "0:0:26:00.00"
-    hrs = int(RunTimeRemaining) / 3600
-    mins, secs = divmod(int(RunTimeRemaining) % 3600, 60)
-    single_states.append( (0, "run time remaining: %02d:%02d:%02d" % (hrs, mins, secs), None) )
-
-    # create summary state
-    worst_state = max([x[0] for x in single_states])
-    info_text = ", ".join([x[1] for x in single_states])
-    state_text = { 0:"OK", 1:"WARN", 2:"CRIT" }.get(worst_state)
-
-    return (worst_state, "%s - %s" % (state_text, info_text), [x[2] for x in single_states if x[2] != None])
-
-def inventory_apc(info):
-    if len(info) > 0:
-        return [(None, "apc_default_levels")]
+    # 9. run time remaining
+    time_remaining /= 100
+    hrs = time_remaining / 3600
+    mins, secs = divmod(time_remaining % 3600, 60)
+    yield 0, "run time remaining: %02d:%02d:%02d" % (hrs, mins, secs), \
+        [("runtime", time_remaining / 60)]
 
+    # 10. Adv Output load (load in percent)
+    load_state = 0
+    loadwarn, loadcrit = None, None
+    if params.get('output_load'):
+        loadwarn, loadcrit = params['output_load']
+        if output_load >= loadcrit:
+            load_state = 2
+        elif output_load >= loadwarn:
+            load_state = 1
+    yield load_state, "current output load %d%%" % output_load, \
+        [("OutputLoad", output_load, loadwarn, loadcrit)]
 
 check_info['apc_symmetra'] = {
-    "inventory_function"  : inventory_apc,
-    "check_function"      : check_apc,
-    "service_description" : "APC Symmetra status",
-    "has_perfdata"        : True,
-    "group"               : "apc_symentra",
-    "snmp_scan_function"  : lambda oid: oid(".1.3.6.1.2.1.1.2.0").startswith(".1.3.6.1.4.1.318.1.3"),
-    "snmp_info"           : (".1.3.6.1.4.1.318.1.1.1",
-        [ "2.1.1.0", "4.1.1.0", "2.2.1.0", "2.2.2.0",
-          "2.2.9.0", "4.2.1.0", "4.2.4.0", "2.2.3.0" ] )
+    "inventory_function"      : inventory_apc,
+    "check_function"          : check_apc,
+    "service_description"     : "APC Symmetra status",
+    "has_perfdata"            : True,
+    "group"                   : "apc_symentra",
+    "default_levels_variable" : "apc_default_levels",
+    "snmp_scan_function"      : lambda oid: oid(".1.3.6.1.2.1.1.2.0").startswith(".1.3.6.1.4.1.318.1.3"),
+    "snmp_info"               : (".1.3.6.1.4.1.318.1.1.1",
+        [
+            "2.1.1.0",  # PowerNet-MIB::upsBasicBatteryStatus
+            "4.1.1.0",  # PowerNet-MIB::upsBasicOutputStatus
+            "2.2.1.0",  # PowerNet-MIB::upsAdvBatteryCapacity
+            "2.2.2.0",  # PowerNet-MIB::upsAdvBatteryTemperature
+            "2.2.4.0",  # PowerNet-MIB::upsAdvBatteryReplaceIndicator
+            "2.2.6.0",  # PowerNet-MIB::upsAdvBatteryNumOfBadBattPacks
+            "2.2.9.0",  # PowerNet-MIB::upsAdvBatteryCurrent
+            "3.2.1.0",  # PowerNet-MIB::upsAdvInputVoltage
+            "4.2.1.0",  # PowerNet-MIB::upsAdvOutputVoltage
+            "4.2.4.0",  # PowerNet-MIB::upsAdvOutputCurrent
+            "2.2.3.0",  # PowerNet-MIB::upsAdvBatteryRunTimeRemaining
+            "7.2.6.0",  # PowerNet-MIB::upsAdvTestCalibrationResults
+            "4.2.3.0",  # PowerNet-MIB::upsAdvOutputLoad
+            "7.2.4.0",  # PowerNet-MIB::upsLastDiagnosticsDate
+            "11.1.1.0", # PowerNet-MIB::upsBasicStateOutputState
+        ] ),
 }
diff -Nru check-mk-1.2.2p3/apc_symmetra_ext_temp check-mk-1.2.6p12/apc_symmetra_ext_temp
--- check-mk-1.2.2p3/apc_symmetra_ext_temp	2013-11-05 09:23:07.000000000 +0000
+++ check-mk-1.2.6p12/apc_symmetra_ext_temp	2015-06-24 09:48:36.000000000 +0000
@@ -7,7 +7,7 @@
 # |           | |___| | | |  __/ (__|   <    | |  | | . \            |
 # |            \____|_| |_|\___|\___|_|\_\___|_|  |_|_|\_\           |
 # |                                                                  |
-# | Copyright Mathias Kettner 2013             mk@mathias-kettner.de |
+# | Copyright Mathias Kettner 2014             mk@mathias-kettner.de |
 # +------------------------------------------------------------------+
 #
 # This file is part of Check_MK.
@@ -26,32 +26,24 @@ apc_symmetra_ext_temp_default_levels = (30, 35) -def inventory_temp(info): +def inventory_apc_symmetra_ext_temp(info): return [ ( line[0], "apc_symmetra_ext_temp_default_levels" ) \ for line in info if line[1] == "2" ] -def check_temp(item, params, info): - warn, crit = params +def check_apc_symmetra_ext_temp(item, params, info): for index, status, temp in info: if item == index: - temp = int(temp) - perfdata = [( "temp", temp, warn, crit )] - infotxt = " - %sC (levels at %d/%d)" % (temp, warn, crit) - if temp >= crit: - return (2, "CRITICAL" + infotxt, perfdata) - elif temp >= warn: - return (1, "WARNING" + infotxt, perfdata) - else: - return (0, "OK" + infotxt, perfdata) - return (3, "UNKNOWN - Sensor not found in SNMP data") + return check_temperature(int(temp), params) + return 3, "Sensor not found in SNMP data" - -check_info['apc_symmetra_ext_temp'] = (check_temp, "Temperature External %s", 1, inventory_temp) -snmp_info['apc_symmetra_ext_temp'] = ( - ".1.3.6.1.4.1.318.1.1.10.2.3.2.1", - [ 1, 3, 4 ] ) - -snmp_scan_functions['apc_symmetra_ext_temp'] = lambda oid: oid(".1.3.6.1.2.1.1.2.0").startswith(".1.3.6.1.4.1.318.1.3") - -checkgroup_of["apc_symmetra_ext_temp"] = "room_temperature" +check_info["apc_symmetra_ext_temp"] = { + 'check_function': check_apc_symmetra_ext_temp, + 'inventory_function': inventory_apc_symmetra_ext_temp, + 'service_description': 'Temperature External %s', + 'has_perfdata': True, + 'snmp_info': ('.1.3.6.1.4.1.318.1.1.10.2.3.2.1', [1, 3, 4]), + 'snmp_scan_function': lambda oid: oid(".1.3.6.1.2.1.1.2.0").startswith(".1.3.6.1.4.1.318.1.3"), + 'group': 'room_temperature', + 'includes': [ 'temperature.include' ], +} diff -Nru check-mk-1.2.2p3/apc_symmetra_power check-mk-1.2.6p12/apc_symmetra_power --- check-mk-1.2.2p3/apc_symmetra_power 2013-11-05 09:23:07.000000000 +0000 +++ check-mk-1.2.6p12/apc_symmetra_power 2015-09-21 10:59:54.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. 
@@ -35,18 +35,25 @@
         if line[0] == item:
             power = int(line[1])
             perfdata = [ ( "power", power, warn, crit, 0 ) ]
-            infotext = " - current power: %d W, warn/crit at and below %d/%d W" % \
+            infotext = "current power: %d W, warn/crit below %d/%d W" % \
                 (power, warn, crit)
 
-            if power != -1 and power <= crit:
-                return (2, "CRIT" + infotext, perfdata)
-            elif power != -1 and power <= warn:
-                return (1, "WARN" + infotext, perfdata)
+            if power != -1 and power < crit:
+                return (2, infotext, perfdata)
+            elif power != -1 and power < warn:
+                return (1, infotext, perfdata)
             else:
-                return (0, "OK" + infotext, perfdata)
+                return (0, infotext, perfdata)
 
-    return (3, "UNKNOWN - Phase %s not found in SNMP output" % item)
+    return (3, "Phase %s not found in SNMP output" % item)
 
-check_info['apc_symmetra_power'] = ( check_apc_power, "Power phase %s", 1, inventory_apc_power )
-snmp_info['apc_symmetra_power'] = ( ".1.3.6.1.4.1.318.1.1.1.9.3.3.1", [ "2.1.1", "7.1.1" ] )
-snmp_scan_functions['apc_symmetra_power'] = lambda oid: oid(".1.3.6.1.2.1.1.2.0").startswith(".1.3.6.1.4.1.318.1.3")
+
+check_info["apc_symmetra_power"] = {
+    'check_function':      check_apc_power,
+    'inventory_function':  inventory_apc_power,
+    'service_description': 'Power phase %s',
+    'has_perfdata':        True,
+    'group':               'apc_power',
+    'snmp_info':           ('.1.3.6.1.4.1.318.1.1.1.9.3.3.1', ['2.1.1', '7.1.1']),
+    'snmp_scan_function':  lambda oid: oid(".1.3.6.1.2.1.1.2.0").startswith(".1.3.6.1.4.1.318.1.3"),
+}
diff -Nru check-mk-1.2.2p3/apc_symmetra_temp check-mk-1.2.6p12/apc_symmetra_temp
--- check-mk-1.2.2p3/apc_symmetra_temp	2013-11-05 09:23:07.000000000 +0000
+++ check-mk-1.2.6p12/apc_symmetra_temp	2015-06-24 09:48:36.000000000 +0000
@@ -7,7 +7,7 @@
 # |           | |___| | | |  __/ (__|   <    | |  | | . \            |
 # |            \____|_| |_|\___|\___|_|\_\___|_|  |_|_|\_\           |
 # |                                                                  |
-# | Copyright Mathias Kettner 2013             mk@mathias-kettner.de |
+# | Copyright Mathias Kettner 2014             mk@mathias-kettner.de |
 # +------------------------------------------------------------------+
 #
 # This file is part of Check_MK.
@@ -52,22 +52,20 @@
     return [(line[0], "apc_temp_default_levels") for line in info]
 
 def check_apc_temp(item, params, info):
-    for line in info:
-        if line[0] == item:
-            temp = int(line[1])
-            warn, crit = params
-            perfdata = [ ("temp", temp, warn, crit) ]
-            infotext = " - %.1f C (warn/crit at %.1f/%.1f C)" % (temp, warn, crit)
-            if temp >= crit:
-                return (2, "CRIT" + infotext, perfdata)
-            elif temp >= warn:
-                return (1, "WARN" + infotext, perfdata)
-            else:
-                return (0, "OK" + infotext, perfdata)
-
-    return (3, "UNKNOWN - sensor not found in SNMP data")
-
-check_info['apc_symmetra_temp'] = ( check_apc_temp, "Temperature %s", 1, inventory_apc_temp )
-snmp_info['apc_symmetra_temp'] = ( ".1.3.6.1.4.1.318.1.1.10.4.2.3.1", [ "3", "5" ] )
-snmp_scan_functions['apc_symmetra_temp'] = lambda oid: oid(".1.3.6.1.2.1.1.2.0").startswith(".1.3.6.1.4.1.318.1.3")
-checkgroup_of['apc_symmetra_temp'] = "room_temperature"
+    for name, temp in info:
+        if name == item:
+            return check_temperature(int(temp), params)
+
+    return (3, "Sensor not found in SNMP data")
+
+
+check_info["apc_symmetra_temp"] = {
+    'check_function':      check_apc_temp,
+    'inventory_function':  inventory_apc_temp,
+    'service_description': 'Temperature %s',
+    'has_perfdata':        True,
+    'snmp_info':           ('.1.3.6.1.4.1.318.1.1.10.4.2.3.1', ['3', '5']),
+    'snmp_scan_function':  lambda oid: oid(".1.3.6.1.2.1.1.2.0").startswith(".1.3.6.1.4.1.318.1.3"),
+    'group':               'room_temperature',
+    'includes':            [ 'temperature.include' ],
+}
diff -Nru check-mk-1.2.2p3/apc_symmetra_test check-mk-1.2.6p12/apc_symmetra_test
--- check-mk-1.2.2p3/apc_symmetra_test	1970-01-01 00:00:00.000000000 +0000
+++ check-mk-1.2.6p12/apc_symmetra_test	2015-06-24 09:48:36.000000000 +0000
@@ -0,0 +1,107 @@
+#!/usr/bin/python
+# -*- encoding: utf-8; py-indent-offset: 4 -*-
+# +------------------------------------------------------------------+
+# |             ____ _               _        __  __ _  __           |
+# |            / ___| |__   ___  ___| | __   |  \/  | |/ /           |
+# |           | |   | '_ \ / _ \/ __| |/ /   | |\/| | ' /            |
+# |           | |___| | | |  __/ (__|   <    | |  | | . \            |
+# |            \____|_| |_|\___|\___|_|\_\___|_|  |_|_|\_\           |
+# |                                                                  |
+# | Copyright Mathias Kettner 2014             mk@mathias-kettner.de |
+# +------------------------------------------------------------------+
+#
+# This file is part of Check_MK.
+# The official homepage is at http://mathias-kettner.de/check_mk.
+#
+# check_mk is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by
+# the Free Software Foundation in version 2. check_mk is distributed
+# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with-
+# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE. See the GNU General Public License for more de-
+# tails. You should have received a copy of the GNU General Public
+# License along with GNU Make; see the file COPYING. If not, write
+# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
+# Boston, MA 02110-1301 USA.
+
+# We use the following OIDs:
+
+
+# PowerNet-MIB::upsAdvTestDiagnosticsResults  .1.3.6.1.4.1.318.1.1.1.7.2.3
+# upsAdvTestDiagnosticsResults OBJECT-TYPE
+#     SYNTAX INTEGER {
+#         ok(1),
+#         failed(2),
+#         invalidTest(3),
+#         testInProgress(4)
+#     }
+#     ACCESS read-only
+#     STATUS mandatory
+#     DESCRIPTION
+#         "The results of the last UPS diagnostics test performed."
+# ::= { upsAdvTest 3 } + +# PowerNet-MIB::upsAdvTestLastDiagnosticsDate .1.3.6.1.4.1.318.1.1.1.7.2.4 +# upsAdvTestLastDiagnosticsDate OBJECT-TYPE +# SYNTAX DisplayString +# ACCESS read-only +# STATUS mandatory +# DESCRIPTION +# "The date the last UPS diagnostics test was performed in +# mm/dd/yy format." +# ::= { upsAdvTest 4 } +# + +ups_test_default = (0, 0) + +def check_apc_test(item, params, info): + days_warn, days_crit = params + if not info: + return 3, "Data Missing" + last_result = int(info[0][0]) + last_date = info[0][1] + + if last_date == 'Unknown' or len(last_date) not in [8, 10]: + return 3, "Date of last self test is unknown" + + year_format = len(last_date) == 8 and '%y' or '%Y' + last_ts = time.mktime(time.strptime(last_date, '%m/%d/'+year_format)) + days_diff = (time.time() - last_ts) / 86400 + + diagnostic_status_text = { 1:"OK", 2:"failed", 3:"invalid", 4:"in progress" } + + state = 0 + diag_label = "" + if last_result == 2: + state = 2 + diag_label = "(!!)" + elif last_result == 3: + state = 1 + diag_label = "(!)" + + time_label = "" + if days_crit and days_diff >= days_crit: + state = 2 + time_label = "(!!)" + elif days_warn and days_diff >= days_warn: + state = max(state, 1) + time_label = "(!)" + + return state, "Result of self test: %s%s, Date of last test: %s%s" % \ + (diagnostic_status_text.get(last_result, '-'), diag_label, last_date, time_label) + + +def inventory_apc_test(info): + if info: + return [(None, "ups_test_default")] + + +check_info['apc_symmetra_test'] = { + "inventory_function" : inventory_apc_test, + "check_function" : check_apc_test, + "service_description" : "Self Test", + "has_perfdata" : False, + "group" : "ups_test", + "snmp_scan_function" : lambda oid: oid(".1.3.6.1.2.1.1.2.0").startswith(".1.3.6.1.4.1.318.1.3"), + "snmp_info" : (".1.3.6.1.4.1.318.1.1.1.7.2", [ 3,4 ] ) +} diff -Nru check-mk-1.2.2p3/api/python/example_multisite.py check-mk-1.2.6p12/api/python/example_multisite.py --- check-mk-1.2.2p3/api/python/example_multisite.py 2013-03-04 11:48:41.000000000 +0000 +++ check-mk-1.2.6p12/api/python/example_multisite.py 2014-10-30 13:30:24.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. diff -Nru check-mk-1.2.2p3/api/python/example.py check-mk-1.2.6p12/api/python/example.py --- check-mk-1.2.2p3/api/python/example.py 2013-03-04 11:48:41.000000000 +0000 +++ check-mk-1.2.6p12/api/python/example.py 2014-10-30 13:30:24.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. @@ -24,7 +24,7 @@ # to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, # Boston, MA 02110-1301 USA. -import os +import os, sys import livestatus try: diff -Nru check-mk-1.2.2p3/api/python/livestatus.py check-mk-1.2.6p12/api/python/livestatus.py --- check-mk-1.2.2p3/api/python/livestatus.py 2013-03-04 11:48:41.000000000 +0000 +++ check-mk-1.2.6p12/api/python/livestatus.py 2014-10-30 13:30:24.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . 
\            |
 # |            \____|_| |_|\___|\___|_|\_\___|_|  |_|_|\_\           |
 # |                                                                  |
-# | Copyright Mathias Kettner 2013             mk@mathias-kettner.de |
+# | Copyright Mathias Kettner 2014             mk@mathias-kettner.de |
 # +------------------------------------------------------------------+
 #
 # This file is part of Check_MK.
@@ -24,7 +24,7 @@
 # to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
 # Boston, MA 02110-1301 USA.
 
-import socket, time
+import socket, time, re
 
 # Python 2.3 does not have 'set' in normal namespace.
 # But it can be imported from 'sets'
@@ -52,6 +52,9 @@
 # Keep a global array of persistant connections
 persistent_connections = {}
 
+# Regular expression for removing Cache: headers if caching is not allowed
+remove_cache_regex = re.compile("\nCache:[^\n]*")
+
 # DEBUGGING PERSISTENT CONNECTIONS
 # import os
 # hirn_debug = file("/tmp/live.log", "a")
@@ -160,10 +163,11 @@
 
 class BaseConnection:
-    def __init__(self, socketurl, persist = False):
+    def __init__(self, socketurl, persist = False, allow_cache = False):
         """Create a new connection to a MK Livestatus socket"""
         self.add_headers = ""
         self.persist = persist
+        self.allow_cache = allow_cache
         self.socketurl = socketurl
         self.socket = None
         self.timeout = None
@@ -185,7 +189,7 @@
             self.socket = persistent_connections[self.socketurl]
             self.successful_persistence = True
             return
-
+
         self.successful_persistence = False
 
         # Create new socket
@@ -194,7 +198,7 @@
         parts = url.split(":")
         if parts[0] == "unix":
             if len(parts) != 2:
-                raise MKLivestatusConfigError("Invalid livestatus unix url: %s. "
+                raise MKLivestatusConfigError("Invalid livestatus unix URL: %s. "
                         "Correct example is 'unix:/var/run/nagios/rw/live'" % url)
             self.socket = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
             target = parts[1]
@@ -204,21 +208,37 @@
                 host = parts[1]
                 port = int(parts[2])
             except:
-                raise MKLivestatusConfigError("Invalid livestatus tcp url '%s'. "
+                raise MKLivestatusConfigError("Invalid livestatus tcp URL '%s'. "
                         "Correct example is 'tcp:somehost:6557'" % url)
             self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
             target = (host, port)
         else:
-            raise MKLivestatusConfigError("Invalid livestatus url '%s'. "
+            raise MKLivestatusConfigError("Invalid livestatus URL '%s'. "
                     "Must begin with 'tcp:' or 'unix:'" % url)
 
-        try:
-            if self.timeout:
-                self.socket.settimeout(float(self.timeout))
-            self.socket.connect(target)
-        except Exception, e:
-            self.socket = None
-            raise MKLivestatusSocketError("Cannot connect to '%s': %s" % (self.socketurl, e))
+        # If a timeout is set, then we retry after a failure with a mild
+        # binary backoff.
+        if self.timeout:
+            before = time.time()
+            sleep_interval = 0.1
+
+        while True:
+            try:
+                if self.timeout:
+                    self.socket.settimeout(float(sleep_interval))
+                self.socket.connect(target)
+                break
+            except Exception, e:
+                if self.timeout:
+                    time_left = self.timeout - (time.time() - before)
+                    # only try again, if there is substantial time left
+                    if time_left > sleep_interval:
+                        time.sleep(sleep_interval)
+                        sleep_interval *= 1.5
+                        continue
+
+                self.socket = None
+                raise MKLivestatusSocketError("Cannot connect to '%s': %s" % (self.socketurl, e))
 
         if self.persist:
             persistent_connections[self.socketurl] = self.socket
@@ -230,6 +250,8 @@
 
     def receive_data(self, size):
         result = ""
+        # Timeout is only honored when connecting
+        self.socket.settimeout(None)
         while size > 0:
             packet = self.socket.recv(size)
             if len(packet) == 0:
@@ -242,7 +264,10 @@
         self.send_query(query, add_headers)
         return self.recv_response(query, add_headers)
 
-    def send_query(self, query, add_headers = ""):
+    def send_query(self, query, add_headers = "", do_reconnect=True):
+        if not self.allow_cache:
+            query = remove_cache_regex.sub("", query)
+        orig_query = query
         if self.socket == None:
             self.connect()
         if not query.endswith("\n"):
@@ -250,23 +275,36 @@
         query += self.auth_header + self.add_headers
         query += "Localtime: %d\nOutputFormat: python\nKeepAlive: on\nResponseHeader: fixed16\n" % int(time.time())
         query += add_headers
+
         if not query.endswith("\n"):
             query += "\n"
         query += "\n"
 
         try:
+            # socket.send() will implicitly cast to str(), we need to
+            # convert to UTF-8 in order to avoid exceptions
+            if type(query) == unicode:
+                query = query.encode("utf-8")
             self.socket.send(query)
         except IOError, e:
             if self.persist:
                 del persistent_connections[self.socketurl]
                 self.successful_persistence = False
             self.socket = None
-            raise MKLivestatusSocketError(str(e))
+
+            if do_reconnect:
+                # Automatically try to reconnect in case of an error, but
+                # only once.
+                self.connect()
+                self.send_query(orig_query, add_headers, False)
+                return
+
+            raise MKLivestatusSocketError("RC1:" + str(e))
 
     # Reads a response from the livestatus socket. If the socket is closed
    # by the livestatus server, we automatically make a reconnect and send
    # the query again (once). This is due to timeouts during keepalive.
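+    # A sketch of the retry window implemented below (the 10 s figure is a
+    # hypothetical example): a connection with self.timeout = 10 that loses
+    # its socket keeps reconnecting and resending the query, sleeping 0.1 s
+    # per attempt, until time.time() passes timeout_at = now + self.timeout;
+    # only then is the MKLivestatusSocketError raised to the caller.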
- def recv_response(self, query = None, add_headers = ""): + def recv_response(self, query = None, add_headers = "", timeout_at = None): try: resp = self.receive_data(16) code = resp[0:3] @@ -282,20 +320,26 @@ raise MKLivestatusSocketError("Malformed output") else: raise MKLivestatusQueryError(code, data.strip()) - except MKLivestatusSocketClosed: + + # In case of an IO error or the other side having + # closed the socket do a reconnect and try again, but + # only once + except (MKLivestatusSocketClosed, IOError), e: self.disconnect() - if query: + now = time.time() + if query and (not timeout_at or timeout_at > now): + if timeout_at == None: + timeout_at = now + self.timeout + time.sleep(0.1) self.connect() self.send_query(query, add_headers) - return self.recv_response() # do not send query again -> danger of infinite loop + return self.recv_response(query, add_headers, timeout_at) # do not send query again -> danger of infinite loop else: - raise + raise MKLivestatusSocketError(str(e)) + + except Exception, e: + raise MKLivestatusSocketError("Unhandled exception: %s" % e) - except IOError, e: - self.socket = None - if self.persist: - del persistent_connections[self.socketurl] - raise MKLivestatusSocketError(str(e)) def do_command(self, command): if self.socket == None: @@ -312,8 +356,8 @@ class SingleSiteConnection(BaseConnection, Helpers): - def __init__(self, socketurl, persist = False): - BaseConnection.__init__(self, socketurl, persist) + def __init__(self, socketurl, persist = False, allow_cache = False): + BaseConnection.__init__(self, socketurl, persist, allow_cache) self.prepend_site = False self.auth_users = {} self.deadsites = {} # never filled, just for compatibility @@ -377,7 +421,7 @@ try: url = site["socket"] persist = not temporary and site.get("persist", False) - connection = SingleSiteConnection(url, persist) + connection = SingleSiteConnection(url, persist, allow_cache=site.get("cache", False)) if "timeout" in site: connection.set_timeout(int(site["timeout"])) connection.connect() @@ -628,7 +672,7 @@ # Return connection to localhost (UNIX), if available def local_connection(self): for sitename, site, connection in self.connections: - if site["socket"].startswith("unix:"): + if site["socket"].startswith("unix:") and "liveproxy" not in site["socket"]: return connection raise MKLivestatusConfigError("No livestatus connection to local host") diff -Nru check-mk-1.2.2p3/api/python/make_nagvis_map.py check-mk-1.2.6p12/api/python/make_nagvis_map.py --- check-mk-1.2.2p3/api/python/make_nagvis_map.py 2013-03-04 11:48:41.000000000 +0000 +++ check-mk-1.2.6p12/api/python/make_nagvis_map.py 2014-10-30 13:30:24.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. 
diff -Nru check-mk-1.2.2p3/arc_raid_status check-mk-1.2.6p12/arc_raid_status --- check-mk-1.2.2p3/arc_raid_status 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/arc_raid_status 2015-06-24 09:48:36.000000000 +0000 @@ -0,0 +1,70 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. + +# Example output from agent: +# 1 Raid Set # 00 3 2250.5GB 0.0GB 123 Normal +# ( # Name Disks TotalCap FreeCap DiskChannels State ) + +def inventory_arc_raid_status(info): + return [ ( x[0], saveint(x[-5]) ) for x in info ] + +def check_arc_raid_status(item, params, info): + for line in info: + if line[0] == item: + messages = [] + state = 0 + + raid_state = line[-1] + label = "" + if raid_state in [ "Degarde", "Incompleted" ]: + state = 2 + label = "(!!)" + elif raid_state == "Rebuilding": + state = 1 + label = "(!)" + elif raid_state != "Normal": + state = 2 + label = "(!!)" + messages.append("Raid in state: %s%s" % ( raid_state, label )) + + # Check the number of disks + i_disks = params + c_disks = saveint(line[-5]) + if i_disks != c_disks: + message.append("Number of disks has changed from %d to %d(!!)" % ( i_disks, c_disks )) + state = 2 + + return state, ", ".join(messages) + + return 3, "Array not found" + +check_info["arc_raid_status"] = { + "check_function" : check_arc_raid_status, + "inventory_function" : inventory_arc_raid_status, + "service_description" : "Raid Array #%s", + "has_perfdata" : False, +} + diff -Nru check-mk-1.2.2p3/arcserve_backup check-mk-1.2.6p12/arcserve_backup --- check-mk-1.2.2p3/arcserve_backup 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/arcserve_backup 2015-06-24 09:48:36.000000000 +0000 @@ -0,0 +1,170 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. 
diff -Nru check-mk-1.2.2p3/arcserve_backup check-mk-1.2.6p12/arcserve_backup
--- check-mk-1.2.2p3/arcserve_backup	1970-01-01 00:00:00.000000000 +0000
+++ check-mk-1.2.6p12/arcserve_backup	2015-06-24 09:48:36.000000000 +0000
@@ -0,0 +1,170 @@
+#!/usr/bin/python
+# -*- encoding: utf-8; py-indent-offset: 4 -*-
+# +------------------------------------------------------------------+
+# |             ____ _               _        __  __ _  __           |
+# |            / ___| |__   ___  ___| | __   |  \/  | |/ /           |
+# |           | |   | '_ \ / _ \/ __| |/ /   | |\/| | ' /            |
+# |           | |___| | | |  __/ (__|   <    | |  | | . \            |
+# |            \____|_| |_|\___|\___|_|\_\___|_|  |_|_|\_\           |
+# |                                                                  |
+# | Copyright Mathias Kettner 2014             mk@mathias-kettner.de |
+# +------------------------------------------------------------------+
+#
+# This file is part of Check_MK.
+# The official homepage is at http://mathias-kettner.de/check_mk.
+#
+# check_mk is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by
+# the Free Software Foundation in version 2. check_mk is distributed
+# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with-
+# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE. See the GNU General Public License for more de-
+# tails. You should have received a copy of the GNU General Public
+# License along with GNU Make; see the file COPYING. If not, write
+# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
+# Boston, MA 02110-1301 USA.
+
+# Example output from agent:
+# <<<arcserve_backup>>>
+#
+# Job:
+# 3960
+# Beschreibung: Tagessicherung staging. (27.01.2014)
+# 255.154 Verzeichnis(se) 1.400.060 Datei(en) (388,38 GB) auf Datenträger gesichert.
+# Vorgang Sichern erfolgreich
+#
+# Job:
+# 3954
+# Beschreibung: Wochensicherung staging. (24.01.2014)
+# 340.611 Verzeichnis(se) 1.726.321 Datei(en) (446,52 GB) auf Datenträger gesichert.
+# Vorgang Sichern erfolgreich
+#
+# <<<arcserve_backup>>>
+# Job:
+# 3972
+# Beschreibung: Tagessicherung staging. (30.01.2014)
+# 255.641 Verzeichnis(se) 1.405.125 Datei(en) (389,27 GB) auf Datenträger gesichert.
+# Vorgang Sichern unvollständig.Anzahl an Fehlern/Warnungen: 0/1
+#
+# Job:
+# 3954
+# Beschreibung: Wochensicherung staging. (24.01.2014)
+# 340.611 Verzeichnis(se) 1.726.321 Datei(en) (446,52 GB) auf Datenträger gesichert.
+# Vorgang Sichern erfolgreich
+#
+# <<<arcserve_backup>>>
+# Job:
+# 3976
+# Beschreibung: Wochensicherung staging. (31.01.2014)
+# 341.092 Verzeichnis(se) 1.731.713 Datei(en) (447,42 GB) auf Datenträger gesichert.
+# Vorgang Sichern konnte nicht durchgeführt werden.Anzahl an Fehlern/Warnungen: 1/0
+#
+# Job:
+# 3972
+# Beschreibung: Tagessicherung staging. (30.01.2014)
+# 255.641 Verzeichnis(se) 1.405.125 Datei(en) (389,27 GB) auf Datenträger gesichert.
+# Vorgang Sichern unvollständig.Anzahl an Fehlern/Warnungen: 0/1
+
+# parses info in a structure like
+# parsed = {
+#     'Tagessicherung staging': { 'dirs'  : 255641,
+#                                 'files' : 1405125,
+#                                 'result': 'Sichern unvollst\xc3\xa4ndig.Anzahl an Fehlern/Warnungen: 0/1',
+#                                 'size'  : 417975479828},
+#     'Wochensicherung staging': {'dirs'  : 341092,
+#                                 'files' : 1731713,
+#                                 'result': 'Sichern konnte nicht durchgef\xc3\xbchrt werden.Anzahl an Fehlern/Warnungen: 1/0',
+#                                 'size'  : 480413566894}}
+
+def parse_arcserve_backup(info):
+    unit_factor = { "kb": 1024, "mb": 1024 ** 2, "gb": 1024 ** 3, "tb": 1024 ** 4 }
+    parsed = {}
+    for line in info:
+        if line[0] == "Beschreibung:":
+            backup_id = " ".join(line[1:-1])
+            if backup_id[-1] == ".":
+                backup_id = backup_id[0:-1]
+            backup = {}
+            parsed[backup_id] = backup
+        elif len(line) > 5 and line[1] == "Verzeichnis(se)" and line[3] == "Datei(en)" \
+             and line[5][-1] == ")":
+            dirs  = int(line[0].replace(".", ""))
+            files = int(line[2].replace(".", ""))
+            unit  = line[5].replace(")", "").lower()
+            size  = int(float(line[4].replace("(", "").replace(",", ".")) * unit_factor[unit])
+            backup["dirs"]  = dirs
+            backup["files"] = files
+            backup["size"]  = size
+        elif len(line) > 1 and line[0] == "Vorgang":
+            result = " ".join(line[1:])
+            backup["result"] = result
+    return parsed
+
+def inventory_arcserve_backup(info):
+    parsed = parse_arcserve_backup(info)
+    inventory = []
+    for backup in parsed:
+        inventory.append( (backup, None) )
+    return inventory
+
+def check_arcserve_backup(item, _no_params, info):
+    parsed = parse_arcserve_backup(info)
+    if item not in parsed:
+        return 3, "Backup %s not found in agent output" % item
+
+    message = ""
+    perfdata = []
+
+    # directories
+    if "dirs" in parsed[item]:
+        dirs = parsed[item]["dirs"]
+        message += "%s directories" % parsed[item]["dirs"]
+    else:
+        dirs = 0
+    perfdata.append(("dirs", dirs))
+
+    # files
+    if "files" in parsed[item]:
+        if message != "":
+            message += ", "
+        files = parsed[item]["files"]
+        message += "%s files" % parsed[item]["files"]
+    else:
+        files = 0
+    perfdata.append(("files", files))
+
+    # size
+    if "size" in parsed[item]:
+        if message != "":
+            message += ", "
+        size = parsed[item]["size"]
+        message += "Size: %s" % get_bytes_human_readable(parsed[item]["size"])
+    else:
+        size = 0
+    perfdata.append(("size", str(size) + "Bytes"))
+
+    # result
+    if message != "":
+        message += ", "
+
+    if parsed[item]["result"].startswith("Sichern erfolgreich"):
+        status = 0
+    elif parsed[item]["result"].startswith("Sichern unvollst"):
+        status = 1
+    elif parsed[item]["result"].startswith("Sichern konnte nicht durchgef"):
+        status = 2
+    else:
+        message += "unknown result: %s" % parsed[item]["result"]
+        return 3, message, perfdata
+
+    message += "Result: %s" % parsed[item]["result"]
+
+    return status, message, perfdata
+
+check_info["arcserve_backup"] = {
+    "check_function"      : check_arcserve_backup,
+    "inventory_function"  : inventory_arcserve_backup,
+    "service_description" : "Arcserve Backup %s",
+    "has_perfdata"        : True,
+}
+
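The parser above has to cope with localized agent output: sizes arrive
with a German decimal comma and a unit glued to the closing parenthesis.
A worked example using the figures from the sample output:

unit_factor = { "kb": 1024, "mb": 1024 ** 2, "gb": 1024 ** 3, "tb": 1024 ** 4 }
value, unit = "(389,27", "GB)"
unit = unit.replace(")", "").lower()    # -> "gb"
size = int(float(value.replace("(", "").replace(",", ".")) * unit_factor[unit])
# size == 417975479828, the 'size' shown in the parsed example above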
diff -Nru check-mk-1.2.2p3/arris_cmts_cpu check-mk-1.2.6p12/arris_cmts_cpu
--- check-mk-1.2.2p3/arris_cmts_cpu	1970-01-01 00:00:00.000000000 +0000
+++ check-mk-1.2.6p12/arris_cmts_cpu	2015-06-24 09:48:36.000000000 +0000
@@ -0,0 +1,78 @@
+#!/usr/bin/python
+# -*- encoding: utf-8; py-indent-offset: 4 -*-
+# +------------------------------------------------------------------+
+# |             ____ _               _        __  __ _  __           |
+# |            / ___| |__   ___  ___| | __   |  \/  | |/ /           |
+# |           | |   | '_ \ / _ \/ __| |/ /   | |\/| | ' /            |
+# |           | |___| | | |  __/ (__|   <    | |  | | . \            |
+# |            \____|_| |_|\___|\___|_|\_\___|_|  |_|_|\_\           |
+# |                                                                  |
+# | Copyright Mathias Kettner 2014             mk@mathias-kettner.de |
+# +------------------------------------------------------------------+
+#
+# This file is part of Check_MK.
+# The official homepage is at http://mathias-kettner.de/check_mk.
+#
+# check_mk is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by
+# the Free Software Foundation in version 2. check_mk is distributed
+# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with-
+# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE. See the GNU General Public License for more de-
+# tails. You should have received a copy of the GNU General Public
+# License along with GNU Make; see the file COPYING. If not, write
+# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
+# Boston, MA 02110-1301 USA.
+
+arris_cmts_cpu_default_levels = ( 90, 95 )
+
+def inventory_arris_cmts_cpu(info):
+    for oid_id, cpu_id, cpu_idle_util in info:
+        # Sadly the cpu_id seems to be empty. Referring to
+        # the MIB, it should be the slot id.
+        if cpu_id:
+            yield cpu_id, 'arris_cmts_cpu_default_levels'
+        else:
+            # Fall back to the OID end
+            item = int(oid_id) - 1
+            yield item, 'arris_cmts_cpu_default_levels'
+
+def check_arris_cmts_cpu(item, params, info):
+    for oid_id, cpu_id, cpu_idle_util in info:
+        # see inventory function
+        if cpu_id:
+            citem = cpu_id
+        else:
+            citem = int(oid_id) - 1
+
+        if citem == item:
+            # We get the IDLE percentage, but need the usage
+            cpu_util = 100 - int(cpu_idle_util)
+            warn, crit = params
+            infotext = "Current utilization is: %d %%" % cpu_util
+            levels = " (warn/crit at %.1f/%.1f %%)" % (warn, crit)
+            perfdata = [ ( "util", cpu_util, warn, crit ) ]
+            if cpu_util >= crit:
+                yield 2, infotext + levels, perfdata
+            elif cpu_util >= warn:
+                yield 1, infotext + levels, perfdata
+            else:
+                yield 0, infotext, perfdata
+            return
+    yield 3, "CPU information not found"
+
+check_info["arris_cmts_cpu"] = {
+    "check_function"      : check_arris_cmts_cpu,
+    "inventory_function"  : inventory_arris_cmts_cpu,
+    "service_description" : "CPU utilization Module %s",
+    "has_perfdata"        : True,
+    "snmp_scan_function"  : arris_cmts_scan_function,
+    "snmp_info"           : ( ".1.3.6.1.4.1.4998.1.1.5.3.1.1.1", [
+                                  OID_END,
+                                  1, # cadCpuCardId
+                                  8, # cadIdleCpuRecentPercent
+                            ] ),
+    "group"               : "cpu_utilization_multiitem",
+    "includes"            : [ "arris_cmts.include" ]
+}
+
diff -Nru check-mk-1.2.2p3/arris_cmts.include check-mk-1.2.6p12/arris_cmts.include
--- check-mk-1.2.2p3/arris_cmts.include	1970-01-01 00:00:00.000000000 +0000
+++ check-mk-1.2.6p12/arris_cmts.include	2015-06-24 09:48:36.000000000 +0000
@@ -0,0 +1,28 @@
+#!/usr/bin/python
+# -*- encoding: utf-8; py-indent-offset: 4 -*-
+# +------------------------------------------------------------------+
+# |             ____ _               _        __  __ _  __           |
+# |            / ___| |__   ___  ___| | __   |  \/  | |/ /           |
+# |           | |   | '_ \ / _ \/ __| |/ /   | |\/| | ' /            |
+# |           | |___| | | |  __/ (__|   <    | |  | | . \            |
+# |            \____|_| |_|\___|\___|_|\_\___|_|  |_|_|\_\           |
+# |                                                                  |
+# | Copyright Mathias Kettner 2014             mk@mathias-kettner.de |
+# +------------------------------------------------------------------+
+#
+# This file is part of Check_MK.
+# The official homepage is at http://mathias-kettner.de/check_mk.
+#
+# check_mk is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by
+# the Free Software Foundation in version 2.
check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. + +def arris_cmts_scan_function(oid): + return oid(".1.3.6.1.2.1.1.2.0") == ".1.3.6.1.4.1.4998.2.1" diff -Nru check-mk-1.2.2p3/arris_cmts_mem check-mk-1.2.6p12/arris_cmts_mem --- check-mk-1.2.2p3/arris_cmts_mem 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/arris_cmts_mem 2015-06-24 09:48:36.000000000 +0000 @@ -0,0 +1,87 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. 
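The memory check that follows accepts two kinds of levels and tells
them apart by type: float values are percentages of the heap size,
anything else is treated as absolute bytes. A minimal sketch of that
dispatch (the helper name is invented for the example):

def effective_levels(levels, heap):
    warn, crit = levels
    if type(crit) == float:                 # percentage levels
        return heap * warn / 100, heap * crit / 100
    return warn, crit                       # absolute byte levels

print effective_levels(( 80.0, 90.0 ), 1024 ** 3)   # (858993459.2, 966367641.6)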
+ +factory_settings["arris_cmts_mem"] = { + "levels" : ( 80.0, 90.0 ), +} + +def inventory_arris_cmts_mem(info): + for cid, heap, heap_free in info: + # The Module numbers are starting with 0, not with 1 like the OIDs + cid = int(cid) - 1 + yield cid, {} + +def check_arris_cmts_mem(item, params, info): + for cid, heap, heap_free in info: + cid = int(cid) - 1 + if cid == item: + heap_free, heap = float(heap_free), float(heap) + usage = heap - heap_free + usage_perc = (usage / heap) * 100 + infotext = "Usage: %s of %s (%.2f %%)" % \ + (get_bytes_human_readable(usage), get_bytes_human_readable(heap), usage_perc) + warn, crit = params['levels'] + # Percent level + if type(crit) == float: + warn_abs = heap * warn / 100 + crit_abs = heap * crit / 100 + perfdata = [ ( 'usage', usage, warn_abs, crit_abs, 0, heap )] + levels = " (warn/crit at %.0f/%.0f %%)" % ( warn, crit ) + if usage_perc >= crit: + yield 2, infotext+levels, perfdata + elif usage_perc >= warn: + yield 1, infotext+levels, perfdata + else: + yield 0, infotext, perfdata + else: + perfdata = [ ( 'usage', usage, warn, crit, 0, heap )] + levels = " (warn/crit at %s/%s)" % \ + ( get_bytes_human_readable(warn), get_bytes_human_readable(crit) ) + if usage >= crit: + yield 2, infotext+levels, perfdata + elif usage >= warn: + yield 1, infotext+levels, perfdata + else: + yield 0, infotext, perfdata + return + yield 3, "Card not found in Output" + +check_info["arris_cmts_mem"] = { + "check_function" : check_arris_cmts_mem, + "inventory_function" : inventory_arris_cmts_mem, + "service_description" : "Memory Module %s", + "has_perfdata" : True, + "snmp_scan_function" : arris_cmts_scan_function, + "default_levels_variable" : "arris_cmts_mem", + "snmp_info" : ( ".1.3.6.1.4.1.4998.1.1.5.3.2.1.1", [ + OID_END, + 2, # cadMeHeapSize + 3, # cadMeHeapRemaining + ] ), + "group" : "memory_multiitem", + "includes" : [ "arris_cmts.include" ] +} + diff -Nru check-mk-1.2.2p3/arris_cmts_temp check-mk-1.2.6p12/arris_cmts_temp --- check-mk-1.2.2p3/arris_cmts_temp 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/arris_cmts_temp 2015-06-24 09:48:36.000000000 +0000 @@ -0,0 +1,57 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. 
+ +arris_cmts_temp_default_levels = ( 40, 46 ) + +def inventory_arris_cmts_temp(info): + for line in info: + # only devices with not default temperature + if line[1] != '999': + yield line[0], 'arris_cmts_temp_default_levels' + + +def check_arris_cmts_temp(item, params, info): + for name, temp in info: + if name == item: + return check_temperature(int(temp), params) + + return 3, "Sensor not found in SNMP data" + + +check_info["arris_cmts_temp"] = { + "check_function" : check_arris_cmts_temp, + "inventory_function" : inventory_arris_cmts_temp, + "service_description" : "Temperature Module %s", + "has_perfdata" : True, + "snmp_scan_function" : arris_cmts_scan_function, + "snmp_info" : ( ".1.3.6.1.4.1.4998.1.1.10.1.4.2.1", [ + 3, # cardName + 29, # Card Temperature + ] ), + "group" : "hw_temperature", + "includes" : [ "arris_cmts.include", "temperature.include" ], +} + diff -Nru check-mk-1.2.2p3/asciimail check-mk-1.2.6p12/asciimail --- check-mk-1.2.2p3/asciimail 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/asciimail 2015-06-24 09:48:37.000000000 +0000 @@ -0,0 +1,241 @@ +#!/usr/bin/python +# ASCII Email +# Bulk: yes + +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. + + +# This script creates an ASCII email. It replaces the builtin ASCII email feature and +# is configurable via WATO with named parameters (only). + +import os, re, sys, subprocess +from email.mime.text import MIMEText + +opt_debug = '-d' in sys.argv +bulk_mode = '--bulk' in sys.argv + +# Note: When you change something here, please also change this +# in web/plugins/wato/notifications.py in the default values of the configuration +# ValueSpec. 
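The notification texts below are produced by simple $VARIABLE$
substitution: substitute_context() (defined further down) first fills
in all known context variables and then blanks out any markers that
remain. A stand-alone sketch of the same scheme:

import re

def subst(template, context):
    for varname, value in context.items():
        template = template.replace('$' + varname + '$', value)
    return re.sub("\$[A-Z_][A-Z_0-9]*\$", "", template)   # drop unknown markers

print subst("Host: $HOSTNAME$ ($UNSET$)", { "HOSTNAME": "srv01" })
# -> Host: srv01 ()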
+tmpl_host_subject = 'Check_MK: $HOSTNAME$ - $EVENT_TXT$' +tmpl_service_subject = 'Check_MK: $HOSTNAME$/$SERVICEDESC$ $EVENT_TXT$' +tmpl_common_body = """Host: $HOSTNAME$ +Alias: $HOSTALIAS$ +Address: $HOSTADDRESS$ +""" +tmpl_host_body = """Event: $EVENT_TXT$ +Output: $HOSTOUTPUT$ +Perfdata: $HOSTPERFDATA$ +$LONGHOSTOUTPUT$ +""" +tmpl_service_body = """Service: $SERVICEDESC$ +Event: $EVENT_TXT$ +Output: $SERVICEOUTPUT$ +Perfdata: $SERVICEPERFDATA$ +$LONGSERVICEOUTPUT$ +""" + +def substitute_context(template, context): + # First replace all known variables + for varname, value in context.items(): + template = template.replace('$'+varname+'$', value) + + # Remove the rest of the variables and make them empty + template = re.sub("\$[A-Z_][A-Z_0-9]*\$", "", template) + return template + + +def build_mail(target, subject, from_address, reply_to, content_txt): + # The plain text part + m = MIMEText(content_txt, 'plain', _charset='utf-8') + + m['Subject'] = subject + m['To'] = target + + # Set a few configurable headers + if from_address: + m['From'] = from_address + + if reply_to: + m['Reply-To'] = reply_to + + return m + +def send_mail(m, target, from_address): + cmd = ["/usr/sbin/sendmail"] + if from_address: + cmd += ['-f', from_address] + cmd += [ "-i", target] + p = subprocess.Popen(cmd, stdin = subprocess.PIPE) + p.communicate(m.as_string()) + +def construct_content(context): + + # Create a notification summary in a new context variable + # Note: This code could maybe move to cmk --notify in order to + # make it available every in all notification scripts + # We have the following types of notifications: + + # - Alerts OK -> CRIT + # NOTIFICATIONTYPE is "PROBLEM" or "RECOVERY" + + # - Flapping Started, Ended + # NOTIFICATIONTYPE is "FLAPPINGSTART" or "FLAPPINGSTOP" + + # - Downtimes Started, Ended, Cancelled + # NOTIFICATIONTYPE is "DOWNTIMESTART", "DOWNTIMECANCELLED", or "DOWNTIMEEND" + + # - Acknowledgements + # NOTIFICATIONTYPE is "ACKNOWLEDGEMENT" + + # - Custom notifications + # NOTIFICATIONTYPE is "CUSTOM" + + notification_type = context["NOTIFICATIONTYPE"] + if notification_type in [ "PROBLEM", "RECOVERY" ]: + txt_info = "$PREVIOUS@HARDSHORTSTATE$ -> $@SHORTSTATE$" + + elif notification_type.startswith("FLAP"): + if "START" in notification_type: + txt_info = "Started Flapping" + else: + txt_info = "Stopped Flapping ($@SHORTSTATE$)" + + elif notification_type.startswith("DOWNTIME"): + what = notification_type[8:].title() + txt_info = "Downtime " + what + " ($@SHORTSTATE$)" + + elif notification_type == "ACKNOWLEDGEMENT": + txt_info = "Acknowledged ($@SHORTSTATE$)" + + elif notification_type == "CUSTOM": + txt_info = "Custom Notification ($@SHORTSTATE$)" + + else: + txt_info = notification_type # Should neven happen + + txt_info = substitute_context(txt_info.replace("@", context["WHAT"]), context) + + context["EVENT_TXT"] = txt_info + + # Prepare the mail contents + if "PARAMETER_COMMON_BODY" in context: + tmpl_body = context['PARAMETER_COMMON_BODY'] + else: + tmpl_body = tmpl_common_body + + # Compute the subject and body of the mail + if context['WHAT'] == 'HOST': + tmpl = context.get('PARAMETER_HOST_SUBJECT') or tmpl_host_subject + if "PARAMETER_HOST_BODY" in context: + tmpl_body += context["PARAMETER_HOST_BODY"] + else: + tmpl_body += tmpl_host_body + else: + tmpl = context.get('PARAMETER_SERVICE_SUBJECT') or tmpl_service_subject + if "PARAMETER_SERVICE_BODY" in context: + tmpl_body += context["PARAMETER_SERVICE_BODY"] + else: + tmpl_body += tmpl_service_body + + 
context['SUBJECT'] = substitute_context(tmpl, context) + body = substitute_context(tmpl_body, context) + + return body + +def read_bulk_contexts(): + parameters = {} + contexts = [] + in_params = True + + # First comes a section with global variables + for line in sys.stdin: + line = line.strip() + if line: + try: + key, value = line.split("=", 1) + value = value.replace("\1", "\n") + except: + print "Invalid line '%s' in bulked notification context" % line + continue + + if in_params: + parameters[key] = value + else: + context[key] = value + + else: + in_params = False + context = {} + contexts.append(context) + + return parameters, contexts + +def main(): + if bulk_mode: + attachments = [] + content_txt = "" + content_html = "" + parameters, contexts = read_bulk_contexts() + hosts = set([]) + for context in contexts: + context.update(parameters) + txt = construct_content(context) + content_txt += txt + mailto = context['CONTACTEMAIL'] # Assume the same in each context + subject = context['SUBJECT'] + hosts.add(context["HOSTNAME"]) + + # Create a useful subject + hosts = list(hosts) + if len(contexts) > 1: + if len(hosts) == 1: + subject = "Check_MK: %d notifications for %s" % (len(contexts), hosts[0]) + else: + subject = "Check_MK: %d notifications for %d hosts" % ( + len(contexts), len(hosts)) + + else: + # gather all options from env + context = dict([ + (var[7:], value.decode("utf-8")) + for (var, value) + in os.environ.items() + if var.startswith("NOTIFY_")]) + content_txt = construct_content(context) + mailto = context['CONTACTEMAIL'] + subject = context['SUBJECT'] + + if not mailto: # e.g. empty field in user database + sys.stdout.write("Cannot send HTML email: empty destination email address") + sys.exit(2) + + # Create the mail and send it + from_address = context.get("PARAMETER_FROM") + reply_to = context.get("PARAMETER_REPLY_TO") + m = build_mail(mailto, subject, from_address, reply_to, content_txt) + send_mail(m, mailto, from_address) + +main() diff -Nru check-mk-1.2.2p3/asmcmd.sh check-mk-1.2.6p12/asmcmd.sh --- check-mk-1.2.2p3/asmcmd.sh 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/asmcmd.sh 2014-12-11 10:41:49.000000000 +0000 @@ -0,0 +1,26 @@ +#!/bin/sh +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. 
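# For reference, the bulk payload parsed by read_bulk_contexts() in the
# asciimail script above is plain key=value lines on stdin: one leading
# block of global parameters, then one blank-line-separated block per
# queued notification, with embedded newlines transported as "\1" bytes.
# A hypothetical two-notification payload:
#
#   PARAMETER_FROM=nagios@example.com
#
#   HOSTNAME=srv01
#   NOTIFICATIONTYPE=PROBLEM
#
#   HOSTNAME=srv02
#   NOTIFICATIONTYPE=RECOVERY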
+ +su - griduser -c "asmcmd $@" diff -Nru check-mk-1.2.2p3/autodetect.py check-mk-1.2.6p12/autodetect.py --- check-mk-1.2.2p3/autodetect.py 2013-11-05 09:42:58.000000000 +0000 +++ check-mk-1.2.6p12/autodetect.py 2015-09-21 11:01:35.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. @@ -40,6 +40,7 @@ 'nagiosaddconf' : "Snippet to add to nagios.cfg", 'nagios_auth_name' : "HTTP Basic AuthName for Nagios", 'nagios_binary' : "Absolute path to Nagios binary itself", + 'nagios_version' : "Nagios version", 'nagios_config_file': "Absolute path to nagios.cfg", 'nagios_startscript': "Nagios startskript (usually in /etc/init.d)", 'nagios_status_file': "Absolute path to Nagios' status.dat", @@ -437,6 +438,12 @@ # Path to executable result['nagios_binary'] = process_executable(pid) + # Nagios version + result['nagios_version'] = "" + for line in os.popen(result["nagios_binary"]+ " --version 2>/dev/null"): + if line.startswith("Nagios Core") or line.startswith("Icinga Core"): + result['nagios_version'] = line.split()[2] + # Path to startscript for path in [ '/etc/init.d/nagios', '/etc/init.d/nagios3', '/etc/init.d/icinga' ]: if os.path.exists(path): diff -Nru check-mk-1.2.2p3/automation.py check-mk-1.2.6p12/automation.py --- check-mk-1.2.2p3/automation.py 2013-11-05 09:23:07.000000000 +0000 +++ check-mk-1.2.6p12/automation.py 2015-09-21 10:59:54.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. 
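The autodetect.py hunk above derives the core version by running the
detected binary with --version and taking the third word of its banner
line. A stand-alone sketch (the binary is simply assumed to be "nagios"
in $PATH here):

import os
nagios_version = ""
for line in os.popen("nagios --version 2>/dev/null"):
    if line.startswith("Nagios Core") or line.startswith("Icinga Core"):
        nagios_version = line.split()[2]   # "Nagios Core 3.5.1" -> "3.5.1"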
@@ -34,19 +34,28 @@ def do_automation(cmd, args): try: if cmd == "get-configuration": - read_config_files(with_autochecks=False, with_conf_d=False) + read_config_files(with_conf_d=False) result = automation_get_configuration() elif cmd == "get-check-information": result = automation_get_check_information() elif cmd == "delete-host": - read_config_files(with_autochecks=False) + read_config_files() result = automation_delete_host(args) + elif cmd == "notification-get-bulks": + result = automation_get_bulks(args) + elif cmd == "update-dns-cache": + read_config_files() + result = automation_update_dns_cache() else: read_config_files() if cmd == "try-inventory": - result = automation_try_inventory(args) + result = automation_try_discovery(args) elif cmd == "inventory": - result = automation_inventory(args) + result = automation_discovery(args) + elif cmd == "analyse-service": + result = automation_analyse_service(args) + elif cmd == "active-check": + result = automation_active_check(args) elif cmd == "get-autochecks": result = automation_get_autochecks(args) elif cmd == "set-autochecks": @@ -57,6 +66,18 @@ result = automation_restart("restart") elif cmd == "scan-parents": result = automation_scan_parents(args) + elif cmd == "diag-host": + result = automation_diag_host(args) + elif cmd == "rename-host": + result = automation_rename_host(args) + elif cmd == "create-snapshot": + result = automation_create_snapshot(args) + elif cmd == "notification-replay": + result = automation_notification_replay(args) + elif cmd == "notification-analyse": + result = automation_notification_analyse(args) + elif cmd == "bake-agents": + result = automation_bake_agents() else: raise MKAutomationError("Automation command '%s' is not implemented." % cmd) @@ -88,255 +109,114 @@ # "remove" - remove exceeding services # "fixall" - find new, remove exceeding # "refresh" - drop all services and reinventorize -def automation_inventory(args): - if len(args) < 2: - raise MKAutomationError("Need two arguments: [new|remove|fixall|refresh] HOSTNAME") +def automation_discovery(args): - how = args[0] - hostname = args[1] - - count_added = 0 - count_removed = 0 - count_kept = 0 - if how == "refresh": - count_removed = remove_autochecks_of(hostname) # checktype could be added here - reread_autochecks() - - # Compute current state of new and existing checks - table = automation_try_inventory([hostname]) - # Create new list of checks - new_items = [] - for entry in table: - state_type, ct, checkgroup, item, paramstring = entry[:5] - if state_type in [ "legacy", "active", "manual", "ignored" ]: - continue # this is not an autocheck or ignored and currently not checked - - if state_type == "new": - if how in [ "new", "fixall", "refresh" ]: - count_added += 1 - new_items.append((ct, item, paramstring)) - - elif state_type == "old": - # keep currently existing valid services in any case - new_items.append((ct, item, paramstring)) - count_kept += 1 - - elif state_type in [ "obsolete", "vanished" ]: - # keep item, if we are currently only looking for new services - # otherwise fix it: remove ignored and non-longer existing services - if how not in [ "fixall", "remove" ]: - new_items.append((ct, item, paramstring)) - count_kept += 1 - else: - count_removed += 1 - - automation_write_autochecks_file(hostname, new_items) - return (count_added, count_removed, count_kept, len(new_items)) - - -def automation_try_inventory(args): - global opt_use_cachefile, inventory_max_cachefile_age, check_max_cachefile_age - if args[0] == '--cache': - 
opt_use_cachefile = True - check_max_cachefile_age = 1000000000 - inventory_max_cachefile_age = 1000000000 + # perform full SNMP scan on SNMP devices? + if args[0] == "@scan": + do_snmp_scan = True args = args[1:] - - hostname = args[0] - - # hostname might be a cluster. In that case we compute the clustered - # services of that cluster. - services = [] - if is_cluster(hostname): - already_added = set([]) - for node in nodes_of(hostname): - new_services = automation_try_inventory_node(node) - for entry in new_services: - if host_of_clustered_service(node, entry[6]) == hostname: - # 1: check, 6: Service description - if (entry[1], entry[6]) not in already_added: - services.append(entry) - already_added.add((entry[1], entry[6])) # make it unique - else: - new_services = automation_try_inventory_node(hostname) - for entry in new_services: - if host_of_clustered_service(hostname, entry[6]) == hostname: - services.append(entry) - - return services + do_snmp_scan = False + # use cache files if present? + if args[0] == "@cache": + args = args[1:] + use_caches = True + else: + use_caches = False + if len(args) < 2: + raise MKAutomationError("Need two arguments: new|remove|fixall|refresh HOSTNAME") -def automation_try_inventory_node(hostname): - global opt_use_cachefile, opt_no_tcp, opt_dont_submit - - try: - ipaddress = lookup_ipaddress(hostname) - except: - raise MKAutomationError("Cannot lookup IP address of host %s" % hostname) - - found_services = [] + how = args[0] + hostnames = args[1:] - dual_host = is_snmp_host(hostname) and is_tcp_host(hostname) + counts = {} + failed_hosts = {} - # if we are using cache files, then we restrict us to existing - # check types. SNMP scan is only done without the --cache option - snmp_error = None - if is_snmp_host(hostname): + for hostname in hostnames: + counts.setdefault(hostname, [0, 0, 0, 0]) # added, removed, kept, total try: - if opt_use_cachefile: - existing_checks = set([ cn for (cn, item) in get_check_table(hostname) ]) - for cn in inventorable_checktypes("snmp"): - if cn in existing_checks: - found_services += make_inventory(cn, [hostname], True, True) - else: - sys_descr = get_single_oid(hostname, ipaddress, ".1.3.6.1.2.1.1.1.0") - if sys_descr != None: - found_services = do_snmp_scan([hostname], True, True) + # in "refresh" mode we first need to remove all previously discovered + # checks of the host, so that get_host_services() does show us the + # new discovered check parameters. + if how == "refresh": + counts[hostname][1] += remove_autochecks_of(hostname) # this is cluster-aware! + + # Compute current state of new and existing checks + services = get_host_services(hostname, use_caches=use_caches, do_snmp_scan=do_snmp_scan) + + # Create new list of checks + new_items = {} + for (check_type, item), (check_source, paramstring) in services.items(): + if check_source in ("custom", "legacy", "active", "manual"): + continue # this is not an autocheck or ignored and currently not checked + # Note discovered checks that are shadowed by manual checks will vanish + # that way. 
+ + if check_source in ("new"): + if how in ("new", "fixall", "refresh"): + counts[hostname][0] += 1 # added + counts[hostname][3] += 1 # total + new_items[(check_type, item)] = paramstring + + elif check_source in ("old", "ignored"): + # keep currently existing valid services in any case + new_items[(check_type, item)] = paramstring + counts[hostname][2] += 1 # kept + counts[hostname][3] += 1 # total + + elif check_source in ("obsolete", "vanished"): + # keep item, if we are currently only looking for new services + # otherwise fix it: remove ignored and non-longer existing services + if how not in ("fixall", "remove"): + new_items[(check_type, item)] = paramstring + counts[hostname][2] += 1 # kept + counts[hostname][3] += 1 # total + else: + counts[hostname][1] += 1 # removed + + # Silently keep clustered services + elif check_source.startswith("clustered_"): + new_items[(check_type, item)] = paramstring + else: - raise MKSNMPError("Cannot get system description via SNMP. " - "SNMP agent is not responding. Probably wrong " - "community or wrong SNMP version.") + raise MKGeneralException("Unknown check source '%s'" % check_source) - except Exception, e: - if not dual_host: - raise - snmp_error = str(e) + set_autochecks_of(hostname, new_items) - tcp_error = None - if is_tcp_host(hostname): - try: - for cn in inventorable_checktypes("tcp"): - found_services += make_inventory(cn, [hostname], True, True) except Exception, e: - if not dual_host: + if opt_debug: raise - tcp_error = str(e) + failed_hosts[hostname] = str(e) - # raise MKAutomationError("%s/%s/%s" % (dual_host, snmp_error, tcp_error)) - if dual_host and snmp_error and tcp_error: - raise MKAutomationError("Error using TCP (%s)\nand SNMP (%s)" % - (tcp_error, snmp_error)) - - found = {} - for hn, ct, item, paramstring, state_type in found_services: - found[(ct, item)] = ( state_type, paramstring ) - - # Check if already in autochecks (but not found anymore) - for hn, ct, item, params in autochecks: - if hn == hostname and (ct, item) not in found: - found[(ct, item)] = ( 'vanished', repr(params) ) # This is not the real paramstring! 
- - # Find manual checks - existing = get_check_table(hostname) - for (ct, item), (params, descr, deps) in existing.items(): - if (ct, item) not in found: - found[(ct, item)] = ('manual', repr(params) ) - - # Add legacy checks and active checks with artificial type 'legacy' - legchecks = host_extra_conf(hostname, legacy_checks) - for cmd, descr, perf in legchecks: - found[('legacy', descr)] = ( 'legacy', 'None' ) + return counts, failed_hosts - # Similar for 'active_checks', but here we have parameters - for acttype, rules in active_checks.items(): - act_info = active_check_info[acttype] - entries = host_extra_conf(hostname, rules) - for params in entries: - descr = act_info["service_description"](params) - found[(acttype, descr)] = ( 'active', repr(params) ) - - - # Collect current status information about all existing checks - table = [] - for (ct, item), (state_type, paramstring) in found.items(): - params = None - if state_type not in [ 'legacy', 'active' ]: - # apply check_parameters - try: - if type(paramstring) == str: - params = eval(paramstring) - else: - params = paramstring - except: - raise MKAutomationError("Invalid check parameter string '%s'" % paramstring) - - - descr = service_description(ct, item) - infotype = ct.split('.')[0] - opt_use_cachefile = True - opt_no_tcp = True - opt_dont_submit = True - try: - exitcode = None - perfdata = [] - info = get_host_info(hostname, ipaddress, infotype) - # Handle cases where agent does not output data - except MKAgentError, e: - exitcode = 3 - output = "Error getting data from agent" - if str(e): - output += ": %s" % e - tcp_error = output - - except MKSNMPError, e: - exitcode = 3 - output = "Error getting data from agent for %s via SNMP" % infotype - if str(e): - output += ": %s" % e - snmp_error = output - - except Exception, e: - exitcode = 3 - output = "Error getting data for %s: %s" % (infotype, e) - if check_uses_snmp(ct): - snmp_error = output - else: - tcp_error = output - - if exitcode == None: - check_function = check_info[ct]["check_function"] - if state_type != 'manual': - params = compute_check_parameters(hostname, ct, item, params) - - try: - result = check_function(item, params, info) - except MKCounterWrapped, e: - result = (None, "WAITING - Counter based check, cannot be done offline") - except Exception, e: - result = (3, "UNKNOWN - invalid output from agent or error in check implementation") - if len(result) == 2: - result = (result[0], result[1], []) - exitcode, output, perfdata = result - else: - descr = item - exitcode = None - output = "WAITING - Legacy check, cannot be done offline" - perfdata = [] - - if state_type == "active": - params = eval(paramstring) - - if state_type in [ "legacy", "active" ]: - checkgroup = None - else: - checkgroup = check_info[ct]["group"] - table.append((state_type, ct, checkgroup, item, paramstring, params, descr, exitcode, output, perfdata)) - - if not table and (tcp_error or snmp_error): - error = "" - if snmp_error: - error = "Error getting data via SNMP: %s" % snmp_error - if tcp_error: - if error: - error += ", " - error += "Error getting data from Check_MK agent: %s" % tcp_error - raise MKAutomationError(error) +def automation_try_discovery(args): + use_caches = False + do_snmp_scan = False + if args[0] == '@noscan': + args = args[1:] + do_snmp_scan = False + use_caches = True + elif args[0] == '@scan': + args = args[1:] + do_snmp_scan = True + use_caches = False + # TODO: Remove this unlucky option opt_use_cachefile. 
At least do not + # handle this option so deep in the code. It should only be handled + # by top-level functions. + global opt_use_cachefile, check_max_cachefile_age + opt_use_cachefile = use_caches + if use_caches: + check_max_cachefile_age = inventory_max_cachefile_age + hostname = args[0] + table = get_check_preview(hostname, use_caches=use_caches, do_snmp_scan=do_snmp_scan) return table + # Set the new list of autochecks. This list is specified by a # table of (checktype, item). No parameters are specified. Those # are either (1) kept from existing autochecks or (2) computed @@ -345,103 +225,197 @@ def automation_set_autochecks(args): hostname = args[0] new_items = eval(sys.stdin.read()) + set_autochecks_of(hostname, new_items) - do_cleanup_autochecks() - existing = automation_parse_autochecks_file(hostname) +def set_autochecks_of(hostname, new_items): + # A Cluster does not have an autochecks file + # All of its services are located in the nodes instead + # So we cycle through all nodes remove all clustered service + # and add the ones we've got from stdin + if is_cluster(hostname): + for node in nodes_of(hostname): + new_autochecks = [] + existing = parse_autochecks_file(node) + for check_type, item, paramstring in existing: + descr = service_description(check_type, item) + if hostname != host_of_clustered_service(node, descr): + new_autochecks.append((check_type, item, paramstring)) + for (check_type, item), paramstring in new_items.items(): + new_autochecks.append((check_type, item, paramstring)) + # write new autochecks file for that host + automation_write_autochecks_file(node, new_autochecks) + else: + existing = parse_autochecks_file(hostname) + # write new autochecks file, but take paramstrings from existing ones + # for those checks which are kept + new_autochecks = [] + for ct, item, paramstring in existing: + if (ct, item) in new_items: + new_autochecks.append((ct, item, paramstring)) + del new_items[(ct, item)] - # write new autochecks file, but take paramstrings from existing ones - # for those checks which are kept - new_autochecks = [] - for ct, item, params, paramstring in existing: - if (ct, item) in new_items: + for (ct, item), paramstring in new_items.items(): new_autochecks.append((ct, item, paramstring)) - del new_items[(ct, item)] - - for (ct, item), paramstring in new_items.items(): - new_autochecks.append((ct, item, paramstring)) - - # write new autochecks file for that host - automation_write_autochecks_file(hostname, new_autochecks) + # write new autochecks file for that host + automation_write_autochecks_file(hostname, new_autochecks) -def automation_get_autochecks(args): - hostname = args[0] - do_cleanup_autochecks() - return automation_parse_autochecks_file(hostname) def automation_write_autochecks_file(hostname, table): if not os.path.exists(autochecksdir): os.makedirs(autochecksdir) path = "%s/%s.mk" % (autochecksdir, hostname) f = file(path, "w") - f.write("# Autochecks for host %s, created by Check_MK automation\n[\n" % hostname) - for ct, item, paramstring in table: - f.write(" (%r, %r, %r, %s),\n" % (hostname, ct, item, paramstring)) + f.write("[\n") + for check_type, item, paramstring in table: + f.write(" (%r, %r, %s),\n" % (check_type, item, paramstring)) f.write("]\n") + if inventory_check_autotrigger and inventory_check_interval: + schedule_inventory_check(hostname) -def automation_parse_autochecks_file(hostname): - def split_python_tuple(line): - quote = None - bracklev = 0 - backslash = False - for i, c in enumerate(line): - if backslash: - 
backslash = False - continue - elif c == '\\': - backslash = True - elif c == quote: - quote = None # end of quoted string - elif c in [ '"', "'" ]: - quote = c # begin of quoted string - elif quote: - continue - elif c in [ '(', '{', '[' ]: - bracklev += 1 - elif c in [ ')', '}', ']' ]: - bracklev -= 1 - elif bracklev > 0: - continue - elif c == ',': - value = line[0:i] - rest = line[i+1:] - return value.strip(), rest - return line.strip(), None - path = "%s/%s.mk" % (autochecksdir, hostname) - if not os.path.exists(path): - return [] - lineno = 0 - - table = [] - for line in file(path): - lineno += 1 - try: - line = line.strip() - if not line.startswith("("): +def automation_get_autochecks(args): + hostname = args[0] + result = [] + for ct, item, paramstring in parse_autochecks_file(hostname): + result.append((ct, item, eval(paramstring), paramstring)) + return result + + +def schedule_inventory_check(hostname): + try: + import socket + s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) + s.connect(livestatus_unix_socket) + now = int(time.time()) + if 'cmk-inventory' in use_new_descriptions_for: + command = "SCHEDULE_FORCED_SVC_CHECK;%s;Check_MK Discovery;%d" % (hostname, now) + else: + # FIXME: Remove this old name handling one day + command = "SCHEDULE_FORCED_SVC_CHECK;%s;Check_MK inventory;%d" % (hostname, now) + s.send("COMMAND [%d] %s\n" % (now, command)) + except Exception, e: + if opt_debug: + raise + + + +# Determine the type of the check, and how the parameters are being +# constructed +def automation_analyse_service(args): + global g_hostname + hostname = args[0] + servicedesc = args[1] + g_hostname = hostname # To be sure for all subfunctions + + # We just consider types of checks that are managed via WATO. + # We have the following possible types of services: + # 1. manual checks (static_checks) (currently overriding inventorized checks) + # 2. inventorized check + # 3. classical checks + # 4. active checks + + # Compute effective check table, in order to remove SNMP duplicates + check_table = get_check_table(hostname, remove_duplicates = True) + + # 1. 
Manual checks + for nr, (checkgroup, entries) in enumerate(static_checks.items()): + for entry in entries: + entry, rule_options = get_rule_options(entry) + if rule_options.get("disabled"): continue - # drop everything after potential '#' (from older versions) - i = line.rfind('#') - if i > 0: # make sure # is not contained in string - rest = line[i:] - if '"' not in rest and "'" not in rest: - line = line[:i].strip() - - if line.endswith(","): - line = line[:-1] - line = line[1:-1] # drop brackets - - hostnamestring, line = split_python_tuple(line) # should be hostname - checktypestring, line = split_python_tuple(line) - itemstring, line = split_python_tuple(line) - paramstring, line = split_python_tuple(line) - table.append((eval(checktypestring), eval(itemstring), eval(paramstring), paramstring)) - except: - if opt_debug: - raise - raise MKAutomationError("Invalid line %d in autochecks file %s" % (lineno, path)) - return table + # Parameters are optional + if len(entry[0]) == 2: + checktype, item = entry[0] + params = None + else: + checktype, item, params = entry[0] + if len(entry) == 3: + taglist, hostlist = entry[1:3] + else: + hostlist = entry[1] + taglist = [] + + if hosttags_match_taglist(tags_of_host(hostname), taglist) and \ + in_extraconf_hostlist(hostlist, hostname): + descr = service_description(checktype, item) + if descr == servicedesc: + return { + "origin" : "static", + "checkgroup" : checkgroup, + "checktype" : checktype, + "item" : item, + "rule_nr" : nr, + "parameters" : params, + } + + + # 2. Load all autochecks of the host in question and try to find + # our service there + try: + path = "%s/%s.mk" % (autochecksdir, hostname) + for entry in eval(file(path).read()): + if len(entry) == 4: # old format + hn, ct, item, params = entry + else: + ct, item, params = entry # new format without host name + hn = hostname + + if (ct, item) not in check_table: + continue # this is a removed duplicate or clustered service + descr = service_description(ct, item) + if hn == hostname and descr == servicedesc: + dlv = check_info[ct].get("default_levels_variable") + if dlv: + fs = factory_settings.get(dlv, None) + else: + fs = None + + return { + "origin" : "auto", + "checktype" : ct, + "checkgroup" : check_info[ct].get("group"), + "item" : item, + "inv_parameters" : params, + "factory_settings" : fs, + "parameters" : compute_check_parameters(hostname, ct, item, params), + } + except: + if opt_debug: + raise + + # 3. Classical checks + custchecks = host_extra_conf(hostname, custom_checks) + for nr, entry in enumerate(custchecks): + desc = entry["service_description"] + if desc == servicedesc: + result = { + "origin" : "classic", + "rule_nr" : nr, + } + if "command_line" in entry: # Only active checks have a command line + result["command_line"] = entry["command_line"] + return result + + # 4. Active checks + for acttype, rules in active_checks.items(): + entries = host_extra_conf(hostname, rules) + if entries: + act_info = active_check_info[acttype] + for params in entries: + description = act_info["service_description"](params) + if description == servicedesc: + return { + "origin" : "active", + "checktype" : acttype, + "parameters" : params, + } + + return {} # not found + # TODO: Was ist mit Clustern??? 
+ # TODO: Klappt das mit automatischen verschatten von SNMP-Checks (bei dual Monitoring) + def automation_delete_host(args): hostname = args[0] @@ -455,17 +429,24 @@ "%s/%s.*" % (tcp_cache_dir, hostname)]: os.system("rm -rf '%s'" % path) -def automation_restart(job="restart"): +def automation_restart(job = "restart", use_rushd = True): # make sure, Nagios does not inherit any open # filedescriptors. This really happens, e.g. if # check_mk is called by WATO via Apache. Nagios inherits # the open file where Apache is listening for incoming # HTTP connections. Really. - for fd in range(3, 256): - try: - os.close(fd) - except: - pass + if monitoring_core == "nagios": + objects_file = nagios_objects_file + for fd in range(3, 256): + try: + os.close(fd) + except: + pass + else: + objects_file = var_dir + "/core/config" + if job == "restart": + job = "reload" # force reload for CMC + # os.closerange(3, 256) --> not available in older Python versions class null_file: @@ -480,40 +461,52 @@ try: backup_path = None - if not lock_nagios_objects_file(): + if not lock_objects_file(): raise MKAutomationError("Cannot activate changes. " "Another activation process is currently in progresss") - if os.path.exists(nagios_objects_file): - backup_path = nagios_objects_file + ".save" - os.rename(nagios_objects_file, backup_path) + + if os.path.exists(objects_file): + backup_path = objects_file + ".save" + os.rename(objects_file, backup_path) else: backup_path = None try: - create_nagios_config(file(nagios_objects_file, "w")) + if monitoring_core == "nagios": + create_nagios_config(file(objects_file, "w")) + else: + do_create_cmc_config(opt_cmc_relfilename, use_rushd = use_rushd) + + if "do_bake_agents" in globals() and bake_agents_on_restart: + do_bake_agents() + except Exception, e: if backup_path: - os.rename(backup_path, nagios_objects_file) + os.rename(backup_path, objects_file) + if opt_debug: + raise raise MKAutomationError("Error creating configuration: %s" % e) if do_check_nagiosconfig(): if backup_path: os.remove(backup_path) - do_precompile_hostchecks() - if job == 'restart': - do_restart_nagios(False) - elif job == 'reload': - do_restart_nagios(True) + if monitoring_core == "cmc": + do_pack_config() + else: + do_precompile_hostchecks() + do_core_action(job) else: if backup_path: - os.rename(backup_path, nagios_objects_file) + os.rename(backup_path, objects_file) else: - os.remove(nagios_objects_file) - raise MKAutomationError("Nagios configuration is invalid. Rolling back.") + os.remove(objects_file) + raise MKAutomationError("Configuration for monitoring core is invalid. 
Rolling back.") except Exception, e: if backup_path and os.path.exists(backup_path): os.remove(backup_path) + if opt_debug: + raise raise MKAutomationError(str(e)) sys.stdout = old_stdout @@ -525,7 +518,8 @@ result = {} for varname in variable_names: if varname in globals(): - result[varname] = globals()[varname] + if not hasattr(globals()[varname], '__call__'): + result[varname] = globals()[varname] return result def automation_get_check_information(): @@ -540,6 +534,8 @@ checks[check_type] = { "title" : title } if check["group"]: checks[check_type]["group"] = check["group"] + checks[check_type]["service_description"] = check.get("service_description","%s") + checks[check_type]["snmp"] = check_uses_snmp(check_type) return checks def automation_scan_parents(args): @@ -560,4 +556,599 @@ except Exception, e: raise MKAutomationError(str(e)) +def automation_diag_host(args): + import subprocess + + hostname, test, ipaddress, snmp_community = args[:4] + agent_port, snmp_timeout, snmp_retries = map(int, args[4:7]) + cmd = args[7] + + if not ipaddress: + try: + ipaddress = lookup_ipaddress(hostname) + except: + raise MKGeneralException("Cannot resolve hostname %s into IP address" % hostname) + + try: + if test == 'ping': + p = subprocess.Popen('ping -A -i 0.2 -c 2 -W 5 %s 2>&1' % ipaddress, shell = True, stdout = subprocess.PIPE) + response = p.stdout.read() + return (p.wait(), response) + + elif test == 'agent': + if not cmd: + cmd = get_datasource_program(hostname, ipaddress) + + if cmd: + return 0, get_agent_info_program(cmd) + else: + return 0, get_agent_info_tcp(hostname, ipaddress, agent_port or None) + + elif test == 'traceroute': + traceroute_prog = find_bin_in_path('traceroute') + if not traceroute_prog: + return 1, "Cannot find binary traceroute." + else: + p = subprocess.Popen('traceroute -n %s 2>&1' % ipaddress, shell = True, stdout = subprocess.PIPE) + response = p.stdout.read() + return (p.wait(), response) + + elif test.startswith('snmp'): + if snmp_community: + explicit_snmp_communities[hostname] = snmp_community + + # override timing settings if provided + if snmp_timeout or snmp_retries: + timing = {} + if snmp_timeout: + timing['timeout'] = snmp_timeout + if snmp_retries: + timing['retries'] = snmp_retries + snmp_timing.insert(0, (timing, [], [hostname])) + + # SNMP versions + global bulkwalk_hosts, snmpv2c_hosts + if test == 'snmpv2': + bulkwalk_hosts = [hostname] + + elif test == 'snmpv2_nobulk': + bulkwalk_hosts = [] + snmpv2c_hosts = [hostname] + elif test == 'snmpv1': + bulkwalk_hosts = [] + snmpv2c_hosts = [] + + else: + return 1, "SNMP command not implemented" + + data = get_snmp_table(hostname, ipaddress, None, ('.1.3.6.1.2.1.1', ['1.0', '4.0', '5.0', '6.0'])) + if data: + return 0, 'sysDescr:\t%s\nsysContact:\t%s\nsysName:\t%s\nsysLocation:\t%s\n' % tuple(data[0]) + else: + return 1, 'Got empty SNMP response' + + else: + return 1, "Command not implemented" + + except Exception, e: + if opt_debug: + raise + return 1, str(e) + +# WATO calls this automation when a host has been renamed. We need to change +# several file and directory names. +# HIRN: Hier auch das neue Format berücksichtigen! Andererseits sollte +# eigentlich auch nix Schlimmes passieren, wenn der Hostname *nicht* in +# der Datei steht. +def automation_rename_host(args): + oldname = args[0] + newname = args[1] + actions = [] + + # Autochecks: simply read and write out the file again. We do + # not store a host name here anymore - but old versions did. + # by rewriting we get rid of the host name. 
+ + acpath = autochecksdir + "/" + oldname + ".mk" + if os.path.exists(acpath): + old_autochecks = parse_autochecks_file(oldname) + out = file(autochecksdir + "/" + newname + ".mk", "w") + out.write("[\n") + for ct, item, paramstring in old_autochecks: + out.write(" (%r, %r, %s),\n" % (ct, item, paramstring)) + out.write("]\n") + out.close() + os.remove(acpath) # Remove old file + actions.append("autochecks") + + # At this place WATO already has changed it's configuration. All further + # data might be changed by the still running core. So we need to stop + # it now. + core_was_running = core_is_running() + if core_was_running: + do_core_action("stop", quiet=True) + + # Rename temporary files of the host + for d in [ "cache", "counters" ]: + if rename_host_file(tmp_dir + "/" + d + "/", oldname, newname): + actions.append(d) + + if rename_host_dir(tmp_dir + "/piggyback/", oldname, newname): + actions.append("piggyback-load") + + # Rename piggy files *created* by the host + piggybase = tmp_dir + "/piggyback/" + if os.path.exists(piggybase): + for piggydir in os.listdir(piggybase): + if rename_host_file(piggybase + piggydir, oldname, newname): + actions.append("piggyback-pig") + + # Logwatch + if rename_host_dir(logwatch_dir, oldname, newname): + actions.append("logwatch") + + # SNMP walks + if rename_host_file(snmpwalks_dir, oldname, newname): + actions.append("snmpwalk") + + # OMD-Stuff. Note: The question really is whether this should be + # included in Check_MK. The point is - however - that all these + # actions need to take place while the core is stopped. + if omd_root: + actions += omd_rename_host(oldname, newname) + + # Start monitoring again. In case of CMC we need to ignore + # any configuration created by the CMC Rushahead daemon + if core_was_running: + global ignore_ip_lookup_failures + ignore_ip_lookup_failures = True # force config generation to succeed. The core *must* start. + automation_restart("start", use_rushd = False) + if monitoring_core == "cmc": + try: + os.remove(var_dir + "/core/config.rush") + os.remove(var_dir + "/core/config.rush.id") + except: + pass + + if failed_ip_lookups: + actions.append("ipfail") + + return actions + + +def rename_host_dir(basedir, oldname, newname): + import shutil + if os.path.exists(basedir + "/" + oldname): + if os.path.exists(basedir + "/" + newname): + shutil.rmtree(basedir + "/" + newname) + os.rename(basedir + "/" + oldname, basedir + "/" + newname) + return 1 + return 0 + +def rename_host_file(basedir, oldname, newname): + if os.path.exists(basedir + "/" + oldname): + if os.path.exists(basedir + "/" + newname): + os.remove(basedir + "/" + newname) + os.rename(basedir + "/" + oldname, basedir + "/" + newname) + return 1 + return 0 + +# This functions could be moved out of Check_MK. 
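omd_rename_host() below interpolates host names into sed expressions,
so dots are escaped as "[.]" first to make them match literally rather
than as regex wildcards:

oldname = "host.example"
oldregex = oldname.replace(".", "[.]")   # -> "host[.]example"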
+def omd_rename_host(oldname, newname):
+    oldregex = oldname.replace(".", "[.]")
+    newregex = newname.replace(".", "[.]")
+    actions = []
+
+    # Temporarily stop processing of performance data
+    npcd_running = os.path.exists(omd_root + "/tmp/pnp4nagios/run/npcd.pid")
+    if npcd_running:
+        os.system("omd stop npcd >/dev/null 2>&1 </dev/null")
+
+    rrdcache_running = os.path.exists(omd_root + "/tmp/run/rrdcached.sock")
+    if rrdcache_running:
+        os.system("omd stop rrdcached >/dev/null 2>&1 </dev/null")
+
+    # Fix pathnames in XML files of the performance data
+    dirpath = omd_root + "/var/pnp4nagios/perfdata/" + oldname
+    os.system("sed -i 's@/perfdata/%s/@/perfdata/%s/@' %s/*.xml 2>/dev/null" % (oldname, newname, dirpath))
+
+    # RRD files
+    if rename_host_dir(rrd_path, oldname, newname):
+        actions.append("rrd")
+
+    # entries of rrdcached journal
+    dirpath = omd_root + "/var/rrdcached/"
+    if not os.system("sed -i 's@/perfdata/%s/@/perfdata/%s/@' "
+                     "%s/var/rrdcached/rrd.journal.* 2>/dev/null" % ( oldregex, newregex, omd_root)):
+        actions.append("rrdcached")
+
+    # Spoolfiles of NPCD
+    if not os.system("sed -i 's/HOSTNAME::%s /HOSTNAME::%s /' "
+                     "%s/var/pnp4nagios/perfdata.dump %s/var/pnp4nagios/spool/perfdata.* 2>/dev/null" % (
+                     oldregex, newregex, omd_root, omd_root)):
+        actions.append("pnpspool")
+
+    if rrdcache_running:
+        os.system("omd start rrdcached >/dev/null 2>&1 </dev/null")
+
+    if npcd_running:
+        os.system("omd start npcd >/dev/null 2>&1 </dev/null")
+
+    # NagVis maps
+    if not os.system("sed -i 's/^host_name=%s$/host_name=%s/' "
+                     "%s/etc/nagvis/maps/*.cfg 2>/dev/null" % (
+                     oldregex, newregex, omd_root)):
+        actions.append("nagvis")
+
+    return actions
+
+
+def automation_create_snapshot(args):
+    try:
+        import tarfile, time, cStringIO, shutil, subprocess, thread, traceback, threading
+        from hashlib import sha256
+        the_data = sys.stdin.read()
+        data = eval(the_data)
+
+        snapshot_name = data["snapshot_name"]
+        snapshot_dir = var_dir + "/wato/snapshots"
+        work_dir = snapshot_dir + "/workdir/%s" % snapshot_name
+        if not os.path.exists(work_dir):
+            os.makedirs(work_dir)
+
+        # Open / initialize files
+        filename_target = "%s/%s" % (snapshot_dir, snapshot_name)
+        filename_work   = "%s/%s.work" % (work_dir, snapshot_name)
+        filename_status = "%s/%s.status" % (work_dir, snapshot_name)
+        filename_pid    = "%s/%s.pid" % (work_dir, snapshot_name)
+        filename_subtar = ""
+        current_domain  = ""
+
+        file(filename_target, "w").close()
+        file(filename_status, "w").close()
+
+        def wipe_directory(path):
+            for entry in os.listdir(path):
+                if entry not in [ '.', '..' ]:
+                    p = path + "/" + entry
+                    if os.path.isdir(p):
+                        shutil.rmtree(p)
+                    else:
+                        os.remove(p)
+
+        lock_status_file = threading.Lock()
+        def update_status_file(domain = None, infotext = None):
+            lock_status_file.acquire()
+            if os.path.exists(filename_status):
+                if domain:
+                    statusinfo[domain] = infotext
+                statusfile = file(filename_status, "w")
+                statusfile.write("comment:%s\n" % data.get("comment"," ").encode("utf-8"))
+                status_list = list(statusinfo.items())
+                status_list.sort()
+                for status in status_list:
+                    statusfile.write("%s.tar.gz:%s\n" % status)
+            lock_status_file.release()
+
+        # Set initial status info
+        statusinfo = {}
+        for name in data.get("domains", {}).keys():
+            statusinfo[name] = "TODO:0"
+        update_status_file()
+
+        # Now fork into our own process to have an asynchronous backup creation
+        try:
+            pid = os.fork()
+            if pid > 0:
+                # Exit parent process
+                return
+            # Decouple from parent environment
+            os.chdir("/")
+            os.umask(0)
+            os.setsid()
+
+            # Close all fd except stdin,out,err
+            for fd in range(3, 256):
+                try:
+                    os.close(fd)
+                except OSError:
+                    pass
+
+            sys.stdout.flush()
+            sys.stderr.flush()
+
+            si = os.open("/dev/null", os.O_RDONLY)
+            so = os.open("/dev/null", os.O_WRONLY)
+            os.dup2(si, 0)
+            os.dup2(so, 1)
+            os.dup2(so, 2)
+            os.close(si)
+            os.close(so)
+
+        except OSError, e:
+            raise MKAutomationError(str(e))
+
+        # Save pid of working process.
+        file(filename_pid, "w").write("%d" % os.getpid())
+
+        def cleanup():
+            wipe_directory(work_dir)
+            os.rmdir(work_dir)
+
+        def check_should_abort():
+            if not os.path.exists(filename_target):
+                cleanup()
+                sys.exit(0)
+
+        def get_basic_tarinfo(name):
+            tarinfo = tarfile.TarInfo(name)
+            tarinfo.mtime = time.time()
+            tarinfo.uid   = 0
+            tarinfo.gid   = 0
+            tarinfo.mode  = 0644
+            tarinfo.type  = tarfile.REGTYPE
+            return tarinfo
+
+        def update_subtar_size(seconds):
+            while current_domain != None:
+                try:
+                    if current_domain:
+                        if os.path.exists(path_subtar):
+                            update_status_file(current_domain, "Processing:%d" % os.stat(path_subtar).st_size)
+                except:
+                    pass
+                time.sleep(seconds)
+
+        def snapshot_secret():
+            path = default_config_dir + '/snapshot.secret'
+            try:
+                return file(path).read()
+            except IOError:
+                # create a secret during first use
+                try:
+                    s = os.urandom(256)
+                except NotImplementedError:
+                    # os.urandom() is not available on all platforms. Fall back
+                    # to a time based value (sha256() needs a string, not a float).
+                    s = sha256(str(time.time())).digest()
+                file(path, 'w').write(s)
+                return s
+
+        #
+        # Initialize the snapshot tar file and populate with initial information
+        #
+
+        tar_in_progress = tarfile.open(filename_work, "w")
+
+        # Add comment to tar file
+        if data.get("comment"):
+            tarinfo = get_basic_tarinfo("comment")
+            tarinfo.size = len(data.get("comment").encode("utf-8"))
+            tar_in_progress.addfile(tarinfo, cStringIO.StringIO(data.get("comment").encode("utf-8")))
+
+        if data.get("created_by"):
+            tarinfo = get_basic_tarinfo("created_by")
+            tarinfo.size = len(data.get("created_by"))
+            tar_in_progress.addfile(tarinfo, cStringIO.StringIO(data.get("created_by")))
+
+        # Add snapshot type
+        snapshot_type = data.get("type")
+        tarinfo = get_basic_tarinfo("type")
+        tarinfo.size = len(snapshot_type)
+        tar_in_progress.addfile(tarinfo, cStringIO.StringIO(snapshot_type))
+
+        # Close tar in progress, all other files are included via command line tar
+        tar_in_progress.close()
+
+        #
+        # Process domains (sorted)
+        #
+
+        subtar_update_thread = thread.start_new_thread(update_subtar_size, (1,))
+        domains = data.get("domains").items()
+        domains.sort()
+
+        subtar_info = {}
+        for name, info in domains:
+            current_domain  = name # Set name for update size thread
+            prefix          = info.get("prefix","")
+            exclude_options = ""
+            for entry in info.get("exclude", []):
+                exclude_options += "--exclude=%s " % entry
+
+            check_should_abort()
+
+            filename_subtar = "%s.tar.gz" % name
+            path_subtar = "%s/%s" % (work_dir, filename_subtar)
+
+            if info.get("backup_command"):
+                command = info.get("backup_command") % {
+                    "prefix"      : prefix,
+                    "path_subtar" : path_subtar,
+                    "work_dir"    : work_dir
+                }
+            else:
+                paths = map(lambda x: x[1] == "" and "." 
or x[1], info.get("paths", [])) + command = "tar czf %s --ignore-failed-read --force-local %s -C %s %s" % \ + (path_subtar, exclude_options, prefix, " ".join(paths)) + + proc = subprocess.Popen(command, shell=True, stdin=None, close_fds=True, + stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=prefix) + stdout, stderr = proc.communicate() + exit_code = proc.wait() + # Allow exit codes 0 and 1 (files changed during backup) + if exit_code not in [0, 1]: + raise MKAutomationError("Error while creating backup of %s (Exit Code %d) - %s.\n%s" % + (current_domain, exit_code, stderr, command)) + + subtar_size = os.stat(path_subtar).st_size + subtar_hash = sha256(file(path_subtar).read()).hexdigest() + subtar_signed = sha256(subtar_hash + snapshot_secret()).hexdigest() + subtar_info[filename_subtar] = (subtar_hash, subtar_signed) + + # Append tar.gz subtar to snapshot + command = "tar --append --file=%s %s ; rm %s" % \ + (filename_work, filename_subtar, filename_subtar) + proc = subprocess.Popen(command, shell=True, cwd = work_dir) + proc.communicate() + exit_code = proc.wait() + if exit_code != 0: + raise MKAutomationError("Error on adding backup domain %s to tarfile" % current_domain) + + current_domain = "" + update_status_file(name, "Finished:%d" % subtar_size) + + # Now add the info file which contains hashes and signed hashes for + # each of the subtars + info = ''.join([ '%s %s %s\n' % (k, v[0], v[1]) for k, v in subtar_info.items() ]) + '\n' + tar_in_progress = tarfile.open(filename_work, "a") + tarinfo = get_basic_tarinfo("checksums") + tarinfo.size = len(info) + tar_in_progress.addfile(tarinfo, cStringIO.StringIO(info)) + tar_in_progress.close() + + current_domain = None + + shutil.move(filename_work, filename_target) + cleanup() + + except Exception, e: + cleanup() + raise MKAutomationError(str(e)) + + +def automation_notification_replay(args): + nr = args[0] + return notification_replay_backlog(int(nr)) + +def automation_notification_analyse(args): + nr = args[0] + return notification_analyse_backlog(int(nr)) + +def automation_get_bulks(args): + only_ripe = args[0] == "1" + return find_bulks(only_ripe) + +def automation_active_check(args): + hostname, plugin, item = args + actchecks = [] + needed_commands = [] + + if plugin == "custom": + custchecks = host_extra_conf(hostname, custom_checks) + for entry in custchecks: + if entry["service_description"] == item: + command_line = replace_core_macros(hostname, entry.get("command_line", "")) + if command_line: + command_line = autodetect_plugin(command_line) + return execute_check_plugin(command_line) + else: + return -1, "Passive check - cannot be executed" + else: + rules = active_checks.get(plugin) + if rules: + entries = host_extra_conf(hostname, rules) + if entries: + act_info = active_check_info[plugin] + for params in entries: + description = act_info["service_description"](params).replace('$HOSTNAME$', hostname) + if description == item: + args = act_info["argument_function"](params) + command_line = replace_core_macros(hostname, act_info["command_line"].replace("$ARG1$", args)) + return execute_check_plugin(command_line) + + +def load_resource_file(macros): + try: + for line in file(omd_root + "/etc/nagios/resource.cfg"): + line = line.strip() + if not line or line[0] == '#': + continue + varname, value = line.split('=', 1) + macros[varname] = value + except: + if opt_debug: + raise + +# Simulate replacing some of the more important macros of hosts. We +# cannot use dynamic macros, of course. 
Note: this will not work +# without OMD, since we do not know the value of $USER1$ and $USER2$ +# here. We could read the Nagios resource.cfg file, but we do not +# know for sure the place of that either. +def replace_core_macros(hostname, commandline): + macros = { + "$HOSTNAME$" : hostname, + "$HOSTADDRESS$" : lookup_ipaddress(hostname), + } + load_resource_file(macros) + for varname, value in macros.items(): + commandline = commandline.replace(varname, value) + return commandline + + +def execute_check_plugin(commandline): + try: + p = os.popen(commandline + " 2>&1") + output = p.read().strip() + ret = p.close() + if not ret: + status = 0 + else: + if ret & 0xff == 0: + status = ret / 256 + else: + status = 3 + if status < 0 or status > 3: + status = 3 + output = output.split("|",1)[0] # Drop performance data + return status, output + + except Exception, e: + if opt_debug: + raise + return 3, "UNKNOWN - Cannot execute command: %s" % e + + +def automation_update_dns_cache(): + return do_update_dns_cache() +def automation_bake_agents(): + if "do_bake_agents" in globals(): + return do_bake_agents() diff -Nru check-mk-1.2.2p3/benchmark/bench.cfg check-mk-1.2.6p12/benchmark/bench.cfg --- check-mk-1.2.2p3/benchmark/bench.cfg 2013-03-04 11:48:41.000000000 +0000 +++ check-mk-1.2.6p12/benchmark/bench.cfg 2014-10-30 13:30:24.000000000 +0000 @@ -5,7 +5,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. diff -Nru check-mk-1.2.2p3/benchmark/cmkbench.sh check-mk-1.2.6p12/benchmark/cmkbench.sh --- check-mk-1.2.2p3/benchmark/cmkbench.sh 2013-03-04 11:48:41.000000000 +0000 +++ check-mk-1.2.6p12/benchmark/cmkbench.sh 2014-10-30 13:30:24.000000000 +0000 @@ -6,7 +6,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. diff -Nru check-mk-1.2.2p3/bi-example.mk check-mk-1.2.6p12/bi-example.mk --- check-mk-1.2.2p3/bi-example.mk 2013-11-05 09:22:37.000000000 +0000 +++ check-mk-1.2.6p12/bi-example.mk 2013-11-05 09:58:00.000000000 +0000 @@ -85,15 +85,6 @@ ] ) -aggregation_rules["nic"] = ( - "NIC $NIC$", - [ "HOST", "NIC" ], - "worst", - [ - ( "$HOST$", "NIC $NIC$" ), - ] -) - aggregation_rules["checkmk"] = ( "Check_MK", [ "HOST" ], diff -Nru check-mk-1.2.2p3/bin/mkeventd check-mk-1.2.6p12/bin/mkeventd --- check-mk-1.2.2p3/bin/mkeventd 2013-11-05 09:22:41.000000000 +0000 +++ check-mk-1.2.6p12/bin/mkeventd 2015-09-21 10:59:54.000000000 +0000 @@ -6,7 +6,7 @@ from pwd import getpwnam from grp import getgrnam -VERSION="1.2.2p3" +VERSION="1.2.6p12" # .--Declarations--------------------------------------------------------. 
# | ____ _ _ _ | @@ -24,29 +24,31 @@ syslog_priorities = [ "emerg", "alert", "crit", "err", "warning", "notice", "info", "debug" ] syslog_facilities = [ "kern", "user", "mail", "daemon", "auth", "syslog", "lpr", "news", - "uucp", "cron", "authpriv", "ftp", - "(unused 12)", "(unused 13)", "(unused 13)", "(unused 14)", - "local0", "local1", "local2", "local3", "local4", "local5", "local6", "local7", ] - -event_columns = [ - ( "event_id", 1 ), - ( "event_count", 1 ), - ( "event_text", "" ), - ( "event_first", 0.0 ), - ( "event_last", 0.0 ), - ( "event_comment", "" ), - ( "event_sl", 0 ), # filter fehlt - ( "event_host", "" ), - ( "event_contact", "" ), - ( "event_application", "" ), - ( "event_pid", 0 ), - ( "event_priority", 5 ), - ( "event_facility", 1 ), - ( "event_rule_id", "" ), - ( "event_state", 0 ), - ( "event_phase", "" ), - ( "event_owner", "" ), - ( "event_match_groups", "" ), + "uucp", "cron", "authpriv", "ftp", + "(unused 12)", "(unused 13)", "(unused 13)", "(unused 14)", + "local0", "local1", "local2", "local3", "local4", "local5", "local6", "local7", + None, None, None, None, None, None, None, "snmptrap" ] + +event_columns = [ + ( "event_id", 1 ), + ( "event_count", 1 ), + ( "event_text", "" ), + ( "event_first", 0.0 ), + ( "event_last", 0.0 ), + ( "event_comment", "" ), + ( "event_sl", 0 ), # filter fehlt + ( "event_host", "" ), + ( "event_contact", "" ), + ( "event_application", "" ), + ( "event_pid", 0 ), + ( "event_priority", 5 ), + ( "event_facility", 1 ), + ( "event_rule_id", "" ), + ( "event_state", 0 ), + ( "event_phase", "" ), + ( "event_owner", "" ), + ( "event_match_groups", "" ), + ( "event_contact_groups", "" ), ] history_columns = [ @@ -61,6 +63,9 @@ if type(s) != str: return s + elif s.startswith('\2'): + return None # \2 is the designator for None + elif s.startswith('\1'): if len(s) == 1: return () @@ -71,7 +76,7 @@ # Speed-critical function for converting string representation # of log line back to Python values -def convert_history_line(values): +def convert_history_line(values): values[0] = float(values[0]) # time values[4] = int(values[4]) values[5] = int(values[5]) @@ -82,7 +87,11 @@ values[15] = int(values[15]) values[16] = int(values[16]) values[18] = int(values[18]) - values[21] = unsplit(values[21]) + values[21] = unsplit(values[21]) # match groups + if len(values) >= 23: + values[22] = unsplit(values[22]) # contact groups + else: + values.append(None) @@ -167,6 +176,8 @@ g_regex_cache[reg] = r return r +get_regex = regex # make compatible with check_mk_base.py + # Checks if a text contains characters that make it # neccessary to use regular expression logic in order # to match it. @@ -188,7 +199,12 @@ # Assume compiled regex m = pattern.search(text) if m: - return m.groups() + groups = m.groups() + if None in groups: + # Remove None from result tuples and replace it with empty strings + return tuple([g != None and g or '' for g in groups]) + else: + return groups else: return False @@ -239,6 +255,60 @@ #. +# .--Timeperiods---------------------------------------------------------. 
+# |      _____ _                                _           _            |
+# |     |_   _(_)_ __ ___   ___ _ __   ___ _ __(_) ___   __| |___        |
+# |       | | | | '_ ` _ \ / _ \ '_ \ / _ \ '__| |/ _ \ / _` / __|       |
+# |       | | | | | | | | |  __/ |_) |  __/ |  | | (_) | (_| \__ \       |
+# |       |_| |_|_| |_| |_|\___| .__/ \___|_|  |_|\___/ \__,_|___/       |
+# |                            |_|                                       |
+# +----------------------------------------------------------------------+
+# | Timeperiods are used in rule conditions                              |
+# '----------------------------------------------------------------------'
+
+# Dictionary from name to True/False (active / inactive)
+g_timeperiods = None
+g_last_timeperiod_update = 0
+
+def update_timeperiods():
+    global g_timeperiods, g_last_timeperiod_update
+
+    if g_timeperiods != None and int(time.time()) / 60 == g_last_timeperiod_update:
+        return # only update once a minute
+    log("Updating timeperiod information")
+
+    try:
+        livesock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
+        livesock.connect(g_livestatus_socket)
+        livesock.send("GET timeperiods\nColumns: name alias in\n")
+        livesock.shutdown(socket.SHUT_WR)
+        answer = livesock.recv(10000000)
+        table = [ line.split(';') for line in answer.split('\n')[:-1] ]
+        new_timeperiods = {}
+        for tpname, alias, isin in table:
+            new_timeperiods[tpname] = (alias, isin == '1' and True or False)
+        g_timeperiods = new_timeperiods
+        g_last_timeperiod_update = int(time.time()) / 60
+    except Exception, e:
+        log("Cannot update timeperiod information: %s" % e)
+        if opt_debug:
+            raise
+
+def check_timeperiod(tpname):
+    update_timeperiods()
+    if not g_timeperiods:
+        log("Warning: no timeperiod information. Assuming %s active" % tpname)
+        return True
+
+    elif tpname not in g_timeperiods:
+        log("Warning: no such timeperiod %s. Assuming it to be active" % tpname)
+        return True
+
+    else:
+        return g_timeperiods[tpname][1]
+
+
+#.
 # .--Daemonize-----------------------------------------------------------.
 # |             ____                                  _                  |
 # |            |  _ \  __ _  ___ _ __ ___   ___  _ __ (_)_______         |
@@ -312,6 +382,185 @@
         Exception.__init__(self, t)
 
 #.
+# .--MongoDB-------------------------------------------------------------.
+# |             __  __                         ____  ____                |
+# |            |  \/  | ___  _ __   __ _  ___ |  _ \| __ )               |
+# |            | |\/| |/ _ \| '_ \ / _` |/ _ \| | | |  _ \               |
+# |            | |  | | (_) | | | | (_| | (_) | |_| | |_) |              |
+# |            |_|  |_|\___/|_| |_|\__, |\___/|____/|____/               |
+# |                                |___/                                 |
+# +----------------------------------------------------------------------+
+# | The Event Log Archive can be stored in a MongoDB instead of files,   |
+# | this section contains MongoDB related code.
| +# '----------------------------------------------------------------------' + +try: + from pymongo.connection import Connection + from pymongo import DESCENDING, ASCENDING + from pymongo.errors import OperationFailure + import datetime +except ImportError: + Connection = None +g_mongo_conn = None +g_mongo_db = None + +def mongodb_local_connection_opts(): + ip, port = None, None + for l in file('%s/etc/mongodb.conf' % os.environ['OMD_ROOT']): + if l.startswith('bind_ip'): + ip = l.split('=')[1].strip() + elif l.startswith('port'): + port = int(l.split('=')[1].strip()) + return ip, port + +def connect_mongodb(): + global g_mongo_conn, g_mongo_db + if Connection == None: + raise Exception('Could not initialize MongoDB (Python-Modules are missing)') + g_mongo_conn = Connection(*mongodb_local_connection_opts()) + g_mongo_db = g_mongo_conn.__getitem__(os.environ['OMD_SITE']) + +def flush_event_history_mongodb(): + g_mongo_db.ec_archive.drop() + +def get_mongodb_max_history_age(): + result = g_mongo_db.ec_archive.index_information() + if 'dt_-1' not in result or 'expireAfterSeconds' not in result['dt_-1']: + return -1 + else: + return result['dt_-1']['expireAfterSeconds'] + +def update_mongodb_indexes(): + if not g_mongo_conn: + connect_mongodb() + result = g_mongo_db.ec_archive.index_information() + + if 'time_-1' not in result: + g_mongo_db.ec_archive.ensure_index([('time', DESCENDING)]) + +def update_mongodb_history_lifetime(): + if not g_mongo_conn: + connect_mongodb() + + if get_mongodb_max_history_age() == g_config['history_lifetime']: + return # do not update already correct index + + try: + g_mongo_db.ec_archive.drop_index("dt_-1") + except OperationFailure: + pass # Ignore not existing index + + log(repr(g_config['history_lifetime'])) + + # Delete messages after x days + g_mongo_db.ec_archive.ensure_index([('dt', DESCENDING)], + expireAfterSeconds = g_config['history_lifetime'], + unique = False + ) + + log(repr(get_mongodb_max_history_age())) + +def mongodb_next_id(name, first_id = 0): + ret = g_mongo_db.counters.find_and_modify( + query = { '_id': name }, + update = { '$inc': { 'seq': 1 } }, + new = True + ) + + if not ret: + # Initialize the index! + g_mongo_db.counters.insert({ + '_id': name, + 'seq': first_id + }) + return first_id + else: + return ret['seq'] + +def log_event_history_to_mongodb(event, what, who, addinfo): + if not g_mongo_conn: + connect_mongodb() + # We converted _id to be an auto incrementing integer. This makes the unique + # index compatible to history_line of the file (which is handled as integer) + # within mkeventd. It might be better to use the ObjectId() of MongoDB, but + # for the first step, we use the integer index for simplicity + now = time.time() + g_mongo_db.ec_archive.insert({ + '_id' : mongodb_next_id('ec_archive_id'), + 'dt' : datetime.datetime.fromtimestamp(now), + 'time' : now, + 'event' : event, + 'what' : what, + 'who' : who, + 'addinfo' : addinfo, + }) + +def get_event_history_from_mongodb(filters, limit): + history_entries = [] + headers = [ c[0] for c in history_columns ] + + if not g_mongo_conn: + connect_mongodb() + + # Construct the mongodb filtering specification. We could fetch all information + # and do filtering on this data, but this would be way too inefficient. 
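# For illustration (editorial sketch, not part of the patch): a filter triple
# such as ("event_sl", filter_operators['>='], 10) is translated below into
# the MongoDB query fragment { "event.sl": { "$gte": 10 } }, so the matching
# happens inside MongoDB instead of shipping every archive document to Python.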
+ query = {} + for filter_name, opfunc, args in filters: + + if opfunc == filter_operators['=']: + mongo_filter = args + elif opfunc == filter_operators['>']: + mongo_filter = {'$gt': args} + elif opfunc == filter_operators['<']: + mongo_filter = {'$lt': args} + elif opfunc == filter_operators['>=']: + mongo_filter = {'$gte': args} + elif opfunc == filter_operators['<=']: + mongo_filter = {'$lte': args} + elif opfunc == filter_operators['~']: # case sensitive regex, find pattern in string + mongo_filter = {'$regex': args, '$options': ''} + elif opfunc == filter_operators['=~']: # case insensitive, match whole string + mongo_filter = {'$regex': args, '$options': 'mi'} + elif opfunc == filter_operators['~~']: # case insensitive regex, find pattern in string + mongo_filter = {'$regex': args, '$options': 'i'} + elif opfunc == filter_operators['in']: + mongo_filter = {'$in': args} + else: + raise Exception('Filter operator of filter %s not implemented for MongoDB archive' % filter_name) + + if filter_name[:6] == 'event_': + query['event.' + filter_name[6:]] = mongo_filter + elif filter_name[:8] == 'history_': + key = filter_name[8:] + if key == 'line': + key = '_id' + query[key] = mongo_filter + else: + raise Exception('Filter %s not implemented for MongoDB' % filter_name) + + result = g_mongo_db.ec_archive.find(query).sort('time', -1) + # Might be used for debugging / profiling + #file(os.environ['OMD_ROOT'] + '/var/log/check_mk/ec_history_debug.log', 'a').write(pprint.pformat(filters) + '\n' + pprint.pformat(result.explain()) + '\n') + if limit: + result = result.limit(limit + 1) + + # now convert the MongoDB data structure to the eventd internal one + for entry in result: + item = [ + entry['_id'], + entry['time'], + entry['what'], + entry['who'], + entry['addinfo'], + ] + for colname, defval in event_columns: + key = colname[6:] # drop "event_" + item.append(entry['event'].get(key, defval)) + history_entries.append(item) + + return headers, history_entries + +#. # .--History-------------------------------------------------------------. # | _ _ _ _ | # | | | | (_)___| |_ ___ _ __ _ _ | @@ -323,6 +572,15 @@ # | Functions for logging the history of events | # '----------------------------------------------------------------------' +def log_event_history(event, what, who="", addinfo=""): + if g_config["debug_rules"]: + log("Event %d: %s/%s/%s - %s" % (event["id"], what, who, addinfo, event["text"])) + + if g_config['archive_mode'] == 'mongodb': + log_event_history_to_mongodb(event, what, who, addinfo) + else: + log_event_history_to_file(event, what, who, addinfo) + # Make a new entry in the event history. 
Each entry is tab-separated line # with the following columns: # 0: time of log entry @@ -330,9 +588,7 @@ # 2: user who initiated the action (for GUI actions) # 3: additional information about the action # 4-oo: event_columns -def log_event_history(event, what, who="", addinfo=""): - if g_config["debug_rules"]: - log("Event %d: %s/%s/%s - %s" % (event["id"], what, who, addinfo, event["text"])) +def log_event_history_to_file(event, what, who, addinfo): with lock_logging: columns = [ str(time.time()), @@ -342,7 +598,7 @@ columns += [ quote_tab(event.get(colname[6:], defval)) # drop "event_" for colname, defval in event_columns ] - get_logfile().write("\t".join(map(to_utf8, columns)) + "\n") + get_logfile("history").write("\t".join(map(to_utf8, columns)) + "\n") def to_utf8(x): if type(x) == unicode: @@ -354,7 +610,9 @@ if type(col) in [ float, int ]: return str(col) elif type(col) in [ tuple, list ]: - col = "\1" + "\1".join(col) + col = "\1" + "\1".join(map(to_utf8, col)) + elif col == None: + col = "\2" return col.replace("\t", " ") @@ -362,9 +620,9 @@ # Get file object to current log file, handle also # history and lifetime limit. -def get_logfile(): +def get_logfile(basename): global active_history_period - log_dir = g_state_dir + "/history" + log_dir = g_state_dir + "/" + basename make_parentdirs(log_dir + "/foo") # Log into file starting at current history period, @@ -373,7 +631,7 @@ # weekly. timestamp = current_history_period() - # Log period has changed or we have not computed a filename yet -> + # Log period has changed or we have not computed a filename yet -> # compute currently active period if active_history_period == None or timestamp > active_history_period: @@ -415,7 +673,7 @@ log("Flushed log file %s" % path) os.remove(path) elif os.stat(path).st_mtime < min_mtime: - log("Deleting log file %s (lifetime expired after %d days)" % + log("Deleting log file %s (lifetime expired after %d days)" % (path, g_config["history_lifetime"])) os.remove(path) except Exception, e: @@ -424,13 +682,25 @@ log("Error expiring log files: %s" % e) def flush_event_history(): + if g_config['archive_mode'] == 'mongodb': + flush_event_history_mongodb() + else: + flush_event_history_files() + +def flush_event_history_files(): with lock_logging: - expire_logfiles(True) + expire_logfiles(True) -grepping_filters = [ 'event_text', 'event_comment', 'event_host', 'event_host_regex', +grepping_filters = [ 'event_text', 'event_comment', 'event_host', 'event_host_regex', 'event_contact', 'event_application', 'event_rule_id', 'event_owner' ] def get_event_history(filters, limit): + if g_config['archive_mode'] == 'mongodb': + return get_event_history_from_mongodb(filters, limit) + else: + return get_event_history_from_file(filters, limit) + +def get_event_history_from_file(filters, limit): history_entries = [] headers = [ c[0] for c in history_columns ] log_dir = g_state_dir + "/history" @@ -453,7 +723,7 @@ greptexts.sort() greptexts = [ x[1] for x in greptexts ] - time_filters = [ f for f in filters + time_filters = [ f for f in filters if f[0].split("_")[-1] == "time" ] # We do not want to open all files. So our strategy is: @@ -464,6 +734,15 @@ timestamps = [ int(fn[:-4]) for fn in os.listdir(log_dir) if fn.endswith(".log") ] timestamps.sort() + # Use the later logfiles first, to get the newer log entries + # first. When a limit is reached, the newer entries should + # be processed in most cases. We assume that now. 
+    # To keep a consistent order of log entries, we should care
+    # about sorting the log lines in reverse, but that seems to
+    # already be done by the GUI, so we don't do that twice. Skipping
+    # this will lead to some lines of a single file being limited in
+    # the wrong order. But this should be better than before.
+    timestamps.reverse()
     for ts in timestamps:
         if limit != None and limit <= 0:
             break
@@ -497,36 +776,28 @@
         line_no = 0
         # If we have greptexts we pre-filter the file using the extremely
         # fast GNU Grep
+        # Reverse the lines of the log file to process the newer lines first
+        cmd = 'tac "%s"' % path
         if greptexts:
-            args = [ 'grep', '-i', '-e' , ".*".join(greptexts), path ]
-            grep = subprocess.Popen(args, close_fds=True, stdout=subprocess.PIPE)
-            logfile = grep.stdout
-        else:
-            logfile = file(path)
+            cmd += " | grep -i -e %s" % quote_shell_string(".*".join(greptexts))
+        grep = subprocess.Popen(cmd, shell=True, close_fds=True, stdout=subprocess.PIPE)
 
-        for line in logfile:
+        for line in grep.stdout:
             line_no += 1
             if limit != None and len(entries) > limit:
-                if greptexts:
-                    os.kill(grep.pid, 15)
+                grep.kill()
+                grep.wait()
                 break
 
             try:
-                parts = line.rstrip('\n').split('\t')
-                if False:
-                    values = [line_no] + parts
-                elif True:
-                    convert_history_line(parts)
-                    values = [line_no] + parts
-                else:
-                    pairs = zip(history_columns[1:], parts)
-                    values = [line_no] + map(lambda p: unsplit(type(p[0][1])(p[1])), pairs)
-                # Handle \1 separated lists/tuples (e.g. match_groups)
+                parts = line.decode('utf-8').rstrip('\n').split('\t')
+                convert_history_line(parts)
+                values = [line_no] + parts
                 if g_status_server.filter_row(headers, filters, values):
                     entries.append(values)
             except Exception, e:
-                log("Invalid line %d in history file %s: %s" % (line_no, path, e))
-
+                log("Invalid line '%s' in history file %s: %s" % (line, path, e))
+
     return entries
 
@@ -632,9 +903,9 @@
             for name in self._weights.keys():
                 headers.append("status_average_%s_time" % name)
                 row.append(self._times.get(name, 0.0))
-
+
         return headers, row
-
+
+#.
+# .--EventServer---------------------------------------------------------.
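# For illustration (editorial sketch, not part of the patch): the history query
# above streams a logfile newest-first through "tac" with an optional
# case-insensitive grep pre-filter. A minimal standalone version of that
# pipeline; pipes.quote() stands in for the codebase's quote_shell_string():

import pipes, subprocess

def read_newest_first(path, greptexts):
    cmd = 'tac %s' % pipes.quote(path)
    if greptexts:
        # one pattern, terms joined with ".*" just like the code above
        cmd += ' | grep -i -e %s' % pipes.quote('.*'.join(greptexts))
    proc = subprocess.Popen(cmd, shell=True, close_fds=True, stdout=subprocess.PIPE)
    for line in proc.stdout:
        yield line.rstrip('\n')
    proc.wait()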
@@ -649,18 +920,24 @@
 # '----------------------------------------------------------------------'
 
 class EventServer:
-    month_names = { "Jan":1, "Feb":2, "Mar":3, "Apr":4, "May":5, "Jun":6,
+    month_names = { "Jan":1, "Feb":2, "Mar":3, "Apr":4, "May":5, "Jun":6,
                     "Jul":7, "Aug":8, "Sep":9, "Oct":10, "Nov":11, "Dec":12, }
 
     def __init__(self):
-        self._syslog = None
+        self._syslog = None
+        self._syslog_tcp = None
+        self._snmptrap = None
+
         self.create_pipe()
         self.open_eventsocket()
         self.open_syslog()
+        self.open_syslog_tcp()
+        self.open_snmptrap()
+
         self._rules = []
         self._hash_stats = []
-        for facility in range(24):
+        for facility in range(32):
             self._hash_stats.append([ 0 ] * 8 )
 
     def status_columns(self):
@@ -675,7 +952,7 @@
         headers, row = g_perfcounters.get_status()
 
         # Replication
-        headers += [ "status_replication_slavemode", "status_replication_last_sync",
+        headers += [ "status_replication_slavemode", "status_replication_last_sync",
                      "status_replication_success" ]
         if is_replication_slave():
             row.append(g_slave_status["mode"])
@@ -688,7 +965,7 @@
 
     def create_pipe(self):
         try:
-            if not stat.S_ISFIFO(os.stat(g_pipe_path).st_mode):
+            if not stat.S_ISFIFO(os.stat(g_pipe_path).st_mode):
                 os.remove(g_pipe_path)
         except:
             pass
@@ -713,6 +990,40 @@
         except Exception, e:
             raise Exception("Cannot start builtin syslog server: %s" % e)
 
+    def open_syslog_tcp(self):
+        if opt_syslog_tcp:
+            try:
+                if opt_syslog_tcp_fd != None:
+                    self._syslog_tcp = socket.fromfd(opt_syslog_tcp_fd, socket.AF_INET, socket.SOCK_STREAM)
+                    self._syslog_tcp.listen(20)
+                    os.close(opt_syslog_tcp_fd)
+                    log("Opened builtin syslog-tcp server on inherited filedescriptor %d" % opt_syslog_tcp_fd)
+
+                else:
+                    self._syslog_tcp = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+                    self._syslog_tcp.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
+                    self._syslog_tcp.bind(("0.0.0.0", 514))
+                    self._syslog_tcp.listen(20)
+                    log("Opened builtin syslog-tcp server on TCP port 514")
+            except Exception, e:
+                raise Exception("Cannot start builtin syslog-tcp server: %s" % e)
+
+    def open_snmptrap(self):
+        if opt_snmptrap:
+            try:
+                if opt_snmptrap_fd != None:
+                    self._snmptrap = socket.fromfd(opt_snmptrap_fd, socket.AF_INET, socket.SOCK_DGRAM)
+                    os.close(opt_snmptrap_fd)
+                    log("Opened builtin snmptrap server on inherited filedescriptor %d" % opt_snmptrap_fd)
+
+                else:
+                    self._snmptrap = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
+                    self._snmptrap.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
+                    self._snmptrap.bind(("0.0.0.0", 162))
+                    log("Opened builtin snmptrap server on UDP port 162")
+            except Exception, e:
+                raise Exception("Cannot start builtin snmptrap server: %s" % e)
+
     def open_eventsocket(self):
         if g_eventsocket_path:
             if os.path.exists(g_eventsocket_path):
@@ -742,7 +1053,7 @@
         else:
             self.run_loop()
-
+
     def run_loop(self):
@@ -751,7 +1062,7 @@
             c += 1
             try:
                 self.serve()
-
+
             except Exception, e:
                 log("EXCEPTION in event server:\n%s" % format_exception())
                 if opt_debug:
@@ -770,36 +1081,155 @@
         return os.open(g_pipe_path, os.O_RDWR | os.O_NONBLOCK)
 
+    # Format a time difference in seconds into an approximate,
+    # human-readable value
+    def fmt_timeticks(self, ticks):
+        secs = float(ticks) / 100
+        if secs < 240:
+            return "%d sec" % secs
+        mins = secs / 60
+
+        if mins < 120:
+            return "%d min" % mins
+
+        hours, mins = divmod(mins, 60)
+        if hours < 48:
+            return "%d hours, %d min" % (hours, mins)
+
+        days, hours = divmod(hours, 24)
+        return "%d days, %d hours, %d min" % (days, hours, mins)
+
+    # Convert pysnmp datatypes to simple, easily handled ones
+    def snmptrap_convert_var_binds(self, var_bind_list):
+        var_binds = []
+        for oid, value in var_bind_list:
+            key = str(oid)
+
+            if value.__class__.__name__ == 'ObjectIdentifier':
+                val = str(value)
+            elif value.__class__.__name__ == 'TimeTicks':
+                val = self.fmt_timeticks(value._value)
+            else:
+                val = value._value
+
+            # Translate some standard SNMPv2 oids
+            if key == '1.3.6.1.2.1.1.3.0':
+                key = 'Uptime'
+
+            var_binds.append((key, val))
+        return var_binds
+
+    def process_snmptrap(self, (whole_msg, (host, port))):
+        while whole_msg:
+            # Verify the version is supported
+            proto_version = int(pysnmp_api.decodeMessageVersion(whole_msg))
+            if proto_version not in pysnmp_api.protoModules:
+                verbose('Dropped invalid snmptrap (Unsupported SNMP version %s)' % proto_version)
+                return
+
+            proto = pysnmp_api.protoModules[proto_version]
+
+            req_msg, whole_msg = pyasn_decoder.decode(whole_msg, asn1Spec = proto.Message())
+            req_pdu = proto.apiMessage.getPDU(req_msg) # pdu = protocol data unit
+            if not req_pdu.isSameTypeWith(proto.TrapPDU()):
+                return # Skip non-trap packages (according to header)
+
+            # community: proto.apiMessage.getCommunity(req_msg)
+
+            if proto_version == pysnmp_api.protoVersion1:
+                # These fields are available by specification in v1, but not in v2.
+
+                # use the enterprise oid as application
+                application = proto.apiTrapPDU.getEnterprise(req_pdu).prettyPrint()
+                # override the host with the agent address
+                host = proto.apiTrapPDU.getAgentAddr(req_pdu).prettyPrint()
+
+                trap = []
+                trap.append(('Generic-Trap',  proto.apiTrapPDU.getGenericTrap(req_pdu)._value))
+                trap.append(('Specific-Trap', proto.apiTrapPDU.getSpecificTrap(req_pdu)._value))
+                trap.append(('Uptime',        self.fmt_timeticks(proto.apiTrapPDU.getTimeStamp(req_pdu)._value)))
+                trap += self.snmptrap_convert_var_binds(proto.apiTrapPDU.getVarBinds(req_pdu))
+
+            elif proto_version == pysnmp_api.protoVersion2c:
+                trap = self.snmptrap_convert_var_binds(proto.apiPDU.getVarBinds(req_pdu))
+
+                # use the trap-oid as application
+                application = ''
+                for index, (oid, val) in enumerate(trap):
+                    if oid == '1.3.6.1.6.3.1.1.4.1.0':
+                        application = trap.pop(index)[1]
+                        break
+            else:
+                return # dropping unhandled snmp version
+
+            # once we got here we have a real parsed trap which we convert to an event now
+            text = ', '.join([ '%s: %s' % (item[0], str(item[1]).replace('\n', '')) for item in trap ])
+            # Convert to Unicode, first assume UTF-8, then latin-1
+            try:
+                text = text.decode("utf-8")
+            except:
+                text = text.decode("latin-1")
+            event = {
+                'time'        : time.time(),
+                'host'        : host.replace('\n', ''),
+                'priority'    : 5,  # notice
+                'facility'    : 31, # not used by syslog -> we use this for all traps
+                'application' : application.replace('\n', ''),
+                'text'        : text
+            }
+            self.do_translate_hostname(event)
+            self.process_event(event)
+
     def serve(self):
         pipe_fragment = ''
-        pipe = self.open_pipe()
+        pipe = self.open_pipe()
         listen_list = [ pipe ]
+
+        # Wait for incoming syslog packets via UDP
         if self._syslog != None:
             listen_list.append(self._syslog.fileno())
+
+        # Wait for new connections for events via TCP socket
+        if self._syslog_tcp != None:
+            listen_list.append(self._syslog_tcp)
+
+        # Wait for new connections for events via unix socket
         if self._eventsocket:
             listen_list.append(self._eventsocket)
 
-        # Keep list of client connections via UNIX socket and
+        # Wait for incoming SNMP traps
+        if self._snmptrap != None:
+            listen_list.append(self._snmptrap.fileno())
+
+
+        # Keep list of client connections via UNIX socket and
+        # read data that is not yet processed. Map from
+        # fd to (fileobject, data)
         client_sockets = {}
+        select_timeout = 1
         while True:
-            readable = select.select(listen_list + client_sockets.keys(), [], [], None)[0]
+            readable = select.select(listen_list + client_sockets.keys(), [], [], select_timeout)[0]
             data = None
 
             # Accept new connection on event unix socket
             if self._eventsocket in readable:
-                client_socket, addr_info = self._eventsocket.accept()
-                client_sockets[client_socket.fileno()] = (client_socket, "")
+                client_socket, address = self._eventsocket.accept()
+                client_sockets[client_socket.fileno()] = (client_socket, address, "")
+
+            # Same for the TCP syslog socket
+            if self._syslog_tcp and self._syslog_tcp in readable:
+                client_socket, address = self._syslog_tcp.accept()
+                client_sockets[client_socket.fileno()] = (client_socket, address, "")
 
             # Read data from existing event unix socket connections
-            for fd, (cs, previous_data) in client_sockets.items():
+            for fd, (cs, address, previous_data) in client_sockets.items():
                 if fd in readable:
                     # Receive next part of data
                     try:
                         new_data = cs.recv(4096)
                     except:
                         new_data = ""
+                        address = None
 
                     # Put together with incomplete messages from last time
                     data = previous_data + new_data
@@ -811,23 +1241,22 @@
                     # Do we have any complete messages?
                     if '\n' in data:
                         complete, rest = data.rsplit("\n", 1)
-                        self.process_event_data(complete + "\n")
+                        self.process_raw_lines(complete + "\n", address)
                     else:
                         rest = data # keep for next time
 
                 # Only complete messages
                 else:
                     if data:
-                        self.process_event_data(data)
+                        self.process_raw_lines(data, address)
                     rest = ""
 
                 # Connection still open?
                 if new_data:
-                    client_sockets[fd] = (cs, rest)
+                    client_sockets[fd] = (cs, address, rest)
                 else:
                     cs.close()
                     del client_sockets[fd]
-
             # Read data from pipe
             if pipe in readable:
@@ -838,7 +1267,7 @@
                     pipe = self.open_pipe()
                     listen_list[0] = pipe
                     # Pending fragments from previos reads that are not terminated
-                    # by a \n are ignored.
+                    # by a \n are ignored.
                     if pipe_fragment:
                         log("Warning: ignoring incomplete message '%s' from pipe" % pipe_fragment)
                         pipe_fragment = ""
@@ -850,28 +1279,62 @@
                 # Last message still incomplete?
                 if data[-1] != '\n':
                     if '\n' in data: # at least one complete message contained
-                        messages, pipe_fragment = data.rsplit('\n', 1)
-                        self.process_event_data(messages + '\n') # got lost in split
+                        messages, pipe_fragment = data.rsplit('\n', 1)
+                        self.process_raw_lines(messages + '\n') # got lost in split
                     else:
                         pipe_fragment = data # keep beginning of message, wait for \n
                 else:
-                    self.process_event_data(data)
+                    self.process_raw_lines(data)
             except:
                 pass
 
             # Read events from builtin syslog server
            if self._syslog != None and self._syslog.fileno() in readable:
-                self.process_event_data(self._syslog.recv(4096))
+                self.process_raw_lines(*self._syslog.recvfrom(4096))
+
+            # Read events from builtin snmptrap server
+            if self._snmptrap != None and self._snmptrap.fileno() in readable:
+                try:
+                    self.process_raw_data(self.process_snmptrap, self._snmptrap.recvfrom(65535))
+                except Exception, e:
+                    log('Exception handling a snmptrap (skipping this one): %s' % format_exception())
+
+            # check whether or not spool files are available
+            spool_dir = g_state_dir + "/spool"
+            if os.path.exists(spool_dir):
+                spool_files = [ f for f in os.listdir(spool_dir) if f[0] != '.' ]
+                if spool_files:
+                    # process the first spool file we get
+                    this_path = spool_dir + '/' + spool_files.pop()
+                    self.process_raw_lines(file(this_path).read())
+                    os.remove(this_path)
+                    if spool_files:
+                        select_timeout = 0 # enable fast processing to process further files
+                    else:
+                        select_timeout = 1 # restore default select timeout
 
             if opt_profile.get("event"):
                 return
 
+    # Processes incoming data, just a wrapper between the real data and the
+    # handler function to record some statistics etc.
+    def process_raw_data(self, handler_func, data):
+        g_perfcounters.count("messages")
+        before = time.time()
+        # In replication slave mode (when not taken over), ignore all events
+        if not is_replication_slave() or g_slave_status["mode"] != "sync":
+            handler_func(data)
+        elif opt_debug:
+            log("Replication: we are in slave mode, ignoring event")
+        elapsed = time.time() - before
+        g_perfcounters.count_time("processing", elapsed)
 
-    def process_event_data(self, data):
+    # Takes several lines of messages, handles encoding and processes them separately
+    def process_raw_lines(self, data, address = None):
         lines = data.splitlines()
         for line in lines:
-            line = line.rstrip()
+            line = line.rstrip().replace('\0', '')
             # Convert to Unicode, first assume UTF-8, then latin-1
             try:
                 line = line.decode("utf-8")
@@ -879,24 +1342,20 @@
                 line = line.decode("latin-1")
 
             if line:
-                g_perfcounters.count("messages")
-                before = time.time()
-                # In replication slave mode (when not took over), ignore all events
-                if not is_replication_slave() or g_slave_status["mode"] != "sync":
-                    self.process_event(line.rstrip())
-                elif opt_debug:
-                    log("Replication: we are in slave mode, ignoring event")
-                elapsed = time.time() - before
-                g_perfcounters.count_time("processing", elapsed)
-
+                try:
+                    self.process_raw_data(self.process_line, (line, address))
+                except Exception, e:
+                    log('Exception handling a log line (skipping this one): %s' % format_exception())
 
     def do_housekeeping(self):
         with lock_eventstatus:
             with lock_configuration:
                 self.hk_handle_event_timeouts()
                 self.hk_check_expected_messages()
-        with lock_logging:
-            expire_logfiles()
+
+        if g_config['archive_mode'] != 'mongodb':
+            with lock_logging:
+                expire_logfiles()
 
     def hk_handle_event_timeouts(self):
         # 1. Automatically delete all events that are in state "counting"
@@ -916,12 +1375,14 @@
             if not rule:
                 log("Deleting orphaned event %d created by obsolete rule %s" %
                     (event["id"], event["rule_id"]))
-                events_to_delete.append(nr)
+                event["phase"] = "closed"
                 log_event_history(event, "ORPHANED")
+                events_to_delete.append(nr)
 
             elif not "count" in rule and not "expect" in rule:
                 log("Count-based event %d belonging to rule %s: rule does not "
                     "count/expect anymore. Deleting event." % (event["id"], event["rule_id"]))
+                event["phase"] = "closed"
                 log_event_history(event, "NOCOUNT")
                 events_to_delete.append(nr)
 
@@ -930,7 +1391,7 @@
                 count = rule["count"]
                 if count.get("algorithm") in [ "tokenbucket", "dynabucket" ]:
                     last_token = event.get("last_token", event["first"])
-                    secs_per_token = count["period"] / float(count["count"])
+                    secs_per_token = count["period"] / float(count["count"])
                     if count["algorithm"] == "dynabucket": # get fewer tokens if count is lower
                         if event["count"] <= 1:
                             secs_per_token = count["period"]
@@ -944,13 +1405,18 @@
                         event["count"] = max(0, event["count"] - new_tokens)
                         event["last_token"] = last_token + new_tokens * secs_per_token # not now! would be unfair
                     if event["count"] == 0:
-                        log("Rule %s, event %d: again without allowed rate, dropping event" %
+                        log("Rule %s, event %d: again without allowed rate, dropping event" %
                             (rule["id"], event["id"]))
+                        event["phase"] = "closed"
+                        log_event_history(event, "COUNTFAILED")
                         events_to_delete.append(nr)
+
                 else: # algorithm 'interval'
                     if event["first"] + count["period"] <= now: # End of period reached
                         log("Rule %s: reached only %d out of %d events within %d seconds. "
                             "Resetting to zero." % (rule["id"], event["count"],
                                                     count["count"], count["period"]))
+                        event["phase"] = "closed"
+                        log_event_history(event, "COUNTFAILED")
                         events_to_delete.append(nr)
 
             # Handle delayed actions
@@ -961,7 +1427,12 @@
                     event["phase"] = "open"
                     log_event_history(event, "DELAYOVER")
                     if rule:
-                        do_rule_actions(rule, event)
+                        event_has_opened(rule, event)
+                        if rule.get("autodelete"):
+                            event["phase"] = "closed"
+                            log_event_history(event, "AUTODELETE")
+                            events_to_delete.append(nr)
+
                     else:
                         log("Cannot do rule action: rule %s not present anymore." % event["rule_id"])
 
@@ -971,6 +1442,7 @@
                 if now >= event["live_until"]:
                     allowed_phases = event.get("live_until_phases", ["open"])
                     if event["phase"] in allowed_phases:
+                        event["phase"] = "closed"
                        events_to_delete.append(nr)
                         log("Livetime of event %d (rule %s) exceeded. Deleting event." % (
                             event["id"], event["rule_id"]))
@@ -992,7 +1464,7 @@
         # 1. An event for such a rule already exists and is
         #    in the state "counting" -> this can only be the case if
         #    more than one occurrance is required.
-        # 2. No event at all exists.
+        # 2. No event at all exists.
         #    in that case.
         for rule in self._rules:
             if "expect" in rule:
@@ -1026,11 +1498,11 @@
                     if event["count"] < expected_count: # no -> trigger alarm
                         self.handle_absent_event(rule, event["count"], expected_count, event["last"])
                     else: # yes -> everything is fine. Just log.
-                        log("Rule %s has reached %d occurrances (%d required). Starting next period." %
+                        log("Rule %s has reached %d occurrences (%d required). Starting next period." %
                             (rule["id"], event["count"], expected_count))
                         log_event_history(event, "COUNTREACHED")
                     # Counting event is no longer needed.
-                    events_to_delete.append(nr)
+                    events_to_delete.append(nr)
                     break
 
             # Ou ou, no event found at all.
@@ -1076,25 +1548,31 @@
         # Create artifical event from scratch. Make sure that all important
         # fields are defined.
         event = {
-            "rule_id" : rule["id"],
-            "text" : text,
-            "phase" : "open",
-            "count" : 1,
-            "time" : now,
-            "first" : now,
-            "last" : now,
-            "comment" : "",
-            "host" : "",
-            "application" : "",
-            "pid" : 0,
-            "priority" : 3,
-            "facility" : 1, # user
-            "match_groups" : (),
+            "rule_id"        : rule["id"],
+            "text"           : text,
+            "phase"          : "open",
+            "count"          : 1,
+            "time"           : now,
+            "first"          : now,
+            "last"           : now,
+            "comment"        : "",
+            "host"           : "",
+            "application"    : "",
+            "pid"            : 0,
+            "priority"       : 3,
+            "facility"       : 1, # user
+            "match_groups"   : (),
+            "contact_groups" : rule.get("contact_groups"),
         }
         self.rewrite_event(rule, event, ())
         g_event_status.new_event(event)
         log_event_history(event, "COUNTFAILED")
-        do_rule_actions(rule, event)
+        event_has_opened(rule, event)
+        if rule.get("autodelete"):
+            event["phase"] = "closed"
+            log_event_history(event, "AUTODELETE")
+            g_event_status.remove_event(event)
+
 
     # Precompile regular expressions and similar stuff
@@ -1106,6 +1584,22 @@
         count_rules = 0
         count_unspecific = 0
 
+        def compile_matching_value(key, val):
+            value = val.strip()
+            # Remove leading .* from regex. This is redundant and
+            # dramatically destroys performance when doing an infix search.
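# For illustration (editorial note, not part of the patch): stripping a leading
# ".*" is safe because match() performs an unanchored search anyway, so
# re.search(".*foo", s) accepts exactly the same strings as re.search("foo", s).
# The difference is that the leading ".*" makes the engine attempt a greedy
# match from every starting position, which is what ruins infix-search speed.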
+ if key in [ "match", "match_ok" ]: + while value.startswith(".*") and not value.startswith(".*?"): + value = value[2:] + + if not value: + return None + + if is_regex(value): + return re.compile(value, re.IGNORECASE) + else: + return val.lower() + for rule in rules: if rule.get("disabled"): count_disabled += 1 @@ -1124,25 +1618,28 @@ try: for key in [ "match", "match_ok", "match_host", "match_application" ]: if key in rule: - value = rule[key].strip() - # Remote leading .* from regex. This is redundant and - # dramatically destroys performance when doing an infix search. - if key in [ "match", "match_ok" ]: - while value.startswith(".*") and not value.startswith(".*?"): - value = value[2:] - if not value: + value = compile_matching_value(key, rule[key]) + if value == None: del rule[key] continue - if is_regex(value): - rule[key] = re.compile(value, re.IGNORECASE) - else: - rule[key] = rule[key].lower() + + rule[key] = value + + if 'state' in rule and type(rule['state']) == tuple and rule['state'][0] == 'text_pattern': + for key in [ '2', '1', '0' ]: + if key in rule['state'][1]: + value = compile_matching_value('state', rule['state'][1][key]) + if value == None: + del rule['state'][1][key] + else: + rule['state'][1][key] = value + except Exception, e: if opt_debug: raise rule["disabled"] = True count_disabled += 1 - log("Ignoring rule '%s' because of an invalid regex (%s)." % + log("Ignoring rule '%s' because of an invalid regex (%s)." % (rule["id"], e)) if g_config["rule_optimizer"]: @@ -1155,30 +1652,31 @@ log("Compiled %d active rules (ignoring %d disabled rules)" % (count_rules, count_disabled)) if g_config["rule_optimizer"]: - log("Rule hash: %d rules - %d hashed, %d unspecific" % + log("Rule hash: %d rules - %d hashed, %d unspecific" % (len(self._rules), len(self._rules) - count_unspecific, count_unspecific)) - for facility in range(24): + for facility in range(32): if facility in self._rule_hash: stats = [] for prio, entries in self._rule_hash[facility].items(): stats.append("%s(%d)" % (syslog_priorities[prio], len(entries))) - log(" %-12s: %s" % (syslog_facilities[facility], " ".join(stats))) + if syslog_facilities[facility]: + log(" %-12s: %s" % (syslog_facilities[facility], " ".join(stats))) def hash_rule(self, rule): - # Construct rule hash for faster execution. + # Construct rule hash for faster execution. facility = rule.get("match_facility") if facility: self.hash_rule_facility(rule, facility) else: - for facility in range(24): # all syslog facilities + for facility in range(32): # all syslog facilities self.hash_rule_facility(rule, facility) def hash_rule_facility(self, rule, facility): needed_prios = [False] * 8 for key in [ "match_priority", "cancel_priority" ]: - if key in rule: + if key in rule: prio_from, prio_to = rule[key] # Beware: from > to! 
for p in range(prio_to, prio_from + 1): @@ -1197,7 +1695,7 @@ log("Top 20 of facility/priority:") entries = [] total_count = 0 - for facility in range(24): + for facility in range(32): for priority in range(8): count = self._hash_stats[facility][priority] if count: @@ -1207,21 +1705,30 @@ entries.reverse() for count, (facility, priority) in entries[:20]: log(" %s/%s - %d (%.2f%%)" % ( - syslog_facilities[facility], syslog_priorities[priority], count, + syslog_facilities[facility], syslog_priorities[priority], count, (100.0 * count / float(total_count)))) - def process_event(self, line): + def process_line(self, (line, address)): + line = line.rstrip() if g_config["debug_rules"]: - log(u"Processing message '%s'" % line) - event = self.parse_event(line) + if address: + log(u"Processing message from %r: '%s'" % (address, line)) + else: + log(u"Processing message '%s'" % line) + self.process_event(self.parse_event(line, address)) + + def process_event(self, event): + # Log all incoming messages into a syslog-like text file if that is enabled + if g_config["log_messages"]: + self.log_message(event) # Rule optimizer if g_config["rule_optimizer"]: self._hash_stats[event["facility"]][event["priority"]] += 1 rule_candidates = self._rule_hash.get(event["facility"], {}).get(event["priority"], []) else: - rule_candidates = self._rules - + rule_candidates = self._rules + for rule in rule_candidates: try: result = self.event_rule_matches(rule, event) @@ -1249,7 +1756,12 @@ return else: + # Remember the rule id that this event originated from event["rule_id"] = rule["id"] + + # Attach optional contact group information for visibility + event["contact_groups"] = rule.get("contact_groups") + # Store groups from matching this event. In order to make # persistence easier, we do not safe them as list but join # them on ASCII-1. @@ -1269,9 +1781,17 @@ existing_event["delay_until"] = time.time() + rule["delay"] existing_event["phase"] = "delayed" else: - do_rule_actions(rule, existing_event) + event_has_opened(rule, existing_event) + log_event_history(existing_event, "COUNTREACHED") + if "delay" not in rule and rule.get("autodelete"): + existing_event["phase"] = "closed" + log_event_history(existing_event, "AUTODELETE") + with lock_eventstatus: + g_event_status.remove_event(existing_event) + + elif "expect" in rule: g_event_status.count_expected_event(event) @@ -1286,9 +1806,19 @@ with lock_eventstatus: g_event_status.new_event(event) if event["phase"] == "open": - do_rule_actions(rule, event) + event_has_opened(rule, event) + if rule.get("autodelete"): + event["phase"] = "closed" + log_event_history(event, "AUTODELETE") + with lock_eventstatus: + g_event_status.remove_event(event) return + # End of loop over rules. + if g_config["archive_orphans"]: + g_event_status.archive_event(event) + + # Checks if an event matches a rule. 
Returns either False (no match)
     # or a pair of matchtype, groups, where matchtype is False for a
     # normal match and True for a cancelling match and the groups is a tuple
@@ -1312,7 +1842,7 @@
 
         if False == match(rule.get("match_application"), event["application"], complete=False):
             if debug:
-                log("  did not match because of wrong application '%s' (need '%s')" %
+                log("  did not match because of wrong application '%s' (need '%s')" %
                     (event["application"], pattern(rule.get("match_application"))))
             return False
 
@@ -1321,6 +1851,11 @@
             log("  did not match because of wrong syslog facility")
             return False
 
+        if "match_timeperiod" in rule and not check_timeperiod(rule["match_timeperiod"]):
+            if debug:
+                log("  did not match, because timeperiod %s is not active" % rule["match_timeperiod"])
+            return False
+
         if "match_ok" in rule or "cancel_priority" in rule:
             if "cancel_priority" in rule:
                 up, lo = rule["cancel_priority"]
@@ -1364,7 +1899,7 @@
 
             if p < sl_from or p > sl_to:
                 if debug:
-                    log("  did not match because of wrong service level %d (need %d..%d)" %
+                    log("  did not match because of wrong service level %d (need %d..%d)" %
                         (p, sl_from, sl_to),)
                 return False
 
@@ -1382,8 +1917,20 @@
                     event["state"] = 2
                 else:
                     event["state"] = 1
+            elif type(rule["state"]) == tuple and rule["state"][0] == "text_pattern":
+                for key in [ '2', '1', '0', '3' ]:
+                    if key in rule["state"][1]:
+                        log(repr(rule["state"][1][key]))
+                        match_groups = match(rule["state"][1][key], event["text"], complete = False)
+                        if match_groups != False:
+                            event["state"] = int(key)
+                            break
+                    elif key == '3': # No rule matched!
+                        event["state"] = 3
+
             else:
                 event["state"] = rule["state"]
+
         if "sl" not in event:
             event["sl"] = rule["sl"]
         event["first"] = event["time"]
@@ -1422,7 +1969,6 @@
         event = {} # line starts with '@'
         if line[11] == ';':
-            log(line)
             timestamp_str, sl, contact, rest = line[1:].split(';', 3)
             host, rest = rest.split(None, 1)
             if len(sl):
@@ -1432,7 +1978,7 @@
         else:
             timestamp_str, host, rest = line[1:].split(None, 2)
 
-        event["time"] = float(int(timestamp_str))
+        event["time"] = float(int(timestamp_str))
         service, message = rest.split(":", 1)
         event["application"] = service
         event["text"] = message.strip()
@@ -1440,7 +1986,60 @@
         return event
 
-    def parse_event(self, line):
+    # Translate a hostname if this is configured. We are
+    # *really* sorry: this code snippet is copied from modules/check_mk_base.py.
+    # There is still no common library. Please keep this in sync with the
+    # original code
+    def translate_hostname(self, backedhost):
+        translation = g_config["hostname_translation"]
+
+        # Here comes the original code from modules/check_mk_base.py
+        if translation:
+            # 1. Case conversion
+            caseconf = translation.get("case")
+            if caseconf == "upper":
+                backedhost = backedhost.upper()
+            elif caseconf == "lower":
+                backedhost = backedhost.lower()
+
+            # 2. Drop domain part (not applied to IP addresses!)
+            if translation.get("drop_domain") and backedhost:
+                # only apply if first part does not convert successfully into an int
+                firstpart = backedhost.split(".", 1)[0]
+                try:
+                    int(firstpart)
+                except:
+                    backedhost = firstpart
+
+            # 3. Regular expression conversion
+            if "regex" in translation:
+                regex, subst = translation.get("regex")
+                if not regex.endswith('$'):
+                    regex += '$'
+                rcomp = get_regex(regex)
+                mo = rcomp.match(backedhost)
+                if mo:
+                    backedhost = subst
+                    for nr, text in enumerate(mo.groups()):
+                        backedhost = backedhost.replace("\\%d" % (nr+1), text)
+
+            # 4. Explicit mapping
+            for from_host, to_host in translation.get("mapping", []):
+                if from_host == backedhost:
+                    backedhost = to_host
+                    break
+
+        return backedhost
+
+    def do_translate_hostname(self, event):
+        try:
+            event["host"] = self.translate_hostname(event["host"])
+        except Exception, e:
+            if g_config["debug_rules"]:
+                log('Unable to parse host "%s" (%s)' % (event.get("host"), e))
+            event["host"] = ""
+
+    def parse_event(self, line, address):
         event = {}
         try:
             # Variant 1: plain syslog message without priority/facility:
@@ -1462,7 +2061,18 @@
             # - Leap seconds MUST NOT be used.
             # <166>2013-04-05T13:49:31.685Z esx Vpxa: message....
 
-            # Variant 2,3,4,5
+            # Variant 6: syslog message without date / host:
+            # <5>SYSTEM_INFO: [WLAN-1] Triggering Background Scan
+
+            # Variant 7: logwatch.ec event forwarding
+            # <78>@1341847712 Klapprechner /var/log/syslog: message....
+
+            # FIXME: Would be better to parse the syslog messages in another way:
+            # Split the message by the first ":", then split the syslog header part
+            # and detect which information are present. Take a look at the syslog RFCs
+            # for details.
+
+            # Variant 2,3,4,5,6,7
             if line.startswith('<'):
                 i = line.find('>')
                 prio = int(line[1:i])
@@ -1475,8 +2085,14 @@
                 event["facility"] = 1 # user
                 event["priority"] = 5 # notice
 
+            # Variant 7
+            if line[0] == '@' and line[11] == ' ':
+                timestamp, event['host'], line = line.split(' ', 2)
+                event['time'] = float(timestamp[1:])
+                event.update(self.parse_syslog_info(line))
+
             # Variant 3
-            if line.startswith("@"):
+            elif line.startswith("@"):
                 event.update(self.parse_monitoring_info(line))
 
             # Variant 5
@@ -1488,6 +2104,15 @@
                 event['time'] = time.mktime(time.strptime(rfc3339_part[:19], '%Y-%m-%dT%H:%M:%S'))
                 event.update(self.parse_syslog_info(line))
 
+            # Variant 6
+            elif len(line.split(': ', 1)[0].split(' ')) == 1:
+                event.update(self.parse_syslog_info(line))
+                # There is no datetime information in the message, use current time
+                event['time'] = time.time()
+                # There is no host information, use the provided address
+                if address and type(address) == tuple:
+                    event["host"] = address[0]
+
             # Variant 1,2,4
             else:
                 month_name, day, timeofday, host, rest = line.split(None, 4)
@@ -1517,8 +2142,6 @@
                 # A further problem here: we do not now wether the message is in DST or not
                 event["time"] = time.mktime((year, month, day, hours, minutes, seconds, 0, 0, lt.tm_isdst))
 
-
-
         except Exception, e:
             if g_config["debug_rules"]:
                 log('Got non-syslog message "%s" (%s)' % (line, e) )
@@ -1532,12 +2155,31 @@
             "time"        : time.time(),
         }
 
+        self.do_translate_hostname(event)
+
         if g_config["debug_rules"]:
-            log('Parsed message:\n' +
-                ("".join([ " %-15s %s\n" % (k+":",v) for (k,v) in sorted(event.items())])).rstrip())
+            log('Parsed message:\n' +
+                ("".join([ " %-15s %s\n" % (k+":",v) for (k,v) in
+                           sorted(event.items())])).rstrip())
+
         return event
-
+    def log_message(self, event):
+        try:
+            get_logfile("messages").write("%s %s %s%s: %s\n" % (
+                time.strftime("%b %d %H:%M:%S", time.localtime(event["time"])),
+                event["host"],
+                event["application"],
+                event["pid"] and ("[%s]" % event["pid"]) or "",
+                event["text"]))
+        except Exception, e:
+            if opt_debug:
+                raise
+            # Better silently ignore errors. We could have run out of
+            # diskspace and make things worse by logging that we could
+            # not log.
+
+
 #.
 # .--StatusServer--------------------------------------------------------.
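# For illustration (editorial sketch, not part of the patch): the "<NNN>"
# header handled by the parse_event() variants above packs facility and
# severity into one integer, so decoding it boils down to a shift and a mask:

def decode_syslog_pri(pri):
    # e.g. decode_syslog_pri(166) == (20, 6), i.e. local4/informational,
    # matching the "<166>..." example message quoted above
    return pri >> 3, pri & 7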
@@ -1560,6 +2202,10 @@ self.open_sockets() def open_sockets(self): + self.open_unix_socket() + self.open_tcp_socket() + + def open_unix_socket(self): if os.path.exists(g_socket_path): os.remove(g_socket_path) self._socket = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) @@ -1567,7 +2213,9 @@ # Make sure that socket is group writable os.chmod(g_socket_path, 0664) self._socket.listen(g_config['socket_queue_len']) + self._unix_socket_queue_len = g_config['socket_queue_len'] # detect changes in config + def open_tcp_socket(self): if g_config["remote_status"]: try: self._tcp_port, self._tcp_allow_commands = g_config["remote_status"][:2] @@ -1592,13 +2240,28 @@ self._tcp_access_list = None def close_sockets(self): + self.close_tcp_socket() + self.close_unix_socket() + + def close_unix_socket(self): if self._socket: self._socket.close() self._socket = None + + def close_tcp_socket(self): if self._tcp_socket: self._tcp_socket.close() self._tcp_socket = None + def reopen_sockets(self): + if self._unix_socket_queue_len != g_config["socket_queue_len"]: + log("socket_queue_len has changed. Reopening UNIX socket.") + self.close_unix_socket() + self.open_unix_socket() + + self.close_tcp_socket() + self.open_tcp_socket() + def reload_configuration(self): self._reopen_sockets = True @@ -1633,10 +2296,7 @@ addr_info = None if self._reopen_sockets: - if opt_debug: - log("Reopening sockets") - self.close_sockets() - self.open_sockets() + self.reopen_sockets() self._reopen_sockets = False listen_list = [ self._socket ] @@ -1688,9 +2348,11 @@ method, table = parts + output_format = "python" + with lock_eventstatus: if method == "GET": - response = self.handle_get_request(table, query[1:]) + response, output_format = self.handle_get_request(table, query[1:]) elif method == "REPLICATE": response = self.handle_replicate(table, client_ip) @@ -1704,8 +2366,19 @@ else: raise MKClientError("Invalid method %s (allowed are GET, COMMAND and REPLICATE)" % method) - socket.send(repr(response)) - socket.send("\n") + if output_format == "plain": + def format_column(value): + try: + return value.encode("utf-8") + except Exception, e: + return repr(value) + for line in response: + socket.send("\x02".join(map(lambda x: format_column(x), line))) + socket.send("\n") + else: + socket.send(repr(response)) + socket.send("\n") + socket.close() def handle_command_request(self, commandline): @@ -1747,8 +2420,8 @@ g_event_status.delete_event(int(event_id), user) def handle_command_update(self, arguments): - event_id, user, acknowledged, comment, contact = arguments - event = g_event_status.event(int(event_id)) + event_id, user, acknowledged, comment, contact = arguments + event = g_event_status.event(int(event_id)) if not event: raise MKClientError("No event with id %s" % event_id) if comment: @@ -1807,21 +2480,25 @@ g_event_status.reset_counters(rule_id) def handle_command_action(self, arguments): - with lock_configuration: - event_id, user, action_id = arguments - event = g_event_status.event(int(event_id)) - if action_id not in g_config["action"]: - raise MKClientError("The action '%s' is not defined. 
After adding new commands please " - "make sure that you activate the changes in the Event Console.") % action_id - action = g_config["action"][action_id] - do_rule_action(action, event, user) + event_id, user, action_id = arguments + event = g_event_status.event(int(event_id)) + + if action_id == "@NOTIFY": + do_notify(event, user, is_cancelling = False) + else: + with lock_configuration: + if action_id not in g_config["action"]: + raise MKClientError("The action '%s' is not defined. After adding new commands please " + "make sure that you activate the changes in the Event Console." % action_id) + action = g_config["action"][action_id] + do_event_action(action, event, user) def handle_command_switchmode(self, arguments): new_mode = arguments[0] if not is_replication_slave(): raise MKClientError("Cannot switch replication mode: this is not a replication slave.") elif new_mode not in [ "sync", "takeover" ]: - raise MKClientError("Invalid target mode '%s': allowed are only 'sync' and 'takeover'" % + raise MKClientError("Invalid target mode '%s': allowed are only 'sync' and 'takeover'" % new_mode) g_slave_status["mode"] = new_mode save_slave_status() @@ -1853,6 +2530,7 @@ filters = [] only_host = None limit = None + output_format = "python" for line in headerlines: try: header, argument = line.rstrip().split(":", 1) @@ -1863,11 +2541,13 @@ if name == "event_host" and opfunc == filter_operators['=']: only_host = argument filters.append((name, opfunc, argument)) + elif header == "OutputFormat": + output_format = argument elif header == "Limit": limit = int(argument) else: log("Ignoring not-implemented header %s" % header) - + except Exception, e: raise MKClientError("Invalid header line '%s': %s" % (line.rstrip(), e)) @@ -1890,7 +2570,7 @@ else: rows += list_rows - return rows + return rows, output_format def filter_row(self, headers, filters, list_row): row = dict(zip(headers, list_row)) @@ -1903,7 +2583,7 @@ # Examples: # id = 17 # name ~= This is some .* text - # host_name = + # host_name = parts = textspec.split(None, 2) if len(parts) == 2: parts.append("") @@ -1963,7 +2643,7 @@ # Wait until either housekeeping or retention is due, but at # maximum 60 seconds. That way changes of the interval from a very # high to a low value will never require more than 60 seconds - + event_list = [ next_housekeeping, next_retention, next_statistics ] if is_replication_slave(): event_list.append(next_replication) @@ -2005,7 +2685,6 @@ time.sleep(1) - #. # .--EventStatus---------------------------------------------------------. # | _____ _ ____ _ _ | @@ -2080,7 +2759,7 @@ next_start = self.next_interval_start(interval, current_start) self._interval_starts[rule_id] = next_start if opt_debug: - log("Rule %s: next interval starts %s (i.e. now + %.2f sec)" % + log("Rule %s: next interval starts %s (i.e. 
now + %.2f sec)" % (rule_id, next_start, time.time() - next_start)) def pack_status(self): @@ -2088,7 +2767,7 @@ "next_event_id" : self._next_event_id, "events" : self._events, "rule_stats" : self._rule_stats, - "interval_starts" : self._interval_starts, + "interval_starts" : self._interval_starts, } def unpack_status(self, status): @@ -2138,6 +2817,21 @@ self._events.append(event) log_event_history(event, "NEW") + def archive_event(self, event): + g_perfcounters.count("events") + event["id"] = self._next_event_id + self._next_event_id += 1 + event["phase"] = "closed" + log_event_history(event, "ARCHIVED") + + + def remove_event(self, event): + try: + self._events.remove(event); + except Exception, e: + log("Cannot remove event %d: not present" % event["id"]) + + # Cancel all events the belong to a certain rule id and are # of the same "breed" as a new event. def cancel_events(self, new_event, match_groups, rule): @@ -2146,7 +2840,19 @@ for nr, event in enumerate(self._events): if event["rule_id"] == rule["id"]: if self.cancelling_match(match_groups, new_event, event, rule): + # Fill a few fields of the cancelled event with data from + # the cancelling event so that action scripts have useful + # values and the logfile entry if more relevant. + event["phase"] = "closed" + event["state"] = 0 # OK + event["text"] = new_event["text"] + event["time"] = new_event["time"] + event["last"] = new_event["time"] + event["priority"] = new_event["priority"] log_event_history(event, "CANCELLED") + actions = rule.get("cancel_actions", []) + do_event_actions(actions, event, is_cancelling = True) + to_delete.append(nr) for nr in to_delete[::-1]: del self._events[nr] @@ -2179,7 +2885,7 @@ if event["facility"] != new_event["facility"]: if debug: - log("Do not cancel event %d: syslog facility is not the same (%d != %d)" % + log("Do not cancel event %d: syslog facility is not the same (%d != %d)" % (event["id"], event["facility"], new_event["facility"])) # Make sure, that the matching groups are the same. If the OK match @@ -2190,7 +2896,7 @@ if prev_group != cur_group: if debug: log("Do not cancel event %d: match group number " - "%d does not match (%s != %s)" % + "%d does not match (%s != %s)" % (event["id"], nr+1, prev_group, cur_group)) return False @@ -2272,6 +2978,7 @@ def delete_event(self, event_id, user): for nr, event in enumerate(self._events): if event["id"] == event_id: + event["phase"] = "closed" log_event_history(event, "DELETE", user) del self._events[nr] return @@ -2303,7 +3010,7 @@ # | executing scripts. | # '----------------------------------------------------------------------' -def do_rule_actions(rule, event): +def event_has_opened(rule, event): # Prepare for events with a limited livetime. This time starts # when the event enters the open state or acked state if "livetime" in rule: @@ -2311,25 +3018,35 @@ event["live_until"] = time.time() + livetime event["live_until_phases"] = phases - for aname in rule.get("actions", []): - action = g_config["action"].get(aname) - if not action: - log("Cannot execute undefined action '%s'" % aname) - log("We have to following actions: %s" % ", ".join(g_config["action"].keys())) - else: - log("Going to execute action '%s' on event %d" % (action["title"], event["id"])) - do_rule_action(action, event) + do_event_actions(rule.get("actions", []), event, is_cancelling = False) + + +# Execute a list of actions on an event that has just been +# opened or cancelled. 
+def do_event_actions(actions, event, is_cancelling): + for aname in actions: + if aname == "@NOTIFY": + do_notify(event, None, is_cancelling) + else: + action = g_config["action"].get(aname) + if not action: + log("Cannot execute undefined action '%s'" % aname) + log("We have to following actions: %s" % ", ".join(g_config["action"].keys())) + else: + log("Going to execute action '%s' on event %d" % (action["title"], event["id"])) + do_event_action(action, event) + # Rule actions are currently done synchronously. Actions should # not hang for more than a couple of ms. -def do_rule_action(action, event, user=""): +def do_event_action(action, event, user=""): if action["disabled"]: return try: action_type, settings = action["action"] if action_type == 'email': - to = substitute_event_tags(settings["to"], event) + to = substitute_event_tags(settings["to"], event) subject = substitute_event_tags(settings["subject"], event) body = substitute_event_tags(settings["body"], event) send_email(to, subject, body) @@ -2344,22 +3061,104 @@ raise log("Error during execution of action %s: %s" % (action["id"], format_exception())) + +# This function creates a Check_MK Notification for a locally running Check_MK. +# We simulate a *service* notification. +monitoring_state_names = [ "OK", "WARN", "CRIT", "UNKNOWN" ] + +def do_notify(event, username, is_cancelling): + # Create notification context based on event + + # If the host name is the IP address, then use that. Otherwise let + # the variable empty. + if event["host"] and event["host"][0].isdigit(): + ipaddress = event["host"] + else: + ipaddress = "" + + context = { + "WHAT": "SERVICE", + "DATE": str(int(event["last"])), # -> Event: Time + "MICROTIME": str(int(event["last"] * 1000000)), + "HOSTADDRESS": ipaddress, + "HOSTALIAS": event["host"], # -> = HOSTNAME + "HOSTDOWNTIME": "0", + "HOSTNAME": event["host"], + "HOSTTAGS": "", # alas, we have not host tags... + "LASTSERVICESTATE": is_cancelling and "CRIT" or "OK", # better assume OK, we have no transition information + "LASTSERVICESTATEID": is_cancelling and "2" or "0", # -> immer OK + "LASTSERVICEOK": "0", # 1.1.1970 + "LASTSERVICESTATECHANGE": str(int(event["last"])), + "LONGSERVICEOUTPUT": "", + "NOTIFICATIONAUTHOR": username or "", + "NOTIFICATIONAUTHORALIAS": username or "", + "NOTIFICATIONAUTHORNAME": username or "", + "NOTIFICATIONCOMMENT": "", + "NOTIFICATIONTYPE": is_cancelling and "RECOVERY" or "PROBLEM", + "SERVICEACKAUTHOR": "", + "SERVICEACKCOMMENT": "", + "SERVICEATTEMPT": "1", + "SERVICECHECKCOMMAND": "ec-rule-" + event["rule_id"], + "SERVICEDESC": event["application"] or "Unset", + "SERVICENOTIFICATIONNUMBER": "1", + "SERVICEOUTPUT": event["text"], + "SERVICEPERFDATA": "", + "SERVICEPROBLEMID": "ec-id-" + str(event["id"]), + "SERVICESTATE": monitoring_state_names[event["state"]], + "SERVICESTATEID": str(event["state"]), + "SERVICE_EC_CONTACT": event.get("owner", ""), + "SERVICE_SL": str(event["sl"]), + "SVC_SL": str(event["sl"]), + + # Some fields only found in EC notifications + "EC_ID": str(event["id"]), + "EC_RULE_ID": event["rule_id"], + "EC_PRIORITY": str(event["priority"]), + "EC_FACILITY": str(event["facility"]), + "EC_PHASE": event["phase"], + "EC_COMMENT": event.get("comment", ""), + "EC_OWNER": event.get("owner", ""), + "EC_PID": str(event.get("pid", 0)), + "EC_MATCH_GROUPS": "\t".join(event["match_groups"]), + "EC_CONTACT_GROUPS": event["contact_groups"] and " ".join(event["contact_groups"]) or "", + } + + # Send notification context via stdin. 
+ context_string = "".join([ "%s=%s\n" % (varname, value.replace("\n", "\\n")) for (varname, value) in context.items() ]) + + context_string = to_utf8(context_string) + p = subprocess.Popen(["cmk", "--notify", "stdin"], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) + response = p.communicate(input=context_string)[0] + status = p.returncode + if status: + log("Error notifying via Check_MK: %s" % response.strip()) + else: + log("Successfully forwarded notification for event %d to Check_MK" % event["id"]) + + def substitute_event_tags(text, event): - groups = [("match_group_%d" % (nr+1), g) for (nr, g) in enumerate(event.get("match_groups", ()))] - for key, value in event.items() + groups: + substs = [("match_group_%d" % (nr+1), g) + for (nr, g) + in enumerate(event.get("match_groups", ()))] + + for key, defaultvalue in event_columns: + varname = key[6:] + substs.append((varname, event.get(varname, defaultvalue))) + + for key, value in substs: if type(value) == tuple: - value = " ".join(value) + value = " ".join(map(str,value)) elif type(value) not in [ str, unicode ]: value = str(value) text = text.replace('$%s$' % key.upper(), value) return text -def shell_escape(s): - return s.replace("'", "'\"'\"") +def quote_shell_string(s): + return "'" + s.replace("'", "'\"'\"'") + "'" def send_email(to, subject, body): - os.popen("mail -s '%s' '%s'" % (shell_escape(subject), shell_escape(to)), "w").write(body) + os.popen("mail -s %s %s" % (quote_shell_string(subject), quote_shell_string(to)), "w").write(body) def execute_script(body): p = subprocess.Popen( @@ -2412,7 +3211,7 @@ def replication_pull(): # We distinguish two modes: # 1. slave mode: just pull the current state from the master. - # if the master is not reachable then decide wether to + # if the master is not reachable then decide wether to # switch to takeover mode. # 2. takeover mode: if automatic fallback is enabled and the # time frame for that has not yet ellapsed, then try to @@ -2430,7 +3229,7 @@ if need_sync: with lock_eventstatus: with lock_configuration: - + try: new_state = get_state_from_master() replication_update_state(new_state) @@ -2439,7 +3238,7 @@ g_slave_status["last_sync"] = now g_slave_status["success"] = True - # Fall back to slave mode after successful sync + # Fall back to slave mode after successful sync # (time frame has already been checked) if mode == "takeover": if g_slave_status["last_master_down"] == None: @@ -2467,7 +3266,7 @@ offline = now - g_slave_status["last_sync"] if offline < repl_settings["takeover"]: if repl_settings.get("logging"): - log("Replication: no takeover yet, still %d seconds to wait" % + log("Replication: no takeover yet, still %d seconds to wait" % (repl_settings["takeover"] - offline)) else: log("Replication: master not reached for %d seconds, taking over!" % @@ -2540,7 +3339,7 @@ except Exception, e: raise Exception("Cannot connect to event daemon: %s" % e) - + def save_slave_status(): global g_slave_status @@ -2558,7 +3357,7 @@ g_slave_status = eval(file(path).read()) except: g_slave_status = { - "last_sync" : 0, # Time of last successfull sync + "last_sync" : 0, # Time of last successfull sync "last_master_down" : None, "mode" : "sync", "average_sync_time" : None, @@ -2572,7 +3371,7 @@ os.remove(path) try: del g_slave_status - except: + except: pass #. 
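The quote_shell_string hunk above fixes a real quoting bug: the old shell_escape replaced each ' but never re-opened the single quote and did not wrap the whole string, so the mail command line could be broken by crafted event text. A quick demonstration of the corrected technique; the function body is the one from the hunk, the sample strings are illustrative:

    def quote_shell_string(s):
        # Close the quote, emit a double-quoted ', reopen the quote.
        return "'" + s.replace("'", "'\"'\"'") + "'"

    print(quote_shell_string("disk /dev/sda is failing"))
    # -> 'disk /dev/sda is failing'
    print(quote_shell_string("it's broken"))
    # -> 'it'"'"'s broken'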
@@ -2594,6 +3393,7 @@ "debug_rules" : False, "rule_optimizer" : True, "log_rulehits" : False, + "log_messages" : False, "retention_interval" : 60, "housekeeping_interval" : 60, "statistics_interval" : 5, @@ -2602,6 +3402,9 @@ "remote_status" : None, "socket_queue_len" : 10, "eventsocket_queue_len" : 10, + "hostname_translation" : {}, + "archive_orphans" : False, + "archive_mode" : "file", } main_file = g_config_dir + "/mkeventd.mk" if not os.path.exists(main_file): @@ -2615,6 +3418,11 @@ for path in [ main_file ] + list_of_files: execfile(path, g_config, g_config) + # Configure the auto deleting indexes in the DB when mongodb is enabled + if g_config['archive_mode'] == 'mongodb': + update_mongodb_indexes() + update_mongodb_history_lifetime() + # Are we a replication slave? Parts of the configuration # will be overridden by values from the master. load_slave_status() @@ -2653,9 +3461,14 @@ -C, --configdir Path to directory where mkevent.mk lives -S, --socket P Path to unix socket for querying status -E, --eventsocket P Path to unix socket for receiving events (optional) + -L, --livestatus P Path to livestatus socket of monitoring core (optional) -P, --pipe P Path to pipe for receiving events - --syslog Enable builtin syslog server + --syslog Enable builtin UDP syslog server --syslog-fd FD Do not open UDP port 514, but inherit it via this FD + --syslog-tcp Enable builtin TCP syslog server + --syslog-tcp-fd FD Do not open TCP port 514, but inherit it via this FD + --snmptrap Enable builtin snmptrap server + --snmptrap-fd FD Do not open UDP port 162, but inherit it via this FD --statedir D Path to directory for saving status --logdir D Path to directory where mkeventd.log is created -p, --pidfile Path to PID file @@ -2663,16 +3476,17 @@ """) if os.getenv("OMD_ROOT"): - sys.stdout.write("""You are running OMD, which is generally a good idea. + sys.stdout.write("""You are running OMD, which is generally a good idea. 
The following defaults are set: - Config dir: %(g_config_dir)s - Unix socket: %(g_socket_path)s - Event socket: %(g_eventsocket_path)s - Event Pipe: %(g_pipe_path)s - PID file: %(g_pid_file)s - Log file: %(g_logfile_path)s - Status dir: %(g_state_dir)s + Config dir: %(g_config_dir)s + Unix socket: %(g_socket_path)s + Event socket: %(g_eventsocket_path)s + Livestatus socket %(g_livestatus_socket)s + Event Pipe: %(g_pipe_path)s + PID file: %(g_pid_file)s + Log file: %(g_logfile_path)s + Status dir: %(g_state_dir)s """ % globals()) @@ -2701,31 +3515,37 @@ opt_profile = {} opt_syslog = False opt_syslog_fd = None +opt_syslog_tcp = False +opt_syslog_tcp_fd = None +opt_snmptrap = False +opt_snmptrap_fd = None # Set default values for options omd_root = os.getenv("OMD_ROOT") if omd_root: - g_config_dir = omd_root + "/etc/check_mk" - g_socket_path = omd_root + "/tmp/run/mkeventd/status" - g_eventsocket_path = omd_root + "/tmp/run/mkeventd/eventsocket" - g_pipe_path = omd_root + "/tmp/run/mkeventd/events" - g_pid_file = omd_root + "/tmp/run/mkeventd/pid" - g_logfile_path = omd_root + "/var/log/mkeventd.log" - g_state_dir = omd_root + "/var/mkeventd" + g_config_dir = omd_root + "/etc/check_mk" + g_socket_path = omd_root + "/tmp/run/mkeventd/status" + g_eventsocket_path = omd_root + "/tmp/run/mkeventd/eventsocket" + g_livestatus_socket = omd_root + "/tmp/run/live" + g_pipe_path = omd_root + "/tmp/run/mkeventd/events" + g_pid_file = omd_root + "/tmp/run/mkeventd/pid" + g_logfile_path = omd_root + "/var/log/mkeventd.log" + g_state_dir = omd_root + "/var/mkeventd" else: - g_config_dir = "/etc/check_mk" - g_socket_path = None - g_eventsocket_path = None - g_pipe_path = None - g_pid_file = "/var/run/mkeventd.pid" - g_logfile_path = "/var/log/mkeventd.log" - g_state_dir = "/var/lib/mkeventd" + g_config_dir = "/etc/check_mk" + g_socket_path = None + g_eventsocket_path = None + g_livestatus_socket = None + g_pipe_path = None + g_pid_file = "/var/run/mkeventd.pid" + g_logfile_path = "/var/log/mkeventd.log" + g_state_dir = "/var/lib/mkeventd" -short_options = "hvVgS:P:p:C:E:" +short_options = "hvVgS:P:p:C:E:L:" long_options = [ "help", "version", "verbose", "debug", "foreground", "socket=", "eventsocket=", "pipe=", "pidfile=", "statedir=", "configdir=", "logdir=", "profile-status", "profile-event", "debug-locking", - "syslog", "syslog-fd=" ] + "syslog", "syslog-fd=", "syslog-tcp", "syslog-tcp-fd=", "snmptrap", "snmptrap-fd=", "livestatus=" ] try: opts, args = getopt.getopt(sys.argv[1:], short_options, long_options) @@ -2744,12 +3564,22 @@ g_socket_path = a elif o in [ '-E', '--eventsocket' ]: g_eventsocket_path = a + elif o in [ '-L', '--livestatus' ]: + g_livestatus_socket = a elif o in [ '-P', '--pipe' ]: g_pipe_path = a elif o == '--syslog': opt_syslog = True elif o == '--syslog-fd': opt_syslog_fd = int(a) + elif o == '--syslog-tcp': + opt_syslog_tcp = True + elif o == '--syslog-tcp-fd': + opt_syslog_tcp_fd = int(a) + elif o == '--snmptrap': + opt_snmptrap = True + elif o == '--snmptrap-fd': + opt_snmptrap_fd = int(a) elif o in [ '-p', '--pidfile' ]: g_pid_file = a elif o in [ '-C', '--configdir' ]: @@ -2770,6 +3600,13 @@ sys.stdout.write("mkeventd version %s\n" % VERSION) sys.exit(0) + # Handler specific imports + if opt_snmptrap: + #from pysnmp.carrier.asynsock.dispatch import AsynsockDispatcher as pysnmp_AsynsockDispatcher + #from pysnmp.carrier.asynsock.dgram import udp as pysnmp_udp + from pysnmp.proto import api as pysnmp_api + from pyasn1.codec.ber import decoder as pyasn_decoder + if not g_pipe_path: 
bail_out("Please specify the path to the pipe (using -P).") @@ -2787,7 +3624,7 @@ if os.path.exists(g_pid_file): old_pid = int(file(g_pid_file).read()) if process_exists(old_pid): - bail_out("Old PID file %s still existing and mkeventd still running with PID %d." % + bail_out("Old PID file %s still existing and mkeventd still running with PID %d." % (g_pid_file, old_pid)) os.remove(g_pid_file) log("Removed orphaned PID file %s (process %d not running anymore)." % (g_pid_file, old_pid)) @@ -2833,7 +3670,7 @@ # Now let's go... run_eventd() - + # We reach this point, if the server has been killed by # a signal or hitting Ctrl-C (in foreground mode) @@ -2853,6 +3690,13 @@ g_event_server.output_hash_stats() + # Closing fds which might be still open + for fd in [ opt_syslog_fd, opt_syslog_tcp_fd, opt_snmptrap_fd ]: + try: + os.close(fd) + except: + pass + log("Successfully shut down.") os.remove(g_pid_file) sys.exit(0) diff -Nru check-mk-1.2.2p3/bintec_brrp_status check-mk-1.2.6p12/bintec_brrp_status --- check-mk-1.2.2p3/bintec_brrp_status 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/bintec_brrp_status 2015-06-24 09:48:36.000000000 +0000 @@ -0,0 +1,67 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. 
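The new bintec_brrp_status check that follows keys its services on a shortened OID suffix: bintec_brrp_status_compose_item keeps only the portion before the first dot. A sketch of that reduction; compose_item and the sample id are illustrative:

    import re

    def compose_item(brrp_id):
        # re.sub deletes everything from the first dot onwards.
        return re.sub(r"\..*", "", brrp_id)

    print(compose_item("23.1.4"))  # -> "23"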
+ +def bintec_brrp_status_compose_item(brrp_id): + return re.sub("\..*", "", brrp_id) + +def inventory_bintec_brrp_status(info): + inventory = [] + for brrp_id, brrp_status in info: + inventory.append( (bintec_brrp_status_compose_item(brrp_id), None ) ) + return inventory + +def check_bintec_brrp_status(item, _no_params, info): + for brrp_id, brrp_status in info: + brrp_id = bintec_brrp_status_compose_item(brrp_id) + if brrp_id == item: + if brrp_status == "1": + message = "Status for %s is initialize" % brrp_id + status = 1 + elif brrp_status == "2": + message = "Status for %s is backup" % brrp_id + status = 0 + elif brrp_status == "3": + message = "Status for %s is master" % brrp_id + status = 0 + else: + message = "Status for %s is at unknown value %s" % (brrp_id, brrp_status) + status = 3 + + return status, message + + return 3, "Status for %s not found" % item + +check_info["bintec_brrp_status"] = { + "check_function" : check_bintec_brrp_status, + "inventory_function" : inventory_bintec_brrp_status, + "service_description" : "BRRP Status %s", + "has_perfdata" : False, + "snmp_info" : ( ".1.3.6.1.4.1.272.4.40.1.1", [ + OID_END, + 4, # biboBrrpOperState + ]), + "snmp_scan_function" : lambda oid: oid(".1.3.6.1.2.1.1.2.0").startswith(".1.3.6.1.4.1.272.4") +} diff -Nru check-mk-1.2.2p3/bintec_info check-mk-1.2.6p12/bintec_info --- check-mk-1.2.2p3/bintec_info 2013-11-05 09:23:07.000000000 +0000 +++ check-mk-1.2.6p12/bintec_info 2015-06-24 09:48:36.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. @@ -30,18 +30,23 @@ def check_bintec_info(checktype, params, info): if len(info[0]) < 2: - return (3, "UNKNOWN - No data retrieved") + return (3, "No data retrieved") sw_version, serial = info[0] return (0, "Serial: %s, Software: %s" % (serial, sw_version)) -check_info['bintec_info'] = (check_bintec_info, "Bintec Info", 0, inventory_bintec_info) # 1.3.6.1.4.1.272.4.1.26.0 SW Version # 1.3.6.1.4.1.272.4.1.31.0 S/N -snmp_info['bintec_info'] = ( ".1.3.6.1.4.1.272.4.1", [ - '26.0', # SW Version - '31.0', # S/N - ] ) # This check works on all SNMP hosts -snmp_scan_functions['bintec_info'] = lambda oid: oid(".1.3.6.1.2.1.1.2.0") == \ - ".1.3.6.1.4.1.272.4.200.83.88.67.66.0.0" + +check_info["bintec_info"] = { + 'check_function': check_bintec_info, + 'inventory_function': inventory_bintec_info, + 'service_description': 'Bintec Info', + # 1.3.6.1.4.1.272.4.1.31.0 S/N + # 1.3.6.1.4.1.272.4.1.26.0 SW Version + 'snmp_info': ('.1.3.6.1.4.1.272.4.1', ['26.0', '31.0']), + 'snmp_scan_function': lambda oid: oid(".1.3.6.1.2.1.1.2.0") in [ + ".1.3.6.1.4.1.272.4.200.83.88.67.66.0.0", + ".1.3.6.1.4.1.272.4.158.82.78.66.48.0.0", ] +} diff -Nru check-mk-1.2.2p3/bintec_sensors check-mk-1.2.6p12/bintec_sensors --- check-mk-1.2.2p3/bintec_sensors 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/bintec_sensors 2015-09-21 10:59:54.000000000 +0000 @@ -0,0 +1,162 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . 
\ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. + +def bintec_sensors_scan(oid): + return oid(".1.3.6.1.2.1.1.2.0").startswith(".1.3.6.1.4.1.272.4") + +bintec_sensors_info = ( ".1.3.6.1.4.1.272.4.17.7.1.1.1", [ + 2, # sensorNumber + 3, # sensorDescr + 4, # sensorType + 5, # sensorValue + 7, # sensorMeasurementUnit + ]) + +# .--fans----------------------------------------------------------------. +# | __ | +# | / _| __ _ _ __ ___ | +# | | |_ / _` | '_ \/ __| | +# | | _| (_| | | | \__ \ | +# | |_| \__,_|_| |_|___/ | +# | | +# '----------------------------------------------------------------------' + +bintec_sensors_fan_default_levels = { "lower": ( 1000, 2000), "upper": (8000, 8400) } + +def inventory_bintec_sensors_fan(info): + inventory = [] + for sensor_id, sensor_descr, sensor_type, sensor_value, sensor_unit in info: + if sensor_type == "2": + inventory.append( (sensor_descr, "bintec_sensors_fan_default_levels" ) ) + return inventory + +def check_bintec_sensors_fan(item, params, info): + for sensor_id, sensor_descr, sensor_type, sensor_value, sensor_unit in info: + if sensor_descr == item: + status = 0 + sensor_value = int(sensor_value) + if sensor_value <= params["lower"][1] or sensor_value >= params["upper"][0]: + status = 1 + if sensor_value <= params["lower"][0] or sensor_value >= params["upper"][1]: + status = 2 + + message = "%s is at %s %s" % (sensor_descr, sensor_value, sensor_unit) + perfdata = [ ("rpm", sensor_value, "%d:%d" % (params["lower"][1], params["upper"][0]), \ + "%d:%d" % (params["lower"][0], params["upper"][1]), 0) ] + + return status, message, perfdata + + return 3, "Sensor %s not found" % item + +check_info["bintec_sensors.fan"] = { + "check_function" : check_bintec_sensors_fan, + "inventory_function" : inventory_bintec_sensors_fan, + "service_description" : "%s", + "group" : "hw_fans", + "has_perfdata" : True, + "snmp_info" : bintec_sensors_info, + "snmp_scan_function" : bintec_sensors_scan, +} + +#. +# .--temp----------------------------------------------------------------. 
+# | _ | +# | | |_ ___ _ __ ___ _ __ | +# | | __/ _ \ '_ ` _ \| '_ \ | +# | | || __/ | | | | | |_) | | +# | \__\___|_| |_| |_| .__/ | +# | |_| | +# '----------------------------------------------------------------------' + +bintec_sensors_temp_default_levels = (35, 40) + + +def inventory_bintec_sensors_temp(info): + inventory = [] + for sensor_id, sensor_descr, sensor_type, sensor_value, sensor_unit in info: + if sensor_type == "1": + inventory.append( (sensor_descr, "bintec_sensors_temp_default_levels" ) ) + return inventory + + +def check_bintec_sensors_temp(item, params, info): + for sensor_id, sensor_descr, sensor_type, sensor_value, sensor_unit in info: + if sensor_descr == item: + return check_temperature(int(sensor_value), params) + + return 3, "Sensor not found in SNMP data" + + +check_info["bintec_sensors.temp"] = { + "check_function" : check_bintec_sensors_temp, + "inventory_function" : inventory_bintec_sensors_temp, + "service_description" : "Temperature %s", + "group" : "hw_temperature", + "has_perfdata" : True, + "snmp_info" : bintec_sensors_info, + "snmp_scan_function" : bintec_sensors_scan, + "includes" : [ "temperature.include" ], +} + +#. +# .--voltage-------------------------------------------------------------. +# | _ _ | +# | __ _____ | | |_ __ _ __ _ ___ | +# | \ \ / / _ \| | __/ _` |/ _` |/ _ \ | +# | \ V / (_) | | || (_| | (_| | __/ | +# | \_/ \___/|_|\__\__,_|\__, |\___| | +# | |___/ | +# '----------------------------------------------------------------------' + +def inventory_bintec_sensors_voltage(info): + inventory = [] + for sensor_id, sensor_descr, sensor_type, sensor_value, sensor_unit in info: + if sensor_type == "3": + inventory.append( (sensor_descr, None ) ) + return inventory + +def check_bintec_sensors_voltage(item, _no_params, info): + for sensor_id, sensor_descr, sensor_type, sensor_value, sensor_unit in info: + if sensor_descr == item: + sensor_value = int(sensor_value) / 1000.0 + + message = "%s is at %s V" % (sensor_descr, sensor_value) + perfdata = [ ("voltage", str(sensor_value)+"V") ] + + return 0, message, perfdata + + return 3, "Sensor %s not found" % item + +check_info["bintec_sensors.voltage"] = { + "check_function" : check_bintec_sensors_voltage, + "inventory_function" : inventory_bintec_sensors_voltage, + "service_description" : "Voltage %s", + "has_perfdata" : True, + "snmp_info" : bintec_sensors_info, + "snmp_scan_function" : bintec_sensors_scan, +} + +#. diff -Nru check-mk-1.2.2p3/bintec_sensors.fan check-mk-1.2.6p12/bintec_sensors.fan --- check-mk-1.2.2p3/bintec_sensors.fan 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/bintec_sensors.fan 2015-09-21 10:59:54.000000000 +0000 @@ -0,0 +1,38 @@ +title: Bintec Routers: Fan Speed +agents: snmp +catalog: hw/network/bintec +license: GPL +distribution: check_mk +description: + Checks the Fan Speed of Bintec Routers. + + Returns {WARN} or {CRIT} if the speed is above or equal given levels or it is + below or equal given levels. + +item: + The sensorDescr from SNMP. + +examples: + # set new default levels + bintec_sensors_fan_default_levels = { "lower": ( 1, 1000), "upper": (9000, 10000) } + + # check Fan 1 of router1 with default levels + checks += [ + ("router1", "bintec_sensors.fan", 'Fan 1', bintec_sensors_fan_default_levels) + ] + +perfdata: + One value: The speed of the fan in rpm, together with the upper and lower + levels for {WARN} and {CRIT}. + +inventory: + Creates one check per fan, concrete: One check for every sensor of sensorType 2 + (fan). 
+ +[parameters] +dict: key "lower" references a tuple with lower crit level and lower warn level. + key "upper" references a tuple with upper warn level and upper crit level. + +[configuration] +bintec_sensors_fan_default_levels (dict): + defaults to { "lower": ( 1000, 2000), "upper": (8000, 8400) diff -Nru check-mk-1.2.2p3/bintec_sensors.temp check-mk-1.2.6p12/bintec_sensors.temp --- check-mk-1.2.2p3/bintec_sensors.temp 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/bintec_sensors.temp 2015-06-24 09:48:36.000000000 +0000 @@ -0,0 +1,35 @@ +title: Bintec Routers: Temperature Sensors +agents: snmp +catalog: hw/network/bintec +license: GPL +distribution: check_mk +description: + Checks the Temperature Sensors of Bintec Routers. + + Returns {WARN} or {CRIT} if the temperature is above or equal given levels. + +item: + The sensorDescr from SNMP. + +examples: + # set new default levels + bintec_sensors_temp_default_levels = (50, 60) + + # check temperature sensor "Left 1" of router1 with default levels + checks += [ + ("router1", "bintec_sensors.temp", 'Left 1', bintec_sensors_temp_default_levels), + ] + +perfdata: + One value: The temperature in degree celsius, together with it's levels for {WARN} + and {CRIT}. + +inventory: + Creates one check per temperature sensor (sensorType 1). + +[parameters] +tuple (int, int): The two values are the levels for {WARN} and {CRIT}. + +[configuration] +bintec_sensors_temp_default_levels (int, int): The default levels for {WARN} + and {CRIT}, defaults to (35, 40). diff -Nru check-mk-1.2.2p3/bintec_sensors.voltage check-mk-1.2.6p12/bintec_sensors.voltage --- check-mk-1.2.2p3/bintec_sensors.voltage 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/bintec_sensors.voltage 2015-06-24 09:48:36.000000000 +0000 @@ -0,0 +1,18 @@ +title: Bintec Routers: Voltage Sensors +agents: snmp +catalog: hw/network/bintec +license: GPL +distribution: check_mk +description: + Displays the Voltage measured at different Voltage Sensors of Bintec Routers. + + The check is for display only and always returns {OK}. + +item: + The sensorDescr from SNMP. + +perfdata: + One value: The voltage. + +inventory: + Creates one check per voltage sensor (sensorType 3). diff -Nru check-mk-1.2.2p3/blade_bays check-mk-1.2.6p12/blade_bays --- check-mk-1.2.2p3/blade_bays 2013-11-05 09:23:07.000000000 +0000 +++ check-mk-1.2.6p12/blade_bays 2015-09-21 10:59:54.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. 
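A pattern that repeats through the blade_* hunks below: the legacy four-tuple check declaration is rewritten as an explicit dictionary. A sketch of the field mapping, with 'example' and the stub functions as placeholders:

    check_info = {}

    def inventory_example(info):
        return [(line[0], None) for line in info]

    def check_example(item, params, info):
        return (0, "everything fine")

    # Legacy form:
    #   check_info['example'] = (check_example, "Example %s", 1, inventory_example)
    # i.e. (check function, service description, has-perfdata flag, inventory function).
    check_info["example"] = {
        'check_function':      check_example,      # tuple slot 0
        'inventory_function':  inventory_example,  # tuple slot 3
        'service_description': 'Example %s',       # tuple slot 1
        'has_perfdata':        True,               # tuple slot 2 == 1
    }
    # A former checkgroup_of['example'] = "raid" entry becomes 'group': "raid".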
@@ -55,22 +55,27 @@ state = saveint(line[1]) type = line[2].split('(')[0] if state == 1: - return (0, "OK - State %s (Type: %s, ID: %s)" % + return (0, "State %s (Type: %s, ID: %s)" % (blade_bays_module_state.get(state, 'Unhandled'), type, line[3])) elif state == 2: - return (1, "WARN - Not present") + return (1, "Not present") + elif state == 3: + return (1, "Device is switched off") elif state == 0: - return (1, "WARN - Device is in standby") + return (1, "Device is in standby") else: - return (2, "CRIT - invalid state %d" % state) - return (3, "UNKNOWN - no data for '%s' in SNMP info" % item) + return (2, "invalid state %d" % state) + return (3, "no data for '%s' in SNMP info" % item) -check_info['blade_bays'] = (check_blade_bays, "BAY %s", 0, inventory_blade_bays) -snmp_info['blade_bays'] = ( - ".1.3.6.1.4.1.2.3.51.2.2.10", [ - "2", # powerDomain1 - "3", # powerDomain2 - ], [ "1.1.5", "1.1.6", "1.1.2", "1.1.1" ] ) # BLADE-MIB - -snmp_scan_functions['blade_bays'] = \ - lambda oid: re.match('BladeCenter( Advanced)* Management Module', oid(".1.3.6.1.2.1.1.1.0")) +check_info["blade_bays"] = { + 'check_function': check_blade_bays, + 'inventory_function': inventory_blade_bays, + 'service_description': 'BAY %s', + 'snmp_info': ( + ".1.3.6.1.4.1.2.3.51.2.2.10", [ + "2", # powerDomain1 + "3", # powerDomain2 + ], [ "1.1.5", "1.1.6", "1.1.2", "1.1.1" ] ), # BLADE-MIB + 'snmp_scan_function': \ + lambda oid: re.match('BladeCenter( Advanced)* Management Module', oid(".1.3.6.1.2.1.1.1.0")) != None, +} diff -Nru check-mk-1.2.2p3/blade_blades check-mk-1.2.6p12/blade_blades --- check-mk-1.2.2p3/blade_blades 2013-11-05 09:23:07.000000000 +0000 +++ check-mk-1.2.6p12/blade_blades 2015-09-21 10:59:54.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. 
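The blade_bays hunk above also teaches the check a new state 3 ("switched off"). The resulting mapping as a standalone sketch; bay_state is an illustrative name and the messages are abbreviated relative to the real output:

    def bay_state(state):
        # From the hunk above: 1 is healthy, 2/3/0 degrade to WARN with a
        # specific message, anything else is CRIT.
        if state == 1:
            return (0, "module present and powered")
        elif state == 2:
            return (1, "Not present")
        elif state == 3:
            return (1, "Device is switched off")
        elif state == 0:
            return (1, "Device is in standby")
        return (2, "invalid state %d" % state)

    print(bay_state(3))  # -> (1, 'Device is switched off')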
@@ -37,8 +37,8 @@ blade_blades_health_labels = ('unknown', 'good', 'warning', 'bad') def inventory_blade_blades(info): - # find only blades that are present - return [ (line[0], '', line[1]) for line in info if line[1] == '1' ] + # find only blades that are powered on + return [ (line[0], '', line[1]) for line in info if line[2] == '1' ] def check_blade_blades(item, params, info): for line in info: @@ -54,24 +54,30 @@ ('Power', power_state, blade_blades_power_states[power_state], blade_blades_power_labels[power_state]), ('Health', health_state, blade_blades_health_states[health_state], blade_blades_health_labels[health_state])): output += '%s: %s' % (label, state_label) - if nag_state != 0: - output += ' (%s)' % nagios_state_names[nag_state] - if nag_state > state: - state = nag_state + if nag_state == 1: + output += ' (!)' + elif nag_state == 2: + output += ' (!!)' + elif nag_state == 3: + output += ' (UNKNOWN)' + state = max(state, nag_state) output += ', ' - return (state, "%s - %s" % (nagios_state_names[state], output.rstrip(', '))) - return (3, "UNKNOWN - no data for '%s' in SNMP info" % item) + return (state, output.rstrip(', ')) + return (3, "no data for '%s' in SNMP info" % item) - -check_info['blade_blades'] = (check_blade_blades, "Blade %s", 0, inventory_blade_blades) -snmp_info['blade_blades'] = ( ".1.3.6.1.4.1.2.3.51.2.22.1.5.1.1", [ # BLADE-MIB - 2, # bladeId - 3, # bladeExists - 4, # bladePowerState - 5, # bladeHealthState - 6, # bladeName - ]) - -snmp_scan_functions['blade_blades'] = \ - lambda oid: re.match('BladeCenter( Advanced)* Management Module', oid(".1.3.6.1.2.1.1.1.0")) +check_info["blade_blades"] = { + 'check_function': check_blade_blades, + 'inventory_function': inventory_blade_blades, + 'service_description': 'Blade %s', + 'snmp_info': ( + ".1.3.6.1.4.1.2.3.51.2.22.1.5.1.1", [ # BLADE-MIB + 2, # bladeId + 3, # bladeExists + 4, # bladePowerState + 5, # bladeHealthState + 6, # bladeName + ]), + 'snmp_scan_function': \ + lambda oid: re.match('BladeCenter( Advanced)* Management Module', oid(".1.3.6.1.2.1.1.1.0")) != None, +} diff -Nru check-mk-1.2.2p3/blade_blowers check-mk-1.2.6p12/blade_blowers --- check-mk-1.2.2p3/blade_blowers 2013-11-05 09:23:07.000000000 +0000 +++ check-mk-1.2.6p12/blade_blowers 2015-09-21 10:59:54.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. 
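blade_blades above drops the nagios_state_names lookup in favour of the (!) / (!!) suffix convention used elsewhere in Check_MK output. Condensed into a helper; marker and the "good" labels are illustrative:

    def marker(nag_state):
        # 0 -> no suffix, 1 -> (!), 2 -> (!!), 3 -> (UNKNOWN)
        return {0: '', 1: ' (!)', 2: ' (!!)', 3: ' (UNKNOWN)'}[nag_state]

    output, state = [], 0
    for label, nag_state in [('Power', 0), ('Health', 1)]:
        output.append('%s: good%s' % (label, marker(nag_state)))
        state = max(state, nag_state)
    print("%d %s" % (state, ', '.join(output)))  # -> 1 Power: good, Health: good (!)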
@@ -92,13 +92,19 @@ pass if state == "1": - return (0, "OK - " + output, perfdata) + return (0, output, perfdata) else: - return (2, "CRIT - " + output, perfdata) + return (2, output, perfdata) -check_info['blade_blowers'] = ( check_blade_blowers, "Blower %s", 1, inventory_blade_blowers) -snmp_info['blade_blowers'] = ( ".1.3.6.1.4.1.2.3.51.2.2", [3] ) -snmp_scan_functions['blade_blowers'] = \ - lambda oid: re.match('BladeCenter( Advanced)* Management Module', oid(".1.3.6.1.2.1.1.1.0")) + +check_info["blade_blowers"] = { + 'check_function': check_blade_blowers, + 'inventory_function': inventory_blade_blowers, + 'service_description': 'Blower %s', + 'has_perfdata': True, + 'snmp_info': ('.1.3.6.1.4.1.2.3.51.2.2', [3]), + 'snmp_scan_function': \ + lambda oid: re.match('BladeCenter( Advanced)* Management Module', oid(".1.3.6.1.2.1.1.1.0")) != None, +} diff -Nru check-mk-1.2.2p3/blade_bx_blades check-mk-1.2.6p12/blade_bx_blades --- check-mk-1.2.2p3/blade_bx_blades 2013-11-05 09:23:07.000000000 +0000 +++ check-mk-1.2.6p12/blade_bx_blades 2015-06-24 09:48:36.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. @@ -33,9 +33,9 @@ if blade_status == 4 or blade_status == 5: state = 2 - return (state, nagios_state_names[state] + " - Blade %s %s %s Status %s " % (blade_id, blade_name, blade_serial, blade_status_codes.get(blade_status, 1))) + return (state, "Blade %s %s %s Status %s " % (blade_id, blade_name, blade_serial, blade_status_codes.get(blade_status, 1))) - return (3, "UNKNOWN - no data for '%s' in SNMP info" % item) + return (3, "no data for '%s' in SNMP info" % item) check_info['blade_bx_blades'] = { "check_function" : check_blade_bx_blades, @@ -48,5 +48,6 @@ 5, # Serialnumber 21, # Name ]), - "snmp_scan_function" : lambda oid: "BX600" in oid(".1.3.6.1.2.1.1.1.0"), + "snmp_scan_function" : lambda oid: "BX600" in oid(".1.3.6.1.2.1.1.1.0") \ + or oid(".1.3.6.1.2.1.1.2.0") == ".1.3.6.1.4.1.7244.1.1.1", } diff -Nru check-mk-1.2.2p3/blade_bx_load check-mk-1.2.6p12/blade_bx_load --- check-mk-1.2.2p3/blade_bx_load 2013-11-05 09:23:07.000000000 +0000 +++ check-mk-1.2.6p12/blade_bx_load 2015-06-24 09:48:36.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. 
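blade_bx_blades above, and several blade_bx_* hunks below, widen the SNMP scan: besides matching "BX600" in sysDescr, devices are now also accepted by sysObjectID .1.3.6.1.4.1.7244.1.1.1. The predicate in isolation; the fake walk data is illustrative:

    def blade_bx_scan(oid):
        # oid() is the lookup callable Check_MK hands to scan functions.
        return ("BX600" in oid(".1.3.6.1.2.1.1.1.0")                       # sysDescr
                or oid(".1.3.6.1.2.1.1.2.0") == ".1.3.6.1.4.1.7244.1.1.1") # sysObjectID

    fake_walk = {
        ".1.3.6.1.2.1.1.1.0": "some appliance",           # no "BX600" here
        ".1.3.6.1.2.1.1.2.0": ".1.3.6.1.4.1.7244.1.1.1",  # matched by OID instead
    }
    print(blade_bx_scan(lambda o: fake_walk.get(o, "")))  # -> True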
@@ -31,26 +31,20 @@ return [ (None, "blade_bx_cpuload_default_levels")] def check_blade_bx_load(item, params, info): - load = [] - for i in [ 0, 1, 2 ]: - load.append(float(info[i][2])) - - warn, crit = params - - perfdata = [ ('load' + str(z), l, warn, crit, 0 ) for (z,l) in [ (1,load[0]), (5,load[1]), (15, load[2]) ] ] - - if load[2] >= crit: - return (2, "CRIT - 15min Load %.2f (critical at %.2f)" % (load[2], crit), perfdata) - elif load[2] >= warn: - return (1, "WARN - 15min Load %.2f (warning at %.2f)" % (load[2], warn), perfdata) - else: - return (0, "OK - 15min Load %.2f " % (load[2]), perfdata) + return check_cpu_load_generic(params, [ float(l[2]) for l in info ]) check_info['blade_bx_load'] = { - "check_function" : check_blade_bx_load, - "inventory_function" : inventory_blade_bx_load , - "service_description" : "Load", - "has_perfdata" : True, - "snmp_info" : (".1.3.6.1.4.1.2021.10.1", [ 1, 2, 6 ]), - "snmp_scan_function" : lambda oid: "BX600" in oid(".1.3.6.1.2.1.1.1.0"), + "check_function" : check_blade_bx_load, + "inventory_function" : inventory_blade_bx_load , + "service_description" : "CPU load", + "has_perfdata" : True, + # Note: I'm not sure if this check is working at all. If yes, + # then the SNMP implementation of that device must be broken. + # It would use the same MIB as ucd_snmp_load, but with other + # semantics. Please compare. Please mail us an cmk --snmpwalk of + # such a device, if you have one. + "snmp_info" : (".1.3.6.1.4.1.2021.10.1", [ 1, 2, 6 ]), + "snmp_scan_function" : lambda oid: "BX600" in oid(".1.3.6.1.2.1.1.1.0"), + "group" : "cpu_load", + "includes" : ["cpu_load.include"], } diff -Nru check-mk-1.2.2p3/blade_bx_powerfan check-mk-1.2.6p12/blade_bx_powerfan --- check-mk-1.2.2p3/blade_bx_powerfan 2013-11-05 09:23:07.000000000 +0000 +++ check-mk-1.2.6p12/blade_bx_powerfan 2015-09-21 10:59:54.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. 
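blade_bx_load above now delegates to check_cpu_load_generic from cpu_load.include, called as check_cpu_load_generic(params, [load1, load5, load15]). A minimal stand-in that reproduces what the removed inline code did; this is a sketch only, the real include function handles more parameter formats:

    def check_cpu_load_generic(params, load):
        # Judge by the 15 min average, graph all three, as the old code did.
        # params is a plain (warn, crit) tuple in this sketch.
        warn, crit = params
        perfdata = [('load%d' % n, l, warn, crit, 0)
                    for n, l in zip((1, 5, 15), load)]
        if load[2] >= crit:
            return (2, "15min load %.2f (critical at %.2f)" % (load[2], crit), perfdata)
        elif load[2] >= warn:
            return (1, "15min load %.2f (warning at %.2f)" % (load[2], warn), perfdata)
        return (0, "15min load %.2f" % load[2], perfdata)

    print(check_cpu_load_generic((5.0, 10.0), [0.4, 1.2, 6.1])[0])  # -> 1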
@@ -48,18 +48,18 @@ speedperc_int = saveint(speedperc) if ctrlstate != "2": - return (2, "CRIT - Fan not present or poweroff", perfdata) + return (2, "Fan not present or poweroff", perfdata) elif status != "3": - return (2, "CRIT - Status not OK (Status %s)" % blade_bx_status(status) , perfdata) + return (2, "Status not OK (Status %s)" % blade_bx_status[status] , perfdata) elif speedperc_int <= crit_perc: - return (2, "CRIT - Speed at %d%% of max (crit at %d%%)" % (speedperc_int, crit_perc), perfdata) + return (2, "Speed at %d%% of max (crit at %d%%)" % (speedperc_int, crit_perc), perfdata) elif speedperc_int <= warn_perc: - return (1, "WARNING - Speed at %d%% of max (warning at %d%%)" % (speedperc_int, warn_perc), perfdata) + return (1, "Speed at %d%% of max (warning at %d%%)" % (speedperc_int, warn_perc), perfdata) else: - return (0, "OK - Speed at %s RPM (%d%% of max)" % (rpm, speedperc_int), perfdata) - return (3, "UNKNOWN - Device %s not found in SNMP data %s " % (item, perfdata ) ) + return (0, "Speed at %s RPM (%d%% of max)" % (rpm, speedperc_int), perfdata) + return (3, "Device %s not found in SNMP data %s " % (item, perfdata ) ) - return (3, "UNKNOWN - Device %s not found in SNMP data" % item) + return (3, "Device %s not found in SNMP data" % item) check_info['blade_bx_powerfan'] = { @@ -77,6 +77,7 @@ 7 # ctrlstate ]), - "snmp_scan_function" : lambda oid: "BX600" in oid(".1.3.6.1.2.1.1.1.0"), + "snmp_scan_function" : lambda oid: "BX600" in oid(".1.3.6.1.2.1.1.1.0") \ + or oid(".1.3.6.1.2.1.1.2.0") == ".1.3.6.1.4.1.7244.1.1.1", } diff -Nru check-mk-1.2.2p3/blade_bx_powermod check-mk-1.2.6p12/blade_bx_powermod --- check-mk-1.2.2p3/blade_bx_powermod 2013-11-05 09:23:07.000000000 +0000 +++ check-mk-1.2.6p12/blade_bx_powermod 2015-06-24 09:48:36.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. @@ -45,7 +45,7 @@ if status != 2: state = 2 - return (state, nagios_state_names[state] + " - %s Status is %s" % (product_name, power_status[status]) ) + return (state, "%s Status is %s" % (product_name, power_status[status]) ) return (3, "Module %s not found in SNMP info" % index) @@ -59,6 +59,7 @@ 2, # status 4, # product name ]), - "snmp_scan_function" : lambda oid: "BX600" in oid(".1.3.6.1.2.1.1.1.0"), + "snmp_scan_function" : lambda oid: "BX600" in oid(".1.3.6.1.2.1.1.1.0") \ + or oid(".1.3.6.1.2.1.1.2.0") == ".1.3.6.1.4.1.7244.1.1.1", } diff -Nru check-mk-1.2.2p3/blade_bx_temp check-mk-1.2.6p12/blade_bx_temp --- check-mk-1.2.2p3/blade_bx_temp 2013-11-05 09:23:07.000000000 +0000 +++ check-mk-1.2.6p12/blade_bx_temp 2015-06-24 09:48:36.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. 
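The blade_bx_powerfan hunk above carries a genuine bug fix, not just cosmetics: blade_bx_status is a mapping (blade_bx_temp calls blade_bx_status.get(...)), so the old call form blade_bx_status(status) could only raise TypeError instead of reporting the status. Demonstration; the mapping contents here are made up:

    blade_bx_status = {"1": "unknown", "2": "ok", "3": "not-ok"}  # illustrative values

    status = "2"
    print(blade_bx_status[status])   # subscripting the dict works: "ok"
    try:
        blade_bx_status(status)      # the old, broken call form
    except TypeError as e:
        print("old form fails: %s" % e)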
@@ -47,23 +47,23 @@ perfdata=[ ('temp', temp, level_warn, level_crit, "0", max_temp ) ] if crit_react != "2": - return (2, "CRIT - Temperature not present or poweroff", perfdata) + return (2, "Temperature not present or poweroff", perfdata) elif status != 3: - return (2, "CRIT - Status is %s" % blade_bx_status.get(status,1) , perfdata) + return (2, "Status is %s" % blade_bx_status.get(status,1) , perfdata) elif temp >= level_crit: - return (2, "CRIT - Temperature at %s%s " % (temp, unit), perfdata) + return (2, "Temperature at %s%s " % (temp, unit), perfdata) elif temp >= level_warn: - return (1, "WARNING - Temperature at %s%s " % (temp,unit), perfdata) + return (1, "Temperature at %s%s " % (temp,unit), perfdata) else: - return (0, "OK - Temperature at %s%s " % (temp,unit), perfdata) - return (3, "UNKNOWN - Device %s not found in SNMP data %s " % (item, perfdata ) ) + return (0, "Temperature at %s%s " % (temp,unit), perfdata) + return (3, "Device %s not found in SNMP data %s " % (item, perfdata ) ) - return (3, "UNKNOWN - Device %s not found in SNMP data" % item) + return (3, "Device %s not found in SNMP data" % item) check_info['blade_bx_temp'] = { "check_function" : check_blade_bx_temp, "inventory_function" : lambda info: [ (line[2], None) for line in info if int(line[1]) != 7] , - "service_description" : "Blade Temp %s", + "service_description" : "Temperature Blade %s", "has_perfdata" : True, "snmp_info" : (".1.3.6.1.4.1.7244.1.1.1.3.4.1.1", [ 1, # index @@ -74,7 +74,8 @@ 6, # temp 7, # crit react ]), - "snmp_scan_function" : lambda oid: "BX600" in oid(".1.3.6.1.2.1.1.1.0"), + "snmp_scan_function" : lambda oid: "BX600" in oid(".1.3.6.1.2.1.1.1.0") \ + or oid(".1.3.6.1.2.1.1.2.0") == ".1.3.6.1.4.1.7244.1.1.1", "group" : "hw_temperature", } diff -Nru check-mk-1.2.2p3/blade_health check-mk-1.2.6p12/blade_health --- check-mk-1.2.2p3/blade_health 2013-11-05 09:23:07.000000000 +0000 +++ check-mk-1.2.6p12/blade_health 2015-09-21 10:59:54.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. @@ -38,25 +38,26 @@ def check_blade_health(_no_item, _no_params, info): state = info[0][0] - descr = '' - if len(info[0]) > 1: - descr = ': %s' % info[1][1] + descr = ": " + ", ".join([ line[1] for line in info if len(line) > 1 ]) if state == "255": - return (0, "OK - State is good") + return (0, "State is good") elif state == "2": - return (2, "CRIT - State is degraded (non critical)" + descr) + return (2, "State is degraded (non critical)" + descr) elif state == "4": - return (1, "WARN - State is degraded (system level)" + descr) + return (1, "State is degraded (system level)" + descr) elif state == "0": - return (2, "CRIT - State is critical!" + descr) + return (2, "State is critical!" 
+ descr) else: - return (3, "UNKNOWN - Undefined state code %s%s" % (state, descr)) + return (3, "Undefined state code %s%s" % (state, descr)) -check_info['blade_health'] = ( check_blade_health, "Summary health state", 0, inventory_blade_health) -snmp_info['blade_health'] = ( ".1.3.6.1.4.1.2.3.51.2.2.7", [ '1.0', '2.1.3.1' ]) - -snmp_scan_functions['blade_health'] = \ - lambda oid: re.match('BladeCenter( Advanced)* Management Module', oid(".1.3.6.1.2.1.1.1.0")) +check_info["blade_health"] = { + 'check_function': check_blade_health, + 'inventory_function': inventory_blade_health, + 'service_description': 'Summary health state', + 'snmp_info': ('.1.3.6.1.4.1.2.3.51.2.2.7', ['1.0', '2.1.3.1']), + 'snmp_scan_function': \ + lambda oid: re.match('BladeCenter( Advanced)* Management Module', oid(".1.3.6.1.2.1.1.1.0")) != None, +} diff -Nru check-mk-1.2.2p3/blade_mediatray check-mk-1.2.6p12/blade_mediatray --- check-mk-1.2.2p3/blade_mediatray 2013-11-05 09:23:07.000000000 +0000 +++ check-mk-1.2.6p12/blade_mediatray 2015-09-21 10:59:54.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. @@ -30,20 +30,25 @@ def check_blade_mediatray(_no_item, _no_params, info): if len(info) < 1: - return (3, "UNKNOWN - no information about media tray in SNMP output") + return (3, "no information about media tray in SNMP output") present = info[0][0] communicating = info[0][1] if present != "1": - return (2, "CRIT - media tray not present") + return (2, "media tray not present") elif communicating != "1": - return (2, "CRIT - media tray not communicating") + return (2, "media tray not communicating") else: - return (0, "OK - media tray present and communicating") + return (0, "media tray present and communicating") -check_info['blade_mediatray'] = ( check_blade_mediatray, "Media tray", 0, inventory_blade_mediatray) -snmp_info['blade_mediatray'] = ( ".1.3.6.1.4.1.2.3.51.2.2.5.2", [74, 75]) -snmp_scan_functions['blade_mediatray'] = \ - lambda oid: re.match('BladeCenter( Advanced)* Management Module', oid(".1.3.6.1.2.1.1.1.0")) + +check_info["blade_mediatray"] = { + 'check_function': check_blade_mediatray, + 'inventory_function': inventory_blade_mediatray, + 'service_description': 'Media tray', + 'snmp_info': ('.1.3.6.1.4.1.2.3.51.2.2.5.2', [74, 75]), + 'snmp_scan_function': \ + lambda oid: re.match('BladeCenter( Advanced)* Management Module', oid(".1.3.6.1.2.1.1.1.0")) != None, +} diff -Nru check-mk-1.2.2p3/blade_misc check-mk-1.2.6p12/blade_misc --- check-mk-1.2.2p3/blade_misc 2013-11-05 09:23:07.000000000 +0000 +++ check-mk-1.2.6p12/blade_misc 1970-01-01 00:00:00.000000000 +0000 @@ -1,35 +0,0 @@ -#!/usr/bin/python -# -*- encoding: utf-8; py-indent-offset: 4 -*- -# +------------------------------------------------------------------+ -# | ____ _ _ __ __ _ __ | -# | / ___| |__ ___ ___| | __ | \/ | |/ / | -# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | -# | | |___| | | | __/ (__| < | | | | . \ | -# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | -# | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | -# +------------------------------------------------------------------+ -# -# This file is part of Check_MK. -# The official homepage is at http://mathias-kettner.de/check_mk. 
-# -# check_mk is free software; you can redistribute it and/or modify it -# under the terms of the GNU General Public License as published by -# the Free Software Foundation in version 2. check_mk is distributed -# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- -# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A -# PARTICULAR PURPOSE. See the GNU General Public License for more de- -# ails. You should have received a copy of the GNU General Public -# License along with GNU Make; see the file COPYING. If not, write -# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, -# Boston, MA 02110-1301 USA. - - -def check_blade_misc(item, params, info): - return (3, "UNKNOWN - Sorry. Check not implemented in this version.") - -check_info['blade_misc'] = ( - check_blade_misc, - "%s", - 0, - no_inventory_possible) diff -Nru check-mk-1.2.2p3/blade_powerfan check-mk-1.2.6p12/blade_powerfan --- check-mk-1.2.2p3/blade_powerfan 2013-11-05 09:23:07.000000000 +0000 +++ check-mk-1.2.6p12/blade_powerfan 2015-06-24 09:48:36.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. @@ -36,22 +36,28 @@ ('rpm', rpm ) ] speedperc_int = saveint(speedperc) if present != "1": - return (2, "CRIT - Fan not present", perfdata) + return (2, "Fan not present", perfdata) elif status != "1": - return (2, "CRIT - Status not OK", perfdata) + return (2, "Status not OK", perfdata) elif ctrlstate != "0": - return (2, "CRIT - Controller state not OK", perfdata) + return (2, "Controller state not OK", perfdata) elif speedperc_int <= crit_perc: - return (2, "CRIT - Speed at %d%% of max (crit at %d%%)" % (speedperc_int, crit_perc), perfdata) + return (2, "Speed at %d%% of max (crit at %d%%)" % (speedperc_int, crit_perc), perfdata) elif speedperc_int <= warn_perc: - return (1, "WARN - Speed at %d%% of max (warning at %d%%)" % (speedperc_int, warn_perc), perfdata) + return (1, "Speed at %d%% of max (warning at %d%%)" % (speedperc_int, warn_perc), perfdata) else: - return (0, "OK - Speed at %s RPM (%d%% of max)" % (rpm, speedperc_int), perfdata) + return (0, "Speed at %s RPM (%d%% of max)" % (rpm, speedperc_int), perfdata) - return (3, "UNKNOWN - Device %s not found in SNMP data" % item) + return (3, "Device %s not found in SNMP data" % item) -check_info['blade_powerfan'] = (check_blade_powerfan, "Power Module Cooling Device %s", 1, inventory_blade_powerfan) -snmp_info['blade_powerfan'] = ( ".1.3.6.1.4.1.2.3.51.2.2.6.1.1", [ 1, 2, 3, 4, 5, 6, 7 ] ) # BLADE-MIB -snmp_scan_functions['blade_powerfan'] = \ - lambda oid: re.match('BladeCenter( Advanced)* Management Module', oid(".1.3.6.1.2.1.1.1.0")) + +check_info["blade_powerfan"] = { + 'check_function': check_blade_powerfan, + 'inventory_function': inventory_blade_powerfan, + 'service_description': 'Power Module Cooling Device %s', + 'has_perfdata': True, + 'snmp_info': ('.1.3.6.1.4.1.2.3.51.2.2.6.1.1', [1, 2, 3, 4, 5, 6, 7]), + 'snmp_scan_function': \ + lambda oid: re.match('BladeCenter( Advanced)* Management Module', oid(".1.3.6.1.2.1.1.1.0")) != None, +} diff -Nru check-mk-1.2.2p3/blade_powermod check-mk-1.2.6p12/blade_powermod --- check-mk-1.2.2p3/blade_powermod 2013-11-05 09:23:07.000000000 +0000 +++ check-mk-1.2.6p12/blade_powermod 
2015-06-24 09:48:36.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. @@ -33,16 +33,21 @@ if line[0] == index: present, status, text = line[1:] if present != "1": - return (2, "CRIT - Not present") + return (2, "Not present") elif status != "1": - return (2, "CRIT - %s" % text) + return (2, "%s" % text) else: - return (0, "OK - %s" % text) + return (0, "%s" % text) return (3, "Module %s not found in SNMP info" % index) -check_info['blade_powermod'] = (check_blade_powermod, "Power Module %s", 0, inventory_blade_powermod) -snmp_info['blade_powermod'] = ( ".1.3.6.1.4.1.2.3.51.2.2.4.1.1", [ 1, 2, 3, 4 ]) # BLADE-MIB -snmp_scan_functions['blade_powermod'] = \ - lambda oid: re.match('BladeCenter( Advanced)* Management Module', oid(".1.3.6.1.2.1.1.1.0")) + +check_info["blade_powermod"] = { + 'check_function': check_blade_powermod, + 'inventory_function': inventory_blade_powermod, + 'service_description': 'Power Module %s', + 'snmp_info': ('.1.3.6.1.4.1.2.3.51.2.2.4.1.1', [1, 2, 3, 4]), + 'snmp_scan_function': \ + lambda oid: re.match('BladeCenter( Advanced)* Management Module', oid(".1.3.6.1.2.1.1.1.0")) != None, +} diff -Nru check-mk-1.2.2p3/bluecat_commandserver check-mk-1.2.6p12/bluecat_commandserver --- check-mk-1.2.2p3/bluecat_commandserver 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/bluecat_commandserver 2015-09-21 10:59:54.000000000 +0000 @@ -0,0 +1,13 @@ +title: Bluecat Adonis Command Server State +agents: snmp +catalog: hw/network/bluecat +license: GPL +distribution: check_mk +description: + This Check monitors the OperState of the Command Server Service on bluecat adonis devices. + The Check will return {OK} if the Service is running, {CRITICAL} if the state is fault, {WARNING} in each other case. + It is possible to change the behavior in WATO. + +inventory: + One service will be created + diff -Nru check-mk-1.2.2p3/bluecat_command_server check-mk-1.2.6p12/bluecat_command_server --- check-mk-1.2.2p3/bluecat_command_server 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/bluecat_command_server 2015-06-24 09:48:36.000000000 +0000 @@ -0,0 +1,65 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. 
If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. + +factory_settings["bluecat_command_server"] = { + "oper_states" : { + "warning" : [ 2, 3, 4 ], + "critical" : [ 5 ], + }, +} + +def inventory_bluecat_command_server(info): + return [(None, None)] + +def check_bluecat_command_server(item, params, info): + oper_state = int(info[0][0]) + oper_states = { + 1 : "running normally", + 2 : "not running", + 3 : "currently starting", + 4 : "currently stopping", + 5 : "fault" + + } + state = 0 + if oper_state in params['oper_states']['warning']: + state = 1 + elif oper_state in params['oper_states']['critical']: + state = 2 + yield state, "Command Server is %s" % oper_states[oper_state] + +check_info["bluecat_command_server"] = { + "check_function" : check_bluecat_command_server, + "inventory_function" : inventory_bluecat_command_server, + "service_description" : "Command Server", + "default_levels_variable" : "bluecat_command_server", + "group" : "bluecat_command_server", + "snmp_scan_function" : lambda oid: oid(".1.3.6.1.2.1.1.2.0") == ".1.3.6.1.4.1.13315.2.1", + "snmp_info" : (".1.3.6.1.4.1.13315.3.1.7.2.1", [ + 1, # bcnCommandServerSerOperState + ]) +} + diff -Nru check-mk-1.2.2p3/bluecat_dhcp check-mk-1.2.6p12/bluecat_dhcp --- check-mk-1.2.2p3/bluecat_dhcp 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/bluecat_dhcp 2015-06-24 09:48:36.000000000 +0000 @@ -0,0 +1,101 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. 
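The bluecat_command_server check above (and the bluecat_dhcp, bluecat_dns, bluecat_ha and bluecat_ntp checks that follow) all share the same parameter shape: a dict of operational-state lists, seeded from factory_settings and overridable via the named WATO group. A minimal, self-contained sketch of that mapping logic follows; map_oper_state and the inline params dict are illustrative only, not part of Check_MK's API.

def map_oper_state(oper_state, params):
    # States listed under "critical" map to CRIT, under "warning" to WARN;
    # anything not listed at all is treated as OK (state 0).
    if oper_state in params["oper_states"]["critical"]:
        return 2
    if oper_state in params["oper_states"]["warning"]:
        return 1
    return 0

# mirrors factory_settings["bluecat_command_server"] above
params = {"oper_states": {"warning": [2, 3, 4], "critical": [5]}}
assert map_oper_state(1, params) == 0   # running normally -> OK
assert map_oper_state(4, params) == 1   # currently stopping -> WARN
assert map_oper_state(5, params) == 2   # fault -> CRIT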
+ +factory_settings["bluecat_dhcp"] = { + "oper_states" : { + "warning" : [ 2, 3, 4 ], + "critical" : [ 5 ], + }, +} + +def inventory_bluecat_dhcp(info): + # Check if DHCP is not stopped on at least one host + for node, oper_state, leases in info: + if oper_state != '2': + return [(None, None)] + +def check_bluecat_dhcp(item, params, info): + oper_states = { + 1 : "running normally", + 2 : "not running", + 3 : "currently starting", + 4 : "currently stopping", + 5 : "fault" + } + + ok_on_node = False + states = {} + state = 0 + + # Collect states of nodes + for node, oper_state, leases in info: + oper_state, leases_sec = map(int, (oper_state, leases)) + temp_state = 0 + if oper_state in params['oper_states']['warning']: + state = max(state, 1) + temp_state = 1 + elif oper_state in params['oper_states']['critical']: + state = 2 + temp_state = 2 + else: + # If node one ok, the total check is ok + ok_on_node = node + # Only needed in cluster: + states[node] = {'oper_state' : oper_states[oper_state], 'leases_sec' : leases_sec, 'state' : temp_state } + + # Are we in a Cluster? + if len(info) > 1: + if ok_on_node: + node = ok_on_node + # One Node is OK: + yield 0, "DHCP is %s on %s" % (states[node]['oper_state'], node) + yield 0, '%s Leases per second' % (states[node]['leases_sec']), [ ('leases', states[node]['leases_sec']) ] + else: + # None of the nodes is ok: + for node, data in states.items(): + yield data['state'], "%s on %s" % (data['oper_state'], node) + return + + # Default behavior without Cluster + yield state, "DHCP is %s" % oper_states[oper_state] + yield 0, '%s Leases per second' % leases_sec, [ ('leases', leases_sec ) ] + + +check_info["bluecat_dhcp"] = { + "check_function" : check_bluecat_dhcp, + "inventory_function" : inventory_bluecat_dhcp, + "node_info" : True, + "service_description" : "DHCP", + "has_perfdata" : True, + "default_levels_variable" : "bluecat_dhcp", + "group" : "bluecat_dhcp", + "snmp_scan_function" : lambda oid: oid(".1.3.6.1.2.1.1.2.0") == ".1.3.6.1.4.1.13315.2.1", + "snmp_info" : (".1.3.6.1.4.1.13315.3.1.1.2.1", [ + 1, # dhcpOperState + 3, # dhcpLeaseStatsSuccess + ]) +} + diff -Nru check-mk-1.2.2p3/bluecat_dns check-mk-1.2.6p12/bluecat_dns --- check-mk-1.2.2p3/bluecat_dns 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/bluecat_dns 2015-06-24 09:48:36.000000000 +0000 @@ -0,0 +1,95 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. 
If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. + +factory_settings["bluecat_dns"] = { + "oper_states" : { + "warning" : [ 2, 3, 4 ], + "critical" : [ 5 ], + }, +} + +def inventory_bluecat_dns(info): + return [(None, None)] + +def check_bluecat_dns(item, params, info): + oper_states = { + 1 : "running normally", + 2 : "not running", + 3 : "currently starting", + 4 : "currently stopping", + 5 : "fault" + } + + if not info: + return + + ok_on_node = False + states = {} + state = 0 + + for node, oper_state in info: + oper_state = int(oper_state) + temp_state = 0 + if oper_state in params['oper_states']['warning']: + state = max(1, state) + temp_state = 1 + elif oper_state in params['oper_states']['critical']: + state = 2 + temp_state = 2 + else: + # If node one ok, the total check is ok + ok_on_node = node + # Only needed in cluster: + states[node] = {'oper_state' : oper_states[oper_state], 'state' : temp_state } + + # Are we in a Cluster? + if len(info) > 1: + if ok_on_node: + node = ok_on_node + # One Node is OK: + yield 0, "DNS is %s on %s" % (states[node]['oper_state'], node) + else: + # None of the nodes is ok: + for node, data in states.items(): + yield data['state'], "%s on %s" % (data['oper_state'], node) + return + + # Default behavior without Cluster + yield state, "DNS is %s" % oper_states[oper_state] + +check_info["bluecat_dns"] = { + "check_function" : check_bluecat_dns, + "inventory_function" : inventory_bluecat_dns, + "node_info" : True, + "service_description" : "DNS", + "default_levels_variable" : "bluecat_dns", + "group" : "bluecat_dns", + "snmp_scan_function" : lambda oid: oid(".1.3.6.1.2.1.1.2.0") == ".1.3.6.1.4.1.13315.2.1", + "snmp_info" : (".1.3.6.1.4.1.13315.3.1.2.2.1", [ + 1, # DnsSerOperState + ]) +} + diff -Nru check-mk-1.2.2p3/bluecat_dns_queries check-mk-1.2.6p12/bluecat_dns_queries --- check-mk-1.2.2p3/bluecat_dns_queries 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/bluecat_dns_queries 2015-06-24 09:48:36.000000000 +0000 @@ -0,0 +1,56 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. 
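The cluster branch of check_bluecat_dns above reduces the per-node results with a simple rule: if at least one node runs normally, the clustered service is OK and only that node is reported; otherwise every node's problem is shown with its own severity. A rough standalone sketch of that reduction (summarize_cluster and the example node names are hypothetical, not Check_MK API):

def summarize_cluster(states):
    # states: node name -> {"oper_state": readable text, "state": 0/1/2}
    ok_nodes = [node for node, data in states.items() if data["state"] == 0]
    if ok_nodes:
        node = ok_nodes[0]
        return [(0, "DNS is %s on %s" % (states[node]["oper_state"], node))]
    # no healthy node: report each node individually
    return [(data["state"], "%s on %s" % (data["oper_state"], node))
            for node, data in states.items()]

print(summarize_cluster({
    "node1": {"oper_state": "not running", "state": 1},
    "node2": {"oper_state": "running normally", "state": 0},
}))  # -> [(0, 'DNS is running normally on node2')]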
+ + +def inventory_bluecat_dns_queries(info): + return [(None, None)] + + +def check_bluecat_dns_queries(item, _no_params, info): + value_names = [ 'Success', 'Referral', 'NXRSet', + 'NXDomain', 'Recursion', 'Failure' ] + now = time.time() + for value, name in zip(map(int, info[0]), value_names): + rate = get_rate("bluecat_dns_queries." + name, now, value) + yield 0, "%s: %s" % (name, rate), [(name, rate)] + + +check_info["bluecat_dns_queries"] = { + "check_function" : check_bluecat_dns_queries, + "inventory_function" : inventory_bluecat_dns_queries, + "service_description" : "DNS Queries", + "has_perfdata" : True, + "snmp_scan_function" : lambda oid: oid(".1.3.6.1.2.1.1.2.0") == ".1.3.6.1.4.1.13315.2.1", + "snmp_info" : (".1.3.6.1.4.1.13315.3.1.2.2.2.1", [ + 1, # bcnDnsStatSrvQrySuccess + 2, # bcnDnsStatSrvQryReferral + 3, # bcnDnsStatSrvQryNXRRSet + 4, # bcnDnsStatSrvQryNXDomain + 5, # bcnDnsStatSrvQryRecursion + 6, # bcnDnsStatSrvQryFailure + ]) +} + diff -Nru check-mk-1.2.2p3/bluecat_ha check-mk-1.2.6p12/bluecat_ha --- check-mk-1.2.2p3/bluecat_ha 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/bluecat_ha 2015-06-24 09:48:36.000000000 +0000 @@ -0,0 +1,69 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. 
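check_bluecat_dns_queries above turns absolute SNMP counters into per-second rates via get_rate(). Roughly (and only roughly: the real helper persists its state across check runs and deals with counter wraps), it behaves like this simplified sketch; get_rate_sketch and _last_sample are made-up names.

import time

_last_sample = {}  # counter name -> (timestamp, value)

def get_rate_sketch(name, this_time, value):
    previous = _last_sample.get(name)
    _last_sample[name] = (this_time, value)
    if previous is None:
        return 0.0  # first sample, no rate computable yet
    last_time, last_value = previous
    if this_time <= last_time:
        return 0.0
    return (value - last_value) / float(this_time - last_time)

get_rate_sketch("Success", time.time(), 1000)         # prime the store
time.sleep(1)
print(get_rate_sketch("Success", time.time(), 1060))  # roughly 60.0 queries/s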
+ +factory_settings["bluecat_ha"] = { + "oper_states" : { + "warning" : [ 5, 6, 7 ], + "critical" : [ 8, 4 ], + }, +} + +def inventory_bluecat_ha(info): + # Only add if device is not in standalone mode + if info[0][0] != '1': + return [(None, None)] + +def check_bluecat_ha(item, params, info): + oper_state = int(info[0][0]) + oper_states = { + 1 : "standalone", + 2 : "active", + 3 : "passiv", + 4 : "stopped", + 5 : "stopping", + 6 : "becoming active", + 7 : "becomming passive", + 8 : "fault", + } + + state = 0 + if oper_state in params['oper_states']['warning']: + state = 1 + elif oper_state in params['oper_states']['critical']: + state = 2 + yield state, "State is %s" % oper_states[oper_state] + + +check_info["bluecat_ha"] = { + "check_function" : check_bluecat_ha, + "inventory_function" : inventory_bluecat_ha, + "service_description" : "HA State", + "default_levels_variable" : "bluecat_ha", + "group" : "bluecat_ha", + "snmp_scan_function" : lambda oid: oid(".1.3.6.1.2.1.1.2.0") == ".1.3.6.1.4.1.13315.2.1", + "snmp_info" : (".1.3.6.1.4.1.13315.3.1.5.2.1", [ 1 ] ), +} + diff -Nru check-mk-1.2.2p3/bluecat_ntp check-mk-1.2.6p12/bluecat_ntp --- check-mk-1.2.2p3/bluecat_ntp 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/bluecat_ntp 2015-06-24 09:48:36.000000000 +0000 @@ -0,0 +1,91 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. 
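One detail worth noting in bluecat_ha above: discovery is conditional, so standalone appliances (oper state '1') never get an HA service at all. A tiny illustration of that gate (discover_ha is a hypothetical name; the real inventory function simply returns None implicitly instead of an empty list):

def discover_ha(info):
    # info is the one-row SNMP table, e.g. [['2']] for an active HA member
    if info and info[0][0] != '1':
        return [(None, None)]
    return []

print(discover_ha([['1']]))  # standalone -> [] (no service discovered)
print(discover_ha([['2']]))  # active     -> [(None, None)]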
+ +factory_settings["bluecat_ntp"] = { + "oper_states" : { + "warning" : [ 2, 3, 4 ], + "critical" : [ 5 ], + }, + "stratum" : ( 8, 10 ), +} + +def inventory_bluecat_ntp(info): + if len(info) > 0 and info[0][0] != 'NULL': + return [(None, None)] + +def check_bluecat_ntp(item, params, info): + oper_state, sys_leap, stratum = map(int, info[0]) + oper_states = { + 1 : "running normally", + 2 : "not running", + 3 : "currently starting", + 4 : "currently stopping", + 5 : "fault" + + } + + state = 0 + if oper_state in params['oper_states']['warning']: + state = 1 + elif oper_state in params['oper_states']['critical']: + state = 2 + yield state, "Process is %s" % oper_states[oper_state] + + sys_leap_states = { + 0 : 'no Warning', + 1 : 'add second', + 10 : 'subtract second', + 11 : 'Alarm' + } + state = 0 + if sys_leap == 11: + state = 2 + elif sys_leap in [ 1, 10 ]: + state = 1 + yield state, "Sys Leap: %s" % sys_leap_states[sys_leap] + + warn, crit = params['stratum'] + state = 0 + if stratum >= crit: + state = 2 + elif stratum >= warn: + state = 1 + yield state, "Stratum: %s" % stratum + +check_info["bluecat_ntp"] = { + "check_function" : check_bluecat_ntp, + "inventory_function" : inventory_bluecat_ntp, + "service_description" : "NTP", + "default_levels_variable" : "bluecat_ntp", + "group" : "bluecat_ntp", + "snmp_scan_function" : lambda oid: oid(".1.3.6.1.2.1.1.2.0").startswith(".1.3.6.1.4.1.13315"), + "snmp_info" : (".1.3.6.1.4.1.13315.3.1.4.2", [ + '1.1', # bcnNtpSerOperState + '2.1', # bcnNtpSysLeap + '2.2', # bcnNtpSysStratum + ]) +} + diff -Nru check-mk-1.2.2p3/bluecat_threads check-mk-1.2.6p12/bluecat_threads --- check-mk-1.2.2p3/bluecat_threads 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/bluecat_threads 2015-06-24 09:48:36.000000000 +0000 @@ -0,0 +1,56 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. 
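check_bluecat_ntp above is a generator check: it yields three independent subresults (process state, leap indicator, stratum) and Check_MK folds them into one service result. Simplified, the folding works roughly like the sketch below; combine_subresults is illustrative only, and the real aggregation also marks non-OK parts with (!) / (!!) and has its own precedence rules for UNKNOWN.

def combine_subresults(subresults):
    results = list(subresults)
    # simplified: the numerically worst state wins (0=OK, 1=WARN, 2=CRIT)
    worst = max(state for state, _text in results)
    return worst, ", ".join(text for _state, text in results)

print(combine_subresults([
    (0, "Process is running normally"),
    (1, "Sys Leap: add second"),
    (0, "Stratum: 3"),
]))  # -> (1, 'Process is running normally, Sys Leap: add second, Stratum: 3')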
+ + +threads_default_levels = (2000, 4000) + +def inventory_bluecat_threads(info): + if info: + return [(None, "threads_default_levels")] + +def check_bluecat_threads(item, params, info): + nthreads = int(info[0][0]) + warn, crit = params + perfdata = [('threads', nthreads, warn, crit, 0 )] + if nthreads >= crit: + return (2, "%d threads (critical at %d)" % (nthreads, crit), perfdata) + elif nthreads >= warn: + return (1, "%d threads (warning at %d)" % (nthreads, warn), perfdata) + else: + return (0, "%d threads" % (nthreads,), perfdata) + +check_info["bluecat_threads"] = { + "check_function" : check_bluecat_threads, + "inventory_function" : inventory_bluecat_threads, + "service_description" : "Number of threads", + "group" : "threads", + "has_perfdata" : True, + "snmp_scan_function" : lambda oid: oid(".1.3.6.1.2.1.1.2.0").startswith(".1.3.6.1.4.1.13315.100.200"), + "snmp_info" : (".1.3.6.1.4.1.13315.100.200.1.1.2", [ + 1, # activeThreadCount + ]) +} + diff -Nru check-mk-1.2.2p3/bluecoat_diskcpu check-mk-1.2.6p12/bluecoat_diskcpu --- check-mk-1.2.2p3/bluecoat_diskcpu 2013-11-05 09:23:07.000000000 +0000 +++ check-mk-1.2.6p12/bluecoat_diskcpu 2015-06-24 09:48:36.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. @@ -27,27 +27,27 @@ def inventory_bluecoat_diskcpu(info): - return [ (line[0], "current value: " + line[1], '""') for line in info ] + return [ (line[0], None) for line in info ] -def check_bluecoat_diskcpu(item, params, info): +def check_bluecoat_diskcpu(item, _no_params, info): for line in info: if line[0] == item: perfdata = [("value", line[1]) ] if line[2] == '1': - return (0, "OK - %s" % (line[1],), perfdata) + return (0, "%s" % (line[1],), perfdata) else: - return (2, "CRIT - %s" % (line[1], ), perfdata) - return (3, "UNKNOWN - item not found in SNMP data") + return (2, "%s" % (line[1], ), perfdata) + return (3, "item not found in SNMP data") -check_info['bluecoat_diskcpu'] = ( - check_bluecoat_diskcpu, - "%s", - 1, - inventory_bluecoat_diskcpu) - -snmp_info['bluecoat_diskcpu'] = ( - ".1.3.6.1.4.1.3417.2.4.1.1.1", - [ 3, 4, 6 ]) # BLUECOAT disk und CPU-Tabelle -snmp_scan_functions["bluecoat_diskcpu"] = \ - lambda oid: '1.3.6.1.4.1.3417.1.1' in oid(".1.3.6.1.2.1.1.2.0") + + +check_info["bluecoat_diskcpu"] = { + 'check_function': check_bluecoat_diskcpu, + 'inventory_function': inventory_bluecoat_diskcpu, + 'service_description': '%s', + 'has_perfdata': True, + 'snmp_info': ('.1.3.6.1.4.1.3417.2.4.1.1.1', [3, 4, 6]), + 'snmp_scan_function': \ + lambda oid: '1.3.6.1.4.1.3417.1.1' in oid(".1.3.6.1.2.1.1.2.0"), +} diff -Nru check-mk-1.2.2p3/bluecoat_sensors check-mk-1.2.6p12/bluecoat_sensors --- check-mk-1.2.2p3/bluecoat_sensors 2013-11-05 09:23:07.000000000 +0000 +++ check-mk-1.2.6p12/bluecoat_sensors 2015-09-21 10:59:54.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. 
@@ -34,20 +34,20 @@ value = int(line[1]) * 10 ** int(line[3]) perfdata = [("value", value) ] if line[2] == '1': - return (0, "OK - %s" % (value,), perfdata) + return (0, "%s" % (value,), perfdata) else: - return (2, "CRIT - %s" % (value, ), perfdata) - return (3, "UNKNOWN - item not found in SNMP data") + return (2, "%s" % (value, ), perfdata) + return (3, "item not found in SNMP data") -check_info['bluecoat_sensors'] = ( - check_bluecoat_sensors, - "%s", - 1, - inventory_bluecoat_sensors) - -snmp_info['bluecoat_sensors'] = ( - ".1.3.6.1.4.1.3417.2.1.1.1.1.1", - [ 9, 5, 7, 4 ]) # BLUECOAT fan, voltage, temperatures, scale -snmp_scan_functions["bluecoat_sensors"] = \ - lambda oid: '1.3.6.1.4.1.3417.1.1' in oid(".1.3.6.1.2.1.1.2.0") + + +check_info["bluecoat_sensors"] = { + 'check_function': check_bluecoat_sensors, + 'inventory_function': inventory_bluecoat_sensors, + 'service_description': '%s', + 'has_perfdata': True, + 'snmp_info': ('.1.3.6.1.4.1.3417.2.1.1.1.1.1', [9, 5, 7, 4]), + 'snmp_scan_function': \ + lambda oid: '1.3.6.1.4.1.3417.1.1' in oid(".1.3.6.1.2.1.1.2.0"), +} diff -Nru check-mk-1.2.2p3/bonding.include check-mk-1.2.6p12/bonding.include --- check-mk-1.2.2p3/bonding.include 2013-11-05 09:23:07.000000000 +0000 +++ check-mk-1.2.6p12/bonding.include 2015-09-21 10:59:54.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. @@ -27,7 +27,7 @@ def inventory_bonding(parsed): inventory = [] for bond, status in parsed.items(): - if status["status"] == "up": + if status["status"] in ("up", "degraded"): # If no information about primary interface is available # then assume currently active one as primary if "primary" not in status and "active" in status: @@ -40,10 +40,10 @@ def check_bonding(item, params, parsed): if item not in parsed: - return (3, "UNKNOWN - no such bonding interface") + return (3, "no such bonding interface") status = parsed[item] - if status["status"] != "up": - return 2, "CRIT - interface is " + status["status"] + if status["status"] not in ("up", "degraded"): + return 2, "interface is " + status["status"] infos = [ "mode: " + status["mode"] ] state = 0 @@ -76,6 +76,9 @@ infos.append("expected is %s" % expected_active) state = 1 - return state, nagios_state_names[state] + " - " + ", ".join(infos) - + infos.append("bond status: " + status["status"]) + if status["status"] != "up": + infos[-1] += "(!)" + state = 1 + return state, ", ".join(infos) diff -Nru check-mk-1.2.2p3/brocade check-mk-1.2.6p12/brocade --- check-mk-1.2.2p3/brocade 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/brocade 2015-09-21 10:59:54.000000000 +0000 @@ -0,0 +1,152 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. 
+# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. + +# Example output from agent: +# [['1', '24', 'SLOT #0: TEMP #1'], +# ['2', '12', 'SLOT #0: TEMP #2'], +# ['3', '12', 'SLOT #0: TEMP #3'], +# ['4', '4687', 'FAN #1'], +# ['5', '4560', 'FAN #2'], +# ['6', '4821', 'FAN #3'], +# ['7', '1', 'Power Supply #1'], +# ['8', '1', 'Power Supply #2']] + + +def brocade_sensor_convert(info, what): + return_list = [] + for presence, state, name in info: + if name.startswith(what) and presence != "6" and (saveint(state) > 0 or what == "Power"): + sensor_id = name.split('#')[-1] + return_list.append([sensor_id, name, state]) + return return_list + +def brocade_scan(oid): + return oid(".1.3.6.1.2.1.1.2.0").startswith(".1.3.6.1.4.1.1588.2.1.1") or \ + oid(".1.3.6.1.2.1.1.2.0").startswith(".1.3.6.1.24.1.1588.2.1.1") or \ + oid(".1.3.6.1.2.1.1.2.0").startswith(".1.3.6.1.4.1.1588.2.2.1") or \ + oid(".1.3.6.1.2.1.1.2.0").startswith(".1.3.6.1.4.1.1588.3.3.1") + +brocade_info = ('.1.3.6.1.4.1.1588.2.1.1.1.1.22.1', [ + 3, # swSensorStatus, 6 = absent + 4, # swSensorValue, -2147483648 = unknown + 5, # swSensorInfo + ]) + +brocade_fan_default_levels = { "lower": (3000 , 2800 ) } + +def inventory_brocade_fan(info): + converted = brocade_sensor_convert(info, "FAN") + return [ (x[0], 'brocade_fan_default_levels') for x in converted ] + +def check_brocade_fan(item, params, info): + converted = brocade_sensor_convert(info, "FAN") + if type(params) is tuple: # old format + warn, crit = params + else: # new format + warn, crit = params.get("lower") + if params.get("upper"): + upperwarn, uppercrit = params["upper"] + else: + upperwarn, uppercrit = ( None, None ) + + for snmp_item, name, value in converted: + if item == snmp_item: + state = 0 + label = "" + value = saveint(value) + perf = [ ('fan', value, warn, crit) ] + if value <= crit: + state = 2 + label = "(Levels below: %d/%d)" % (warn, crit) + elif value <= warn: + state = 1 + label = "(Levels below: %d/%d)" % (warn, crit) + elif uppercrit and value >= uppercrit: + state = 2 + label = "(Levels above: %d/%d)" % (upperwarn, uppercrit) + elif upperwarn and value >= upperwarn: + state = 1 + label = "(Levels above: %d/%d)" % (upperwarn, uppercrit) + return state, "Fans at %drpm %s " % (value, label), perf + return 3, "FAN not found" + +check_info["brocade.fan"] = { + "check_function" : check_brocade_fan, + "inventory_function" : inventory_brocade_fan, + "service_description" : "FAN %s", + "has_perfdata" : True, + "group" : "hw_fans", + "snmp_info" : brocade_info, + 'snmp_scan_function' : brocade_scan, +} + +def inventory_brocade_power(info): + converted = brocade_sensor_convert(info, "Power") + return [ (x[0], None) for x in converted ] + +def check_brocade_power(item, _no_params, info): + converted = brocade_sensor_convert(info, "Power") + for snmp_item, name, value in converted: + if item == snmp_item: + value = saveint(value) + if value != 1: + return 2, "Error on supply 
%s" % name + return 0, "No problems found" + + return 3, "Supply not found" + +check_info["brocade.power"] = { + "check_function" : check_brocade_power, + "inventory_function" : inventory_brocade_power, + "service_description" : "Power supply %s", + "has_perfdata" : False, + "snmp_info" : brocade_info, + 'snmp_scan_function' : brocade_scan, +} + +brocade_temp_default_levels = ( 30, 40 ) + +def inventory_brocade_temp(info): + converted = brocade_sensor_convert(info, "SLOT") + return [ (x[0], 'brocade_temp_default_levels') for x in converted ] + +def check_brocade_temp(item, params, info): + converted = brocade_sensor_convert(info, "SLOT") + for snmp_item, name, value in converted: + if item == snmp_item: + return check_temperature(int(value), params) + + +check_info["brocade.temp"] = { + "check_function" : check_brocade_temp, + "inventory_function" : inventory_brocade_temp, + "service_description" : "Temperature %s", + "has_perfdata" : True, + "group" : "hw_temperature", + "snmp_info" : brocade_info, + 'snmp_scan_function' : brocade_scan, + "includes" : [ "temperature.include" ], +} diff -Nru check-mk-1.2.2p3/brocade.fan check-mk-1.2.6p12/brocade.fan --- check-mk-1.2.2p3/brocade.fan 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/brocade.fan 2015-06-24 09:48:36.000000000 +0000 @@ -0,0 +1,24 @@ +title: Brocade FibreChannel Switches: Fans +catalog: hw/storagehw/brocade +agents: snmp +license: GPL +distribution: check_mk +description: + This checks monitors the FAN speeds of a Brocade FC switch. + +item: + The number of the FAN (1, 2, 3 ...) as described in the SNMP output. + +perfdata: + The speed of each fan. + +inventory: + + The inventory creates a service for each fan unless it is marked as absent + in {swSensorStatus} + + +[parameters] +warn(int): the minimum fan speed for an OK state +crit(int): the minimum fan speed for a WARN state + diff -Nru check-mk-1.2.2p3/brocade_fcport check-mk-1.2.6p12/brocade_fcport --- check-mk-1.2.2p3/brocade_fcport 2013-11-05 09:23:07.000000000 +0000 +++ check-mk-1.2.6p12/brocade_fcport 2015-06-24 09:48:36.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. @@ -25,48 +25,72 @@ # Boston, MA 02110-1301 USA. # lookup tables for check implementation +# Taken from swFCPortPhyState brocade_fcport_phystates = [ '', 'noCard', 'noTransceiver', 'laserFault', 'noLight', - 'noSync', 'inSync', 'portFault', 'diagFault', 'lockRef' ] + 'noSync', 'inSync', 'portFault', 'diagFault', 'lockRef', + 'validating', 'invalidModule', 'noSigDet', 'unkown' ] +# Taken from swFCPortOpStatus brocade_fcport_opstates = [ 'unknown', 'online', 'offline', 'testing', 'faulty' ] +# Taken from swFCPortAdmStatus brocade_fcport_admstates = [ '', 'online', 'offline', 'testing', 'faulty' ] -brocade_fcport_speed = [ '', '1Gbit', '2Gbit', 'auto-Neg', '4Gbit', '8Gbit', '10Gbit', ] +# Taken from swFCPortSpeed +brocade_fcport_speed = [ 'unknown', '1Gbit', '2Gbit', 'auto-Neg', '4Gbit', '8Gbit', '10Gbit', 'unknown', '16Gbit' ] +# Taken from swNbBaudRate +isl_speed = { + "1": 0, # other (1) - None of the following. + "2": 0.155, # oneEighth (2) - 155 Mbaud. + "4": 0.266, #•quarter (4) - 266 Mbaud. + "8": 0.532, # half (8) - 532 Mbaud. + "16": 1, #•full (16) - 1 Gbaud. 
+ "32": 2, # double (32) - 2 Gbaud. + "64": 4, # quadruple (64) - 4 Gbaud. + "128": 8, # octuple (128) - 8 Gbaud. + "256": 10, # decuple (256) - 10 Gbaud. + "512": 16, # sexdecuple (512) - 16 Gbaud +} # settings for inventory: which ports should be inventorized -brocade_fcport_inventory_phystates = [ 3, 4, 5, 6, 7, 8, 9, ] +brocade_fcport_inventory_phystates = [ 3, 4, 5, 6, 7, 8, 9, 10 ] brocade_fcport_inventory_opstates = [ 1, 2, 3, 4, ] brocade_fcport_inventory_admstates = [ 1, 3, 4, ] brocade_fcport_inventory_use_portname = True # use swFCPortName as part of service description brocade_fcport_inventory_show_isl = True # add "ISL" to service description for interswitch links +brocade_fcport_inventory = [] -check_default_levels["brocade_fcport"] = "brocade_fcport_default_levels" - factory_settings["brocade_fcport_default_levels"] = { "rxcrcs": (3.0, 20.0), # allowed percentage of CRC errors "rxencoutframes": (3.0, 20.0), # allowed percentage of Enc-OUT Frames "notxcredits": (3.0, 20.0), # allowed percentage of No Tx Credits - "c3discards": (3.0, 20.0), # allowed percentage of CRC errors - "assumed_speed": 2.0, # used, if speed not available in SNMP data + "c3discards": (3.0, 20.0), # allowed percentage of C3 discards + "assumed_speed": 2.0, # used if speed not available in SNMP data } # Helper function for computing item from port number -def brocade_fcport_getitem(ports, index, portname, is_isl): +def brocade_fcport_getitem(ports, index, portname, is_isl, uses_portname, shows_isl): int_len = str(len(str(len(ports)))) itemname = ("%0" + int_len + "d") % (index - 1) - if is_isl and brocade_fcport_inventory_show_isl: + if is_isl and shows_isl: itemname += " ISL" - if portname.strip() and brocade_fcport_inventory_use_portname: + if portname.strip() and uses_portname: itemname += " " + portname.strip() return itemname def inventory_brocade_fcport(info): # info[0] is port table, info[1] is ISL table - if len(info) != 2: + if len(info) < 2: return + settings = host_extra_conf_merged(g_hostname, brocade_fcport_inventory) + uses_portname = settings.get('use_portname', brocade_fcport_inventory_use_portname) + shows_isl = settings.get('show_isl', brocade_fcport_inventory_show_isl) + admstates = settings.get('admstates', brocade_fcport_inventory_admstates) + opstates = settings.get('opstates', brocade_fcport_inventory_opstates) + phystates = settings.get('phystates', brocade_fcport_inventory_phystates) + inventory = [] isl_ports = {} if len(info) > 1: @@ -84,12 +108,12 @@ portname = line[13] is_isl = line[0] in isl_ports - if admstate in brocade_fcport_inventory_admstates and \ + if admstate in admstates and \ opstate in brocade_fcport_inventory_opstates and \ phystate in brocade_fcport_inventory_phystates: - inventory.append(( brocade_fcport_getitem(info[0], index, portname, is_isl), - '{ "phystate": %d, "opstate": %d, "admstate": %d }' + inventory.append(( brocade_fcport_getitem(info[0], index, portname, is_isl, uses_portname, shows_isl), + '{ "phystate": [%d], "opstate": [%d], "admstate": [%d] }' % (phystate, opstate, admstate) )) return inventory @@ -100,26 +124,50 @@ item_index = int(item.split()[0]) portinfo = [ line for line in info[0] if int(line[0]) == item_index + 1] index, phystate, opstate, admstate, txwords, rxwords, txframes, rxframes, \ - notxcredits, rxcrcs, rxencoutframes, c3discards, speed = map(int, portinfo[0][:-1]) + notxcredits, rxcrcs, rxencoutframes, c3discards = map(int, portinfo[0][:-2]) + speed = saveint(portinfo[0][-2]) summarystate = 0 output = [] perfdata = [] + 
perfaverages = [] # Lookup port speed in ISL table for ISL ports (older switches do not provide this # information in the normal table) isl_ports = dict(info[1]) if str(index) in isl_ports: - gbit = float(int(isl_ports.get(str(index))) / 16) + gbit = isl_speed.get( isl_ports.get( str(index) ) ) speedmsg = ("ISL at %.0fGbit/s" % gbit) else: # no ISL port - if brocade_fcport_speed[speed] == "auto-Neg": - # let user specify assumed speed via check parameter, default is 2.0 - gbit = params.get("assumed_speed") - speedmsg = ("assuming %gGbit/s" % gbit) + if brocade_fcport_speed[speed] in [ "auto-Neg", "unknown" ]: + try: + # extract the speed from IF-MIB::ifHighSpeed. + # unfortunately ports in the IF-MIB and the brocade MIB + # don't have a common index. We hope that at least + # the FC ports have the same sequence in both lists. + # here we go through ports of the IF-MIB, but consider only FC ports (type 56) + # and assume that the sequence number of the FC port here is the same + # as the sequence number in the brocade MIB (pindex = item_index) + pindex = -1 + for vals in info[2]: + port_type, actual_speed = map(saveint, vals) + if port_type == 56: + pindex += 1 + if pindex == item_index: + break # we found it + except: + actual_speed = 0 + if actual_speed > 0: + # use actual speed of port if available + gbit = actual_speed / 1000 + speedmsg = "actual speed %gGbit/s" % gbit + else: + # let user specify assumed speed via check parameter, default is 2.0 + gbit = params.get("assumed_speed") + speedmsg = "assumed speed %gGbit/s" % gbit else: gbit = float(brocade_fcport_speed[speed].replace("Gbit", "")) - speedmsg = ("%.0fGbit/s" % gbit) + speedmsg = "%.0fGbit/s" % gbit output.append(speedmsg) @@ -128,119 +176,104 @@ # Now check rates of various error counters this_time = time.time() - try: - timedif, rxwords_rate = get_counter("brocade_fcport.rxwords.%s" % index, this_time, rxwords) - timedif, txwords_rate = get_counter("brocade_fcport.txwords.%s" % index, this_time, txwords) - - # compute traffic in B/s and MB/s - in_bytes = rxwords_rate * 4 - out_bytes = txwords_rate * 4 - - average = params.get("average") # range in minutes - - # B A N D W I D T H - # convert thresholds in percentage into MB/s - bw_thresh = params.get("bw") - if bw_thresh == None: # no levels - warn_bytes, crit_bytes = None, None + + rxwords_rate = get_rate("brocade_fcport.rxwords.%s" % index, this_time, rxwords) + txwords_rate = get_rate("brocade_fcport.txwords.%s" % index, this_time, txwords) + + # compute traffic in B/s and MB/s + in_bytes = rxwords_rate * 4 + out_bytes = txwords_rate * 4 + + average = params.get("average") # range in minutes + + # B A N D W I D T H + # convert thresholds in percentage into MB/s + bw_thresh = params.get("bw") + if bw_thresh == None: # no levels + warn_bytes, crit_bytes = None, None + else: + warn, crit = bw_thresh + if type(warn) == float: + warn_bytes = wirespeed * warn / 100.0 + else: # in MB + warn_bytes = warn * 1048576.0 + if type(crit) == float: + crit_bytes = wirespeed * crit / 100.0 + else: # in MB + crit_bytes = crit * 1048576.0 + + for what, value in [("In", in_bytes), ("Out", out_bytes)]: + output.append("%s: %s/s" % (what, get_bytes_human_readable(value))) + perfdata.append((what.lower(), value, warn_bytes, crit_bytes, 0, wirespeed)) + + # average turned on: use averaged traffic values instead of current ones + if average: + value = get_average("brocade_fcport.%s.%s.avg" % (what, item), this_time, value, average) + output.append("Avg(%dmin): %s/s" % (average,
get_bytes_human_readable(value))) + perfaverages.append( ("%s_avg" % what.lower(), value, warn_bytes, crit_bytes, 0, wirespeed)) + + # handle levels for in/out + if crit_bytes != None and value >= crit_bytes: + summarystate = 2 + output.append(" >= %s/s(!!)" % (get_bytes_human_readable(crit_bytes))) + elif warn_bytes != None and value >= warn_bytes: + summarystate = max(1, summarystate) + output.append(" >= %s/s(!!)" % (get_bytes_human_readable(warn_bytes))) + + # put perfdata of averages after perfdata for in and out in order not to confuse the perfometer + perfdata.extend(perfaverages) + + # R X F R A M E S & T X F R A M E S + # Put number of frames into performance data (honor averaging) + rxframes_rate = get_rate("brocade_fcport.rxframes.%s" % index, this_time, rxframes) + txframes_rate = get_rate("brocade_fcport.txframes.%s" % index, this_time, txframes) + for what, value in [ ("rxframes", rxframes_rate), ("txframes", txframes_rate) ]: + perfdata.append((what, value)) + if average: + value = get_average("brocade_fcport.%s.%s.avg" % (what, item), this_time, value, average) + perfdata.append( ("%s_avg" % what, value) ) + + # E R R O R C O U N T E R S + # handle levels on error counters + + for descr, counter, value, ref in [ + ("CRC errors", "rxcrcs", rxcrcs, rxframes_rate, ), + ("ENC-Out", "rxencoutframes", rxencoutframes, rxframes_rate, ), + ("C3 discards", "c3discards", c3discards, txframes_rate, ), + ("no TX buffer credits", "notxcredits", notxcredits, txframes_rate, ),]: + per_sec = get_rate("brocade_fcport.%s.%s" % (counter, index), this_time, value) + + perfdata.append((counter, per_sec)) + + # if averaging is on, compute average and apply levels to average + if average: + per_sec_avg = get_average("brocade_fcport.%s.%s.avg" % \ + (counter, item), this_time, per_sec, average) + perfdata.append( ("%s_avg" % counter, per_sec_avg ) ) + + # compute error rate (errors in relation to number of frames) (from 0.0 to 1.0) + if ref > 0 or per_sec > 0: + rate = per_sec / (ref + per_sec) else: - warn, crit = bw_thresh - if type(warn) == float: - warn_bytes = wirespeed * warn / 100.0 - else: # in MB - warn_bytes = warn * 1048576.0 - if type(crit) == float: - crit_bytes = wirespeed * crit / 100.0 - else: # in MB - crit_bytes = crit * 1048576.0 - - for what, value in [("In", in_bytes), ("Out", out_bytes)]: - output.append("%s: %s/s" % (what, get_bytes_human_readable(value))) - perfdata.append((what.lower(), value, warn_bytes, crit_bytes, 0, wirespeed)) - - # average turned on: use averaged traffic values instead of current ones - if average: - timedif, value = get_average("brocade_fcport.%s.%s.avg" % (what, item), this_time, value, average) - output.append("Avg(%dmin): %s/s" % (average, get_bytes_human_readable(value))) - perfdata.append( ("%s_avg" % what.lower(), value, warn_bytes, crit_bytes, 0, wirespeed)) - - # handle levels for in/out - if crit_bytes != None and value >= crit_bytes: - summarystate = 2 - output.append(" >= %s/s(!!)" % (get_bytes_human_readable(crit_bytes))) - elif warn_bytes != None and value >= warn_bytes: - summarystate = max(1, summarystate) - output.append(" >= %s/s(!!)" % (get_bytes_human_readable(warn_bytes))) - - # R X F R A M E S & T X F R A M E S - # Put number of frames into performance data (honor averaging) - timedif, rxframes_rate = get_counter("brocade_fcport.rxframes.%s" % index, this_time, rxframes) - timedif, txframes_rate = get_counter("brocade_fcport.txframes.%s" % index, this_time, txframes) - for what, value in [ ("rxframes", rxframes_rate), 
("txframes", txframes_rate) ]: - perfdata.append((what, value)) - if average: - timedif, value = get_average("brocade_fcport.%s.%s.avg" % (what, item), this_time, value, average) - perfdata.append( ("%s_avg" % what, value) ) - - # E R R O R C O U N T E R S - # handle levels on error counters - - for descr, counter, value, ref in [ - ("CRC errors", "rxcrcs", rxcrcs, rxframes_rate, ), - ("ENC-Out", "rxencoutframes", rxencoutframes, rxframes_rate, ), - ("C3 discards", "c3discards", c3discards, txframes_rate, ), - ("no TX buffer credits", "notxcredits", notxcredits, txframes_rate, ),]: - timedif, per_sec = get_counter("brocade_fcport.%s.%s" % (counter, index), this_time, value) - - perfdata.append((counter, per_sec)) - - # if averaging is on, compute average and apply levels to average - if average: - timedif, per_sec_avg = get_average("brocade_fcport.%s.%s.avg" % \ - (counter, item), this_time, per_sec, average) - perfdata.append( ("%s_avg" % counter, per_sec_avg ) ) - - # compute error rate (errors in relation to number of frames) (from 0.0 to 1.0) - if ref > 0 or per_sec > 0: - rate = per_sec / (ref + per_sec) - else: - rate = 0 - text = "%s: %.2f%%" % (descr, rate * 100.0) + rate = 0 + text = "%s: %.2f%%" % (descr, rate * 100.0) - # Honor averaging of error rate - if average: - timedif, rate = get_average("brocade_fcport.%s.%s.avgrate" % - (counter, item), this_time, rate, average) - text += ", Avg: %.2f%%" % (rate * 100.0) - - error_percentage = rate * 100.0 - warn, crit = params[counter] - if crit != None and error_percentage >= crit: - summarystate = 2 - text += "(!!)" - output.append(text) - elif warn != None and error_percentage >= warn: - summarystate = max(1, summarystate) - text += "(!)" - output.append(text) - - - except MKCounterWrapped, e: - # Assume that this is the first check of this port. Make sure, all counters - # are initialized. If a counter is updated twice, get_counter will handle - # that correctly. 
- for counter, value in [ ( "rxwords", rxwords), - ( "txwords", txwords), - ( "txframes", txframes, ), - ( "notxcredits", notxcredits), - ( "rxcrcs", rxcrcs), - ( "rxencoutframes", rxencoutframes), - ( "c3discards", c3discards)]: - try: - get_counter("brocade_fcport.%s.%s" % (counter, index), this_time, value) - except MKCounterWrapped, e: - pass - perfdata = [] # perfdata might not be valid + # Honor averaging of error rate + if average: + rate = get_average("brocade_fcport.%s.%s.avgrate" % + (counter, item), this_time, rate, average) + text += ", Avg: %.2f%%" % (rate * 100.0) + + error_percentage = rate * 100.0 + warn, crit = params[counter] + if crit != None and error_percentage >= crit: + summarystate = 2 + text += "(!!)" + output.append(text) + elif warn != None and error_percentage >= warn: + summarystate = max(1, summarystate) + text += "(!)" + output.append(text) # P O R T S T A T E @@ -282,40 +315,45 @@ summarystate = 2 output.append("Adm:%s(%d)%s" % (brocade_fcport_admstates[admstate], admstate, errorflag)) - return (summarystate, '%s - %s' % (nagios_state_names[summarystate], ', '.join(output)), perfdata) - - + return (summarystate, ', '.join(output), perfdata) -check_info['brocade_fcport'] = (check_brocade_fcport, "Port %s", 1, inventory_brocade_fcport) - - -snmp_info['brocade_fcport'] = [ - ( ".1.3.6.1.4.1.1588.2.1.1.1.6.2.1",[ - 1, # swFCPortIndex - 3, # swFCPortPhyState - 4, # swFCPortOpStatus - 5, # swFCPortAdmStatus - 11, # swFCPortTxWords - 12, # swFCPortRxWords - 13, # swFCPortTxFrames - 14, # swFCPortRxFrames - 20, # swFCPortNoTxCredits - 22, # swFCPortRxCrcs - 26, # swFCPortRxEncOutFrs - 28, # swFCPortC3Discards - 35, # swFCPortSpeed - 36, # swFCPortName (not supported by all devices) - ]), - - # Information about Inter-Switch-Links (contains baud rate of port) - ( ".1.3.6.1.4.1.1588.2.1.1.1.2.9.1", [ - 2, # swNbMyPort - 5, # swNbBaudRate - ]) -] - -snmp_scan_functions['brocade_fcport'] = \ - lambda oid: oid(".1.3.6.1.2.1.1.2.0").startswith(".1.3.6.1.4.1.1588.2.1.1") or \ - oid(".1.3.6.1.2.1.1.2.0").startswith(".1.3.6.1.24.1.1588.2.1.1") - -checkgroup_of["brocade_fcport"] = "brocade_fcport" +check_info["brocade_fcport"] = { + 'check_function': check_brocade_fcport, + 'inventory_function': inventory_brocade_fcport, + 'service_description': 'Port %s', + 'has_perfdata': True, + 'snmp_info': [ + ( ".1.3.6.1.4.1.1588.2.1.1.1.6.2.1",[ + 1, # swFCPortIndex + 3, # swFCPortPhyState + 4, # swFCPortOpStatus + 5, # swFCPortAdmStatus + 11, # swFCPortTxWords + 12, # swFCPortRxWords + 13, # swFCPortTxFrames + 14, # swFCPortRxFrames + 20, # swFCPortNoTxCredits + 22, # swFCPortRxCrcs + 26, # swFCPortRxEncOutFrs + 28, # swFCPortC3Discards + 35, # swFCPortSpeed, deprecated from at least firmware version 7.2.1 + 36, # swFCPortName (not supported by all devices) + ]), + + # Information about Inter-Switch-Links (contains baud rate of port) + ( ".1.3.6.1.4.1.1588.2.1.1.1.2.9.1", [ + 2, # swNbMyPort + 5, # swNbBaudRate + ]), + + # new way to get port speed supported by Brocade + ( ".1.3.6.1.2.1", [ + "2.2.1.3", # ifType, needed to extract fibre channel ifs only (type 56) + "31.1.1.1.15", # IF-MIB::ifHighSpeed + ]), + ], + 'snmp_scan_function' : lambda oid: oid(".1.3.6.1.2.1.1.2.0").startswith(".1.3.6.1.4.1.1588.2.1.1") \ + and oid(".1.3.6.1.4.1.1588.2.1.1.1.6.2.1.*") != None, + 'group' : 'brocade_fcport', + 'default_levels_variable' : 'brocade_fcport_default_levels', +} diff -Nru check-mk-1.2.2p3/brocade_info check-mk-1.2.6p12/brocade_info --- check-mk-1.2.2p3/brocade_info 1970-01-01 
00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/brocade_info 2015-09-21 10:59:54.000000000 +0000 @@ -0,0 +1,96 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. + + +def inventory_brocade_info(info): + data = "".join(brocade_info_try_it(info)) + if data != "----": + return [ ( None, None ) ] + +def brocade_info_try_it(info): + try: + model = info[0][0][0] + except: + model = "-" + try: + wwn = info[2][0][0] + except: + wwn = "-" + try: + fw = info[1][0][0] + except: + fw = "-" + try: + ssn = info[1][0][1] + except: + ssn = "-" + + return model, ssn, fw, wwn + +def brocade_info_parse_wwn(val): + if val == "": + val = "-" + elif val != "-": + val = ":".join(val.split(" ")[:8]) + return val + +def check_brocade_info(item, params, info): + model, ssn, fw, wwn = brocade_info_try_it(info) + data = "".join((model, ssn, fw, wwn)) + if data != "----": + wwn = brocade_info_parse_wwn(wwn) + infotext = "Model: %s, SSN: %s, Firmware Version: %s, WWN: %s" % ( model, ssn, fw, wwn ) + return 0, infotext + else: + return 3, "no information found" + +check_info["brocade_info"] = { + 'check_function' : check_brocade_info, + 'inventory_function' : inventory_brocade_info, + 'service_description' : 'Brocade Info', + 'has_perfdata' : False, + 'snmp_info' : [ + ( ".1.3.6.1.2.1.47.1.1.1.1.2", + [ + 1, # entPhysicalDescr.1 + ], + ), + ( ".1.3.6.1.4.1.1588.2.1.1.1.1", + [ + 6, # swFirmwareVersion + 10, # swSsn + ] + ), + ( ".1.3.6.1.3.94.1.6.1", + [ + 1, # connUnitId + ], + ), + ], + 'snmp_scan_function' : lambda oid: (oid(".1.3.6.1.2.1.1.2.0").startswith(".1.3.6.1.4.1.1588") \ + or oid(".1.3.6.1.2.1.1.2.0").startswith(".1.3.6.1.24.1.1588.2.1.1")) \ + and oid(".1.3.6.1.4.1.1588.2.1.1.1.1.6") != None, +} diff -Nru check-mk-1.2.2p3/brocade_mlx check-mk-1.2.6p12/brocade_mlx --- check-mk-1.2.2p3/brocade_mlx 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/brocade_mlx 2015-06-24 09:48:36.000000000 +0000 @@ -0,0 +1,232 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . 
\ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. + +brocade_mlx_states = { + 0: (1, "Slot is empty"), + 2: (1, "Module is going down"), + 3: (2, "Rejected due to wrong configuration"), + 4: (2, "Hardware is bad"), + 8: (1, "Configured / Stacking"), + 9: (1, "In power-up cycle"), + 10: (0, "Running"), + 11: (0, "Blocked for full height card"), +} + +brocade_mlx_info = [ + ('.1.3.6.1.4.1.1991.1.1.2.2.1.1', [ 1, 2, 12, 24, 25 ]), + # id, descr, overall status, MemoryTotal, MemoryAvailable + ('.1.3.6.1.4.1.1991.1.1.2.11.1.1.5', [ OID_END, "" ]), + # Rest of OId starting with module ID, CpuUtilPercent +] + +def brocade_mlx_get_state(state): + return brocade_mlx_states.get(state, (3, 'Unhandled state - %d' % state)) + +def brocade_mlx_scan(oid): + return oid(".1.3.6.1.2.1.1.2.0").startswith(".1.3.6.1.4.1.1991.1.") + +def brocade_mlx_combine_item(id, descr): + if descr == "": + return id + else: + descr = re.sub(" *Module", "", descr) + return "%s %s" % (id, descr) + +# .--Overall Status------------------------------------------------------. +# | ___ _ _ ____ _ _ | +# | / _ \__ _____ _ __ __ _| | | / ___|| |_ __ _| |_ _ _ ___ | +# | | | | \ \ / / _ \ '__/ _` | | | \___ \| __/ _` | __| | | / __| | +# | | |_| |\ V / __/ | | (_| | | | ___) | || (_| | |_| |_| \__ \ | +# | \___/ \_/ \___|_| \__,_|_|_| |____/ \__\__,_|\__|\__,_|___/ | +# | | +# +----------------------------------------------------------------------+ + +def inventory_brocade_mlx_module(info): + inventory = [] + for module_id, module_descr, module_state, mem_total, mem_avail in info[0]: + # do not inventorize modules reported as empty + if module_state != "0": + inventory.append( (brocade_mlx_combine_item(module_id, module_descr), None) ) + return inventory + +def check_brocade_mlx_module(item, _no_params, info): + for module_id, module_descr, module_state, mem_total, mem_avail in info[0]: + if brocade_mlx_combine_item(module_id, module_descr) == item: + return brocade_mlx_get_state(int(module_state)) + return 3, "Module not found" + +check_info["brocade_mlx.module_status"] = { + "check_function" : check_brocade_mlx_module, + "inventory_function" : inventory_brocade_mlx_module, + "service_description" : "Status Module %s", + "snmp_info" : brocade_mlx_info, + "snmp_scan_function" : brocade_mlx_scan, + "has_perfdata" : False, +} + +#. +# .--Memory--------------------------------------------------------------. 
+# | __ __ | +# | | \/ | ___ _ __ ___ ___ _ __ _ _ | +# | | |\/| |/ _ \ '_ ` _ \ / _ \| '__| | | | | +# | | | | | __/ | | | | | (_) | | | |_| | | +# | |_| |_|\___|_| |_| |_|\___/|_| \__, | | +# | |___/ | +# +----------------------------------------------------------------------+ + +brocade_mlx_mem_default_levels = { "levels": (80.0, 90.0) } + +def inventory_brocade_mlx_module_mem(info): + inventory = [] + for module_id, module_descr, module_state, mem_total, mem_avail in info[0]: + # do not inventorize modules reported as empty or "Blocked for full height card" + # and: monitor cpu only on NI-MLX and BR-MLX modules + if module_state != "0" and module_state != "11" and ( module_descr.startswith("NI-MLX") or module_descr.startswith("BR-MLX") ): + inventory.append( (brocade_mlx_combine_item(module_id, module_descr), "brocade_mlx_mem_default_levels") ) + return inventory + +def check_brocade_mlx_module_mem(item, params, info): + warn, crit = params["levels"] + for module_id, module_descr, module_state, mem_total, mem_avail in info[0]: + module_state = int(module_state) + if brocade_mlx_combine_item(module_id, module_descr) == item: + if module_state != 10: + return 3, "Module is not running (Current State: %s)" % brocade_mlx_get_state(module_state)[1] + + mem_avail = saveint(mem_avail) + mem_total = saveint(mem_total) + mem_used = mem_total - mem_avail + mem_used_percent = savefloat(mem_used) / savefloat(mem_total) * 100 + + if type(warn) is int: + warn_absolut = warn + else: + warn_absolut = int(mem_total * warn / 100) + + if type(crit) is int: + crit_absolut = crit + else: + crit_absolut = int(mem_total * crit / 100) + + perfdata = [ ('memused', str(mem_used) + 'Bytes', warn_absolut, crit_absolut, 0, mem_total) ] + + status = 0 + if mem_used > warn_absolut: + status = 1 + if mem_used > crit_absolut: + status = 2 + + return status, "%s used (%0.1f%%) of total %s" % \ + (get_bytes_human_readable(mem_used), mem_used_percent, \ + get_bytes_human_readable(mem_total)), perfdata + + return 3, "Module not found" + +check_info["brocade_mlx.module_mem"] = { + "check_function" : check_brocade_mlx_module_mem, + "inventory_function" : inventory_brocade_mlx_module_mem, + "service_description" : "Memory Module %s", + "snmp_info" : brocade_mlx_info, + "snmp_scan_function" : brocade_mlx_scan, + "has_perfdata" : True, + "group" : "memory_multiitem", +} + +#. +# .--CPU-----------------------------------------------------------------. 
+# | ____ ____ _ _ | +# | / ___| _ \| | | | | +# | | | | |_) | | | | | +# | | |___| __/| |_| | | +# | \____|_| \___/ | +# | | +# +----------------------------------------------------------------------+ + +brocade_mlx_cpu_default_levels = { "levels" : (80.0, 90.0) } + +def inventory_brocade_mlx_module_cpu(info): + inventory = [] + for module_id, module_descr, module_state, mem_total, mem_avail in info[0]: + # do not inventorize modules reported as empty or "Blocked for full height card" + # and: monitor cpu only on NI-MLX and BR-MLX modules + if module_state != "0" and module_state != "11" and ( module_descr.startswith("NI-MLX") or module_descr.startswith("BR-MLX") ): + inventory.append( (brocade_mlx_combine_item(module_id, module_descr), "brocade_mlx_cpu_default_levels") ) + return inventory + +def check_brocade_mlx_module_cpu(item, params, info): + warn, crit = params["levels"] + for module_id, module_descr, module_state, mem_total, mem_avail in info[0]: + if brocade_mlx_combine_item(module_id, module_descr) == item: + if module_state != "10": + return 3, "Module is not in state running" + + cpu_util1 = "" + cpu_util5 = "" + cpu_util60 = "" + cpu_util300 = "" + for oid_end, cpu_util in info[1]: + if oid_end == "%s.1.1" % module_id: + cpu_util1 = saveint(cpu_util) + if oid_end == "%s.1.5" % module_id: + cpu_util5 = saveint(cpu_util) + if oid_end == "%s.1.60" % module_id: + cpu_util60 = saveint(cpu_util) + if oid_end == "%s.1.300" % module_id: + cpu_util300 = saveint(cpu_util) + + if cpu_util1 == "" or cpu_util5 == "" or cpu_util60 == "" or cpu_util300 == "": + return 3, "did not find all cpu utilization values in snmp output" + + perfdata = [ ('cpu_util1', str(cpu_util1) + '%', '', '', 0, 100), + ('cpu_util5', str(cpu_util5) + '%', '', '', 0, 100), + ('cpu_util60', str(cpu_util60) + '%', warn, crit, 0, 100), + ('cpu_util300', str(cpu_util300) + '%', '', '', 0, 100), + ] + + status = 0 + errorstring = "" + if cpu_util60 > warn: + status = 1 + errorstring = "(!)" + if cpu_util60 > crit: + status = 2 + errorstring = "(!!)" + + return status, "CPU utilization was %s/%s/%s%s/%s%% for the last 1/5/60/300 sec" % \ + (cpu_util1, cpu_util5, cpu_util60, errorstring, cpu_util300), perfdata + + return 3, "Module not found" + +check_info["brocade_mlx.module_cpu"] = { + "check_function" : check_brocade_mlx_module_cpu, + "inventory_function" : inventory_brocade_mlx_module_cpu, + "service_description" : "CPU utilization Module %s", + "snmp_info" : brocade_mlx_info, + "snmp_scan_function" : brocade_mlx_scan, + "has_perfdata" : True, + "group" : "cpu_utilization_multiitem", +} + diff -Nru check-mk-1.2.2p3/brocade_mlx_fan check-mk-1.2.6p12/brocade_mlx_fan --- check-mk-1.2.2p3/brocade_mlx_fan 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/brocade_mlx_fan 2015-09-16 14:25:30.000000000 +0000 @@ -0,0 +1,61 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. 
+#
+# check_mk is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by
+# the Free Software Foundation in version 2. check_mk is distributed
+# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with-
+# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE. See the GNU General Public License for more de-
+# ails. You should have received a copy of the GNU General Public
+# License along with GNU Make; see the file COPYING. If not, write
+# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
+# Boston, MA 02110-1301 USA.
+
+def brocade_mlx_fan_combine_item(id, descr):
+    if descr == "" or "(RPM " in descr:
+        return id
+    else:
+        return "%s %s" % (id, descr)
+
+def inventory_brocade_mlx_fan(info):
+    inventory = []
+    for fan_id, fan_descr, fan_state in info:
+        # Only add fans that are present
+        if fan_state != "1":
+            inventory.append((brocade_mlx_fan_combine_item(fan_id, fan_descr), None))
+    return inventory
+
+def check_brocade_mlx_fan(item, _no_params, info):
+    for fan_id, fan_descr, fan_state in info:
+        if brocade_mlx_fan_combine_item(fan_id, fan_descr) == item:
+            if fan_state == "2":
+                return 0, "Fan reports state: normal"
+            elif fan_state == "3":
+                return 2, "Fan reports state: failure"
+            elif fan_state == "1":
+                return 3, "Fan reports state: other"
+            else:
+                return 3, "Fan reports an unhandled state (%s)" % fan_state
+    return 3, "Fan not found"
+
+check_info["brocade_mlx_fan"] = {
+    "check_function" : check_brocade_mlx_fan,
+    "inventory_function" : inventory_brocade_mlx_fan,
+    "service_description" : "Fan %s",
+    "snmp_info" : ('.1.3.6.1.4.1.1991.1.1.1.3.1.1', [ 1, 2, 3 ]),
+    "snmp_scan_function" : lambda oid: oid(".1.3.6.1.2.1.1.2.0").startswith(".1.3.6.1.4.1.1991.1."),
+    "has_perfdata" : False,
+}
diff -Nru check-mk-1.2.2p3/brocade_mlx.module_cpu check-mk-1.2.6p12/brocade_mlx.module_cpu
--- check-mk-1.2.2p3/brocade_mlx.module_cpu 1970-01-01 00:00:00.000000000 +0000
+++ check-mk-1.2.6p12/brocade_mlx.module_cpu 2015-06-24 09:48:36.000000000 +0000
@@ -0,0 +1,50 @@
+title: Brocade NetIron MLX devices: NI-MLX or BR-MLX Module CPU Utilization
+agents: snmp
+catalog: hw/network/brocade
+license: GPL
+distribution: check_mk
+description:
+ Checks the Module CPU Utilization of a NI-MLX or BR-MLX module in Brocade
+ NetIron MLX switching / routing devices.
+
+ {WARN} or {CRIT} is returned if the usage in the last 60 sec was above
+ the given thresholds. {OK} is returned otherwise.
+
+ Please note: Even though the check reports and graphs the CPU Utilization
+ of the last 1/5/60/300 sec, the thresholds are only checked against the
+ value of the last 60 sec. The other values are informational only.
+
+item:
+ If a module description is delivered by SNMP, the item is built from the
+ module ID plus the description. Otherwise it is just the ID.
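+
+ For illustration, a minimal sketch of that composition (a hypothetical
+ standalone helper mirroring brocade_mlx_combine_item from the check
+ plugin; the sample description is an assumption):
+
+   import re
+
+   def combine_item(module_id, module_descr):
+       # an empty description leaves the bare module ID as the item
+       if module_descr == "":
+           return module_id
+       # otherwise strip any " Module" part and join ID and description
+       module_descr = re.sub(" *Module", "", module_descr)
+       return "%s %s" % (module_id, module_descr)
+
+   combine_item("33", "NI-MLX-32_MR Management Module")
+   # -> '33 NI-MLX-32_MR Management'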
+
+examples:
+ # set default levels to 70 and 80 percent:
+ brocade_mlx_cpu_default_levels = { "levels": (70.0, 80.0) }
+
+ # Check Module with ID 33 on a box called my-mlx-device with default levels
+ checks += [
+ ("my-mlx-device", "brocade_mlx.module_cpu", '33 NI-MLX-32_MR Management', brocade_mlx_cpu_default_levels),
+ ]
+
+ # or use individual levels for warn and crit
+ checks += [
+ ("my-mlx-device", "brocade_mlx.module_cpu", '33 NI-MLX-32_MR Management', { "levels": (75.0, 85.0) }),
+ ]
+
+perfdata:
+ Four values are returned: cpu_util1, cpu_util5, cpu_util60 and cpu_util300,
+ the CPU Utilization (in percent) in the last 1/5/60/300 sec.
+ cpu_util60 is returned together with the warn and crit levels.
+
+inventory:
+ Finds one item per NI-MLX or BR-MLX module.
+ Modules with state "empty" or "Blocked for full height card" are omitted.
+
+[parameters]
+parameters (dict): with the element
+{"levels"}: (float, float): levels of CPU utilization for {WARN} and {CRIT} in percent
+
+[configuration]
+brocade_mlx_cpu_default_levels(dict): The standard levels for {WARN} and
+ {CRIT}, preset to { "levels": (80.0, 90.0) }
diff -Nru check-mk-1.2.2p3/brocade_mlx.module_mem check-mk-1.2.6p12/brocade_mlx.module_mem
--- check-mk-1.2.2p3/brocade_mlx.module_mem 1970-01-01 00:00:00.000000000 +0000
+++ check-mk-1.2.6p12/brocade_mlx.module_mem 2015-06-24 09:48:36.000000000 +0000
@@ -0,0 +1,47 @@
+title: Brocade NetIron MLX devices: NI-MLX or BR-MLX Module Memory (RAM usage)
+agents: snmp
+catalog: hw/network/brocade
+license: GPL
+distribution: check_mk
+description:
+ Checks the Module Memory (RAM usage) of a NI-MLX or BR-MLX module in Brocade
+ NetIron MLX switching / routing devices.
+
+ {WARN} or {CRIT} is returned if the usage is above the given thresholds.
+ {OK} is returned otherwise.
+
+item:
+ If a module description is delivered by SNMP, the item is built from the
+ module ID plus the description. Otherwise it is just the ID.
+
+examples:
+ # set default levels to 70 and 80 percent:
+ brocade_mlx_mem_default_levels = { "levels": (70.0, 80.0) }
+
+ # Check Module with ID 33 on a box called my-mlx-device with default levels
+ checks += [
+ ("my-mlx-device", "brocade_mlx.module_mem", '33 NI-MLX-32_MR Management', brocade_mlx_mem_default_levels),
+ ]
+
+ # or use individual levels for warn and crit
+ checks += [
+ ("my-mlx-device", "brocade_mlx.module_mem", '33 NI-MLX-32_MR Management', { "levels": (75.0, 85.0) }),
+ ]
+
+perfdata:
+ One value is returned: the amount of memory used (in bytes),
+ together with the warn and crit levels and the maximum value.
+
+inventory:
+ Finds one item per NI-MLX or BR-MLX module.
+ Modules with state "empty" or "Blocked for full height card" are omitted.
+
+[parameters]
+parameters (dict): with the element
+{"levels"}: (int or float, int or float): Levels of memory usage for {WARN} and {CRIT}.
+ If a value is given as float, it is interpreted as a percentage.
+ If a value is given as int, it is interpreted as an absolute value in megabytes.
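+
+ For illustration, a minimal sketch of how such a mixed int/float level can
+ be resolved into an absolute threshold (a hypothetical helper modelled on
+ the warn_absolut/crit_absolut logic of the check function):
+
+   def level_to_absolute(level, mem_total):
+       # ints are absolute values, floats are percentages of the total
+       if type(level) is int:
+           return level
+       return int(mem_total * level / 100)
+
+   level_to_absolute(80.0, 2048)  # -> 1638
+   level_to_absolute(1500, 2048)  # -> 1500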
+
+[configuration]
+brocade_mlx_mem_default_levels(dict): The standard levels for {WARN} and
+ {CRIT}, preset to { "levels": (80.0, 90.0) }
diff -Nru check-mk-1.2.2p3/brocade_mlx.module_status check-mk-1.2.6p12/brocade_mlx.module_status
--- check-mk-1.2.2p3/brocade_mlx.module_status 1970-01-01 00:00:00.000000000 +0000
+++ check-mk-1.2.6p12/brocade_mlx.module_status 2015-06-24 09:48:36.000000000 +0000
@@ -0,0 +1,20 @@
+title: Brocade NetIron MLX / ADX / FGS / ICX devices: Module Overall Status
+agents: snmp
+catalog: hw/network/brocade
+license: GPL
+distribution: check_mk
+description:
+ Checks the overall status of modules in Brocade NetIron MLX / ADX / FGS /
+ ICX switching / routing devices.
+
+ Returns {OK} on status 10 (running) and 11 (Blocked for full height card).
+ Returns {WARN} on status 0 (empty), 2 (going down) and 9 (coming up).
+ Returns {CRIT} on status 3 (rejected) and 4 (bad).
+ Returns {UNKN} on every other status.
+
+item:
+ If a module description is delivered by SNMP, the item is built from the
+ module ID plus the description. Otherwise it is just the ID.
+
+inventory:
+ Finds one item per module.
diff -Nru check-mk-1.2.2p3/brocade_mlx_power check-mk-1.2.6p12/brocade_mlx_power
--- check-mk-1.2.2p3/brocade_mlx_power 1970-01-01 00:00:00.000000000 +0000
+++ check-mk-1.2.6p12/brocade_mlx_power 2015-09-21 10:59:54.000000000 +0000
@@ -0,0 +1,59 @@
+#!/usr/bin/python
+# -*- encoding: utf-8; py-indent-offset: 4 -*-
+# +------------------------------------------------------------------+
+# | ____ _ _ __ __ _ __ |
+# | / ___| |__ ___ ___| | __ | \/ | |/ / |
+# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / |
+# | | |___| | | | __/ (__| < | | | | . \ |
+# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ |
+# | |
+# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de |
+# +------------------------------------------------------------------+
+#
+# This file is part of Check_MK.
+# The official homepage is at http://mathias-kettner.de/check_mk.
+#
+# check_mk is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by
+# the Free Software Foundation in version 2. check_mk is distributed
+# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with-
+# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE. See the GNU General Public License for more de-
+# ails. You should have received a copy of the GNU General Public
+# License along with GNU Make; see the file COPYING. If not, write
+# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
+# Boston, MA 02110-1301 USA.
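+
+# For illustration only: a minimal sketch of the overall-status mapping
+# described in the brocade_mlx.module_status man page above (a hypothetical
+# standalone helper; the shipped check uses the brocade_mlx_states dict):
+def brocade_mlx_module_status_to_state(status):
+    # 10 (running) and 11 (blocked for full height card) map to OK,
+    # 0, 2, 8 and 9 map to WARN, 3 and 4 map to CRIT, the rest to UNKNOWN
+    return {0: 1, 2: 1, 3: 2, 4: 2, 8: 1, 9: 1, 10: 0, 11: 0}.get(status, 3)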
+ +def brocade_mlx_power_combine_item(id, descr): + if descr == "" or "AC " in descr: + return id + else: + return "%s %s" % (id, descr) + +def inventory_brocade_mlx_power(info): + inventory = [] + for power_id, power_descr, power_state in info: + inventory.append( (brocade_mlx_power_combine_item(power_id, power_descr), None) ) + return inventory + +def check_brocade_mlx_power(item, _no_params, info): + for power_id, power_descr, power_state in info: + if brocade_mlx_power_combine_item(power_id, power_descr) == item: + if power_state == "2": + return 0, "Power supply reports state: normal" + elif power_state == "3": + return 2, "Power supply reports state: failure" + elif power_state == "1": + return 3, "Power supply reports state: other" + else: + return 3, "Power supply reports an unhandled state (%s)" % power_state + return 3, "Power supply not found" + +check_info["brocade_mlx_power"] = { + "check_function" : check_brocade_mlx_power, + "inventory_function" : inventory_brocade_mlx_power, + "service_description" : "Power supply %s", + "snmp_info" : ('.1.3.6.1.4.1.1991.1.1.1.2.1.1', [ 1, 2, 3 ]), # power supplies (id, descr, state) + "snmp_scan_function" : lambda oid: oid(".1.3.6.1.2.1.1.2.0").startswith(".1.3.6.1.4.1.1991.1."), + "has_perfdata" : False, +} diff -Nru check-mk-1.2.2p3/brocade_mlx_temp check-mk-1.2.6p12/brocade_mlx_temp --- check-mk-1.2.2p3/brocade_mlx_temp 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/brocade_mlx_temp 2015-09-21 10:59:54.000000000 +0000 @@ -0,0 +1,94 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. + +brocade_mlx_temperature_default_levels = (50, 60) + +def compose_item(id, name): + id = re.sub("\.[0-9]+", "", id) + name = re.sub(" *temperature", "", name) + name = re.sub(", sensor [0-9]+", "", name) + name = re.sub("module ?[0-9]*", "Module %s" % id, name) + return name + +def inventory_brocade_mlx_temp(info): + inventory = [] + for temp_descr, temp_id, temp_value in info: + temp_descr = compose_item(temp_id, temp_descr) + # BigIron RX devices have problems when queried by SNMPv2c bulk walk and + # do not deliver values. 
So in this case we do not inventorize them
+        # to avoid breaking the check
+        if ((temp_descr, "brocade_mlx_temperature_default_levels") not in inventory and \
+            temp_value != ""):
+            inventory.append( (temp_descr, "brocade_mlx_temperature_default_levels") )
+    return inventory
+
+def check_brocade_mlx_temp(item, params, info):
+    warn, crit = params
+    status = 0
+    message = []
+    perfdata = []
+
+    for temp_descr, temp_id, temp_value in info:
+        current_item = compose_item(temp_id, temp_descr)
+        if current_item == item:
+            # OID_END needs to be used for the sensor ID because especially
+            # active management modules may have more temperature sensors
+            # with the same description
+            temp_id = re.sub("[0-9]+\.", "", temp_id)
+
+            # some devices do not deliver values on a single check in
+            # between - set the state to unknown in this case
+            if temp_value == "":
+                return 3, "No temperature value delivered by SNMP for sensor " + temp_id
+
+            # Info from the MIB: "Each unit is 0.5 degrees Celsius."
+            temp_value = int(temp_value) / 2
+
+            txt = "Sensor %s: %s°C" % (temp_id, temp_value)
+            if temp_value > crit:
+                status = max(status, 2)
+                txt += "(!!)"
+            elif temp_value > warn:
+                status = max(status, 1)
+                txt += "(!)"
+
+            perfdata.append(('sensor%s' % temp_id, temp_value, warn, crit))
+            message.append(txt)
+
+    if not message:
+        return 3, "Temperature sensor not found"
+    else:
+        return status, ', '.join(message), perfdata
+
+check_info["brocade_mlx_temp"] = {
+    "check_function" : check_brocade_mlx_temp,
+    "inventory_function" : inventory_brocade_mlx_temp,
+    "service_description" : "Temperature %s",
+    "snmp_info" : ('.1.3.6.1.4.1.1991.1.1.2.13.1.1', [ 3, OID_END, 4 ]), # descr, sensor ID, temperature
+    "snmp_scan_function" : lambda oid: oid(".1.3.6.1.2.1.1.2.0").startswith(".1.3.6.1.4.1.1991.1."),
+    "has_perfdata" : True,
+    "group" : "hw_temperature",
+}
diff -Nru check-mk-1.2.2p3/brocade.power check-mk-1.2.6p12/brocade.power
--- check-mk-1.2.2p3/brocade.power 1970-01-01 00:00:00.000000000 +0000
+++ check-mk-1.2.6p12/brocade.power 2015-06-24 09:48:36.000000000 +0000
@@ -0,0 +1,16 @@
+title: Brocade FibreChannel Switches: Power Supplies
+catalog: hw/storagehw/brocade
+agents: snmp
+license: GPL
+distribution: check_mk
+description:
+ This check monitors the state of the power supplies of
+ a Brocade switch.
+
+item:
+ The number of the power supply (1, 2, 3 ...) as described in the SNMP output.
+
+inventory:
+ The inventory creates a service for each power supply unless it is marked as absent
+ in {swSensorStatus}
+
diff -Nru check-mk-1.2.2p3/brocade.temp check-mk-1.2.6p12/brocade.temp
--- check-mk-1.2.2p3/brocade.temp 1970-01-01 00:00:00.000000000 +0000
+++ check-mk-1.2.6p12/brocade.temp 2015-06-24 09:48:36.000000000 +0000
@@ -0,0 +1,23 @@
+title: Brocade FibreChannel Switches: Temperature Sensors
+catalog: hw/storagehw/brocade
+agents: snmp
+license: GPL
+distribution: check_mk
+description:
+ This check monitors the hardware temperatures of a Brocade FC switch.
+
+item:
+ The number of the sensor (1, 2, 3 ...) as described in the SNMP output.
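+
+ For illustration, a minimal sketch of the two-level threshold logic the
+ Brocade temperature checks in this diff apply (a hypothetical helper;
+ warn and crit are plain degree values, as in brocade_mlx_temp above):
+
+   def temp_state(value, warn, crit):
+       # above crit -> CRIT (2), above warn -> WARN (1), otherwise OK (0)
+       if value > crit:
+           return 2
+       elif value > warn:
+           return 1
+       return 0
+
+   temp_state(55, 50, 60)  # -> 1 (WARN)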
+ +perfdata: + The current temperature value for each sensor + +inventory: + The inventory creates a service for each sensor unless it is marked as absent + in {swSensorStatus} + + +[parameters] +warn(int): the temperature at which a WARN state is reached +crit(int): the temperature at which a CRIT state is reached + diff -Nru check-mk-1.2.2p3/brocade_tm check-mk-1.2.6p12/brocade_tm --- check-mk-1.2.2p3/brocade_tm 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/brocade_tm 2015-06-24 09:48:36.000000000 +0000 @@ -0,0 +1,112 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. + + +# FIXME: +# - no camel case in check parameters +# - use friendly output of values. 
Output +# "Ingress Dequeue Packets" instead of "brcdTMStatsIngressDequeuePkts" + +factory_settings["brocade_tm_default_levels"] = { + 'brcdTMStatsTotalIngressPktsCnt': (1000, 10000), + 'brcdTMStatsIngressEnqueuePkts': (1000, 10000), + 'brcdTMStatsEgressEnqueuePkts': (1000, 10000), + 'brcdTMStatsIngressDequeuePkts': (1000, 10000), + 'brcdTMStatsIngressTotalQDiscardPkts': (1000, 10000), + 'brcdTMStatsIngressOldestDiscardPkts': (1000, 10000), + 'brcdTMStatsEgressDiscardPkts': (1000, 10000), +} + +def inventory_brocade_tm(info): + inventory = [] + for line in info: + inventory.append(( line[0], None )) + return inventory + + +def check_brocade_tm(item, params, info): + for line in info: + if line[0] == item: + tm = {} + + tm['TotalIngressPktsCnt'] = line[1] + tm['IngressEnqueuePkts'] = line[2] + tm['EgressEnqueuePkts'] = line[3] + tm['IngressDequeuePkts'] = line[4] + tm['IngressTotalQDiscardPkts'] = line[5] + tm['IngressOldestDiscardPkts'] = line[6] + tm['EgressDiscardPkts'] = line[7] + + now = time.time() + infotext = "" + perfdata = [] + overall_state = 0 + + for name, counter in tm.items(): + rate = get_rate("%s.%s" % (name, item), now, int(counter)) + + warn, crit = params["brcdTMStats" + name] + if re.search("Discard", name): + if rate > crit: + state = 2 + sym = "(!!)" + elif rate > warn: + state = 1 + sym = "(!)" + else: + state = 0 + sym = "" + else: + state = 0 + sym = "" + infotext += "%s: %.1f%s, " % (name, rate, sym) + perfdata.append( (name, rate, warn, crit) ) + overall_state = max(overall_state, state) + + return (overall_state, infotext, perfdata) + + return (3, "Interface not found") + + +check_info["brocade_tm"] = { + 'check_function' : check_brocade_tm, + 'inventory_function' : inventory_brocade_tm, + 'service_description' : 'TM %s', + 'has_perfdata' : True, + 'snmp_scan_function' : lambda oid: oid(".1.3.6.1.2.1.1.2.0").startswith(".1.3.6.1.4.1.1991.1."), + 'group' : 'brocade_tm', + 'default_levels_variable': 'brocade_tm_default_levels', + 'snmp_info' : ( ".1.3.6.1.4.1.1991.1.14.2.1.2.2.1",[ + 3, # 'brcdTMStatsDescription', + 4, # 'brcdTMStatsTotalIngressPktsCnt', + 5, # 'brcdTMStatsIngressEnqueuePkts', + 6, # 'brcdTMStatsEgressEnqueuePkts', + 9, # 'brcdTMStatsIngressDequeuePkts', + 11, # 'brcdTMStatsIngressTotalQDiscardPkts', + 13, # 'brcdTMStatsIngressOldestDiscardPkts', + 15, # 'brcdTMStatsEgressDiscardPkts', + ]), +} diff -Nru check-mk-1.2.2p3/brocade_vdx_status check-mk-1.2.6p12/brocade_vdx_status --- check-mk-1.2.2p3/brocade_vdx_status 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/brocade_vdx_status 2015-06-24 09:48:36.000000000 +0000 @@ -0,0 +1,67 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. 
check_mk is distributed
+# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with-
+# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE. See the GNU General Public License for more de-
+# ails. You should have received a copy of the GNU General Public
+# License along with GNU Make; see the file COPYING. If not, write
+# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
+# Boston, MA 02110-1301 USA.
+
+# Example SNMP data:
+# .1.3.6.1.4.1.1588.2.1.1.1.1.6.0 v4.0.1 Firmware
+# .1.3.6.1.4.1.1588.2.1.1.1.1.7.0 1 Status
+
+
+def inventory_brocade_vdx_status(info):
+    return [ ( None, None )]
+
+def check_brocade_vdx_status(_no_item, _no_params, info):
+    states = {
+        1 : "online",
+        2 : "offline",
+        3 : "testing",
+        4 : "faulty",
+    }
+    firmware = info[0][0]
+    state = saveint(info[0][1])
+    message = "State: %s, Firmware: %s" % ( states.get(state, "unknown (%d)" % state), firmware)
+    if state == 1:
+        return 0, message
+    if state in [ 2, 4 ]:
+        return 2, message
+    if state == 3:
+        return 1, message
+    return 3, message
+
+check_info["brocade_vdx_status"] = {
+    "check_function" : check_brocade_vdx_status,
+    "inventory_function" : inventory_brocade_vdx_status,
+    "service_description" : "Status",
+    # It does not seem to work to exclude several OIDs here; there seem
+    # to be too many devices which do not have the needed OIDs. We try
+    # another approach: check for existence of the first needed OID
+    #not oid('.1.3.6.1.2.1.1.2.0').startswith( ".1.3.6.1.4.1.1588.2.1.1.1"),
+    "snmp_scan_function" : lambda oid: oid('.1.3.6.1.2.1.1.2.0').startswith( ".1.3.6.1.4.1.1588") \
+        and oid(".1.3.6.1.4.1.1588.2.1.1.1.1.6") != None,
+    "snmp_info" : ( ".1.3.6.1.4.1.1588.2.1.1.1.1", [
+        6, # Firmware
+        7 # Status
+    ] ),
+}
+
diff -Nru check-mk-1.2.2p3/canon_pages check-mk-1.2.6p12/canon_pages
--- check-mk-1.2.2p3/canon_pages 2013-11-05 09:23:07.000000000 +0000
+++ check-mk-1.2.6p12/canon_pages 2015-09-21 10:59:54.000000000 +0000
@@ -7,7 +7,7 @@
 # | | |___| | | | __/ (__| < | | | | . \ |
 # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ |
 # | |
-# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de |
+# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de |
 # +------------------------------------------------------------------+
 #
 # This file is part of Check_MK.
@@ -46,17 +46,19 @@ def check_canon_pages(item, _no_params, info): if item not in canon_pages_map: - return (3, "UNKNOWN - unknown counter type %s" % item) + return (3, "unknown counter type %s" % item) for endoid, value in info: if endoid == canon_pages_map[item]: - return (0, 'OK - Current count: %s' % value, [('count', int(value))]) + return (0, '%s pages printed' % value, [('count', int(value))]) - return (3, 'UNKNOWN - Item not found in SNMP data') + return (3, 'Item not found in SNMP data') +check_info["canon_pages"] = { + 'check_function': check_canon_pages, + 'inventory_function': inventory_canon_pages, + 'service_description': 'Pages %s', + 'has_perfdata': True, + 'snmp_info': ( ".1.3.6.1.4.1.1602.1.11.1.3.1.4", [ OID_END, "" ]), + 'snmp_scan_function': lambda oid: oid(".1.3.6.1.4.1.1602.1.1.1.1.0") != None +} -check_info['canon_pages'] = (check_canon_pages, "Pages %s", 1, inventory_canon_pages) - -snmp_info['canon_pages'] = \ - ( ".1.3.6.1.4.1.1602.1.11.1.3.1.4", [ OID_END, "" ]) - -snmp_scan_functions['canon_pages'] = lambda oid: oid(".1.3.6.1.4.1.1602.1.1.1.1.0") != None diff -Nru check-mk-1.2.2p3/carel_sensors check-mk-1.2.6p12/carel_sensors --- check-mk-1.2.2p3/carel_sensors 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/carel_sensors 2015-09-21 10:59:54.000000000 +0000 @@ -0,0 +1,107 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. + +# This is not intended for overriding in main.mk, as any changes will require +# re-inventory. +carel_temp_defaultlevels = { # This still needs sensible values + "Room" : (30, 35), + "Outdoor" : (60, 70), + "Delivery" : (60, 70), + "Cold Water" : (60, 70), + "Hot Water" : (60, 70), + "Cold Water Outlet" : (60, 70), + "Circuit 1 Suction" : (60, 70), + "Circuit 2 Suction" : (60, 70), + "Circuit 1 Evap" : (60, 70), + "Circuit 2 Evap" : (60, 70), + "Circuit 1 Superheat" : (60, 70), + "Circuit 2 Superheat" : (60, 70), + "Cooling Set Point" : (60, 70), + "Cooling Prop. Band" : (60, 70), + "Cooling 2nd Set Point" : (60, 70), + "Heating Set Point" : (60, 70), + "Heating 2nd Set Point" : (60, 70), + "Heating Prop. 
Band" : (60, 70), +} + +def carel_sensors_parse(info): + + oidtothing = { + "1.0" : ("temp", "Room"), + "2.0" : ("temp", "Outdoor"), + "3.0" : ("temp", "Delivery"), + "4.0" : ("temp", "Cold Water"), + "5.0" : ("temp", "Hot Water"), + "7.0" : ("temp", "Cold Water Outlet"), + "10.0" : ("temp", "Circuit 1 Suction"), + "11.0" : ("temp", "Circuit 2 Suction"), + "12.0" : ("temp", "Circuit 1 Evap"), + "13.0" : ("temp", "Circuit 2 Evap"), + "14.0" : ("temp", "Circuit 1 Superheat"), + "15.0" : ("temp", "Circuit 2 Superheat"), + "20.0" : ("temp", "Cooling Set Point"), + "21.0" : ("temp", "Cooling Prop. Band"), + "22.0" : ("temp", "Cooling 2nd Set Point"), + "23.0" : ("temp", "Heating Set Point"), + "24.0" : ("temp", "Heating 2nd Set Point"), + "25.0" : ("temp", "Heating Prop. Band"), + } + + parsed = {} + parsed["temp"] = {} + for oidend, value in info: + quantity, sensor = oidtothing.get(oidend, (None, None)) + if quantity == "temp": + if value and value != "0" and value != "-9999": + parsed[quantity][sensor] = float(value) / 10 + + return parsed + + +def inventory_carel_sensors_temp(parsed): + for thing in parsed["temp"].keys(): + yield thing, carel_temp_defaultlevels[thing] + + +def check_carel_sensors_temp(item, params, parsed): + if item in parsed["temp"]: + return check_temperature(parsed["temp"][item], params) + + +check_info["carel_sensors"] = { + "parse_function" : carel_sensors_parse, + "inventory_function" : inventory_carel_sensors_temp, + "check_function" : check_carel_sensors_temp, + "service_description" : "Temperature %s", + "group" : "room_temperature", + "has_perfdata" : True, + "snmp_info" : ( ".1.3.6.1.4.1.9839.2.1", [ OID_END, "2" ] ), + "snmp_scan_function" : lambda oid: ( "pCO" in oid(".1.3.6.1.2.1.1.1.0") or \ + oid(".1.3.6.1.2.1.1.1.0").endswith("armv4l") + ) and \ + oid(".1.3.6.1.4.1.9839.1.1.0") , + "includes" : [ "temperature.include" ], +} diff -Nru check-mk-1.2.2p3/carel_uniflair_cooling check-mk-1.2.6p12/carel_uniflair_cooling --- check-mk-1.2.2p3/carel_uniflair_cooling 2013-11-05 09:23:07.000000000 +0000 +++ check-mk-1.2.6p12/carel_uniflair_cooling 2015-09-21 10:59:54.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. @@ -58,11 +58,11 @@ perfdata = [ ("humidity", humidity) ] if err_waterloss or err_global_status or err_emergency_op: - return (2, "CRITICAL - %s" % output, perfdata) + return (2, output, perfdata) else: - return (0, "OK - %s" % output, perfdata) + return (0, output, perfdata) - return (3, "UNKNOWN - Unknown data from agent", perfdata) + return (3, "Unknown data from agent", perfdata) check_info["carel_uniflair_cooling"] = { diff -Nru check-mk-1.2.2p3/casa_cpu_mem check-mk-1.2.6p12/casa_cpu_mem --- check-mk-1.2.2p3/casa_cpu_mem 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/casa_cpu_mem 2015-09-21 10:59:54.000000000 +0000 @@ -0,0 +1,104 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . 
\ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. + +def parse_casa_info_mem(info): + entity_names = dict(map(lambda x: [int(x[0]), x[1]], info[0])) + data = {} + for idx, entry in enumerate(info[1]): + entry_nr = int(entry[0]) + data[entity_names[entry_nr]] = { + "mem_total" : info[1][idx][1], + "mem_used" : info[2][idx][1], + } + return data + +def inventory_casa_cpu_mem(info): + data = parse_casa_info_mem(info) + inventory = [] + for key, value in data.items(): + if value.get("mem_total"): + inventory.append( (key, {}) ) + return inventory + +def check_casa_cpu_mem(item, params, info): + data = parse_casa_info_mem(info) + perfdata = [] + if item in data: + state = 0 + mem_total = float(data[item]["mem_total"]) + mem_used = float(data[item]["mem_used"]) + error_text = "" + levels_text = "" + warn, crit = None, None + if "levels" in params: + warn, crit = params["levels"] + if type(warn) == int: + levels_text = "(levels at %s/%s)" % ( + get_bytes_human_readable(warn), + get_bytes_human_readable(crit), + ) + if mem_used > crit: + state = 2 + error_text = "(!!)" + elif mem_used > warn: + state = 1 + error_text = "(!)" + else: + levels_text = "(levels at %s%%/%s%% used)" % (warn, crit) + perc_used = (mem_used / mem_total) * 100.0 + if perc_used > crit: + state = 2 + error_text = "(!!)" + elif perc_used > warn: + state = 1 + error_text = "(!)" + + + perfdata.append( ("used", mem_used, warn, crit, 0, mem_total) ) + mem_total = get_bytes_human_readable(mem_total, base=1000.0) + mem_used = get_bytes_human_readable(mem_used, base=1000.0) + infotext = "Total: %s, Used: %s %s%s" % \ + (mem_total, mem_used, error_text, levels_text) + return (state, infotext, perfdata) + else: + return (3, "%s not found in snmp output" % item, perfdata) + + + +check_info["casa_cpu_mem"] = { + "check_function" : check_casa_cpu_mem, + "inventory_function" : inventory_casa_cpu_mem, + "service_description" : "Memory %s", + "has_perfdata" : True, + "group" : "memory_multiitem", + "snmp_scan_function" : lambda oid: oid(".1.3.6.1.2.1.1.2.0").startswith(".1.3.6.1.4.1.20858.2."), + "snmp_info" : [ + ( ".1.3.6.1.2.1.47.1.1.1.1.2", [ OID_END, '' ] ), # Entity descriptions, quite long... 
+ ( ".1.3.6.1.4.1.20858.10.13.1.1.1.1", [ OID_END, '' ] ), # Total mem + ( ".1.3.6.1.4.1.20858.10.13.1.1.1.2", [ OID_END, '' ] ), # Total allocated mem + ( ".1.3.6.1.4.1.20858.10.36.1.1.1.1", [ OID_END, '' ] ), # Installed slot + ] +} diff -Nru check-mk-1.2.2p3/casa_cpu_temp check-mk-1.2.6p12/casa_cpu_temp --- check-mk-1.2.2p3/casa_cpu_temp 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/casa_cpu_temp 2015-09-21 10:59:54.000000000 +0000 @@ -0,0 +1,97 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. + +def parse_casa_info_temp(info): + entity_names = dict(map(lambda x: [int(x[0]), x[1]], info[0])) + temp_value = dict(map(lambda x: [int(x[0]), x[1]], info[1])) + temp_status = dict(map(lambda x: [int(x[0]), x[1]], info[2])) + temp_unit = dict(map(lambda x: [int(x[0]), x[1]], info[3])) + data = {} + for entry in info[1]: + entry_nr = int(entry[0]) + def beautify_module_text(text): + text = text.replace("temperature sensor", "") + if text.startswith("Module "): + text = text.rsplit(None, 1)[0] # Drop trailing " CPU" + return text + data[beautify_module_text(entity_names[entry_nr])] = { + "temp_value" : temp_value.get(entry_nr), + "temp_status" : temp_status.get(entry_nr), + "temp_unit" : temp_unit.get(entry_nr), + } + return data + +def inventory_casa_cpu_temp(info): + data = parse_casa_info_temp(info) + inventory = [] + for key, value in data.items(): + if value.get("temp_value"): + inventory.append( (key, None) ) + return inventory + +def check_casa_cpu_temp(item, params, info): + data = parse_casa_info_temp(info) + state = 0 + perfdata = [] + if item in data: + if data[item]["temp_status"] == "1": + value = float(data[item]["temp_value"]) / 10 + levels_text = "" + error_text = "" + warn, crit = None, None + if params: + warn, crit = params + levels_text = "(levels at %d/%d°C)" % params + if value > crit: + state = 2 + error_text = "(!!)" + elif value > warn: + state = 1 + error_text = "(!)" + perfdata.append( ("temp", value, warn, crit, 0, 100) ) + infotext = "Temperature is %.1f °C %s%s" % (value, error_text, levels_text) + else: + state = 2 + infotext = "Sensor failure!" 
+ return (state, infotext, perfdata) + else: + return (3, "%s not found in snmp output" % item, perfdata) + + +check_info["casa_cpu_temp"] = { + "check_function" : check_casa_cpu_temp, + "inventory_function" : inventory_casa_cpu_temp, + "service_description" : "Temperature %s", + "group" : "hw_temperature", + "has_perfdata" : True, + "snmp_scan_function" : lambda oid: oid(".1.3.6.1.2.1.1.2.0").startswith(".1.3.6.1.4.1.20858.2."), + "snmp_info" : [ + ( ".1.3.6.1.2.1.47.1.1.1.1.2", [ OID_END, '' ] ), # Entity descriptions, quite long... + ( ".1.3.6.1.2.1.99.1.1.1.4", [ OID_END, '' ] ), # Temperatures, Value + ( ".1.3.6.1.2.1.99.1.1.1.5", [ OID_END, '' ] ), # Temperatures, Status + ( ".1.3.6.1.2.1.99.1.1.1.6", [ OID_END, '' ] ), # Temperatures, Unit + ] +} diff -Nru check-mk-1.2.2p3/casa_cpu_util check-mk-1.2.6p12/casa_cpu_util --- check-mk-1.2.2p3/casa_cpu_util 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/casa_cpu_util 2015-09-21 10:59:54.000000000 +0000 @@ -0,0 +1,84 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. + +def parse_casa_info_util(info): + entity_names = dict(map(lambda x: [int(x[0]), x[1]], info[0])) + data = {} + for entry in info[1]: + entry_nr = int(entry[0]) + name = entity_names[entry_nr] # e.g. "Module 1 QEM". + # Drop "QEM" in order to be consistent with other DTCS checks... 
+ if name.startswith("Module "): + name = name.rsplit(None, 1)[0] + data[name] = { + "cpu_util" : entry[1], + } + return data + +def inventory_casa_cpu_util(info): + data = parse_casa_info_util(info) + inventory = [] + for key, value in data.items(): + if value.get("cpu_util"): + inventory.append( (key, {}) ) + return inventory + +def check_casa_cpu_util(item, params, info): + data = parse_casa_info_util(info) + if item in data: + state = 0 + value = int(data[item]["cpu_util"]) + levels_text = "" + error_text = "" + warn, crit = None, None + if "levels" in params: + warn, crit = params["levels"] + levels_text = "(levels at %s%%/%s%%)" % (warn, crit) + if value > crit: + state = 2 + elif value > warn: + state = 1 + else: + levels_text = "" + infotext = "%d%% %s%s" % (value, error_text, levels_text) + perfdata = [ ("util", value, warn, crit, 0, 100) ] + return (state, infotext, perfdata) + else: + return 3, "%s not found in SNMP output" % item + + +check_info["casa_cpu_util"] = { + "check_function" : check_casa_cpu_util, + "inventory_function" : inventory_casa_cpu_util, + "service_description" : "CPU utilization %s", + "has_perfdata" : True, + "group" : "cpu_utilization_multiitem", + "snmp_scan_function" : lambda oid: oid(".1.3.6.1.2.1.1.2.0").startswith(".1.3.6.1.4.1.20858.2."), + "snmp_info" : [ + ( ".1.3.6.1.2.1.47.1.1.1.1.2", [ OID_END, '' ] ), # Entity descriptions, quite long... + ( ".1.3.6.1.4.1.20858.10.13.1.1.1.4", [ OID_END, '' ] ), # CPU utilization + ] +} diff -Nru check-mk-1.2.2p3/casa_fan check-mk-1.2.6p12/casa_fan --- check-mk-1.2.2p3/casa_fan 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/casa_fan 2015-06-24 09:48:36.000000000 +0000 @@ -0,0 +1,62 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. 
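+
+# For illustration only: a minimal sketch of the entity-name normalisation
+# performed by parse_casa_info_util above (a hypothetical standalone helper):
+def normalize_casa_module_name(name):
+    # "Module 1 QEM" -> "Module 1": the trailing word is dropped to stay
+    # consistent with the item names of the other casa checks
+    if name.startswith("Module "):
+        return name.rsplit(None, 1)[0]
+    return name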
+ + +def inventory_casa_fan(info): + inventory = [] + for nr, speed in info[0]: + inventory.append( (nr, None) ) + return inventory + +def check_casa_fan(item, no_params, info): + for idx, (nr, speed) in enumerate(info[0]): + if item == nr: + fan_status = info[1][idx][1] + if fan_status == "1": + return (0, "%s RPM" % speed) + elif fan_status == "3": + return (1, "%s RPM, running over threshold (!)" % speed) + elif fan_status == "2": + return (1, "%s RPM, running under threshold (!)" % speed) + elif fan_status == "0": + return (3, "%s RPM, unknown fan status (!)" % speed) + elif fan_status == "4": + return (2, "FAN Failure (!!)") + else: + return (3, "Fan %s not found in snmp output" % item) + + +check_info["casa_fan"] = { + "check_function" : check_casa_fan, + "inventory_function" : inventory_casa_fan, + "service_description" : "Fan %s", + "has_perfdata" : False, + "snmp_scan_function" : lambda oid: oid(".1.3.6.1.2.1.1.2.0").startswith(".1.3.6.1.4.1.20858.2."), + "snmp_info" : [ + ( ".1.3.6.1.4.1.20858.10.31.1.1.1.2", [ OID_END, '' ] ), # FAN Speed + ( ".1.3.6.1.4.1.20858.10.33.1.4.1.4", [ OID_END, '' ] ), # FAN State + ] +} diff -Nru check-mk-1.2.2p3/casa_power check-mk-1.2.6p12/casa_power --- check-mk-1.2.2p3/casa_power 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/casa_power 2015-06-24 09:48:36.000000000 +0000 @@ -0,0 +1,61 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. + + +def inventory_casa_power(info): + inventory = [] + for idx, unit in enumerate(info): + inventory.append( (idx, None) ) + return inventory + +def check_casa_power(item, no_params, info): + unit_nr = int(item) + if len(info) < unit_nr: + return (3, "Power Supply %s not found in snmp output" % item) + + status = info[unit_nr][0] + if status == "1": + return (0, "Power supply OK") + elif status == "3": # not sure if this state is possible + return (1, "Power supply working over threshold (!)") + elif status == "2": # not sure if this state is possible + return (0, "Power supply working under threshold (!)") # OK, backup power.. 
+ elif status == "0": + return (3, "Power supply - Unknown status (!)") + elif status == "4": + return (2, "Power Failure(!!)") + + +check_info["casa_power"] = { + "check_function" : check_casa_power, + "inventory_function" : inventory_casa_power, + "service_description" : "Power %s", + "has_perfdata" : False, + "snmp_scan_function" : lambda oid: oid(".1.3.6.1.2.1.1.2.0").startswith(".1.3.6.1.4.1.20858.2."), + "snmp_info" : + ( ".1.3.6.1.4.1.20858.10.33.1.5.1.4", [ '' ] ), # Power State + +} diff -Nru check-mk-1.2.2p3/catalog.py check-mk-1.2.6p12/catalog.py --- check-mk-1.2.2p3/catalog.py 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/catalog.py 2015-09-21 10:59:54.000000000 +0000 @@ -0,0 +1,135 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. 
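+
+# For illustration only: a minimal sketch of how a man page "catalog" path
+# such as "hw/storagehw/brocade" could be resolved into readable titles with
+# the manpage_catalog_titles dict defined below (usage is an assumption):
+def catalog_path_titles(path):
+    # "hw/storagehw/brocade" -> ["Appliances, other dedicated Hardware",
+    #                            "Storage (filers, SAN, tape libs)", "Brocade"]
+    return [ manpage_catalog_titles.get(part, part) for part in path.split("/") ]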
+ +manpage_catalog_titles = { + "hw" : "Appliances, other dedicated Hardware", + "environment" : "Environmental sensors", + "akcp" : "AKCP", + "allnet" : "ALLNET", + "betternet" : "better networks", + "carel" : "CAREL", + "eaton" : "Eaton", + "emerson" : "EMERSON", + "hwg" : "HW group", + "rittal" : "Rittal", + "sensatronics" : "Sensatronics", + "socomec" : "Socomec", + "stulz" : "STULZ", + "wut" : "Wiesemann & Theis", + "wagner" : "WAGNER Group", + "climaventa" : "Climaventa", + "other" : "Other devices", + "network" : "Networking (Switches, Routers, etc.)", + "alcatel" : "Alcatel", + "avm" : "AVM", + "bintec" : "Bintec", + "cbl" : "Communication by light (CBL)", + "checkpoint" : "Checkpoint", + "cisco" : "Cisco Systems (also IronPort)", + "casa" : "Casa", + "decru" : "Decru", + "dell" : "DELL", + "f5" : "F5 Networks", + "fortinet" : "Fortinet", + "genua" : "genua", + "h3c" : "H3C Technologies (also 3Com)", + "hp" : "Hewlett-Packard (HP)", + "juniper" : "Juniper Networks", + "kemp" : "KEMP", + "lancom" : "LANCOM Systems GmbH", + "mikrotik" : "MikroTik", + "netgear" : "Netgear", + "qnap" : "QNAP Systems", + "riverbed" : "Riverbed Technology", + "symantec" : "Symantec", + "viprinet" : "Viprinet", + "bluecoat" : "Blue Coat Systems", + "tplink" : "TP-LINK", + "power" : "Power supplies and PDUs", + "apc" : "APC", + "gude" : "Gude", + "printer" : "Printers", + "server" : "Servers (management boards, blade enclosures)", + "ibm" : "IBM", + "storagehw" : "Storage (filers, SAN, tape libs)", + "brocade" : "Brocade", + "fastlta" : "FAST LTA", + "fujitsu" : "Fujitsu", + "mcdata" : "McDATA", + "netapp" : "NetApp", + "hitachi" : "Hitachi", + "emc" : "EMC", + "qlogic" : "QLogic", + "quantum" : "Quantum", + "phone" : "Telephony", + + "app" : "Applications", + "apache" : "Apache Webserver", + "db2" : "IBM DB2", + "citrix" : "Citrix", + "exchange" : "Microsoft Exchange", + "java" : "Java (Tomcat, Weblogic, JBoss, etc.)", + "libelle" : "Libelle Business Shadow", + "lotusnotes" : "IBM Lotus Domino", + "mailman" : "Mailman", + "mssql" : "Microsoft SQL Server", + "mysql" : "MySQL", + "omd" : "Open Monitoring Distribution (OMD)", + "oracle" : "ORACLE Database", + "postfix" : "Postfix", + "postgresql" : "PostgreSQL", + "qmail" : "qmail", + "sap" : "SAP R/3", + "tsm" : "IBM Tivoli Storage Manager (TSM)", + "unitrends" : "Unitrends", + + "os" : "Operating Systems", + "aix" : "AIX", + "freebsd" : "FreeBSD", + "hpux" : "HP-UX", + "linux" : "Linux", + "macosx" : "Mac OS X", + "netbsd" : "NetBSD", + "openbsd" : "OpenBSD", + "openvms" : "OpenVMS", + "snmp" : "SNMP based generic and hardware checks", + "solaris" : "Solaris", + "vsphere" : "VMWare ESX (via vSphere)", + "windows" : "Microsoft Windows", + + "hardware" : "Hardware Sensors", + "kernel" : "CPU, Memory and Kernel Performance", + "ps" : "Processes, Services and Jobs", + "files" : "Files and Logfiles", + "services" : "Specific Daemons and Operating System Services", + "networking" : "Networking", + "misc" : "Miscellaneous", + "storage" : "Filesystems, Disks and RAID", + + "agentless" : "Networking checks without agent", + "generic" : "Generic check plugins", + "unsorted" : "Uncategorized", +} diff -Nru check-mk-1.2.2p3/cbl_airlaser check-mk-1.2.6p12/cbl_airlaser --- check-mk-1.2.2p3/cbl_airlaser 2013-11-05 09:23:07.000000000 +0000 +++ check-mk-1.2.6p12/cbl_airlaser 2015-06-24 09:48:36.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . 
\ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. @@ -166,7 +166,7 @@ if state == 0: msgtxt = "All sensors OK" - return (state, nagios_state_names[state] + " - " + msgtxt, perfdata) + return (state, msgtxt, perfdata) def inventory_cbl_airlaser(info): @@ -178,21 +178,21 @@ def check_cbl_airlaser_status(item, _no_params, info): if len(info) == 0: - return (3, "UNKNOWN - no information sent by agent") + return (3, "no information sent by agent") selftest, chassis, power, module, optrx, opttx = info status = selftest[0][0] if status == "1": - return (0, "OK - Airlaser: normal operation") + return (0, "Airlaser: normal operation") elif status == "2": - return (1, "WARN - Airlaser: testing mode") + return (1, "Airlaser: testing mode") elif status == "3": - return (1, "WARN - Airlaser: warning condition") + return (1, "Airlaser: warning condition") elif status == "4": - return (2, "CRIT - Airlaser: a component has failed self-tests") + return (2, "Airlaser: a component has failed self-tests") - return (3, "UNKNOWN - Unknown data from agent") + return (3, "Unknown data from agent") check_info["cbl_airlaser.status"] = { diff -Nru check-mk-1.2.2p3/cbl_airlaser.hardware check-mk-1.2.6p12/cbl_airlaser.hardware --- check-mk-1.2.2p3/cbl_airlaser.hardware 2013-11-05 09:23:07.000000000 +0000 +++ check-mk-1.2.6p12/cbl_airlaser.hardware 2015-06-24 09:48:36.000000000 +0000 @@ -1,6 +1,6 @@ -title: Check of runtime Status CBL Airlaser +title: CBL Airlaser: General Status agents: snmp -author: Florian Heigl +catalog: hw/network/cbl license: GPL distribution: check_mk description: diff -Nru check-mk-1.2.2p3/cbl_airlaser.status check-mk-1.2.6p12/cbl_airlaser.status --- check-mk-1.2.2p3/cbl_airlaser.status 2013-11-05 09:23:07.000000000 +0000 +++ check-mk-1.2.6p12/cbl_airlaser.status 2015-06-24 09:48:36.000000000 +0000 @@ -1,6 +1,6 @@ -title: Check of Powerup Status CBL Airlaser +title: CBL Airlaser: Powerup Status agents: snmp -author: Andreas Boesl +catalog: hw/network/cbl license: GPL distribution: check_mk description: diff -Nru check-mk-1.2.2p3/cfg_examples/apache_status.cfg check-mk-1.2.6p12/cfg_examples/apache_status.cfg --- check-mk-1.2.2p3/cfg_examples/apache_status.cfg 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/cfg_examples/apache_status.cfg 2015-06-24 09:48:39.000000000 +0000 @@ -0,0 +1,47 @@ +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. 
You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. + +# Example for configuration for apache_status plugin +# Note: you need this file only if the autodetection fails +# or you do not want to contact all servers it detects + +# Note: Activate this only if the autodetection fails. +#servers = [ +#{ +# 'protocol' : 'http', +# 'address' : 'localhost', +# 'port' : 80 , +#}, +#{ +# 'protocol' : 'http', +# 'address' : 'localhost', +# 'port' : 8080 , +#}, +#{ +# 'protocol' : 'https', +# 'address' : 'localhost', +# 'port' : 443 , +#}, +#] + diff -Nru check-mk-1.2.2p3/cfg_examples/jolokia.cfg check-mk-1.2.6p12/cfg_examples/jolokia.cfg --- check-mk-1.2.2p3/cfg_examples/jolokia.cfg 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/cfg_examples/jolokia.cfg 2015-06-24 09:48:39.000000000 +0000 @@ -0,0 +1,50 @@ +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. + +# Default settings or settings for only one +# instance: + +# Settings for authentication +# server = "127.0.0.1" +# user = "monitoring" +# password = None +# mode = "digest" +suburi = "jolokia" +instance = None + +# Configuration for multiple instances. Not-specified +# values will be taken from the upper settings +# instances = [ +# { +# "port" : 8080, +# "instance" : "FOO", +# }, +# { +# "server" : "10.1.88.5", +# "port" : 8081, +# "instance" : "BAR", +# "user" : "harri", +# "password" : "hirsch", +# } +# ] diff -Nru check-mk-1.2.2p3/cfg_examples/logwatch.cfg check-mk-1.2.6p12/cfg_examples/logwatch.cfg --- check-mk-1.2.2p3/cfg_examples/logwatch.cfg 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/cfg_examples/logwatch.cfg 2015-06-24 09:48:39.000000000 +0000 @@ -0,0 +1,59 @@ +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. 
+#
+# check_mk is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by
+# the Free Software Foundation in version 2. check_mk is distributed
+# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with-
+# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE. See the GNU General Public License for more de-
+# ails. You should have received a copy of the GNU General Public
+# License along with GNU Make; see the file COPYING. If not, write
+# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
+# Boston, MA 02110-1301 USA.
+
+# logwatch.cfg
+# This file configures mk_logwatch. Define your logfiles
+# and the patterns to be looked for here.
+
+# Name one or more logfiles
+/var/log/messages
+# Patterns are indented with one space and are prefixed with:
+# C: Critical messages
+# W: Warning messages
+# I: ignore these lines (OK)
+# R: Rewrite the output of the previous match. You can use \1, \2 etc. to refer to groups (.*) of that match
+# The first match decides. Lines that do not match any pattern
+# are ignored
+ C Fail event detected on md device
+ I mdadm.*: Rebuild.*event detected
+ W mdadm\[
+ W ata.*hard resetting link
+ W ata.*soft reset failed (.*FIS failed)
+ W device-mapper: thin:.*reached low water mark
+ C device-mapper: thin:.*no free space
+ C Error: (.*)
+
+/var/log/auth.log
+ W sshd.*Corrupted MAC on input
+
+/var/log/syslog /var/log/kern.log
+ I registered panic notifier
+ C panic
+ C Oops
+ W generic protection rip
+ W .*Unrecovered read error - auto reallocate failed
+
+# Globbing patterns are allowed:
+# /sapdata/*/saptrans.log
+# C ORA-
diff -Nru check-mk-1.2.2p3/cfg_examples/nginx_status.cfg check-mk-1.2.6p12/cfg_examples/nginx_status.cfg
--- check-mk-1.2.2p3/cfg_examples/nginx_status.cfg 1970-01-01 00:00:00.000000000 +0000
+++ check-mk-1.2.6p12/cfg_examples/nginx_status.cfg 2015-06-24 09:48:39.000000000 +0000
@@ -0,0 +1,16 @@
+# Example configuration file. If you do not supply
+# a configuration file, autodetection is tried.
+
+servers = [
+    {
+        "protocol" : "http",
+        "address"  : "localhost",
+        "port"     : 80,
+    },
+    {
+        "protocol" : "https",
+        "address"  : "localhost",
+        "port"     : 443,
+        "page"     : "nginx_status",
+    },
+]
diff -Nru check-mk-1.2.2p3/cfg_examples/sqlnet.ora check-mk-1.2.6p12/cfg_examples/sqlnet.ora
--- check-mk-1.2.2p3/cfg_examples/sqlnet.ora 1970-01-01 00:00:00.000000000 +0000
+++ check-mk-1.2.6p12/cfg_examples/sqlnet.ora 2015-06-24 09:48:39.000000000 +0000
@@ -0,0 +1,11 @@
+LOG_DIRECTORY_CLIENT = /var/log/check_mk/oracle_client
+DIAG_ADR_ENABLED = OFF
+
+SQLNET.WALLET_OVERRIDE = FALSE
+WALLET_LOCATION =
+  (SOURCE=
+    (METHOD = FILE)
+    (METHOD_DATA = (DIRECTORY=/etc/check_mk/oracle_wallet))
+  )
+
+
diff -Nru check-mk-1.2.2p3/cfg_examples/sqlplus.sh check-mk-1.2.6p12/cfg_examples/sqlplus.sh
--- check-mk-1.2.2p3/cfg_examples/sqlplus.sh 1970-01-01 00:00:00.000000000 +0000
+++ check-mk-1.2.6p12/cfg_examples/sqlplus.sh 2015-06-24 09:48:39.000000000 +0000
@@ -0,0 +1,47 @@
+#!/bin/sh
+# +------------------------------------------------------------------+
+# |             ____ _               _        __  __ _  __           |
+# |            / ___| |__   ___  ___| | __   |  \/  | |/ /           |
+# |           | |   | '_ \ / _ \/ __| |/ /   | |\/| | ' /            |
+# |           | |___| | | |  __/ (__|   <    | |  | | . \            |
+# |            \____|_| |_|\___|\___|_|\_\___|_|  |_|_|\_\           |
+# |                                                                  |
+# | Copyright Mathias Kettner 2014             mk@mathias-kettner.de |
+# +------------------------------------------------------------------+
+#
+# This file is part of Check_MK.
+# The official homepage is at http://mathias-kettner.de/check_mk.
+#
+# check_mk is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by
+# the Free Software Foundation in version 2. check_mk is distributed
+# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with-
+# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE. See the GNU General Public License for more de-
+# ails. You should have received a copy of the GNU General Public
+# License along with GNU Make; see the file COPYING. If not, write
+# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
+# Boston, MA 02110-1301 USA.
+
+# EXAMPLE
+
+# This script is called by the Check_MK ORACLE plugin in order to
+# execute an SQL query.
+
+# It is your task to adapt this script so that the ORACLE environment
+# is set up and the correct user is chosen to execute sqlplus.
+
+# The script will get the query on stdin and shall output the
+# result on stdout. Error messages go to stderr.
+
+ORACLE_SID=$1
+if [ -z "$ORACLE_SID" ] ; then
+    echo "Usage: $0 ORACLE_SID" >&2
+    exit 1
+fi
+
+su nagios -c "
+ORACLE_SID=$ORACLE_SID
+ORAENV_ASK=NO
+. /usr/local/bin/oraenv
+sqlplus -s /"
diff -Nru check-mk-1.2.2p3/ChangeLog check-mk-1.2.6p12/ChangeLog
--- check-mk-1.2.2p3/ChangeLog 2013-11-05 09:22:41.000000000 +0000
+++ check-mk-1.2.6p12/ChangeLog 2015-09-21 10:59:53.000000000 +0000
@@ -1,7 +1,2865 @@
+1.2.6p12:
+    WATO:
+    * 2599 FIX: Fix newly introduced fatal bug preventing creation of new hosts in WATO...
+
+
+1.2.6p11:
+    Checks & Agents:
+    * 2455 FIX: raritan_pdu_inlet: Fixed scaling...
+    * 1280 FIX: check_mail_loop: Fixed name of protocol parameter
+    * 1284 FIX: symantec_av_progstate: Now supports the current version of Symantec AV
+    * 1285 FIX: blade_bx_powerfan: Fixed exception in case of fan failure
+    * 2545 FIX: akcp_daisy_temp: No longer trying to discover services on sensorProbe8-X20 devices
+    * 2564 FIX: logins: Added missing man page
+    * 2575 FIX: cpu.loads: Fix exception in displaying parameters for CPU load check...
+    * 2576 FIX: symantec_av_updates: fix crash due to missing datetime module, also handle DD.MM.YYYY date format
+    * 2567 FIX: jolokia_metrics: fixed crash if agent output is missing memory metrics
+    * 2577 FIX: printer_input, printer_output: Fix exception on Brother NC-340h
+    * 2554 FIX: oracle_recovery_status: Fixed exception when files have no checkpoints
+    * 2568 FIX: apache_status: fixed computation of bytes and requests per second...
+    * 2557 FIX: cmciii_lcp_airin cmciii_lcp_airout cmciii_lcp_waterin cmciii_lcp_waterout: Fixed exception parsing data from some devices
+    * 2558 FIX: cmciii.temp: Do not discover air temperature sensors anymore...
+    * 2582 FIX: printer_supply_ricoh: Fix exception when parameters have been set via WATO
+    * 1290 FIX: brocade_mlx_fan: No longer adds fans that are not present during discovery
+    * 2569 FIX: fixed crash in aix lvm check after volume disappears...
+    * 2559 FIX: job: Trying to read job result files as non-root when agent is not executed as root
+    * 2570 FIX: winperf_msx_queues: fixed crash when winperf data is missing...
+    * 2593 FIX: cisco_wlc, cisco_wlc_clients: Fix discovery so that Cisco Virtual Wireless Controllers are detected...
+    * 2561 FIX: check-mk-agent rpm is now also replacing check_mk_agent named rpms...
+    * 2609 FIX: mysql_capacity: Can now handle sizes reported as NULL...
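The mk_logwatch example configuration shown further above defines per-logfile patterns whose one-letter prefix (C, W, I, R) classifies each matching line, with the first match deciding. A minimal sketch of that first-match logic in Python; the pattern table and names here are illustrative only, not mk_logwatch's actual implementation:

    import re

    # Rule table in the spirit of the logwatch.cfg example above:
    # (level, pattern) pairs, where C = critical, W = warning, I = ignore.
    LOGWATCH_PATTERNS = [
        ("C", re.compile(r"Fail event detected on md device")),
        ("I", re.compile(r"mdadm.*: Rebuild.*event detected")),
        ("W", re.compile(r"mdadm\[")),
        ("C", re.compile(r"Error: (.*)")),
    ]

    def classify_line(line):
        # The first matching pattern decides; lines matching no pattern
        # are ignored, i.e. treated as OK.
        for level, pattern in LOGWATCH_PATTERNS:
            if pattern.search(line):
                return level
        return "I"

    print(classify_line("kernel: Fail event detected on md device md0"))  # -> C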
+
+    Multisite:
+    * 2612 SEC: Fixed possible XSS on service detail page using the long service output...
+    * 2613 SEC: Additional fix for reflected XSS on index page using start_url...
+    * 2340 FIX: Email validation: Top level domains can now have a maximum length of 24 characters
+    * 2509 FIX: Logwatch: Fixed exception when deleting a specific logfile on multiple hosts
+    * 2510 FIX: Fixed sidebar frame scaling in Chrome when using browser zoom
+    * 2548 FIX: Fixed exceptions in different places in case of user errors...
+    * 2549 FIX: Silently ignore non-existing painters and sorters in views...
+    * 2532 FIX: Fixed undefined variable exception in prediction in rare cases...
+    * 1289 FIX: Fixed missing table headers in hostgroup view
+    * 2551 FIX: Fixed locking issues when editing dashboards...
+    * 2578 FIX: Fix exception in case a user has a non-existent role...
+    * 2555 FIX: Availability: Fixed exception when trying to group BI aggregations by host/service-groups
+    * 2580 FIX: Remove bogus ;0 in comment of problem acknowledgements...
+    * 2607 FIX: Fixed broken links from BI views to aggregation group views
+    * 2615 FIX: Fixed bug in legacy dashboard conversion when having users not permitted to access embedded views...
+
+    WATO:
+    * 2264 WATO Web API: new function get_all_hosts (returns all host attributes)
+    * 2550 FIX: Catching exception when having name conflicts while moving a folder
+    * 2552 FIX: API: Service Discovery action now creates the missing pending log entry and marks the site dirty
+    * 2560 FIX: Fixed rare exception when using bulk host move...
+    * 2610 FIX: Fixed host search showing results of not permitted folders
+    * 2611 FIX: Fixed host view permission checks on host related pages...
+
+    Event Console:
+    * 2614 FIX: Fixed exception when processing events with umlauts in names from history
+
+    Livestatus:
+    * 2579 FIX: Fixed syntax of JSON in case of column headers turned on...
+    * 2581 FIX: Fix crashing Nagios core in rare case when excessive commands are being executed...
+
+    HW/SW-Inventory:
+    * 2553 FIX: solaris_prtdiag: Can now handle Supermicro servers using Solaris 10
+
+
+1.2.6p10:
+    Checks & Agents:
+    * 2406 FIX: fileinfo: fixed missing size performance data for very large (e.g. 2TB) files and fileinfo groups...
+    * 2377 FIX: cpu.loads: Fix output of reference for prediction (was scaled wrongly by number of cores)
+    * 2476 FIX: check_bi_aggr: Fixed exception when authentication is enabled
+    * 1274 FIX: heartbeat_crm: Handle case of error messages from CRM
+    * 2488 FIX: veeam_jobs: fix problems with spaces or single quotes in the names of jobs...
+    * 2494 FIX: Classic SNMP mode now uses normal snmpwalk when bulkwalks are disabled with SNMPv3
+    * 2495 FIX: printer_supply: Fixed exception when unit type is not available via SNMP
+    * 2497 FIX: veeam_client: Fixed exception when client is missing in agent output
+    * 1275 FIX: Fileinfo: Fixed handling of output from AIX Agent
+    * 2414 FIX: logwatch: now really ignores ignored logwatch lines...
+    * 2415 FIX: check_mail, check_mail_loop: fixed incorrect POP3 ssl setting (thanks to Bernhard Schmidt)...
+    * 2452 FIX: cisco_temperature: Fixed handling of scaling...
+    * 2416 FIX: agent_netapp: fixed rare problem where environmental sensor info (fans, psus, temp) was not shown...
+    * 2453 FIX: ups_socomec_capacity: Correctly handle time remaining when on inverter...
+    * 2418 FIX: agent_vsphere.pysphere: legacy agent no longer aborts when evaluating unknown ssl option...
+    * 2504 FIX: cups_queues: Fixed wrong queue discovery on hosts running apcupsd
+
+    Multisite:
+    * 2470 FIX: Fixed exception in logwatch log list in rare cases...
+    * 2375 FIX: prediction preview: automatically select valid prediction period
+
+    WATO:
+    * 2407 FIX: WATO master/slave replication: fixed problem where the configuration from the master site was not activated on the slave site...
+    * 2474 FIX: Fixed possible corruption of user datasets (contact, mail, rbn config)...
+    * 2475 FIX: LDAP: Fixed possible exception during sync when syncing custom user attributes...
+    * 2499 FIX: Git: Fixed message "Please tell me who you are. Run git config ..." on making changes...
+
+    Notifications:
+    * 2412 FIX: Fixed broken notification analysis feature...
+
+    HW/SW-Inventory:
+    * 2483 FIX: win_exefiles: more gracefully handle incomplete lines, avoid exception
+    * 2454 FIX: lnx_distro: Fixed inventory for SuSE installations with patchlevel 0
+
+
+1.2.6p9:
+    Core & Setup:
+    * 2465 FIX: Fixed broken Check_MK Discovery checks (check reports "(null)")
+
+    Multisite:
+    * 2468 FIX: Fixed actions for duplicate host names on different sites...
+
+    WATO:
+    * 2466 FIX: Fixed exception when searching for rulesets / global settings using special characters like umlauts
+    * 2467 FIX: Fixed encoding exception occurring in localized GUI when a WATO action triggers an error message...
+
+
+1.2.6p8:
+    Checks & Agents:
+    * 2398 agent_vsphere: now able to opt out of the server's SSL certificate check...
+    * 2449 FIX: db2_mem: Fixed scaling of perfdata...
+    * 1264 FIX: ad_replication: Fixed handling of agent output without Destination DC entry
+    * 2374 FIX: md: Fix exception for certain outputs of certain special MD configurations...
+    * 2458 FIX: FreeBSD-Agent: Fixed uptime calculation
+
+    Multisite:
+    * 1263 FIX: Fixed handling of URLs in views...
+    * 2396 FIX: LDAP: Fixed handling of LDAP trees having special chars in the path (e.g. in OU names)...
+    * 2459 FIX: Preventing caching of all HTTP requests to dynamic pages (*.py)...
+
+    WATO:
+    * 2372 FIX: Avoid freezing WATO during bulk discovery if hosts do not respond in a timely manner
+    * 1267 FIX: Fixed confirm dialog for activating foreign changes...
+    * 2397 FIX: Fixed wrong confirm text in distributed setup when activating foreign changes
+    * 2464 FIX: UDP ports for SNMP communication can now be configured via WATO
+
+
+1.2.6p7:
+    Checks & Agents:
+    * 2321 FIX: process discovery: fixed exception during service discovery when no explicit process matching was set...
+    * 2394 FIX: megaraid_ldisks: Is now supporting LSI CacheCade drives
+
+
+1.2.6p6:
+    Core & Setup:
+    * 2307 FIX: Windows Agent MSI installer: removed version information from product name...
+
+    Checks & Agents:
+    * 2305 FIX: agent_vsphere, esx_vsphere_sensors: now able to handle sensor names with semicolons...
+    * 1253 FIX: printer_io, printer_supply: prevent discovery on unsupported devices
+    * 2217 FIX: md: Fix handling of found and expected disks
+    * 2218 FIX: hr_cpu: Not applying levels now works...
+    * 2309 FIX: cpu_util checks: removed superfluous space in check output
+    * 1257 FIX: oracle_jobs: Fix: Discovery now supports the old oracle plugin again
+    * 1258 FIX: fileinfo solaris: Fixed configuration crash...
+    * 2221 FIX: cisco_temperature: Fixed order of device levels for some devices...
+    * 2311 FIX: windows agent: now removes BOMs (byte order marks) from local and plugin scripts...
+    * 2350 FIX: FreeBSD Agent: Changed bash path to /usr/local/bin/bash...
+      NOTE: Please refer to the migration notes!
+    * 2351 FIX: FreeBSD Agent: Made plugin/local paths standard-conformant...
+      NOTE: Please refer to the migration notes!
+    * 2352 FIX: FreeBSD Agent: Removed statgrab cpu section because there is a dedicated cpu section...
+    * 2359 FIX: adva_fsp_if: Use more reliable SNMP table, avoids sporadic problems...
+    * 2360 FIX: esx_vsphere_vm.snapshot: Fix output of snapshot age in performance data and graph...
+    * 2378 FIX: if: Now fixing encoding in interface descriptions according to rule...
+    * 2363 FIX: jolokia_metrics.uptime: Do not inventory instances where Uptime is missing - avoiding a crashed check
+    * 2381 FIX: emc_datadomain_fans: Made check more robust against broken SNMP output
+    * 2366 FIX: carel_sensors: fix crash in case of missing temperature sensor
+    * 2382 FIX: mssql_backup: Formatting age output in a more human-friendly way
+    * 2383 FIX: FreeBSD Agent: Fixed handling <<>> section when jailed
+    * 2368 FIX: ucd_cpu_load: fix exception in case of dumb SNMP agent sending 12,540000 instead of 12.540000
+    * 2320 FIX: winperf_msx_queues: no longer crashes in service discovery if there are no msx queues available
+
+    Multisite:
+    * 2385 SEC: Fixed possible reflected XSS on all GUI pages where users can produce unhandled exceptions...
+    * 2387 SEC: Fixed XSS problem on all pages using confirm dialogs outputting user provided parameters...
+    * 2388 SEC: Fixed reflected XSS on the index page using the start_url parameter
+    * 2389 SEC: Fixed XSS using the _body_class parameter of views...
+    * 2390 SEC: Fixed possible XSS issue on views...
+    * 2391 SEC: Auth cookie is using "secure" flag when HTTPS request detected...
+    * 2392 SEC: Auth cookie is always using "httponly" flag...
+    * 2310 FIX: multisite view data export: fixed exception when using joined columns...
+    * 2361 FIX: Fix exception for missing key 'title' in certain cases of older customized views
+    * 2379 FIX: Plugin-Output: Fixed handling of URLs within output of check_http...
+    * 2380 FIX: Custom Host Notes painter was showing service notes when used in service based views
+    * 2393 FIX: Fixed exception "user_confdir" not set in case of exceptions during login
+
+    WATO:
+    * 2364 Moved global settings for old plain emails to deprecated folder...
+    * 2384 SEC: Prevent user passwords from being visible in webserver log on user creation...
+    * 2386 SEC: Fixed possible XSS on WATO rule edit page...
+    * 2344 FIX: Improved validation of selected rules when editing BI aggregations...
+
+    Notifications:
+    * 2343 FIX: Rule Based Notifications GUI can now deal with latin-1 encoded plugin output in backlog...
+    * 2347 FIX: Improved error message in notify.log when sendmail is missing
+    * 2348 FIX: HTML-Mails: Added missing link to service descriptions
+    * 2349 FIX: HTML-Mails: Fixed state colors in Outlook
+
+    BI:
+    * 2370 FIX: Fix computation of "in downtime" and "acknowledged" of hosts in BI aggregations...
+
+    Reporting & Availability:
+    * 2331 FIX: Fix missing column with name "Summary" when "Do not display the host name" is checked
+
+    Event Console:
+    * 2328 FIX: Fix sporadic error "Connection reset by peer" when reloading Event Console...
+
+    HW/SW-Inventory:
+    * 2367 FIX: win_system: Fixed exception when non-UTF-8 sequences are contained in agent output
+
+
+1.2.6p5:
+    Checks & Agents:
+    * 2280 agent_vsphere: now provides more information if the login attempt fails...
+    * 2255 FIX: checkpoint_*: Fixed exception in scan function when sysDescr has less than 2 spaces...
+ * 2278 FIX: printer_supply: now able to toggle the point of view for used and remaining material... + NOTE: Please refer to the migration notes! + * 2258 FIX: windows_multipath: Fixed exception in case check reports more paths than expected + + Multisite: + * 2257 FIX: Improved handling of duplicate hostnames across different sites... + * 2299 FIX: Fixed search filter for check command when command was active... + + WATO: + * 2259 FIX: Raw Edition: Added missing agent download icons to WATO quickaccess snapin + + Event Console: + * 2281 FIX: mkeventd: fix: now able to create notifications with events containing umlauts... + * 2282 FIX: mkeventd: fixed exception in notification if the application field of the event was not set... + + +1.2.6p4: + Checks & Agents: + * 2274 windows agent: "check_mk_agent.exe test" now also outputs stderr of plugins... + * 2279 windows agent: fixed invalid agent output, lead to broken memory check + * 2272 FIX: mrpe: made UNKNOWN message more clear in case of an invalid state char + + WATO: + * 2254 FIX: Fixed error message in parameter columns of discovered services... + * 2230 FIX: Fix two exceptions in case of errors during bulk discovery + + Reporting & Availability: + * 2295 FIX: Fix exception in reporting for views that do not show a host name + + +1.2.6p3: + Checks & Agents: + * 2252 SEC: mk_logwatch: Fixed mostly uncritical command injection from config... + * 2247 FIX: ibm_svc_mdiskgrp: Made check working with different firmware versions outputs + * 2071 FIX: windows agent: fileinfo check now correctly reports empty directories... + * 2072 FIX: agent_netapp, netapp_api_volumes, netapp_api_disks: Improved check output... + * 2075 FIX: check_mk_agent: fixed formatting error for windows eventlog entries > 2048 characters... + * 2077 FIX: Windows MSI Installer: fixed automatical install of agent plugins... + * 1244 FIX: windows_tasks: Fixed handling of tasks manually stopped by admin... + * 1245 FIX: printer_output: Now correctly detect a bin with unknown as name + * 2266 FIX: windows agent: fixed invalid agent output if system memory exceeds 2TB RAM... + + Multisite: + * 2233 FIX: Fixed WATO folder view and Host Tags search with HTML Entity encoding... + * 2074 FIX: pnptemplate netapp_api_volume: fixed title + * 2251 FIX: Adding views to dashboards / reports is now respecing all set filters... + * 2253 FIX: Availability context button is now visible again for host- and servicegroups + + WATO: + * 2250 Added download page for shipped agents and plugins... + * 2244 FIX: Fixed sorting of host tag topics in dropdown selections + * 2263 FIX: Bulk service discovery: Fixed error when doing bulk inventory on locked folders... + + Notifications: + * 2243 FIX: Check_MK notifications don't fail anymore when duplicate hosts are configured + + BI: + * 2073 FIX: BI aggregation: fixed exception when showing clusters in BI boxes + + Livestatus: + * 2229 Do not fail on non-existing Livestatus columns any longer, output None or null instead... + + HW/SW-Inventory: + * 2246 FIX: Linux CPU Inventory: Fixed wrong number of CPUs when first CPU is not in first slot + * 2226 FIX: prtconf: Fix computation of CPU clock speed: 1MHz is 1000^2 Hz, not 1024^2 + + +1.2.6p2: + Core & Setup: + * 2180 FIX: cmk -D showed always "bulkwalk: no" for SNMPv3 hosts (which is wrong)... + * 2182 FIX: Fixed services randomly becoming stale when using CMC... + + Checks & Agents: + * 2134 winperf_phydisk: allow device to appear more than one time in agent output... 
+ * 2049 FIX: window agents: prevent errors with invalid plugin output... + * 1893 FIX: cisco_power: Now discovers all power supplies, not only redundant ones... + * 2198 FIX: windows_updates: Fix missing warning if reboot required in case forced reboot is in the past... + * 1955 FIX: lnx_quota: Fixed status message to show the correct values of limits... + * 2064 FIX: windows agent: crash.log now uses \r\n as newline + * 2065 FIX: windows agent: now correctly installs service with elevated privileges... + * 2179 FIX: apc_symmetra: Fixed regression introduced with last release when output state is empty + * 2067 FIX: windows agent: product version is no longer set to 1.0.0... + * 2213 FIX: cisco_temperature: Fixed behaviour in cases where device reports status but no temperature... + * 2069 FIX: netapp_api_disk.summary: fixed one missing disk in summary check... + * 2070 FIX: agent_netapp: fixed exception when a channel has no shelf-list configured + * 2212 FIX: oracle_tablespaces: Fix plugin output in case of detected problem in Autoextend settings... + + Multisite: + * 1945 FIX: doc/treasures/downtime: Fix setting and removing of downtimes... + * 2177 FIX: Host/Service statistics dashlets honor the site filter correctly now + * 1957 FIX: Fixed default value for text input fields in notification plugins... + + WATO: + * 1956 FIX: WATO Web-API: Fixed exception information for single sites... + * 2178 FIX: Fixed handling of user erros in WATO when using Python < 2.5 + * 2203 FIX: Fix sorting of unselected elements in a list choice with two panes + + Notifications: + * 2207 FIX: Fix exception in rule based notifiations with Nagios core and event type filter... + + BI: + * 1897 FIX: Fixed exception in BI Availability view... + + Reporting & Availability: + * 2206 FIX: Add missing option "Show timeline of each object directly in table" for reports... + + Livestatus: + * 2208 FIX: Add missing Livestatus column service_period... + + HW/SW-Inventory: + * 2197 FIX: win_reg_uninstall: Fix exception in case of invalid output line... + * 2224 FIX: Fixed sorting in inventory based views... + + +1.2.6p1: + Core & Setup: + * 2089 FIX: Debug mode shows details about errors in autochecks as expected now + * 2093 FIX: Fixed handling of check_mk commandline parameter "-c" + * 2187 FIX: Avoid CLOSE_WAIT sockets for agent connection in case of timeouts... + * 2194 FIX: Avoid new discovered checks from being used without config reload + + Checks & Agents: + * 1234 Fixed cisco_power in case of slashes in item names... + * 2051 FIX: windows agent: no longer outputs stderr of local and plugin scripts... + * 2088 FIX: cisco_cpu: Dealing with non CPU utilization information correctly... + * 2055 FIX: agent_vsphere, licenses check: now really configurable on / off... + * 2091 FIX: The check-mk-agent RPM packages can now update the old check_mk-agent named RPMs... + * 2046 FIX: Replace GBit with Gbit, MBit with Mbit, KBit with Kbit... + * 2098 FIX: ibm_svc_mdiskgrp: fix rounding bug, decimal digits of size (GB, TB) were always lost + * 2094 FIX: Fixed missing agent section when ntpq times out after 5 seconds + * 2095 FIX: oracle_crs_voting: Also handling voting disks with id 0 (seen on old CRS 10.2.0.5.0)... + * 2096 FIX: jolokia_metrics: Now deal with missing thread related infos (jboss might only send ThreadCount) + * 1895 FIX: temperature.include: Fixed Fahrenheit handling... + * 2097 FIX: apc_symmetra: Fixed false alert during self test... 
+    * 2143 FIX: Solaris-Agent: Fixed broken zfsget checks on solaris 10
+    * 2144 FIX: Fixed exception in inventory GUI when trying to render dates of inventorized exe files...
+    * 2058 FIX: ucs_bladecenter_fans.temp, ucs_bladecenter_psu.chassis_temp: fixed broken temperature checks (nagios only)...
+    * 2059 FIX: ucs_bladecenter_if: fixed exception when fibrechannel interfaces were not configured...
+    * 1233 FIX: Fixed fileinfo check for solaris in case of missing files
+    * 1236 FIX: multipath: Now show correct error in case of removed multipaths instead of check crash
+    * 2152 FIX: apache_status: Fixed plugin to work on CentOS/RedHat 5.x...
+    * 1237 FIX: Fixed missing temperature checks on cisco devices
+    * 1238 FIX: check_mk_agent.linux: Do not execute the multipath section if no /etc/multipath.conf exists.
+    * 1240 FIX: multipath: improved detection for not installed multipath
+    * 2159 FIX: netapp_api_disk.summary: Changed default thresholds to WARN on the first broken disk, CRIT on 50%...
+    * 2161 FIX: heartbeat_crm: Fixed UnboundLocalError exception on some systems
+    * 2162 FIX: citrix_sessions: Handle not set thresholds on single values correctly...
+    * 2163 FIX: printer_supply: Now auto detects whether a supply is consumable or filling up...
+    * 2164 FIX: printer_supply: Fixed handling of capacities other than percentage when upturned levels are used
+    * 2169 FIX: jolokia_metrics.threads: Fixed graph template...
+    * 2170 FIX: jolokia_metrics_gc: Fixed exception when GC time not reported by server
+    * 2109 FIX: netapp_api_volumes: now using the defined levels when using the Nagios core
+    * 2171 FIX: netapp_api_vf_status: Handling "DR backup" as normal (OK) state now
+    * 2110 FIX: netapp_api_aggr: check did not take configured levels when using Nagios
+    * 1954 FIX: fileinfo / fileinfo.groups: Fixed discovery function for fileinfo groups and equalize agent output of fileinfo agents...
+    * 2111 FIX: hitachi_hnas_volume: fix for cases when size information of volumes is not available
+    * 2190 FIX: jolokia_metrics.gc: Fixed exception in check if no warn/crit levels are defined
+    * 2192 FIX: check_notify_count: Fix exception in PNP template in case of explicit email addresses...
+    * 2172 FIX: Allowing OIDs in checks not starting with a dot again...
+    * 2173 FIX: mk-job: Fixed quoting of command line arguments
+
+    Multisite:
+    * 2054 FIX: Sidebar snapin "Tree of folders": fixed exception when using localized default value...
+    * 2090 FIX: Fixed errors when editing / rendering custom url dashlets in some cases...
+    * 2092 FIX: Dashboards: Possible to configure an empty custom title
+    * 2145 FIX: LDAP-Sync: Handling user ids with special characters more user friendly...
+    * 1953 FIX: Fixed processing of HTML in input fields...
+    * 1239 FIX: Fixed doc/treasures/downtime script to work with current GUI version
+    * 2157 FIX: LDAP: Fixed group-to-role/group-to-contactgroup sync with OpenLDAP (using posixGroup)
+    * 2141 FIX: Fix computation of explicit time ranges with time of day...
+    * 2142 FIX: Fix non-working option for disabling column headers in grouped boxed views...
+    * 2168 FIX: Fixed automation actions with transid=-1 when using basic authentication
+
+    WATO:
+    * 2045 FIX: Avoid fetching SNMP data when showing service list in WATO - unless Full Scan is pressed
+    * 2047 FIX: Allow overriding existing WATO rules by own files in local/ hierarchy...
+    * 2146 FIX: In distributed environments user notification rules trigger a profile synchronisation now...
+ * 1232 FIX: Ldap: Replaces special danish characters now in user sync + * 2149 FIX: LDAP: The diagnostic log has been changed to use a fixed path... + * 2158 FIX: Condition column in WATO rule tables has now a flexible width... + * 2160 FIX: Fixed rename of hosts where a host with this name did exist before + * 2191 FIX: Fixed handling of URL variable 'mode' in web API for discovering services... + + Notifications: + * 2193 FIX: Remove duplicate performance data from host notifications... + + Reporting & Availability: + * 2189 FIX: Allow changing graph number from 1 to "all" when editing PNP graph in report... + + Event Console: + * 1865 FIX: mkeventd: fixed exception when executing a shell script as action + * 2133 FIX: Fix visualization of global EC setting for Rule Optimizer... + * 2139 FIX: Fix exception in Event Console when archiving events with match groups and non ASCII characters + * 2151 FIX: Fixed wrong time in events when forwarding logwatch to EC between timezones... + + HW/SW-Inventory: + * 2147 FIX: Fixed exception in HW-/SW-Inventory GUI with special characters in inventorized data... + * 2148 FIX: win_os: Fixed inventory of OS with older powershell versions + * 2108 FIX: win_bios win_disks win_system win_video: these inventory checks can now handle colons in the output + + +1.2.6: + Checks & Agents: + * 2050 FIX: netapp_api_if: Fixed invalid speed warning for virtual interface groups... + * 2086 FIX: apc_ats_status: Fixed exception when source different than selected source + * 2087 FIX: netapp_api_temp: Fixed exception when dealing with old discovered checks... + + +1.2.6b12: + Checks & Agents: + * 2039 mk_logwatch: new per-logfile-options maxfilesize and maxlinesize... + * 2048 FIX: netapp_api_fan, netapp_api_psu, netapp_api_temp: fixed typo in service description Shelfes -> Shelves + NOTE: Please refer to the migration notes! + * 2021 FIX: if_lancom: Also used for checking ELSA/T-Systems branded devices + * 2023 FIX: if_lancom: Handle point-2-point interfaces of newer firmwares correctly + * 2027 FIX: fc_port: Fixed exception when wirespeed is reported as 0 by the device + * 1224 FIX: Fixed rare Bug in case of clusterd network interfaces... + * 2079 FIX: freebsd agent: Was unable to find ntpq command with FreeBSD10... + * 2082 FIX: jolokia_metrics.mem: Fixed levels on total memory usage + + Multisite: + * 2024 FIX: Views: Fixed problem when filtering views by strings containing umlauts... + + WATO: + * 1223 FIX: Fixed manual configuration of ntp peer check... + * 2025 FIX: Fixed exception when synchronising custom ldap attributes in distributed WATO setup + * 2026 FIX: Fixed exception when using umlauts in notification plugin descriptions... + * 2078 FIX: Fixed exception with some snapshots when using a localized GUI... + * 2080 FIX: Fixed UnicodeDecodeError when using a localized GUI on notification configuration page + * 2084 FIX: Disabled notification for a user is now shown on profile page even when not permitted to edit... + + Notifications: + * 2081 FIX: Improved logging of mkeventd in error cases + + BI: + * 2020 FIX: Fixed non working FOREACH_CHILD mechanism for BI rules + + +1.2.6b11: + Core & Setup: + * 2014 FIX: Fixed different issues running Check_MK on CentOS 5.x + * 2037 FIX: Inventorize piggy back data even if access to normal agent fails + * 2016 FIX: Fixed service discovery / monitoring on hosts which have only piggyback data (e.g. ESX VMs)... + + Checks & Agents: + * 1947 agent_ucs_bladecenter: Monitors UCS Bladecenter via Web-API... 
+    * 2017 FIX: Solaris-Agent: Prevent hanging agent in uptime section...
+    * 1890 FIX: cisco_temperature: Replaces cisco_temp_perf and cisco_temp_sensor...
+      NOTE: Please refer to the migration notes!
+    * 2019 FIX: heartbeat_crm: Be compatible with yet unknown crm_mon output formats
+
+    WATO:
+    * 1946 FIX: WATO Web-API: edit host action no longer unsets all unspecified attributes...
+
+    Notifications:
+    * 2015 FIX: Fixed sending notifications for services with umlauts in names...
+    * 2038 FIX: Log complete email address into monitoring history when notifying explicit addresses
+
+
+1.2.6b10:
+    Core & Setup:
+    * 2012 FIX: Piggyback hostname translation can now deal correctly with umlauts
+    * 2014 FIX: Fixed different issues running Check_MK on CentOS 5.x
+
+    Checks & Agents:
+    * 2005 services: change service description from service_ to Service for new installations
+    * 1859 FIX: cups_queues: linux agent now runs section cups_queues in cached mode...
+    * 1942 FIX: netapp_api_volumes: fixed exception when performance data generation was enabled
+    * 1993 FIX: solaris_multipath: Fix detection of expected number of paths
+    * 1944 FIX: hr_mem: no longer reports incorrect memory values when cached memory values are broken...
+    * 1994 FIX: lparstat: Support new AIX version with two new columns nsp and utctc
+    * 1997 FIX: checkpoint_connections, checkpoint_packets: Detect more recent devices
+    * 2000 FIX: check_mk_agent.freebsd: Add missing <<>> section, the plugins section appeared twice instead...
+    * 2004 FIX: windows_updates: fix exception in WATO when displaying default levels
+    * 2006 FIX: services: Add WATO rule for configuring parameters of discovered checks...
+    * 2007 FIX: md: Handle rebuild of RAID 5 correctly, handle situation of replacement correctly...
+    * 2028 FIX: hyperv_vms: new plugin that allows spaces in VM names...
+    * 2013 FIX: stulz_pump: Fixed exception during checking for some devices
+    * 2029 FIX: fortigate_cpu, fortigate_memory, fortigate_session: fix SNMP scan function, add WATO rule set for sessions...
+
+    Multisite:
+    * 1983 FIX: Fixed special case in language configuration via user profile...
+    * 1984 FIX: Fixed losing the sidebar after switching to/from edit mode in dashboard editor on page reload...
+    * 1985 FIX: PNP graph dashlet handles graphs in distributed setups correctly...
+    * 2008 FIX: Users created during basic auth login get the role configured in "default user profile" assigned...
+    * 2011 FIX: "Service Group" view now sorts/groups the services correctly by host
+
+    WATO:
+    * 1986 FIX: Added nicer error message when calling the rename host page with a non-existent host
+    * 1987 FIX: Editing auxtags shows existing topics in dropdown instead of as "create new topic"
+    * 2001 FIX: Fix exception of missing .site when editing a non-existing host
+    * 2002 FIX: Mark slave sites as dirty if BI aggregates are changed and login is allowed...
+    * 2009 FIX: Fixed styling of site login page for establishing a distributed monitoring WATO sync...
+    * 2003 FIX: Fix saving of "Users are allowed to directly login into the Web GUI of this site"...
+    * 2010 FIX: Improved error message when trying to add a group assignment rule without having a group configured
+
+    HW/SW-Inventory:
+    * 1943 FIX: inventory plugin win_os: no longer detects incorrect i386 architecture...
+    * 1995 FIX: dmidecode: Fix parsing when memory devices are listed before controller
+
+
+1.2.6b9:
+    Checks & Agents:
+    * 1668 Interface groups: Can create groups out of interface item names...
+ * 1827 oracle_tablespace: WATO rule for default increment... + NOTE: Please refer to the migration notes! + * 1940 FIX: ps: Fixed a rare crash on malformed agent output... + * 1941 FIX: df.include: fixed exception on emtpy filesystems... + + +1.2.6b8: + Core & Setup: + * 1882 FIX: Fixed exception "filesystem_levels" not defined when compiling config for nagios + * 1977 FIX: Dramatically reduced size of Check_MK check helper processes... + * 1982 FIX: Fixed exception during checking regular checking when having checks without discovery function + + Checks & Agents: + * 1673 netapp_volumes: now able to configure levels by magic factor + * 1676 if.include: now able to detect grouped interfaces... + * 1928 netapp_api_if: Improved handling and check output of virtual interfaces... + * 1929 netapp_api_if: improved inventory and check output of virtual interfaces... + * 1930 Windows agent: now able to unpack plugins.cap file (created by Check_MK agent bakery)... + * 1933 esx_vsphere_objects: now able to set a different alert level when the host/vm reports 'unknown'... + * 1992 df: Show usages near to zero with a higher precision - not simply as 0.00 + * 1881 FIX: omd_status: Check works now event when a site is reported as not OK... + * 1923 FIX: cisco_qos: Fixed exception in discovery that might lead to missing services + * 1924 FIX: cisco_power: Fixed missing power supplies in case where name is not unique + * 1886 FIX: win_printers: Fixed exception in WATO when displaying default parameters + * 1887 FIX: Logwatch event console forwarding: Better dealing with logwatch states + * 1969 FIX: apc_symmetra: Fix wrong critical state "0 batteries need replacement" + * 1926 FIX: ps: reenable compatiblity with existing configurations... + * 1970 FIX: lparstat_aix: Made the check compatible to different kind of lparstat output... + * 1971 FIX: printer_input/printer_output: Discovery is using name field when available no... + NOTE: Please refer to the migration notes! + * 1931 FIX: agent_vsphere: no longer crashes when host has no license information + * 1932 FIX: check_http: Check SSL Certificate: did not work when SNI Option was set... + * 1975 FIX: check_bi_aggr: Ignoring proxy settings from environment now + * 1936 FIX: check_form_submit: fixed crash on certain form fields with unnamed input elements + * 1938 FIX: docsis_channels_upstream: fixed missing checks if channels had the same ChannelId... + + Multisite: + * 1979 Relative timestamps display warnings when they should be in future but are in past + * 1937 cpu.loads: performance graph now displays number of CPUs + * 1884 FIX: Fixed exception in virtual host tree snapin + * 1885 FIX: Fixed filtering by software versions in software package search + * 1972 FIX: Prevent erasing of quicksearch field when sidebar is reloaded (e.g. during activate changes)... + * 1221 FIX: veeam_client: Multisite perfometer is now more robust + * 1989 FIX: Fix sorting of services in availability views + * 1978 FIX: Fixed linking to other views using "joined columns"... + * 1980 FIX: logwatch: Fixed exception when acknowledging errors of a single logfile on a single host + * 1981 FIX: Not trying to render view in view editor when the view is not valid + + WATO: + * 1935 WATO Web-API: Reduced number configurable role permissions... + * 1922 FIX: Fix exception in saving of hosttags if hosttag has at least one auxiliary tag + * 1883 FIX: Fixed lossing service context when cloning a rule + * 1925 FIX: Fix missing auxilliary tags that have their own topic... 
+ * 1927 FIX: Fixed level description in WATO rules, change from if above into at + * 1976 FIX: Sorting BI rule choice dropdown field entries now + + Notifications: + * 1988 FIX: Gracefully handline invalid empty bulk notification files from previous buggy versions + + Reporting & Availability: + * 1990 FIX: Fix two exceptions in PDF exports of host group views + + Event Console: + * 1974 FIX: Event console views were randomly ignoring host filters... + + +1.2.6b7: + Core & Setup: + * 1842 FIX: Rewrote implementation of service discovery (formerly inventory)... + * 1869 FIX: Deleting outdated persisted agent sections now + * 1919 FIX: cmk --snmpwalk: continue if one of the OIDs to walk fails + * 1880 FIX: inventory_processes rules can now be configured without setting levels... + + Checks & Agents: + * 1822 oracle_undostat: rule for non space error count... + * 1823 mk_oracle_crs: compatibility against CRS 10.2 + 11.1... + * 1825 oracle_recovery_status: backupcheck for user managed backups... + * 1826 oracle_dataguard_stats: New rule for apply_lag_min, removed default rule... + * 1388 FIX: oracle_asm_diskgroup: fixed wrong calculation of free space in NORMAL/HIGH redundancy Disk Groups... + * 1389 FIX: oracle_rman: detect failed jobs... + * 1390 FIX: mk_oracle: better detection of RMAN Archivelog Backups... + * 1391 FIX: oracle_instance: New function for Primary Database not OPEN... + * 1821 FIX: mk_oracle: changed connection to dedicated server mode... + * 1824 FIX: oracle_recovery_status: removed default values from Check... + * 1817 FIX: The Check_MK service did not result in CRITICAL/WARNING states when using Nagios as core... + * 1844 FIX: oracle_crs_res: fix computation of node a ressource is running on... + * 1828 FIX: oracle_dataguard_stats: Bugfix for 'params_value' referenced before assignment... + * 1853 FIX: cisco_power, cisco_fan, cisco_temp_perf: fixed service description for some special cases... + NOTE: Please refer to the migration notes! + * 1856 FIX: ibm_svc_array ibm_svc_mdisk ibm_svc_mdiskgrp ibm_svc_portfc: made checks more robust for varying number of parameters of IBM SVC agent plugin... + * 1874 FIX: ps: Old process inventory configurations work now again... + * 1875 FIX: Fixed possible exceptions of CMC Check_MK helpers when using some custom checks... + * 1847 FIX: oracle_logswitches: Fixed description of WATO rule for levels... + * 1877 FIX: printer_input/printer_output: Check can now handle non reported capacity unit + + Multisite: + * 1843 FIX: Fixed crash in display of crash report for precompiled host checks + * 1870 FIX: Joined columns were empty in CSV, JSON or PYTHON exports of view... + * 1871 FIX: Site filter is only shown as host related filter now... + * 1872 FIX: View editor hides filter selection for object types which have no filter to choose... + * 1876 FIX: User sorting of views can now be disabled again + + WATO: + * 1816 FIX: Fixed garbled output on "rename host" result page + * 1879 FIX: Not showing "only show permitted hosts/services" option for users not having "see all" permissions... + + Notifications: + * 1213 New Notification macros $SERVICEFORURL$ and $HOSTFORURL$... + + Event Console: + * 1873 SEC: Escaping event text of event console messages correctly in views... + * 1861 FIX: exception in mkeventd when archiving certain event log lines + * 1878 SEC: Fixed possible shell injection when filtering the EC archive... 
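Werk 1213 above introduces the $SERVICEFORURL$ and $HOSTFORURL$ notification macros. Presumably these carry a URL-safe encoding of the service and host names; a sketch of that kind of encoding using only the standard library, purely for illustration (the actual macro expansion happens inside Check_MK's notification code, not in this snippet):

    from urllib.parse import quote

    # Encode a service description so it can be embedded in a link,
    # roughly what a *FORURL*-style macro would need to provide.
    service_description = "Filesystem /var/log"
    service_for_url = quote(service_description, safe="")

    print(service_for_url)  # -> Filesystem%20%2Fvar%2Flog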
+ + HW/SW-Inventory: + * 1851 FIX: win_exefiles: inventory check can now handle time stamps in us english locale + + +1.2.6b6: + Core & Setup: + * 1832 FIX: Fix "global name 'splitted' is not defined" in bulk inventory... + * 1808 FIX: Fixed broken nagios config when using RBN without a host defined... + + Checks & Agents: + * 1807 check_mail: Added new check to check IMAP/POP3 login (incl. forwarding of mails to event console)... + * 1818 FIX: dell_poweredge_cpu: Fix exception where BrandName is missing + * 1819 FIX: dell_poweredge_temp: Make output and service description consistent with other temperature checks... + NOTE: Please refer to the migration notes! + * 1833 FIX: jolokia_metrics.gc: fix recently introduced exception for missing variable + * 1806 FIX: services check was not recognizing configured state when no service was found + * 1840 FIX: oracle_tablespaces: fix implementation of magic factor + * 1848 FIX: df: title of pnp graphs for filesystem checks fixed... + * 1209 FIX: livestatus_status: Check handles cluster using in cluster now + * 1809 FIX: cisco_temp_perf: Fixed exception when no temperature threshold provided by device + * 1812 FIX: juniper_screenos_mem: Fixed too large memory reported (byte <> kbyte mixup) + * 1814 FIX: agent_ibmsvc: Fixed missing executable flag + + Multisite: + * 1667 Sidebar snapin 'Tree of Folders' and 'WATO folder' filter now available on slave sites... + * 1802 FIX: Links in messages like "successfully sent X commands" are now working again... + * 1803 FIX: Fixed exception in Check_MK prediction page... + * 1804 FIX: Fixed prechecked checkboxes in view actions after first action submit... + + WATO: + * 1805 FIX: Changing roles marks sites where users can login dirty for sync now... + * 1211 FIX: Fixed g_git_messages error on activate changes... + * 1212 FIX: Fixed default value in wato parameter page for timeperiods... + + Notifications: + * 1810 FIX: Rule based notifications: Fixed output of non contact mail recipient address in analyze table... + + Event Console: + * 1839 FIX: Fix exception when notifying EC alert into monitoring for traps (because PID is missing) + * 1813 FIX: Fixed bug in event console rule editor when no contact groups configured + + +1.2.6b5: + Core & Setup: + * 1797 FIX: Fix incomplete configuration during checking when using CMC... + + Checks & Agents: + * 1795 FIX: Fix internal exception in WATO rule for filesystems... + * 1522 FIX: quantum_libsmall_door, quantum libsmall_status: Fixed broken scan function + + Multisite: + * 1801 FIX: "Add to visual" menu in views is now sorted + * 1796 FIX: Fix filtering in Multisite View BI Boxes... + + +1.2.6b4: + Core & Setup: + * 1791 FIX: Fix problem where many bogus RRD files for Check_MK service would be created... + * 1792 FIX: Fix path to special agents in case of manual installation + + Checks & Agents: + * 1775 FIX: logins: Fixed exception during check execution + * 1793 FIX: fritz: avoid Exception in inventory function of fritz checks if agent output is empty + + Multisite: + * 1776 Dashboard: Allowing unicode characters in static text dashlet + * 1774 FIX: IE: Always use the latest available rendering enginge of the used browser... + * 1777 FIX: Fixed js error making the "add to visual" link break on pages with context... + * 1798 FIX: Filters are now retained when adding a view to a dashboard... + * 1799 FIX: Dashboards: Existing views added to dashboards now get a correct title / title_url + * 1800 FIX: Fixed umlauts and HTML tags in exception texts... 
+ + WATO: + * 1794 FIX: Fix exception in WATO service list in case of vanished checks + + +1.2.6b3: + Core & Setup: + * 1759 Packed RPM and DEB agent packages are now shipped with normal Check_MK package... + + Checks & Agents: + * 1665 agent_netapp: New special agent for NetApp monitoring via Web-API... + * 1786 casa_cpu_mem, casa_cpu_temp, casa_cpu_util, casa_fan, casa_power: support more devices, also C100G + * 1787 docsis_channels_upstream, docsis_channels_downstream: now also support CASA 100G + * 1457 FIX: logins: new check renamed from "users" check... + NOTE: Please refer to the migration notes! + * 1762 FIX: lnx_thermal: Now ignoring trip points with level 0... + * 1763 FIX: diskstat: Fixed error in config example of manpage + * 1755 FIX: cisco_vpn_tunnel: fix exception in case tunnel is not OK + * 1756 FIX: agent_ibmsvc: do not abort execution if one of the sections fail + * 1778 FIX: cisco_secure: do not warn for port where port security cannot be enabled + * 1764 FIX: mk_sap: Fixed exception when saving status file + * 1663 FIX: winperf_if: fixed incorrect enumeration of interface index... + * 1204 FIX: veeam_client: Not longer throwing an error in case of currenlty running backup + * 1666 FIX: inventory check esx_vsphere_hostsystem: no longer crashes if information is missing... + * 1767 FIX: fc_port: Re-enabled check discovery of this check + * 1768 FIX: brocade_fcport/brocade_info: Only try to discover these services when device provides correct info... + * 1769 FIX: megaraid_bbu: Fixed exception for some controllers reporting "full charge capacity" + * 1770 FIX: megaraid_pdisks: Now handling unconfigured good/bad states... + * 1771 FIX: domino_mailqueues: Fixed exception during inventory when no data usable data available + * 1208 FIX: cifsmounts: Detects now unreachable CIFS mounts + * 1772 FIX: lparstat_aix: Check handles already working agent output again + * 1793 FIX: fritz: avoid Exception in inventory function of fritz checks if agent output is empty + * 1795 FIX: Fix internal exception in WATO rule for filesystems... + * 1522 FIX: quantum_libsmall_door, quantum libsmall_status: Fixed broken scan function + * 1818 FIX: dell_poweredge_cpu: Fix exception where BrandName is missing + * 1819 FIX: dell_poweredge_temp: Make output and service description consistent with other temperature checks... + NOTE: Please refer to the migration notes! + * 1388 FIX: oracle_asm_diskgroup: fixed wrong calculation of free space in NORMAL/HIGH redundancy Disk Groups... + * 1389 FIX: oracle_rman: detect failed jobs... + * 1390 FIX: mk_oracle: better detection of RMAN Archivelog Backups... + * 1391 FIX: oracle_instance: New function for Primary Database not OPEN... + * 1833 FIX: jolokia_metrics.gc: fix recently introduced exception for missing variable + * 1463 FIX: juniper_screenos_mem, juniper_trpz_mem: pnp template fixed + * 1806 FIX: services check was not recognizing configured state when no service was found + * 1840 FIX: oracle_tablespaces: fix implementation of magic factor + * 1848 FIX: df: title of pnp graphs for filesystem checks fixed... + * 1821 FIX: mk_oracle: changed connection to dedicated server mode... + * 1824 FIX: oracle_recovery_status: removed default values from Check... 
+ * 1209 FIX: livestatus_status: Check handles cluster using in cluster now + * 1809 FIX: cisco_temp_perf: Fixed exception when no temperature threshold provided by device + * 1812 FIX: juniper_screenos_mem: Fixed too large memory reported (byte <> kbyte mixup) + * 1814 FIX: agent_ibmsvc: Fixed missing executable flag + * 1817 FIX: The Check_MK service did not result in CRITICAL/WARNING states when using Nagios as core... + * 1844 FIX: oracle_crs_res: fix computation of node a ressource is running on... + * 1852 FIX: solaris_multipath: this check now works with inventory to remember the number of total paths... + NOTE: Please refer to the migration notes! + * 1853 FIX: cisco_power, cisco_fan, cisco_temp_perf: fixed service description for some special cases... + NOTE: Please refer to the migration notes! + + Multisite: + * 1758 Improved exception hander: Shows details without additional debug request, added mailto link for error report... + * 1788 New personal setting for start page, right after login... + * 1781 FIX: Fix broken grouping by host/service group in availability + * 1783 FIX: Finish the view "History of Scheduled Downtimes"... + * 1206 FIX: Hostname not longer shown as column in host views + * 1766 FIX: Fixed exceptions in Web GUI when host or service groups used non ascii characters in names... + * 1773 FIX: Fixed different exceptions when using localized multisite + + WATO: + * 1760 Added search form to manual checks page + * 1761 FIX: Ruleset search is now consistent for host & serviceparameters and manual checks + * 1779 FIX: Fix broken icon in host diagnostic mode + * 1765 FIX: Fixed bug when generating nagvis backends while having sites with livestatus proxy configured... + * 1789 FIX: Fix preview of passive checks in WATO list of services + * 1790 FIX: Fix WATO parameters page for passive checks... + + Notifications: + * 1662 notification plugin spectrum: finalized script. now able to handle host notications + * 1754 FIX: Recent notifications (for analysis): Fix wrong color of host DOWN (from yellow to red) + * 1661 FIX: mknotifyd: improved performance when receiving forwarded notifications + * 1664 FIX: mknotifyd: further performance improvements for notification forwarding + * 1205 FIX: RBN: Fixed match contactgroup condition... + + BI: + * 1784 FIX: Fix exception in BI Boxes when parents are being used + + +1.2.6b2: + +1.2.6b1: + Core & Setup: + * 1439 mk-job: now also available on solaris systems... + * 1648 New installations have the service to check for unchecked services enabled by default... + * 1723 New check API function get_average() as more intelligent replacement for get_counter()... + * 1725 The get_average() function from now on only returns one argument: the average... + NOTE: Please refer to the migration notes! + * 1483 FIX: Savely replace illegal vertical bars in check plugin output... + * 1431 FIX: windows_agent: fixed error on parsing unicode formatted logfiles... + * 1545 FIX: Check_MK Inventory check is now resulting in correct state on duplicate host + * 1555 FIX: Improved validation on timeperiod references of non existing periods... + * 1574 FIX: Hosts named like used python modules do not break precompiled checks anymore... + * 1624 FIX: Remove illegal characters from service descriptions of active checks... + * 1628 FIX: Remove trailing backslashes from service descriptions... + * 1649 FIX: Check_MK inventory service has been renamed to Check_MK Discovery... + * 1706 FIX: Fix file permissions when installing MKPs to 0644 or 0755... 
+ * 1750 FIX: Handle rare cases where SNMP response string begins with a line feed... + * 1740 FIX: Changed default service discovery check intervall to 12 hours + + Checks & Agents: + * 1197 climaveneta_temp: New check for temperature sensors on Climaveneta clima devices + * 1167 citrix_license/esx_license: Can now be configured to always show OK as state + * 1198 climaveneta_fan: New check for fan speed on Climaveneta devices + * 1199 climaveneta_alarm: New check to display the alarm states on Climaveneta devcies + * 1484 dell_om_sensors: Use sensor name as item... + NOTE: Please refer to the migration notes! + * 1200 Docsis Checks: Now HW Rev2 of Arris Cable Modems are detected. + * 1486 mk_oracle: completely overhauled ORACLE monitoring... + * 1201 allnet_ip_sensoric: Detect Temperature Sensors now in more cases... + * 1171 Added new check for monitoring mail delivery (SMTP -> IMAP/POP3 mailbox)... + * 1444 f5_bigip_chassis_temp, f5_bigip_cpu_temp: Two new checks to replace the old f5_bigip_temp... + NOTE: Please refer to the migration notes! + * 1432 agent_vsphere: now able to monitor virtual machines snapshots... + * 1507 New optional parse_function for check API... + * 1445 quantum_libsmall_door, quantum_libsmall_status: Two new checks for monitoring small Quantum tape libraries + * 1448 domino_info: check is extended to also show and monitor the lnNotesServerState + * 1509 if, if64: New option for make inventory based on port alias... + * 1440 livedump: now able to add hosts icon_image on config generation... + * 1517 carel_sensors: New check for monitoring temperature sensors of Carel AC devices + * 1551 f5_bigip_vserver: add performance data for connections and connection rate + * 1554 mk_oracle: You can now monitor multiple ORACLE releases on the same host + * 1518 raritan_pdu_inlet, raritan_pdu_inlet_summary: Modified existing check to give one item per phase and support setting levels.... + NOTE: Please refer to the migration notes! + * 1592 AIX: New Plugin to monitor errpt in logwatch style... + * 1565 mem.win: set default levels for page file to 80%/90% + * 1608 zpool_status: Add an overall state check (thx to Craig Cook)... + * 1594 ibm_svc_host: Can now be set to be always OK... + * 1595 esx_vsphere_objects_count: New Check to Ouput the number of VMs + * 1567 postfix_mailq: speedup in Linux agent for large mail queues... + * 1611 mssql.vbs: Supporting SQL-Server 2014 now + * 1568 f5_bigip_cluster_v11: new check for F5 cluster status for firmware version 11 + * 1450 checkpoint_connections, checkpoint_packets: new checks to monitor Checkpoint firewalls + * 1569 check_mk_agent.openbsd: add sections for mem and lnx_if (memory and network interfaces)... + * 1451 users: new check to monitor number of users logged in on a linux system... + * 1615 qnap_disks: Added support for Fujitsu NAS QR802 + * 1616 drbd: Added support for Ahead/Behind cluster states (DRBD >= 8.3.10) + * 1626 Renamed service descriptions of filesystem, process and logwatch checks... + * 1627 megaraid_ldisks: Warn if current cache or write policy differs from logical drive default policy... + * 1629 check_mk_agent.freebsd: several new features and improvements, now only use statgrab... + * 1630 smart: update in plugin that also outputs information about disks attached to a MegaRAID controller... + * 1631 juniper_bgp_state: check now detects and supports more differen device models... + * 1645 Added basic kernel section to FreeBSD agent... 
+ * 1597 bluecat_dhcp, bluecat_dns: Checks can now be used in Check_MK Cluster Mode + * 1599 check_mk_agent.aix: Simple run_cached Feature for plugins... + * 1699 Windows agent: new option "file" for writing output into a file... + * 1684 cisco_vpn_tunnel: Now supporting VPN 3000 Conncentrator devices + * 1685 enterasys_*: Now supporting device C2G124-48 (Rev 05.02.18.0002) + * 1694 cisco_wlc/cisco_wlc_clients: Added support for Cisco AIR-CT2504-K9 + * 1726 Move variable data of Linux/UNIX agents to /var/lib/check_mk_agent... + NOTE: Please refer to the migration notes! + * 1734 check_sql: Added support for DB2 (thanks to Troels Arvin) + * 1757 Check SSH can now be configured in WATO + * 1478 FIX: kernel.util, statgrab_cpu: fix computation of utilization... + * 1480 FIX: brocade_vdx_status: disable check on some devices that do not support it... + * 1485 FIX: dell_om_disks, dell_om_esmlog, dell_om_mem, dell_om_processors, dell_om_sensors: detect more devices... + * 1202 FIX: cisco_power, cisco_temp_perf: Both checks now using a new service description... + NOTE: Please refer to the migration notes! + * 1446 FIX: cisco_temp_perf: Check now finds missing sensors in case where also cisco_temp_sensor is being used.... + * 1203 FIX: veeam_client: Now supports multiple Backups for one host... + NOTE: Please refer to the migration notes! + * 1437 FIX: veeam_jobs: fixed incorrect state for BackupSync job... + * 1511 FIX: oracle_jobs: avoid broken checks, make compatible with old version... + * 1513 FIX: Handle broken SNMP bulk walk implementation of Mikrotik Router firmware RouterOS v6.22... + * 1503 FIX: Fixed monitoring of multiple SAP instances with one mk_sap plugin... + * 1515 FIX: cisco_secure: fix service description, fix OK state in case of no violation + * 1449 FIX: nginx_status: agent plugin no longer honours "http(s)_proxy" env variables of root user + * 1387 FIX: mk_oracle: Correctly deal with underscore in SID for Oracle 9.2-10.1... + * 1532 FIX: mk_sap: Cleaning up old state information from sap.state file... + * 1548 FIX: bluecat_ntp: do not inventorized devices where NTP information is missing + * 1549 FIX: bluecat_threads: do not inventorize this check where information is missing... + * 1536 FIX: fritz!Box special agent now deals with new URLs (firmware >= 6.0) correctly + * 1550 FIX: zfs_arc_cache: do not inventorize of no cache information available... + * 1572 FIX: Sample configs, plugins etc. for windows agent use windows linebreaks now... + * 1575 FIX: vSphere Monitoring works with RedHat 5.x now... + * 1584 FIX: winperf_if: Fixed checks of interfaces with equal names but one with index... + * 1590 FIX: printer_supply_ricoh: Fixed broken check + * 1591 FIX: netapp_volumes: The state mixed_raid_type is now treated as non-critical state + * 1602 FIX: dell_om_esmlog: Fixed typo in plugin output + * 1603 FIX: ad_replication: fixed typo in plugin output + * 1604 FIX: mysql_slave: Dealing with situation where connection with master is lost + * 1563 FIX: Reworked configuration of process monitoring... + NOTE: Please refer to the migration notes! + * 1593 FIX: IBM SVC Checks: The Service Descriptions not longer contain IBM SVC as prefix... + NOTE: Please refer to the migration notes! + * 1564 FIX: check_mk_agent.linux: fix situation where async plugin is not executed after crash... + * 1609 FIX: zpool_status: fix problem when the zpool has a separate log or cache device... + * 1566 FIX: 3ware_disks: consider VERIFYING state as OK now... 
+ * 1612 FIX: job: Fixed wrongly reported start time for running jobs
+ * 1596 FIX: etherbox: Fix for the inventory in case of disconnected temperature sensors...
+ * 1571 FIX: check_mk_agent.linux: fix output of lnx_if on Ubuntu 8.04 (on older kernels), repairs tcp_conn_stats...
+ * 1622 FIX: megaraid_bbu: handle case isSOHGood and consider it as critical...
+ * 1617 FIX: lnx_if: Deal with data provided by cluster host
+ * 1618 FIX: ad_replication: Output of timeLastSuccess and timeLastFailure was inverted...
+ * 1623 FIX: hp_proliant_mem: support for some yet unhandled status situations
+ * 1640 FIX: check_jolokia_metrics_serv_req: Fixed wrong levels shown for upper thresholds
+ * 1632 FIX: hr_fs: remove ugly "mounted on:" information appearing on Juniper devices
+ * 1646 FIX: hyperv_vms: Plugin garbles the following plugin output when no VMs exist...
+ * 1647 FIX: agent_ipmi: Check_MK service gets critical now when ipmi-sensors command fails
+ * 1453 FIX: drbd.stats: tried to send non-numeric write order parameter to rrd...
+ * 1598 FIX: bluecat_dhcp: Check is no longer found in inventory if dhcp service is not activated
+ * 1635 FIX: multipath: fix parsing output of multipath on RedHat6 with space in alias
+ * 1652 FIX: kaspersky_av_quarantine: Fixed exception when a file was found in quarantine
+ * 1653 FIX: megaraid_pdisks: Resulting states are now hard coded within the check...
+ * 1654 FIX: statgrab_disk: Fixed scaling of values shown in PNP graphs...
+ * 1655 FIX: AIX Agent: Fixed broken filesystem checks when having PowerHA installed...
+ * 1656 FIX: cisco_vpn_tunnel: Refactored complete check, fixed threshold bugs...
+ * 1677 FIX: f5_bigip_interfaces: Cleaned up check a bit
+ * 1679 FIX: ups_bat_temp: Now skipping sensors which are reported to have 0 upsBatteryTemperature
+ * 1681 FIX: cmciii_lcp_fans: Skipping non-FAN units now; cleaned up check
+ * 1682 FIX: cmciii_lcp_waterflow: Check can now deal with devices with a different setup
+ * 1701 FIX: Correctly show absolute level for CPU load in case of warn/crit...
+ * 1702 FIX: Fix check_notify_count: notification had been counted twice...
+ * 1703 FIX: ups_test: Fix computation of time since last self test...
+ * 1454 FIX: megaraid checks: megacli binaries in lowercase (Ubuntu...) are now also detected by the linux agent
+ * 1455 FIX: hp_proliant_mem: avoid a crash of the check when module_condition is empty
+ * 1688 FIX: juniper_screenos_mem: Fixed wrong total memory computation
+ * 1658 FIX: agent_vsphere: no longer crashes when decommissioned vms report no hardware information...
+ * 1708 FIX: cups_queues: fix outputting of current printer jobs if printer daemon is CUPS...
+ * 1710 FIX: omd_status: Fix totally missing section in Linux agent...
+ * 1711 FIX: win_printers.ps1: ignore temporary printers created by RDP terminal sessions...
+ * 1712 FIX: hyperv_vms: fixed for snapshot VMs with (...) in their names...
+ * 1713 FIX: check_fstab_mounts: now correctly ignores swap space...
+ * 1716 FIX: windows_tasks: consider state SCHED_S_TASK_QUEUED (0x00041325) as OK now
+ * 1721 FIX: dell_om_mem: Handle formerly unhandled situations with multiple errors...
+ * 1695 FIX: brocade_vdx_status: Is not being inventorized anymore for devices not supporting the check
+ * 1722 FIX: lnx_thermal: fix invalid zero temperature if mode file is missing
+ * 1696 FIX: cisco_temp_sensor: Value reported by check was not always correct (precision was wrong)...
+ * 1727 FIX: cisco_secure: Fixed inventory exception when port security is not enabled
+ * 1728 FIX: cisco_temp_perf: Not inventorized anymore for hosts supporting cisco_temp_sensor
+ * 1724 FIX: emc_datadomain_temps: convert to new standard check output, add PNP template
+ * 1729 FIX: apc_symmetra_test: Cleaned up check, fixed exception when self test date is zero
+ * 1730 FIX: apc_symmetra: Fixed exception when last diagnose date was not known
+ * 1731 FIX: ipmi_sensors: Fixed agent part when ipmi-sensors call on first agent run...
+ * 1732 FIX: dell_powerconnect_cpu: Fixed exception during inventory for incompatible devices
+ * 1733 FIX: dell_powerconnect_psu: Skipping inventory of unsupported System temp sensor for M6220 devices...
+ * 1747 FIX: zfsget: try to speed up agent code for Linux/Solaris/FreeBSD by using -t filesystem,volume...
+ * 1659 FIX: windows agent: fixed output of 64 bit performance counters...
+ * 1748 FIX: win_dhcp_pools: fix naming of WATO rules and informal WARN/CRIT levels in performance data
+ * 1735 FIX: oracle_instance: Inventory function deals better with old bogus agent output
+ * 1736 FIX: lparstat_aix: Trying to deal with more kinds of lparstat output...
+ * 1737 FIX: mk_sap: Working around garbled SAP state file when multiple instances were running in parallel
+ * 1738 FIX: oracle_instance: Be compatible with old oracle agent outputs
+ * 1751 FIX: winperf_ts_sessions: try to fix invalid number of active and inactive sessions...
+ * 1739 FIX: lnx_thermal: Be more compatible with thermal devices which report no "type"
+
+ Multisite:
+ * 1508 Allow input of plugin output and perfdata when faking check results...
+ * 1493 Added config option "Default filter group" to set the initial network topology view filter...
+ * 1497 Implemented password policy capabilities for local users...
+ * 1499 SEC: Fixed XSS injections in different places...
+ * 1069 SEC: Replaced insecure auth.secret mechanism...
+ NOTE: Please refer to the migration notes!
+ * 1500 SEC: Preventing livestatus injections in different places...
+ * 1530 Dashboard: Host/service statistics dashlets now deal with the context...
+ * 1558 Better visualize manually changed notification enable/disable
+ * 1621 Sorting Check_MK* services always on top of services lists
+ * 1636 Crash checks now have an icon for viewing and sending a crash dump...
+ * 1700 Enable icon for link to host/service parameters per default now...
+ * 1705 Better styling of dashboard designer
+ * 1714 Add support for jsonp export (next to json and python)...
+ * 1715 Output icon information in CSV/JSON/Python export of views...
+ * 1164 FIX: Fixed links from servicegroup overviews to single servicegroups
+ * 1166 FIX: Also preventing stylesheet update issues during version updates (just like for JS files)
+ * 1481 FIX: Fix broken layout of Host-, Service- and Contactgroup filters
+ * 1482 FIX: Fix exception when editing a visual of type single host group...
+ * 1487 FIX: Fixed exception in Web GUI "Internal error:: name 'Filter' is not defined" in manual setups (using setup.py)...
+ * 1488 FIX: Fixed wrong information showing up on "Host Group" and "Service Group" views...
+ * 1433 FIX: Quicksearch: no longer shows an invalid search result when looking for multiple hosts...
+ * 1494 FIX: Fixed error in NagVis Maps snapin when some users had no contact groups assigned
+ * 1496 FIX: Fixed exception after editing a dashboard as user without permission to publish dashboards...
+ * 1436 FIX: quicksearch: search with multiple patterns (h: / s:) no longer discards the host pattern...
+ * 1438 FIX: quicksearch: fixed various non-working quicksearch filters...
+ * 1501 FIX: Legacy view formats created with 2014-09 snapshots are now converted...
+ * 1506 FIX: Fixed randomly hidden dashboard title...
+ * 1527 FIX: Fixed views missing values of some filters (serviceregex, hostgroup filters, ...)...
+ * 1528 FIX: Fixed actions in mobile GUI...
+ * 1529 FIX: Mobile-GUI: Fixed "all host problems" view not showing all problems...
+ * 1533 FIX: Fixed sorting of hosts with same name in "services of host" view
+ * 1534 FIX: Fixed filtering of views in distributed setups leading to empty views...
+ * 1553 FIX: Fix deleting (acknowledging) of logfiles in logwatch...
+ * 1537 FIX: Added transformation code for user dashboards created between 2014-08 and 2014-10...
+ * 1538 FIX: Only allow switching sites on/off when permitted to...
+ * 1539 FIX: Fixed refreshing of PNP graphs in dashboards...
+ * 1543 FIX: Hosttag columns are now available right after creating a tag...
+ * 1544 FIX: Fixed exception in complain phase in view editor...
+ * 1573 FIX: WATO Quickaccess snapin: Pending button is not overlapped by icons anymore
+ * 1557 FIX: Fix sorting of hostnames that differ only in upper/lower case
+ * 1577 FIX: Fixed editing of views using the "Downtime for host/service" sorter or column...
+ * 1578 FIX: Folding states of containers with umlauts in titles are now persisted...
+ * 1580 FIX: Views: Hardcoded single context filters are not shown in filter form anymore...
+ * 1581 FIX: Single context views with missing context show an error message now...
+ * 1585 FIX: Dashboard: Fixed massive client CPU load when making graph dashlets too small...
+ * 1586 FIX: Dashboard: Toggling edit/non-edit is now reflected when reloading the page
+ * 1605 FIX: Fixed perfometer of check check_mk-printer_supply_ricoh
+ * 1607 FIX: check_http: Fixed broken links in escaped plugin output
+ * 1614 FIX: Fixed wrong URL in webapi.py documentation
+ * 1619 FIX: Renamed "Hostgroups" and "Servicegroups" views to "Host Groups" and "Service Groups"
+ * 1638 FIX: Fixed small styling problems in wiki snapin
+ * 1641 FIX: Quicksearch: Now able to search for services with backslashes in names
+ * 1642 FIX: Quicksearch: Improved error handling on invalid search statements (invalid regexes)
+ * 1651 FIX: Consolidated painters of service list views...
+ * 1678 FIX: Fixed problem with garbled styles on user profile page after saving
+ * 1680 FIX: Fixed various dashlet designer position/resizing issues...
+ * 1683 FIX: Replaced a lot of old GIF images with better looking PNG images
+ * 1687 FIX: Add visual to dashboard menu can now be closed with click anywhere on page
+ * 1709 FIX: Fix exception when a non-Ascii character is part of the variable part of a view title
+ * 1691 FIX: Fixed problem when watching BI aggregations with umlauts in titles or group name
+
+ WATO:
+ * 1170 Added buttons to move rules to top/bottom of the list to ruleset edit dialog
+ * 1489 Added iCalendar import for generating timeperiods e.g. for holidays...
+ * 1495 Most WATO tables can now be sorted (where useful)...
+ * 1504 WATO makes host tag and group information available for NagVis...
+ * 1535 Disabled services on service discovery page now link to the ruleset
+ * 1587 SEC: Prevent logging of passwords during initial distributed site login...
+ * 1560 Put host and service groups into one WATO menu item...
+ * 1561 Remove Auditlog from the main WATO menu and put it into the Activate Changes page
+ * 1562 Move manual checks into a new WATO module...
+ * 1697 Allow non-Ascii characters in topic of host tag groups
+ * 1707 WATO rule editor: show title of tag group when rendering the conditions of a rule...
+ * 1689 Creating WATO backends for each configured site now...
+ * 1690 Pending changes can now be discarded...
+ * 1693 Added search form to global settings page...
+ * 1717 Split up LDAP configuration dialog into four boxes...
+ * 1165 FIX: Fixed exception in service discovery of logwatch event console forwarding checks...
+ * 1490 FIX: Timeperiod excludes can now even be configured when creating a timeperiod...
+ * 1491 FIX: Fixed bug in dynamic lists where removing an item was not always possible...
+ * 1492 FIX: Fixed too long URL bug when deleting a timeperiod right after creating one
+ * 1498 FIX: Fixed displaying of global settings titles / help texts...
+ * 1502 FIX: Fixed removing elements from ListOf choices during complain phase
+ * 1505 FIX: Snapshots are now bound to the used monitoring core...
+ * 1540 FIX: Host diagnose page: Some tests were failing randomly
+ * 1541 FIX: Fixed missing form fields for notification method when editing rbn default rule
+ * 1542 FIX: Changed text of "debug_log" option to be clearer in distributed setups...
+ * 1546 FIX: Fixed adding cluster nodes to new cluster in complain phase...
+ * 1556 FIX: WATO inventory ignores already inventorized checks which do not exist anymore...
+ * 1576 FIX: SNMP Community host attribute is now visible for IE<=8...
+ * 1588 FIX: Renamed SNMP communities rule to SNMP credentials
+ * 1589 FIX: Restructured SNMP credentials rule specification...
+ * 1620 FIX: Fixed exception during host renaming when host has no perfdata
+ * 1625 FIX: Safely handle characters that have a special meaning in regexes when creating service-specific rules...
+ * 1637 FIX: Fixed exception in notification analysis when notifications have no NOTIFICATIONTYPE set
+ * 1639 FIX: Interfaces with speeds of more than 10GBit/s can now be configured correctly
+ * 1633 FIX: Fix problem that attributes of new WATO folders were not saved...
+ * 1634 FIX: Fix editing of cluster hosts in WATO: cluster-property no longer gets lost...
+ * 1686 FIX: Host renaming also updates explicitly negated hosts in rules
+
+ Notifications:
+ * 1512 Bulk notification can now be grouped according to custom macro values...
+ * 1650 Enabled rule based notifications by default (for new installations)...
+ * 1749 Allow title of notification script to be in third line if second line is encoding: utf-8...
+ * 1660 notification plugin spectrum: now configurable via flexible notifications
+ * 1168 FIX: HTML mails can now be configured to display graphs among each other...
+ * 1514 FIX: Try harder to detect previous hard state in notification when using Nagios as core...
+ * 1582 FIX: Fixed missing graphs in mails when sending notifications to non-contacts...
+ * 1583 FIX: Can use contact groups without hosts/services assigned in RBN rules now...
+ * 1606 FIX: Moved notify.log to var/log/notify.log in OMD environments...
+ * 1570 FIX: Fix notification of check_http active checks with Nagios core...
+ * 1704 FIX: Fix notification analyser in case there are non-Ascii characters in the notification context
+
+ BI:
+ * 1435 FIX: Saving BI aggregations: No longer reports 'Request-URI Too Large'...
+ * 1559 FIX: Fix link from BI icon to BI views (aggregations affected by this host/service)
+ * 1692 FIX: Aggregations with umlauts in title/topic can now be displayed in BI/Availability
+
+ Reporting & Availability:
+ * 1720 FIX: Remove bogus column H.Down if "Consider times where the host is down" is switched off...
+
+ Event Console:
+ * 1169 Added host state type filter to "recent event history" view
+ * 1718 Show groups of regex match of events in details views of Event Console
+ * 1719 Allow both host name and IP address when checking for events in Event Console...
+ * 1531 FIX: Fixed exception in event history view when displaying CHANGESTATE events
+ * 1610 FIX: Hostname translation now also works for incoming SNMP traps
+ * 1643 FIX: Improved error handling of exceptions when processing log lines
+ * 1644 FIX: Fixed matching dynamic number of regex match groups...
+ * 1698 FIX: Fix specifying explicit path to unix socket for check_mkeventd
+
+ Livestatus:
+ * 1613 FIX: Fixed invalid json format in Stats query with requested headers...
+
+ HW/SW-Inventory:
+ * 1479 liveproxyd: new function for collecting remote inventory data...
+ NOTE: Please refer to the migration notes!
+ * 1452 Solaris HW/SW-Inventory added...
+ * 1547 FIX: win_cpuinfo: fix case where NumberOfCores is missing (Windows 2003)...
+ * 1552 FIX: mk_inventory.ps1: fix garbled or missing entries by removing bogus binary zeroes...
+ * 1752 FIX: win_exefiles: handle case gracefully where no size information is available
+ * 1753 FIX: win_bios: handle case with colons in BIOS version
+
+ inventory:
+ * 1516 FIX: win_disks: fix exception in case of empty signature
+
+
+1.2.5i6:
+ Core & Setup:
+ * 1008 Overall check timeout for Check_MK checks now defaults to CRIT state...
+ * 1373 SEC: Do not output complete command line when datasource programs fail...
+ * 1425 New section header option "encoding" for agent output... (see the example below)
+ * 1129 FIX: Windows MSI-Installer: some systems created corrupted check_mk_agent.msi files...
+ * 1426 FIX: windows agent: logwatch: no longer reports incorrectly formatted texts (japanese characters)...
+ * 1429 FIX: Disabled snmp checktypes are now sorted out before Check_MK contacts the snmp host...
+
+ Checks & Agents:
+ * 0185 knuerr_rms_humidity, knuerr_rms_temp: Two new Checks to Monitor the Temperature and the Humidity on Knürr RMS Devices
+ * 1065 heartbeat_crm / heartbeat_crm.resources: Rewrote checks / formalized parameters...
+ * 1068 livedump: Added optional check interval (detect staleness) / option to encrypt mails...
+ * 1093 windows agent: performance counter can now be specified by name...
+ * 0189 docsis_channels: Support for Frequency of Downstream Channels for Devices with DOCSIS MIB
+ * 0190 docsis_channels_upstream: New check for monitoring upstream channels on cable modems with DOCSIS MIB
+ * 0193 docsis_cm_status: New Status Check for Cable Modems with DOCSIS MIB.
+ * 1070 printer_input/printer_output: New checks to monitor input/output sub-units of printers...
+ * 0196 esx_vsphere_hostsystem: New subcheck for maintenance mode...
+ * 0197 check_uniserv: New Check for Uniserv Data Management Services...
+ * 0199 veeam_client: Check rewritten to get a nicer output
+ * 0200 arris_cmts_cpu, arris_cmts_temp: New Checks for Arris CMTS Devices (Temperature and CPU Utilization)
+ * 0202 cisco_temp_sensor: It is now possible to configure this check in WATO...
+ * 1172 New check sap.value_groups...
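 Werk 1425 above lets an agent plugin declare the character encoding of its
 section so the server decodes it correctly. A minimal sketch of such agent
 output, not taken from the source; the section name and the chosen encoding
 are illustrative:

    <<<my_plugin_section:encoding(latin1)>>>
    text with latin-1 characters ...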
+ * 1173 cisco_secure: Check now creates a summary instead of one service per port...
+ NOTE: Please refer to the migration notes!
+ * 1174 rms200_temp: New Temperature check for RMS200 Devices
+ * 1175 dell_idrac_disks: New Check for Harddisks using Dell iDrac
+ * 0644 adva_fsp_if: instead of lower warning and critical levels check now supports lower and upper levels
+ NOTE: Please refer to the migration notes!
+ * 1006 printer_pages: add Perf-O-Meter and PNP template
+ * 0646 brocade_fcport: the administrative states for which ports are inventorized can now be configured in WATO
+ * 1010 chrony: new check for NTP synchronization via chrony on Linux...
+ * 1011 ibm_svc_systemstats.disk_latency: introduce levels for alerting...
+ * 1372 cisco_vss: new check for monitoring state of Cisco Virtual Switches
+ * 0648 brocade_fcport: new speed calculation of isl_ports...
+ * 0649 f5_bigip_pool: check now also prints the node names of down nodes
+ * 1374 arc_raid_status: moved plugin into main Linux agent...
+ NOTE: Please refer to the migration notes!
+ * 1375 vxvm_enclosures, vxvm_multipath, vxvm_objstatus: joined into one agent plugin called vxvm...
+ * 1376 dmraid: moved plugin code into normal Linux agent...
+ * 1377 Renamed agent plugin resolve_hostname into dnsclient, made portable to all Unices...
+ * 1146 nfsmounts: supported by AIX agent now...
+ * 1103 windows agent: now able to omit context text of logfiles...
+ * 1150 netstat: new check for monitoring TCP/UDP connections on Linux and AIX...
+ * 0654 oracle_instance: now also monitors the log mode
+ * 1176 winperf_msx_queues: The list of counters for inventory can now be configured per host using WATO
+ * 0656 brocade_fcport: inventory rule can now choose upon physical and operational states as well; state choices were also updated
+ * 1177 Hivemanager: New agent to check hivemanager devices
+ * 1383 oracle_asm_diskgroup: Account for offline disks and required mirror free space...
+ NOTE: Please refer to the migration notes!
+ * 1178 arris_cmts_mem: New check for Memory usage on arris cmts modules.
+ * 1179 bluecat_dhcp: New Check for DHCP Service on bluecat adonis devices.
+ * 1180 bluecat_dns, bluecat_dns_queries: New DNS Checks for Bluecat Adonis.
+ * 1181 bluecat_ntp: New Check for NTP on bluecat adonis or proteus devices
+ * 1105 wmic_if.ps1: Powershell version of the wmic_if.bat script...
+ * 1182 bluecat_ha: New Check for HA Status on Bluecat Adonis devices
+ * 1183 bluecat_commandserver: New Check for bluecat adonis devices
+ * 1397 juniper_screenos_cpu, juniper_screenos_fan, juniper_screenos_mem, juniper_screenos_temp, juniper_screenos_vpn: new checks for Juniper ScreenOS Firewalls
+ * 1106 mk_inventory.ps1: now uses the MK_CONFDIR environment variable from the agent (if available)...
+ * 1107 windows agent: now sets additional environment variables...
+ * 1108 printer_io.include: included tray description in check output
+ * 0657 diskstat: cluster support added for single disk modes
+ * 1111 vCenter monitoring: greatly improved performance (at least 40 times faster)...
+ * 1112 esx_vsphere_hostsystem.mem_usage_cluster: allows to monitor total RAM usage of all nodes in a cluster...
+ * 0658 brocade_info: new check to retrieve informational data about Brocade switches
+ * 1385 oracle_instance: new WATO rules for archivelog, logging, login and uptime...
+ * 1403 kernel.util: allow levels for the total CPU utilization...
+ NOTE: Please refer to the migration notes!
+ * 1117 agent_vsphere: now able to query license information from esx system...
+ * 1118 bluecat_dns, bluecat_dhcp: now able to run as clustered checks...
+ * 1409 Extended Check_MK-API: check function may return None... (see the sketch below)
+ * 0659 domino_tasks: new check to monitor tasks on a lotus domino server via snmp
+ * 1187 Hivemanager: Extended Check and Agent...
+ * 1130 esx monitoring: agent_vsphere now retrieves additional data (used by HW-inventory)...
+ * 1422 agent_vsphere: now able to configure where the power state of a vm or esx-host should be assigned...
+ * 1442 ups_socomec_out_source: New check for checking the power source of out phases for Socomec UPSs
+ * 0662 domino_mailqueues: new check to monitor mail queues in Lotus Domino
+ * 1188 veeam_client: Check now also outputs ReadSize and TransferedSize...
+ * 0663 domino_info: new check to extract informational data about a Lotus Domino Server
+ * 0664 domino_users: new check to monitor the number of users on a Domino Notes server
+ * 1447 domino_transactions: new check to monitor the number of transactions per minute on Lotus Domino servers
+ * 1190 statgrab_cpu: Check can now handle parameters
+ * 1191 Linux agent now also sends information about tmpfs...
+ * 1193 ps: Manual Checks can now use RegEx for user matching...
+ * 1194 Linux Agent now supports monitoring of cifs mounts
+ * 1195 AIX Agent now also supports monitoring of cifs mounts
+ * 1196 apache_status: Added timeout...
+ * 1443 ups_socomec_outphase: New check for monitoring the out phases of Socomec UPSs
+ * 1051 FIX: tcp_conn_stats: fix missing performance data...
+ * 1142 FIX: winperf_ts_sessions: fix computation, check has never really worked
+ * 1090 FIX: zfsget: fixed exception which happened on incomplete zfs entries
+ * 0187 FIX: hp_proliant_power: Fixed Wato configuration
+ * 0192 FIX: oracle_rman_backups: No longer tries to make an inventory for broken plugin outputs
+ * 0194 FIX: raritan_pdu_inlet: Check now outputs the correct values...
+ NOTE: Please refer to the migration notes!
+ * 1071 FIX: oracle_rman_backups: Only inventorize ARCHIVELOG / DB FULL / DB INCR entries...
+ * 1152 FIX: mk-job: The check now captures currently running jobs and their start time...
+ * 0198 FIX: cisco_temp_sensor: Removed dicey detection for temperature value...
+ * 0645 FIX: brocade_fcport: since in newer firmware (7.*) swFCPortSpeed is deprecated, we now calculate port speed from IF-MIB::ifHighSpeed
+ * 1097 FIX: windows_agent: preventing missing agent sections on first query...
+ * 1009 FIX: df: deal with space in file system type for PlayStation file system...
+ * 1098 FIX: esx_vsphere_counters.diskio: Now reports unknown when counter data is missing
+ * 1143 FIX: dell_powerconnect_temp: fix configuration via WATO...
+ * 1144 FIX: blade_bx_temp, dell_chassis_temp, emerson_temp, ibm_svc_enclosurestats, ups_bat_temp: rename service description...
+ NOTE: Please refer to the migration notes!
+ * 1145 FIX: windows_tasks: handle case correctly where task is currently running...
+ * 1378 FIX: mk_logwatch: remove exceeding \n when rewriting message and using \0...
+ * 1147 FIX: ups_capacity, ups_socomec_capacity: Fix checking of battery left levels...
+ * 1099 FIX: tsm_scratch: now returns the variable name instead of the values during inventory...
+ * 0650 FIX: f5_bigip_pool: limits to the number of active nodes are now correctly applied...
+ NOTE: Please refer to the migration notes!
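 Werk 1409 above extends the check API so that a check function may simply
 return None instead of a (state, infotext) tuple. A minimal sketch, not
 taken from the source; the check name and logic are illustrative, and the
 core then handles the absent result itself (typically reporting the item
 as missing) instead of failing:

    def check_example(item, params, info):
        for line in info:
            if line[0] == item:
                return 0, "item %s is present" % item
        # Returning None (or just falling off the end of the function)
        # is now permitted by the extended API.
        return None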
+ * 1102 FIX: esx_vsphere_counters: no longer raise false alarms because of invalid data from ESX Host...
+ * 1149 FIX: check_mk-ibm_svc_systemstats.diskio, check_mk-ibm_svc_systemstats.iops: fix exception in Perf-O-Meter
+ * 0651 FIX: f5_bigip_interfaces: Fix invalid throughput values, detect newer F5 devices...
+ * 1393 FIX: casa_cpu_temp, casa_cpu_util: Change service description to standard...
+ NOTE: Please refer to the migration notes!
+ * 1104 FIX: winperf_if: Improved matching of data from wmic_if.bat / wmic_if.ps1 scripts...
+ * 1110 FIX: windows agent: fixed missing agent section problem if a cached script ran into a timeout...
+ * 1113 FIX: oracle_rman: fixed exception when backup was currently running
+ * 1114 FIX: bluecat_threads: no longer detected on wrong systems...
+ * 1116 FIX: megaraid_ldisk: no longer raises an exception for adapters with 'No Virtual Drive Configured'
+ * 1122 FIX: windows agent: unicode logfile monitoring: now able to detect incompletely written lines...
+ * 1184 FIX: cisco_power: Fixed detection of item. In some cases the status information was part of the item...
+ NOTE: Please refer to the migration notes!
+ * 1078 FIX: Fix compensation for daylight saving time in prediction
+ * 1126 FIX: bluecat_ntp: check no longer crashes on evaluating sysLeap values higher than 1...
+ * 1127 FIX: bluecat_dhcp: fixed exception when data was available; returns UNKNOWN when data is missing
+ * 1128 FIX: bluecat_dns: now reports UNKNOWN if no snmp data is available
+ * 1131 FIX: esx_vsphere_hostsystem.maintenance: fixed misspelling in service description...
+ NOTE: Please refer to the migration notes!
+ * 1161 FIX: fc_port: Fixed invalid values of counters, fixed wrong values in graphs...
+ * 1192 FIX: veeam_jobs: Check now recognizes sync jobs...
+ * 1386 FIX: oracle_jobs: Bugfix for forever running jobs...
+ * 1427 FIX: esx_vsphere_hostsystem.multipath: no longer crashes at invalid multipath types...
+
+ Multisite:
+ * 1066 New Dashboard Designer...
+ * 1392 WATO Folder filter: show only the paths a user is allowed to see
+ * 1398 Allow to spread times of next check when rescheduling...
+ * 1405 Checkbox for setting downtimes on the hosts of the selected services...
+ * 1410 Output log text of scheduled downtime log entries...
+ * 1411 New built-in views for the history of scheduled downtimes
+ * 1185 mobile ui: Added a new view to see events from the Event Console
+ * 1412 Speedup of displaying and sorting by WATO folder path
+ * 1477 New screenshot mode for Multisite...
+ * 1067 FIX: Fixed login problem in LDAP connector when no user filter specified...
+ * 1094 FIX: sidebar snapin 'Tree of folders': fixed exception
+ * 1154 FIX: Availability: Fixed unwanted redirect to edit annotation page after editing availability options...
+ * 1401 FIX: Display options in views are now again persistent...
+ * 1120 FIX: Multisite filters Host/Service Contactgroup: Fixed livestatus exception...
+ * 1158 FIX: Moved filter logic to visuals module...
+ NOTE: Please refer to the migration notes!
+ * 1077 FIX: Fixed labelling of Y axis in prediction graphs...
+ * 1162 FIX: User profiles can not be edited on WATO remote sites anymore...
+
+ WATO:
+ * 1096 New WATO web service: manage hosts via a new HTTP API...
+ * 1155 NagVis map edit/view permissions can now be set using roles/groups...
+ * 1115 Renamed rule: Hosts using SNMP v2c -> Legacy SNMP devices using SNMP v2c...
+ * 1404 Make title/help of custom user attributes localizable...
+ * 1159 Remote BI Aggregations can now be configured to be checked as single services...
+ * 1163 Service discovery: Added direct link to check parameter ruleset of services...
+ * 1428 Web-API: now able to add cluster hosts...
+ * 1064 FIX: Fixed rare issue with WATO communication in distributed setups (different OS versions)...
+ * 1089 FIX: Snapshot restore: fixed exception during exception handling...
+ * 1091 FIX: logwatch patterns: allow unicode text in pattern comment
+ * 1092 FIX: logwatch: now able to enter unicode text into the "Pattern (Regex)" field
+ * 0191 FIX: Added swp files to the ignore list for the WATO git feature...
+ * 1153 FIX: Changed custom user attributes can now be used immediately...
+ * 0201 FIX: Fixed error message in Rulelist of RBN...
+ * 1100 FIX: WATO backup domains: fixed bug where excluded files still got deleted on snapshot restore...
+ * 1101 FIX: WATO check parameter: renamed 'Nominal Voltages' to 'Voltage Levels..'
+ * 1396 FIX: Fix default setting of Enable sounds in views...
+ * 1109 FIX: WATO active checks: passwords no longer shown as plain text...
+ * 1119 FIX: WATO create rule: No longer raises an incorrect permission warning when creating a new rule...
+ * 1121 FIX: Rule based notifications form: No longer raises Request-Uri-Too-Large errors...
+ * 1160 FIX: Fixed wrongly named column in mkeventd rules
+ * 1430 FIX: Clone group: Now displays correct alias name of cloned group...
+
+ Notifications:
+ * 1151 Add variables (HOST/SERVICE)ACK(AUTHOR/COMMENT) to notification context...
+ * 1394 HTML notifications have a new content field for debugging variables...
+ * 1400 Added example notification script for Pushover to doc/treasures/notifications...
+ * 1123 Rule based notifications: New condition "Match Service Groups"
+ * 1186 RBN: It's now possible to filter for contact groups...
+ * 1189 sms notification: also send information about Downtimes, Acknowledgments and Flapping now
+ * 1424 mknotifyd: now able to check if it's still listening for telegrams...
+ * 1156 FIX: Graphs in HTML mails are now sent again where they were missing...
+ * 1157 FIX: Fixed SMS plugin on at least debian (distros which have no sendsms/smssend)...
+ * 1407 FIX: Fix exception in rule based notification on non-Ascii characters in log message
+ * 1408 FIX: mknotifyd now really reads all configuration files below mknotifyd.d...
+
+ BI:
+ * 1406 Assume PEND in count_ok aggregations if all nodes are PEND...
+
+ Event Console:
+ * 1148 Allow execution of actions when cancelling events...
+ * 1395 Event Console can now create notifications via Check_MK RBN...
+ * 1007 FIX: check_mkevents: fix case where events contain binary zeroes
+ * 1399 FIX: Fix left-over tac processes when showing Event Console history...
+ * 1402 FIX: Fixed case where counting events did not reach required count...
+ * 1124 FIX: WATO EC configuration: no longer raises an exception when user has restricted WATO access...
+ * 1125 FIX: EC actions are now saved when an EC rule has "Send monitoring notification" set...
+
+ HW/SW-Inventory:
+ * 0643 windows inventory: OS now contains the install date, reg_uninstall now contains the path...
+ NOTE: Please refer to the migration notes!
+ * 0652 windows software inventory gives some more details about OS and installed software...
+ NOTE: Please refer to the migration notes!
+ * 0653 script to extract HW/SW-Inventory data in CSV format...
+ * 0660 mk_inventory.ps1: now uses the Install Location as path for win_reg_uninstall
+ * 0661 HW/SW-Inventory: install date of software packages no longer in unix timestamps but date format...
+ NOTE: Please refer to the migration notes!
+ * 1413 HW/SW-Inventory implementation step one finished...
+ * 0655 FIX: win_cpuinfo and mk_inventory.ps1 agent: unit of CPU speed fixed, fixes for long output lines in agent
+ * 1379 FIX: Fixed filter "Host has inventory data"...
+ * 1423 FIX: Host HW-inventory: no longer generates an exception on displaying the BIOS date
+
+ check:
+ * 1384 oracle_jobs: new WATO rules, changed service name to SID.OWNER.NAME...
+ NOTE: Please refer to the migration notes!
+
+
+1.2.5i5:
+ Core & Setup:
+ * 1012 Fix quoting of backslashes in custom checks with nagios core...
+ NOTE: Please refer to the migration notes!
+ * 1038 Massive speedup of cmk --snmptranslate
+ * 1035 FIX: Do not fail on errors in *.mk files anymore - except in interactive mode...
+ * 0174 FIX: Fixed appending of --keepalive-fd parameters to checkhelpers...
+ * 1053 FIX: Fixed events check always reporting OK state...
+ * 1045 FIX: Gracefully restart check_mk helpers in case of memory leak...
+ * 0633 FIX: diskstat: fixed performance data of old legacy disk IO read/write data...
+
+ Checks & Agents:
+ * 0168 f5_bigip_pool: Added Wato configuration...
+ * 0995 raritan_pdu_outletcount: new check for outlet count of Raritan PX-2000 family PDUs
+ * 0169 websphere_mq_channels, websphere_mq_queues: New Checks to monitor IBM Websphere MQ Queues and Channels...
+ * 1034 Always provide also 64 bit version of Windows agent
+ * 0170 hp_proliant_power: New check to monitor the Power Meter on Proliant Servers and iLO Boards
+ * 0172 zfsget: Check is now usable in cluster_mode...
+ * 1039 aix_diskiod: new check for disk IO on AIX
+ * 0997 New checks and a special agent for ALLNET IP Sensoric devices...
+ * 0175 logwatch.groups: New logwatch subcheck which can be used to group logfiles together...
+ * 1041 aix_memory: new check for RAM and SWAP on AIX
+ * 0998 ibm_imm_health: Trying to recognize newer versions of IBM IMM now too
+ * 0628 raritan_pdu_inlet: now also monitors the three phases of the inlet
+ * 1073 sni_octopuse_cpu: added PNP graph definition and Perf-O-Meter
+ * 0178 mssql_tablespaces: It is now possible to define thresholds
+ * 0999 allnet_ip_sensoric.pressure: New Check for Pressure Sensors in ALLNET IP Sensoric devices
+ * 1082 windows agent: now also available as msi installer...
+ * 0179 check_dns: It is now possible to use the local dns server in WATO configuration...
+ * 1058 livedump-mail-fetch: Now supporting either quoted-printable or non-encoded mails...
+ * 0180 sap: It is now possible to add multiple sap instances to the sap.cfg file...
+ * 0181 citrix_sessions, citrix_serverload: New checks for Citrix Load (a Score calculated by citrix) and the number of sessions
+ * 0637 jolokia_metrics.gc, jolokia_metrics.tp, jolokia_info: two new subchecks for the jolokia_metrics checks and better error handling for jolokia_info...
+ * 1000 qlogic_sanbox.temp: New Check for temperature sensors in QLogic SANbox Fibre Channel Switches
+ * 1001 qlogic_sanbox.psu: New Check for power supplies in QLogic SANbox Fibre Channel Switches
+ * 0182 MegaCli: Agent now also supports the 64bit version (Thanks to Philipp Lemke)
+ * 1132 qlogic_fcport: New Check for Fibre Channel Ports in QLogic SANbox FC Switches
+ * 1133 qlogic_sanbox_fabric_element: New Check for Fabric Elements in QLogic SANbox Fibre Channel Switches
+ * 1134 bintec_sensors.fan: New Check for Fan Speed of Bintec Routers
+ * 1135 bintec_sensors.voltage, bintec_sensors.temp: New Checks for Voltage and Temperature Sensors of Bintec Routers
+ * 1048 mem.win: support predictive levels...
+ * 1136 bintec_brrp_status: New Check for BRRP States on Bintec Routers
+ * 0640 jolokia_metrics.gc, jolokia_metrics.tp: now come with their own pnp templates
+ * 1088 included check_mk_agent windows msi installer...
+ * 0183 sentry_pdu: New check to monitor plugs of sentry PDUs
+ * 0184 knuerr_sensors: New Check to monitor Sensors on a Knürr RMS Device
+ * 0994 FIX: agent plugin smart: fixed syntax error
+ * 0989 FIX: logwatch.ec: Fix forwarding multiple messages via syslog/TCP...
+ * 0943 FIX: if.include: fixed incorrect traffic percentage values in the check output of if checks...
+ * 0944 FIX: oracle_tablespaces: fixed calculation of space left and number of remaining increments...
+ * 1032 FIX: check_traceroute: Fix option Use DNS, worked vice versa
+ * 0171 FIX: hp_blade_psu: Fixed pnp template...
+ * 0996 FIX: apc_symmetra_test: Handle unknown date of last self test as intended...
+ * 0173 FIX: hitachi_hnas_volume: Fixed bug when snmp outputs empty lines
+ * 1037 FIX: bintec_info: support bintec RXL12500
+ * 0948 FIX: mk_inventory.ps1: increased caching time to 14400, fixed incorrect default cachefile path
+ * 0827 FIX: lnx_thermal: Not checking active trip points (e.g. cooling device triggers) anymore
+ * 1043 FIX: printer_supply: fix value error in default parameters...
+ * 0626 FIX: veeam_jobs: agent now supports output lines longer than 80 chars
+ * 1072 FIX: printer_supply: fix colors of Perf-O-Meter on HP OfficeJet...
+ * 0950 FIX: check_mkevents: now able to resolve the hostname of the remote hosts...
+ * 0177 FIX: esx_vsphere_hostsystem.multipath: Fixed return state in case of paths in standby...
+ * 1054 FIX: mysql_slave: Only monitor the age of the slave when it is running
+ * 1075 FIX: if, if64: Fixed PNP template in order to correctly scale Y axis
+ * 0631 FIX: fc_port: several fixes for the perfometer to display the right values...
+ * 0632 FIX: brocade_fcport: fix perfometer output of out bandwidth when averaging is switched on
+ * 1055 FIX: mysql_slave: Fixed detecting CRIT states when IO/SQL slaves are not running
+ * 0634 FIX: Max Bandwidth for PNP-Graphs of Interface checks corrected...
+ * 0635 FIX: fc_port: the check no longer inventorizes ports with administrative state of 'unknown' or 'offline'
+ * 0636 FIX: fc_port: do not inventorize if brocade fibre channel mib is also supported on the device...
+ * 1083 FIX: ad_replication.bat: does not return data if the server is not a DC
+ * 0638 FIX: windows_updates: agent plugin now always sends section header, even if no update information provided...
+ * 1084 FIX: ps: now able to handle bigger process groups without constant MKCounterWrapped Exceptions...
+ * 1087 FIX: Active checks: Non-ascii check commands now converted into utf-8...
+ * 1049 FIX: ups_capacity: Fix exception when running on battery...
+ * 0639 FIX: jolokia_metrics: fix for problem when catalina uses the standalone engine
+ * 1050 FIX: websphere_mq_queues: make compatible with old agent, fix not-found case
+
+ Multisite:
+ * 1013 Sort host names naturally, e.g. foobar11 comes after foobar2...
+ * 1033 New Multisite filter for the number of services a host has...
+ * 0949 quicksearch: now able to search for multiple hosts at once...
+ * 1052 SEC: index start URL can not be used to redirect to absolute URLs anymore...
+ * 1085 quicksearch: multiple hostname matches now lead to the searchhost view instead of the hosts view...
+ * 1047 Virtual Host Tree: Allow to use topic as tree level...
+ * 1062 SEC: Fixed several XSS issues on different pages...
+ * 1063 SEC: Fixed several XSS issues on different pages...
+ * 0945 FIX: Sidebar snapin "Problem hosts": Now excludes hosts and services in downtime
+ * 1036 FIX: doc/treasures/downtime: fix --url option, better error output
+ * 1074 FIX: Fix Virtual Host Tree snapin...
+ * 1059 FIX: LDAP: Using configured user filter during login to prevent temporarily created users...
+ * 1060 FIX: Fixed exception during first login of a user when saving of access times is enabled...
+
+ WATO:
+ * 0825 WATO: Hover menu of user online state shows the last seen date/time now
+ * 1057 WATO folder permissions are only exported to NagVis when configured...
+ * 1086 check_http: now able to enter non-ascii signs in "Send HTTP POST data" rule...
+ * 0990 FIX: Fix HTTP error handling in bulk inventory...
+ * 1004 FIX: Fix exception when saving rules, caused by empty item
+ * 0947 FIX: WATO snapshots: fixed missing files on restoring nagvis backup domains
+ * 0826 FIX: Fixed problem where user access times were not updated correctly
+ * 1044 FIX: Remove icon for service parameters in WATO service list for missing services...
+ * 1056 FIX: Fixed selection of hosts for bulk actions
+
+ Notifications:
+ * 1042 Rule based notifications: allow matching on host groups...
+ * 0828 FIX: Mails sent with mail/asciimail plugin now really set the from address
+ * 1061 FIX: SMS notifications: correctly handling spaces in phone numbers...
+
+ Reporting & Availability:
+ * 0991 FIX: Availability: optionally show time stamps as UNIX epoch time...
+ * 1076 FIX: Fix wrong percentual host availability > 100% when excluding downtimes...
+
+ Event Console:
+ * 1040 FIX: Avoid sporadic errors when checking event state in Event Console...
+
+ Livestatus:
+ * 0988 FIX: livedump: Fix exception in case no contact groups are defined for a service
+ * 0951 FIX: table servicegroups: fixed service visibility when using group_authorization AUTH_STRICT...
+
+ HW/SW-Inventory:
+ * 0625 hw/sw inventory now reads the kernel version and architecture for linux and windows
+ * 0627 lnx_video, win_video: added inventory function and agent for linux video cards, modified windows inventory function
+ * 0629 improvements to windows sw/hw inventory (encoding, more details for sw inventory)
+ * 0630 win_disks: hardware inventory for physical disks in windows
+ * 1046 Added AIX support for HW/SW-Inventory...
+ * 0167 FIX: mk_inventory.linux: Changed field separator from pipe to tab...
+ * 1005 FIX: Fix exception when using pretty-print output format
+ * 0946 FIX: hw/sw inventory: fixed display bug for byte fields with the value 0...
+ * 0641 FIX: windows inventory: moved encoding from checks to windows agent plugin
+
+
+1.2.5i4:
+ Core & Setup:
+ * 0940 SEC: Fixed various core SIGSEGV when using malformed livestatus queries...
+
+ Checks & Agents:
+ * 0812 nginx_status: New check for monitoring status information of the Nginx web server...
+ * 0986 citrix_licenses: new check for monitoring Citrix licenses
+ * 0814 Agent versions can now be checked with "at least version X" parameters...
+ * 0815 mysql_slave: New check for monitoring MySQL slave sync state
+ * 0617 adva_fsp_if: new check to monitor interfaces of the ADVA FSP 3000 scalable optical transport solution
+ * 0618 adva_fsp_current: new check for the power supply units of the ADVA FSP 3000 scalable optical transport solution
+ * 0619 adva_fsp_temp: new check to monitor temperature and temperature trends on ADVA scalable optical transport solutions
+ * 0993 raritan_pdu_inlet: now delivers performance data
+ * 0624 fc_port: new check for fibre channel devices supporting the FCMGMT MIB
+ * 1003 ibm_svc_enclosure: support new firmware, also check fan modules
+ * 0616 FIX: brocade.fan, brocade.power, brocade.temp: will now only discover services which are not marked as absent
+ * 0992 FIX: zfs_arc_cache: returns OK even if values of arc meta are missing...
+ * 0936 FIX: agent_ibmsvc: improved error messages on using wrong credentials
+ * 0621 FIX: zfsget: better filesystem selection and calculation of sizes...
+ * 0819 FIX: Fixed keepalive termination in case of exceptions during checking...
+ * 0622 FIX: cisco_temp_sensor: fix to also work with newer IOS versions
+ * 0623 FIX: fsc_fans: upper levels for fan RPMs are now optional also for the check
+ * 0823 FIX: mk_sap: Fixed some wrongly calculated values (decimal numbers)...
+
+ Multisite:
+ * 0982 SEC: Fix two XSS weaknesses according to CVSS 8.5 AV:N/AC:M/Au:S/C:C/I:C/A:C...
+ * 0983 SEC: Fix security issue in code of row selections (checkboxes) (CVSS 4.9 AV:N/AC:M/Au:S/C:N/I:P/A:P)...
+ * 0934 FIX: Logwatch messages with class unknown ( 'u' ) now displayed as WARN...
+ * 0166 FIX: mobile gui: Fixed colors of command list...
+ * 0820 FIX: Fixed wrong NagVis links in "custom links" snapin
+ * 0938 FIX: logwatch: fixed incorrect display of warning messages
+ * 0939 FIX: Fixed multisite exception caused by missing explanation text for an AUTODELETE event action
+ * 0822 FIX: Sorting columns in view dashlets is now working again
+ * 0941 FIX: esx_vsphere_hostsystem.cpu_usage: pnpgraph now displays AVERAGE instead of MAX values in all timeframes...
+ * 0942 FIX: check_mk-winperf.cpuusage.php: now displays AVERAGE values instead of MAX...
+
+ WATO:
+ * 0984 Fix code injection for logged in users via automation url...
+ NOTE: Please refer to the migration notes!
+ * 0987 New button for updating DNS cache...
+ * 0824 SEC: Valuespecs: Fixed several possible HTML injections in valuespecs...
+ * 0813 FIX: LDAP: Improved slightly misleading logging of LDAP sync actions...
+ * 0935 FIX: CPU utilization: increased maximum value to 10000...
+ * 0821 FIX: Reducing size of auth.php (needed for authorisation in NagVis) in large environments...
+
+ Notifications:
+ * 1002 FIX: Fix crash when debugging notifications with non-Ascii characters...
+
+ Reporting & Availability:
+ * 0985 Availability: display phases of frequent state changes as "chaos"...
+
+ Event Console:
+ * 0816 States of events can now be set by patterns...
+
+ HW/SW-Inventory:
+ * 0620 new version of Check_MK's hardware and software inventory including a much extended windows agent and inventory functions
+ * 0818 FIX: Fixed exception in HW/SW inventory search dialog...
+
+
+1.2.5i3:
+ Core & Setup:
+ * 0884 New options --oid and --extraoid for cmk --snmpwalk... (see the usage sketch below)
+ * 0785 FIX: Availability: fixed memory leak in table statehist...
+ * 0903 FIX: availability: fixed bug causing the availability feature to not consider timeperiod transitions
+ * 0888 FIX: Fix SNMP inventory check in simulation mode
+
+ Checks & Agents:
+ * 0149 cisco_secure: New check for Port Security on Cisco switches
+ * 0751 New localcheck for Linux that makes sure that filesystems in /etc/fstab are mounted...
+ * 0783 enterasys_lsnat: new check monitoring the current LSNAT bindings
+ * 0601 printer_alerts: check can now display a textual representation of the alert code...
+ NOTE: Please refer to the migration notes!
+ * 0799 ibm_svc_systemstats.cpu_util: New check for CPU Utilization of an IBM SVC / V7000 device in total
+ * 0800 ibm_svc_nodestats.cache, ibm_svc_systemstats.cache: New checks for Cache Usage of IBM SVC / V7000 devices
+ * 0150 printer_supply: New option to upturn toner levels...
+ * 0801 ibm_svc_eventlog: New Check for Messages in Event log of IBM SVC / V7000 devices
+ * 0151 enterasys_cpu_util: Changed check to no longer summarize all modules...
+ NOTE: Please refer to the migration notes!
+ * 0802 ibm_svc_nodestats.iops, ibm_svc_systemstats.iops: new checks for IO operations/sec on IBM SVC / V7000 devices
+ * 0602 cmciii.humidity: new check for Rittal's CMC III humidity sensors
+ * 0829 oracle_tablespaces: improved formatting of levels text in check output...
+ * 0757 Linux multipath check can now use the alias instead of the UUID as item...
+ * 0879 windows_tasks: output last and next run time
+ * 0881 rmon_stats: now needs to be activated via a rule in order to be inventorized...
+ NOTE: Please refer to the migration notes!
+ * 0804 ibm_svc_portfc: New check for status of FC Ports in IBM SVC / Storwize V3700 / V7000 devices
+ * 0805 ibm_svc_enclosure: New Check for Enclosures, Canisters and PSUs in IBM SVC / Storwize V3700 / V7000 devices
+ * 0806 ibm_svc_enclosurestats.temp: New Check for temperature in enclosures of IBM SVC / Storwize V3700 / V7000 devices
+ * 0807 ibm_svc_enclosurestats.power: New check for power consumption of enclosures of IBM SVC / Storwize V3700 / V7000 devices
+ * 0808 brocade_mlx*: Checks now also work correctly with Brocade ADX / FGS / ICX devices
+ * 0892 wagner_titanus_topsense: new info check and overall status check for Wagner Titanus Top Sens devices
+ * 0893 wagner_titanus_topsense.alarm: New check for Alarms Triggered on Wagner Titanus Top Sens devices
+ * 0894 wagner_titanus_topsense.smoke: New check for Smoke Detectors in Wagner Titanus Top Sens devices
+ * 0895 wagner_titanus_topsense.chamber_deviation: New Check for Chamber Deviation from Calibration Point in Wagner Titanus Top Sens devices
+ * 0152 fsc_fans: Added support for Wato configuration and upper limits
+ * 0896 wagner_titanus_topsense.airflow_deviation: New Check for Airflow Deviation in Wagner Titanus Top Sens devices
+ * 0897 wagner_titanus_topsense.temp: New Check for Temperature measured by Wagner Titanus Top Sens devices
+ * 0898 ibm_svc_nodestats.disk_latency, ibm_svc_systemstats.disk_latency: New Checks for Disk Latency in IBM SVC / Storwize V3700 / V7000 devices
+ * 0156 akcp_daisy_temp: New Check for akcp daisyTemp sensor chains...
+ * 0899 enterasys_temp: New Check for temperature sensor in Enterasys Switches
+ * 0901 ibm_svc_portfc: more devices recognized...
+ * 0952 ibm_svc_array: New check for Status of RAID Arrays in IBM SVC / Storwize devices.
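 A usage sketch for werk 0884 above, not taken from the source: the host
 name and OIDs are illustrative. As the option names suggest, --oid walks
 only the given OID instead of the standard set, while --extraoid adds
 further OIDs to the walk:

    cmk --snmpwalk --oid .1.3.6.1.2.1.2 --extraoid .1.3.6.1.4.1 myhost12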
+ * 0911 esx_vsphere_hostsystem.multipath: now able to configure paths minimum count...
+ * 0159 brocade: Added support for brocade fdx switches
+ * 0160 brocade_vdx_status: New check to monitor the operational state of vdx switches.
+ * 0916 if: now able to configure minimum bandwidth limits
+ * 0917 df checks: now able to show time left until disk full as perfometer and pnpgraph...
+ * 0954 juniper_bgp_state: New Check for BGP status at Juniper Routers
+ * 0955 zfs_arc_cache, zfs_arc_cache.l2: New Checks for Hit Ratios and Sizes of ZFS arc Cache
+ * 0162 if_brocade: New if64 Check version for Brocade VDX Switches...
+ NOTE: Please refer to the migration notes!
+ * 0956 fast_lta_headunit.status, fast_lta_headunit.replication: New checks for FAST LTA Storage Systems
+ * 0957 fast_lta_silent_cubes.capacity: New check for Total Capacity over all Silent Cubes on FAST LTA Storage Systems
+ * 0975 esx_vsphere_vm.guest_tools: renamed check (formerly esx_vsphere_vm.guestTools)...
+ NOTE: Please refer to the migration notes!
+ * 0920 blade_bays: now also detects if blade server is switched off
+ * 0977 check_traceroute: new active check for checking presence and absence of routes...
+ * 0959 libelle_business_shadow.info, libelle_business_shadow.process, libelle_business_shadow.status: New Checks for Libelle Business Shadow
+ * 0960 libelle_business_shadow.archive_dir: New check for the Archive Dir of Libelle Business Shadow...
+ * 0978 Fix security issue with mk-job on Linux...
+ NOTE: Please refer to the migration notes!
+ * 0925 ps: improved/fixed calculation of CPU utilization (linux)...
+ * 0926 windows agent: local / plugin scripts now get the REMOTE_HOST as environment variable
+ * 0163 kaspersky_av_quarantine, kaspersky_av_tasks, kaspersky_av_updates: New checks for kaspersky anti virus on linux
+ * 0164 symantec_av_progstate, symantec_av_quarantine, symantec_av_updates: New checks for Symantec Anti Virus on Linux
+ * 0615 apc_symmetra: check now also monitors the battery replacement status
+ * 0927 windows agent: now able to evaluate logfiles written in unicode (2 bytes per character)...
+ * 0165 ups checks now also support GE devices (Thanks to Andy Taylor)...
+ * 0928 runas: new plugin script to include and execute mrpe, local and plugin scripts as different user...
+ * 0929 windows agent: now able to include and execute additional local and plugin scripts as different user...
+ * 0961 fast_lta_volumes: new check of capacity of volumes in FAST LTA Storage Systems...
+ * 0777 FIX: special agent emcvnx: did not work with security file authentication...
+ * 0786 FIX: zfsget: fixed compatibility with older Solaris agents...
+ * 0809 FIX: brocade_fcport: Fixed recently introduced problem with port speed detection
+ * 0787 FIX: df: fixed problems on some filesystem checks when legacy check parameters were used...
+ * 0803 FIX: agent_ibmsvc: raw data for System Info Check and License Check now in correct format...
+ * 0788 FIX: oracle_tablespaces: now able to bear None values as warn/crit levels...
+ * 0789 FIX: oracle_tablespaces: fixed bug when using dynamic filesystem levels...
+ * 0603 FIX: cmciii checks: more general scan function plus perf-o-meters for humidity and temperature checks
+ * 0604 FIX: windows_updates: now handles situations with forced reboot and no limits correctly
+ * 0605 FIX: enterasys_cpu_util enterasys_lsnat: syntax fixes
+ * 0889 FIX: logwatch: fix case where rule wouldn't be applied...
+ * 0882 FIX: check_bi_local.py: fix crash in case of non-ascii characters...
+ * 0606 FIX: apache_status: now also sends an accept header to make it work with mod_security enabled servers
+ * 0832 FIX: solaris_mem: fixed invalid calculation of total swap...
+ * 0810 FIX: fritz.link: Not inventorizing "unconfigured" interfaces anymore
+ * 0154 FIX: zfsget: Fixed inventory of filesystems
+ * 0155 FIX: mssql_counters: hardened check against odd agent output
+ * 0907 FIX: windows agent: register_service: fixed ImagePath registry entry...
+ * 0608 FIX: oracle_asm_diskgroup: check now also handles older oracle version 11.1.0
+ * 0157 FIX: apc_symmetra_test: Fixed case of unknown last test date
+ * 0910 FIX: brocade.power: fixed an error where the check reports an UNKNOWN on power supply failure...
+ * 0158 FIX: dell_om_disks: Handle hotspares more correctly
+ * 0161 FIX: cisco_fru_power: Excluded non-existing devices from the inventory
+ * 0969 FIX: blade_health: correctly output error message in non-OK state
+ * 0611 FIX: nfsexports.solaris: fix in determination of path prefix
+ * 0953 FIX: brocade_mlx_temp: special treatment for devices sometimes not delivering temperature by SNMP
+ * 0958 FIX: df.include: failed for checks with grouping patterns...
+ * 0924 FIX: windows agent: now able to execute python scripts again
+ * 0614 FIX: cmciii.temp, cmciii.humidity: fixed bugs to get performance data back
+ * 0932 FIX: prediction: fixed bug where predicted levels were not recalculated
+
+ Multisite:
+ * 0779 Hostgroups (Summary): Empty hostgroups are no longer shown (can be re-enabled by filter)
+ * 0887 Add new column painter "Host Notifications Enabled"...
+ * 0963 New snapin with virtual host trees...
+ * 0914 Improved transaction handling to speedup the Web-GUI...
+ * 0905 FIX: Multisite context buttons: links in context buttons are no longer called twice...
+ * 0906 FIX: Improved transaction handling in Web GUI...
+ * 0909 FIX: Table checkboxes: Fixed bug where selected checkboxes got ignored...
+ * 0811 FIX: Fixed handling of exceptions occurring before login in debug mode
+ * 0912 FIX: Multisite Views: Fixed bug where custom views could not get deleted
+ * 0921 FIX: dashboards: fixed bug not updating header timestamp...
+ * 0923 FIX: json export: fixed bug not stripping html tags from output
+ * 0931 FIX: pnp-template ps.perf: fixed display bug of cpu averaging
+
+ WATO:
+ * 0784 Improved security of WATO bulk inventory by using transaction ids
+ * 0880 Added support for 389 Directory Server to LDAP connector
+ * 0607 online help text for host creation in WATO now also explains hostname caching
+ * 0908 Check event state: New option "Less Verbose Output"...
+ * 0965 Cumulative permissions and contact groups for WATO folders...
+ * 0973 Renaming of hosts via WATO...
+ * 0976 Show preview of active and custom checks in WATO services table...
+ * 0930 WATO snapshots: disabled upload of legacy snapshots and snapshots with invalid checksums...
+ * 0781 FIX: host diag page: fixed problem with update of diagnose subwindows...
+ * 0904 FIX: Fixed exception in host parameter overview...
+ * 0971 FIX: Fix missing authentication of PHP addons in D-WATO when activation mode is reload...
+ * 0972 FIX: Do not lose site specific global settings anymore when changing a site's configuration...
+ * 0933 FIX: WATO snapshots: excluded some superfluous files from nagvis backup domain...
+
+ Notifications:
+ * 0754 Allow users to disable their notifications completely...
+ * 0755 Added variables LASTHOSTUP_REL and LASTSERVICEOK_REL to notification context...
+ * 0883 Added Date / Time to HTML notification email
+ * 0900 notify_multitech.py: new treasures script for notifying via MultiTech SMS Gateway...
+ * 0968 Notification scripts are now configurable via WATO...
+ * 0974 New notification plugin for ASCII emails...
+ * 0752 FIX: compute correct state transitions for notifications...
+ * 0753 FIX: correctly show original state in HTML notification mails...
+ * 0609 FIX: mail notification script now uses 6 digit hex codes for colors to be better compatible with web based mail browsers
+ * 0964 FIX: Fix hanging shutdown of CMC on RedHat 5.X...
+ * 0918 FIX: notification: fixed exception when sending notifications as sms / ascii mail...
+
+ Reporting & Availability:
+ * 0756 Allow availability of multisite BI aggregates at once...
+ * 0966 CSV export for availability works now also for BI aggregates
+ * 0967 BI Availability timewarp: new buttons for moving back and forth
+ * 0962 FIX: Fix CSV-Export in availability table
+ * 0890 FIX: Fix availability computation for hosts...
+ * 0891 FIX: Fix HTML encoding of tooltip in inline timeline of availability
+
+ Event Console:
+ * 0885 New option for writing all messages into a syslog-like logfile...
+ * 0902 FIX: event console view: fixed exception on rendering host tags for unknown hosts...
+
+ Livestatus:
+ * 0747 FIX: livestatus table hostsbygroup: fixed bug with group_authorization strict...
+ * 0831 FIX: table statehist: no longer crashes on TIMEPERIOD TRANSITION entries with an invalid syntax...
+
+ Livestatus-Proxy:
+ * 0970 FIX: liveproxyd: handle situations with more than 1024 open files...
+ * 0613 FIX: liveproxyd: fewer log messages in case a site is unreachable
+
+ HW/SW-Inventory:
+ * 0913 lnx_distro: Now able to detect SuSE distributions...
+ * 0610 mk_inventory: windows inventory check now included, install date added to data
+ * 0886 FIX: Fix exception on non-UTF-8 encoded characters in software list
+ * 0922 FIX: dmidecode: fixed exceptions on missing/unknown data
+
+
+1.2.5i2:
+ Checks & Agents:
+ * 0147 enterasys_fans: New Check to monitor fans of enterasys switches
+ * 0773 ibm_svc_system: new check for System Info of IBM SVC / V7000 devices
+ * 0774 ibm_svc_nodestats.diskio: new check for disk throughput per node on IBM SVC / V7000 devices
+ * 0775 ibm_svc_systemstats.diskio: new check for disk throughput in IBM SVC / V7000 devices in total
+ * 0764 lnx_quota: Added new check to monitor Linux File System Quota...
+ * 0776 ibm_svc_nodestats.cpu_util: new check for CPU Utilization per Node on IBM SVC / V7000 devices
+ * 0600 nfsexports.solaris: new agent plugin for monitoring nfs exports on solaris systems...
+ * 0743 mem, fortigate_memory, solaris_mem: display total SWAP info in check output
+ * 0745 drbd: Roles and diskstates are now configurable via WATO...
+ * 0740 FIX: winperf_if: now able to handle bandwidth > 4GBit...
+
+ Multisite:
+ * 0765 NagVis-Maps-Snapin: Now visualizes downtime / acknowledgment states of maps...
+
+    Reporting & Availability:
+    * 0756 Allow availability of multisite BI aggregates at once...
+    * 0966 CSV export for availability works now also for BI aggregates
+    * 0967 BI Availability timewarp: new buttons for moving back and forth
+    * 0962 FIX: Fix CSV export in availability table
+    * 0890 FIX: Fix availability computation for hosts...
+    * 0891 FIX: Fix HTML encoding of tooltip in inline timeline of availability
+
+    Event Console:
+    * 0885 New option for writing all messages into a syslog-like logfile...
+    * 0902 FIX: event console view: fixed exception on rendering host tags for unknown hosts...
+
+    Livestatus:
+    * 0747 FIX: livestatus table hostsbygroup: fixed bug with group_authorization strict...
+    * 0831 FIX: table statehist: no longer crashes on TIMEPERIOD TRANSITION entries with an invalid syntax...
+
+    Livestatus-Proxy:
+    * 0970 FIX: liveproxyd: handle situations with more than 1024 open files...
+    * 0613 FIX: liveproxyd: fewer log messages in case a site is unreachable
+
+    HW/SW-Inventory:
+    * 0913 lnx_distro: Now able to detect SuSE distributions...
+    * 0610 mk_inventory: windows inventory check now included, install date added to data
+    * 0886 FIX: Fix exception on non-UTF-8 encoded characters in software list
+    * 0922 FIX: dmidecode: fixed exceptions on missing/unknown data
+
+
+1.2.5i2:
+    Checks & Agents:
+    * 0147 enterasys_fans: New Check to monitor fans of enterasys switches
+    * 0773 ibm_svc_system: new check for System Info of IBM SVC / V7000 devices
+    * 0774 ibm_svc_nodestats.diskio: new check for disk throughput per node on IBM SVC / V7000 devices
+    * 0775 ibm_svc_systemstats.diskio: new check for disk throughput in IBM SVC / V7000 devices in total
+    * 0764 lnx_quota: Added new check to monitor Linux File System Quota...
+    * 0776 ibm_svc_nodestats.cpu_util: new check for CPU Utilization per Node on IBM SVC / V7000 devices
+    * 0600 nfsexports.solaris: new agent plugin for monitoring nfs exports on solaris systems...
+    * 0743 mem, fortigate_memory, solaris_mem: display total SWAP info in check output
+    * 0745 drbd: Roles and diskstates are now configurable via WATO...
+    * 0740 FIX: winperf_if: now able to handle bandwidth > 4GBit...
+
+    Multisite:
+    * 0765 NagVis-Maps-Snapin: Now visualizes downtime / acknowledgment states of maps...
+    * 0766 FIX: Changed transid implementation to work as CSRF protection (Fixes CVE-2014-2330)...
+
+    WATO:
+    * 0767 FIX: Signing and verification of WATO snapshots (addresses CVE-2014-2330)...
+
+    BI:
+    * 0741 FIX: BI editor: fixed display bug in "Create nodes based on a service search"...
+
+    Livestatus:
+    * 0742 FIX: table statehist: now able to cancel a running query if limit is reached...
+
+
+1.2.5i1:
+    Core & Setup:
+    * 0386 Added all active checks to check_mk -L output...
+    * 0452 Speedup generation of configuration...
+    * 0124 Support multiline plugin output for Check_MK Checks...
+    * 0675 Activate inline SNMP per default (if available)...
+    * 0695 Remove obsolete option -u, --cleanup-autochecks...
+           NOTE: Please refer to the migration notes!
+    * 0087 FIX: Fixed possible locking issue when using datasource program with long output...
+    * 0313 FIX: Avoid duplicate reading of configuration file on --create-rrd...
+    * 0379 FIX: check_mk -c: Now also rewrites the location of conf.d directory
+    * 0354 FIX: Catch exception when check plugins do not return a state...
+    * 0398 FIX: Tolerate debug output in check plugins when using CMC...
+    * 0314 FIX: Fix CMC not executing any Check_MK checks after config reload...
+    * 0401 FIX: Fix rule precedence in WATO-configured manual checks...
+    * 0402 FIX: Fix exception in case of missing agent sections of cluster-aware checks...
+    * 0426 FIX: Fixed processing of cached agent plugins / local scripts...
+    * 0451 FIX: Ignore missing check types when creating configuration for Nagios
+    * 0259 FIX: Fixed htpasswd permission problem in check_mk standalone installation...
+    * 0453 FIX: Fix ugly Python exception in host diagnosis page in case of SNMP error...
+    * 0696 FIX: Remove garbled output of cmk -v in state of CMC
+    * 0682 FIX: Allow overriding of active and custom checks by more specific rule...
+    * 0267 FIX: Fixed auth.serials permission problem in check_mk standalone installation...
+    * 0282 FIX: TIMEPERIOD TRANSITION messages no longer cut at 64 bytes...
+    * 0730 FIX: cmc: fixed bug displaying log entries after a logfile rotation...
+    * 0140 FIX: Fixed unwanted handling of hostname as regex...
+    * 0739 FIX: Availability: Prevent crash if the notification period is missing...
+
+    Checks & Agents:
+    * 0306 esx_vsphere_counters: added missing ramdisk levels sfcbtickets
+    * 0073 moxa_iologik_register: new check to monitor Moxa E2000 series registers
+    * 0105 apc_humidity: New Check for humidity levels on APC Devices
+    * 0106 3ware_units: The verifying state is now handled as ok...
+    * 0086 timemachine: new check checking the age of latest backup by timemachine on Mac OS
+    * 0074 raritan_pdu_plugs: new check for Raritan PX-2000 family PDUs...
+    * 0107 stulz_alerts, stulz_powerstate, stulz_temp, stulz_humidity: New Checks for Stulz clima devices
+    * 0075 raritan_pdu_inlet: new check to monitor inlet sensors of the Raritan PX-2000 PDUs
+    * 0315 hitachi_hnas_quorumdevice, hitachi_hnas_pnode, hitachi_hnas_vnode: New checks for Hitachi HNAS devices
+    * 0316 hitachi_hnas_cpu: New check for CPU utilization of Hitachi HNAS devices
+    * 0373 wut_webtherm: Supporting several other devices now
+    * 0377 check_http: Certificate Age mode now supports SNI...
+    * 0317 emc_isilon: New checks for EMC Isilon Storage System
+    * 0395 cmctc.temp: also detect older CMC devices
+    * 0396 cmciii_access cmciii_io cmciii_psm_current cmciii_psm_plugs: Support other firmware versions as well...
+    * 0111 kemp_loadmaster_ha, kemp_loadmaster_realserver, kemp_loadmaster_services: New Checks for Kemp Loadbalancer
+    * 0318 hitachi_hnas_fan: New check for fans in Hitachi HNAS systems
+    * 0319 hitachi_hnas_psu: New check for Hitachi HNAS storage systems
+    * 0320 hitachi_hnas_fpga: new check for Hitachi HNAS storage systems
+    * 0321 brocade_mlx: enhancing checks (BR-MLX modules, more OK states)...
+    * 0323 emcvnx_hwstatus, emcvnx_hba, emcvnx_disks: new checks for EMC VNX storage systems
+    * 0254 agent_vsphere: Make handling of spaces in hostnames of ESX configurable...
+    * 0077 cmciii.psm_current, cmciii_psm_plugs, cmciii_io, cmciii.access, cmciii.temp, cmciii.can_current, cmciii.sensor, cmciii.state: new sub checks included in one new check cmciii superseding and improving several previous checks of the Rittal CMCIII device...
+           NOTE: Please refer to the migration notes!
+    * 0078 job: check now monitors the time since last start of the job, limits can be configured in WATO
+    * 0079 f5_bigip_conns: new check to monitor number of current connections
+    * 0324 hitachi_hnas_cifs: new check for the number of users using a CIFS share
+    * 0455 hitachi_hnas_span: new check for Spans (Storage Pools) in Hitachi HNAS storage systems
+    * 0445 mem.win: Allow time-averaging of values before applying levels...
+    * 0446 mem.used, solaris_mem: Introduce optional averaging of used memory...
+    * 0566 services.summary: new check to monitor stopped services of mode autostart in Windows
+    * 0568 f5_big_ip_conns: check now supports predictive monitoring and both connection types are merged in one check
+    * 0257 windows_agent: now reports extended process information (obsoletes psperf.bat plugin)...
+    * 0457 hitachi_hnas_volume: New check for Usage and Status of Volumes in Hitachi HNAS storage systems
+    * 0450 mem.used: Add information about shared memory (on Linux hosts)
+    * 0458 hitachi_hnas_fc_if: New check for FibreChannel Interfaces in Hitachi HNAS storage systems
+    * 0459 emcvnx_info: New info check providing Model, Revision and Serial Number of EMC VNX storage systems
+    * 0461 emcvnx_raidgroups.list_luns: New check for EMC VNX storage system...
+    * 0462 emcvnx_raidgroups.list_disks: New check for EMC VNX storage system...
+    * 0463 emcvnx_raidgroups.capacity, emcvnx_raidgroups.capacity_contiguous: New Checks for EMC VNX Storage systems...
+    * 0570 fileinfo.groups: file groups now allow exclude patterns as well
+    * 0464 stulz_pump: new check for the status of pumps of Stulz clima units
+    * 0125 unitrends_backup: Unitrends Backup...
+    * 0126 mikrotik_signal: Check for Mikrotik WiFi bridges
+    * 0127 hp_proliant_raid: Check for ProLiant RAID status.
+    * 0571 cmciii_lcp_fans: now monitors the lower limit for the rpm
+    * 0572 cmciii_lcp_waterflow: lower and upper limits to the flow are now monitored
+    * 0573 cmciii_lcp_airin, cmciii_lcp_airout, cmciii_lcp_waterin, cmciii_lcp_waterout: checks now observe limits to the temperatures
+    * 0128 unitrends_replication: Check for monitoring replication status on Unitrends systems
+    * 0265 mrpe_include: run additional mrpe configs within user context...
+    * 0266 windows_agent: now supports mrpe include files...
+    * 0574 if64: check now supports clustering...
+    * 0576 fileinfo.groups: new feature to include current date in file pattern
+    * 0130 Support of new Firmware version of various Fujitsu Storage Systems
+    * 0698 emc_isilon.nodehealth: new check for EMC Isilon Storage systems: NodeHealth
+    * 0699 emc_isilon_iops: New check for Disk Operations per Second (IOPS) in EMC Isilon Storage
+    * 0132 New checks fjdarye101_disks fjdarye101_rluns: Fujitsu Storage Systems with 2013 Firmware
+    * 0697 check_dns: allow to specify multiple expected answers
+    * 0700 arcserve_backup: new check for status of backups in an Arcserve Backup Server
+    * 0580 emc_datadomain_fans, emc_datadomain_nvbat, emc_datadomain_power, emc_datadomain_temps: new hardware checks for EMC Datadomain
+    * 0691 Solaris agent: include lofs in list of monitored filesystem types
+    * 0694 wut_webtherm: Support new versions of WUT-Thermometer...
+    * 0135 apc_inputs: New Check for APC Input Contacts
+    * 0701 emc_isilon_diskstatus: new check for Status of Disks in EMC Isilon Storage Systems
+    * 0581 emc_datadomain_disks emc_datadomain_fs: new checks to monitor disks and filesystems of EMC Datadomain
+    * 0718 logwatch.ec: Optionally monitor the list of forwarded logfiles...
+    * 0556 esx_vsphere_counters.diskio: now also shows disk latency
+    * 0583 stulz_pump: now monitors the pump's rpm in percent of maximum and gathers performance data
+    * 0560 check_mk_agent.solaris: report statgrab_mem section if solaris_mem section is missing...
+    * 0702 Rule for checking agents for wanted version...
+    * 0586 rmon_stats: new snmp check to gather network traffic statistics on RMON enabled network interfaces
+    * 0704 windows_os_bonding: new check for bonding interfaces on Windows...
+    * 0562 esx_vsphere_vm.guest_tools: new check to monitor guest tools status...
+    * 0674 brocade_fcport: Now supporting interface speed of 16 Gbit (just discovered in the wild)
+    * 0138 Removed caching function in Windows Update agent plugin...
+           NOTE: Please refer to the migration notes!
+    * 0564 esx_vsphere_vm.datastores: displays the datastores of the VM...
+    * 0731 mk_postgres: improved support for postgres versions < 9.2...
+    * 0588 dell_poweredge_amperage.current, dell_poweredge_amperage.power, dell_poweredge_cpu, dell_poweredge_status, dell_poweredge_temp: new checks for the Dell PowerEdge Blade Server
+    * 0589 brocade_tm: new check monitoring traffic manager statistics for interfaces of brocade devices
+    * 0591 dell_poweredge_mem: new check to monitor memory modules of Dell PowerEdge Servers
+    * 0592 dell_poweredge_pci: new check for PCI devices on Dell PowerEdge Servers
+    * 0141 ups_socomec_capacity: Battery Capacity Check for Socomec UPS Devices.
+    * 0705 arcserve_backup: improved documentation (check manpage and comments in the agent plugin)
+    * 0143 ups_socomec_in_voltage, ups_socomec_out_voltage: Socomec UPS Devices, Input and Output Voltages...
+    * 0732 df: now able to monitor inodes...
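+      Example for entry 0732 above (a minimal main.mk sketch; the parameter
+      name "inodes_levels" and its semantics, levels in percent of free
+      inodes, are an assumption, and the host/service values are
+      illustrative):
+          check_parameters = [
+              ({"inodes_levels": (10.0, 5.0)}, ALL_HOSTS, ["Filesystem /"]),
+          ]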
+    * 0716 Add Linux caching agent also to normal agent RPM...
+    * 0594 dell_poweredge_netdev: new check to monitor the status of network devices on Dell PowerEdge Servers
+    * 0733 mem, solaris_mem: now able to configure amount of free memory...
+    * 0706 EMC VNX: special agent can alternatively authenticate via security files...
+    * 0734 esx_vsphere_vm.running_on: shows the ESX host of the VM
+    * 0144 enterasys_cpu_util enterasys_powersupply: New Checks for CPU Utilization and Power Supplies on enterasys switches
+    * 0595 dell_chassis_power, dell_chassis_powersupplies: new checks for Dell PowerEdge Chassis power consumption...
+    * 0596 dell_chassis_status, dell_chassis_temp, dell_chassis_kvm, dell_chassis_io, dell_chassis_fans: new checks to monitor the overall status of various sections of the Dell PowerEdge Chassis via CMC
+    * 0597 dell_chassis_slots: new check to monitor the status of the blade slots of the Dell PowerEdge Blade Servers
+    * 0145 apc_symmetra: Changed naming of Battery Temperature to System Temperature...
+           NOTE: Please refer to the migration notes!
+    * 0146 innovaphone_priports_l1, innovaphone_priports_l2: New Checks for Innovaphone PRI Ports
+    * 0707 ibm_svc_host: New check: Status of hosts an IBM SVC / V7000 presents volumes to
+    * 0598 kentix_temp, kentix_humidity: new checks for Kentix MultiSensor-Rack
+    * 0768 ibm_svc_license: New check for Licensing Status on IBM SVC / V7000 devices
+    * 0778 New Special Agent for innovaphone gateways...
+    * 0769 juniper_trpz_cpu_util, juniper_trpz_flash, juniper_trpz_info, juniper_trpz_power: new Checks for Juniper Trapeze switches
+    * 0770 innovaphone_licenses: New check to monitor licenses on innovaphone devices
+    * 0771 juniper_trpz_aps: Show the number of connected access points on Juniper WLAN controllers
+    * 0772 added special agent for IBM SVC / V7000 storage systems...
+    * 0147 enterasys_fans: New Check to monitor fans of enterasys switches
+    * 0759 check_notify_count: New active check to monitor the number of notifications sent to contacts...
+    * 0760 The Windows agent contains meta information about version, manufacturer etc....
+    * 0103 FIX: services: Fixed bug with service inventory defined in main.mk...
+    * 0299 FIX: brocade_mlx_fan: Prettified output, handling "other" state now
+    * 0300 FIX: cisco_fru_power: Trying not to inventorize FRUs that are not plugged in...
+    * 0305 FIX: apache_status: Fixed exception when agent reports HTML code as apache-status data...
+    * 0104 FIX: mssql: Server instances with underscore in name are now supported...
+    * 0240 FIX: Virtual machine names with spaces no longer have missing piggyback data...
+    * 0310 FIX: apache_status: Improved handling of unexpected data sent by agents...
+    * 0088 FIX: esx_vsphere_datastores: fixed error with reported capacity of 0 bytes...
+    * 0243 FIX: cisco_qos: no longer crashes when the QoS policy name is not set...
+    * 0326 FIX: hr_fs printer_supply: Improved translation of wrongly encoded characters...
+    * 0059 FIX: agent_vsphere: new option for supporting ESX 4.1...
+    * 0334 FIX: cisco_fantray: Fixed error on Cisco devices which do not support this check...
+    * 0355 FIX: heartbeat_crm: Now handling "Failed actions:" output in agent...
+    * 0357 FIX: megaraid_bbu: Fixed expected state checking...
+    * 0358 FIX: df: now ignores filesystems with a reported size of '-'...
+    * 0360 FIX: multipath: Inventory handles non-loaded kernel module now...
+    * 0339 FIX: blade_bays blade_blades blade_blowers blade_health blade_mediatray blade_powerfan blade_powermod: fix scan function...
+    * 0340 FIX: blade_health: fix check, it was totally broken...
+    * 0363 FIX: mysql_capacity: Used wrongly calculated warn / crit thresholds...
+    * 0364 FIX: brocade_mlx*: Several cleanups, fixed bug in brocade_mlx_fan where only the first worst state was shown in output
+    * 0365 FIX: RPMs: Cleaning up xinetd checkmk.rpmnew file after updating package...
+    * 0366 FIX: heartbeat_crm: Agent code is now compatible with pacemaker 1.1.9...
+    * 0367 FIX: Now using /dev/null instead of closing stdin in Linux agent...
+    * 0342 FIX: postgres_stat_database: make agent compatible with PostgreSQL 8.4.x...
+    * 0343 FIX: postgres_sessions: make agent plugin compatible with PostgreSQL 9.2...
+    * 0369 FIX: cups_queues: Fixed bug checking the last queue reported by agent...
+    * 0370 FIX: brocade_mlx_module*: Improved output of checks
+    * 0372 FIX: megaraid_ldisks: Ignoring adapters without configured logical disks...
+    * 0345 FIX: Linux agent: fix detaching of background plugins...
+    * 0378 FIX: agent_vsphere.pysphere: Trying to deal with permissions only on some guests/hosts
+    * 0245 FIX: Inline SNMP no longer throws an exception when using SNMPv3 credentials...
+    * 0380 FIX: jolokia_metrics.mem: PNP-Template now handles non-existent max values...
+    * 0381 FIX: win_printers: Fixed creation of duplicate services...
+    * 0347 FIX: smart.stats: Remove duplicate disks...
+    * 0349 FIX: winperf.cpuusage: update man page: this check is deprecated
+    * 0383 FIX: solaris_mem: Is now compatible with more systems...
+    * 0109 FIX: cisco_fantray: Prevent inventory for unavailable fans
+    * 0110 FIX: cisco_fru_power: Prevent inventory for unavailable FRUs
+    * 0350 FIX: nfsmounts: correctly handle mount points with spaces...
+    * 0387 FIX: df*: Negative filesystem space levels get a clearer text in check output...
+    * 0351 FIX: local: Catch invalid state codes and map to 3 (UNKNOWN)...
+    * 0397 FIX: mrpe: tolerate performance variable names with spaces...
+    * 0399 FIX: check_ftp: cleanup configuration via WATO, remove Hostname field...
+    * 0435 FIX: esx_vsphere_sensors: Fix garbled output in case of placeholder VMs...
+    * 0251 FIX: agent_vsphere / check_mk agent: fixed outdated system time of check_mk agent...
+    * 0439 FIX: postfix_mailq: Linux agent better detects Postfix installation...
+    * 0440 FIX: heartbeat_crm: Inventory more gracefully handles case where agent output is invalid...
+    * 0113 FIX: blade_blades: Now only make inventory for blades that are powered on...
+    * 0441 FIX: megaraid_bbu: Fix several false alarms and cases where inventory failed
+    * 0442 FIX: dell_om_disks: Treat global hot spare disks as OK, instead of WARN...
+    * 0443 FIX: brocade_fcport: cope with firmware that does not provide speed information...
+    * 0322 FIX: timemachine: Check now also works if there are spaces in the name of the backup volume or the hostname
+    * 0253 FIX: windows agent: fixed crash on processing eventlog records...
+    * 0403 FIX: mem.used: Prefer statgrab on FreeBSD for supporting more than 4GB...
+    * 0404 FIX: cups_queues: fix exception in case of alternative time format...
+    * 0444 FIX: timemachine: do not inventorize check when timemachine is not used
+    * 0116 FIX: cisco_vpn_tunnel: Fixed typo that led to an exception
+    * 0118 FIX: stulz_humidity: Fixed coloring in PNP template...
+    * 0119 FIX: stulz_humidity: Fixed lower thresholds...
+    * 0565 FIX: windows_updates: fix for some cases when forced_reboot is not set
+    * 0255 FIX: windows_agent: now able to handle the removal of local/plugin scripts during runtime...
+    * 0447 FIX: fortigate_memory: Fix inventory, do not add check if no info available...
+    * 0567 FIX: apc_symmetra: transformation from old tuple to new dict format fixed and improved
+    * 0432 FIX: stulz_humidity: Fixed syntax error...
+    * 0120 FIX: stulz_humidity, apc_humidity: Fixed bug while processing check params...
+    * 0460 FIX: endless waiting for printer queues fixed...
+    * 0260 FIX: Fixed incorrect formatting of checks with long output...
+    * 0261 FIX: df_netapp32 / df_netapp: Fixed bug with negative size in check output...
+    * 0262 FIX: ps: Now able to skip disabled "Process Inventory" rules...
+    * 0264 FIX: printer_supply_ricoh: now reports correct filling levels...
+    * 0575 FIX: cmciii_lcp_airin, cmciii_lcp_airout, cmciii_lcp_waterin, cmciii_lcp_waterout: improved handling of warning state...
+    * 0272 FIX: if checks: port type 56 (fibrechannel) is no longer inventorized per default...
+    * 0577 FIX: fileinfo.groups: new date pattern is now available for inventory check as well
+    * 0688 FIX: winperf_msx_queues: Support output of Exchange 2013...
+    * 0578 FIX: zypper: check is always registered as soon as mk_zypper plugin detects zypper tool...
+    * 0689 FIX: postgres_sessions: fix empty agent section in case of 0 sessions...
+    * 0579 FIX: veeam_client: fix for case when no StopTime section in agent output
+    * 0692 FIX: fileinfo: Avoid duplicate entries in Solaris agent...
+    * 0693 FIX: hpux_lvm: avoid problem when alternative vgdisplay is installed...
+    * 0708 FIX: ntp.time, ntp: avoid DNS lookups in NTP queries and avoid timeouts...
+    * 0277 FIX: Solaris agent: ntp now able to work with ntpd and xntpd...
+    * 0279 FIX: check_mk_agent.solaris: removed proc section from statgrab...
+    * 0281 FIX: statgrab_net.ctr: only inventorize interfaces with actual traffic...
+    * 0582 FIX: cisco_sys_mem: check now has a man page and a new WATO integration
+    * 0667 FIX: oracle_asm_diskgroup: Now really uses the generic filesystem levels...
+    * 0555 FIX: snmp_uptime: no longer fails if uptime is < 1 second
+    * 0136 FIX: cisco_fru_power: Prevent inventory of non-existing devices
+    * 0557 FIX: check_mk_agent.solaris: removed section statgrab mem...
+    * 0673 FIX: zfsget: Fixed broken check - was not compatible with current agent output of "df"
+    * 0719 FIX: postfix_mailq: fix Linux agent in case of ssmtp being installed
+    * 0584 FIX: agent_vsphere: special agent now handles non-standard https port correctly...
+    * 0585 FIX: check_mk_agent.linux: more efficient handling of cups printer queues...
+    * 0703 FIX: brocade_mlx: omit inventory of CPU and memory on more states...
+    * 0137 FIX: Fixed printer_pages...
+    * 0587 FIX: if64: problems resolved when running as a clustered service...
+    * 0563 FIX: windows agent: now able to process Perl scripts...
+    * 0729 FIX: esx_vsphere_hostsystem: fixed incorrect status label (not state)...
+    * 0142 FIX: winperf_if: no longer treat unknown packets as error packets
+    * 0593 FIX: zypper: agent plugin and check now lead to UNKNOWN result in case of repo problems
+    * 0758 FIX: check_sql: Fixed monitoring of stored procedures with Oracle
+    * 0599 FIX: esx_vsphere_datastores: provisioning levels in WATO are no longer limited to 101%
+    * 0737 FIX: megaraid_ldisks: now able to handle "No Virtual Drive Configured" states...
+    * 0763 FIX: hpux_if: Fixed exception during parsing of provided data on some systems...
+
+    Multisite:
+    * 0371 Added log class filter to hostsvcevents view
+    * 0352 Avoid Livestatus connections on pages that do not need them...
+    * 0390 Added an icon selector to the view editor...
+    * 0391 Added sorter / filter for host/service service levels...
+    * 0247 New mkp package for web applications: iNag / nagstatus / nagios status.dat...
+    * 0429 Implemented role permissions for dashboards...
+    * 0430 It is now possible to define custom time ranges in PNP graph search...
+    * 0449 Show all custom variables of hosts and services in the detail views...
+    * 0665 Added mail notification method to custom user notification dialog...
+    * 0123 New time range filter for Downtimes and Comments...
+    * 0683 New column painter for the last time a service was OK...
+    * 0561 quicksearch: now able to search with multiple filters...
+    * 0748 Also custom views now have permissions...
+    * 0302 FIX: Fixed highlight of chosen elements in foldertree/views snapin in Chrome/IE
+    * 0239 FIX: Fixed incorrect HTML formatting when displaying host or service comments...
+    * 0307 FIX: Increased performance of multisite GUI with a large user base...
+    * 0312 FIX: Hiding views related to non-existing datasources, like the EC now...
+    * 0325 FIX: Removed CSV export icon from availability views...
+    * 0327 FIX: Most forms did not work with "Profile Requests" enabled...
+    * 0333 FIX: Fixed too long page title while performing several actions...
+    * 0356 FIX: Fixed exception caused by UTF-8 characters in tooltip text...
+    * 0368 FIX: Generating selection id is hopefully now compatible with more systems...
+    * 0374 FIX: Fixed syntax error in exception handler of LDAP search code...
+    * 0375 FIX: LDAP: Now handling user-ids with umlauts...
+    * 0246 FIX: brocade_fcport: fixed error in pnp-template...
+    * 0393 FIX: LDAP: Enabled paged LDAP search by default now with a page size of 1000...
+    * 0394 FIX: LDAP: Auth expiration plugin now checks users for being disabled (in AD)...
+    * 0436 FIX: Fix broken Site status switching via sidebar snapin...
+    * 0420 FIX: LDAP: Roles/Groups are now synced even if the case of DNs does not match...
+    * 0421 FIX: UserDB: Fixed lost passwords when changing users in large user databases...
+    * 0423 FIX: Users are not logged out anymore when changing their own passwords...
+    * 0424 FIX: Improved error handling in case of incorrect auth config in distributed WATO environments
+    * 0425 FIX: Fix login loop bug in distributed environments with different auth secrets
+    * 0117 FIX: Availability button is now visible for users without the right to edit views
+    * 0431 FIX: LDAP: Fixed group synchronisation when nested group sync is enabled
+    * 0122 FIX: Multisite view editor no longer throwing an exception when loading views from other users
+    * 0569 FIX: recurring updates of serial numbers of disabled LDAP users fixed...
+    * 0676 FIX: Move view "Stale services" to Problems folder
+    * 0270 FIX: Multisite host tag filter: Now uses exact match...
+    * 0273 FIX: Fixed exceptions when modifying / cloning views...
+    * 0274 FIX: Fixed exception when view title or description was missing
+    * 0278 FIX: Fixed bookmark icon images for non-English user languages...
+    * 0670 FIX: LDAP: Fixed sync when non-lowercase attributes are configured...
+    * 0671 FIX: LDAP: Disable logging of password changes received from LDAP
+    * 0558 FIX: availability: fixed exception on specific filter settings...
+    * 0712 FIX: Fix multiple groups with same tag when grouping hosts by a tag...
+    * 0738 FIX: csv_export: now able to handle umlauts in download filenames...
+    * 0762 FIX: Fixed availability filters not opening in IE7
+
+    WATO:
+    * 0308 Multisite can now set rotation view permissions for NagVis...
+    * 0329 Removed Distributed WATO peer mode...
+           NOTE: Please refer to the migration notes!
+    * 0244 New features for WATO page Backup & Restore...
+    * 0382 Active HTTP check now supports multiline regexp matching...
+    * 0112 Explicit mapping of clustered services can now be done with WATO...
+    * 0437 Convert WATO rule for debug_log into simple Checkbox...
+    * 0428 Changed user profiles (e.g. pw changes) are now replicated in distributed setups...
+    * 0114 User Custom Attributes can now be exported to the core...
+    * 0448 New button in WATO service list for displaying check parameters...
+    * 0454 Add output of traceroute to host diagnostic page
+    * 0677 Make title of tags and tag groups localizable...
+    * 0685 Distributed WATO now disables WATO on slave sites per default...
+    * 0687 New summary pages with all settings of a host or service...
+    * 0275 WATO "Notify Users" feature: Improved confirmation info...
+    * 0134 New option to use expect string in response headers for check_http in WATO...
+    * 0717 Sort permissions of views, dashboards, commands and snapins alphabetically
+    * 0761 New bulk host import mode in WATO...
+    * 0057 FIX: Fix exception in WATO host editor on custom tag without topic...
+    * 0241 FIX: Improved sorting of WATO folders in dropdown menu...
+    * 0019 FIX: Fixed wording in WATO rule for MSSQL check
+    * 0242 FIX: Parameters for clustered services can now be configured on the cluster host...
+    * 0309 FIX: Trying to prevent read/write conflicts with a large user base...
+    * 0311 FIX: Fixed "Inventory failed" message when trying an inventory on clusters via WATO...
+    * 0330 FIX: Improved performance of WATO slave push with a large user base...
+    * 0331 FIX: LDAP diagnostic LOG can now have the $OMD_SITE$ macro configured via WATO...
+    * 0332 FIX: Own host tag groups without topics resulted in two groups "Host tags" in the rule editor
+    * 0361 FIX: The page linked by "new rule" can now be bookmarked again
+    * 0341 FIX: Avoid rare exception in WATO when deleting a host...
+    * 0376 FIX: LDAP: Default configuration of attributes is reflected within WATO now
+    * 0346 FIX: Fix folder visibility in WATO for unprivileged users...
+    * 0385 FIX: Better error handling for invalid service regex in rule conditions...
+    * 0389 FIX: Showing LDAP settings on site-specific global settings page now...
+    * 0400 FIX: WATO BI editor now supports percentages for count_ok...
+    * 0392 FIX: LDAP: Improved error messages of LDAP configuration test...
+    * 0415 FIX: LDAP: The LDAP Settings dialog is now disabled when the LDAP Connector is disabled
+    * 0416 FIX: When doing user sync on user page rendering, contact group memberships are shown correctly now...
+    * 0417 FIX: LDAP: Fixed "Sync-Plugin: Roles" test with OpenLDAP
+    * 0248 FIX: Backup & Restore: Snapshot comments now support unicode characters...
+    * 0418 FIX: LDAP: Fixed broken role sync plugin with OpenLDAP...
+    * 0419 FIX: LDAP: The default user profile roles are only assigned to users without roles...
+    * 0249 FIX: Backup & Restore: fixed bug when uploading legacy snapshots...
+    * 0250 FIX: Fixed error on creating very large WATO snapshots...
+    * 0422 FIX: Fixed numbers shown in log entries of bulk inventory...
+    * 0252 FIX: ESX vSphere configuration: Fixed non-working configuration parameters...
+    * 0456 FIX: Column was too short...
+    * 0256 FIX: WATO snapshots: snapshot restore no longer fails with older Python versions...
+    * 0433 FIX: Creating WATO lock during automations (like e.g. master to slave syncs)...
+    * 0434 FIX: Fixed wrong count of failed hosts in bulk inventory mode...
+    * 0678 FIX: Move two last global settings of Event Console to proper places
+    * 0268 FIX: WATO inventory: fixed missing services...
+    * 0686 FIX: Fix replication with WATO if EC is enabled on master and disabled on slave
+    * 0129 FIX: Fixed permission bug in "Edit user profile" dialog...
+    * 0269 FIX: brocade_fcport: fixed problem displaying check_parameters in WATO...
+    * 0271 FIX: Fixed sorting in duallist element (two lists with interchangeable elements)...
+    * 0131 FIX: Error rates for network interfaces can now be set smaller than 0.1 when using WATO...
+    * 0690 FIX: Fix language jumping to German when saving user profiles
+    * 0666 FIX: Minimum port for the mknotifyd is now 1024 (never use well-known ports)...
+    * 0559 FIX: WATO snapshots: improved validation of (uploaded) snapshots...
+    * 0709 FIX: Fix NoneType has no attribute userdb_automatic_sync bug in D-WATO
+    * 0728 FIX: mem.win: fixed bug in WATO configuration rule...
+    * 0139 FIX: LDAP sync: syncing of rules against LDAP is no longer case sensitive
+    * 0736 FIX: WATO backup and restore: improved error handling...
+
+    Notifications:
+    * 0362 sms: now searching PATH for sendsms and smssend commands...
+    * 0684 New notification variables NOTIFY_LASTSERVICEOK and NOTIFY_LASTHOSTUP...
+    * 0711 New rule-based notifications...
+    * 0713 New bulk notifications...
+    * 0108 FIX: Prevent service notification on host alerts...
+    * 0058 FIX: Fix email notifications containing non-ASCII characters in some situations...
+    * 0133 FIX: Fixed mkeventd notification plugin...
+    * 0720 FIX: Fix timeperiod computation with CMC and flexible notifications...
+
+    BI:
+    * 0721 Use hard states in BI aggregates...
+    * 0714 BI aggregations now also honor scheduled downtimes...
+    * 0715 BI aggregates now honor acknowledgement information...
+    * 0669 FIX: Fixed regex matching in BI when using character groups [...]...
+
+    Reporting & Availability:
+    * 0018 New option for displaying a legend for the colors used in the timeline...
+    * 0405 Add CSV export to availability views...
+    * 0338 FIX: Introduce time limit on availability queries...
+    * 0681 FIX: Display correct year for availability range for last month in January
+    * 0750 FIX: Availability: fix exception when summary is on and some elements have never been OK
+
+    Event Console:
+    * 0301 Handling messages of special syslog format correctly...
+    * 0388 Moved Event Console related settings to own settings page...
+    * 0710 Create a history entry for events that failed their target count...
+    * 0749 Allow to restrict visibility of events by their host contacts...
+    * 0303 FIX: Old log entries were shown in event history first...
+    * 0304 FIX: Escaping several unwanted characters from incoming log messages...
+    * 0089 FIX: CSV export of event console was broken...
+    * 0359 FIX: Fixed exception in event simulator when one match group did not match
+    * 0384 FIX: Trying to prevent problem when restarting mkeventd...
+    * 0427 FIX: Fixed exception when handling connections from event unix socket...
+    * 0679 FIX: Allow non-ASCII characters in generated events
+    * 0680 FIX: Do not allow spaces in host names in event simulator...
+    * 0672 FIX: Service item of "Check event state in event console" checks can now be configured...
+    * 0590 FIX: mkeventd: fixed encoding of unicode characters in the snmptrap receiver...
+
+    Livestatus:
+    * 0337 New header for limiting the execution time of a query...
+    * 0276 Nagios 4 Livestatus support...
+    * 0335 FIX: Parse state of downtime notification log entries correctly...
+    * 0336 FIX: Limit the number of lines read from a single logfile...
+    * 0344 FIX: Fix semantics of columns num_services_hard_*...
+
+    Livestatus-Proxy:
+    * 0263 FIX: livestatus log table: fixed missing log entries of archived logfiles...
+
+
+1.2.3i7:
+    Core & Setup:
+    * 0011 Introduce optional lower limit for predicted levels...
+    * 0217 FIX: More verbose error output for SNMP errors on the command line...
+    * 0288 FIX: Error messages of datasource programs (e.g. VSphere Agent) are now visible within WATO...
+    * 0010 FIX: Fix computation of hour-of-the-day and day-of-month prediction...
+    * 0292 FIX: Inline SNMP: Check_MK check helpers are closing UDP sockets now...
+
+    Checks & Agents:
+    * 0060 cisco_fantray: new check for monitoring fan trays of Cisco Nexus switches
+    * 0061 cisco_cpu: check now recognizes new object cpmCPUTotal5minRev...
+    * 0063 veeam_client: new check to monitor status of veeam clients with special agent plugin...
+    * 0064 veeam_jobs: new check to monitor the backup jobs of the veeam backup tool...
+    * 0047 fritz.conn fritz.config fritz.uptime fritz.wan_if fritz.link: New checks for monitoring Fritz!Box devices...
+    * 0027 esx_vsphere_sensors: it is now possible to override the state of sensors...
+    * 0090 apc_ats_status: New Check for monitoring APC Automatic Transfer Switches
+    * 0080 Added new checks for Brocade NetIron MLX switching / routing devices...
+    * 0091 apc_ats_output: new check for output measurements on APC ATS devices
+    * 0068 check_sql: support for MSSQL databases included
+    * 0208 fileinfo.groups: Added minimum/maximum file size parameters...
+    * 0093 check_http: Default service description prefix can be avoided...
+    * 0004 df: dynamic filesystem levels now reorder levels automatically...
+    * 0069 veeam_client: limits for time since last backup introduced
+    * 0214 Logwatch: context lines can now be disabled using nocontext=1...
+    * 0038 casa_cpu_mem casa_cpu_temp casa_cpu_util casa_fan casa_power: New checks for Casa Cable Modem Termination Systems...
+    * 0097 arc_raid_status: New check for Areca RAID controllers
+    * 0070 cmciii_lcp_airin cmciii_lcp_airout cmciii_lcp_fans cmciii_lcp_waterflow cmciii_lcp_waterin cmciii_lcp_waterout: new checks for the Rittal CMC-III LCP device
+    * 0098 apc_inrow_airflow, apc_inrow_fanspeed, apc_inrow_temp: New checks for APC InRow devices
+    * 0099 apc_mod_pdu_modules: New check for APC Modular Power Distribution Unit
+    * 0072 cmciii_pu_access cmciii_pu_canbus cmciii_pu_io cmciii_pu_temp: New checks for the Rittal CMC-III PU Unit
+    * 0100 juniper_cpu: New check for CPU utilization on Juniper switches
+    * 0236 windows_agent: each script can now be configured to run sync / async...
+    * 0101 liebert_chiller_status: New check for Liebert Chiller devices
+    * 0083 brocade_mlx: Temperature sensors of one module now in one common check...
+    * 0008 df: Solaris agent now also supports samfs
+    * 0084 brocade_mlx: single checks now instead of sub checks...
+    * 0291 winperf_ts_sessions: New check to monitor Microsoft Terminal Server sessions...
+    * 0102 modbus_value: New check and agent for Modbus devices...
+    * 0013 Solaris Agent: implement cached async plugins and local checks...
+    * 0238 vsphere monitoring: new option to skip placeholder VMs in agent output...
+    * 0016 Linux+Windows agent: allow spooling plugin outputs via files...
+    * 0017 local: New state type P for state computation based on perfdata...
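+      Example for entry 0017 above (an illustrative local check output line;
+      the service name, metric and levels are made up): with state type "P"
+      the script emits only perfdata with warn/crit levels and Check_MK
+      derives the state from them:
+          P My_Service count=42;80;90 Everything fine so far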
+    * 0085 brocade_mlx: now handles more different module states...
+    * 0024 FIX: cisco_wlc: removed check configuration parameter ap_model...
+    * 0003 FIX: ps: Remove exceeding [ and ] in service description when using process inventory...
+    * 0037 FIX: checkman browser (cmk -m) was not working properly in network subtree...
+    * 0283 FIX: Interface Checks: ignore invalid error counts while interface is down...
+    * 0081 FIX: Fixed corruption in SNMP walks created with cmk --snmpwalk...
+    * 0286 FIX: esx_vsphere_counters.ramdisk: Better handling for non-existent ramdisks...
+    * 0290 FIX: winperf_processor mem.win: Handling no/empty agent responses correctly now...
+    * 0293 FIX: esx_vsphere_counters_ramdisk_sizes: Handles ram disk "ibmscratch" by default now
+    * 0012 FIX: Solaris Agent: fixed broken fileinfo section...
+    * 0297 FIX: mk-job is now also usable on CentOS 5+...
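+      Example of mk-job usage (entry 0297 above; the job name and command
+      are illustrative): the wrapper records run time and exit status of the
+      command for the job check, e.g. from a cron job:
+          mk-job nightly-backup /usr/local/bin/backup.sh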
+    * 0298 FIX: win_dhcp_pools: Fixed wrong percentage calculation
+    * 0237 FIX: tsm_sessions: fixed invalid check output during backups...
+
+    Multisite:
+    * 0001 New filters for selecting several host/service-groups at once...
+    * 0050 New concept of favorite hosts and services plus matching filters and views...
+    * 0211 GUI Notify: Added notify method "popup" to really create popup windows...
+    * 0215 Added option to make HTML escaping in plugin outputs configurable...
+    * 0071 livedump: new option to include contact_groups instead of contacts when dumping configuration
+    * 0043 FIX: LDAP: Improved error reporting during synchronisation...
+    * 0044 FIX: LDAP: Fixed error with empty groups during non-nested group sync...
+    * 0045 FIX: LDAP: Fixed error when synchronizing non-nested groups to roles...
+    * 0046 FIX: Fixed editing contactgroup assignments of hosts or folders with "-" in names...
+    * 0049 FIX: Fixed useless I/O during page processing...
+    * 0203 FIX: Changed sidebar reload interval to be more random...
+    * 0204 FIX: Reduced I/O on logins with access time recording or failed login counts...
+    * 0206 FIX: Fixed logwatch permission check when using liveproxy for normal users...
+    * 0210 FIX: LDAP: Fixed problem syncing contactgroups of a user with umlauts in CN
+    * 0035 FIX: Convert HTTP(S) links in plugin output into clickable icon...
+    * 0006 FIX: Checkboxes for hosts/services were missing on modified views...
+    * 0284 FIX: Context help toggled on/off randomly...
+    * 0285 FIX: Fixed bookmarking of absolute URLs or PNP/NagVis URLs in sidebar snapin...
+    * 0296 FIX: Fixed moving of snapins while in scrolled sidebar...
+
+    WATO:
+    * 0053 New rule for configuring the display_name of a service...
+    * 0216 Supporting float values as SNMP timeout value now...
+    * 0082 Improved online help for LDAP connections...
+    * 0009 Automatically schedule inventory check after service config change...
+    * 0294 Added "services" button to host diagnose page
+    * 0048 FIX: Tests on host diagnose page are executed in parallel now...
+    * 0033 FIX: Fixed problem when saving settings in WATO's host diagnostic page...
+    * 0205 FIX: NagVis related permissions of roles can be edited again...
+    * 0207 FIX: Explicit communities were not saved in all cases...
+    * 0094 FIX: Hide SNMPv3 credentials in WATO...
+    * 0212 FIX: Fixed broken site edit page in case a TCP socket has been configured...
+    * 0095 FIX: Fixed problem with port number in WATO Distributed Monitoring dialog
+    * 0213 FIX: LDAP: Various small improvements for handling the LDAP user connector...
+    * 0039 FIX: Fixed exception on displaying WATO help texts in the global settings...
+    * 0219 FIX: Fixed display problems in WATO folders with long contact group names
+    * 0220 FIX: Added HTML escaping to several global settings attributes...
+    * 0234 FIX: Improved handling of interface inventory states / types...
+    * 0289 FIX: Renamed "Hosts & Folders" page to "Hosts"
+    * 0295 FIX: Fixed problem with newly created tag groups with "/" in title...
+
+    Notifications:
+    * 0005 Added notification script for sending SMS via mobilant.com...
+    * 0032 FIX: Fixed problem when forwarding notification mails in Windows...
+    * 0218 FIX: Fixed rendering of HTML mails for Outlook (at least 2013)...
+
+    BI:
+    * 0287 FIX: Fixed assuming states of services with backslashes in descriptions...
+
+    Reporting & Availability:
+    * 0051 Option for showing timeline directly in availability table...
+    * 0052 Visual colorization of availability according to levels...
+    * 0054 New labelling options for availability table...
+    * 0055 Allow grouping by host, host group or service group...
+    * 0056 New concept of service periods in availability reporting...
+    * 0002 You can now annotate events in the availability reporting...
+    * 0014 FIX: Fix styling of tables: always use complete width...
+    * 0015 FIX: Fixed summary computation in availability when grouping is used...
+
+    Event Console:
+    * 0026 FIX: snmptd_mkevent.py: fixed crash on startup
+    * 0036 FIX: Fixed bug where multisite commands did not work properly...
+
+    Livestatus:
+    * 0067 livedump: new option to mark the mode at the beginning of the dump and documentation fixes...
+    * 0023 FIX: Fixed incorrect start time of table statehist entries...
+    * 0034 FIX: Availability no longer shows incorrect entries when only one logfile exists...
+    * 0233 FIX: Fixed missing entries in log file and availability view...
+
+
+1.2.3i6:
+    Core & Setup:
+    * 0041 FIX: setup.py now handles non-existing wwwuser gracefully...
+
+    Checks & Agents:
+    * 0040 Add agent plugin to test local hostname resolving...
+    * 0020 FIX: Inventory problem with inventory_processes parameter...
+
+    Multisite:
+    * 0000 Improved performance of LDAP sync by refactoring the group sync code
+
+    WATO:
+    * 0042 FIX: Removed debug outputs from service inventory...
+
+
+1.2.3i5:
+    Core:
+    * Automatically remove duplicate checks when monitoring with Agent+SNMP at the same time. TCP based ones have precedence.
+    * inventory check of SNMP devices now does scan per default (configurable)
+    * FIX: inventory check now honors settings for exit code
+    * FIX: avoid exception when nodes of a cluster have different agent types
+    * FIX: continue inventory if one check does not support it
+    * FIX: fix configuration of explicit SNMP community, allow unicode
+    * FIX: avoid invalid cache for the 2nd and subsequent hosts in bulk inventory
+    * FIX: fixed error handling in SNMP scan, inventory check fails now if SNMP agent is not responding
+    * FIX: Ignore snmp_check_interval cache in interactive situations (e.g. -nv)
+    * FIX: check_mk config generation: on computing the check parameters there is no longer a small chance that existing rules get modified
+
+    Event Console:
+    * check_mkevents now available as C binary: check_mkevents_c
+    * FIX: use default values for unset variables in actions
+
+    Multisite:
+    * Speed-O-Meter: now measures only service checks. Host checks are omitted, since they do not really matter and make the results less useful when using CMC.
+    * Added host aliases filter to some views (host/service search)
+    * It is now possible to enforce checkboxes in views upon view loading (needs to be configured per view via the view editor)
+    * Wiki Sidebar Snapin: showing navigation and quicksearch. OMD only.
+    * Sidebar can now be folded. Simply click somewhere on the leftmost 10 pixels.
+    * Foldable sections now have an animated triangle icon that shows the folding state
+    * Added new snapin "Folders", which interacts with the views snapin when both are enabled. You can use it to open views in a specific folder context
+    * LDAP: Added option to make group and role sync plugin handle nested groups (only in Active Directory at the moment). Enabling this feature might increase the sync time a lot - use only when really needed.
+    * FIX: Fixed encoding problem in webservice column output
+    * FIX: Fix output format python for several numeric columns
+    * FIX: Fixed searching hosts by aliases/addresses
+    * FIX: Remove duplicate entries from Quicksearch
+    * FIX: Avoid timed browser reload after execution of actions
+    * FIX: Hosttag filter now works in service-related views
+    * FIX: Added code to prevent injection of bogus varnames (This might break code which uses some uncommon chars for varnames)
+    * FIX: Fixed computation of perfometer values, which did not care about the snmp_check_interval. Simplified computation of perfometer values
+    * FIX: LDAP: Custom user attributes can now be synced again
+
+    BI:
+    * FIX: Fix exception when showing BI tree in reporting time warp
+    * FIX: Fixed blue triangle link: would show more aggregations, if one name was the prefix of another
+
+    Notifications:
+    * Blacklisting for services in the flexible notification system
+    * FIX: mail with graph plugin: set explicit session.save_path for PHP. Fixes instances where the PHP command couldn't fetch any graphs
+
+    Checks & Agents:
+    * diskstat: removed (ever incorrect) latency computation for Linux
+    * statgrab_load: support predictive levels, add perf-o-meter
+    * ucd_cpu_load: support predictive levels
+    * hpux_cpu, blade_bx_load: support predictive levels, add perf-o-meter, make WATO-configurable
+    * check_sql: Database port can now be explicitly set
+    * steelhead_peers: New check for Riverbed Gateways
+    * alcatel_power: Check for power supplies on Alcatel switches
+    * qnap_disks: New check for hard disks in QNAP devices
+    * Dell Open Manage: SNMP Checks for Physical Disks, CPU and Memory
+    * check_tcp: Now able to set custom service description
+    * Apache ActiveMQ: New Special Agent and Check to query ActiveMQ Queues
+    * check_ftp: can now be configured via WATO
+    * windows_tasks: New check to monitor the Windows Task Scheduler
+    * sensatronics_temp: New check for Sensatronic E4 Temperature Sensor
+    * akcp_sensor_drycontact: New Check for AKCP drycontact Sensors
+    * esx_vsphere_vm.heartbeat: Heartbeat status alert level now configurable
+    * ps: new configuration option: handle_count (Windows only)
+    * FIX: Windows agent: gracefully handle garbled logstate.txt
+    * FIX: esx_vsphere_counters: added missing ramdisk type upgradescratch
+    * FIX: esx_vsphere_hostsystem: fixed bug in handling of params
+    * FIX: local: tolerate invalid output lines
+    * FIX: hp_proliant: Correct handling of missing SNMP data
+    * FIX: logwatch.ec: No longer forwards "I" lines to event console
+    * FIX: check_dns: default to querying the DNS server on the localhost itself
+    * FIX: ps: do not output perfdata of CPU averaging (use ps.perf for that)
+    * FIX: nfsexports: also support systems with rpcbind instead of portmap
+    * FIX: ups_in_freq: corrected spelling of service description
+    * FIX: ups_bat_temp: renamed service description to "Temperature Battery", in order to make it consistent with the other temperature checks
+    * FIX: hp_blade_blades: Fixed crash on inventory when receiving unexpected SNMP data
+    * FIX: apache_status: If ReqPerSec and BytesPerSec are not reported by the agent, no PNP graphs for them are drawn. (This is the case if ExtendedStatus is set to Off in the Apache config)
+    * FIX: oracle_jobs: fixed issues with incorrect column count in check output
+    * FIX: if/if64/...: layout fix in PNP template for packets
+
+
+    WATO:
+    * You can now have site-specific global settings when using distributed WATO (available in the "Distributed Monitoring")
+    * bulk inventory: display percentage in progress bar
+    * New option for full SNMP scan in bulk inventory
+    * bulk operations now also available when checkboxes are off
+    * LDAP: Added test to validate the configured role sync groups
+    * LDAP: The sync hooks during activate changes can now be enabled/disabled by configuration (Global Settings)
+    * Disabled replication type "peer" in site editor.
+    * Added "permanently ignore" button to inventory services dialog which links directly to the disabled services view
+    * Added diagnose page linked from host edit dialog. This can be used to test connection capabilities of hosts
+    * The rule "Process inventory" now offers the same configuration options as its manual check equivalent "State and count of processes"
+    * New configuration option handle_count (Windows only) in the rules "Process inventory" and "State and count of processes"
+    * FIX: correct display of number of hosts in bulk inventory
+    * FIX: nailed down ".siteid" exception when adding a new site
+    * FIX: fixed setting for locking mode from 'ait' to 'wait'
+    * FIX: avoid removal of tags from rules when not yet acknowledged
+    * FIX: avoid need for Apache restart when adding new service levels
+    * FIX: fix encoding problem in GIT integration
+
+    Livestatus:
+    * Removed "livecheck". It was never really stable. Nagios 4 has something similar built in, and so does the Check_MK Micro Core.
+    * table statehist: no longer computes an unmonitored state for hosts and services on certain instances (showed up as no hosts/services in the Multisite GUI)
+    * table statehist: fixed possible SIGSEGV on larger queries
+
+1.2.3i4:
+    Core:
+    * Create inventory check also for hosts without services, if they have *no* ping tag.
+
+    WATO:
+    * Bulk inventory: speed up by use of cache files and doing stuff in groups of e.g. 10 hosts at once
+    * Multisite connection: new button for cloning a connection
+
+    Checks & Agents:
+    * Linux agent RPM: remove dependency on package "time". That package is just needed for the binary mk-job, which is useful but not necessary.
+
+    Multisite:
+    * FIX: fix broken single-site setups due to new caching
+
+1.2.3i3:
+    Core:
+    * FIX: fixed typo in core startup message "logging initial states"
+    * FIX: livestatus table statehist: fixed rubbish entries whenever logfile instances got unloaded
+
+    Livestatus:
+    * FIX: check_mk snmp checks with a custom check interval no longer have an incorrect staleness value
+
+    Notifications:
+    * mkeventd: new notification plugin for forwarding notifications to the Event Console. See inline docu in share/check_mk/notification/mkeventd for documentation.
+    * FIX: clean up environment for notifications (needed for CMC)
+
+    Checks & Agents:
+    * Windows agent: increased maximum plugin output buffer size to 2MB
+    * check_icmp: New WATO rule for custom PING checks
+    * agent_vsphere: now able to handle < > & ' " in login credentials
+    * if/if64 and friends: add 95th percentiles to graphs
+    * services: inventory now also matches against display names of services
+    * esx_vsphere_hostsystem.multipath: now able to set warn/crit levels
+    * cpu_netapp: added Perf-O-Meter and PNP template
+    * cisco_cpu: added Perf-O-Meter and PNP template
+    * apc_symmetra: add input voltage to informational output
+    * agent_vsphere: new debug option --tracefile
+    * FIX: windows_agent: fixed bug in cleanup of open thread handles
+    * FIX: cups default printer is now monitored again in Linux agent
+    * FIX: host notification email in HTML format: fixed formatting error (typo in tag)
+    * FIX: netapp_volumes: better output when volume is missing
+    * FIX: winperf_phydisk: handle case where no performance counters are available
+    * FIX: check_mk_agent.linux: limit Livestatus check to 3 seconds
+    * FIX: esx_vsphere_vm: fixed exception when memory info for a VM is missing
+    * FIX: esx_vsphere_hostsystem: Fixed typo in check output
+    * FIX: psperf.bat/ps: Plugin output processing no longer crashes when the ps service is clustered
+
+    Multisite:
+    * Filtering in views by host alias is possible now too (however the filter is not displayed in any standard view - user needs to enable it by customizing the needed views himself)
+    * FIX: add missing service icons to view "All Services with this descr..."
+    * FIX: ldap attribute plugins: fixed crash when parameters are None
+    * FIX: avoid duplicate output of log message in log tables
+    * FIX: fixed problem with LDAP user ID encoding
+    * FIX: removed state-based colors from all Perf-O-Meters
+    * FIX: brocade_fcport pnp-template: fixed incorrect display of average values
+    * FIX: all log views are now correctly sorted from new to old
+
+    Livestatus-Proxy:
+    * Implement caching of non-status requests (together with Multisite)
+    * FIX: fix exception when printing error message
+    * FIX: honor wait time (now called cooling period) after failed TCP connection
+    * FIX: fix hanging if client cannot accept large chunks (seen on RH6.4)
+
+    WATO:
+    * Rule "State and count of processes": New configuration options: virtual and resident memory levels
+    * Added title of tests to LDAP diagnose table
+    * Bulk inventory: new checkbox to only include hosts that have a failed inventory check.
+    * Bulk inventory: yet another checkbox for skipping hosts where the Check_MK service is currently critical
+    * New rule: Multipath Count (used by esx_vsphere_hostsystem.multipath)
+    * FIX: The rule "State and count of processes" is no longer available in "Parameters for inventorized check". This rule was solely intended for "Manual checks" configuration
+    * FIX: Trying to prevent auth.php errors while file is being updated
+
+1.2.3i2:
+    Core:
+    * New option -B for just generating the configuration
+    * Introduced persistent host address lookup cache to prevent issues when loading an unchanged configuration after a single address has become unresolvable
+    * Assigning a service to a cluster host no longer requires a reinventory
+    * Setting a check_type or service to ignore no longer requires a reinventory. Note: If the ignore rule is removed the services will reappear
+    * Config creation: The ignore services rule now also applies to custom, active and legacy checks
+    * Predictive monitoring: correctly handle spaces in variable names (thanks to Karl Golland)
+    * New man page browser for console (cmk -m)
+    * New option explicit_snmp_communities to override rule-based SNMP settings
+    * Preparations for significant SNMP monitoring performance improvement (It's named Inline SNMP, which is available as special feature via subscriptions)
+    * Allow to specify custom host check via WATO (arbitrary command line)
+    * Implement DNS caching. This can be disabled with use_dns_cache = False
+
+    Livestatus:
+    * new service column staleness: indicator for outdated service checks
+    * new host column staleness: indicator for outdated host checks
+
+    Checks & Agents:
+    * esx_hostsystem multipath: criticize standby paths only if not equal to active paths
+    * mk_logwatch: fixed bug when rewriting logwatch messages
+    * check_mk: Re-inventory is no longer required when a service is ignored via rule
+    * check_mk: Now possible to assign services to clusters without the need to reinventorize
+    * lnx_if: Fixed crash on missing "Address" field
+    * viprinet_router: Now able to set required target state via rule
+    * windows_agent: Now available as 64-bit version
+    * agent_vsphere: fix problem where sensors were missing when you queried multiple host systems via vCenter
+    * cached checks: no longer output cached data if the age of the cache file is twice the maximum cache age
+    * windows agent: no longer tries to execute directories
+    * fileinfo: no longer inventorize missing files (reported by Windows agent)
+    * New checks for Brocade fans, temperature and power supplies
+    * cluster hosts: removed agent version output from Check_MK service (this was misleading for different agent versions on multiple nodes)
+    * job check: better handling of unexpected agent output
+    * lnx_thermal: Added check for Linux thermal sensors (e.g. ACPI)
+    * hwg_temp: Make WATO-Rule "Room Temperature" match, add man page, graph and Perf-O-Meter
+    * ps.perf: Support Windows with new plugin "psperf.bat". wmicchecks.bat is obsolete now.
+    * Special Agent vSphere: support ESX 4.1 (thanks to Mirko Witt)
+    * esx_vsphere_object: make check state configurable
+    * mk_logwatch: support continuation lines with 'A'. Please refer to the docu.
+    * mk_oracle: Added plugin for Solaris
+    * win_netstat: New check for Windows for checking the existence of a UDP/TCP connection or listener
+    * ps/ps.perf: allow to set levels on CPU util, optional averaging of CPU
+    * diskstat: Agent is now also processing data of mmcblk devices
+    * qmail: Added check for mail queue
+    * cisco_locif: removed obsolete and already disabled check completely
+    * fc_brocade_port: removed obsolete check
+    * fc_brocade_port_detailed: removed obsolete check
+    * tsm_stgpool: removed orphaned check
+    * vmware_state: removed ancient, now orphaned check. Use vsphere_agent instead.
+    * vms_{df,md,netif,sys}: remove orphaned checks that are not needed by the current agent
+    * tsm: Added new TSM checks with a simple Windows agent plugin
+    * windows_agent: now starts local/plugin scripts in separate threads/processes; new script parameters cache_age, retry_count, timeout; new script caching options "off", "async", "sync"
+    * windows_agent: increased maximum local/plugin script output length to 512kB (output buffer now grows dynamically)
+    * jolokia_metrics: fixed incorrect plugin output for high warn/crit levels
+    * jolokia_metrics.uptime: Added PNP template
+    * hyperv: Added a check for state changes.
+    * df / esx_vsphere_datastore: now able to set absolute levels on used and free space, and levels depending on total disk space
+    * cisco_wlc: New check for monitoring Cisco wireless LAN access points
+    * cisco_wlc_clients: New check for the number of clients in a WLC WiFi
+    * df: Negative integer levels for MB left on a device
+    * win_printers: Monitoring of printer queue on a Windows print server
+    * cisco_qos: Updated to be able to monitor IOS XR 4.2.1 (on an ASR9K device)
+    * New active check, check_form_submit, to submit HTML forms and check the resulting page
+    * mk-job: /var/lib/check_mk_agent/job directory is now created with mode 1777 so mk-job can be used by unprivileged users too
+    * ADD: etherbox: new check for etherbox (messpc) sensors. Currently supported: temperature, humidity, switch contact and smoke sensors
+    * cisco_wlc_clients: now supports low/high warn and crit levels
+    * cisco_wlc: now supports configuration options for missing APs
+    * agent_vsphere: completely rewritten, now considerably faster; vCenter is still queried by the old version
+    * windows_agent: Windows eventlog informational/audit logs now reported with O prefix
+    * mk_logwatch: ignored loglines now reported with an "." prefix (if required)
+    * apache_status: Now also supports multithreaded MPM
+    * windows_agent: now able to suppress context messages in Windows eventlogs
prefix (if required) + * check_mk-if.pnp: fixed bug with pnp template on esx hosts without perfdata + * jolokia checks (JVM): uptime, threads, sessions, requests, queue + now configurable via WATO + * vSphere checks: secret is not shown to the user via WATO anymore + * WATO rule to check state of physical switch (currently used by etherbox check) + * cisco_wlc: Allows to configure handling of missing AP + * logwatch.ec: show logfiles from that we forwarded messages + * FIX: blade_blades: Fixed output of "(UNKNOWN)" even if state is OK + * FIX: apache_status: fix exception if parameter is None + * FIX: hr_mem: handle virtual memory correct on some devices + * FIX: apache_status agent plugin: now also works, if prog name contains slashes + * FIX: check_dns: parameter -A does not get an additional string + * FIX: cisco_qos: Catch policies without post/drop byte information + * FIX: cisco_qos: Catch policies without individual bandwidth limits + * FIX: windows_agent: fixed bug on merging plugin output buffers + * FIX: esx_vsphere_datastores: Fix incomplete performance data and Perf-O-Meter + * FIX: cleaned up fileinfo.groups pattern handling, manual configuration + is now possible using WATO + * FIX: check_mk-ipmi.php: PNP template now displays correct units as delivered + by the check plugin + * FIX: check_disk_smb: Remove $ from share when creating service description. + Otherwise Nagios will not accept the service description. + * FIX: mrpe: gracefully handle invalid exit code of plugin + + Notifications: + * notify.py: Matching service level: Use the hosts service level if a + service has no service level set + * notify.py: fixed bug with local notification spooling + * HTML notifications: Now adding optional links to host- and service names + when second argument notification script is configured to the base url of the + monitoring installation (e.g. http://// in case of OMD setups) + * HTML notifications: Added time of state change + + Multisite: + * Finally good handling of F5 / browser reloads -> no page switching to + start page anymore (at least in modern browsers) + * User accounts can now be locked after a specified amount of auth + failures (lock_on_logon_failures can be set to a number of tries) + * Column Perf-O-Meter is now sortable: it sorts after the *first* + performance value. This might not always be the one you like, but + its far better than nothing. 
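For illustration, a minimal multisite.mk sketch of the account-locking option mentioned above; that the value is simply the number of allowed failures is an assumption, so treat it as a sketch only:

    # multisite.mk -- lock a user account after 10 failed logins
    lock_on_logon_failures = 10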
+ * logwatch: Logwatch icon no longer uses notes_url + * Inventory screen: Host inventory also displays its clustered services + * Rules: Renamed "Ignored services" to "Disabled services" + Renamed "Ignored checks" to "Disabled checks" + * Sorter Host IP address: fixed sorting, no longer uses str compare on ip + * Views: New: Draw rule editor icon in multisite views (default off) + Can be activated in global settings + * New global multisite options: Adhoc downtime with duration and comment + Display current date in dashboard + * LDAP: Using asynchronous searches / added optional support for paginated + searches (Can be enabled in connection settings) + * LDAP: It is now possible to provide multiple failover servers, which are + tried when the primary ldap server fails + * LDAP: Supporting posixGroup with memberUid as member attribute + * LDAP: Added filter_group option to user configuration to make the + synchronized users filterable by group memberships in directories without + memberof attributes + * LDAP: Moved configuration to dedicated page which also provides some + testing mechanisms for the configuration + * Added option to enable browser scrollbar to the multisite sidebar (only + via "sidebar_show_scrollbar = True" in multisite.mk; sketch below) + * Added option to disable automatic userdb synchronizations in multisite + * Implemented search forms for most data tables + * New icons in view footers: export as CSV, export as JSON + * Availability: new columns for shortest, longest, average and count + * Editing localized strings (like the title) is now optional when cloning + views or editing cloned views. If not edited, the views inherit the + localized strings from their ancestors + * Added simple problems Dashboard + * New filter and column painter for current notification number (escalations) + * Added new painters for displaying host tags (list of tags, single tag + groups). All those painters are sortable. Also added new filters for tags. + * Added painters, icon and filters for visualizing staleness information + * Improved filtering of the foldertree snapin by user permissions (when a user is + only permitted on one child folder, the upper folder is removed from the + hierarchy) + * "Unchecked Services" view now uses the staleness of services for filtering + * Globe dashlets make use of the parameter "id" to make it possible to + provide unique ids in the rendered HTML code of the dashlets + * Multisite can now track whether or not a user is online; this needs to be + enabled e.g. via Global Settings in WATO (Save last access times of + users) + * Added popup message notification system to make it possible to notify + multisite users about various things. It is linked on the WATO Users page at + the moment. An icon showing the number of pending messages appears in the + affected user's sidebar footer. + To make the sidebar check for new messages on a regular basis, you need + to configure the interval of sidebar popup notification updates e.g. via + WATO Global Settings.
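The sidebar scrollbar option above is set in multisite.mk; the name save_user_access_times for the "Save last access times of users" global setting is an assumption (set it via WATO Global Settings if unsure):

    # multisite.mk -- enable the sidebar scrollbar
    sidebar_show_scrollbar = True
    # assumed variable name for "Save last access times of users",
    # needed for the online/offline tracking mentioned above
    save_user_access_times = True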
+ * Event views: changed default horizon from 31 to 7 days + * New option for painting timestamp: as Unix Epoch time + * New filters: Host state type and Service state type + * FIX: better error message in case of exception in SNMP handling + * FIX: Inventory screen: Now shows custom checks + * FIX: Fixed locking problem of multisite pages related to user loading/saving + * FIX: Fixed wrong default settings of view filters in localized multisite + * FIX: line wrapping of logwatch entries + * FIX: Fixed button dragging bug when opening the view editor + (at least in Firefox) + + WATO: + * Allow to configure check-/retry_interval in second precision + * Custom user attributes can now be managed using WATO + * Allow GIT to be used for change tracking (enable via global option) + * Hosts/Folders: SNMP communities can now be configured via the host + and folders hierarchy. Those settings override the rule-based config. + * Require unique alias names between the following elements: + Host/Service/Contact Groups, Timeperiods and Roles + * Removed "do not connect" option from site socket editor. Use the + checkbox "Disable" to disable the site for multisite. + * Converted table of Event Console Rules to new implementation, making it sortable + * FIX: do validation of check items in rule editor + * FIX: More consistent handling of folderpath select in rule editor + * FIX: Now correctly handling depends_on_tags on page rendering for + inherited values + * FIX: Changed several forms from GET to POST to prevent "Request-URI too + large" error messages when submitting forms + * FIX: automation snmp scan now adheres to rules for shoddy snmp devices + which have no sys description + * FIX: Cisco ruleset "Cisco WLC WiFi client connections" has been generalized to + "WLC WiFi client connections" + * FIX: Snapshot handling is a little more robust against manually created + files in the snapshot directory now + * FIX: Slightly more transparent handling of syntax errors when loading rules.mk + + Notifications: + * Flexible Notification can now filter service levels + * FIX: check_tcp corrected order of parameters in definition + + Event Console: + * New global setting "force message archiving", converts the EC into + a kind of syslog archive + * New built-in snmptrap server to directly receive snmp traps + * FIX: fix layout of filter for history action type + * FIX: better detect non-IP-number hosts in hostname translation + +1.2.3i1: + Core: + * Agents can send data for other hosts "piggyback". This is being + used by the vSphere and SAP plugins + * New variable host_check_commands, that allows the definition of + an alternative host check command (without manually defining one) + * New variable snmp_check_interval which can be used to customize + the check intervals of SNMP based checks + * setup: Added missing vars rrd_path and rrdcached_sock + * new variable check_mk_exit_status: allows to make Check_MK service OK, + even if host is not reachable.
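A hedged main.mk sketch of the new Core variables above. The (value, taglist, hostlist) layout follows the usual Check_MK rule convention; the exact value formats here are assumptions:

    # main.mk -- replace the default Check_MK host check by a plain ping
    host_check_commands = [
        ( "ping", [], ALL_HOSTS ),
    ]
    # check SNMP-based services only every 10 minutes on hosts tagged 'snmp'
    # (the (check type, minutes) value format is an assumption)
    snmp_check_interval = [
        ( ("if64", 10), ["snmp"], ALL_HOSTS ),
    ]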
+ * set always_cleanup_autochecks to True per default now + * check_mk: new option --snmptranslate + + Multisite: + * New availability view for arbitrary host/service collections + * New option auth_by_http_header to use the value of an HTTP header + variable for authentication (Useful in reverse proxy environments) + * New permission that is needed for seeing views that other users + have defined (per default this is contained in all roles) + * New path back to the view after command execution with all + checkboxes cleared + * Added plugins to config module to make registration of default values + possible for addons like mkeventd - reset to default values works now + correctly even for multisite related settings + * perfometer: Bit values now using base of 1000 + * Added PNP template for check_disk_smb + * Dashboards can now be configured to be reloaded on resizing + (automatically adds width/height url parameters) + * LDAP authentication: New config option "Do not use persistent + connections to ldap server" + * Hosttags and auxiliary tags can now be grouped in topics + * Fixed output of time in view if server time differs from user time + + Event Console: + * New rule feature: automatically delete event after actions + * New filter for maximum service level (minimum already existed) + * New global setting: hostname translation (allows e.g. to drop domain name) + * New rule match: only apply rule within specified time period + + Checks & Agents: + * solaris_mem: New check for memory and swap for Solaris agent + * agent_vsphere: New VMWare ESX monitoring that uses pySphere and the VMWare + API in order to get data very efficiently. Read (upcoming) documentation + for details. + * new special agent agent_random for creating random monitoring data + * New checks: windows_intel_bonding / windows_broadcom_bonding + * Implemented SAP monitoring based on the agent plugin mk_sap. This + must be run on a linux host. It connects via RFC calls to SAP R/3 + systems to retrieve monitoring information for this or other machines. + * sap.dialog: Monitors SAP dialog statistics like the response time + * sap.value: Simply processes information provided by SAP to Nagios + * openvpn_clients: new check for OpenVPN connections + * if64_tplink: special new check for TP Link switches with broken SNMP output + * job: Monitoring states and performance indicators of any jobs on linux systems + * oracle_asm_diskgroups: Added missing agent plugin + asmcmd wrapper script + * oracle_jobs: New check to monitor oracle database job execution + * oracle_rman_backups: New check to monitor state of ORACLE RMAN backups + * jar_signature: New check to monitor whether or not a jar is signed and + the certificate is not expired + * cisco_qos: adhere qos-bandwidth policies + * check_disk_smb: WATO formalization for active check check_disk_smb + * if.include: new configurable parameters for assumed input and output speed + * cisco_qos: new param unit: switches between bit/byte display + new param average: average the values over the given minute + new params post/drop can be configured via int and float + fixed incorrect worst state if different parameters exceed limit + * logwatch.ec: Added optional spooling to the check to prevent data loss + when processing of current lines needs more time than max execution time + * mounts: ignore multiple occurrences of the same device + * Linux agent: allow cached local/plugins checks (see docu) + * mem.include: Linux memory check now includes size of page tables. This + can be important e.g.
on ORACLE systems with a lot of memory + * windows_agent: Now buffers output before writing it to the socket + Results in fewer tcp packets per call + * smart.stats: rewrote check. Please reinventorize. Error counters are now + snapshotted during inventory. + * smart.temp: add WATO configuration + * windows_agent: check_mk.ini: new option "port" - specifies agent port + * winperf_processor: introduce averaging, support predictive levels + * cpu_util.include: fixed bug when params are set to None + * predictive levels: fixed bug when existing predictive levels get new options + * windows_plugin mssql.vbs: No longer queries stopped mssql instances + * cisco_hsrp: fixed problem when HSRP groups had same ip address + * winperf_if: hell has frozen over: a new check for network adapters on Windows + * windows agent: new config section plugins, now able to set timeouts for specific plugins + new global config option: timeout_plugins_total + * lnx_if in Linux agent: force deterministic order of network devices + * Linux agent: remove obsolete old <<>> and <<>> sections + * logwatch, logwatch.ec: detect error in agent configuration + * Linux agent: cups_queues: do not monitor non-local queues (thanks to Olaf Morgenstern) + * AIX agent: call lparstat with argument 1 1, this gives more accurate data + * Check_MK check: enable extended performance data per default now + * viprinet checks: New checks for firmware version/update, memory usage, power supply status, + router mode, serialnumber and temperature sensors + * uptime, snmp_uptime, esx_vsphere_counters.uptime: allow to set lower and upper levels + * winperf_processor: Now displays (and scales) to number of cpus in pnpgraph + * mk_postgres plugin: replace select * with list of explicit columns (fix for PG 9.1) + * lnx_if: show MAC address for interfaces (needs also agent update) + * winperf_tcp_conn: New check. Displays number of established tcpv4 connections in windows + Uses WATO Rule "TCP connection stats (Windows)" + * windows_agent: fixed timeouts for powershell scripts in local/plugins + * logwatch: Agent can now use logwatch.d/ to split config to multiple files + * logwatch: Agent can now rewrite messages + * apache_status: New rule: set levels for number of remaining open slots + * mrpe: handle long plugin output correctly, including performance data + * cisco_qos: parameters now configurable via WATO + + Notifications: + * notify.py: unique spoolfile names are no longer created with uuid + * Warn user if only_services never matches + + Livestatus: + * Table statehist: Improved detection of vanished hosts and services. + Now able to detect and remove nonsense check plugin output + * FIX: able to handle equal comment_id between host and service + * livestatus.log: show utf-8 decoding problems only with debug logging >=2 + * livestatus: fixed incorrect output formatting of comments_with_info column + + BI: + * Integrated availability computing, including nifty time warp feature + + WATO: + * Configuration of datasource programs via dedicated rules + * New editor for Business Intelligence rules + * Rule Editor: Now able to show ineffective rules + * Valuespec: CascadingDropdown now able to process choice values from functions + * Removed global option logwatch_forward_to_ec, moved this to the + logwatch_ec ruleset.
With this option the forwarding can now be enabled + for each logfile on a host + * Configuration of an alternative host check command + * Inventory: Display link symbol for ps ruleset + * New rule for notification_options of hosts and services + * FIX: Rulesets: correct display of rules within subfolders + * Remove Notification Command user settings, please use flexible notifications instead + + 1.2.2p3: Core: * FIX: get_average(): Gracefully handle time anomlies of target systems - * FIX: notifications: /var/lib/check_mk/notify directory is now created + * FIX: notifications: /var/lib/check_mk/notify directory is now created correctly during setup from tgz file. (Without it notifications did not get sent out.) * FIX: add missing $DESTDIR to auth.serials in setup.sh @@ -9,20 +2867,17 @@ Checks & Agents: * FIX: winperf_processor: fix case where CPU percent is exactly 100% * FIX: blade_powerfan: fix mixup of default levels 50/40 -> 40/50 - * FIX: Cleaned up graph rendering of Check_MK services + * FIX: Cleaned up graph rendering of Check_MK services * FIX: zypper: deal with output from SLES 10 * FIX: zpool_status: Ignoring "No known data errors" text - * FIX: fileinfo: no longer inventorize missing files - * FIX: fileinfo: fixed fileinfo grouping in case of missing files - * FIX: fileinfo: take care of empty patterns * FIX: dmi_sysinfo: Handling ":" in value correctly * FIX: check_http: Fixed syntax error when monitoring certificates * FIX: check_dns: parameter -A does not get an additional string * FIX: diskstat: Fixed wrong values for IO/s computation on linux hosts * FIX: blade_healts: Fixed wrong index checking resulting in exceptions - * FIX: mrpe: gracefully handle invalid plugin exit codes - * FIX: check_mk-ipmi.php: PNP template now displays correct units as delivered - by the check plugin + * FIX: notifications: /var/lib/check_mk/notify directory is now created + correctly during setup from tgz file. (Without it notifications + did not get sent out.) Multisite: * FIX: LDAP: Disabling use of referrals in active directory configuration @@ -30,14 +2885,13 @@ non visible pnp graphs and missing nagvis permissions * FIX: Fixed label color of black toner perfometers when fuel is low * FIX: Fixed wrong default settings of view filters in localized multisite - * FIX: Fixed exception when enabling sounds for views relying on + * FIX: Fixed exception when enabling sounds for views relying on e.g. 
alert statistics source * FIX: Folder Tree Snapin: make folder filter also work for remote folders that do not exist locally * FIX: correctly display sub-minute check/retry intervals * FIX: fix logic of some numeric sorters * FIX: Improved user provided variable validation in view code - * FIX: Improved user provided variable validation in bookmark sidebar snapin * FIX: Escaping html code in plugin output painters WATO: @@ -61,18 +2915,17 @@ * FIX: check_mkevents now uses case insensitive host name matching Livestatus: - * livestatus: fixed incorrect output formatting of comments_with_info column - * table statehist: fixed memory leak + * FIX: fixed incorrect output formatting of comments_with_info column + * FIX: statehist table: fixed memory leak 1.2.2p2: Core: - * FIX: better error message in case of exception in SNMP handling * FIX: livecheck: fixed handling of one-line plugin outputs and missing \n (Thanks to Florent Peterschmitt) Checks & Agents: - * FIX: apache_status: use (also) apache_status.cfg instead of apache_status.conf * FIX: jolokia_info: ignore ERROR instances + * FIX: apache_status: use (also) apache_status.cfg instead of apache_status.conf * FIX: f5_bigip_vserver: fix wrong OID (13 instead of 1), thanks to Miro Ramza * FIX: f5_bigip_psu: handle more than first power supply, thanks to Miro Ramza * FIX: ipmi_sensors: ignore sensors in state [NA] (not available) @@ -98,7 +2951,7 @@ * Renamed "Delete Event" to "Archive Event" to clearify the meaning Notifications: - * FIX: contacts with notifications disabled no longer receive + * FIX: contacts with notifications disabled no longer receive custom notifications, unless forced 1.2.2p1: @@ -165,19 +3018,17 @@ 1.2.2b7: Checks & Agents: * FIX: postfix_mailq: fix labels in WATO rule, set correct default levels - - WATO: - * FIX: syntax error in check_parameters + 1.2.2b6: Core: * FIX: setup: detect check_icmp also on 64-Bit CentOS - (thanks to あきら) + (thanks to あきら) * FIX: setup.sh: create auth.serials, fix permissions of htpasswd * FIX: livecheck: now able to handle check output up to 16kB Checks & Agents: - * FIX: apc_symmetra_power: resurrect garble PNP template for + * FIX: apc_symmetra_power: resurrect garble PNP template for * FIX: check_mk_agent.freebsd: remove garble from output (Thanks to Mathias Decker) * FIX: check_mk-mssql_counters.locks: fix computation, was altogether wrong @@ -202,9 +3053,9 @@ * FIX: fix event type for recoveries * FIX: fix custom notifications on older nagios versions * FIX: handle case where type HOST/SERVICE not correctly detected - + Livestatus: - * FIX: memory leak when removing downtime / comment + * FIX: memory leak when removing downtime / comment WATO: * FIX: Removed "No roles assigned" text in case of unlocked role attribute @@ -222,13 +3073,13 @@ * Checks can now omit the typical "OK - " or "WARN -". This text will be added automatically if missing. * FIX: livecheck: fixed compilation bug - * FIX: avoid simultanous activation of changes by means of a lock * FIX: check_mk: convert service description unicode into utf-8 - + * FIX: avoid simultanous activation of changes by means of a lock + Checks & Agents: * FIX: jolokia_metrics.mem - now able to handle negative/missing max values * ADD: tcp_conn_stats: now additionally uses /proc/net/tcp6 - * ADD: wmic_processs: cpucores now being considered when calculating + * ADD: wmic_processs: cpucores now being considered when calculating user/kernel percentages. 
(thanks to William Baum) * FIX: UPS checks support Eaton Evolution * FIX: windows agent plugin: mssql now exits after 10 seconds @@ -238,7 +3089,6 @@ Livestatus: * FIX: possible crash with VERY long downtime comments - * FIX: now able to handle equal comment id of host and service WATO: * FIX: Fix hiliting of errors in Nagios output @@ -274,14 +3124,15 @@ Checks & Agents: * FIX: smart - not trying to parse unhandled lines to prevent errors - * FIX: WATO configuration of filesystem trends: it's hours, not days! * FIX: winperf_processor - fixed wrong calculations of usage + * FIX: WATO configuration of filesystem trends: it's hours, not days! * FIX: mysql: fixed crash on computing IO information * FIX: diskstat: fix local variable 'ios_per_sec' referenced before assignment * FIX: multipath: ignore warning messages in agent due to invalid multipath.conf * FIX: megaraid_bbu: deal with broken output ("Adpater"), found in Open-E * FIX: megaraid_pdisk: deal with special output of Open-E * FIX: jolokia_metrics.mem: renamed parameter totalheap to total + * FIX: megaraid_bbu: deal with broken output ("Adpater") * FIX: check_ldap: added missing host address (check didn't work at all) * FIX: check_ldap: added missing version option -2, -3, -3 -T (TLS) * FIX: mssql: Agent plugin now supports MSSQL Server 2012 @@ -302,8 +3153,8 @@ * FIX: Fixed wrong localization right after a user changed its language * FIX: Improved handling of error messages in bulk inventory * FIX: fixed focus bug in transform valuespec class - * FIX: sidebar snapins which refresh do not register for restart detection anymore * FIX: stop doing snapin refreshes after they have been removed + * FIX: sidebar snapins which refresh do not register for restart detection anymore * FIX: fix user database corruption in case of a race condition * FIX: added checks wether or not a contactgroup can be deleted * FIX: Avoid deadlock due to lock on contacts.mk in some situations @@ -363,13 +3214,13 @@ Multisite: * FIX: Fixed several minor IE7 related layout bugs - * Add: Improved navigation convenience when plugin output contains [running on ... ] * FIX: title of pages was truncated and now isn't anymore * Cleanup form for executing commands on hosts/services WATO: * FIX: Fixed layout of rulelist table in IE* * FIX: Fixed adding explicit host names to rules in IE7 + * Add: Improved navigation convenience when plugin output contains [running on ... 
] 1.2.2b1: Core: @@ -378,11 +3229,11 @@ * cmk --notify: added the macros NOTIFY_LASTHOSTSTATECHANGE, NOTIFY_HOSTSTATEID, NOTIFY_LASTSERVICESTATECHANGE, NOTIFY_SERVICESTATEID, NOTIFY_NOTIFICATIONCOMMENT, NOTIFY_NOTIFICATIONAUTHOR, NOTIFY_NOTIFICATIONAUTHORNAME, NOTIFY_NOTIFICATIONAUTHORALIAS - * FIX: more robust deletion of precompiled files to ensure the correct + * FIX: more robust deletion of precompiled files to ensure the correct creation of the files (Thanks to Guido Günther) - * FIX: Inventory for cluster nodes who are part of multiple clusters + * FIX: Inventory for cluster nodes who are part of multiple clusters * cmk --notify: added plugin for sms notification - * FIX: precompiled checks: correct handling of sys.exit() call when using python2.4 + * FIX: precompiled checks: correct handling of sys.exit() call when using python2.4 * cmk --notify: improved logging on wrong notification type * RPM: Added check_mk-agent-scriptless package (Same as normal agent rpm, but without RPM post scripts) @@ -390,12 +3241,12 @@ Checks & Agents: * winperf_processor now outputs float usage instead of integer * FIX: mssql_counters.file_sizes - Fixed wrong value for "Log Files" in output - * FIX: drbd: Parameters for expected roles and disk states can now be set to + * FIX: drbd: Parameters for expected roles and disk states can now be set to None to disable alerting on changed values * printer_supply_ricoh: New check for Ricoh printer supply levels * jolokia_metrics.mem: now supports warn/crit levels for heap, nonheap, totalheap * jolokia_metrics.mem: add dedicated PNP graph - * FIX: logwatch.ec: use UNIX socket instead of Pipe for forwarding into EC + * FIX: logwatch.ec: use UNIX socket instead of Pipe for forwarding into EC * FIX: logwatch.ec: fixed exception when forwarding "OK" lines * FIX: logwatch.ec: fixed forwarding of single log lines to event console * Improved performance of logwatch.ec check in case of many messages @@ -410,7 +3261,7 @@ * Add: New check for DB2 instance memory levels * Add: winperf_phydisk can now output IOPS * Add: oracle_tablespace now with flexible warn/crit levels(magic number) - + Livestatus: * Add: new column in hosts/services table: comments_with_extra_info Adds the entry type and entry time @@ -441,7 +3292,7 @@ * FIX: fix title of foldable areas contained in list valuespecs * FIX: Fixed bug where pending log was not removed in multisite setup * FIX: Fixed generation of auth.php (Needed for NagVis Multisite Authorisation) - * FIX: Fixed missing general.* permissions in auth.php on slave sites in + * FIX: Fixed missing general.* permissions in auth.php on slave sites in case of distributed WATO setups * Added oracle_tablespaces configuration to the application checkgroup * FIX: Fixed synchronisation of mkeventd configs in distributed WATO setups @@ -464,7 +3315,7 @@ 1.2.1i5: Core: - * Improved handling of CTRL+C (SIGINT) to terminate long runnining tasks + * Improved handling of CTRL+C (SIGINT) to terminate long runnining tasks (e.g. 
inventory of SNMP hosts) * FIX: PING services on clusters are treated like the host check of clusters * cmk --notify: new environment variable NOTIFY_WHAT which has HOST or SERVICE as value @@ -475,7 +3326,7 @@ Checks & Agents: * Linux Agent, diskstat: Now supporting /dev/emcpower* devices (Thanks to Claas Rockmann-Buchterkirche) * FIX: winperf_processor: Showing 0% on "cmk -nv" now instead of 100% - * FIX: win_dhcp_pools: removed faulty output on non-german windows 2003 servers + with no dhcp server installed (Thanks to Mathias Decker) * Add: fileinfo is now supported by the solaris agent. Thanks to Daniel Roettgermann * Logwatch: unknown eventlog level ('u') from windows agent treated as warning @@ -493,7 +3344,7 @@ * When row selections are enabled and none is selected, performing actions displays an error message instead of performing the action on all rows - * Storing row selections in user files, cleaned up row selection + handling to single files. Cleaned up GET/POST mixups in confirm dialogs * Add: New user_options to limit seen nagios objects even if the role is set to see all * Fix: On site configuration changes, only relevant sites are marked as dirty @@ -508,7 +3359,7 @@ * LDAP: Improved error handling in case of misconfigurations * LDAP: Reduced number of ldap queries during a single page request / sync process * LDAP: Implemented some kind of debug logging for LDAP communication - * FIX: Re-added an empty file as auth.py (wato plugin) to prevent problems during update + WATO: * CPU load ruleset now accepts float values @@ -538,7 +3389,7 @@ Notifications: * Fix flexible notifications on non-OMD systems - + Checks & Agents: * Linux Agent, mk_postgres: Supporting pgsql and postgres as user * Linux Agent, mk_postgres: Fixed database stats query to be compatible @@ -566,7 +3417,7 @@ * FIX: loading notification scripts in local directory for real * FIX: oracle_version: return valid check result in case of missing agent info * FIX: apache_status: fixed bug with missing 'url', wrote man page - * FIX: fixed missing localisation in check_parameters.py + * FIX: userdb/ldap.py: fixed invalid call site.getsitepackages() for python 2.6 * FIX: zpool_status: fixed crash when spare devices were available * FIX: hr_fs: handle negative values in order to support larger disks (thanks to Christof Musik) @@ -605,10 +3456,10 @@ * FIX: Speedometer: Terminating data updates when snapin is removed from sidebar * FIX: Views: toggling forms does not disable the checkbox button anymore * FIX: Dashboard: Fixed wrong display options in links after data reloads - * FIX: Fixed "remove all downtimes" button in views when there are no downtimes to be deleted + * FIX: Services in hosttables now use the service name as header (if no custom title set) * New filter for host_contact and service_contact - + WATO: * Add: Creating a new rule immediately opens its edit form * The rules form now uses POST as transaction method @@ -671,7 +3522,7 @@ * New system of custom notification, with WATO support Event Console: - * Moved source of Event Console into Check_MK project + * New button for resetting all
rule hits counters * When saving a rule then its hits counter is always reset * New feature of hiding certain actions from the commands in the status GUI @@ -699,7 +3550,7 @@ * WATO Folder Filter no longer available in single host views * Added new painters "Service check command expanded" and "Host check command expanded" - * FIX: Corrected garbled description for sorter "Service Performance data" + * FIX: Corrected garbled description for sorter "Service Performance data" * Dashboard globes can now be filtered by host_contact_group/service_contact_group * Dashboard "iframe" attribute can now be rendered dynamically using the "iframefunc" attribute in the dashlet declaration @@ -735,7 +3586,7 @@ progress dialogs (e.g. bulk inventory mode) * FIX: Fixed editing of icon_image rules * Added support of locked hosts and folders ( created by CMDB ) - * Logwatch: logwatch agents/plugins now with ok pattern support + * Logwatch: logwatch agents/plugins now with ok pattern support * Valuespec: Alternative Value Spec now shows helptext of its elements * Valuespec: DropdownChoice, fixed exception on validate_datatype @@ -763,8 +3614,8 @@ * Added new Checks for Gude PDU Units * logwatch: Working around confusion with OK/Ignore handling in logwatch_rules * logwatch_ec: Added new subcheck to forward all incoming logwatch messages - to the event console. With this check you can use the Event Console - mechanisms and GUIs instead of the classic logwatch GUI. It can be + to the event console. With this check you can use the Event Console + mechanisms and GUIs instead of the classic logwatch GUI. It can be enabled on "Global Settings" page in WATO for your whole installation. After enabling it you need to reinventorize your hosts. * Windows Update Check: Now with caching, Thanks to Phil Randal and Patrick Schlüter @@ -779,7 +3630,7 @@ * FIX: windows agent: fixed possible crash in eventlog section BI: - * FIX: fixed bug in aggregation count (thanks Neil) + * FIX: fixed bug in aggregation count (thanks Neil) 1.2.0p4: WATO: @@ -814,7 +3665,7 @@ * FIX: Localize option for not OMD Environments WATO: - * FIX: Users & Contacts uses case-insensitive sorting of 'Full name' column + * FIX: Users & Contacts uses case-insensitive sorting of 'Full name' column * FIX: Removed focus of "Full name" attribute on editing a contact * FIX: fix layout bug in ValueSpec ListOfStrings (e.g. 
used in list of explicit host/services in rules) @@ -831,18 +3682,18 @@ Livestatus: * FIX: check_icmp: fixed calculation of remaining length of output buffer * FIX: check_icmp: removed possible buffer overflow on do_output_char() - + Livecheck: * FIX: fixed problem with long plugin output * FIX: added /0 termination to strings * FIX: changed check_type to be always active (0) - * FIX: fix bug in assignment of livecheck helpers + * FIX: fix bug in assignment of livecheck helpers * FIX: close inherited unused filedescriptors after fork() * FIX: kill process group of called plugin if timeout is reached -> preventing possible freeze of livecheck * FIX: correct escaping of character / in nagios checkresult file * FIX: fixed SIGSEGV on hosts without defined check_command - * FIX: now providing correct output buffer size when calling check_icmp + * FIX: now providing correct output buffer size when calling check_icmp Checks & Agents: * FIX: Linux mk_logwatch: iregex Parameter was never used @@ -894,7 +3745,7 @@ * mysql_capacity: cleaned up check, levels are in MB now * jolokia_info, jolokia_metrics: new rewritten checks for jolokia (formerly jmx4perl). You need the new plugin mk_jokokia for using them - * added preliminary agent for OpenVMS (refer to agents/README.OpenVMS) + * added preliminary agent for OpenVMS (refer to agents/README.OpenVMS) * vms_diskstat.df: new check file usage of OpenVMS disks * vms_users: new check for number of interactive sessions on OpenVMS * vms_cpu: new check for CPU utilization on OpenVMS @@ -915,7 +3766,7 @@ * New check for f5 bigip network interfaces * cmctc.temp: added parameters for warn/crit, use now WATO rule "Room temperature (external thermal sensors)" - * cisco_asa_failover: New Check for clustered Cisco ASA Firewalls + * cisco_asa_failover: New Check for clustered Cisco ASA Firewalls * cbl_airlaser.status: New Check for CBL Airlaser IP1000 laser bridge. * cbl_airlaser.hardware: New Check for CBL Airlaser IP1000 laser bridge. Check monitors the status info and allows alerting based on temperature. @@ -940,7 +3791,7 @@ permissions of roles * FIX: remove line about number of rules in rule set overview (that garbled the logical layout) - * Rules now have an optional comment and an URL for linking to + * Rules now have an optional comment and an URL for linking to documntation * Rule now can be disabled without deleting them. * Added new hook "sites-saved" @@ -949,7 +3800,7 @@ When set to False the attribute can only be changed during creation of a new object. When editing an object this attribute is only displayed. * new: search for rules in "Host & Service Configuration" - * parent scan: new option "ping probes", that allows skipping + * parent scan: new option "ping probes", that allows skipping unreachable gateways. * User managament: Added fields for editing host/service notification commands * Added new active check configuration for check_smtp @@ -1027,7 +3878,7 @@ * if_lancom: add SSID to logical WLAN interface names * Added a collection of MSSQL checks for monitoring MSSQL servers (backups, tablespaces, counters) - * New check wut_webio_io: Monitor the IO input channels on W&T Web-IO + * New check wut_webio_io: Monitor the IO input channels on W&T Web-IO devices * nfsmounts: reclassify "Stale NFS handle" from WARN to CRIT * ORACLE agent/checks: better error handling. 
Let SQL errors get @@ -1044,7 +3895,7 @@ * FIX: winperf_phydisk: Fix typo (lead to WATO rule not being applied) * Windows agent: new [global] option crash_debug (see online docu) * AIX agent: new check for LVM volume status in rootvg. - * PostgreSQL plugin: agent is now modified to work with PostgreSQL + * PostgreSQL plugin: agent is now modified to work with PostgreSQL versions newer than 8.1. (multiple reports, thanks!) Multisite: @@ -1058,7 +3909,7 @@ * FIX: The refresh time in footer is updated now when changing the value * FIX: view editor shows "(Mobile)" hint in view titles when linking to views - WATO: + WATO: * Main menu of ruleeditor (Host & Service Parameters) now has a topic for "Used rules" - a short overview of all non-empty rulesets. @@ -1119,7 +3970,7 @@ * FIX: fix bulk edit and form properties (visibility of attributes was broken) * FIX: fix negating hosts in rule editor - Checks & Agents: + Checks & Agents: * fileinfo: added this check to Linux agent. Simply put your file patterns into /etc/check_mk/fileinfo.cfg for configuration. * mysql.sessions: New check for MySQL sessions (need new plugin mk_mysql) @@ -1131,7 +3982,7 @@ * FIX: hpux_if: convert_to_hex was missing on non-SNMP-hosts -replace with inline implementation * tcp_conn_stats: handle state BOUND (found on Solaris) - * diskstat: support for checking latency, LVM and VxVM on Linux (needs + * diskstat: support for checking latency, LVM and VxVM on Linux (needs updated agent) * avoid duplicate checks cisco_temp_perf and cisco_sensor_temp @@ -1157,7 +4008,7 @@ * FIX: Fixed "make_utf is not defined" error when having custom timeperiods defined in WATO - Checks & Agents: + Checks & Agents: * MacOS X: Agent for MacOS (Thanks to Christian Zigotzky) * AIX: New check aix_multipath: Supports checking native AIX multipathing from AIX 5.2 onward * Solaris: New check solaris_multipath: Supports checking native Solaris multipath from Solaris10 and up. @@ -1241,7 +4092,7 @@ 1.2.0b4: Core: - * New configuration variable snmp_timing, allowing to + * New configuration variable snmp_timing, allowing to configure timeout and retries for SNMP requests (also via WATO) * New configuration variable custom_checks. This is mainly for WATO but also usable in main.mk It's a variant of legacy_checks that @@ -1267,10 +4118,10 @@ * FIX: audit log was not shown if no entry for today existed * FIX: fix parent scan on single site installations * FIX: fix folder visibility permission handling - * FIX: honor folder-permissions when creating, deleting + * FIX: honor folder-permissions when creating, deleting and modifiying rules * FIX: detect non-local site even if unix: is being used - * FIX: better error message if not logged into site during + * FIX: better error message if not logged into site during action that needs remote access * FIX: send automation data via POST not GET. This fixes inventory on hosts with more than 500 services. 
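Regarding the fileinfo entry above: /etc/check_mk/fileinfo.cfg just lists one file pattern per line (shell globs allowed), for example:

    /var/log/messages
    /var/log/*.log
    /tmp/export/*.csv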
@@ -1333,7 +4184,7 @@ Checks & Agents: * FIX: Made logwatch parsing mechanism a little more robust (Had problems with emtpy sections from windows agent) - * FIX: brocade_fcport: Configuration of portsates now possible + * FIX: brocade_fcport: Configuration of portsates now possible * if_lancom: special version for if64 for LANCOM devices (uses ifName instead of ifDescr) @@ -1353,7 +4204,7 @@ * FIX: Fixed umlaut problem in host aliases and ip addresses created by WATO * FIX: Fixed exception caused by validation problems during editing tags in WATO * FIX: create sample config only if both rules.mk and hosttags.mk are missing - * FIX: do not loose host tags when both using WATO-configured and + * FIX: do not loose host tags when both using WATO-configured and manual ones (via multisite.mk) * Timeperiods: Make list of exceptions dynamic, not fixed to 10 entries * Timeperiods: Configure exclusion of other timeperiods @@ -1376,7 +4227,7 @@ * FIX: deletion of automation accounts now works * FIX: Disabling notifications for users does work now * New main overview for rule editor - * New multisite.mk option wato_hide_varnames for hiding Check_MK + * New multisite.mk option wato_hide_varnames for hiding Check_MK configuration variable names from the user * New module "Logwatch Pattern Analyzer" to verify logwatch rules * Added new variable logwatch_rules which can also be managed through the @@ -1488,7 +4339,7 @@ * hpux_lunstats: new check for disk IO on HP-UX * windows - mk_oracle tablespace: Added missing sid column * diskstat: make inventory mode configurable via WATO - * added new checks for Fujitsu ETERNUS DX80 S2 + * added new checks for Fujitsu ETERNUS DX80 S2 (thanks to Philipp Höfflin) * New checks: lgp_info, lgp_pdu_info and lgp_pdu_aux to monitor Liebert MPH/MPX devices @@ -1503,7 +4354,7 @@ * FIX: Linux Agent: Fixed ipmi-sensors handling of Power_Unit data * hr_mem: handle rare case where more than one entry is present (this prevents an exception of pfSense) - * statgrab_load: level is now checked against 15min average - + * statgrab_load: level is now checked against 15min average - in order to be consistent with the Linux load check * dell_powerconnect_cpu: hopefully correctly handle incomplete output from agent now. @@ -1598,7 +4449,7 @@ are used. Multisite: - * Improve transaction handling and reload detection: user can have + * Improve transaction handling and reload detection: user can have multiple action threads in parallel now * Sounds in views are now enabled per default. The new configuration variable enable_sounds can be set to False in multisite.mk in order @@ -1623,7 +4474,7 @@ local/ hierarchy in OMD Mobile: - * Improved sorting of views in main page + * Improved sorting of views in main page * Fix: Use all the availiable space in header * Fix: Navigation with Android Hardwarekeys now working * Fix: Links to pnp4nagios now work better @@ -1659,7 +4510,7 @@ Livestatus: * ColumnHeaders: on is now able to switch column header on even if Stats: headers are used. Artifical header names stats_1, stats_2, etc. are - begin used. Important: Use "ColumnHeaders: on" after Columns: and + begin used. Important: Use "ColumnHeaders: on" after Columns: and after Stats:. 1.1.13i2: @@ -1670,7 +4521,7 @@ * linux agent - ipmi: Creating directory of cache file if not exists * dell_powerconnect_cpu: renamed service from CPU to "CPU utilization", in order to be consistent with other checks - + Multisite: * Several cleanups to prevent css/js warning messages in e.g. 
Firefox * Made texts in selectable rows selectable again @@ -1799,7 +4650,7 @@ * fileinfo: new check for monitoring age and size of files * heartbeat_crm: apply patches from Václav Ovsík, so that the check should work on Debian now. - * ad_replication: added warninglevel + * ad_replication: added warninglevel * fsc_*: added missing scan functions * printer_alerts: added further state codes (thanks to Matthew Stew) * Solaris agent: changed shell to /usr/bin/bash (fixes problems with LC_ALL=C) @@ -1822,7 +4673,7 @@ 1.1.12p6: Checks & Agents: * FIX: lnx_if: remove debug output (left over from 1.1.12p5) - + 1.1.12p5: Multisite: * FIX: fix hitting enter in Quicksearch on IE 8 @@ -1881,7 +4732,7 @@ Multisite: * FIX: Fixed styling of view header in older IE browsers * FIX: Do not show WATO button in views if WATO is disabled - * FIX: Remove WATO Folder filter if WATO is disabled + * FIX: Remove WATO Folder filter if WATO is disabled * FIX: Snapin 'Performance': fix text align for numbers * FIX: Disallow setting downtimes that end in the past * FIX: Fix links to downtime services in dashboard @@ -1915,7 +4766,7 @@ * FIX: dashboard problem views now ignore notification period, just as tactical overview and normal problem views do * FIX: Loading dashboard plugins in dashboard module - + 1.1.12: Checks & Agents: @@ -1926,7 +4777,7 @@ * Dashboard: fix font size of service statistics table * Dashboard: insert links to views into statistics * Dashboard: add links to PNP when using PNP graphs - + 1.1.12b2: Core, Setup, etc.: * FIX: fix crash with umlauts in host aliases @@ -1954,7 +4805,7 @@ * FIX: fix crash on imcomplete log lines (i.e. as as result of a full disk) * FIX: Livestatus-API: fix COMMAND via persistent connections - + 1.1.12b1: Core, Setup, etc.: @@ -1962,7 +4813,7 @@ * Made profile output file configurable (Variable: g_profile_path) Checks & Agents: - * FIX: j4p_performance: fix inventory functions + * FIX: j4p_performance: fix inventory functions * FIX: mk_oracle: fix race condition in cache file handling (agent data was missing sections in certain situations) * mrpe: make check cluster-aware and work as clustered_service @@ -2020,7 +4871,7 @@ the title of the file/folder in WATO * FIX: Removed new python syntax which is incompatible with old python versions * FIX: Made bulk inventory work in IE - * FIX: Fixed js errors in IE when having not enough space on dashboard + * FIX: Fixed js errors in IE when having not enough space on dashboard * FIX: fix error when using non-Ascii characters in view title * FIX: fix error on comment page caused by missing sorter * FIX: endless javascript when fetching pnp graphs on host/service detail pages @@ -2131,8 +4982,8 @@ (this fixes a problem with printer_pages and an empty item) * Great speed up of cmk -N/-C/-U/-R, especially when number of hosts is large. - * new main.mk option delay_precompile: if True, check_mk will skip Python - precompilation during cmk -C or cmk -R, but will do this the first + * new main.mk option delay_precompile: if True, check_mk will skip Python + precompilation during cmk -C or cmk -R, but will do this the first time the host is checked. This speeds up restarts. Default is False. Nagios user needs write access in precompiled directory! * new config variable agent_ports, allowing to specify the agent's @@ -2141,7 +4992,7 @@ to used with SNMP, on a per-host basis. * new config variable dyndns_hosts. Hosts listed in this configuration list (compatible to bulkwalk_hosts) use their hostname as IP address. 
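A short main.mk sketch for two of the options above; dyndns_hosts reuses the bulkwalk_hosts host-matching format per the entry, and the 'dyn' tag is hypothetical:

    # main.mk
    delay_precompile = True        # precompile on first check, not at cmk -C/-R
    dyndns_hosts = [
        ( ["dyn"], ALL_HOSTS ),    # look these hosts up by hostname at check time
    ]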
- + Checks & Agents: * FIX: AIX agent: output name of template in case of MRPE * FIX: cisco_temp: skip non-present sensors at inventory @@ -2171,7 +5022,7 @@ * if/if64: Fixed bug in operstate detection when using tuple of valid operstates * mk_oracle: Added caching of results to prevent problems with long running SQL queries. Cache is controlled by CACHE_MAXAGE var which is preset to - 120 seconds + 120 seconds * mk_oracle: EXCLUDE_=ALL or EXCLUDE_=oracle_sessions can be used to exclude specific checks now * mk_oracle: Added optional configuration file to configure the new options @@ -2182,7 +5033,7 @@ the running state, number of sessions and number of requests now. Can be extended via agent configuration (j4p.cfg). * Added some preflight checks to --scan-parents code - * New checks netapp_cluster, netapp_vfiler for checking NetAPP filer + * New checks netapp_cluster, netapp_vfiler for checking NetAPP filer running as cluster or running vfilers. * megaraid_pdisks: Better handling of MegaCli output (Thanks to Bastian Kuhn) * Windows: agent now also sends start type (auto/demand/disabled/boot/system) @@ -2256,7 +5107,7 @@ * FIX: cmk -D: drop obsolete (and always empty) Notification: * FIX: better handling of broken checks returning empty services * FIX: fix computation of weight when averaging - * FIX: fix detection of missing OIDs (led to empty lines) + * FIX: fix detection of missing OIDs (led to empty lines) * SNMP scan functions can now call oid(".1.3.6.1.4.1.9.9.13.1.3.1.3.*") That will return the *first* OID beginning with .1.3.6.1.4.1.9.9.13.1.3.1.3 * New config option: Set check_submission = "file" in order to write @@ -2346,7 +5197,7 @@ * windows_update: Added check to monitor windows update states on windows clients. The check monitors the number of pending updates and checks if a reboot is needed after updates have been installed. - * lnx_if: new check for Linux NICs compatible with if/if64 replacing + * lnx_if: new check for Linux NICs compatible with if/if64 replacing netif.* and netctr. * if/if64: also output performance data if operstate not as expected * if/if64: scan function now also detects devices where the first port @@ -2367,7 +5218,7 @@ * hp_procurve_cpu: add PNP template * hp_procurve_cpu: rename load to utilization, rename service to CPU utilizition * df,df_netapp,df_netapp32,hr_fs,vms_df: convert to mergeable dictionaries - * mbg_lantime_state,mbg_lantime_refclock: added new checks to monitor + * mbg_lantime_state,mbg_lantime_refclock: added new checks to monitor Meinberg LANTIME GPS clocks Livestatus: @@ -2375,8 +5226,8 @@ 1.1.10: Core, Setup, etc.: - * --flush now also deletes all autochecks - + * --flush now also deletes all autochecks + Checks & Agents: * FIX: hr_cpu: fix inventory on 1-CPU systems (thanks to Ulrich Kiermayr) @@ -2415,7 +5266,7 @@ * FIX: drbd now handles output of older version without an ep field * FIX: repaired df_netapp32 * FIX: Added SNMP scan function of df_netapp and df_netapp32 - * FIX: repaired apc_symmetra (was broken due to new option -Ot + * FIX: repaired apc_symmetra (was broken due to new option -Ot for SNMP) * FIX: df, hr_fs and other filesystem checks: fix bug if using magic number. levels_low is now honored. 
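The oid() wildcard described in the SNMP scan entry above lets a scan function probe a whole subtree; a hypothetical scan function as a sketch:

    # a trailing '.*' makes oid() return the value of the first OID below
    # the given prefix, or None if the subtree is empty
    def scan_cisco_envmon(oid):
        return oid(".1.3.6.1.4.1.9.9.13.1.3.1.3.*") is not None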
@@ -2474,7 +5325,7 @@ Multisite: * FIX: fix "too many values to unpack" when editing views in single layout mode (such as host or service detail) - * FIX: fix PNP icon in cases where host and service icons are displayed in + * FIX: fix PNP icon in cases where host and service icons are displayed in same view (found by Wolfgang Barth) * FIX: Fixed view column editor forgetting pending changes to other form fields @@ -2492,11 +5343,11 @@ * FIX: fix most compiler warnings (thanks to patch by Sami Kerola) * FIX: fix memory leak. The leak caused increasing check latency in some situations - + 1.1.9i8: Multisite: - * New "web service" for retrieving data from views as JSON or - Python objects. This allows to connect with NagStaMon + * New "web service" for retrieving data from views as JSON or + Python objects. This allows to connect with NagStaMon (requires patch in NagStaMon). Simply add &output_format=json or &output_format=python to your view URL. * Added two builtin views for NagStaMon. @@ -2519,7 +5370,7 @@ Checks & Agents: * PNP templates for if/if64: fix bugs: outgoing packets had been - same as incoming, errors and discards were swapped (thanks to + same as incoming, errors and discards were swapped (thanks to Paul Freeman) * Linux Agent: Added suport for vdx and xvdx volumes (KVM+Virtio, XEN+xvda) @@ -2537,7 +5388,7 @@ with dummy checks begin always UNKNOWN. Core, Setup, etc.: - * cmk -D: show ip address of host + * cmk -D: show ip address of host * Fix SNMP inventory find snmp misc checks inspite of negative scan function * Fix output of MB and GB values (fraction part was zero) @@ -2572,7 +5423,7 @@ 1.1.9i5: Multisite: - * custom notes: new macros $URL_PREFIX$ and $SITE$, making + * custom notes: new macros $URL_PREFIX$ and $SITE$, making multi site setups easier * new intelligent logwatch icon, using url_prefix in multi site setups @@ -2609,7 +5460,7 @@ Core, Setup, etc.: * Create alias 'cmk' for check_mk in bin/ (easier typing) - * Create alias 'mkp' for check_mk -P in bin/ (easier typing) + * Create alias 'mkp' for check_mk -P in bin/ (easier typing) Multisite: * Each column can now have a tooltip showing another painter (e.g. @@ -2618,7 +5469,7 @@ Put your icon files in /usr/share/check_mk/web/htdocs/images/icons. OMD users put the icons into ~/local/share/check_mk/web/htdocs/images/icons. * New automatic PNP-link icons: These icons automatically appear, if - the new livestatus is configured correctly (see below). + the new livestatus is configured correctly (see below). * new view property "hidebutton": allow to hide context button to a view. * Defaults views 'Services: OK', 'Services: WARN, etc. do now not create context buttons (cleans up button bar). @@ -2683,10 +5534,10 @@ Multisite: * WATO: Fixed omd mode/site detection and help for /etc/sudoers - * WATO: Use and show common log for pending changes + * WATO: Use and show common log for pending changes * Sidebar Quicksearch: Now really disabling browser built-in completion dropdown selections - + 1.1.9i1: INCOMPATIBLE CHANGES: * TCP / SNMP: hosts using TCP and SNMP now must use the tags 'tcp' @@ -2711,8 +5562,8 @@ call any Check_MK action with an invalid configuration. This saves you against mistyped variables. * Check kernel: converted performance data from counters to rates. This - fixes RRD problems (spikes) on reboots and also allows better access - to the peformance data for the Perf-O-Meters. 
Also changed service + fixes RRD problems (spikes) on reboots and also allows better access + to the peformance data for the Perf-O-Meters. Also changed service descriptions. You need to reinventurize the kernel checks. Your old RRDs will not be deleted, new ones will be created. * Multisite: parameters nagios_url, nagios_cgi_url and pnp_url are now @@ -2750,10 +5601,10 @@ * Renamed check functions of imm_health check from test_imm to imm_health to have valid function and check names. Please remove remove from inventory and re-inventory those checks. - * fc_brocade_port_detailed: allow to specify port state combinations not + * fc_brocade_port_detailed: allow to specify port state combinations not to be critical * megaraid_pdisks: Using the real enclosure number as check item now - * if/if64: allow to configure averaging of traffic over time (e.g. 15 min) + * if/if64: allow to configure averaging of traffic over time (e.g. 15 min) and apply traffic levels and averaged values. Also allow to specify relative traffic levels. Allow new parameter configuration via dictionary. Also allow to monitor unused ports and/or to ignore link status. @@ -2784,7 +5635,7 @@ * heartbeat_rscstatus: Allowing a list as expected state to expect multiple OK states * win_dhcp_pools agent plugin: Filtering additional error message on systems without dhcp server - * j4p_performance: Added experimental agent plugin fetching data via + * j4p_performance: Added experimental agent plugin fetching data via jmx4perl agent (does not need jmx4perl on Nagios) * j4p_performance.mem: added new experimental check for memory usage via JMX. * if/if64: added Perf-O-Meter for Multisite @@ -2882,7 +5733,7 @@ spaces) * Allow non-ASCII character in downtimes and comments * Added nagvis_base_url to multisite.mk example configuration - * Filter for host/service groups: use name instead of alias if + * Filter for host/service groups: use name instead of alias if user has no permissions for groups 1.1.8b3: @@ -2924,7 +5775,7 @@ Core, Setup, etc.: * Inventory: skip SNMP-only hosts on non-SNMP checktypes (avoids timeouts) * Improve error output for invalid checks - + Checks & Agents: * fix bug: run local and plugins also when spaces are in path name (such as C:\Program Files\Check_MK\plugins @@ -3015,7 +5866,7 @@ * fix problem with missing default hostgroup Multisite: - * Sidebar: Improved the quicksearch snapin. It can search for services, + * Sidebar: Improved the quicksearch snapin. It can search for services, servicegroups and hostgroups now. Simply add a prefix "s:", "sg:" or "hg:" to search for other objects than hosts. * View editor: fix bug which made it impossible to add more than 10 columns @@ -3085,7 +5936,7 @@ * Service Check_MK now displays overall processing time including agent communication and adds this as performance data * Fix bug: define_contactgroups was always assumed True. That led to duplicate - definitions in case of manual definitions in Nagios + definitions in case of manual definitions in Nagios Checks & Agents: * New Check: hp_proliant_da_phydrv for monitoring the state of physical disks @@ -3096,7 +5947,7 @@ HP Proliant Servers * PNP-templates: fix several templates not working with MULTIPLE rrds * new check mem.vmalloc for monitoring vmalloc address space in Linux kernel. 
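The 1.1.9i8 "web service" entry further above can be used directly from scripts; a hedged Python 2 sketch (host, site and view name are placeholders, and that the first row carries the column titles is an assumption about the JSON layout):

    import json, urllib2
    url = ("http://monitoring.example.com/check_mk/view.py"
           "?view_name=allhosts&output_format=json")
    rows = json.loads(urllib2.urlopen(url).read())
    columns, data = rows[0], rows[1:]   # header row, then one list per host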
- * Linux agent: add timeout of 2 secs to ntpq + * Linux agent: add timeout of 2 secs to ntpq * wmic_process: make check OK if no matching process is found Livestatus: @@ -3121,12 +5972,12 @@ Core, Setup, etc.: * New config option usewalk_hosts, triggers --usewalk during normal checking for selected hosts. - * new option --scan-parents for automatically finding and + * new option --scan-parents for automatically finding and configuring parent hosts (see online docu for details) * inventory check: put detailed list of unchecked items into long plugin output (to be seen in status details) * New configuration variable check_parameters, that allows to - override default parameters set by inventory, without defining + override default parameters set by inventory, without defining manual checks! Checks & Agents: @@ -3162,7 +6013,7 @@ * Multisite: improve performance in multi site environments by sending queries to sites in parallel * Multisite: improve performance in high latency situations by - allowing persistent Livestatus connections (set "persist" : True + allowing persistent Livestatus connections (set "persist" : True in sites, use current Livestatus version) Livestatus: @@ -3199,7 +6050,7 @@ * Packager: make sanity check prohibiting creating of package files in Check MK's directories * install_nagios.sh: Support Ubuntu 10.04 (Thanks to Ben) - + Checks & Agents: * New check ntp.time: Similar to 'ntp' but only honors the system peer (that NTP peer where ntpq -p prints a *). @@ -3230,7 +6081,7 @@ * New check heartbeat_rscstatus: Checks the local resource status of a heartbeat node * New check win_dhcp_pools: Checks the usage of Windows DHCP Server lease pools - * New check netapp_volumes: Checks on/offline-condition and states of netapp volumes + * New check netapp_volumes: Checks on/offline-condition and states of netapp volumes Multisite: * New view showing all PNP graphs of services with the same description @@ -3310,7 +6161,7 @@ * snmp: new handling of unprintable strings: hex dumps are converted into binary strings now. That way all strings can be displayed and no information is lost - nevertheless. - + Checks & Agents: * Solaris agent: fixed rare df problems on Solaris 10, fix problem with test -f (thanks to Ulf Hoffmann) @@ -3325,7 +6176,7 @@ Livestatus: * Delay starting of threads (and handling of socket) until Nagios has - started its event loop. This prevents showing services as PENDING + started its event loop. This prevents showing services as PENDING a short time during program start. 1.1.6b3: @@ -3435,7 +6286,7 @@ Multisite: * New column host painter with link to old Nagios services * Multisite: new configuration parameter default_user_role - + Livestatus: * Add missing LDFLAGS for compiling (useful for -g) @@ -3451,12 +6302,12 @@ * install_nagios.sh: fix link to Check_MK in sidebar * install_nagios.sh: switch PNP to version 0.6.3 * install_nagios.sh: better Apache-Config for Multisite setup - * do not search main.mk in ~ and . anymore (brought only trouble) + * do not search main.mk in ~ and . 
anymore (brought only trouble)
* clusters: new variable 'clustered_services_of', allowing for
  overlapping clusters (as proposed by Jörg Linge), see the sketch below
* install_nagios.sh: install snmp package (needed for snmp based checks)
* Fix owner/group of tarballs: set them to root/root
- * Remove dependency from debian agent package
+ * Remove dependency from debian agent package
* Fixed problem with inventory when using clustered_services
* tcp_connect_timeout: Applies now only for connect(), not for time
  of data transmission once a connection is established
@@ -3465,7 +6316,7 @@
  will get a debug log in case of 'invalid output from plugin...'
* ping-only-hosts: When ping only hosts are summarized, remove Check_MK
  and add single PING to summary host.
- * Service aggregation: fix state relationship: CRIT now worse than UNKNOWN
+ * Service aggregation: fix state relationship: CRIT now worse than UNKNOWN
* Make extra_service_conf work also for autogenerated PING on
  ping-only-hosts (groups, contactgroups still missing)
@@ -3489,7 +6340,7 @@
* Linux agent: The xinetd does not log each request anymore. Only
  failures are logged by xinetd now. This can be changed in the
  xinetd configuration files.
- * Check df: handle mountpoints containing spaces correctly
+ * Check df: handle mountpoints containing spaces correctly
  (need new inventorization if you have mountpoints with spaces)
* Check md on Linux: handle spare disks correctly
* Check md on Linux: fix case where (auto-read-only) separated by space
@@ -3509,7 +6360,7 @@
* Linux agent: fixed computation of number of processors on S390
* check netctr: add missing perfdata (was only sent on OK case)
* Check sylo: New check for monitoring the sylo state
-
+
Livestatus:
* Table hosts: New column 'services' listing all services of that host
* Column servicegroups:members: 'AuthUser' is now honored
@@ -3663,7 +6514,7 @@
* Added some new columns about Nagios status data to stable 'status'
* Added new table "comments"
* Added logic for count of pending services and hosts
- * Added several new columns in table 'status'
+ * Added several new columns in table 'status'
* Added new columns flap_detection and obsess_over_services in table services
* Fixed bug for double columns: filter truncated double to int
* Added new column status:program_version, showing the Nagios version
@@ -3703,7 +6554,7 @@
* Fix bug in PNP-template for Linux NICs (bytes and megabytes had been mixed up).
* Windows agent: fix bug in output of performance counters (where sometimes with , instead of .)
* Windows agent: outputs version if called with 'version'
-
+
Core, Setup, etc.:
* New SNMP scan feature: -I snmp scans all SNMP checks (currently only very few checks support this, though)
* make non-bulkwalk a default. Please edit bulkwalk_hosts or non_bulkwalk_hosts to change that
@@ -3722,7 +6573,7 @@
* Fixed bug in cluster checks: No cache files had been used.
  This can lead to missing logfile messages.
- * Check kernel: allow to set levels (e.g. on
+ * Check kernel: allow to set levels (e.g. on
  pgmajfaults)
* Check ps now allows to check for processes owned
  by a specific user (need update of Linux agent)
@@ -3733,7 +6584,7 @@
  for warning and critical. Default levels are at 101 / 101
* New check df_netapp32 which must be used
- for Netapps that do not support 64 bit
+ for Netapps that do not support 64 bit
  counters. Does the same as df_netapp
* Symlink PNP templates: df_netapp32 and df_netapp
  use same template as df
@@ -3746,7 +6597,7 @@
  snapshot filesystems with size 0 from inventory.
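The 'clustered_services_of' variable named above differs from plain clustered_services in that each cluster gets its own service list, which is what allows clusters to overlap. A minimal sketch, assuming a mapping from cluster name to service name patterns (all names are made up):

    # main.mk -- illustrative only
    clustered_services_of = {
        "cluster-a" : [ "fs_/shared/a" ],
        "cluster-b" : [ "fs_/shared/b" ],   # may share nodes with cluster-a
    }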
* Rudimentary support for monitoring ESX: monitor virtual filesystems with 'vdf' (using normal df - check of check_mk) and monitor state of machines + check of check_mk) and monitor state of machines with vcbVmName -s any (new check vmware_state). * Fixed bug in MRPE: check failed on empty performance data (e.g. from check_snmp: there is emptyness @@ -3787,9 +6638,9 @@ you to limit check_mk to a subset of your hosts (for testing) * New configuration parameter mem_extended_perfdata - sends more performance data on Linux (see + sends more performance data on Linux (see check manual for details) - * many improvements of Multiadmin web pages: optionally + * many improvements of Multiadmin web pages: optionally filter out services which are (not) currently in downtime (host or service itself), optionally (not) filter out summary hosts, show host status (down hosts), new action @@ -3808,15 +6659,15 @@ CRC errors and C3 discards * Fixed bug: snmp_info_single was missing in precompiled host checks - + 1.0.38: * New: check_mk's multiadmin tool (Python based web page). It allows mass administration of - services (enable/disable checks/notifications, + services (enable/disable checks/notifications, acknowledgements, downtimes). It does not need Nagios service- or host groups but works with a freeform search. - * Remove duplicate = 4 and settings['expect_regex'][3]: + args += ' -l ' if settings['expect_regex'][1]: args += ' -R' else: @@ -122,11 +130,17 @@ args = " -I $HOSTADDRESS$" + args return args +def check_http_desc(params): + if params[0].startswith("^"): + return params[0][1:] + #if params[1].get("ssl"): + # return "HTTPS %s" % params[0] + return "HTTP %s" % params[0] active_check_info['http'] = { "command_line" : '$USER1$/check_http $ARG1$', "argument_function" : check_http_arguments, - "service_description" : lambda params: "HTTP %s" % params[0], + "service_description" : check_http_desc, "has_perfdata" : True, } diff -Nru check-mk-1.2.2p3/check_icmp check-mk-1.2.6p12/check_icmp --- check-mk-1.2.2p3/check_icmp 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/check_icmp 2015-09-21 10:59:54.000000000 +0000 @@ -0,0 +1,52 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. 
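For reference, the check_http_desc helper defined above only changes service naming: a leading "^" in the first parameter suppresses the automatic "HTTP " prefix. An illustration with made-up parameter values:

    # behaviour of check_http_desc (values are examples, not defaults)
    check_http_desc(["www.example.com", {}])        # -> 'HTTP www.example.com'
    check_http_desc(["^My own service name", {}])   # -> 'My own service name'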
+ +def check_icmp_arguments(params): + args = [] + rta = 200, 500 + loss = 80, 100 + for key, value in params.items(): + if key == "timeout": + args.append("-t %d" % value) + elif key == "packets": + args.append("-n %d" % value) + elif key == "rta": + rta = value + elif key == "loss": + loss = value + args.append("-w %.2f,%d%%" % (rta[0], loss[0])) + args.append("-c %.2f,%d%%" % (rta[1], loss[1])) + args.append("'$HOSTADDRESS$'") + return " ".join(args) + + +active_check_info['icmp'] = { + "command_line" : '$USER1$/check_icmp $ARG1$', + "argument_function" : check_icmp_arguments, + "service_description" : lambda args: args.get("description", "PING"), + "has_perfdata" : True, +} + diff -Nru check-mk-1.2.2p3/check_ldap check-mk-1.2.6p12/check_ldap --- check-mk-1.2.2p3/check_ldap 2013-11-05 09:23:07.000000000 +0000 +++ check-mk-1.2.6p12/check_ldap 2015-09-21 10:59:54.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. diff -Nru check-mk-1.2.2p3/check_mail check-mk-1.2.6p12/check_mail --- check-mk-1.2.2p3/check_mail 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/check_mail 2015-06-24 09:48:36.000000000 +0000 @@ -0,0 +1,86 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. 
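The check_icmp_arguments function above falls back to rta=(200, 500) and loss=(80, 100) when no levels are configured. A made-up parameter set and the command line it produces:

    params = { "packets": 5, "rta": (150.0, 400.0), "loss": (10, 30) }
    check_icmp_arguments(params)
    # -> "-n 5 -w 150.00,10% -c 400.00,30% '$HOSTADDRESS$'"
    # (if several loop-generated options are set, e.g. timeout plus packets,
    # their relative order depends on Python 2 dict iteration order)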
+ +def check_mail_arguments(params): + args = '' + + fetch_proto, fetch_params = params['fetch'] + args += ' --protocol=%s' % quote_shell_string(fetch_proto) + + if 'server' in fetch_params: + args += ' --server=%s' % quote_shell_string(fetch_params['server']) + else: + args += ' --server=$HOSTADDRESS$' + + fetch_use_ssl, fetch_port = fetch_params['ssl'] + if fetch_use_ssl: + args += ' --ssl' + if fetch_port != None: + args += ' --port=%d' % fetch_port + + args += ' --username=%s' % quote_shell_string(fetch_params['auth'][0]) + args += ' --password=%s' % quote_shell_string(fetch_params['auth'][1]) + + if 'connect_timeout' in params: + args += ' --connect-timeout=%d' % params['connect_timeout'] + + if 'forward' in params: + forward = params['forward'] + args += ' --forward-ec' + if 'method' in forward: + args += ' --forward-method=%s' % quote_shell_string(forward['method']) + + if 'match_subject' in forward: + args += ' --match-subject=%s' % quote_shell_string(forward['match_subject']) + + if 'facility' in forward: + args += ' --forward-facility=%d' % forward['facility'] + + if 'host' in forward: + args += ' --forward-host=%s' % quote_shell_string(forward['host']) + + if forward.get('application'): + args += ' --forward-app=%s' % quote_shell_string(forward['application']) + + if 'body_limit' in forward: + args += ' --body-limit=%d' % forward['body_limit'] + + if 'cleanup' in forward: + if forward['cleanup'] == True: + args += ' --cleanup=delete' + else: + args += ' --cleanup=%s' % quote_shell_string(forward['cleanup']) + + return args + + +active_check_info['mail'] = { + "command_line" : '$USER1$/check_mail $ARG1$', + "argument_function" : check_mail_arguments, + "service_description" : lambda params: params['service_description'], + "has_perfdata" : True, +} + diff -Nru check-mk-1.2.2p3/check_mail_loop check-mk-1.2.6p12/check_mail_loop --- check-mk-1.2.2p3/check_mail_loop 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/check_mail_loop 2015-06-24 09:48:36.000000000 +0000 @@ -0,0 +1,86 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. 
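An illustrative rule value for the check_mail argument function above; the structure mirrors what the code unpacks ('fetch' is a (protocol, settings) pair, 'ssl' a (use_ssl, port) pair, 'auth' a (user, password) pair), and all concrete values are made up:

    params = {
        "service_description": "Mailbox INBOX",   # becomes the service name
        "fetch": ("imap", {
            "server": "mail.example.com",         # omit to use $HOSTADDRESS$
            "ssl": (True, 993),                   # (use_ssl, port or None)
            "auth": ("monitoring", "secret"),
        }),
        "connect_timeout": 10,
    }
    # check_mail_arguments(params) then yields, assuming quote_shell_string
    # wraps its argument in single quotes:
    #   --protocol='imap' --server='mail.example.com' --ssl --port=993
    #   --username='monitoring' --password='secret' --connect-timeout=10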
+
+def check_mail_loop_arguments(params):
+    args = ''
+
+    if 'smtp_server' in params:
+        args += ' --smtp-server=%s' % quote_shell_string(params['smtp_server'])
+    else:
+        args += ' --smtp-server=$HOSTADDRESS$'
+
+    if 'smtp_tls' in params:
+        args += ' --smtp-tls'
+
+    if 'smtp_port' in params:
+        args += ' --smtp-port=%d' % params['smtp_port']
+
+    if 'smtp_auth' in params:
+        args += ' --smtp-username=%s' % quote_shell_string(params['smtp_auth'][0])
+        args += ' --smtp-password=%s' % quote_shell_string(params['smtp_auth'][1])
+
+    fetch_proto, fetch_params = params['fetch']
+    args += ' --fetch-protocol=%s' % quote_shell_string(fetch_proto)
+
+    if 'server' in fetch_params:
+        args += ' --fetch-server=%s' % quote_shell_string(fetch_params['server'])
+    else:
+        args += ' --fetch-server=$HOSTADDRESS$'
+
+    fetch_use_ssl, fetch_port = fetch_params['ssl']
+    if fetch_use_ssl:
+        args += ' --fetch-ssl'
+    if fetch_port != None:
+        args += ' --fetch-port=%d' % fetch_port
+
+    args += ' --fetch-username=%s' % quote_shell_string(fetch_params['auth'][0])
+    args += ' --fetch-password=%s' % quote_shell_string(fetch_params['auth'][1])
+
+    args += ' --mail-from=%s' % quote_shell_string(params['mail_from'])
+    args += ' --mail-to=%s' % quote_shell_string(params['mail_to'])
+
+    if 'connect_timeout' in params:
+        args += ' --connect-timeout=%d' % params['connect_timeout']
+
+    if 'delete_messages' in params:
+        args += ' --delete-messages'
+
+    args += ' --status-suffix=%s' % quote_shell_string(g_hostname + '-' + params['item'])
+
+    if 'duration' in params:
+        args += ' --warning=%d' % params['duration'][0]
+        args += ' --critical=%d' % params['duration'][1]
+
+    return args
+
+
+active_check_info['mail_loop'] = {
+    "command_line"        : '$USER1$/check_mail_loop $ARG1$',
+    "argument_function"   : check_mail_loop_arguments,
+    "service_description" : lambda params: "Mail Loop %s" % params['item'],
+    "has_perfdata"        : True,
+}
+
Binary files /tmp/tmpz1J_py/xquijR6Ryw/check-mk-1.2.2p3/checkman.tar.gz and /tmp/tmpz1J_py/GXS8RRKTxu/check-mk-1.2.6p12/checkman.tar.gz differ
diff -Nru check-mk-1.2.2p3/check_mk check-mk-1.2.6p12/check_mk
--- check-mk-1.2.2p3/check_mk	2013-11-05 09:23:07.000000000 +0000
+++ check-mk-1.2.6p12/check_mk	2015-06-24 09:48:36.000000000 +0000
@@ -7,7 +7,7 @@
 # |           | |___| | | |  __/ (__|   <    | |  | | . \            |
 # |            \____|_| |_|\___|\___|_|\_\___|_|  |_|_|\_\           |
 # |                                                                  |
-# | Copyright Mathias Kettner 2013             mk@mathias-kettner.de |
+# | Copyright Mathias Kettner 2014             mk@mathias-kettner.de |
 # +------------------------------------------------------------------+
 #
 # This file is part of Check_MK.
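A matching sketch for the check_mail_loop argument function above (values made up; note that 'delete_messages' is tested for key presence only, and that g_hostname is a global supplied by the Check_MK core):

    params = {
        "item": "roundtrip",            # used in "Mail Loop %s" and --status-suffix
        "mail_from": "mk@example.com",
        "mail_to": "mk@example.com",
        "fetch": ("pop3", {
            "ssl": (False, None),       # (use_ssl, port or None)
            "auth": ("monitoring", "secret"),
        }),
        "duration": (120, 300),         # warn/crit round trip time in seconds
        "delete_messages": True,        # key presence alone enables --delete-messages
    }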
@@ -57,7 +57,7 @@ def check_only_from(item, param, info): if param == None: - return (1, "WARN - IP access restriction not monitored for this host") + return (1, "IP access restriction not monitored for this host") for line in info: if line[0] == "OnlyFrom:": an = [] @@ -85,10 +85,15 @@ status = 1 infotexts.append("agent blocks: %s" % (" ".join(too_few))) if status == 1: - return (1, "WARN - invalid access configuration: %s" % (", ".join(infotexts))) + return (1, "invalid access configuration: %s" % (", ".join(infotexts))) else: - return (0, "OK - allowed IP ranges: %s" % (" ".join(allowed_nets))) - return (3, "UNKNOWN - Agent does not send OnlyFrom: header") + return (0, "allowed IP ranges: %s" % (" ".join(allowed_nets))) + return (3, "Agent does not send OnlyFrom: header") -check_info['check_mk.only_from'] = (check_only_from, "Check_MK Agent Access", 0, inventory_only_from) + +check_info["check_mk.only_from"] = { + 'check_function': check_only_from, + 'inventory_function': inventory_only_from, + 'service_description': 'Check_MK Agent Access', +} diff -Nru check-mk-1.2.2p3/check_mk_active-disk_smb.php check-mk-1.2.6p12/check_mk_active-disk_smb.php --- check-mk-1.2.2p3/check_mk_active-disk_smb.php 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/check_mk_active-disk_smb.php 2015-06-24 09:48:39.000000000 +0000 @@ -0,0 +1,63 @@ + diff -Nru check-mk-1.2.2p3/check_mk_active-http.php check-mk-1.2.6p12/check_mk_active-http.php --- check-mk-1.2.2p3/check_mk_active-http.php 2013-03-04 11:48:42.000000000 +0000 +++ check-mk-1.2.6p12/check_mk_active-http.php 2015-06-24 09:48:39.000000000 +0000 @@ -6,7 +6,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. @@ -27,12 +27,12 @@ $opt[1] = "-X0 --vertical-label \"Response Time (ms)\" --title \"$hostname / $desc\" "; $def[1] = "" - . "DEF:var1=$RRDFILE[1]:$DS[1]:MAX " + . "DEF:var1=$RRDFILE[1]:$DS[1]:MAX " . "CDEF:ms=var1,1000,* " - . "AREA:ms#66ccff:\"Response Time \" " - . "LINE1:ms#000000:\"\" " - . "GPRINT:ms:LAST:\"%3.3lg ms LAST \" " - . "GPRINT:ms:MAX:\"%3.3lg ms MAX \" " + . "AREA:ms#66ccff:\"Response Time \" " + . "LINE1:ms#000000:\"\" " + . "GPRINT:ms:LAST:\"%3.3lg ms LAST \" " + . "GPRINT:ms:MAX:\"%3.3lg ms MAX \" " . "GPRINT:ms:AVERAGE:\"%3.3lg ms AVERAGE \" " ; @@ -44,10 +44,10 @@ if ($CRIT[2] != "") $def[2] .= "HRULE:$CRIT[2]#FF0000 "; $def[2] .= "" - . "AREA:size#cc66ff:\"Size of response \" " - . "LINE1:size#000000:\"\" " - . "GPRINT:size:LAST:\"%3.0lf Bytes LAST \" " - . "GPRINT:size:MAX:\"%3.0lf Bytes MAX \" " + . "AREA:size#cc66ff:\"Size of response \" " + . "LINE1:size#000000:\"\" " + . "GPRINT:size:LAST:\"%3.0lf Bytes LAST \" " + . "GPRINT:size:MAX:\"%3.0lf Bytes MAX \" " . 
"GPRINT:size:AVERAGE:\"%3.0lf Bytes AVERAGE \" " ; diff -Nru check-mk-1.2.2p3/check_mk_active-icmp.php check-mk-1.2.6p12/check_mk_active-icmp.php --- check-mk-1.2.2p3/check_mk_active-icmp.php 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/check_mk_active-icmp.php 2015-06-24 09:48:39.000000000 +0000 @@ -0,0 +1,59 @@ + diff -Nru check-mk-1.2.2p3/check_mk_active-mail_loop.php check-mk-1.2.6p12/check_mk_active-mail_loop.php --- check-mk-1.2.2p3/check_mk_active-mail_loop.php 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/check_mk_active-mail_loop.php 2015-06-24 09:48:39.000000000 +0000 @@ -0,0 +1,37 @@ + diff -Nru check-mk-1.2.2p3/check_mk_active-mail.php check-mk-1.2.6p12/check_mk_active-mail.php --- check-mk-1.2.2p3/check_mk_active-mail.php 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/check_mk_active-mail.php 2015-06-24 09:48:39.000000000 +0000 @@ -0,0 +1,33 @@ + diff -Nru check-mk-1.2.2p3/check_mk_active-notify_count.php check-mk-1.2.6p12/check_mk_active-notify_count.php --- check-mk-1.2.2p3/check_mk_active-notify_count.php 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/check_mk_active-notify_count.php 2015-06-24 09:48:39.000000000 +0000 @@ -0,0 +1,47 @@ + $ds_val) { + $contact_name = substr($NAME[$i], 0, strpos($NAME[$i], '_')); + $varname = "notto$nr"; + $def[1] .= "DEF:$varname=".$RRDFILE[$i].":$ds_val:MAX " ; + + $def[1] .= "LINE1:$varname#".$line_colors[$i % 8].":\"".sprintf("%-20s", $contact_name)."\" "; + $def[1] .= "GPRINT:$varname:MAX:\"%3.lf\\n\" "; + $nr += 1; +} + +?> diff -Nru check-mk-1.2.2p3/check_mk_active-tcp.php check-mk-1.2.6p12/check_mk_active-tcp.php --- check-mk-1.2.2p3/check_mk_active-tcp.php 2013-03-04 11:48:42.000000000 +0000 +++ check-mk-1.2.6p12/check_mk_active-tcp.php 2015-06-24 09:48:39.000000000 +0000 @@ -6,7 +6,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. @@ -28,11 +28,11 @@ $opt[1] = "-X0 --vertical-label \"Response Time (ms)\" --title \"$hostname / $desc\" "; $def[1] = "" - . "DEF:var1=$RRDFILE[1]:$DS[1]:MAX " + . "DEF:var1=$RRDFILE[1]:$DS[1]:MAX " . "CDEF:ms=var1,1000,* " - . "AREA:ms#20dd30:\"Response Time \" " - . "LINE1:ms#000000:\"\" " - . "GPRINT:ms:LAST:\"%3.3lg ms LAST \" " - . "GPRINT:ms:MAX:\"%3.3lg ms MAX \" " - . "GPRINT:ms:AVERAGE:\"%3.3lg ms AVERAGE \" " + . "AREA:ms#20dd30:\"Response Time \" " + . "LINE1:ms#000000:\"\" " + . "GPRINT:ms:LAST:\"%3.3lg ms LAST \" " + . "GPRINT:ms:MAX:\"%3.3lg ms MAX \" " + . "GPRINT:ms:AVERAGE:\"%3.3lg ms AVERAGE \" " ?> Binary files /tmp/tmpz1J_py/xquijR6Ryw/check-mk-1.2.2p3/check-mk-agent_1.2.6p12-1_all.deb and /tmp/tmpz1J_py/GXS8RRKTxu/check-mk-1.2.6p12/check-mk-agent_1.2.6p12-1_all.deb differ Binary files /tmp/tmpz1J_py/xquijR6Ryw/check-mk-1.2.2p3/check-mk-agent-1.2.6p12-1.noarch.rpm and /tmp/tmpz1J_py/GXS8RRKTxu/check-mk-1.2.6p12/check-mk-agent-1.2.6p12-1.noarch.rpm differ diff -Nru check-mk-1.2.2p3/check_mk_agent.aix check-mk-1.2.6p12/check_mk_agent.aix --- check-mk-1.2.2p3/check_mk_agent.aix 2013-11-05 09:22:41.000000000 +0000 +++ check-mk-1.2.6p12/check_mk_agent.aix 2015-09-21 10:59:53.000000000 +0000 @@ -6,7 +6,7 @@ # | | |___| | | | __/ (__| < | | | | . 
\            |
 # |            \____|_| |_|\___|\___|_|\_\___|_|  |_|_|\_\           |
 # |                                                                  |
-# | Copyright Mathias Kettner 2013             mk@mathias-kettner.de |
+# | Copyright Mathias Kettner 2014             mk@mathias-kettner.de |
 # +------------------------------------------------------------------+
 #
 # This file is part of Check_MK.
@@ -23,8 +23,13 @@
 # to the Free Software Foundation, Inc., 51 Franklin St,  Fifth Floor,
 # Boston, MA 02110-1301 USA.

-export MK_LIBDIR="/to/be/changed"
-export MK_CONFDIR="/to/be/changed"
+# Remove locale settings to eliminate localized outputs where possible
+export LC_ALL=C
+unset LANG
+
+export MK_LIBDIR="/usr/check_mk/lib"
+export MK_CONFDIR="/usr/check_mk/conf"
+export MK_VARDIR="/tmp/check_mk"

 # All executables in PLUGINSDIR will simply be executed and their
 # output appended to the output of the agent. Plugins define their own
@@ -47,25 +52,109 @@
     exec <&- 2>/dev/null
 fi

+# Shell version of the waitmax utility, that limits the runtime of
+# commands. This version does not conserve the original exit code
+# of the command. It is successful if the command terminated
+# in time.
+function waitmax
+{
+    TIMEOUT=${1}0
+    SIGNAL=9
+    shift
+
+    # Run command in background
+    ksh -c "$*" &
+    PID=$!
+
+    # Wait for termination within TIMEOUT seconds
+    while [ $TIMEOUT -gt 0 ]
+    do
+        TIMEOUT=$((TIMEOUT - 1))
+        if [ ! -e /proc/$PID ] ; then
+            return 0
+        fi
+        perl -e "select(undef, undef, undef, 0.1);"
+    done
+
+    # Process did not terminate in time. Kill and
+    # return with an error
+    kill -9 $PID
+    return 255
+}
+
+function run_cached {
+    NAME=$1
+    MAXAGE=$2
+    shift 2
+    CMDLINE=$*
+    MAXAGE=$MAXAGE*60
+
+    if [ ! -e $MK_VARDIR/cache ] ; then mkdir $MK_VARDIR/cache ; fi
+    CACHE_FILE=$MK_VARDIR/cache/$NAME.cache
+
+    USE_CACHE_FILE=""
+    # Check if file exists and is recent enough
+    if [ -s $CACHE_FILE ]
+    then
+        MTIME=$(/usr/bin/perl -e 'if (! -f $ARGV[0]){die "0000000"};$mtime=(stat($ARGV[0]))[9];print ($^T-$mtime);' $CACHE_FILE )
+        if (( $MTIME < $MAXAGE )) ; then
+            USE_CACHE_FILE=1
+        fi
+    fi
+    if [ -s "$CACHE_FILE" ]
+    then
+        cat $CACHE_FILE
+    fi
+    if [ -z "$USE_CACHE_FILE" -a ! -e "$CACHE_FILE.new" ]
+    then
+        nohup sh -c $CMDLINE > $CACHE_FILE.new 2> /dev/null && mv $CACHE_FILE.new $CACHE_FILE &
+    fi
+}
+
 echo '<<>>'
-echo Version: 1.2.2p3
+echo Version: 1.2.6p12
 echo AgentOS: aix

 echo '<<>>'
 df -kP | sed 's/ / - /' | grep -v ^/proc | grep -v ^Filesystem | grep -v :

+# Check for hanging NFS mounts.
This needs a GNU stat installed in the PATH +if type stat >/dev/null 2>&1 ; then + echo '<<>>' + mount | grep ' nfs' | awk '{print $3;}' | \ + while read MP + do + waitmax 2 stat -f -c '"'$MP' ok - - - -"' "$MP" || \ + echo "$MP hanging 0 0 0 0" + done + echo '<<>>' + mount | grep ' cifs' | awk '{print $3;}' | \ + while read MP + do + waitmax 2 stat -f -c '"'$MP' ok - - - -"' "$MP" || \ + echo "$MP hanging 0 0 0 0" + done +fi + echo '<<>>' ps -ef -F user,vszsize,rssize,pcpu,args | sed -e 1d -e 's/ *\([^ ]*\) *\([^ ]*\) *\([^ ]*\) *\([^ ]*\) */(\1,\2,\3,\4) /' if type lparstat >/dev/null 2>&1 then echo '<<>>' - lparstat | tail -n1 + lparstat 1 1 fi echo '<<>>' vmstat | tail -n1 +echo '<<>>' +iostat -d | tr -s ' ' | grep hdisk + +echo '<<>>' +vmstat -v | tr -s ' ' +swap -s + echo '<<>>' mpstat -a | tail -n1 @@ -94,16 +183,16 @@ done -if which ntpq > /dev/null 2>&1 ; then - if [ $(lssrc -s xntpd|grep -c active) -gt 0 ] ; then - echo '<<>>' - ntpq -p | sed -e 1,2d -e 's/^\(.\)/\1 /' -e 's/^ /%/' - fi -fi +if which ntpq > /dev/null 2>&1 ; then + if [ $(lssrc -s xntpd|grep -c active) -gt 0 ] ; then + echo '<<>>' + ntpq -np | sed -e 1,2d -e 's/^\(.\)/\1 /' -e 's/^ /%/' + fi +fi echo '<<>>' - lspath -F"name parent status" + lspath -F"name parent status" echo '<<>>' # -L disables LVM lock for the query. Avoids blocking while LVM is @@ -114,6 +203,13 @@ echo '<<>>' netstat -ntfinet | awk ' /^tcp/ { c[$6]++; } END { for (x in c) { print x, c[x]; } }' +# Libelle Business Shadow +if type trd >/dev/null 2>&1 +then + echo '<<>>' + trd -s +fi + if cd $PLUGINSDIR 2>/dev/null then @@ -123,6 +219,13 @@ ./$skript fi done + + # Call some plugins only every X'th second + for skript in [1-9]*/* ; do + if [ -x "$skript" ] ; then + run_cached plugins_${skript//\//\#} ${skript%/*} "$skript" + fi + done fi @@ -135,6 +238,7 @@ ./$skript fi done + fi # MK's Remote Plugin Executor @@ -149,3 +253,6 @@ echo "(${PLUGIN##*/}) $descr $? $OUTPUT" done fi + + + diff -Nru check-mk-1.2.2p3/check_mk_agent.freebsd check-mk-1.2.6p12/check_mk_agent.freebsd --- check-mk-1.2.2p3/check_mk_agent.freebsd 2013-11-05 09:22:41.000000000 +0000 +++ check-mk-1.2.6p12/check_mk_agent.freebsd 2015-09-21 10:59:53.000000000 +0000 @@ -1,4 +1,4 @@ -#!/bin/sh +#!/usr/local/bin/bash # +------------------------------------------------------------------+ # | ____ _ _ __ __ _ __ | # | / ___| |__ ___ ___| | __ | \/ | |/ / | @@ -6,7 +6,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. @@ -40,11 +40,13 @@ export LC_ALL=C unset LANG -export MK_LIBDIR="/usr/lib/check_mk_agent" +export MK_LIBDIR="/usr/local/lib/check_mk_agent" export MK_CONFDIR="/etc/check_mk" +export MK_TMPDIR="/var/run/check_mk" + # Make sure, locally installed binaries are found -PATH=$PATH:/usr/local/bin +PATH=$PATH:/usr/local/bin:/usr/local/sbin # All executables in PLUGINSDIR will simply be executed and their # ouput appended to the output of the agent. Plugins define their own @@ -62,16 +64,53 @@ then set -xv else - exec <&- 2>/dev/null + exec /dev/null fi +# Runs a command asynchronous by use of a cache file +function run_cached() { + if [ "$1" = -s ] ; then local section="echo '<<<$2>>>' ; " ; shift ; fi + local NAME=$1 + local MAXAGE=$2 + shift 2 + local CMDLINE="$section$@" + + if [ ! 
-d $MK_TMPDIR/cache ]; then mkdir -p $MK_TMPDIR/cache ; fi + CACHEFILE="$MK_TMPDIR/cache/$NAME.cache" + + # Check if the creation of the cache takes suspiciously long and return + # nothing if the age (access time) of $CACHEFILE.new is twice the MAXAGE + local NOW=$(date +%s) + if [ -e "$CACHEFILE.new" ] ; then + local CF_ATIME=$(stat -f "%a" "$CACHEFILE.new") + if [ $((NOW - CF_ATIME)) -ge $((MAXAGE * 2)) ] ; then + return + fi + fi + + # Check if cache file exists and is recent enough + if [ -s "$CACHEFILE" ] ; then + local MTIME=$(stat -f "%m" "$CACHEFILE") + if [ $((NOW - MTIME)) -le $MAXAGE ] ; then local USE_CACHEFILE=1 ; fi + # Output the file in any case, even if it is + # outdated. The new file will not yet be available + cat "$CACHEFILE" + fi + + # Cache file outdated and new job not yet running? Start it + if [ -z "$USE_CACHEFILE" -a ! -e "$CACHEFILE.new" ] ; then + echo "$CMDLINE" | daemon bash -o noclobber > $CACHEFILE.new && mv $CACHEFILE.new $CACHEFILE || rm -f $CACHEFILE $CACHEFILE.new & + fi +} + echo '<<>>' -echo Version: 1.2.2p3 +echo Version: 1.2.6p12 echo AgentOS: freebsd osver="$(uname -r)" +is_jailed="$(sysctl -n security.jail.jailed)" # Partitionen (-P verhindert Zeilenumbruch bei langen Mountpunkten) @@ -91,7 +130,8 @@ # Filesystem usage for ZFS if type zfs > /dev/null 2>&1 ; then echo '<<>>' - zfs get -Hp name,quota,used,avail,mountpoint,type + zfs get -Hp name,quota,used,avail,mountpoint,type -t filesystem,volume || \ + zfs get -Hp name,quota,used,avail,mountpoint,type echo '[df]' df -kP -t zfs | sed 1d fi @@ -118,8 +158,8 @@ # done #fi -# Check mount options. -# FreeBSD doesn't do remount-ro on errors, but the users might consider +# Check mount options. +# FreeBSD doesn't do remount-ro on errors, but the users might consider # security related mount options more important. echo '<<>>' mount -p -t ufs @@ -127,16 +167,10 @@ # processes including username, without kernel processes echo '<<>>' COLUMNS=10000 -ps ax -o user,vsz,rss,pcpu,command | sed -e 1d -e 's/ *\([^ ]*\) *\([^ ]*\) *\([^ ]*\) *\([^ ]*\) */(\1,\2,\3,\4) /' - -# Memory Usage -# currently we'll need sysutils/muse for this. -if [ -x /usr/local/bin/muse ] - then - echo '<<>>' -# yes, i don't know sed well. - muse -k 2>/dev/null | sed 's/Total/MemTotal/' | sed 's/Free/MemFree/' - swapinfo -k 1K | tail -n 1 | awk '{ print "SwapTotal: "$2" kB\nSwapFree: "$4" kB" }' +if [ "$is_jailed" = "0" ]; then + ps ax -o state,user,vsz,rss,pcpu,command | sed -e 1d -e '/\([^ ]*J\) */d' -e 's/ *\([^ ]*\) *\([^ ]*\) *\([^ ]*\) *\([^ ]*\) *\([^ ]*\) */(\2,\3,\4,\5) /' +else + ps ax -o user,vsz,rss,pcpu,command | sed -e 1d -e 's/ *\([^ ]*\) *\([^ ]*\) *\([^ ]*\) *\([^ ]*\) */(\1,\2,\3,\4) /' fi @@ -146,18 +180,9 @@ # Calculate the uptime in seconds since epoch compatible to /proc/uptime in linux echo '<<>>' -echo `date +%s` - `sysctl -n kern.boottime | cut -d' ' -f 4,7 | tr ',' '.' 
| tr -d ' '` | bc - -# Network interfaces (Link, Autoneg, Speed) -# This requires ethtool -#if which ethtool > /dev/null -#then -# echo '<<>>' -# for eth in $(cat /proc/net/dev | sed -rn -e 's/[[:space:]]*//g' -e '/ *([^:]):.*/s//\1/p' | egrep -vx '(lo|sit.*)') -# do -# echo $eth $(ethtool $eth | egrep '(Speed|Duplex|Link detected|Auto-negotiation):' | cut -d: -f2 | sed 's/ *//g') -# done -#fi +up_seconds=$(( `date +%s` - `sysctl -n kern.boottime | cut -f1 -d\, | awk '{print $4}'`)) +idle_seconds=$(ps axw | grep idle | grep -v grep | awk '{print $4}' | cut -f1 -d\: ) +echo "$up_seconds $idle_seconds" # Platten- und RAID-Status von LSI-Controlleren, falls vorhanden #if which cfggen > /dev/null ; then @@ -175,19 +200,19 @@ # Soft-RAID -# need to replace with extra section for GEOM and ZFS because of larger / -# different featuresets. - - -# Performancecounter Platten -#echo '<<>>' -#date +%s -#egrep ' ([sh]d[a-z]*|cciss/c[0-9]+d[0-9]+) ' < /proc/diskstats +echo '<<>>' +gmirror status | grep -v ^Name # Performancecounter Kernel -#echo '<<>>' -#date +%s -#cat /proc/vmstat /proc/stat +echo "<<>>" +date +%s +forks=`sysctl -n vm.stats.vm.v_forks` +vforks=`sysctl -n vm.stats.vm.v_vforks` +rforks=`sysctl -n vm.stats.vm.v_rforks` +kthreads=`sysctl -n vm.stats.vm.v_kthreads` +echo "cpu" `sysctl -n kern.cp_time | awk ' { print $1" "$2" "$3" "$5" "$4 } '` +echo "ctxt" `sysctl -n vm.stats.sys.v_swtch` +echo "processes" `expr $forks + $vforks + $rforks + $kthreads` # Network device statistics (Packets, Collisions, etc) # only the "Link/Num" interface has all counters. @@ -195,7 +220,7 @@ date +%s if [ "$(echo $osver | cut -f1 -d\. )" -gt "8" ]; then netstat -inb | egrep -v '(^Name|lo|plip)' | grep Link | awk '{print $1" "$8" "$5" "$6" "$7" 0 0 0 0 "$11" "$9" "$10" 0 0 0 0 0"}' -else +else # pad output for freebsd 7 and before netstat -inb | egrep -v '(^Name|lo|plip)' | grep Link | awk '{print $1" "$7" "$5" "$6" 0 0 0 0 0 "$10" "$8" "$9" 0 0 "$11" 0 0"}' fi @@ -213,21 +238,30 @@ fi -# State of LSI MegaRAID controller via MegaCli. You can download that tool from: -# http://www.lsi.com/DistributionSystem/AssetDocument/support/downloads/megaraid/miscellaneous/linux/2.00.15_Linux_MegaCLI.zip -#if which MegaCli >/dev/null ; then -# echo '<<>>' -# MegaCli -PDList -aALL -NoLog < /dev/null | egrep 'Enclosure|Raw Size|Slot Number|Device Id|Firmware state|Inquiry' -# echo '<<>>' -# MegaCli -LDInfo -Lall -aALL -NoLog < /dev/null | egrep 'Size|State|Number|Adapter|Virtual' -#fi +# State of LSI MegaRAID controller via MegaCli. +# To install: pkg install megacli +if which MegaCli >/dev/null ; then + echo '<<>>' + MegaCli -PDList -aALL -NoLog < /dev/null | egrep 'Enclosure|Raw Size|Slot Number|Device Id|Firmware state|Inquiry' + echo '<<>>' + MegaCli -LDInfo -Lall -aALL -NoLog < /dev/null | egrep 'Size|State|Number|Adapter|Virtual' + echo '<<>>' + MegaCli -AdpBbuCmd -GetBbuStatus -aALL -NoLog < /dev/null | grep -v Exit +fi +# OpenVPN Clients. 
+# Correct log location unknown, sed call might also be broken +if [ -e /var/log/openvpn/openvpn-status.log ] ; then + echo '<<>>' + sed -n -e '/CLIENT LIST/,/ROUTING TABLE/p' < /var/log/openvpn/openvpn-status.log | sed -e 1,3d -e '$d' +fi + if which ntpq > /dev/null 2>&1 ; then echo '<<>>' # remote heading, make first column space separated - ntpq -p | sed -e 1,2d -e 's/^\(.\)/\1 /' -e 's/^ /%/' + ntpq -np | sed -e 1,2d -e 's/^\(.\)/\1 /' -e 's/^ /%/' fi @@ -257,7 +291,7 @@ # for NODE in $(cl_status listnodes); do # if [ $NODE != $HOSTNAME ]; then # STATUS=$(cl_status nodestatus $NODE) -# echo -n "$NODE $STATUS" +# echo -n "$NODE $STATUS" # for LINK in $(cl_status listhblinks $NODE 2>/dev/null); do # echo -n " $LINK $(cl_status hblinkstatus $NODE $LINK)" # done @@ -288,6 +322,13 @@ mailq | tail -n 6 fi +#Check status of qmail mailqueue +if type qmail-qstat >/dev/null +then + echo "<<>>" + qmail-qstat +fi + # check zpool status if [ -x /sbin/zpool ]; then echo "<<>>" @@ -295,37 +336,81 @@ fi +# Statgrab +# To install: pkg install libstatgrab +if type statgrab 2>&1 >/dev/null ; then + + statgrab_vars="const. disk. general. page. proc. user." + statgrab_vars_mem="mem. swap." + statgrab_sections="proc disk page" + + statgrab $statgrab_vars 1> /tmp/statgrab.$$ + statgrab $statgrab_vars_mem 1>>/tmp/statgrab.$$ + + + for s in $statgrab_sections + do + echo "<<>>" + grep "^${s}\." /tmp/statgrab.$$ | cut -d. -f2-99 | sed 's/ *= */ /' + done + + echo '<<>>' + statgrab net. 2>&1 | cut -d. -f2-99 | sed 's/ *= */ /' + + echo '<<>>' + egrep "^(swap|mem)\." /tmp/statgrab.$$ | sed 's/ *= */ /' + + [ -f /tmp/statgrab.$$ ] && rm -f /tmp/statgrab.$$ +fi + + # Fileinfo-Check: put patterns for files into /etc/check_mk/fileinfo.cfg if [ -r "$MK_CONFDIR/fileinfo.cfg" ] ; then echo '<<>>' date +%s - stat -f "%N|%z|%m" $(cat "$MK_CONFDIR/fileinfo.cfg") + for line in $(cat "$MK_CONFDIR/fileinfo.cfg") + do + stat -f "%N|%z|%m" $line 2>/dev/null + + if [ $? -ne 0 ]; then + echo "$line|missing|$(date +%s)" + fi + done fi -# Einbinden von lokalen Plugins, die eine eigene Sektion ausgeben -if cd $PLUGINSDIR -then - for skript in $(ls) - do - if [ -x "$skript" ] ; then - ./$skript - fi - done +# Local checks +echo '<<>>' +if cd $LOCALDIR ; then + for skript in $(ls) ; do + if [ -f "$skript" -a -x "$skript" ] ; then + ./$skript + fi + done + # Call some plugins only every X'th minute + for skript in [1-9]*/* ; do + if [ -x "$skript" ] ; then + run_cached local_${skript//\//\\} ${skript%/*} "$skript" + fi + done fi -# Lokale Einzelchecks -echo '<<>>' -if cd $LOCALDIR -then - for skript in $(ls) - do - if [ -x "$skript" ] ; then - ./$skript - fi - done +# Plugins +if cd $PLUGINSDIR; then + for skript in $(ls) ; do + if [ -f "$skript" -a -x "$skript" ] ; then + ./$skript + fi + done + # Call some plugins only every X'th minute + for skript in [1-9]*/* ; do + if [ -x "$skript" ] ; then + run_cached plugins_${skript//\//\\} ${skript%/*} "$skript" + fi + done fi + # MK's Remote Plugin Executor if [ -e "$MK_CONFDIR/mrpe.cfg" ] then @@ -339,3 +424,4 @@ echo done fi + diff -Nru check-mk-1.2.2p3/check_mk_agent.hpux check-mk-1.2.6p12/check_mk_agent.hpux --- check-mk-1.2.2p3/check_mk_agent.hpux 2013-11-05 09:22:41.000000000 +0000 +++ check-mk-1.2.6p12/check_mk_agent.hpux 2015-09-21 10:59:53.000000000 +0000 @@ -6,7 +6,7 @@ # | | |___| | | | __/ (__| < | | | | . 
\ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2010 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. @@ -48,7 +48,7 @@ echo '<<>>' -echo Version: 1.2.2p3 +echo Version: 1.2.6p12 echo AgentOS: hpux echo PluginsDirectory: $PLUGINSDIR echo LocalDirectory: $LOCALDIR @@ -58,7 +58,7 @@ # modern systems with vxfs only here. The filesystem type is currently # not used by the check anyway. echo '<<>>' -df -kP -F vxfs | sed 's/ / - /' | awk '/^(.*-.*)$/ { print $0 } /^([^-]+)$/ { printf $0 }' | grep -v ^/proc | grep -v ^Filesystem | grep -v : +df -kP -F vxfs | sed 's/ / - /' | awk '/^(.*-.*)$/ { print $0 } /^([^-]+)$/ { printf $0 }' | grep -Ev "^/proc|^Filesystem|^/aha|:" # Process table: HP-UX does not provide a resident size of processes. # We send a 0 here for RSZ. @@ -86,14 +86,14 @@ # Network interfaces echo '<<>>' -for nic in `nwmgr -g | sed -n '/^lan/s/\(^[^ ]* \).*/\1/p'` -do +for nic in `nwmgr -g | sed -n '/^lan/s/\(^[^ ]* \).*/\1/p'` +do nwmgr -g --st mib -c $nic done # Logical Volume Manager echo '<<>>' -vgdisplay -v -F +/sbin/vgdisplay -v -F # Multipathing echo '<<>>' @@ -107,7 +107,7 @@ echo '<<>>' # remove heading, make first column space separated -ntpq -p | sed -e 1,2d -e 's/^\(.\)/\1 /' -e 's/^ /%/' +ntpq -np | sed -e 1,2d -e 's/^\(.\)/\1 /' -e 's/^ /%/' # Kernel tunnables @@ -127,6 +127,13 @@ -e "Code version" -e "Hardware Path" -e "Port World" done +# Libelle Business Shadow +if type trd >/dev/null 2>&1 +then + echo '<<>>' + trd -s +fi + # Einbinden von lokalen Plugins, die eine eigene Sektion ausgeben if cd $PLUGINSDIR diff -Nru check-mk-1.2.2p3/check_mk_agent.linux check-mk-1.2.6p12/check_mk_agent.linux --- check-mk-1.2.2p3/check_mk_agent.linux 2013-11-05 09:22:41.000000000 +0000 +++ check-mk-1.2.6p12/check_mk_agent.linux 2015-09-21 10:59:53.000000000 +0000 @@ -6,7 +6,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. @@ -29,6 +29,15 @@ export MK_LIBDIR="/usr/lib/check_mk_agent" export MK_CONFDIR="/etc/check_mk" +export MK_VARDIR="/var/lib/check_mk_agent" + +# Provide information about the remote host. That helps when data +# is being sent only once to each remote host. +if [ "$REMOTE_HOST" ] ; then + export REMOTE=$REMOTE_HOST +elif [ "$SSH_CLIENT" ] ; then + export REMOTE=${SSH_CLIENT%% *} +fi # Make sure, locally installed binaries are found PATH=$PATH:/usr/local/bin @@ -43,21 +52,71 @@ # refer to online documentation for details about local checks. LOCALDIR=$MK_LIBDIR/local +# All files in SPOOLDIR will simply appended to the agent +# output if they are not outdated (see below) +SPOOLDIR=$MK_VARDIR/spool # close standard input (for security reasons) and stderr if [ "$1" = -d ] then set -xv else - exec <&- 2>/dev/null + exec /dev/null fi +# Runs a command asynchronous by use of a cache file +function run_cached () { + local section= + if [ "$1" = -s ] ; then local section="echo '<<<$2>>>' ; " ; shift ; fi + local NAME=$1 + local MAXAGE=$2 + shift 2 + local CMDLINE="$section$@" + + if [ ! 
-d $MK_VARDIR/cache ]; then mkdir -p $MK_VARDIR/cache ; fi
+    CACHEFILE="$MK_VARDIR/cache/$NAME.cache"
+
+    # Check if the creation of the cache takes suspiciously long and return
+    # nothing if the age (access time) of $CACHEFILE.new is twice the MAXAGE
+    local NOW=$(date +%s)
+    if [ -e "$CACHEFILE.new" ] ; then
+        local CF_ATIME=$(stat -c %X "$CACHEFILE.new")
+        if [ $((NOW - CF_ATIME)) -ge $((MAXAGE * 2)) ] ; then
+            # Kill the process still accessing that file in case
+            # it is still running. This avoids overlapping processes!
+            fuser -k -9 "$CACHEFILE.new" >/dev/null 2>&1
+            rm -f "$CACHEFILE.new"
+            return
+        fi
+    fi
+
+    # Check if cache file exists and is recent enough
+    if [ -s "$CACHEFILE" ] ; then
+        local MTIME=$(stat -c %Y "$CACHEFILE")
+        if [ $((NOW - MTIME)) -le $MAXAGE ] ; then local USE_CACHEFILE=1 ; fi
+        # Output the file in any case, even if it is
+        # outdated. The new file will not yet be available
+        cat "$CACHEFILE"
+    fi
+
+    # Cache file outdated and new job not yet running? Start it
+    if [ -z "$USE_CACHEFILE" -a ! -e "$CACHEFILE.new" ] ; then
+        # When the command fails, the output is thrown away and ignored
+        echo "set -o noclobber ; exec > \"$CACHEFILE.new\" || exit 1 ; $CMDLINE && mv \"$CACHEFILE.new\" \"$CACHEFILE\" || rm -f \"$CACHEFILE\" \"$CACHEFILE.new\"" | nohup bash >/dev/null 2>&1 &
+    fi
+}
+
+# Make run_cached available for subshells (plugins, local checks, etc.)
+export -f run_cached
+
 echo '<<>>'
-echo Version: 1.2.2p3
+echo Version: 1.2.6p12
 echo AgentOS: linux
+echo AgentDirectory: $MK_CONFDIR
+echo DataDirectory: $MK_VARDIR
+echo SpoolDirectory: $SPOOLDIR
 echo PluginsDirectory: $PLUGINSDIR
 echo LocalDirectory: $LOCALDIR
-echo AgentDirectory: $MK_CONFDIR

 # If we are called via xinetd, try to find only_from configuration
 if [ -n "$REMOTE_HOST" ]
@@ -76,20 +135,20 @@
 echo '<<>>'
 # The exclusion list is getting a bit of a problem. -l should hide any remote FS but seems
 # to be all but working.
-excludefs="-x smbfs -x tmpfs -x cifs -x iso9660 -x udf -x nfsv4 -x nfs -x mvfs -x zfs"
+excludefs="-x smbfs -x cifs -x iso9660 -x udf -x nfsv4 -x nfs -x mvfs -x zfs"
 df -PTlk $excludefs | sed 1d

-# VMWare shows its own filesystems with 'vdf'. Just one
-# problem: it outputs not 7 but only 6 columns
-if type vdf > /dev/null
-then
-    vdf -P | grep ^/vmfs/volumes | sed 's/ / vmfs /'
-fi
+# df inodes information
+echo '<<>>'
+echo '[df_inodes_start]'
+df -PTli $excludefs | sed 1d
+echo '[df_inodes_end]'

 # Filesystem usage for ZFS
 if type zfs > /dev/null 2>&1 ; then
     echo '<<>>'
-    zfs get -Hp name,quota,used,avail,mountpoint,type
+    zfs get -Hp name,quota,used,avail,mountpoint,type -t filesystem,volume || \
+        zfs get -Hp name,quota,used,avail,mountpoint,type
     echo '[df]'
     df -PTlk -t zfs | sed 1d
 fi
@@ -104,6 +163,7 @@
     echo '<<>>'
     sed -n '/ nfs4\? /s/[^ ]* \([^ ]*\) .*/\1/p' < /proc/mounts |
+        sed 's/\\040/ /g' |
     while read MP
     do
         if [ $STAT_VERSION != $STAT_BROKE ]; then
@@ -114,6 +174,20 @@
             printf '\n'|| echo "$MP hanging 0 0 0 0"
         fi
     done
+
+    echo '<<>>'
+    sed -n '/ cifs\? /s/[^ ]* \([^ ]*\) .*/\1/p' < /proc/mounts |
+        sed 's/\\040/ /g' |
+    while read MP
+    do
+        if [ $STAT_VERSION != $STAT_BROKE ]; then
+            waitmax -s 9 2 stat -f -c "$MP ok %b %f %a %s" "$MP" || \
+                echo "$MP hanging 0 0 0 0"
+        else
+            waitmax -s 9 2 stat -f -c "$MP ok %b %f %a %s" "$MP" && \
+                printf '\n'|| echo "$MP hanging 0 0 0 0"
+        fi
+    done
 fi

 # Check mount options.
Filesystems may switch to 'ro' in case @@ -123,8 +197,7 @@ # processes including username, without kernel processes echo '<<>>' -ps ax -o user,vsz,rss,pcpu,command --columns 10000 | sed -e 1d -e 's/ *\([^ ]*\) *\([^ ]*\) *\([^ ]*\) *\([^ ]*\) */(\1,\2,\3,\4) /' - +ps ax -o user,vsz,rss,cputime,pid,command --columns 10000 | sed -e 1d -e 's/ *\([^ ]*\) *\([^ ]*\) *\([^ ]*\) *\([^ ]*\) *\([^ ]*\) */(\1,\2,\3,\4,\5) /' # Memory usage echo '<<>>' @@ -138,26 +211,17 @@ echo '<<>>' cat /proc/uptime -# Network interfaces (Link, Autoneg, Speed) -# This requires ethtool -if type ethtool > /dev/null -then - echo '<<>>' - for eth in $(cat /proc/net/dev | sed -rn -e 's/[[:space:]]*//g' -e '/ *([^:]):.*/s//\1/p' | egrep -vx '(lo|sit.*)') - do - echo $eth $(ethtool $eth | egrep '(Speed|Duplex|Link detected|Auto-negotiation):' | cut -d: -f2 | sed 's/ *//g') - done -fi # New variant: Information about speed and state in one section echo '<<>>' sed 1,2d /proc/net/dev if type ethtool > /dev/null then - for eth in $(sed -e 1,2d < /proc/net/dev | cut -d':' -f1) + for eth in $(sed -e 1,2d < /proc/net/dev | cut -d':' -f1 | sort) do - echo "[$eth]" - ethtool $eth | egrep '(Speed|Duplex|Link detected|Auto-negotiation):' + echo "[$eth]" + ethtool $eth | egrep '(Speed|Duplex|Link detected|Auto-negotiation):' + echo -en "\tAddress: " ; cat /sys/class/net/$eth/address ; echo done fi @@ -184,26 +248,18 @@ # New implementation: netstat is very slow for large TCP tables cat /proc/net/tcp /proc/net/tcp6 2>/dev/null | awk ' /:/ { c[$4]++; } END { for (x in c) { print x, c[x]; } }' -# Platten- und RAID-Status von LSI-Controlleren, falls vorhanden -if type cfggen > /dev/null ; then - echo '<<>>' - cfggen 0 DISPLAY | egrep '(Target ID|State|Volume ID|Status of volume)[[:space:]]*:' | sed -e 's/ *//g' -e 's/:/ /' -fi - -# Multipathgeraete +# Linux Multipathing if type multipath >/dev/null ; then - echo '<<>>' - multipath -l + if [ -f /etc/multipath.conf ] ; then + echo '<<>>' + multipath -l + fi fi -# Soft-RAID -echo '<<>>' -cat /proc/mdstat - # Performancecounter Platten echo '<<>>' date +%s -egrep ' (x?[shv]d[a-z]*|cciss/c[0-9]+d[0-9]+|emcpower[a-z]+|dm-[0-9]+|VxVM.*) ' < /proc/diskstats +egrep ' (x?[shv]d[a-z]*|cciss/c[0-9]+d[0-9]+|emcpower[a-z]+|dm-[0-9]+|VxVM.*|mmcblk.*) ' < /proc/diskstats if type dmsetup >/dev/null ; then echo '[dmsetup_info]' dmsetup info -c --noheadings --separator ' ' -o name,devno,vg_name,lv_name @@ -219,114 +275,90 @@ date +%s cat /proc/vmstat /proc/stat -# Statistik der Netzwerkgeraete (Pakete, Kollisionen, etc) -echo '<<>>' -# Genauen Zeitstempel einfuegen, da Counter von Zeit abhaengen -date +%s -sed -e 1,2d -e 's/:/ /g' < /proc/net/dev - - -if type ipmitool >/dev/null +# Hardware sensors via IPMI (need ipmitool) +if type ipmitool > /dev/null then - echo '<<>>' - IPMI_FILE=$MK_CONFDIR/ipmitool_sensors.cache - if [ ! -d $MK_CONFDIR ]; then - mkdir -p $MK_CONFDIR - fi - - # Do not use cache file after 20 minutes - IPMI_MAXAGE=1200 - - # Check if file exists and is recent enough - if [ -s $IPMI_FILE ] - then - NOW=$(date +%s) - MTIME=$(stat -c %Y $IPMI_FILE) - if [ $((NOW - MTIME)) -le $IPMI_MAXAGE ] ; then - USE_IPMI_FILE=1 - fi - fi - - if [ -s "$IPMI_FILE" ] - then - grep -v 'command failed' "$IPMI_FILE" \ - | sed -e 's/ *| */|/g' -e "s/ /_/g" -e 's/_*$//' -e 's/|/ /g' \ - | egrep -v '^[^ ]+ na ' \ - | grep -v ' discrete ' - fi - - if [ -z "$USE_IPMI_FILE" -a ! 
-e "$IPMI_FILE.new" ] - then - setsid bash -c "set -o noclobber ; ipmitool sensor list > $IPMI_FILE.new && mv $IPMI_FILE.new $IPMI_FILE || rm -f $IPMI_FILE*" & - fi + run_cached -s ipmi 300 "ipmitool sensor list | grep -v 'command failed' | sed -e 's/ *| */|/g' -e 's/ /_/g' -e 's/_*"'$'"//' -e 's/|/ /g' | egrep -v '^[^ ]+ na ' | grep -v ' discrete '" fi # IPMI data via ipmi-sensors (of freeipmi). Please make sure, that if you # have installed freeipmi that IPMI is really support by your hardware. -# The agent tries to avoid hanging forever by setting a limit of 300 seconds -# for the first run (where the cache is created). If ipmi-sensors runs into -# that timeout, it leaves and empty cache file. We skip this check forever -# if we find that empty cache file. -sdrcache=/var/cache/.freeipmi/sdr-cache/sdr-cache-$(hostname | cut -d. -f1).127.0.0.1 -if type ipmi-sensors >/dev/null && [ ! -e "$sdrcache" -o -s "$sdrcache" ] +if type ipmi-sensors >/dev/null then echo '<<>>' - # No cache file existing? => Impose a high time limit. We do not suffice - # in creating the cache we most probably run on a hardware where this tool - # is hanging forever. We make sure that we never try again in that case! - if [ ! -e "$sdrcache" ] - then - WAITMAX="waitmax 300" - elif tail --bytes 2 < "$sdrcache" | od -t x2 | grep -q 0a0a - then - WAITMAX="waitmax 3" - else - # Cache file corrupt. Must end with two linefeeds. - rm -f $sdrcache - WAITMAX= - fi # Newer ipmi-sensors version have new output format; Legacy format can be used if ipmi-sensors --help | grep -q legacy-output; then IPMI_FORMAT="--legacy-output" else IPMI_FORMAT="" fi - # Aquire lock with flock in order to avoid multiple runs of ipmi-sensors - # in case of parallel or overlapping calls of the agent. - ( - flock -n 200 --wait 60 - # At least with ipmi-sensoirs 0.7.16 this group is Power_Unit instead of "Power Unit" - for class in Temperature Power_Unit Fan - do - $WAITMAX ipmi-sensors $IPMI_FORMAT --sdr-cache-directory /var/cache -g "$class" | sed -e 's/ /_/g' -e 's/:_\?/ /g' -e 's@ \([^(]*\)_(\([^)]*\))@ \2_\1@' - # In case of a timeout immediately leave loop. + # At least with ipmi-sensoirs 0.7.16 this group is Power_Unit instead of "Power Unit" + run_cached -s ipmi_sensors 300 "for class in Temperature Power_Unit Fan + do + ipmi-sensors $IPMI_FORMAT --sdr-cache-directory /var/cache -g "$class" | sed -e 's/ /_/g' -e 's/:_\?/ /g' -e 's@ \([^(]*\)_(\([^)]*\))@ \2_\1@' + # In case of a timeout immediately leave loop. if [ $? = 255 ] ; then break ; fi - WAITMAX="waitmax 3" - done - ) 200>>"$sdrcache" + done" fi -# State of LSI MegaRAID controller via MegaCli. You can download that tool from: -# http://www.lsi.com/downloads/Public/MegaRAID%20Common%20Files/8.02.16_MegaCLI.zip +# RAID status of Linux software RAID +echo '<<>>' +cat /proc/mdstat + +# RAID status of Linux RAID via device mapper +if type dmraid >/dev/null && DMSTATUS=$(dmraid -r) +then + echo '<<>>' + + # Output name and status + dmraid -s | grep -e ^name -e ^status + + # Output disk names of the RAID disks + DISKS=$(echo "$DMSTATUS" | cut -f1 -d\:) + + for disk in $DISKS ; do + device=$(cat /sys/block/$(basename $disk)/device/model ) + status=$(echo "$DMSTATUS" | grep ^${disk}) + echo "$status Model: $device" + done +fi +# RAID status of LSI controllers via cfggen +if type cfggen > /dev/null ; then + echo '<<>>' + cfggen 0 DISPLAY | egrep '(Target ID|State|Volume ID|Status of volume)[[:space:]]*:' | sed -e 's/ *//g' -e 's/:/ /' +fi + +# RAID status of LSI MegaRAID controller via MegaCli. 
You can download that tool from: +# http://www.lsi.com/downloads/Public/MegaRAID%20Common%20Files/8.02.16_MegaCLI.zip if type MegaCli >/dev/null ; then + MegaCli_bin="MegaCli" +elif type MegaCli64 >/dev/null ; then + MegaCli_bin="MegaCli64" +elif type megacli >/dev/null ; then + MegaCli_bin="megacli" +else + MegaCli_bin="unknown" +fi + +if [ "$MegaCli_bin" != "unknown" ]; then echo '<<>>' - for part in $(MegaCli -EncInfo -aALL -NoLog < /dev/null \ + for part in $($MegaCli_bin -EncInfo -aALL -NoLog < /dev/null \ | sed -rn 's/:/ /g; s/[[:space:]]+/ /g; s/^ //; s/ $//; s/Number of enclosures on adapter ([0-9]+).*/adapter \1/g; /^(Enclosure|Device ID|adapter) [0-9]+$/ p'); do [ $part = adapter ] && echo "" [ $part = 'Enclosure' ] && echo -ne "\ndev2enc" echo -n " $part" done echo - MegaCli -PDList -aALL -NoLog < /dev/null | egrep 'Enclosure|Raw Size|Slot Number|Device Id|Firmware state|Inquiry|Adapter' + $MegaCli_bin -PDList -aALL -NoLog < /dev/null | egrep 'Enclosure|Raw Size|Slot Number|Device Id|Firmware state|Inquiry|Adapter' echo '<<>>' - MegaCli -LDInfo -Lall -aALL -NoLog < /dev/null | egrep 'Size|State|Number|Adapter|Virtual' + $MegaCli_bin -LDInfo -Lall -aALL -NoLog < /dev/null | egrep 'Size|State|Number|Adapter|Virtual' echo '<<>>' - MegaCli -AdpBbuCmd -GetBbuStatus -aALL -NoLog < /dev/null | grep -v Exit + $MegaCli_bin -AdpBbuCmd -GetBbuStatus -aALL -NoLog < /dev/null | grep -v Exit fi -# 3WARE disk controller (by Radoslaw Bak) +# RAID status of 3WARE disk controller (by Radoslaw Bak) if type tw_cli > /dev/null ; then for C in $(tw_cli show | awk 'NR < 4 { next } { print $1 }'); do echo '<<<3ware_info>>>' @@ -338,25 +370,38 @@ done fi -if type vcbVmName > /dev/null 2>&1 ; then - echo '<<>>' - vcbVmName -s any +# RAID controllers from areca (Taiwan) +# cli64 can be found at ftp://ftp.areca.com.tw/RaidCards/AP_Drivers/Linux/CLI/ +if type cli64 >/dev/null ; then + run_cached -s arc_raid_status 300 "cli64 rsf info | tail -n +3 | head -n -2" fi # VirtualBox Guests. Section must always been output. Otherwise the # check would not be executed in case no guest additions are installed. # And that is something the check wants to detect echo '<<>>' -if type VBoxControl > /dev/null 2>&1 ; then +if type VBoxControl >/dev/null 2>&1 ; then VBoxControl -nologo guestproperty enumerate | cut -d, -f1,2 [ ${PIPESTATUS[0]} = 0 ] || echo "ERROR" fi +# OpenVPN Clients. Currently we assume that the configuration # is in +# /etc/openvpn. We might find a safer way to find the configuration later. +if [ -e /etc/openvpn/openvpn-status.log ] ; then + echo '<<>>' + sed -n -e '/CLIENT LIST/,/ROUTING TABLE/p' < /etc/openvpn/openvpn-status.log | sed -e 1,3d -e '$d' +fi +# Time synchronization with NTP if type ntpq > /dev/null 2>&1 ; then - echo '<<>>' - # remote heading, make first column space separated - waitmax 2 ntpq -p | sed -e 1,2d -e 's/^\(.\)/\1 /' -e 's/^ /%/' + # remove heading, make first column space separated + run_cached -s ntp 30 "waitmax 5 ntpq -np | sed -e 1,2d -e 's/^\(.\)/\1 /' -e 's/^ /%/' || true" +fi + +# Time synchronization with Chrony +if type chronyc > /dev/null 2>&1 ; then + # Force successful exit code. 
Otherwise section will be missing if daemon not running + run_cached -s chrony 30 "waitmax 5 chronyc tracking || true" fi if type nvidia-settings >/dev/null && [ -S /tmp/.X11-unix/X0 ] @@ -375,18 +420,44 @@ # Status of CUPS printer queues if type lpstat > /dev/null 2>&1; then - if pgrep cups > /dev/null 2>&1; then - echo '<<>>' - waitmax 3 lpstat -p - echo '---' - waitmax 3 lpstat -o|sort - fi + if pgrep -x cups > /dev/null 2>&1; then + # first define a function to check cups + function cups_queues () { + CPRINTCONF=/etc/cups/printers.conf + if [ -r "$CPRINTCONF" ] ; then + LOCAL_PRINTERS=$(grep -E "<(Default)?Printer .*>" $CPRINTCONF | awk '{print $2}' | sed -e 's/>//') + lpstat -p | while read LINE + do + PRINTER=$(echo $LINE | awk '{print $2}') + if echo "$LOCAL_PRINTERS" | grep -q "$PRINTER"; then + echo $LINE + fi + done + echo '---' + lpstat -o | while read LINE + do + PRINTER=${LINE%%-*} + if echo $LOCAL_PRINTERS | grep -q $PRINTER; then + echo $LINE + fi + done + else + lpstat -p + echo '---' + lpstat -o | sort + fi + } + # Make cups_queues available for subshell + export -f cups_queues + # Use cups_queues function with run_cached and cache time of 5 mins + run_cached -s cups_queues 300 "cups_queues" + fi fi # Heartbeat monitoring # Different handling for heartbeat clusters with and without CRM # for the resource state -if [ -S /var/run/heartbeat/crm/cib_ro -o -S /var/run/crm/cib_ro ]; then +if [ -S /var/run/heartbeat/crm/cib_ro -o -S /var/run/crm/cib_ro ] || pgrep crmd > /dev/null 2>&1; then echo '<<>>' crm_mon -1 -r | grep -v ^$ | sed 's/^ //; /^\sResource Group:/,$ s/^\s//; s/^\s/_/g' fi @@ -419,16 +490,33 @@ # Only handle the last 6 lines (includes the summary line at the bottom and # the last message in the queue. The last message is not used at the moment # but it could be used to get the timestamp of the last message. -if type mailq >/dev/null 2>&1 && getent passwd postfix >/dev/null 2>&1; then - echo '<<>>' - mailq | tail -n 6 +if type postconf >/dev/null ; then + echo '<<>>' + postfix_queue_dir=$(postconf -h queue_directory) + postfix_count=$(find $postfix_queue_dir/deferred -type f | wc -l) + postfix_size=$(du -ks $postfix_queue_dir/deferred | awk '{print $1 }') + if [ $postfix_count -gt 0 ] + then + echo -- $postfix_size Kbytes in $postfix_count Requests. + else + echo Mail queue is empty + fi +elif [ -x /usr/sbin/ssmtp ] ; then + echo '<<>>' + mailq 2>&1 | sed 's/^[^:]*: \(.*\)/\1/' | tail -n 6 +fi + +#Check status of qmail mailqueue +if type qmail-qstat >/dev/null +then + echo "<<>>" + qmail-qstat fi # Check status of OMD sites if type omd >/dev/null then - echo '<<>>' - omd status --bare --auto + run_cached -s omd_status 60 "omd status --bare --auto || true" fi @@ -445,7 +533,14 @@ if [ -r "$MK_CONFDIR/fileinfo.cfg" ] ; then echo '<<>>' date +%s - stat -c "%n|%s|%Y" $(cat "$MK_CONFDIR/fileinfo.cfg") + for line in $(cat "$MK_CONFDIR/fileinfo.cfg") + do + stat -c "%n|%s|%Y" $line 2>/dev/null + + if [ $? -ne 0 ]; then + echo "$line|missing|$(date +%s)" + fi + done fi # Get stats about OMD monitoring cores running on this machine. @@ -458,33 +553,47 @@ do if [ -S "/omd/sites/$site/tmp/run/live" ] ; then echo "[$site]" - echo -e "GET status" | /omd/sites/$site/bin/unixcat /omd/sites/$site/tmp/run/live + echo -e "GET status" | waitmax 3 /omd/sites/$site/bin/unixcat /omd/sites/$site/tmp/run/live fi done fi +# Get statistics about monitored jobs. Below the job directory there +# is a sub directory per user that ran a job. 
That directory must be +# owned by the user so that a symlink or hardlink attack for reading +# arbitrary files can be avoided. +if pushd $MK_VARDIR/job >/dev/null; then + echo '<<>>' + for username in * + do + if [ -d "$username" ] && cd "$username" ; then + if [ $EUID -eq 0 ]; then + su "$username" -c "head -n -0 -v *" + else + head -n -0 -v * + fi + cd .. + fi + done + popd > /dev/null +fi -# Einbinden von lokalen Plugins, die eine eigene Sektion ausgeben -if cd $PLUGINSDIR -then - for skript in $(ls) - do - if [ -x "$skript" ] ; then - ./$skript - fi - done +# Gather thermal information provided e.g. by acpi +# At the moment only supporting thermal sensors +if ls /sys/class/thermal/thermal_zone* >/dev/null 2>&1; then + echo '<<>>' + for F in /sys/class/thermal/thermal_zone*; do + echo -n "${F##*/} " + if [ ! -e $F/mode ] ; then echo -n "- " ; fi + cat $F/{mode,type,temp,trip_point_*} | tr \\n " " + echo + done fi -# Lokale Einzelchecks -echo '<<>>' -if cd $LOCALDIR -then - for skript in $(ls) - do - if [ -x "$skript" ] ; then - ./$skript - fi - done +# Libelle Business Shadow +if type trd >/dev/null; then + echo "<<>>" + trd -s fi # MK's Remote Plugin Executor @@ -500,3 +609,71 @@ echo done fi + + +# Local checks +echo '<<>>' +if cd $LOCALDIR ; then + for skript in $(ls) ; do + if [ -f "$skript" -a -x "$skript" ] ; then + ./$skript + fi + done + # Call some plugins only every X'th minute + for skript in [1-9]*/* ; do + if [ -x "$skript" ] ; then + run_cached local_${skript//\//\\} ${skript%/*} "$skript" + fi + done +fi + +# Plugins +if cd $PLUGINSDIR ; then + for skript in $(ls) ; do + if [ -f "$skript" -a -x "$skript" ] ; then + ./$skript + fi + done + # Call some plugins only every Xth minute + for skript in [1-9]*/* ; do + if [ -x "$skript" ] ; then + run_cached plugins_${skript//\//\\} ${skript%/*} "$skript" + fi + done +fi + +# Agent output snippets created by cronjobs, etc. +if [ -d "$SPOOLDIR" ] +then + pushd "$SPOOLDIR" > /dev/null + now=$(date +%s) + + for file in * + do + # output every file in this directory. If the file is prefixed + # with a number, then that number is the maximum age of the + # file in seconds. If the file is older than that, it is ignored. + maxage="" + part="$file" + + # Each away all digits from the front of the filename and + # collect them in the variable maxage. + while [ "${part/#[0-9]/}" != "$part" ] + do + maxage=$maxage${part:0:1} + part=${part:1} + done + + # If there is at least one digit, than we honor that. + if [ "$maxage" ] ; then + mtime=$(stat -c %Y "$file") + if [ $((now - mtime)) -gt $maxage ] ; then + continue + fi + fi + + # Output the file + cat "$file" + done + popd > /dev/null +fi diff -Nru check-mk-1.2.2p3/check_mk_agent.macosx check-mk-1.2.6p12/check_mk_agent.macosx --- check-mk-1.2.2p3/check_mk_agent.macosx 2013-11-05 09:22:41.000000000 +0000 +++ check-mk-1.2.6p12/check_mk_agent.macosx 2015-09-21 10:59:53.000000000 +0000 @@ -6,7 +6,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. 
@@ -43,7 +43,7 @@ echo '<<<check_mk>>>' -echo Version: 1.2.2p3 +echo Version: 1.2.6p12 echo AgentOS: macosx osver="$(uname -r)" @@ -52,7 +52,7 @@ df -kPt hfs | sed -e 's/^\([^ ][^ ]*\) \(.*\)$/\1 hfs \2/' | sed 1d echo '<<<cpu>>>'; -echo `sysctl -n vm.loadavg | tr -d '{}'` `top -l 1 -n 1 | egrep ^Processes: | +echo `sysctl -n vm.loadavg | tr -d '{}'` `top -l 1 -n 1 | egrep ^Processes: | awk '{print $4"/"$2;}'` `echo 'echo $$' | bash` `sysctl -n hw.ncpu` echo '<<<mem>>>' @@ -60,34 +60,53 @@ echo "MemFree: $(echo "( $(vm_stat | grep speculative: | awk '{print $3}') + $(vm_stat | grep inactive: | awk '{print $3}') + $(vm_stat | grep free: | awk '{print $3}') ) * $(vm_stat | grep Mach | awk '{print $8}') / 1024" | bc) kB" echo "SwapTotal: 0 kB" echo "SwapFree: 0 kB" +# FIXME: Just call vm_stat here, write a check plugin that uses that +# native output of vm_stat echo '<<<uptime>>>'; -echo `date +%s` - `sysctl -n kern.boottime | cut -d' ' -f 4,7 | tr ',' '.' | +echo `date +%s` - `sysctl -n kern.boottime | cut -d' ' -f 4,7 | tr ',' '.' | tr -d ' '` | bc +# FIXME: use sysctl -a for outputting *all* kernel values. Write +# check plugins with subchecks for parsing that output. Maybe reduce +# the output size by grepping away totally useless parts echo '<<<netctr>>>'; -date +'%s'; netstat -inb | egrep -v '(^Name|lo|plip)' | grep Link | awk '{ +date +'%s'; netstat -inb | egrep -v '(^Name|lo|plip)' | grep Link | awk '{ print $1,$7,$5,$6,"0","0","0","0","0",$10,$8,$9,"0","0",$11,"0","0"; }' +# FIXME: send netstat -inb plain, write proper check plugins for +# clean parsing of the output echo '<<<ps>>>' ps ax -o user,vsz,rss,pcpu,command | sed -e 1d -e 's/ *\([^ ]*\) *\([^ ]*\) *\([^ ]*\) *\([^ ]*\) */(\1,\2,\3,\4) /' # NTP seems to be enabled as a default echo '<<<ntp>>>' - ntpq -pn | sed -e 1,2d -e 's/^\(.\)/\1 /' -e 's/^ /%/' +ntpq -np | sed -e 1,2d -e 's/^\(.\)/\1 /' -e 's/^ /%/' # TCP Conn stats echo '<<<tcp_conn_stats>>>' - netstat -ntfinet | awk ' /^tcp/ { c[$6]++; } END { for (x in c) { print x, c[x]; } }' +netstat -ntfinet | awk ' /^tcp/ { c[$6]++; } END { for (x in c) { print x, c[x]; } }' # Fileinfo-Check: put patterns for files into /etc/check_mk/fileinfo.cfg if [ -r "$MK_CONFDIR/fileinfo.cfg" ] ; then echo '<<<fileinfo>>>' date +%s - stat -f "%N|%z|%m" $(cat "$MK_CONFDIR/fileinfo.cfg") + for line in $(cat "$MK_CONFDIR/fileinfo.cfg") + do + stat -f "%N|%z|%m" $line 2>/dev/null + + if [ $? -ne 0 ]; then + echo "$line|missing|$(date +%s)" + fi + done fi +if type tmutil >/dev/null +then + echo '<<<timemachine>>>' + tmutil latestbackup 2>&1 +fi ############################### # Things up for takers: @@ -97,6 +116,7 @@ # *hw sensors, how to query them? # *OSX Server specific stuff, LDAP, etc... # *Rewrite cpu / ps check to be faster - takes >1s on my laptop +# ioreg -l shows a lot of interesting inventory data # MK's Remote Plugin Executor diff -Nru check-mk-1.2.2p3/check_mk_agent.netbsd check-mk-1.2.6p12/check_mk_agent.netbsd --- check-mk-1.2.2p3/check_mk_agent.netbsd 2013-11-05 09:22:41.000000000 +0000 +++ check-mk-1.2.6p12/check_mk_agent.netbsd 2015-09-21 10:59:53.000000000 +0000 @@ -6,7 +6,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK.
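(Aside on the fileinfo section used by both the Linux and macOS loops above: it starts with a timestamp line, followed by one name|size|mtime line per file, or name|missing|timestamp when stat failed. A rough Python sketch of a consumer follows; the field layout is read off the stat/echo calls above, while the function itself is hypothetical and not the actual check_mk parser.)

def parse_fileinfo(section_lines):
    # First line is the agent-side timestamp, the remaining lines
    # are "name|size|mtime" or "name|missing|timestamp" entries.
    agent_time = int(section_lines[0])
    files = {}
    for line in section_lines[1:]:
        # rsplit keeps file names that contain "|" mostly intact
        name, size, mtime = line.rsplit("|", 2)
        files[name] = None if size == "missing" else (int(size), int(mtime))
    return agent_time, files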
@@ -59,7 +59,7 @@ fi echo '<<>>' -echo Version: 1.2.2p3 +echo Version: 1.2.6p12 echo AgentOS: netbsd osver="$(uname -r)" @@ -95,7 +95,7 @@ do BI=$( netstat -inb | egrep -v Name | grep Link | awk '{print $1" "$5}' | sed -ne $Z1$Z2 ) PI=$( netstat -in | egrep -v Name | grep Link | awk '{print $5}' | sed -ne $Z1$Z2 ) - EI=$( netstat -in | egrep -v Name | grep Link | awk '{print $6}' | sed -ne $Z1$Z2 ) + EI=$( netstat -in | egrep -v Name | grep Link | awk '{print $6}' | sed -ne $Z1$Z2 ) FF1="0 0 0 0 0" BO=$( netstat -inb | egrep -v Name | grep Link | awk '{print $6}' | sed -ne $Z1$Z2 ) PO=$( netstat -in | egrep -v Name | grep Link | awk '{print $7}' | sed -ne $Z1$Z2 ) diff -Nru check-mk-1.2.2p3/check_mk_agent.openbsd check-mk-1.2.6p12/check_mk_agent.openbsd --- check-mk-1.2.2p3/check_mk_agent.openbsd 2013-11-05 09:22:41.000000000 +0000 +++ check-mk-1.2.6p12/check_mk_agent.openbsd 2015-09-21 10:59:53.000000000 +0000 @@ -6,7 +6,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. @@ -59,13 +59,13 @@ fi echo '<<>>' -echo Version: 1.2.2p3 +echo Version: 1.2.6p12 echo AgentOS: openbsd osver="$(uname -r)" echo '<<>>' -df -kPt ffs | sed -e 's/^\([^ ][^ ]*\) \(.*\)$/\1 ffs \2/' | sed 1d +df -kPt ffs | sed -e 's/^\([^ ][^ ]*\) \(.*\)$/\1 ffs \2/' | sed 1d # processes including username, without kernel processes echo '<<>>' @@ -78,7 +78,29 @@ echo '<<>>' echo `date +%s` - `sysctl -n kern.boottime | cut -d' ' -f 4,7 | tr ',' '.' | tr -d ' '` | bc -echo '<<>>' +echo "<<>>" +MEM_FREE=$(vmstat | tail -n1 | awk '{ print $5 }') +MEM_TOTAL=$(sysctl hw.usermem | cut -d= -f2) +MEM_TOTAL=$(echo $MEM_TOTAL/1024 | bc) + +SWAPCTL_OUTPUT=$(swapctl -k -s) +SWAP_FREE=$(echo $SWAPCTL_OUTPUT | awk '{ print $7 }') +SWAP_TOTAL=$(echo $SWAPCTL_OUTPUT | awk '{ print $2 }') + +# if there is no swap space swap values are 0 +if [ -z "$SWAPCTL_OUTPUT" ] + then + SWAP_FREE=0 + SWAP_TOTAL=0 +fi + +echo "MemTotal:\t" $MEM_TOTAL kB +echo "MemFree:\t" $MEM_FREE kB +echo "SwapTotal:\t" $SWAP_TOTAL kB +echo "SwapFree:\t" $SWAP_FREE kB + +echo '<<>>' +# MC= MAC address # BI= Bytes in # PI= Packets in # EI= Errors in @@ -86,33 +108,103 @@ # BO= Bytes out # PO= Packets out # CO= Colls -# NI= number of interfaces +# NI= Number of interfaces +# INTERFACES= Array of interfaces -Z1=1 -Z2=p -NI=$(netstat -in | grep 'Link' | wc -l) +set -A INTERFACES +set -A MC +set -A BI +set -A BO +set -A PI +set -A PO +set -A EI +set -A EO +set -A CO + +NI=0 +# special (lo/pfsync/pflog/enc) and inactive (*) interfaces are not needed +NETSTAT_OUTPUT=$(netstat -in | grep '' | egrep -v "\*|lo|pfsync|enc") +NETSTAT_OUTPUT_BYTES=$(netstat -inb | grep '' | egrep -v "\*|lo|pfsync|enc") + +# adjust internal field separator to get lines from netstat and backup it before +OFS=$IFS +IFS=' +' +# collect netstat values and interface number +for NS in $NETSTAT_OUTPUT + do + NI=$(($NI+1)) + INTERFACES[$NI]=$(echo $NS | awk '{print $1}') + MC[$NI]=$(echo $NS | awk '{print $4}') + PI[$NI]=$(echo $NS | awk '{print $5}') + EI[$NI]=$(echo $NS | awk '{print $6}') + PO[$NI]=$(echo $NS | awk '{print $7}') + EO[$NI]=$(echo $NS | awk '{print $8}') + CO[$NI]=$(echo $NS | awk '{print $9}') +done + +# need NIC counter again for byte values - reset it +NI=0 +for NS in $NETSTAT_OUTPUT_BYTES + do + NI=$(($NI+1)) + 
BI[$NI]=$(echo $NS | awk '{print $5}') + BO[$NI]=$(echo $NS | awk '{print $6}') +done + +# what is this for? [ "${NI}" -ge 1 ] || NI=15 +# jot is OpenBSD "range" +for i in $(jot $NI) + do + echo "${INTERFACES[$i]}:${BI[$i]} ${PI[$i]} ${EI[$i]} 0 0 0 0 0 ${BO[$i]} ${PO[$i]} ${EO[$i]} 0 0 ${CO[$i]} 0 0" +done + +for IF in $(jot $NI) + do + echo \[${INTERFACES[$IF]}\] + + IFCONFIG_OUTPUT=$(ifconfig ${INTERFACES[$IF]}) + for IO in $IFCONFIG_OUTPUT + do + # Speed + SP=$(echo "$IO" | egrep "media:.*base" | cut -d\( -f2 | cut -db -f1) + if [ "$SP" ] + then + echo "\tSpeed: "$SP"Mb/s" + fi + # Detect duplex mode - in reality only available for physical devices but + # virtual ones like CARP devices will get at least a half duplex + if [ "$(echo "$IO" | egrep "media:.*full-duplex")" ] + then + echo "\tDuplex: Full" + elif [ "$(echo "$IO" | grep "media:" | grep -v "full-duplex")" ] + then + echo "\tDuplex: Half" + fi + # Auto-negotiation + if [ "$(echo "$IO" | egrep "media:.*autoselect")" ] + then + echo "\tAuto-negotiation: on" + elif [ "$(echo "$IO" | grep "media:" | grep -v "autoselect")" ] + then + echo "\tAuto-negotiation: off" + fi + # Detect link state + if [ "$(echo "$IO" | grep "status:" | egrep "active|backup|master")" ] + then + echo "\tLink detected: yes" + fi + done + + echo "\tAddress: "${MC[$IF]} -date +%s -while [ $Z1 -lt $NI ] - do - BI=$( netstat -inb | egrep -v Name | grep Link | awk '{print $1" "$5}' | sed -ne $Z1$Z2 ) - PI=$( netstat -in | egrep -v Name | grep Link | awk '{print $5}' | sed -ne $Z1$Z2 ) - EI=$( netstat -in | egrep -v Name | grep Link | awk '{print $6}' | sed -ne $Z1$Z2 ) - FF1="0 0 0 0 0" - BO=$( netstat -inb | egrep -v Name | grep Link | awk '{print $6}' | sed -ne $Z1$Z2 ) - PO=$( netstat -in | egrep -v Name | grep Link | awk '{print $7}' | sed -ne $Z1$Z2 ) - EO=$( netstat -in | egrep -v Name | grep Link | awk '{print $8}' | sed -ne $Z1$Z2 ) - CO=$( netstat -in | egrep -v Name | grep Link | awk '{print $9}' | sed -ne $Z1$Z2 ) - FF2="0 0" - if [ "$(echo ${PI} | cut -c 1-3)" -gt 0 ] - then - echo $BI $PI $EI $FF1 $BO $PO $EO $FF2 $CO $FF2 - fi - Z1=$(($Z1+1)) done +# reset IFS to default +IFS=$OFS + # IPMI-Data (Fans, CPU, temperature, etc) # needs the sysutils/ipmitool and kldload ipmi.ko if which ipmitool >/dev/null ; then diff -Nru check-mk-1.2.2p3/check_mk_agent.openvms check-mk-1.2.6p12/check_mk_agent.openvms --- check-mk-1.2.2p3/check_mk_agent.openvms 2013-11-05 09:22:41.000000000 +0000 +++ check-mk-1.2.6p12/check_mk_agent.openvms 2015-09-21 10:59:53.000000000 +0000 @@ -5,7 +5,7 @@ $!# | | |___| | | | __/ (__| < | | | | . \ | $!# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | $!# | | -$!# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +$!# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | $!# +------------------------------------------------------------------+ $!# $!# This file is part of Check_MK.
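(Aside on the OpenBSD rewrite above: each counter line mimics the Linux /proc/net/dev layout, i.e. after "name:" come sixteen numeric fields, of which only bytes/packets/errors in and out and the collision counter are filled, the rest being zero-padded. A small Python sketch of that layout; the field labels are my own, not from the source.)

def parse_counter_line(line):
    # e.g. "em0:1234 56 0 0 0 0 0 0 789 12 0 0 0 0 0 0"
    name, rest = line.split(":", 1)
    fields = [int(x) for x in rest.split()]
    return {
        "name": name,
        "bytes_in": fields[0], "packets_in": fields[1], "errors_in": fields[2],
        "bytes_out": fields[8], "packets_out": fields[9], "errors_out": fields[10],
        "collisions": fields[13],
    }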
@@ -162,7 +162,7 @@ $define sys$output "''cacheFile'_new" $on error then goto WCFdone $say "<<>>" -$say "Version: 1.2.2p3" +$say "Version: 1.2.6p12" $say "AgentOS: openvms" $say "Nodename: ",f$getsyi("nodename") $say "Architecture: ''HWinfo'" diff -Nru check-mk-1.2.2p3/check_mk_agent.solaris check-mk-1.2.6p12/check_mk_agent.solaris --- check-mk-1.2.2p3/check_mk_agent.solaris 2013-11-05 09:22:41.000000000 +0000 +++ check-mk-1.2.6p12/check_mk_agent.solaris 2015-09-21 10:59:53.000000000 +0000 @@ -27,8 +27,9 @@ export LC_ALL=C unset LANG -export MK_LIBDIR="/change/me" -export MK_CONFDIR="/change/me" +export MK_LIBDIR="/usr/lib/check_mk_agent" +export MK_CONFDIR="/etc/check_mk" +export MK_VARDIR="/var/lib/check_mk_agent" # All executables in PLUGINSDIR will simply be executed and their # ouput appended to the output of the agent. Plugins define their own @@ -48,156 +49,296 @@ exec <&- 2>/dev/null fi +function file_age() { + /usr/bin/perl -e 'if (! -f $ARGV[0]){die "0000000"};$mtime=(stat($ARGV[0]))[9];print ($^T-$mtime);' "$1" +} + + +# Runs a command asynchronous by use of a cache file +function run_cached () { + if [ "$1" = -s ] ; then local section="echo '<<<$2>>>' ; " ; shift ; fi + local NAME=$1 + local MAXAGE=$2 + shift 2 + local CMDLINE="$section$@" + + if [ ! -d $MK_VARDIR/cache ]; then mkdir -p $MK_VARDIR/cache ; fi + CACHEFILE="$MK_VARDIR/cache/$NAME.cache" + + # Check if the creation of the cache takes suspiciously long and return + # nothing if the age (access time) of $CACHEFILE.new is twice the MAXAGE + if [ -e "$CACHEFILE.new" ] ; then + AGE=$(file_age "$CACHEFILE.new") + if [ $AGE -ge $((MAXAGE * 2)) ] ; then + return + fi + fi + + # Check if cache file exists and is recent enough + if [ -s "$CACHEFILE" ] ; then + AGE=$(file_age "$CACHEFILE") + if [ $AGE -le $MAXAGE ] ; then local USE_CACHEFILE=1 ; fi + # Output the file in any case, even if it is + # outdated. The new file will not yet be available + cat "$CACHEFILE" + fi + + # Cache file outdated and new job not yet running? Start it + if [ -z "$USE_CACHEFILE" -a ! -e "$CACHEFILE.new" ] ; then + echo "set -o noclobber ; exec > $CACHEFILE.new || exit 1 ; $CMDLINE && mv $CACHEFILE.new $CACHEFILE || rm -f $CACHEFILE $CACHEFILE.new" | nohup bash 2>/dev/null & + fi +} + + echo '<<>>' -echo Version: 1.2.2p3 +echo Version: 1.2.6p12 echo AgentOS: solaris -# Find out what type of system we run. -# A standard Zone that will not have hardware of any kind? -# A Global Zone with hardware? -if type zonename > /dev/null 2>&1 ; then - if [ `zonename` = "global" ]; then - platform="global_zone" - else - platform="zone" - fi +# Find out what zone we are running in +# Treat all pre-Solaris 10 systems as "global" +if type zonename &>/dev/null +then + zonename=$(zonename) + pszone="-z $zonename" else -# Otherwise we have a normal or hard-partitioned system - platform="hardware" + zonename="global" + pszone="-A" fi + +# Get statistics about monitored jobs. Below the job directory there +# is a sub directory per user that ran a job. That directory must be +# owned by the user so that a symlink or hardlink attack for reading +# arbitrary files can be avoided. +if pushd $MK_VARDIR/job >/dev/null; then + echo '<<>>' + for username in * + do + if [ -d "$username" ] && cd "$username" ; then + count=$(su "$username" -c "ls -1 * | wc -l") + + if [ "$count" -eq "1" ]; then + filename=$(su "$username" -c "ls -1 *") + echo "==> $filename <==" + fi + + su "$username" -c "head -n1000 *" + cd .. 
+ fi + done + popd > /dev/null +fi + + + + +# Filesystem usage for UFS and VXFS echo '<<>>' -for fs in ufs vxfs +for fs in ufs vxfs samfs lofs do - df -lk -F $fs 2>/dev/null | sed 1d | \ - while read Filesystem kbytes used avail capacity Mountedon - do - kbytes=`expr $used + $avail` - echo "${Filesystem} $fs ${kbytes} ${used} ${avail} ${capacity} ${Mountedon}" - done + df -l -k -F $fs 2>/dev/null | sed 1d | \ + while read Filesystem kbytes used avail capacity Mountedon + do + kbytes=$(($used + $avail)) + echo "$Filesystem $fs $kbytes $used $avail $capacity $Mountedon" + done done # Filesystem usage for ZFS -if type zfs > /dev/null 2>&1 ; then +if type zfs &>/dev/null +then echo '<<>>' - zfs get -Hp name,quota,used,avail,mountpoint,type + echo '[zfs]' + zfs get -Hp name,quota,used,avail,mountpoint,type -t filesystem,volume 2>/dev/null || \ + zfs get -Hp name,quota,used,avail,mountpoint,type echo '[df]' - df -lk -F zfs | sed 1d + df -l -k -F zfs 2>/dev/null | sed 1d +fi + +# ZFS arc cache +if type mdb >/dev/null 2>&1 +then + echo '<<>>' + echo "::arc" | mdb -k fi # Processes echo '<<>>' -ps -A -o user,vsz,rss,pcpu,args | sed -e 1d -e 's/ *\([^ ]*\) *\([^ ]*\) *\([^ ]*\) *\([^ ]*\) */(\1,\2,\3,\4) /' +ps -o user,vsz,rss,pcpu,args $pszone | \ +sed -e 1d -e 's/ *\([^ ]*\) *\([^ ]*\) *\([^ ]*\) *\([^ ]*\) */(\1,\2,\3,\4) /' -# You need statgrab to be installed. You'll find it -# here: http://www.i-scream.org/libstatgrab/ -statgrab_vars="const. cpu. disk. general. mem. page. proc. swap. user." -statgrab_sections="proc cpu disk page" -# Only collect net stats if we aren't in a unpriv. zone. -if [ "$platform" != "zone" ]; then - statgrab_vars="$statgrab_vars net." - statgrab_sections="$statgrab_sections net" -fi +# Statgrab +# source: http://www.i-scream.org/libstatgrab/ +# binary: http://www.opencsw.org/ +if type statgrab &>/dev/null +then + statgrab_vars="const. cpu. disk. general. mem. page. swap. user." + statgrab_sections="cpu disk page" -if statgrab $statgrab_vars > /tmp/statgrab.$$ 2>/dev/null + # Only collect net stats in global zone. + if [ "$zonename" == "global" ] then + statgrab_vars="$statgrab_vars net." + statgrab_sections="$statgrab_sections net" + fi + + statgrab $statgrab_vars 1>/tmp/statgrab.$$ 2>/dev/null for s in $statgrab_sections do echo "<<>>" - cat /tmp/statgrab.$$ | grep "^$s\." | cut -d. -f2-99 | sed 's/ *= */ /' + grep "^$s\." /tmp/statgrab.$$ | cut -d. -f2-99 | sed 's/ *= */ /' done - echo '<<>>' - cat /tmp/statgrab.$$ | egrep "^(swap|mem)\." | sed 's/ *= */ /' + # the <<>> info is already provided by <<>> + # However, we make an exception if /usr/bin/top is missing (required by solaris_mem) + if [ ! -x /usr/bin/top ] + then + echo '<<>>' + egrep "^(swap|mem)\." 
/tmp/statgrab.$$ | sed 's/ *= */ /' + fi + [ -f /tmp/statgrab.$$ ] && rm -f /tmp/statgrab.$$ fi -[ -f /tmp/statgrab.$$ ] && rm -f /tmp/statgrab.$$ -# Output of cpu from Linux agent simulated -# (Thanks to Cameron Pierce) + +# /proc/cpu +# Simulated Output of Linux /proc/cpu echo '<<>>' -load=`uptime|sed -e 's;.*average: \([0-9]\{1,\}\.[0-9]\{1,\}\), \([0-9]\{1,\}\.[0-9]\{1,\}\), \([0-9]\{1,\}\.[0-9]\{1,\}\).*;\1 \2 \3;'` -ps=`ps -ef|wc -l|sed -e 's;[ ]\{1,\};;'` -procs=`psrinfo|wc -l|sed -e 's;[ ]\{1,\};;'` +load=$(uptime|sed -e 's;.*average: \([0-9]\{1,\}\.[0-9]\{1,\}\), \([0-9]\{1,\}\.[0-9]\{1,\}\), \([0-9]\{1,\}\.[0-9]\{1,\}\).*;\1 \2 \3;') +ps=$(($(ps -o comm $pszone | wc -l))) +procs=$(($(psrinfo | wc -l))) echo $load 1/$ps $$ $procs -# check zpool status + +# zpool status if [ -x /sbin/zpool ]; then - echo "<<>>" - /sbin/zpool status -x + run_cached -s zpool_status 120 "/sbin/zpool status -x" fi -# Uptime compatible with Linux format (thanks to Daniel Roettgermann) + +# /proc/uptime +# Simulated output of Linux /proc/uptime echo '<<>>' -echo `expr \`nawk 'BEGIN{print srand()}'\` - \`kstat -p unix:0:system_misc:boot_time |awk '{print $2}'\`` +ctime=`nawk 'BEGIN{print srand()}'`; +btime=`kstat '-p' 'unix:::boot_time' 2>&1|grep 'boot_time'|awk '{print $2}'`; +echo $(($ctime - $btime)); -# NTP -# This will only work on Sol10 and above -if `svcs |grep ntp > /dev/null`; then +# NTP +ps -o comm $pszone | grep -w .*ntpd &>/dev/null +if [ $? -eq 0 ] +then echo '<<>>' - # use ntpq instead, xntpdc gives less info. - #xntpdc -p | sed -e 1,2d -e 's/^\(.\)/\1 /' -e 's/^ /%/' - ntpq -p | sed -e 1,2d -e 's/^\(.\)/\1 /' -e 's/^ /%/' + ntpq -np | sed -e 1,2d -e 's/^\(.\)/\1 /' -e 's/^ /%/' +fi - + +# Memory +if [ -x /usr/bin/top ] +then + echo "<<>>" + /usr/bin/top | grep '^Memory:' fi + # TCP Connection stats echo '<<>>' -netstat -nafinet -P tcp | tail +5 | \ -awk '{ c[$7]++; } END { for (x in c) { print x, c[x];}}' +netstat -n -a -f inet -P tcp | tail +5 | \ +nawk '{ c[$7]++; } END { for (x in c) { print x, c[x]; } }' -# Multipathing on Solaris10+ -if [ "$platform" != "zone" ]; then - echo '<<>>' - mpathadm list LU | awk '{if(NR%3==1){dev=$1} - if(NR%3==2){tc=$NF} - if(NR%3==0){printf "%s %s %s\n",dev,tc,$NF}}' -fi - - -if cd $PLUGINSDIR 2>/dev/null +# Multipathing +if type mpathadm &>/dev/null then - for skript in `ls` - do - if [ -x "$skript" ] ; then - ./$skript + if [ "$zonename" == "global" ] + then + echo '<<>>' + mpathadm list LU | nawk '{if(NR%3==1){dev=$1} + if(NR%3==2){tc=$NF} + if(NR%3==0){printf "%s %s %s\n",dev,tc,$NF}}' fi - done fi + # Fileinfo-Check: put patterns for files into $MK_CONFDIR/fileinfo.cfg -echo '<<>>' -if [ -f "$MK_CONFDIR/fileinfo.cfg" ] ; +if [ -f "$MK_CONFDIR/fileinfo.cfg" ] then - echo '' - nawk 'BEGIN{print srand()}' - echo `ls $(cat "$MK_CONFDIR/fileinfo.cfg")`"|"`ls -nl $(cat "$MK_CONFDIR/fileinfo.cfg") | awk '{print $5}'`"|"`truss -v lstat -t lstat ls -l $(cat "$MK_CONFDIR/fileinfo.cfg") 2>&1|grep mt |awk '{print $9}'|cut -d . -f 1` + echo '<<>>' + /usr/bin/perl -e 'print time."\n"' + for file in $(cat "$MK_CONFDIR/fileinfo.cfg") + do + ls $file > /dev/null 2>&1 + if [ $? 
-eq 0 ]; then + ls $file | sort -u | \ + /usr/bin/perl -e ' + while(my $file = <>) { + $file =~ s/\n$//; + ($device, $inode, $mode, $nlink, $uid, $gid, $rdev, $size, $atime, $mtime, $ctime, $blksize, $blocks) = stat($file); + print("$file|$size|$mtime\n"); + }' + else + echo "$file|missing|$(/usr/bin/perl -e 'print time')" + fi + done fi -echo '<<>>' -if cd $LOCALDIR 2>/dev/null +# Libelle Business Shadow +if type trd >/dev/null 2>&1 then - for skript in `ls` - do - if [ -x "$skript" ] ; then - ./$skript - fi - done + echo '<<>>' + trd -s fi + # MK's Remote Plugin Executor -if [ -f "$MK_CONFDIR/mrpe.cfg" ] +if test -f "$MK_CONFDIR/mrpe.cfg" then echo '<<>>' grep -v '^ *#' "$MK_CONFDIR/mrpe.cfg" | grep -v '^ *$' | \ while read descr cmdline do - OUTPUT=`$cmdline` + OUTPUT=$($cmdline) echo "$descr $? $OUTPUT" done fi + +# Local checks +if cd $LOCALDIR 2>/dev/null +then + echo '<<>>' + for skript in $(ls) + do + if [ -x "$skript" ] ; then + ./$skript + fi + done + + # Call some plugins only every X'th second + for skript in [1-9]*/* ; do + if [ -x "$skript" ] ; then + run_cached local_${skript//\//\#} ${skript%/*} "$skript" + fi + done + +fi + +# Plugins +if cd $PLUGINSDIR 2>/dev/null +then + for skript in $(ls) + do + if [ -x "$skript" ] ; then + ./$skript + fi + done + + # Call some plugins only every X'th second + for skript in [1-9]*/* ; do + if [ -x "$skript" ] ; then + run_cached plugins_${skript//\//\#} ${skript%/*} "$skript" + fi + done +fi + diff -Nru check-mk-1.2.2p3/check-mk-agent.spec check-mk-1.2.6p12/check-mk-agent.spec --- check-mk-1.2.2p3/check-mk-agent.spec 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/check-mk-agent.spec 2015-09-21 10:59:53.000000000 +0000 @@ -0,0 +1,103 @@ +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. + +Summary: Check_MK Agent for Linux +Name: check-mk-agent +Version: (automatically inserted) +Release: 1 +License: GPL +Group: System/Monitoring +URL: http://mathias-kettner.de/check_mk.html +Source: check-mk-agent-%{_version}.tar.gz +BuildRoot: %{_topdir}/buildroot +AutoReq: off +AutoProv: off +BuildArch: noarch +Obsoletes: check_mk-agent check_mk_agent + +%description +The Check_MK Agent uses xinetd to provide information about the system +on TCP port 6556. This can be used to monitor the host via Check_MK. 
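(Aside on run_cached, which appears throughout the Linux and Solaris agents in this patch: it serves whatever cache file exists, even a stale one, and kicks off a background refresh only when the cache is older than MAXAGE and no refresh is already running. Below is a compressed Python sketch of that decision logic, assuming the same file naming; the real implementation is the shell function shown in the Solaris agent above, which additionally guards against empty cache files and suspiciously old .new files.)

import os, subprocess, time

def run_cached(cache_file, max_age, cmdline):
    if os.path.exists(cache_file):
        print(open(cache_file).read(), end="")  # serve the cache, even if stale
        if time.time() - os.stat(cache_file).st_mtime <= max_age:
            return  # still fresh, no refresh needed
    if not os.path.exists(cache_file + ".new"):  # no refresh in flight yet
        subprocess.Popen("%s > %s.new && mv %s.new %s"
                         % (cmdline, cache_file, cache_file, cache_file), shell=True)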
+ +%prep +%setup -n check-mk-agent-%{_version} + +%install + +R=$RPM_BUILD_ROOT +rm -rf $R + +# install agent +mkdir -p $R/etc/xinetd.d +install -m 644 xinetd.conf $R/etc/xinetd.d/check_mk +mkdir -p $R/etc/check_mk +mkdir -p $R/usr/bin +install -m 755 check_mk_agent.linux $R/usr/bin/check_mk_agent +install -m 755 check_mk_caching_agent.linux $R/usr/bin/check_mk_caching_agent +install -m 755 waitmax $R/usr/bin +install -m 755 mk-job $R/usr/bin +mkdir -p $R/usr/lib/check_mk_agent/plugins +mkdir -p $R/usr/lib/check_mk_agent/local +mkdir -p $R/var/lib/check_mk_agent +mkdir -p $R/var/lib/check_mk_agent/job + +%clean +rm -rf $RPM_BUILD_ROOT + +%files +%config(noreplace) /etc/xinetd.d/check_mk +/etc/check_mk +/usr/bin/* +/usr/lib/check_mk_agent +/var/lib/check_mk_agent + +%define reload_xinetd if [ -x /etc/init.d/xinetd ] ; then if pgrep -x xinetd >/dev/null ; then echo "Reloading xinetd..." ; /etc/init.d/xinetd reload ; else echo "Starting xinetd..." ; /etc/init.d/xinetd start ; fi ; fi + +%define activate_xinetd if which chkconfig >/dev/null 2>&1 ; then echo "Activating startscript of xinetd" ; chkconfig xinetd on ; fi +%define cleanup_rpmnew if [ -f /etc/xinetd.d/check_mk.rpmnew ] ; then rm /etc/xinetd.d/check_mk.rpmnew ; fi + +%pre +if [ ! -x /etc/init.d/xinetd ] ; then + echo + echo "---------------------------------------------" + echo "WARNING" + echo + echo "This package needs xinetd to be installed. " + echo "Currently you do not have installed xinetd. " + echo "Please install and start xinetd or install " + echo "and setup another inetd manually." + echo "" + echo "It's also possible to monitor via SSH without " + echo "an inetd." + echo "---------------------------------------------" + echo +fi + +%post +%cleanup_rpmnew +%activate_xinetd +%reload_xinetd + +%postun +%reload_xinetd diff -Nru check-mk-1.2.2p3/check_mk-aix_diskiod.php check-mk-1.2.6p12/check_mk-aix_diskiod.php --- check-mk-1.2.2p3/check_mk-aix_diskiod.php 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/check_mk-aix_diskiod.php 2015-09-21 10:59:54.000000000 +0000 @@ -0,0 +1,148 @@ + $n) { + $RRD[$n] = "$RRDFILE[$i]:$DS[$i]:MAX"; + $WARN[$n] = $WARN[$i]; + $CRIT[$n] = $CRIT[$i]; + $MIN[$n] = $MIN[$i]; + $MAX[$n] = $MAX[$i]; + } + + $parts = explode("_", $servicedesc); + $disk = $parts[2]; + + $opt[1] = "--vertical-label 'Throughput (MB/s)' -X0 --title \"Disk throughput $hostname / $disk\" "; + + $def[1] = + "HRULE:0#a0a0a0 ". + # read + "DEF:read=$RRD[read] ". + "CDEF:read_mb=read,1048576,/ ". + "AREA:read_mb#40c080:\"Read \" ". + "GPRINT:read_mb:LAST:\"%8.1lf MB/s last\" ". + "GPRINT:read_mb:AVERAGE:\"%6.1lf MB/s avg\" ". + "GPRINT:read_mb:MAX:\"%6.1lf MB/s max\\n\" "; + + # read average as line in the same graph + if (isset($RRD["read.avg"])) { + $def[1] .= + "DEF:read_avg=${RRD['read.avg']} ". + "CDEF:read_avg_mb=read_avg,1048576,/ ". + "LINE:read_avg_mb#202020 "; + } + + # write + $def[1] .= + "DEF:write=$RRD[write] ". + "CDEF:write_mb=write,1048576,/ ". + "CDEF:write_mb_neg=write_mb,-1,* ". + "AREA:write_mb_neg#4080c0:\"Write \" ". + "GPRINT:write_mb:LAST:\"%6.1lf MB/s last\" ". + "GPRINT:write_mb:AVERAGE:\"%6.1lf MB/s avg\" ". + "GPRINT:write_mb:MAX:\"%6.1lf MB/s max\\n\" ". + ""; + + # show levels for read + if ($WARN['read']) { + $def[1] .= "HRULE:$WARN[read]#ffd000:\"Warning for read at " . sprintf("%6.1f", $WARN[1]) . " MB/s \" "; + $def[1] .= "HRULE:$CRIT[read]#ff0000:\"Critical for read at " . sprintf("%6.1f", $CRIT[1]) . 
" MB/s\\n\" "; + } + + # show levels for write + if ($WARN['write']) { + $def[1] .= "HRULE:-$WARN[write]#ffd000:\"Warning for write at " . sprintf("%6.1f", $WARN[2]) . " MB/s \" "; + $def[1] .= "HRULE:-$CRIT[write]#ff0000:\"Critical for write at " . sprintf("%6.1f", $CRIT[2]) . " MB/s\\n\" "; + } + + # write average + if (isset($DS["write.avg"])) { + $def[1] .= + "DEF:write_avg=${RRD['write.avg']} ". + "CDEF:write_avg_mb=write_avg,1048576,/ ". + "CDEF:write_avg_mb_neg=write_avg_mb,-1,* ". + "LINE:write_avg_mb_neg#202020 "; + } + + # latency + if (isset($RRD["latency"])) { + $opt[] = "--vertical-label 'Latency (ms)' -X0 --title \"Latency $hostname / $disk\" "; + $def[] = "" + . "DEF:latency=$RRD[latency] " + . "AREA:latency#aaccdd:\"Latency\" " + . "LINE:latency#7799aa " + . "GPRINT:latency:LAST:\"%6.1lf ms last\" " + . "GPRINT:latency:AVERAGE:\"%6.1lf ms avg\" " + . "GPRINT:latency:MAX:\"%6.1lf ms max\\n\" " + ; + } + + # IOs per second + if (isset($RRD["ios"])) { + $opt[] = "--vertical-label 'IO Operations / sec' -X0 --title \"IOs/sec $hostname / $disk\" "; + $def[] = "" + . "DEF:ios=$RRD[ios] " + . "AREA:ios#ddccaa:\"ios\" " + . "LINE:ios#aa9977 " + . "GPRINT:ios:LAST:\"%6.1lf/sec last\" " + . "GPRINT:ios:AVERAGE:\"%6.1lf/sec avg\" " + . "GPRINT:ios:MAX:\"%6.1lf/sec max\\n\" " + ; + } + + if (isset($RRD["read_ql"])) { + $opt[] = "--vertical-label 'Queue Length' -X0 -u5 -l-5 --title \"Queue Length $hostname / $disk\" "; + $def[] = "" + . "DEF:read=$RRD[read_ql] " + . "DEF:write=$RRD[write_ql] " + . "CDEF:writen=write,-1,* " + . "HRULE:0#a0a0a0 " + . "AREA:read#669a76 " + . "AREA:writen#517ba5 " + ; + + } + +} + +// legacy version of diskstat +else { + $opt[1] = "--vertical-label 'Througput (MByte/s)' -l0 -u 1 --title \"Disk throughput $hostname / $servicedesc\" "; + + $def[1] = "DEF:kb=$RRDFILE[1]:$DS[1]:AVERAGE " ; + $def[1] .= "CDEF:mb=kb,1024,/ " ; + $def[1] .= "AREA:mb#40c080 " ; + "HRULE:0#a0a0a0 ". + $def[1] .= "GPRINT:mb:LAST:\"%6.1lf MByte/s last\" " ; + $def[1] .= "GPRINT:mb:AVERAGE:\"%6.1lf MByte/s avg\" " ; + $def[1] .= "GPRINT:mb:MAX:\"%6.1lf MByte/s max\\n\" "; +} +?> + diff -Nru check-mk-1.2.2p3/check_mk-aix_memory.php check-mk-1.2.6p12/check_mk-aix_memory.php --- check-mk-1.2.2p3/check_mk-aix_memory.php 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/check_mk-aix_memory.php 2015-06-24 09:48:39.000000000 +0000 @@ -0,0 +1,117 @@ + $n) { + $RRD[$n] = "$RRDFILE[$i]:$DS[$i]:MAX"; + $WARN[$n] = $WARN[$i]; + $CRIT[$n] = $CRIT[$i]; + $MIN[$n] = $MIN[$i]; + $MAX[$n] = $MAX[$i]; +} + +$def[1] = ""; + +if (isset($RRD['pagetables'])) { + $def[1] .= "DEF:pagetables=$RRD[pagetables] " + . "DEF:ram=$RRD[ramused] "; +} +else { + $def[1] .= "DEF:ram=$RRD[ramused] "; +} + +$def[1] .= "DEF:virt=$RRDFILE[3]:$DS[3]:MAX " + . "DEF:swap=$RRDFILE[2]:$DS[2]:MAX " + + . "HRULE:$MAX[3]#000080:\"RAM+SWAP installed\" " + . "HRULE:$MAX[1]#2040d0:\"$maxgb GB RAM installed\" " + . "HRULE:$WARN[3]#FFFF00:\"Warning\" " + . "HRULE:$CRIT[3]#FF0000:\"Critical\" " + + . "'COMMENT:\\n' " + . "AREA:ram#80ff40:\"RAM used \" " + . "GPRINT:ram:LAST:\"%6.0lf MB last\" " + . "GPRINT:ram:AVERAGE:\"%6.0lf MB avg\" " + . "GPRINT:ram:MAX:\"%6.0lf MB max\\n\" " + + . "AREA:swap#008030:\"SWAP used \":STACK " + . "GPRINT:swap:LAST:\"%6.0lf MB last\" " + . "GPRINT:swap:AVERAGE:\"%6.0lf MB avg\" " + . "GPRINT:swap:MAX:\"%6.0lf MB max\\n\" " + ; + + +if (isset($RRD['pagetables'])) { + $def[1] .= "" + . "AREA:pagetables#ff8800:\"Page tables \":STACK " + . "GPRINT:pagetables:LAST:\"%6.0lf MB last\" " + . 
"GPRINT:pagetables:AVERAGE:\"%6.0lf MB avg\" " + . "GPRINT:pagetables:MAX:\"%6.0lf MB max\\n\" " + . "LINE:virt#000000:\"RAM+SWAP+PT used\" " + . "GPRINT:virt:LAST:\"%6.0lf MB last\" " + . "GPRINT:virt:AVERAGE:\"%6.0lf MB avg\" " + . "GPRINT:virt:MAX:\"%6.0lf MB max\\n\" " + ; +} + +else { + $def[1] .= "LINE:virt#000000:\"RAM+SWAP used \" " + . "GPRINT:virt:LAST:\"%6.0lf MB last\" " + . "GPRINT:virt:AVERAGE:\"%6.0lf MB avg\" " + . "GPRINT:virt:MAX:\"%6.0lf MB max\\n\" " + ; +} + +if (isset($RRD['mapped'])) { + $def[1] .= "DEF:mapped=$RRD[mapped] " + . "LINE2:mapped#8822ff:\"Memory mapped \" " + . "GPRINT:mapped:LAST:\"%6.0lf MB last\" " + . "GPRINT:mapped:AVERAGE:\"%6.0lf MB avg\" " + . "GPRINT:mapped:MAX:\"%6.0lf MB max\\n\" " ; +} + +if (isset($RRD['committed_as'])) { + $def[1] .= "DEF:committed=$RRD[committed_as] " + . "LINE2:committed#cc00dd:\"Committed \" " + . "GPRINT:committed:LAST:\"%6.0lf MB last\" " + . "GPRINT:committed:AVERAGE:\"%6.0lf MB avg\" " + . "GPRINT:committed:MAX:\"%6.0lf MB max\\n\" " ; +} + +/* Shared memory is part of RAM. So simply overlay it */ +if (isset($RRD['shared'])) { + $def[1] .= "DEF:shared=$RRD[shared] " + . "AREA:shared#44ccff:\"Shared Memory \" " + . "GPRINT:shared:LAST:\"%6.0lf MB last\" " + . "GPRINT:shared:AVERAGE:\"%6.0lf MB avg\" " + . "GPRINT:shared:MAX:\"%6.0lf MB max\\n\" " ; +} +?> diff -Nru check-mk-1.2.2p3/check_mk-akcp_daisy_temp.php check-mk-1.2.6p12/check_mk-akcp_daisy_temp.php --- check-mk-1.2.2p3/check_mk-akcp_daisy_temp.php 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/check_mk-akcp_daisy_temp.php 2015-06-24 09:48:39.000000000 +0000 @@ -0,0 +1,38 @@ + diff -Nru check-mk-1.2.2p3/check_mk-akcp_sensor_humidity.php check-mk-1.2.6p12/check_mk-akcp_sensor_humidity.php --- check-mk-1.2.2p3/check_mk-akcp_sensor_humidity.php 2013-03-04 11:48:41.000000000 +0000 +++ check-mk-1.2.6p12/check_mk-akcp_sensor_humidity.php 2015-06-24 09:48:39.000000000 +0000 @@ -6,7 +6,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. diff -Nru check-mk-1.2.2p3/check_mk-akcp_sensor_temp.php check-mk-1.2.6p12/check_mk-akcp_sensor_temp.php --- check-mk-1.2.2p3/check_mk-akcp_sensor_temp.php 2013-03-04 11:48:41.000000000 +0000 +++ check-mk-1.2.6p12/check_mk-akcp_sensor_temp.php 2015-06-24 09:48:39.000000000 +0000 @@ -6,7 +6,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. @@ -23,13 +23,7 @@ # to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, # Boston, MA 02110-1301 USA. 
-# +------------------------------------------------------------------+ -# | This file has been contributed by: | -# | | -# | Michael Nieporte | -# +------------------------------------------------------------------+ - -$opt[1] = "--vertical-label \"Value\" -l 0 -u 40 --title \"$servicedesc\" "; +$opt[1] = "--vertical-label \"Celsius\" -l 0 -u 40 --title \"Temperature $servicedesc\" "; $def[1] = "DEF:var1=$RRDFILE[1]:$DS[1]:MAX "; $def[1] .= "AREA:var1#2080ff:\"Temperature\:\" "; @@ -37,6 +31,8 @@ $def[1] .= "LINE1:var1#000080:\"\" "; $def[1] .= "GPRINT:var1:MAX:\"(Max\: %2.0lfC,\" "; $def[1] .= "GPRINT:var1:AVERAGE:\"Avg\: %2.0lfC)\" "; -$def[1] .= "HRULE:$WARN[1]#FFFF00:\"Warning\: $WARN[1]C\" "; -$def[1] .= "HRULE:$CRIT[1]#FF0000:\"Critical\: $CRIT[1]C\" "; +if ($WARN[1] != "") { + $def[1] .= "HRULE:$WARN[1]#FFFF00:\"Warning\: $WARN[1]C\" "; + $def[1] .= "HRULE:$CRIT[1]#FF0000:\"Critical\: $CRIT[1]C\" "; +} ?> diff -Nru check-mk-1.2.2p3/check_mk-allnet_ip_sensoric.humidity.php check-mk-1.2.6p12/check_mk-allnet_ip_sensoric.humidity.php --- check-mk-1.2.2p3/check_mk-allnet_ip_sensoric.humidity.php 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/check_mk-allnet_ip_sensoric.humidity.php 2015-06-24 09:48:39.000000000 +0000 @@ -0,0 +1,38 @@ + diff -Nru check-mk-1.2.2p3/check_mk-allnet_ip_sensoric.pressure.php check-mk-1.2.6p12/check_mk-allnet_ip_sensoric.pressure.php --- check-mk-1.2.2p3/check_mk-allnet_ip_sensoric.pressure.php 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/check_mk-allnet_ip_sensoric.pressure.php 2015-06-24 09:48:39.000000000 +0000 @@ -0,0 +1,34 @@ + diff -Nru check-mk-1.2.2p3/check_mk-allnet_ip_sensoric.temp.php check-mk-1.2.6p12/check_mk-allnet_ip_sensoric.temp.php --- check-mk-1.2.2p3/check_mk-allnet_ip_sensoric.temp.php 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/check_mk-allnet_ip_sensoric.temp.php 2015-06-24 09:48:39.000000000 +0000 @@ -0,0 +1,38 @@ + diff -Nru check-mk-1.2.2p3/check_mk-allnet_ip_sensoric.tension.php check-mk-1.2.6p12/check_mk-allnet_ip_sensoric.tension.php --- check-mk-1.2.2p3/check_mk-allnet_ip_sensoric.tension.php 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/check_mk-allnet_ip_sensoric.tension.php 2015-06-24 09:48:39.000000000 +0000 @@ -0,0 +1,34 @@ + diff -Nru check-mk-1.2.2p3/check_mk-apache_status.php check-mk-1.2.6p12/check_mk-apache_status.php --- check-mk-1.2.2p3/check_mk-apache_status.php 2013-11-05 09:22:37.000000000 +0000 +++ check-mk-1.2.6p12/check_mk-apache_status.php 2015-06-24 09:48:39.000000000 +0000 @@ -6,7 +6,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. @@ -23,99 +23,112 @@ # to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, # Boston, MA 02110-1301 USA. -# Copied most parts from the pnp template check_apachestatus_auto.php. 
+# Modded: Thomas Zyska (tzyska@testo.de) +$RRD = array(); +foreach ($NAME as $i => $n) { + $RRD[$n] = "$RRDFILE[$i]:$DS[$i]:MAX"; + $WARN[$n] = $WARN[$i]; + $CRIT[$n] = $CRIT[$i]; + $MIN[$n] = $MIN[$i]; + $MAX[$n] = $MAX[$i]; + $ACT[$n] = $ACT[$i]; +} + +$total_slots = intval($ACT['TotalSlots']); + +# +# First graph with all data +# +$i=1; +$ds_name[$i] = "Apache Status"; +$def[$i] = ""; +$opt[$i] = " --vertical-label 'Connections' --title '$hostname: $servicedesc' -l 0"; + +$def[$i] .= "DEF:varTotal=${RRD['TotalSlots']} "; +$def[$i] .= "DEF:varOpen=${RRD['OpenSlots']} "; +$def[$i] .= "HRULE:${ACT['TotalSlots']}#000000:\"Total Slots\\: ${total_slots}\\n\" "; +$def[$i] .= "COMMENT:\" \\n\" "; -# -# Worker -# -$i=0; -$def[$i] = ""; -$opt[$i] = " --title 'Worker'"; -$ds_name[$i] = "Workers"; -$color = '#00ff00'; -foreach ($this->DS as $KEY=>$VAL) { - if($VAL['NAME'] == 'IdleWorkers') { - $def[$i] .= rrd::def ("var".$KEY, $VAL['RRDFILE'], $VAL['DS'], "AVERAGE"); - $def[$i] .= rrd::area ("var".$KEY, $color ,rrd::cut($VAL['NAME'],12), 'STACK' ); - $def[$i] .= rrd::gprint ("var".$KEY, array("LAST","MAX","AVERAGE"), "%6.0lf"); - } -} -$color = '#ff0000'; -foreach ($this->DS as $KEY=>$VAL) { - if($VAL['NAME'] == 'BusyWorkers') { - $def[$i] .= rrd::def ("var".$KEY, $VAL['RRDFILE'], $VAL['DS'], "AVERAGE"); - $def[$i] .= rrd::area ("var".$KEY, $color, rrd::cut($VAL['NAME'],12), 'STACK' ); - $def[$i] .= rrd::gprint ("var".$KEY, array("LAST","MAX","AVERAGE"), "%6.0lf"); - } -} - -# -# Slots -# -$i++; -$def[$i] = ""; -$opt[$i] = " --title 'Slots'"; -$ds_name[$i] = "Slots"; -$color = '#ff0000'; foreach ($this->DS as $KEY=>$VAL) { - if($VAL['NAME'] == 'TotalSlots') { - $def[$i] .= rrd::def ("var".$KEY, $VAL['RRDFILE'], $VAL['DS'], "AVERAGE"); - $def[$i] .= rrd::area ("var".$KEY, $color,rrd::cut($VAL['NAME'],12) ); - $def[$i] .= rrd::gprint ("var".$KEY, array("LAST","MAX","AVERAGE"), "%6.0lf"); - } -} -$color = '#00ff00'; -foreach ($this->DS as $KEY=>$VAL) { - if($VAL['NAME'] == 'OpenSlots') { - $def[$i] .= rrd::def ("var".$KEY, $VAL['RRDFILE'], $VAL['DS'], "AVERAGE"); - $def[$i] .= rrd::area ("var".$KEY, $color,rrd::cut($VAL['NAME'],12) ); - $def[$i] .= rrd::gprint ("var".$KEY, array("LAST","MAX","AVERAGE"), "%6.0lf"); + if(preg_match('/^State_/', $VAL['NAME'])) { + $def[$i] .= "DEF:var${KEY}=${VAL['RRDFILE']}:${DS[$VAL['DS']]}:AVERAGE "; + $def[$i] .= "AREA:var${KEY}".rrd::color($KEY).":\"".rrd::cut(substr($VAL['NAME'],6),16) ."\":STACK "; + $def[$i] .= "GPRINT:var${KEY}:LAST:\"Last %5.1lf\" "; + $def[$i] .= "GPRINT:var${KEY}:MAX:\"Max %5.1lf\" "; + $def[$i] .= "GPRINT:var${KEY}:AVERAGE:\"Average %5.1lf\" "; + $def[$i] .= "COMMENT:\"\\n\" "; } } +# get UsedSlots +$def[$i] .= "CDEF:usedslots=varTotal,varOpen,- "; +$def[$i] .= "LINE:usedslots#ffffff:\"UsedSlots \t \" "; +$def[$i] .= "GPRINT:usedslots:LAST:\"Last %5.1lf\" "; +$def[$i] .= "GPRINT:usedslots:MAX:\"Max %5.1lf\" "; +$def[$i] .= "GPRINT:usedslots:AVERAGE:\"Average %5.1lf\\n\" "; +# $def[$i] .= "GPRINT:usedslots:LAST:\"Used %5.0lf of ${total_slots}\" "; +$def[$i] .= "COMMENT:\"\\n\" "; + # -# Requests per Second +# Requests per Second # $i++; -$def[$i] = ""; -$opt[$i] = " --title Requests/sec"; -$ds_name[$i] = "Requests/sec"; -$color = '#000000'; -foreach ($this->DS as $KEY=>$VAL) { - if($VAL['NAME'] == 'ReqPerSec') { - $def[$i] .= rrd::def ("var".$KEY, $VAL['RRDFILE'], $VAL['DS'], "AVERAGE"); - $def[$i] .= rrd::line1 ("var".$KEY, $color, rrd::cut($VAL['NAME'],16), 'STACK' ); - $def[$i] .= rrd::gprint ("var".$KEY, array("LAST","MAX","AVERAGE"), 
"%6.1lf/s"); +if (isset($RRD["ReqPerSec"])) { + $def[$i] = ""; + $opt[$i] = " --title '$hostname: $servicedesc Requests/sec' -l 0"; + $ds_name[$i] = "Requests/sec"; + $color = '#000000'; + foreach ($this->DS as $KEY=>$VAL) { + if($VAL['NAME'] == 'ReqPerSec') { + $def[$i] .= rrd::def ("var".$KEY, $VAL['RRDFILE'], $VAL['DS'], "AVERAGE"); + $def[$i] .= rrd::line1 ("var".$KEY, $color, rrd::cut($VAL['NAME'],16), 'STACK' ); + $def[$i] .= rrd::gprint ("var".$KEY, array("LAST","MAX","AVERAGE"), "%6.1lf/s"); + } } } # -# Bytes per Second +# Bytes per Second # $i++; -$def[$i] = ""; -$opt[$i] = " --title 'Bytes per Second'"; -$ds_name[$i] = "Bytes/sec"; -foreach ($this->DS as $KEY=>$VAL) { - if($VAL['NAME'] == 'BytesPerSec') { - $def[$i] .= rrd::def ("var".$KEY, $VAL['RRDFILE'], $VAL['DS'], "AVERAGE"); - $def[$i] .= rrd::line1 ("var".$KEY, rrd::color($KEY),rrd::cut($VAL['NAME'],16), 'STACK' ); - $def[$i] .= rrd::gprint ("var".$KEY, array("LAST","MAX","AVERAGE"), "%6.1lf %sb/s"); +if (isset($RRD["BytesPerSec"])) { + $def[$i] = ""; + $opt[$i] = " --title '$hostname: $servicedesc Bytes/sec' -l 0"; + $ds_name[$i] = "Bytes/sec"; + foreach ($this->DS as $KEY=>$VAL) { + if($VAL['NAME'] == 'BytesPerSec') { + $def[$i] .= rrd::def ("var".$KEY, $VAL['RRDFILE'], $VAL['DS'], "AVERAGE"); + $def[$i] .= rrd::line1 ("var".$KEY, rrd::color($KEY),rrd::cut($VAL['NAME'],16), 'STACK' ); + $def[$i] .= rrd::gprint ("var".$KEY, array("LAST","MAX","AVERAGE"), "%6.1lf %sb/s"); + } } } - # -# Stats +# all other graphs # $i++; -$def[$i] = ""; -$opt[$i] = " --title 'Worker States'"; -$ds_name[$i] = "Worker States"; foreach ($this->DS as $KEY=>$VAL) { - if(preg_match('/^State_/', $VAL['NAME'])) { - $def[$i] .= rrd::def ("var".$KEY, $VAL['RRDFILE'], $VAL['DS'], "AVERAGE"); - $def[$i] .= rrd::line1 ("var".$KEY, rrd::color($KEY),rrd::cut($VAL['NAME'],16), 'STACK' ); - $def[$i] .= rrd::gprint ("var".$KEY, array("LAST","MAX","AVERAGE"), "%6.0lf".$VAL['UNIT']); - } + if(!preg_match('/(^State_)|(^ReqPerSec)|(^BytesPerSec)|(^Uptime)/', $VAL['NAME'])) { + $def[$i] = ""; + $opt[$i] = " --title '$hostname: Apache - ".$VAL['NAME']." 
' -l 0"; + $ds_name[$i] = $VAL['NAME']; + $def[$i] .= rrd::def ("var".$KEY, $VAL['RRDFILE'], $VAL['DS'], "AVERAGE"); + $def[$i] .= rrd::line1 ("var".$KEY, rrd::color($KEY),rrd::cut($VAL['NAME'],16), 'STACK' ); + $def[$i] .= rrd::gprint ("var".$KEY, array("LAST","MAX","AVERAGE"), "%6.1lf"); + $i++; + } } +# +# Uptime Graph +# +$opt[$i] = "--vertical-label 'Uptime (d)' -l0 --title \"Uptime (time since last reboot)\" "; +$def[$i] = ""; +$def[$i] .= rrd::def("sec", $RRDFILE[1], $DS[1], "MAX"); +$ds_name[$i] = $LABEL[1]; +$def[$i] .= "CDEF:uptime=sec,86400,/ "; +$def[$i] .= "AREA:uptime#80f000:\"Uptime (days)\" "; +$def[$i] .= "LINE:uptime#408000 "; +$def[$i] .= "GPRINT:uptime:LAST:\"%7.2lf %s LAST\" "; +$def[$i] .= "GPRINT:uptime:MAX:\"%7.2lf %s MAX\" "; + ?> diff -Nru check-mk-1.2.2p3/check_mk-apc_humidity.php check-mk-1.2.6p12/check_mk-apc_humidity.php --- check-mk-1.2.2p3/check_mk-apc_humidity.php 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/check_mk-apc_humidity.php 2015-06-24 09:48:39.000000000 +0000 @@ -0,0 +1,38 @@ + diff -Nru check-mk-1.2.2p3/check_mk-apc_inrow_temp.php check-mk-1.2.6p12/check_mk-apc_inrow_temp.php --- check-mk-1.2.2p3/check_mk-apc_inrow_temp.php 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/check_mk-apc_inrow_temp.php 2015-06-24 09:48:39.000000000 +0000 @@ -0,0 +1,38 @@ + diff -Nru check-mk-1.2.2p3/check_mk-apc_symmetra_ext_temp.php check-mk-1.2.6p12/check_mk-apc_symmetra_ext_temp.php --- check-mk-1.2.2p3/check_mk-apc_symmetra_ext_temp.php 2013-03-04 11:48:41.000000000 +0000 +++ check-mk-1.2.6p12/check_mk-apc_symmetra_ext_temp.php 2015-06-24 09:48:39.000000000 +0000 @@ -6,7 +6,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. @@ -31,6 +31,8 @@ $def[1] .= "LINE1:var1#000080:\"\" "; $def[1] .= "GPRINT:var1:MAX:\"(Max\: %2.0lfC,\" "; $def[1] .= "GPRINT:var1:AVERAGE:\"Avg\: %2.0lfC)\" "; -$def[1] .= "HRULE:$WARN[1]#FFFF00:\"Warning\: $WARN[1]C\" "; -$def[1] .= "HRULE:$CRIT[1]#FF0000:\"Critical\: $CRIT[1]C\" "; +if ($WARN[1] != "") { + $def[1] .= "HRULE:$WARN[1]#FFFF00:\"Warning\: $WARN[1]C\" "; + $def[1] .= "HRULE:$CRIT[1]#FF0000:\"Critical\: $CRIT[1]C\" "; +} ?> diff -Nru check-mk-1.2.2p3/check_mk-apc_symmetra.php check-mk-1.2.6p12/check_mk-apc_symmetra.php --- check-mk-1.2.2p3/check_mk-apc_symmetra.php 2013-11-05 09:22:37.000000000 +0000 +++ check-mk-1.2.6p12/check_mk-apc_symmetra.php 2015-06-24 09:48:39.000000000 +0000 @@ -6,7 +6,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. @@ -23,26 +23,28 @@ # to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, # Boston, MA 02110-1301 USA. 
-$opt[1] = "--vertical-label \"Celsius\" -l 0 -u 60 --title \"Battery temperature\" "; - $color = sprintf("ff%02x80", $ACT[2] * 3, $ACT[2] * 2); -$def[1] = "DEF:var1=$RRDFILE[2]:$DS[2]:MAX "; -$def[1] .= "AREA:var1#$color:\"Temperature\:\" "; -$def[1] .= "GPRINT:var1:LAST:\"%2.0lfC\" "; -$def[1] .= "LINE1:var1#800040:\"\" "; -$def[1] .= "GPRINT:var1:MAX:\"(Max\: %2.0lfC,\" "; -$def[1] .= "GPRINT:var1:AVERAGE:\"Avg\: %2.0lfC)\" "; -$def[1] .= "HRULE:$CRIT[2]#FF0000:\"Critical\: $CRIT[2]C\" "; - -$opt[2] = "--vertical-label \"Percent\" -l 0 -u 100 --title \"Battery Capacity\" "; -$def[2] = "DEF:var2=$RRDFILE[1]:$DS[1]:MIN "; -$def[2] .= "AREA:var2#80e0c0:\"Capacity\:\" "; -$def[2] .= "GPRINT:var2:LAST:\"%2.0lf%%\" "; -$def[2] .= "LINE1:var2#008040:\"\" "; -$def[2] .= "GPRINT:var2:MAX:\"(Max\: %2.0lf%%,\" "; -$def[2] .= "GPRINT:var2:AVERAGE:\"Avg\: %2.0lf%%)\" "; -$def[2] .= "HRULE:$CRIT[1]#FF0000:\"Critical\: $CRIT[1]%\" "; +$opt[1] = "--vertical-label \"Percent\" -u 100 --title \"Battery Capacity\" "; +$def[1] = "DEF:var2=$RRDFILE[1]:$DS[1]:MIN "; +$def[1] .= "AREA:var2#80e0c0:\"Capacity\:\" "; +$def[1] .= "GPRINT:var2:LAST:\"%2.0lf%%\" "; +$def[1] .= "LINE1:var2#008040:\"\" "; +$def[1] .= "GPRINT:var2:MIN:\"(Min\: %2.0lf%%,\" "; +$def[1] .= "GPRINT:var2:MAX:\"Max\: %2.0lf%%,\" "; +$def[1] .= "GPRINT:var2:AVERAGE:\"Avg\: %2.0lf%%)\" "; +$def[1] .= "HRULE:$CRIT[1]#FF0000:\"Critical\: $CRIT[1]%\" "; + +$opt[2] = "--vertical-label \"Celsius\" -u 60 --title \"Battery temperature\" "; +$def[2] = "DEF:var1=$RRDFILE[2]:$DS[2]:MAX "; +$def[2] .= "AREA:var1#$color:\"Temperature\:\" "; +$def[2] .= "GPRINT:var1:LAST:\"%2.0lfC\" "; +$def[2] .= "LINE1:var1#800040:\"\" "; +$def[2] .= "GPRINT:var1:MIN:\"(min\: %2.0lfC,\" "; +$def[2] .= "GPRINT:var1:MAX:\"max\: %2.0lfC,\" "; +$def[2] .= "GPRINT:var1:AVERAGE:\"Avg\: %2.0lfC)\" "; +$def[2] .= "HRULE:$CRIT[2]#FF0000:\"Critical\: $CRIT[2]C\" "; + $opt[3] = "--vertical-label \"Ampere\" -l -0 --title \"Currencies\" "; $def[3] = "DEF:batcur=$RRDFILE[3]:$DS[3]:MAX "; @@ -52,7 +54,7 @@ $def[3] .= "LINE:outcur#00c0c0:\"Output Currency\:\" "; $def[3] .= "GPRINT:outcur:LAST:\"%2.0lfA\" "; -$opt[4] = "--vertical-label \"Volt\" -l 0 -u 250 --title \"Output Voltage\" "; +$opt[4] = "--vertical-label \"Volt\" -u 250 --title \"Output Voltage\" "; $def[4] = "DEF:volt=$RRDFILE[4]:$DS[4]:MIN "; $def[4] .= "GPRINT:volt:LAST:\"%2.0lfV\" "; $def[4] .= "LINE1:volt#408040:\"\" "; @@ -60,4 +62,13 @@ $def[4] .= "GPRINT:volt:AVERAGE:\"avg\: %2.0lfV)\" "; $def[4] .= "HRULE:$CRIT[4]#FF0000:\"Critical\: $CRIT[4]V\" "; +$opt[5] = "--vertical-label \"Time\" --title \"Remaining Runtime\" "; +$def[5] = "DEF:minutes=$RRDFILE[6]:$DS[6]:MIN "; +$def[5] .= "GPRINT:minutes:LAST:\"%2.0lfmin\" "; +$def[5] .= "LINE1:minutes#408040:\"\" "; +$def[5] .= "GPRINT:minutes:MIN:\"(min\: %2.0lfmin,\" "; +$def[5] .= "GPRINT:minutes:MAX:\"max\: %2.0lfmin,\" "; +$def[5] .= "GPRINT:minutes:AVERAGE:\"avg\: %2.0lfmin)\" "; +#$def[5] .= "HRULE:$CRIT[4]#FF0000:\"Critical\: $CRIT[4]V\" "; + ?> diff -Nru check-mk-1.2.2p3/check_mk-apc_symmetra_power.php check-mk-1.2.6p12/check_mk-apc_symmetra_power.php --- check-mk-1.2.2p3/check_mk-apc_symmetra_power.php 2013-05-06 09:58:46.000000000 +0000 +++ check-mk-1.2.6p12/check_mk-apc_symmetra_power.php 2015-06-24 09:48:39.000000000 +0000 @@ -6,7 +6,7 @@ # | | |___| | | | __/ (__| < | | | | . 
\ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. diff -Nru check-mk-1.2.2p3/check_mk-apc_symmetra_temp.php check-mk-1.2.6p12/check_mk-apc_symmetra_temp.php --- check-mk-1.2.2p3/check_mk-apc_symmetra_temp.php 2013-03-04 11:48:41.000000000 +0000 +++ check-mk-1.2.6p12/check_mk-apc_symmetra_temp.php 2015-06-24 09:48:39.000000000 +0000 @@ -6,7 +6,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. @@ -31,6 +31,8 @@ $def[1] .= "LINE1:var1#000080:\"\" "; $def[1] .= "GPRINT:var1:MAX:\"(Max\: %2.0lfC,\" "; $def[1] .= "GPRINT:var1:AVERAGE:\"Avg\: %2.0lfC)\" "; -$def[1] .= "HRULE:$WARN[1]#FFFF00:\"Warning\: $WARN[1]C\" "; -$def[1] .= "HRULE:$CRIT[1]#FF0000:\"Critical\: $CRIT[1]C\" "; +if ($WARN[1] != "") { + $def[1] .= "HRULE:$WARN[1]#FFFF00:\"Warning\: $WARN[1]C\" "; + $def[1] .= "HRULE:$CRIT[1]#FF0000:\"Critical\: $CRIT[1]C\" "; +} ?> diff -Nru check-mk-1.2.2p3/check_mk-arcserve_backup.php check-mk-1.2.6p12/check_mk-arcserve_backup.php --- check-mk-1.2.2p3/check_mk-arcserve_backup.php 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/check_mk-arcserve_backup.php 2015-06-24 09:48:39.000000000 +0000 @@ -0,0 +1,61 @@ +DS as $KEY=>$VAL) { + + $vlabel = " "; + $lower = ""; + $upper = ""; + + if ($VAL['UNIT'] == "%%") { + $vlabel = "%"; + $upper = " --upper=101 "; + $lower = " --lower=0 "; + } + else { + $vlabel = $VAL['UNIT']; + } + + $opt[$KEY] = '--vertical-label "' . $vlabel . '" --title "' . $this->MACRO['DISP_HOSTNAME'] . ' / ' . $this->MACRO['DISP_SERVICEDESC'] . '"' . $upper . $lower; + $ds_name[$KEY] = $VAL['LABEL']; + $def[$KEY] = rrd::def ("var1", $VAL['RRDFILE'], $VAL['DS'], "AVERAGE"); + $def[$KEY] .= rrd::gradient("var1", $_start_color[$KEY], $_end_color[$KEY], rrd::cut($VAL['NAME'],16), 20); + $def[$KEY] .= rrd::line1 ("var1", $_LINE ); + $def[$KEY] .= rrd::gprint ("var1", array("LAST","MAX","AVERAGE"), "%3.2lf %S".$VAL['UNIT']); +} +?> diff -Nru check-mk-1.2.2p3/check_mk-arris_cmts_temp.php check-mk-1.2.6p12/check_mk-arris_cmts_temp.php --- check-mk-1.2.2p3/check_mk-arris_cmts_temp.php 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/check_mk-arris_cmts_temp.php 2015-06-24 09:48:39.000000000 +0000 @@ -0,0 +1,38 @@ + diff -Nru check-mk-1.2.2p3/check_mk_base.py check-mk-1.2.6p12/check_mk_base.py --- check-mk-1.2.2p3/check_mk_base.py 2013-11-05 09:23:07.000000000 +0000 +++ check-mk-1.2.6p12/check_mk_base.py 2015-09-21 10:59:54.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. @@ -24,7 +24,26 @@ # to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, # Boston, MA 02110-1301 USA. -import socket, os, sys, time, re, signal, math, tempfile +import sys + +# Remove precompiled directory from sys.path. 
Leaving it in the path +# makes problems when host names (name of precompiled files) are equal +# to python module names like "random" +sys.path.pop(0) + +import socket, os, time, re, signal, math, tempfile + +# PLANNED CLEANUP: +# - central functions for outputting verbose information and bailing +# out because of errors. Remove all explicit "if opt_debug:...". +# Note: these new functions should force a flush() if TTY is not +# a terminal (so that error messages arrive the CMC in time) +# - --debug should *only* influence exception handling +# - introduce second levels of verbosity, that takes over debug output +# from --debug +# - remove all remaining print commands and use sys.stdout.write instead +# or define a new output function +# - Also create a function bail_out() for printing and error and exiting # Python 2.3 does not have 'set' in normal namespace. # But it can be imported from 'sets' @@ -75,14 +94,32 @@ def tty(fg=-1, bg=-1, attr=-1): return '' +# Output text if opt_verbose is set (-v). Adds no linefeed +def verbose(text): + if opt_verbose: + try: + sys.stdout.write(text) + sys.stdout.flush() + except: + pass # avoid exception on broken pipe (e.g. due to | head) + +# Output text if, opt_verbose >= 2 (-vv). +def vverbose(text): + if opt_verbose >= 2: + verbose(text) + +# Output text to sys.stderr with a linefeed added. Exists +# afterwards with and exit code of 3, in order to be +# compatible with monitoring plugin API. +def bail_out(reason): + raise MKBailOut(reason) + # global variables used to cache temporary values -g_dns_cache = {} g_infocache = {} # In-memory cache of host info. g_agent_already_contacted = {} # do we have agent data from this host? g_counters = {} # storing counters of one host g_hostname = "unknown" # Host currently being checked g_aggregated_service_results = {} # store results for later submission -compiled_regexes = {} # avoid recompiling regexes nagios_command_pipe = None # Filedescriptor to open nagios command pipe. 
checkresult_file_fd = None checkresult_file_path = None @@ -90,6 +127,7 @@ g_single_oid_cache = {} g_broken_snmp_hosts = set([]) g_broken_agent_hosts = set([]) +g_timeout = None # variables set later by getopt @@ -103,6 +141,12 @@ opt_use_snmp_walk = False opt_cleanup_autochecks = False fake_dns = False +opt_keepalive = False +opt_cmc_relfilename = "config" +opt_keepalive_fd = None +opt_oids = [] +opt_extra_oids = [] +opt_force = False # register SIGINT handler for consistent CTRL+C handling def interrupt_handler(signum, frame): @@ -116,12 +160,17 @@ def __str__(self): return self.reason +class MKBailOut(Exception): + def __init__(self, reason): + self.reason = reason + def __str__(self): + return self.reason + class MKCounterWrapped(Exception): - def __init__(self, countername, reason): - self.name = countername + def __init__(self, reason): self.reason = reason def __str__(self): - return '%s: %s' % (self.name, self.reason) + return self.reason class MKAgentError(Exception): def __init__(self, reason): @@ -135,6 +184,9 @@ def __str__(self): return self.reason +class MKSkipCheck(Exception): + pass + # +----------------------------------------------------------------------+ # | _ _ _ | # | / \ __ _ __ _ _ __ ___ __ _ __ _| |_(_) ___ _ __ | @@ -199,7 +251,7 @@ text = " *** ".join([ item + " " + output for itemstatus, item, output in outputlist ]) if not opt_dont_submit: - submit_to_nagios(aggr_hostname, servicedesc, status, text) + submit_to_core(aggr_hostname, servicedesc, status, text) if opt_verbose: color = { 0: tty_green, 1: tty_yellow, 2: tty_red, 3: tty_magenta }[status] @@ -217,7 +269,7 @@ return if not opt_dont_submit: - submit_to_nagios(summary_hostname(hostname), "Check_MK", status, output) + submit_to_core(summary_hostname(hostname), "Check_MK", status, output) if opt_verbose: color = { 0: tty_green, 1: tty_yellow, 2: tty_red, 3: tty_magenta }[status] @@ -246,14 +298,13 @@ # If the host is a cluster, the information is fetched from all its # nodes and then merged per-check-wise. -# For cluster checks we do not have an ip address from Nagios -# We need to do DNS-lookups in that case :-(. We could avoid that at -# least in case of precompiled checks. On the other hand, cluster checks -# usually use existing cache files, if check_mk is not misconfigured, +# For cluster checks the monitoring core does not provide the IP addresses +# of the node. We need to do DNS-lookups in that case :-(. We could avoid +# that at least in case of precompiled checks. On the other hand, cluster +# checks usually use existing cache files, if check_mk is not misconfigured, # and thus do no network activity at all... def get_host_info(hostname, ipaddress, checkname): - # If the check wants the node info, we add an additional # column (as the first column) with the name of the node # or None (in case of non-clustered nodes). One problem arises, @@ -268,38 +319,50 @@ exception_texts = [] global opt_use_cachefile opt_use_cachefile = True - is_snmp_error = False + is_snmp_error = False for node in nodes: # If an error with the agent occurs, we still can (and must) - # try the other node. + # try the other nodes.
try:
                ipaddress = lookup_ipaddress(node)
                new_info = get_realhost_info(node, ipaddress, checkname, cluster_max_cachefile_age)
-                if add_nodeinfo:
-                    new_info = [ [node] + line for line in new_info ]
-                info += new_info
+                if new_info != None:
+                    if add_nodeinfo:
+                        new_info = [ [node] + line for line in new_info ]
+                    info += new_info
+                    at_least_one_without_exception = True
+            except MKSkipCheck:
                 at_least_one_without_exception = True
             except MKAgentError, e:
-                if str(e) != "": # only first error contains text
                     exception_texts.append(str(e))
-                g_broken_agent_hosts.add(node)
+                if str(e) != "": # only first error contains text
+                    exception_texts.append(str(e))
+                    g_broken_agent_hosts.add(node)
             except MKSNMPError, e:
-                if str(e) != "": # only first error contains text
-                    exception_texts.append(str(e))
-                g_broken_snmp_hosts.add(node)
-		is_snmp_error = True
+                if str(e) != "": # only first error contains text
+                    exception_texts.append(str(e))
+                    g_broken_snmp_hosts.add(node)
+                is_snmp_error = True
         if not at_least_one_without_exception:
-	    if is_snmp_error:
+            if is_snmp_error:
                 raise MKSNMPError(", ".join(exception_texts))
             else:
                 raise MKAgentError(", ".join(exception_texts))
-        return info
+
     else:
         info = get_realhost_info(hostname, ipaddress, checkname, check_max_cachefile_age)
-        if add_nodeinfo:
-            return [ [ None ] + line for line in info ]
-        else:
-            return info
+        if info != None and add_nodeinfo:
+            info = [ [ None ] + line for line in info ]
+
+    # Now some check types define a parse function. In that case the
+    # info is automatically parsed by that function - on the fly.
+    if checkname in check_info: # e.g. not the case for cpu (check is cpu.loads)
+        parse_function = check_info[checkname]["parse_function"]
+        if parse_function:
+            parsed = parse_function(info)
+            return parsed
+
+    return info
+

 # Gets info from a real host (not a cluster). There are three possible
 # ways: TCP, SNMP and external command. This function raises
@@ -311,9 +374,13 @@
 # might have to be fetched via SNMP *and* TCP for one host
 # (even if this is unlikely)
 #
+# What makes the thing even more tricky is the new piggyback
+# function, which allows one host's agent to send data for another
+# host.
+#
 # This function assumes that each check type is queried
 # only once for each host.
-def get_realhost_info(hostname, ipaddress, check_type, max_cache_age):
+def get_realhost_info(hostname, ipaddress, check_type, max_cache_age, ignore_check_interval = False):
     info = get_cached_hostinfo(hostname)
     if info and info.has_key(check_type):
         return info[check_type]
@@ -325,56 +392,256 @@
     # snmp info for "foo", not for "foo.bar".
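    # For example, a base check and its sub-checks are served from the same
    # SNMP table:
    #
    #     check_type.split(".")[0]   # "foo.bar" -> "foo"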
oid_info = snmp_info.get(check_type.split(".")[0])
     if oid_info:
-        content = read_cache_file(cache_relpath, max_cache_age)
+        cache_path = tcp_cache_dir + "/" + cache_relpath
+        check_interval = check_interval_of(hostname, check_type)
+        if not ignore_check_interval \
+           and not opt_dont_submit \
+           and check_interval is not None and os.path.exists(cache_path) \
+           and cachefile_age(cache_path) < check_interval * 60:
+            # cache file is newer than check_interval, skip this check
+            raise MKSkipCheck()
+
+        try:
+            content = read_cache_file(cache_relpath, max_cache_age)
+        except:
+            if simulation_mode and not opt_no_cache:
+                return # Simply ignore missing SNMP cache files
+            raise
+
         if content:
             return eval(content)
         # Not cached -> need to get info via SNMP

         # Try to contact host only once
-	if hostname in g_broken_snmp_hosts:
-	    raise MKSNMPError("")
+        if hostname in g_broken_snmp_hosts:
+            raise MKSNMPError("")

         # New in 1.1.3: oid_info can now be a list: Each element
         # of that list is interpreted as one real oid_info, fetches
         # a separate snmp table. The overall result is then the list
         # of these results.
-        try:
-            if type(oid_info) == list:
-                table = [ get_snmp_table(hostname, ipaddress, entry) for entry in oid_info ]
-                # if at least one query fails, we discard the hole table
-                if None in table:
-                    table = None
-            else:
-                table = get_snmp_table(hostname, ipaddress, oid_info)
-        except:
-            if opt_debug:
-                raise
-            else:
-                raise MKGeneralException("Incomplete or invalid response from SNMP agent")
-
+        if type(oid_info) == list:
+            table = [ get_snmp_table(hostname, ipaddress, check_type, entry) for entry in oid_info ]
+            # if at least one query fails, we discard the whole table
+            if None in table:
+                table = None
+        else:
+            table = get_snmp_table(hostname, ipaddress, check_type, oid_info)
         store_cached_checkinfo(hostname, check_type, table)
-        write_cache_file(cache_relpath, repr(table) + "\n")
+        # only write cache file in non-interactive mode. Otherwise it would
+        # prevent the regular checking from getting status updates during
+        # interactive debugging, for example with cmk -nv.
+        if not opt_dont_submit:
+            write_cache_file(cache_relpath, repr(table) + "\n")
         return table

+    # Note: even for SNMP-tagged hosts TCP-based checks can be used, if
+    # the data comes in via piggyback!
+
     # No SNMP check. Then we must contact the check_mk_agent. Have we already
-    # to get data from the agent? If yes we must not do that again! Even if
-    # no cache file is present
+    # tried to get data from the agent? If yes we must not do that again! Even if
+    # no cache file is present.
     if g_agent_already_contacted.has_key(hostname):
-	raise MKAgentError("")
+        raise MKAgentError("")

     g_agent_already_contacted[hostname] = True
     store_cached_hostinfo(hostname, []) # leave empty info in case of error

-    output = get_agent_info(hostname, ipaddress, max_cache_age)
-    if len(output) == 0:
+    # If we have piggyback data for that host from another host,
+    # then we prepend this data and also tolerate a failing
+    # normal Check_MK Agent access.
+    piggy_output = get_piggyback_info(hostname) + get_piggyback_info(ipaddress)
+
+    output = ""
+    agent_failed = False
+    if is_tcp_host(hostname):
+        try:
+            output = get_agent_info(hostname, ipaddress, max_cache_age)
+        except MKCheckTimeout:
+            raise
+
+        except Exception, e:
+            agent_failed = True
+            # Remove piggybacked information from the host (in the
+            # role of the pig here). Why? We definitely haven't
+            # reached that host so its data from the last time is
+            # not valid any more.
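            # For illustration (hypothetical host name): piggybacked data
            # arrives in the agent output wrapped in four-bracket markers and
            # is stored for the named host until an empty marker resets it:
            #
            #     <<<<esx-vm01>>>>
            #     <<<df>>>
            #     /dev/sda1  ext3  1024000  512000  50%
            #     <<<<>>>>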
+ remove_piggyback_info_from(hostname) + + if not piggy_output: + raise + + output += piggy_output + + if len(output) == 0 and is_tcp_host(hostname): raise MKAgentError("Empty output from agent") + elif len(output) == 0: + return elif len(output) < 16: raise MKAgentError("Too short output from agent: '%s'" % output) - lines = [ l.strip() for l in output.split('\n') ] - info = parse_info(lines) + info, piggybacked, persisted = parse_info(output.split("\n"), hostname) + store_piggyback_info(hostname, piggybacked) + store_persisted_info(hostname, persisted) store_cached_hostinfo(hostname, info) - return info.get(check_type, []) # return only data for specified check + + # Add information from previous persisted agent outputs, if those + # sections are not available in the current output + add_persisted_info(hostname, info) + + # If the agent has failed and the information we seek is + # not contained in the piggy data, raise an exception + if check_type not in info: + if agent_failed: + raise MKAgentError("Cannot get information from agent, processing only piggyback data.") + else: + return [] + + return info[check_type] # return only data for specified check + +def store_persisted_info(hostname, persisted): + dir = var_dir + "/persisted/" + if persisted: + if not os.path.exists(dir): + os.makedirs(dir) + file(dir + hostname, "w").write("%r\n" % persisted) + verbose("Persisted sections %s.\n" % ", ".join(persisted.keys())) + +def add_persisted_info(hostname, info): + file_path = var_dir + "/persisted/" + hostname + try: + persisted = eval(file(file_path).read()) + except: + return + + now = time.time() + modified = False + for section, (persisted_until, persisted_section) in persisted.items(): + if now < persisted_until or opt_force: + if section not in info: + info[section] = persisted_section + vverbose("Added persisted section %s.\n" % section) + else: + verbose("Persisted section %s is outdated by %d seconds. Deleting it.\n" % ( + section, now - persisted_until)) + del persisted[section] + modified = True + + if not persisted: + os.remove(file_path) + elif modified: + store_persisted_info(hostname, persisted) + + +def get_piggyback_files(hostname): + files = [] + dir = tmp_dir + "/piggyback/" + hostname + if os.path.exists(dir): + for sourcehost in os.listdir(dir): + if sourcehost not in ['.', '..'] \ + and not sourcehost.startswith(".new."): + file_path = dir + "/" + sourcehost + + if cachefile_age(file_path) > piggyback_max_cachefile_age: + verbose("Piggyback file %s is outdated by %d seconds. Deleting it.\n" % + (file_path, cachefile_age(file_path) - piggyback_max_cachefile_age)) + os.remove(file_path) + continue + + files.append((sourcehost, file_path)) + return files + + +def has_piggyback_info(hostname): + return get_piggyback_files(hostname) != [] + + +def get_piggyback_info(hostname): + output = "" + if not hostname: + return output + for sourcehost, file_path in get_piggyback_files(hostname): + verbose("Using piggyback information from host %s.\n" % sourcehost) + output += file(file_path).read() + return output + + +def store_piggyback_info(sourcehost, piggybacked): + piggyback_path = tmp_dir + "/piggyback/" + for backedhost, lines in piggybacked.items(): + verbose("Storing piggyback data for %s.\n" % backedhost) + dir = piggyback_path + backedhost + if not os.path.exists(dir): + os.makedirs(dir) + out = file(dir + "/.new." + sourcehost, "w") + for line in lines: + out.write("%s\n" % line) + os.rename(dir + "/.new." 
+ sourcehost, dir + "/" + sourcehost)
+
+    # Remove piggybacked information that is not
+    # being sent this turn
+    remove_piggyback_info_from(sourcehost, keep=piggybacked.keys())
+
+
+def remove_piggyback_info_from(sourcehost, keep=[]):
+    removed = 0
+    piggyback_path = tmp_dir + "/piggyback/"
+    if not os.path.exists(piggyback_path):
+        return # Nothing to do
+
+    for backedhost in os.listdir(piggyback_path):
+        if backedhost not in ['.', '..'] and backedhost not in keep:
+            path = piggyback_path + backedhost + "/" + sourcehost
+            if os.path.exists(path):
+                verbose("Removing stale piggyback file %s\n" % path)
+                os.remove(path)
+                removed += 1
+
+            # Remove directory if empty
+            try:
+                os.rmdir(piggyback_path + backedhost)
+            except:
+                pass
+    return removed
+
+def translate_piggyback_host(sourcehost, backedhost):
+    translation = get_piggyback_translation(sourcehost)
+
+    # 1. Case conversion
+    caseconf = translation.get("case")
+    if caseconf == "upper":
+        backedhost = backedhost.upper()
+    elif caseconf == "lower":
+        backedhost = backedhost.lower()
+
+    # 2. Drop domain part (not applied to IP addresses!)
+    if translation.get("drop_domain") and not backedhost[0].isdigit():
+        backedhost = backedhost.split(".", 1)[0]
+
+    # To make it possible to match umlauts we need to change the backedhost
+    # to a unicode string which can then be matched with regexes etc.
+    # We assume the incoming name is correctly encoded in UTF-8
+    backedhost = backedhost.decode('utf-8')
+
+    # 3. Regular expression conversion
+    if "regex" in translation:
+        regex, subst = translation.get("regex")
+        if not regex.endswith('$'):
+            regex += '$'
+        rcomp = get_regex(regex)
+        mo = rcomp.match(backedhost)
+        if mo:
+            backedhost = subst
+            for nr, text in enumerate(mo.groups()):
+                backedhost = backedhost.replace("\\%d" % (nr+1), text)
+
+    # 4. Explicit mapping
+    for from_host, to_host in translation.get("mapping", []):
+        if from_host == backedhost:
+            backedhost = to_host
+            break
+
+    return backedhost.encode('utf-8') # change back to UTF-8 encoded string


 def read_cache_file(relpath, max_cache_age):
@@ -388,17 +655,18 @@
         result = f.read(10000000)
         f.close()
         if len(result) > 0:
-            if opt_debug:
-                sys.stderr.write("Using data from cachefile %s.\n" % cachefile)
+            verbose("Using data from cachefile %s.\n" % cachefile)
             return result
     elif opt_debug:
-        sys.stderr.write("Skipping cache file %s: Too old\n" % cachefile)
+        sys.stderr.write("Skipping cache file %s: Too old "
+                         "(age is %d sec, allowed is %d sec)\n" %
+                         (cachefile, cachefile_age(cachefile), max_cache_age))

     if simulation_mode and not opt_no_cache:
         raise MKGeneralException("Simulation mode and no cachefile present.")

     if opt_no_tcp:
-        raise MKGeneralException("Host is unreachable")
+        raise MKGeneralException("Host is unreachable, no usable cache file present")
 #Cache file '%s' missing or too old. TCP disallowed by you." % cachefile)

@@ -421,7 +689,7 @@

 # Get information about a real host (not a cluster node) via TCP
-# or by executing an external programm. ipaddress may be None.
+# or by executing an external program. ipaddress may be None.
 # In that case it will be looked up if needed. Also caching will
 # be handled here
 def get_agent_info(hostname, ipaddress, max_cache_age):
@@ -431,9 +699,9 @@
     if hostname in g_broken_agent_hosts:
         raise MKAgentError("")

-    # If the host ist listed in datasource_programs the data from
+    # If the host is listed in datasource_programs the data from
     # that host is retrieved by calling an external program (such
-    # as ssh or rsy) instead of a TCP connect.
+    # as ssh or rsh or agent_vsphere) instead of a TCP connect.
     commandline = get_datasource_program(hostname, ipaddress)
     if commandline:
         output = get_agent_info_program(commandline)
@@ -448,56 +716,75 @@
     return output

-# Get data in case of external programm
+# Get data in case of external program
 def get_agent_info_program(commandline):
+    exepath = commandline.split()[0] # for error message, hide options!
+
+    import subprocess
     if opt_verbose:
-        sys.stderr.write("Calling external programm %s\n" % commandline)
+        sys.stderr.write("Calling external program %s\n" % commandline)
     try:
-        sout = os.popen(commandline + " 2>/dev/null")
-        output = sout.read()
-        exitstatus = sout.close()
+        p = subprocess.Popen(commandline, shell = True, stdout = subprocess.PIPE, stderr = subprocess.PIPE)
+        stdout, stderr = p.communicate()
+        exitstatus = p.returncode
     except Exception, e:
-        raise MKAgentError("Could not execute '%s': %s" % (commandline, e))
+        raise MKAgentError("Could not execute '%s': %s" % (exepath, e))

     if exitstatus:
-        if exitstatus >> 8 == 127:
-            raise MKAgentError("Programm '%s' not found (exit code 127)" % (commandline,))
+        if exitstatus == 127:
+            raise MKAgentError("Program '%s' not found (exit code 127)" % exepath)
         else:
-            raise MKAgentError("Programm '%s' exited with code %d" % (commandline, exitstatus >> 8))
-    return output
+            raise MKAgentError("Agent exited with code %d: %s" % (exitstatus, stderr))
+    return stdout

 # Get data in case of TCP
-def get_agent_info_tcp(hostname, ipaddress):
+def get_agent_info_tcp(hostname, ipaddress, port = None):
     if not ipaddress:
         raise MKGeneralException("Cannot contact agent: host '%s' has no IP address." % hostname)
+
+    if port is None:
+        port = agent_port_of(hostname)
+
     try:
         s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
         try:
             s.settimeout(tcp_connect_timeout)
         except:
             pass # some old Python versions lack settimeout(). Better ignore than fail
-        s.connect((ipaddress, agent_port_of(hostname)))
+        vverbose("Connecting via TCP to %s:%d.\n" % (ipaddress, port))
+        s.connect((ipaddress, port))
+        # Immediately close sending direction. We do not send any data
+        # s.shutdown(socket.SHUT_WR)
         try:
             s.setblocking(1)
         except:
             pass
         output = ""
-        while True:
-            out = s.recv(4096, socket.MSG_WAITALL)
-            if out and len(out) > 0:
-                output += out
-            else:
-                break
+        try:
+            while True:
+                out = s.recv(4096, socket.MSG_WAITALL)
+                if out and len(out) > 0:
+                    output += out
+                else:
+                    break
+        except Exception, e:
+            # Python seems to skip closing the socket under certain
+            # conditions, leaving open filedescriptors and sockets in
+            # CLOSE_WAIT. This happens on a timeout (ALERT signal)
+            s.close()
+            raise
+
         s.close()
         if len(output) == 0: # may be caused by xinetd not allowing our address
-            raise MKAgentError("Empty output from agent at TCP port %d" %
-                      agent_port_of(hostname))
+            raise MKAgentError("Empty output from agent at TCP port %d" % port)
         return output
     except MKAgentError, e:
         raise
+    except MKCheckTimeout:
+        raise
     except Exception, e:
         raise MKAgentError("Cannot get data from TCP port %s:%d: %s" %
-                           (ipaddress, agent_port_of(hostname), e))
+                           (ipaddress, port, e))

 # Gets all information about one host so far cached.
@@ -525,19 +812,42 @@
     else:
         g_infocache[hostname] = { checkname: table }

-# Split agent output in chunks, splits lines by whitespaces
-def parse_info(lines):
+# Split agent output into sections, splitting lines by whitespace.
+# Returns a triple of:
+# 1. A dictionary from "sectionname" to a list of rows
+# 2. piggy-backed data for other hosts
+# 3.
Sections to be persisted for later usage
+def parse_info(lines, hostname):
     info = {}
-    chunk = []
-    chunkoptions = {}
+    piggybacked = {} # unparsed info for other hosts
+    persist = {} # handle sections with option persist(...)
+    host = None
+    section = []
+    section_options = {}
     separator = None
+    encoding = None
+    to_unicode = False
     for line in lines:
-        if line[:3] == '<<<' and line[-3:] == '>>>':
-            chunkheader = line[3:-3]
-            # chunk header has format <<<name:opt1(args):opt2:...>>>
-            headerparts = chunkheader.split(":")
-            chunkname = headerparts[0]
-            chunkoptions = {}
+        line = line.rstrip("\r")
+        stripped_line = line.strip()
+        if stripped_line[:4] == '<<<<' and stripped_line[-4:] == '>>>>':
+            host = stripped_line[4:-4]
+            if not host:
+                host = None
+            else:
+                host = translate_piggyback_host(hostname, host)
+                if host == hostname:
+                    host = None # unpiggybacked "normal" host
+        elif host: # processing data for another host
+            piggybacked.setdefault(host, []).append(line)
+
+        # Found normal section header
+        # section header has format <<<name:opt1(args):opt2:...>>>
+        elif stripped_line[:3] == '<<<' and stripped_line[-3:] == '>>>':
+            section_header = stripped_line[3:-3]
+            headerparts = section_header.split(":")
+            section_name = headerparts[0]
+            section_options = {}
             for o in headerparts[1:]:
                 opt_parts = o.split("(")
                 opt_name = opt_parts[0]
@@ -545,19 +855,52 @@
                     opt_args = opt_parts[1][:-1]
                 else:
                     opt_args = None
-                chunkoptions[opt_name] = opt_args
+                section_options[opt_name] = opt_args

-            chunk = info.get(chunkname, None)
-            if chunk == None: # chunk appears in output for the first time
-                chunk = []
-                info[chunkname] = chunk
+            section = info.get(section_name, None)
+            if section == None: # section appears in output for the first time
+                section = []
+                info[section_name] = section
             try:
-                separator = chr(int(chunkoptions["sep"]))
+                separator = chr(int(section_options["sep"]))
             except:
                 separator = None
-        elif line != '':
-            chunk.append(line.split(separator))
-    return info
+
+            # Split off persisted sections for server-side caching
+            if "persist" in section_options:
+                until = int(section_options["persist"])
+                persist[section_name] = ( until, section )
+
+            # The section data might have a different encoding
+            encoding = section_options.get("encoding")
+
+            # Make the contents of the section unicode strings or UTF-8
+            # encoded bytestrings (as was always done in the past)
+            try:
+                to_unicode = inv_info.get(section_name, {}).get('unicode', False)
+            except NameError:
+                pass # e.g. in precompiled mode we have no inv_info. That's ok.
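            # An illustrative section header combining the options parsed
            # above (all values made up):
            #
            #     <<<win_services:sep(9):persist(1405395958):encoding(latin1)>>>
            #
            # sep(9) splits data lines at tab characters, persist(...) keeps
            # the section until the given UNIX timestamp, and encoding(...)
            # declares the charset of the following lines; "nostrip" (handled
            # below) preserves leading whitespace.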
+
+        elif stripped_line != '':
+            if "nostrip" not in section_options:
+                line = stripped_line
+
+            if encoding:
+                try:
+                    decoded_line = line.decode(encoding)
+                    if not to_unicode:
+                        line = decoded_line.encode('utf-8')
+                except:
+                    pass
+            elif to_unicode:
+                try:
+                    line = line.decode('utf-8')
+                except:
+                    line = line.decode('latin1')
+
+            section.append(line.split(separator))
+
+    return info, piggybacked, persist


 def cachefile_age(filename):
@@ -577,6 +920,17 @@
 # |                                                                      |
 # +----------------------------------------------------------------------+

+SKIP  = None
+RAISE = False
+ZERO  = 0.0
+g_last_counter_wrap = None
+
+def reset_wrapped_counters():
+    global g_last_counter_wrap
+    g_last_counter_wrap = None
+
+def last_counter_wrap():
+    return g_last_counter_wrap

 # Variable                 time_t    value
 # netctr.eth.tx_collisions 112354335 818
@@ -595,6 +949,49 @@
     except:
         g_counters = {}

+def save_counters(hostname):
+    if not opt_dont_submit and not i_am_root(): # never write counters as root
+        global g_counters
+        filename = counters_directory + "/" + hostname
+        try:
+            if not os.path.exists(counters_directory):
+                os.makedirs(counters_directory)
+            file(filename, "w").write("%r\n" % g_counters)
+        except Exception, e:
+            raise MKGeneralException("User %s cannot write to %s: %s" % (username(), filename, e))
+
+
+# Deletes counters from g_counters that match the given pattern and are older than x seconds
+def clear_counters(pattern, older_than):
+    global g_counters
+    counters_to_delete = []
+    now = time.time()
+
+    for name, (timestamp, value) in g_counters.items():
+        if name.startswith(pattern):
+            if now > timestamp + older_than:
+                counters_to_delete.append(name)
+
+    for name in counters_to_delete:
+        del g_counters[name]
+
+
+def get_rate(countername, this_time, this_val, allow_negative=False, onwrap=SKIP):
+    try:
+        timedif, rate = get_counter(countername, this_time, this_val, allow_negative)
+        return rate
+    except MKCounterWrapped, e:
+        if onwrap is RAISE:
+            raise
+        elif onwrap is SKIP:
+            global g_last_counter_wrap
+            g_last_counter_wrap = e
+            return 0.0
+        else:
+            return onwrap
+
+
+# Legacy. Do not use this function in checks directly any more!
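# New checks are expected to call get_rate() above instead, e.g.
# (illustrative counter name):
#
#     rate = get_rate("if.%s.in_octets" % item, this_time, octets)
#     # first call: returns 0.0 and records the wrapped counter (onwrap=SKIP);
#     # subsequent calls: octets per second since the last run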
def get_counter(countername, this_time, this_val, allow_negative=False):
    global g_counters
@@ -605,7 +1002,7 @@
        # Do not suppress this check on check_mk -nv
        if opt_dont_submit:
            return 1.0, 0.0
-        raise MKCounterWrapped(countername, 'Counter initialization')
+        raise MKCounterWrapped('Counter initialization')

    last_time, last_val = g_counters.get(countername)
    timedif = this_time - last_time
@@ -615,7 +1012,7 @@
        # Do not suppress this check on check_mk -nv
        if opt_dont_submit:
            return 1.0, 0.0
-        raise MKCounterWrapped(countername, 'No time difference')
+        raise MKCounterWrapped('No time difference')

    # update counter for next time
    g_counters[countername] = (this_time, this_val)
@@ -629,7 +1026,7 @@
        # Do not suppress this check on check_mk -nv
        if opt_dont_submit:
            return 1.0, 0.0
-        raise MKCounterWrapped(countername, 'Value overflow')
+        raise MKCounterWrapped('Value overflow')

    per_sec = float(valuedif) / timedif
    return timedif, per_sec
@@ -640,14 +1037,14 @@
# this_time: timestamp of new value
# backlog: averaging horizon in minutes
# initialize_zero: assume average of 0.0 when no previous average is stored
-def get_average(itemname, this_time, this_val, backlog, initialize_zero = True):
+def get_average(itemname, this_time, this_val, backlog_minutes, initialize_zero = True):

    # first call: take current value as average or assume 0.0
    if not itemname in g_counters:
        if initialize_zero:
            this_val = 0
        g_counters[itemname] = (this_time, this_val)
-        return 1.0, this_val # avoid time diff of 0.0 -> avoid division by zero
+        return this_val # avoid time diff of 0.0 -> avoid division by zero

    # Get previous value and time difference
    last_time, last_val = g_counters.get(itemname)
@@ -659,40 +1056,24 @@
        timedif = 0

    # Compute the weight: We do it like this: First we assume that
-    # we get one sample per minute. And that backlog is the number
+    # we get one sample per minute. And that backlog_minutes is the number
    # of minutes we should average over. Then we want the values of
    # the last backlog_minutes to make up a fraction of W% of the
    # result, with everything older making up the rest (1-W%).
-    # Then the weight can be computed as backlog'th root of 1-W
+    # Then the weight can be computed as backlog_minutes'th root of 1-W
    percentile = 0.50

-    weight_per_minute = (1 - percentile) ** (1.0 / backlog)
+    weight_per_minute = (1 - percentile) ** (1.0 / backlog_minutes)

    # now let's compute the weight per second.
This is done by raising the per-minute weight to the power of the elapsed time in minutes.
    weight = weight_per_minute ** (timedif / 60.0)

    new_val = last_val * weight + this_val * (1 - weight)

-    # print "Alt: %.5f, Jetzt: %.5f, Timedif: %.1f, Gewicht: %.5f, Neu: %.5f" % \
-    #    (last_val, this_val, timedif, weight, new_val)
-
    g_counters[itemname] = (this_time, new_val)
-    return timedif, new_val
+    return new_val


-def save_counters(hostname):
-    if not opt_dont_submit and not i_am_root(): # never writer counters as root
-        global g_counters
-        filename = counters_directory + "/" + hostname
-        try:
-            if not os.path.exists(counters_directory):
-                os.makedirs(counters_directory)
-            file(filename, "w").write("%r\n" % g_counters)
-        except Exception, e:
-            raise MKGeneralException("User %s cannot write to %s: %s" % (username(), filename, e))
-
-#    writelines([ "%s %d %d\n" % (i[0], i[1][0], i[1][1]) for i in g_counters.items() ])
-

#   +----------------------------------------------------------------------+
#   |              ____ _               _    _                             |
#   |             / ___| |__   ___  ___| | _(_)_ __   __ _                 |
#   |            | |   | '_ \ / _ \/ __| |/ / | '_ \ / _` |                |
#   |            | |___| | | |  __/ (__|   <| | | | | (_| |                |
#   |             \____|_| |_|\___|\___|_|\_\_|_| |_|\__, |                |
#   |                                                |___/                 |
#   +----------------------------------------------------------------------+

# This is the main check function - the central entry point to all and
# everything
def do_check(hostname, ipaddress, only_check_types = None):
-
    if opt_verbose:
        sys.stderr.write("Check_mk version %s\n" % check_mk_version)

    start_time = time.time()

+    expected_version = agent_target_version(hostname)
+
+    # Exit state in various situations is configurable since 1.2.3i1
+    exit_spec = exit_code_spec(hostname)
+
    try:
        load_counters(hostname)
        agent_version, num_success, error_sections, problems = do_all_checks_on_host(hostname, ipaddress, only_check_types)
        num_errors = len(error_sections)
        save_counters(hostname)
        if problems:
-            output = "CRIT - %s, " % problems
-            status = 2
+            output = "%s, " % problems
+            status = exit_spec.get("connection", 2)
        elif num_errors > 0 and num_success > 0:
-            output = "WARN - Missing agent sections: %s - " % ", ".join(error_sections)
-            status = 1
+            output = "Missing agent sections: %s - " % ", ".join(error_sections)
+            status = exit_spec.get("missing_sections", 1)
        elif num_errors > 0:
-            output = "CRIT - Got no information from host, "
-            status = 2
+            output = "Got no information from host, "
+            status = exit_spec.get("empty_output", 2)
+        elif expected_version and agent_version \
+             and not is_expected_agent_version(agent_version, expected_version):
+            # expected version can either be:
+            # a) a single version string
+            # b) a tuple of ("at_least", {'daily_build': '2014.06.01', 'release': '1.2.5i4'})
+            #    (the dict keys are optional)
+            if type(expected_version) == tuple and expected_version[0] == 'at_least':
+                expected = 'at least'
+                if 'daily_build' in expected_version[1]:
+                    expected += ' build %s' % expected_version[1]['daily_build']
+                if 'release' in expected_version[1]:
+                    if 'daily_build' in expected_version[1]:
+                        expected += ' or'
+                    expected += ' release %s' % expected_version[1]['release']
+            else:
+                expected = expected_version
+            output = "unexpected agent version %s (should be %s), " % (agent_version, expected)
+            status = exit_spec.get("wrong_version", 1)
        elif agent_min_version and agent_version < agent_min_version:
-            output = "WARN - old plugin version %s (should be at least %s), " % (agent_version, agent_min_version)
-            status = 1
+            output = "old plugin version %s (should be at least %s), " % (agent_version, agent_min_version)
+            status = exit_spec.get("wrong_version", 1)
        else:
-            output = "OK - "
-            if agent_version != None:
+            output = ""
+            if not is_cluster(hostname) and agent_version != None:
                output += "Agent version %s, " % agent_version
            status = 0

+    except MKCheckTimeout:
+        raise
+
    except MKGeneralException, e:
        if opt_debug:
raise
-        output = "UNKNOWN - %s, " % e
-        status = 3
+        output = "%s, " % e
+        status = exit_spec.get("exception", 3)

    if aggregate_check_mk:
        try:
@@ -756,14 +1162,29 @@
    run_time = time.time() - start_time
    if check_mk_perfdata_with_times:
        times = os.times()
+        if opt_keepalive:
+            times = map(lambda a: a[0]-a[1], zip(times, g_initial_times))
        output += "execution time %.1f sec|execution_time=%.3f user_time=%.3f "\
                  "system_time=%.3f children_user_time=%.3f children_system_time=%.3f\n" %\
                (run_time, run_time, times[0], times[1], times[2], times[3])
    else:
        output += "execution time %.1f sec|execution_time=%.3f\n" % (run_time, run_time)

-    sys.stdout.write(output)
-    sys.exit(status)
+    if record_inline_snmp_stats and has_inline_snmp and use_inline_snmp:
+        save_snmp_stats()
+
+    if opt_keepalive:
+        global total_check_output
+        total_check_output += output
+    else:
+        sys.stdout.write(nagios_state_names[status] + " - " + output)
+
+    return status
+

+# Keepalive-mode for running cmk as a check helper.
+class MKCheckTimeout(Exception):
+    pass
+

def check_unimplemented(checkname, params, info):
    return (3, 'UNKNOWN - Check not implemented')
@@ -775,7 +1196,7 @@
    if type(info) != dict:
        # Convert check declaration from old style to new API
        check_function, service_description, has_perfdata, inventory_function = info
-        if inventory_function == no_inventory_possible:
+        if inventory_function == no_discovery_possible:
            inventory_function = None

        check_info[check_type] = {
@@ -793,11 +1214,13 @@
                                               snmp_scan_functions.get(basename)),
            "default_levels_variable" : check_default_levels.get(check_type),
            "node_info"               : False,
+            "parse_function"          : None,
        }
    else:
        # Check already uses the new API. Make sure that all keys are present
        # and copy extra check-specific information into file-specific variables.
        info.setdefault("inventory_function", None)
+        info.setdefault("parse_function", None)
        info.setdefault("group", None)
        info.setdefault("snmp_info", None)
        info.setdefault("snmp_scan_function", None)
@@ -831,6 +1254,57 @@
        if info["snmp_scan_function"] and basename not in snmp_scan_functions:
            snmp_scan_functions[basename] = info["snmp_scan_function"]

+
+def convert_check_result(result, is_snmp):
+    if type(result) == tuple:
+        return result
+
+    elif result == None:
+        return item_not_found(is_snmp)
+
+    # The check function may either return a tuple (pair or triple) or an iterator
+    # (using yield). The latter one is new since version 1.2.5i5.
+    else: # We assume an iterator, convert to tuple
+        subresults = list(result)
+
+        # Empty list? Check returned nothing
+        if not subresults:
+            return item_not_found(is_snmp)
+
+
+        # Simple check with no separate subchecks (yield wouldn't have been necessary here!)
+        if len(subresults) == 1:
+            return subresults[0]
+
+        # Several sub results issued with multiple yields. Let the worst sub result
+        # decide the total state, join the texts and performance data. Subresults with
+        # an infotext of None are used for adding performance data.
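        # Worked example of the folding described above: a check function doing
        #
        #     yield 0, "temperature 41 C", [("temp", 41)]
        #     yield 1, "fan speed low"
        #
        # is converted to (1, "temperature 41 C, fan speed low(!)", [("temp", 41)]).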
+        else:
+            perfdata = []
+            infotexts = []
+            status = 0
+
+            for subresult in subresults:
+                st, text = subresult[:2]
+                if text != None:
+                    infotexts.append(text + ["", "(!)", "(!!)", "(?)"][st])
+                    if st == 2 or status == 2:
+                        status = 2
+                    else:
+                        status = max(status, st)
+                if len(subresult) == 3:
+                    perfdata += subresult[2]
+
+            return status, ", ".join(infotexts), perfdata
+
+
+def item_not_found(is_snmp):
+    if is_snmp:
+        return 3, "Item not found in SNMP data"
+    else:
+        return 3, "Item not found in agent output"
+
+
# Loops over all checks for a host, gets the data, calls the check
# function that examines that data and sends the result to Nagios
def do_all_checks_on_host(hostname, ipaddress, only_check_types = None):
@@ -840,13 +1314,19 @@
    g_hostname = hostname
    num_success = 0
    error_sections = set([])
-    check_table = get_sorted_check_table(hostname)
+    check_table = get_sorted_check_table(hostname, remove_duplicates=True, world=opt_keepalive and "active" or "config")
    problems = []

-    for checkname, item, params, description, info in check_table:
+    parsed_infos = {} # temporary cache for section infos, maybe parsed
+
+    for checkname, item, params, description, aggrinfo in check_table:
        if only_check_types != None and checkname not in only_check_types:
            continue

+        # Make service description globally available
+        global g_service_description
+        g_service_description = description
+
        # Skip checks that are not in their check period
        period = check_period_of(hostname, description)
        if period and not check_timeperiod(period):
@@ -858,29 +1338,36 @@
                sys.stderr.write("Service %s: timeperiod %s is currently active.\n" %
                        (description, period))

-        # In case of a precompiled check table info is the aggrated
+        # In case of a precompiled check table aggrinfo is the aggregated
        # service name. In the non-precompiled version it contains the dependencies
-        if type(info) == str:
-            aggrname = info
+        if type(aggrinfo) == str:
+            aggrname = aggrinfo
        else:
            aggrname = aggregated_service_name(hostname, description)

        infotype = checkname.split('.')[0]
        try:
-            info = get_host_info(hostname, ipaddress, infotype)
+            if infotype in parsed_infos:
+                info = parsed_infos[infotype]
+            else:
+                info = get_host_info(hostname, ipaddress, infotype)
+                parsed_infos[infotype] = info
+
+        except MKSkipCheck, e:
+            continue

        except MKSNMPError, e:
-            if str(e):
-                problems.append(str(e))
+            if str(e):
+                problems.append(str(e))
            error_sections.add(infotype)
-            g_broken_snmp_hosts.add(hostname)
-            continue
+            g_broken_snmp_hosts.add(hostname)
+            continue

        except MKAgentError, e:
-            if str(e):
+            if str(e):
                problems.append(str(e))
            error_sections.add(infotype)
-            g_broken_agent_hosts.add(hostname)
-            continue
+            g_broken_agent_hosts.add(hostname)
+            continue

        if info or info == []:
            num_success += 1
@@ -891,37 +1378,66 @@

            try:
                dont_submit = False
-                result = check_function(item, params, info)
+
+                # Call the actual check function
+                reset_wrapped_counters()
+                result = convert_check_result(check_function(item, params, info), check_uses_snmp(checkname))
+                if last_counter_wrap():
+                    raise last_counter_wrap()
+
+
            # handle check implementations that do not yet support the
            # handling of wrapped counters via exception.
Do not submit # any check result in that case: except MKCounterWrapped, e: - if opt_verbose: - print "Counter wrapped, not handled by check, ignoring this check result: %s" % e + verbose("%-20s PEND - Cannot compute check result: %s\n" % (description, e)) dont_submit = True + except Exception, e: - result = (3, "invalid output from agent, invalid check parameters or error in implementation of check %s. Please set debug_log to a filename in main.mk for enabling exception logging." % checkname) - if debug_log: - try: - import traceback, pprint - l = file(debug_log, "a") - l.write(("Invalid output from plugin or error in check:\n" - " Check_MK Version: %s\n" - " Date: %s\n" - " Host: %s\n" - " Service: %s\n" - " Check type: %s\n" - " Item: %r\n" - " Parameters: %s\n" - " %s\n" - " Agent info: %s\n\n") % ( - check_mk_version, - time.strftime("%Y-%d-%m %H:%M:%S"), - hostname, description, checkname, item, pprint.pformat(params), - traceback.format_exc().replace('\n', '\n '), - pprint.pformat(info))) - except: - pass + text = "check failed - please submit a crash report!" + try: + import traceback, pprint, tarfile, base64 + # Create a crash dump with a backtrace and the agent output. + # This is put into a directory per service. The content is then + # put into a tarball, base64 encoded and put into the long output + # of the check :-) + crash_dir = var_dir + "/crashed_checks/" + hostname + "/" + description.replace("/", "\\") + if not os.path.exists(crash_dir): + os.makedirs(crash_dir) + file(crash_dir + "/trace", "w").write( + ( + " Check output: %s\n" + " Check_MK Version: %s\n" + " Date: %s\n" + " Host: %s\n" + " Service: %s\n" + " Check type: %s\n" + " Item: %r\n" + " Parameters: %s\n" + " %s\n") % ( + text, + check_mk_version, + time.strftime("%Y-%d-%m %H:%M:%S"), + hostname, + description, + checkname, + item, + pprint.pformat(params), + traceback.format_exc().replace('\n', '\n '))) + file(crash_dir + "/info", "w").write(repr(info) + "\n") + cachefile = tcp_cache_dir + "/" + hostname + if os.path.exists(cachefile): + file(crash_dir + "/agent_output", "w").write(file(cachefile).read()) + elif os.path.exists(crash_dir + "/agent_output"): + os.remove(crash_dir + "/agent_output") + + tarcontent = os.popen("tar czf - -C %s ." % quote_shell_string(crash_dir)).read() + encoded = base64.b64encode(tarcontent) + text += "\n" + "Crash dump:\n" + encoded + "\n" + except: + pass + + result = 3, text if opt_debug: raise @@ -939,7 +1455,7 @@ else: agent_version = None except MKAgentError, e: - g_broken_agent_hosts.add(hostname) + g_broken_agent_hosts.add(hostname) agent_version = "(unknown)" except: agent_version = "(unknown)" @@ -1008,6 +1524,9 @@ def submit_check_result(host, servicedesc, result, sa): + if not result: + result = 3, "Check plugin did not return any result" + if len(result) >= 3: state, infotext, perfdata = result[:3] else: @@ -1021,6 +1540,12 @@ infotext.startswith("UNKNOWN -")): infotext = nagios_state_names[state] + " - " + infotext + # make sure that plugin output does not contain a vertical bar. 
If that is the + # case then replace it with a Uniocode "Light vertical bar" + if type(infotext) == unicode: + infotext = infotext.encode("utf-8") # should never happen + infotext = infotext.replace("|", "\xe2\x9d\x98") + global nagios_command_pipe # [] PROCESS_SERVICE_CHECK_RESULT;;;; @@ -1052,7 +1577,7 @@ perftext = "|" + (" ".join(perftexts)) if not opt_dont_submit: - submit_to_nagios(host, servicedesc, state, infotext + perftext) + submit_to_core(host, servicedesc, state, infotext + perftext) if opt_verbose: if opt_showperfdata: @@ -1060,11 +1585,21 @@ else: p = '' color = { 0: tty_green, 1: tty_yellow, 2: tty_red, 3: tty_magenta }[state] - print "%-20s %s%s%-56s%s%s" % (servicedesc, tty_bold, color, infotext, tty_normal, p) + print "%-20s %s%s%-56s%s%s" % (servicedesc, tty_bold, color, infotext.split('\n')[0], tty_normal, p) -def submit_to_nagios(host, service, state, output): - if check_submission == "pipe": +def submit_to_core(host, service, state, output): + # Save data for sending it to the Check_MK Micro Core + # Replace \n to enable multiline ouput + if opt_keepalive: + output = output.replace("\n", "\x01", 1).replace("\n","\\n") + result = "\t%d\t%s\t%s\n" % (state, service, output.replace("\0", "")) # remove binary 0, CMC does not like it + global total_check_output + total_check_output += result + + # Send to Nagios/Icinga command pipe + elif check_submission == "pipe" or monitoring_core == "cmc": # CMC does not support file + output = output.replace("\n", "\\n") open_command_pipe() if nagios_command_pipe: nagios_command_pipe.write("[%d] PROCESS_SERVICE_CHECK_RESULT;%s;%s;%d;%s\n" % @@ -1072,7 +1607,10 @@ # Important: Nagios needs the complete command in one single write() block! # Python buffers and sends chunks of 4096 bytes, if we do not flush. nagios_command_pipe.flush() + + # Create check result files for Nagios/Icinga elif check_submission == "file": + output = output.replace("\n", "\\n") open_checkresult_file() if checkresult_file_fd: now = time.time() @@ -1113,14 +1651,133 @@ def i_am_root(): return os.getuid() == 0 +# Parses versions of Check_MK and converts them into comparable integers. +# This does not handle daily build numbers, only official release numbers. 
+# 1.2.4p1 -> 01020450001 +# 1.2.4 -> 01020450000 +# 1.2.4b1 -> 01020420100 +# 1.2.3i1p1 -> 01020310101 +# 1.2.3i1 -> 01020310100 +def parse_version(v): + def extract_number(s): + number = '' + for i, c in enumerate(s): + try: + int(c) + number += c + except ValueError: + s = s[i:] + return number and int(number) or 0, s + return number and int(number) or 0, '' + + major, minor, rest = v.split('.') + sub, rest = extract_number(rest) + + if not rest: + val = 50000 + elif rest[0] == 'p': + num, rest = extract_number(rest[1:]) + val = 50000 + num + elif rest[0] == 'i': + num, rest = extract_number(rest[1:]) + val = 10000 + num*100 + + if rest and rest[0] == 'p': + num, rest = extract_number(rest[1:]) + val += num + elif rest[0] == 'b': + num, rest = extract_number(rest[1:]) + val = 20000 + num*100 + + return int('%02d%02d%02d%05d' % (int(major), int(minor), sub, val)) + +def is_expected_agent_version(agent_version, expected_version): + try: + if agent_version in [ '(unknown)', None, 'None' ]: + return False + + if type(expected_version) == str and expected_version != agent_version: + return False + + elif type(expected_version) == tuple and expected_version[0] == 'at_least': + is_daily_build = len(agent_version) == 10 or '-' in agent_version + + spec = expected_version[1] + if is_daily_build and 'daily_build' in spec: + expected = int(spec['daily_build'].replace('.', '')) + if len(agent_version) == 10: # master build + agent = int(agent_version.replace('.', '')) + + else: # branch build (e.g. 1.2.4-2014.06.01) + agent = int(agent_version.split('-')[1].replace('.', '')) + + if agent < expected: + return False + + elif 'release' in spec: + if parse_version(agent_version) < parse_version(spec['release']): + return False + + return True + except Exception, e: + raise MKGeneralException("Unable to check agent version (Agent: %s Expected: %s, Error: %s)" % + (agent_version, expected_version, e)) + # Returns the nodes of a cluster, or None if hostname is # not a cluster def nodes_of(hostname): + nodes = g_nodesof_cache.get(hostname, False) + if nodes != False: + return nodes + for tagged_hostname, nodes in clusters.items(): if hostname == tagged_hostname.split("|")[0]: + g_nodesof_cache[hostname] = nodes return nodes + + g_nodesof_cache[hostname] = None return None +def check_uses_snmp(check_type): + return snmp_info.get(check_type.split(".")[0]) != None + +def pnp_cleanup(s): + return s \ + .replace(' ', '_') \ + .replace(':', '_') \ + .replace('/', '_') \ + .replace('\\', '_') + + +#. +# .--Caches--------------------------------------------------------------. 
+# | ____ _ | +# | / ___|__ _ ___| |__ ___ ___ | +# | | | / _` |/ __| '_ \ / _ \/ __| | +# | | |__| (_| | (__| | | | __/\__ \ | +# | \____\__,_|\___|_| |_|\___||___/ | +# | | +# +----------------------------------------------------------------------+ +# | Global caches that are valid until the configuration changes | +# '----------------------------------------------------------------------' + +def reset_global_caches(): + global g_check_table_cache + g_check_table_cache = {} # per-host-checktables + global g_singlehost_checks + g_singlehost_checks = None # entries in checks used by just one host + global g_multihost_checks + g_multihost_checks = None # entries in checks used by more than one host + global g_nodesof_cache + g_nodesof_cache = {} # Nodes of cluster hosts + global g_dns_cache + g_dns_cache = {} + global g_ip_lookup_cache + g_ip_lookup_cache = None # permanently cached ipaddresses from ipaddresses.cache + global g_converted_rulesets_cache + g_converted_rulesets_cache = {} + +reset_global_caches() # +----------------------------------------------------------------------+ # | ____ _ _ _ _ | @@ -1133,6 +1790,85 @@ # | These functions are used in some of the checks. | # +----------------------------------------------------------------------+ +# Generic function for checking a value against levels. This also supports +# predictive levels. +# value: currently measured value +# dsname: name of the datasource in the RRD that corresponds to this value +# unit: unit to be displayed in the plugin output, e.g. "MB/s" +# factor: the levels are multiplied with this factor before applying +# them to the value. This is being used for the CPU load check +# currently. The levels here are "per CPU", so the number of +# CPUs is used as factor. +# scale: Scale of the levels in relation to "value" and the value in the RRDs. +# For example if the levels are specified in GB and the RRD store KB, then +# the scale is 1024*1024. +def check_levels(value, dsname, params, unit="", factor=1.0, scale=1.0, statemarkers=False): + if unit: + unit = " " + unit # Insert space before MB, GB, etc. 
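+    # Illustrative calls (made-up values):
+    #
+    #     check_levels(72.5, "temp", (70.0, 80.0), unit="C")
+    #     # -> (1, " (warning level at 70.00 C)", [])
+    #
+    #     check_levels(72.5, "temp", None)
+    #     # -> (0, "", [])  - no levels configured, never alarms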
+
+    perfdata = []
+    infotexts = []
+
+    # None or (None, None) -> do not check any levels
+    if params == None or params == (None, None):
+        return 0, "", []
+
+    # Pair of numbers -> static levels
+    elif type(params) == tuple:
+        warn_upper, crit_upper = params[0] * factor * scale, params[1] * factor * scale,
+        warn_lower, crit_lower = None, None
+        ref_value = None
+
+    # Dictionary -> predictive levels
+    else:
+        try:
+            ref_value, ((warn_upper, crit_upper), (warn_lower, crit_lower)) = \
+                get_predictive_levels(dsname, params, "MAX", levels_factor=factor * scale)
+
+            if ref_value:
+                infotexts.append("predicted reference: %.2f%s" % (ref_value / scale, unit))
+            else:
+                infotexts.append("no reference for prediction yet")
+        except Exception, e:
+            if opt_debug:
+                raise
+            return 3, "%s" % e, []
+
+    if ref_value:
+        perfdata.append(('predict_' + dsname, ref_value))
+
+    # Critical cases
+    if crit_upper != None and value >= crit_upper:
+        state = 2
+        infotexts.append("critical level at %.2f%s" % (crit_upper / scale, unit))
+    elif crit_lower != None and value <= crit_lower:
+        state = 2
+        infotexts.append("too low: critical level at %.2f%s" % (crit_lower / scale, unit))
+
+    # Warning cases
+    elif warn_upper != None and value >= warn_upper:
+        state = 1
+        infotexts.append("warning level at %.2f%s" % (warn_upper / scale, unit))
+    elif warn_lower != None and value <= warn_lower:
+        state = 1
+        infotexts.append("too low: warning level at %.2f%s" % (warn_lower / scale, unit))

+    # OK
+    else:
+        state = 0
+
+    if infotexts:
+        infotext = " (" + ", ".join(infotexts) + ")"
+    else:
+        infotext = ""
+
+    if state and statemarkers:
+        if state == 1:
+            infotext += "(!)"
+        else:
+            infotext += "(!!)"
+    return state, infotext, perfdata
+

# check range, values might be negative!
# returns True, if value is inside the interval
@@ -1143,6 +1879,7 @@
# compile regex or look it up in already compiled regexes
# (compiling is a CPU consuming process. We cache compiled
# regexes).
+compiled_regexes = {}
def get_regex(pattern):
    reg = compiled_regexes.get(pattern)
    if not reg:
@@ -1153,6 +1890,9 @@
# Names of texts usually output by checks
nagios_state_names = ["OK", "WARN", "CRIT", "UNKNOWN"]

+# Symbolic representations of states (Needed for new 2.0 check api)
+state_markers = ["", "(!)", "(!!)", "(?)"]
+
# int() function that returns 0 for strings that
# cannot be converted to a number
def saveint(i):
@@ -1180,53 +1920,53 @@
        b *= -1

    if b >= base * base * base * base:
-        return '%s%.2fT%s' % (prefix, b / base / base / base / base, unit)
+        return '%s%.2f T%s' % (prefix, b / base / base / base / base, unit)
    elif b >= base * base * base:
-        return '%s%.2fG%s' % (prefix, b / base / base / base, unit)
+        return '%s%.2f G%s' % (prefix, b / base / base / base, unit)
    elif b >= base * base:
-        return '%s%.2fM%s' % (prefix, b / base / base, unit)
+        return '%s%.2f M%s' % (prefix, b / base / base, unit)
    elif b >= base:
-        return '%s%.2fk%s' % (prefix, b / base, unit)
+        return '%s%.2f k%s' % (prefix, b / base, unit)
    elif bytefrac:
-        return '%s%.2f%s' % (prefix, b, unit)
+        return '%s%.2f %s' % (prefix, b, unit)
    else: # Omit byte fractions
-        return '%s%.0f%s' % (prefix, b, unit)
+        return '%s%.0f %s' % (prefix, b, unit)

# Similar to get_bytes_human_readable, but optimized for file
-# sizes
+# sizes. Really only use this for files. We assume that for smaller
+# files one wants to compare the exact bytes of a file, so the
+# threshold to show the value as MB/GB is higher than the one of
+# get_bytes_human_readable().
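# Illustrative values:
#
#     get_filesize_human_readable(123456)           -> "123456 B"
#     get_filesize_human_readable(5 * 1024 * 1024)  -> "5.00 MB"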
def get_filesize_human_readable(size): if size < 4 * 1024 * 1024: - return str(size) + return "%d B" % int(size) elif size < 4 * 1024 * 1024 * 1024: - return "%.2fMB" % (float(size) / (1024 * 1024)) + return "%.2f MB" % (float(size) / (1024 * 1024)) else: - return "%.2fGB" % (float(size) / (1024 * 1024 * 1024)) + return "%.2f GB" % (float(size) / (1024 * 1024 * 1024)) def get_nic_speed_human_readable(speed): try: speedi = int(speed) if speedi == 10000000: - speed = "10MBit/s" + speed = "10 Mbit/s" elif speedi == 100000000: - speed = "100MBit/s" + speed = "100 Mbit/s" elif speedi == 1000000000: - speed = "1GBit/s" + speed = "1 Gbit/s" elif speed < 1500: - speed = "%dBit/s" % speedi + speed = "%d bit/s" % speedi elif speed < 1000000: - speed = "%.1fKBit/s" % (speedi / 1000.0) + speed = "%.1f Kbit/s" % (speedi / 1000.0) elif speed < 1000000000: - speed = "%.2fMBit/s" % (speedi / 1000000.0) + speed = "%.2f Mbit/s" % (speedi / 1000000.0) else: - speed = "%.2fGBit/s" % (speedi / 1000000000.0) + speed = "%.2f Gbit/s" % (speedi / 1000000000.0) except: pass return speed -# Convert Fahrenheit to Celsius -def to_celsius(f): - return round(float(f) - 32.0) * 5.0 / 9.0 # Format time difference seconds into approximated # human readable value @@ -1237,13 +1977,13 @@ if mins < 120: return "%d min" % mins hours, mins = divmod(mins, 60) - if hours < 12: - return "%d hours, %d min" % (hours, mins) - if hours < 48: + if hours < 12 and mins > 0: + return "%d hours %d min" % (hours, mins) + elif hours < 48: return "%d hours" % hours days, hours = divmod(hours, 24) - if days < 7: - return "%d days, %d hours" % (days, hours) + if days < 7 and hours > 0: + return "%d days %d hours" % (days, hours) return "%d days" % days # Quote string for use as arguments on the shell @@ -1259,13 +1999,21 @@ global g_inactive_timerperiods # Let exceptions happen, they will be handled upstream. if g_inactive_timerperiods == None: - import socket - s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) - s.connect(livestatus_unix_socket) - # We just get the currently inactive timeperiods. All others - # (also non-existing) are considered to be active - s.send("GET timeperiods\nColumns:name\nFilter: in = 0\n") - s.shutdown(socket.SHUT_WR) - g_inactive_timerperiods = s.recv(10000000).splitlines() + try: + import socket + s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) + s.connect(livestatus_unix_socket) + # We just get the currently inactive timeperiods. 
All others
+            # (also non-existing) are considered to be active
+            s.send("GET timeperiods\nColumns: name\nFilter: in = 0\n")
+            s.shutdown(socket.SHUT_WR)
+            g_inactive_timerperiods = s.recv(10000000).splitlines()
+        except Exception, e:
+            if opt_debug:
+                raise
+            else:
+                # If the query is not successful better to skip this check than to fail
+                return False
+
    return timeperiod not in g_inactive_timerperiods
diff -Nru check-mk-1.2.2p3/check_mk-bintec_sensors.fan.php check-mk-1.2.6p12/check_mk-bintec_sensors.fan.php
--- check-mk-1.2.2p3/check_mk-bintec_sensors.fan.php 1970-01-01 00:00:00.000000000 +0000
+++ check-mk-1.2.6p12/check_mk-bintec_sensors.fan.php 2015-06-24 09:48:39.000000000 +0000
@@ -0,0 +1,34 @@
+
diff -Nru check-mk-1.2.2p3/check_mk-bintec_sensors.temp.php check-mk-1.2.6p12/check_mk-bintec_sensors.temp.php
--- check-mk-1.2.2p3/check_mk-bintec_sensors.temp.php 1970-01-01 00:00:00.000000000 +0000
+++ check-mk-1.2.6p12/check_mk-bintec_sensors.temp.php 2015-06-24 09:48:39.000000000 +0000
@@ -0,0 +1,38 @@
+
diff -Nru check-mk-1.2.2p3/check_mk-bintec_sensors.voltage.php check-mk-1.2.6p12/check_mk-bintec_sensors.voltage.php
--- check-mk-1.2.2p3/check_mk-bintec_sensors.voltage.php 1970-01-01 00:00:00.000000000 +0000
+++ check-mk-1.2.6p12/check_mk-bintec_sensors.voltage.php 2015-06-24 09:48:39.000000000 +0000
@@ -0,0 +1,38 @@
+
diff -Nru check-mk-1.2.2p3/check_mk-blade_bx_load.php check-mk-1.2.6p12/check_mk-blade_bx_load.php
--- check-mk-1.2.2p3/check_mk-blade_bx_load.php 1970-01-01 00:00:00.000000000 +0000
+++ check-mk-1.2.6p12/check_mk-blade_bx_load.php 2015-09-21 10:59:54.000000000 +0000
@@ -0,0 +1,71 @@
+foreach ($NAME as $i => $n) {
+    $RRD[$n] = "$RRDFILE[$i]:$DS[$i]:MAX";
+    $WARN[$n] = $WARN[$i];
+    $CRIT[$n] = $CRIT[$i];
+    $MIN[$n]  = $MIN[$i];
+    $MAX[$n]  = $MAX[$i];
+}
+
+$opt[1] = "--vertical-label 'Load average' -l0 -u 1 --title \"CPU Load for $hostname\" ";
+
+$def[1] = ""
+        . "DEF:load1=$RRD[load1] "
+        . "AREA:load1#60c0e0:\"Load average 1 min \" "
+        . "GPRINT:load1:LAST:\"%6.2lf last\" "
+        . "GPRINT:load1:AVERAGE:\"%6.2lf avg\" "
+        . "GPRINT:load1:MAX:\"%6.2lf max\\n\" "
+
+        . "DEF:load15=$RRD[load15] "
+        . "LINE:load15#004080:\"Load average 15 min \" "
+        . "GPRINT:load15:LAST:\"%6.2lf last\" "
+        . "GPRINT:load15:AVERAGE:\"%6.2lf avg\" "
+        . "GPRINT:load15:MAX:\"%6.2lf max\\n\" "
+        . "";
+
+if ($WARN[1]) {
+    $def[1] .= ""
+        . "HRULE:$WARN[1]#FFFF00 "
+        . "HRULE:$CRIT[1]#FF0000 "
+        . "";
+}
+
+if ($MAX[1]) {
+    $def[1] .= "COMMENT:\" Number of CPUs $MAX[1]\" ";
+}
+
+if (isset($RRD["predict_load15"])) {
+    $def[1] .= ""
+        . "DEF:predict=$RRD[predict_load15] "
+        . "LINE:predict#ff0000:\"Reference for prediction \\n\" "
+        . "";
+}
+?>
diff -Nru check-mk-1.2.2p3/check_mk-brocade_fcport.php check-mk-1.2.6p12/check_mk-brocade_fcport.php
--- check-mk-1.2.2p3/check_mk-brocade_fcport.php 2013-11-05 09:22:37.000000000 +0000
+++ check-mk-1.2.6p12/check_mk-brocade_fcport.php 2015-06-24 09:48:39.000000000 +0000
@@ -6,7 +6,7 @@
 # |           | |___| | | |  __/ (__|   <    | |  | | . \            |
 # |           \____|_| |_|\___|\___|_|\_\___|_|  |_|_|\_\            |
 # |                                                                  |
-# | Copyright Mathias Kettner 2013             mk@mathias-kettner.de |
+# | Copyright Mathias Kettner 2014             mk@mathias-kettner.de |
 # +------------------------------------------------------------------+
 #
 # This file is part of Check_MK.
@@ -29,11 +29,12 @@
 # than with numbers.
$RRD = array(); foreach ($NAME as $i => $n) { - $RRD[$n] = "$RRDFILE[$i]:$DS[$i]:MAX"; - $WARN[$n] = $WARN[$i]; - $CRIT[$n] = $CRIT[$i]; - $MIN[$n] = $MIN[$i]; - $MAX[$n] = $MAX[$i]; + $RRD[$n] = "$RRDFILE[$i]:$DS[$i]:MAX"; + $RRDAVG[$n] = "$RRDFILE[$i]:$DS[$i]:AVERAGE"; + $WARN[$n] = $WARN[$i]; + $CRIT[$n] = $CRIT[$i]; + $MIN[$n] = $MIN[$i]; + $MAX[$n] = $MAX[$i]; } @@ -48,32 +49,40 @@ . "DEF:out=$RRD[out] " . "CDEF:inmb=in,1048576,/ " . "CDEF:outmb=out,1048576,/ " + . "DEF:inavg=$RRDAVG[in] " + . "DEF:outavg=$RRDAVG[out] " + . "CDEF:inmbavg=inavg,1048576,/ " + . "CDEF:outmbavg=outavg,1048576,/ " . "AREA:inmb#60a020:\"in \" " - . "GPRINT:inmb:LAST:\"%5.1lf MB/s last\" " - . "GPRINT:inmb:AVERAGE:\"%5.1lf MB/s avg\" " - . "GPRINT:inmb:MAX:\"%5.1lf MB/s max\\n\" " + . "GPRINT:inmb:LAST:\"%5.3lf MB/s last\" " + . "GPRINT:inmbavg:AVERAGE:\"%5.3lf MB/s avg\" " + . "GPRINT:inmb:MAX:\"%5.3lf MB/s max\\n\" " . "CDEF:out_draw=outmb,-1,* " . "AREA:out_draw#2060a0:\"out \" " - . "GPRINT:outmb:LAST:\"%5.1lf MB/s last\" " - . "GPRINT:outmb:AVERAGE:\"%5.1lf MB/s avg\" " - . "GPRINT:outmb:MAX:\"%5.1lf MB/s max\\n\" " + . "GPRINT:outmb:LAST:\"%5.3lf MB/s last\" " + . "GPRINT:outmbavg:AVERAGE:\"%5.3lf MB/s avg\" " + . "GPRINT:outmb:MAX:\"%5.3lf MB/s max\\n\" " ; if (isset($RRD['in_avg'])) { $def[1] .= "" - . "DEF:inavg=$RRD[in_avg] " - . "DEF:outavg=$RRD[out_avg] " - . "CDEF:inavgmb=inavg,1048576,/ " - . "CDEF:outavgmb=outavg,1048576,/ " - . "CDEF:outavgmbdraw=outavg,-1048576,/ " - . "LINE:inavgmb#a0d040:\"in (avg) \" " - . "GPRINT:inavgmb:LAST:\"%5.1lf MB/s last\" " - . "GPRINT:inavgmb:AVERAGE:\"%5.1lf MB/s avg\" " - . "GPRINT:inavgmb:MAX:\"%5.1lf MB/s max\\n\" " - . "LINE:outavgmbdraw#40a0d0:\"out (avg)\" " - . "GPRINT:outavgmb:LAST:\"%5.1lf MB/s last\" " - . "GPRINT:outavgmb:AVERAGE:\"%5.1lf MB/s avg\" " - . "GPRINT:outavgmb:MAX:\"%5.1lf MB/s max\\n\" " + . "DEF:inaverage=$RRD[in_avg] " + . "DEF:outaverage=$RRD[out_avg] " + . "CDEF:inaveragemb=inaverage,1048576,/ " + . "CDEF:outaveragemb=outaverage,1048576,/ " + . "DEF:inaverage_avg=$RRDAVG[in_avg] " + . "DEF:outaverage_avg=$RRDAVG[out_avg] " + . "CDEF:inaveragemb_avg=inaverage_avg,1048576,/ " + . "CDEF:outaveragemb_avg=outaverage_avg,1048576,/ " + . "CDEF:outaveragemb_draw=outaverage,-1048576,/ " + . "LINE:inaveragemb_avg#a0d040:\"in (avg) \" " + . "GPRINT:inaveragemb:LAST:\"%5.3lf MB/s last\" " + . "GPRINT:inaveragemb_avg:AVERAGE:\"%5.3lf MB/s avg\" " + . "GPRINT:inaveragemb:MAX:\"%5.3lf MB/s max\\n\" " + . "LINE:outaveragemb_draw#40a0d0:\"out (avg)\" " + . "GPRINT:outaveragemb:LAST:\"%5.3lf MB/s last\" " + . "GPRINT:outaveragemb_avg:AVERAGE:\"%5.3lf MB/s avg\" " + . "GPRINT:outaveragemb:MAX:\"%5.3lf MB/s max\\n\" " ; } @@ -100,14 +109,16 @@ . "HRULE:0#c0c0c0 " . "DEF:in=$RRD[rxframes] " . "DEF:out=$RRD[txframes] " + . "DEF:inavg=$RRDAVG[rxframes] " + . "DEF:outavg=$RRDAVG[txframes] " . "AREA:in#a0d040:\"in \" " . "GPRINT:in:LAST:\"%5.1lf/s last\" " - . "GPRINT:in:AVERAGE:\"%5.1lf/s avg\" " + . "GPRINT:inavg:AVERAGE:\"%5.1lf/s avg\" " . "GPRINT:in:MAX:\"%5.1lf/s max\\n\" " . "CDEF:out_draw=out,-1,* " . "AREA:out_draw#40a0d0:\"out \" " . "GPRINT:out:LAST:\"%5.1lf/s last\" " - . "GPRINT:out:AVERAGE:\"%5.1lf/s avg\" " + . "GPRINT:outavg:AVERAGE:\"%5.1lf/s avgargs\" " . 
"GPRINT:out:MAX:\"%5.1lf/s max\\n\" " ; diff -Nru check-mk-1.2.2p3/check_mk-brocade_mlx.module_cpu.php check-mk-1.2.6p12/check_mk-brocade_mlx.module_cpu.php --- check-mk-1.2.2p3/check_mk-brocade_mlx.module_cpu.php 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/check_mk-brocade_mlx.module_cpu.php 2015-06-24 09:48:39.000000000 +0000 @@ -0,0 +1,48 @@ + diff -Nru check-mk-1.2.2p3/check_mk-brocade_mlx.module_mem.php check-mk-1.2.6p12/check_mk-brocade_mlx.module_mem.php --- check-mk-1.2.2p3/check_mk-brocade_mlx.module_mem.php 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/check_mk-brocade_mlx.module_mem.php 2015-06-24 09:48:39.000000000 +0000 @@ -0,0 +1,36 @@ + diff -Nru check-mk-1.2.2p3/check_mk-brocade_mlx_temp.php check-mk-1.2.6p12/check_mk-brocade_mlx_temp.php --- check-mk-1.2.2p3/check_mk-brocade_mlx_temp.php 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/check_mk-brocade_mlx_temp.php 2015-06-24 09:48:39.000000000 +0000 @@ -0,0 +1,46 @@ +MACRO['DISP_HOSTNAME'] . ' / ' . $this->MACRO['DISP_SERVICEDESC'] . "\" -w 600"; + +$line_colors = array( "5f7a2f", "2f8077", "2f5580", "662f80", "802f71", "802f36", "804b2f", "80762f"); +$def[1] = ""; + +foreach ($NAME as $i => $n) { + $def[1] .= "DEF:$n=$RRDFILE[$i]:$DS[1]:AVERAGE "; +} +foreach ($NAME as $i => $n) { + $ii = $i % 8; + $def[1] .= "LINE:$n#$line_colors[$ii]:\"$n\" "; + $def[1] .= "GPRINT:$n:LAST:\"Cur\: %.0lf C \" "; + $def[1] .= "GPRINT:$n:AVERAGE:\"Avg\: %.0lf C \" "; + $def[1] .= "GPRINT:$n:MIN:\"Min\: %.0lf C \" "; + $def[1] .= "GPRINT:$n:MAX:\"Max\: %.0lf C \\n\" "; +} + +$def[1] .= "HRULE:$WARN[1]#ffe000:\"Warning at $WARN[1] C\" "; +$def[1] .= "HRULE:$CRIT[1]#ff0000:\"Critical at $CRIT[1] C \\n\" "; + +?> diff -Nru check-mk-1.2.2p3/check_mk-brocade.temp.php check-mk-1.2.6p12/check_mk-brocade.temp.php --- check-mk-1.2.2p3/check_mk-brocade.temp.php 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/check_mk-brocade.temp.php 2015-06-24 09:48:39.000000000 +0000 @@ -0,0 +1,38 @@ + diff -Nru check-mk-1.2.2p3/check_mk_caching_agent.linux check-mk-1.2.6p12/check_mk_caching_agent.linux --- check-mk-1.2.2p3/check_mk_caching_agent.linux 2013-10-12 17:49:41.000000000 +0000 +++ check-mk-1.2.6p12/check_mk_caching_agent.linux 2015-06-24 09:48:39.000000000 +0000 @@ -6,7 +6,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. @@ -50,7 +50,7 @@ export MK_CACHEDIR="/var/cache/check_mk" # Determine the IP address of the remote Nagios server. -# xinetd sends us the IP address of the remote host via +# xinetd sends us the IP address of the remote host via # the environment variable REMOTE_HOST. SSH sets a variable # SSH_CONNECTION where the remote IP address is the first # part of a space-separated list. 
If both fails, we set diff -Nru check-mk-1.2.2p3/check_mk-canon_pages.php check-mk-1.2.6p12/check_mk-canon_pages.php --- check-mk-1.2.2p3/check_mk-canon_pages.php 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/check_mk-canon_pages.php 2015-06-24 09:48:39.000000000 +0000 @@ -0,0 +1,32 @@ + diff -Nru check-mk-1.2.2p3/check_mk-carel_sensors.php check-mk-1.2.6p12/check_mk-carel_sensors.php --- check-mk-1.2.2p3/check_mk-carel_sensors.php 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/check_mk-carel_sensors.php 2015-06-24 09:48:39.000000000 +0000 @@ -0,0 +1,38 @@ + diff -Nru check-mk-1.2.2p3/check_mk-cbl_airlaser.hardware.php check-mk-1.2.6p12/check_mk-cbl_airlaser.hardware.php --- check-mk-1.2.2p3/check_mk-cbl_airlaser.hardware.php 2013-03-04 11:48:41.000000000 +0000 +++ check-mk-1.2.6p12/check_mk-cbl_airlaser.hardware.php 2015-06-24 09:48:39.000000000 +0000 @@ -6,7 +6,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. @@ -31,6 +31,8 @@ $def[1] .= "LINE1:var1#000080:\"\" "; $def[1] .= "GPRINT:var1:MAX:\"(Max\: %2.0lfC,\" "; $def[1] .= "GPRINT:var1:AVERAGE:\"Avg\: %2.0lfC)\" "; -$def[1] .= "HRULE:$WARN[1]#FFFF00:\"Warning\: $WARN[1]C\" "; -$def[1] .= "HRULE:$CRIT[1]#FF0000:\"Critical\: $CRIT[1]C\" "; +if ($WARN[1] != "") { + $def[1] .= "HRULE:$WARN[1]#FFFF00:\"Warning\: $WARN[1]C\" "; + $def[1] .= "HRULE:$CRIT[1]#FF0000:\"Critical\: $CRIT[1]C\" "; +} ?> diff -Nru check-mk-1.2.2p3/check_mk-check_mk-cmctc.temp.php check-mk-1.2.6p12/check_mk-check_mk-cmctc.temp.php --- check-mk-1.2.2p3/check_mk-check_mk-cmctc.temp.php 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/check_mk-check_mk-cmctc.temp.php 2015-06-24 09:48:39.000000000 +0000 @@ -0,0 +1,38 @@ + diff -Nru check-mk-1.2.2p3/check_mk-chrony.php check-mk-1.2.6p12/check_mk-chrony.php --- check-mk-1.2.2p3/check_mk-chrony.php 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/check_mk-chrony.php 2015-06-24 09:48:39.000000000 +0000 @@ -0,0 +1,46 @@ + diff -Nru check-mk-1.2.2p3/check_mk-cisco_cpu.php check-mk-1.2.6p12/check_mk-cisco_cpu.php --- check-mk-1.2.2p3/check_mk-cisco_cpu.php 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/check_mk-cisco_cpu.php 2015-06-24 09:48:39.000000000 +0000 @@ -0,0 +1,44 @@ + diff -Nru check-mk-1.2.2p3/check_mk-cisco_qos.php check-mk-1.2.6p12/check_mk-cisco_qos.php --- check-mk-1.2.2p3/check_mk-cisco_qos.php 2013-11-05 09:22:37.000000000 +0000 +++ check-mk-1.2.6p12/check_mk-cisco_qos.php 2015-06-24 09:48:39.000000000 +0000 @@ -6,7 +6,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. @@ -23,15 +23,9 @@ # to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, # Boston, MA 02110-1301 USA.
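
Several temperature templates in this part of the diff (cbl_airlaser above, cisco_temp_sensor and cmctc.temp further down) now guard their threshold rules: when no levels are configured, PNP4Nagios passes empty $WARN/$CRIT strings, and an HRULE built from an empty value makes rrdtool reject the whole graph. A minimal sketch of the guard, with assumed inputs:

<?php
$WARN = array(1 => '');   // assumed: no warning level configured
$CRIT = array(1 => '');
$def = array(1 => "DEF:var1=/tmp/temp.rrd:1:MAX ");   // path is made up
if ($WARN[1] != "") {
    $def[1] .= "HRULE:$WARN[1]#FFFF00:\"Warning\: $WARN[1]C\" ";
    $def[1] .= "HRULE:$CRIT[1]#FF0000:\"Critical\: $CRIT[1]C\" ";
}
// Without the guard this would emit "HRULE:#FFFF00:...", which rrdtool
// cannot parse.
?>
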
-# Graph 1: used bandwidth -$bitBandwidth = $MAX[1] * 8; -$warn = $WARN[2]; -$crit = $CRIT[2]; - -$bandwidth = $bitBandwidth; -$mByteBandwidth = $MAX[1] / 1000 / 1000; -$mByteWarn = $WARN[2] / 1000 / 1000; -$mByteCrit = $CRIT[2] / 1000 / 1000; +$bandwidth = $MAX[1] * 8; +$warn = $WARN[1] * 8; +$crit = $CRIT[1] * 8; $bwuom = ''; $base = 1000; @@ -53,22 +47,44 @@ } $ds_name[1] = 'QoS Class Traffic'; -$opt[1] = "--vertical-label \"MB/sec\" -X0 -b 1024 --title \"$hostname / $servicedesc\" "; -$def[1] = +$opt[1] = "--vertical-label \"MBit/sec\" -X0 -b 1000 --title \"$hostname / $servicedesc\" "; +$def[1] = "HRULE:0#c0c0c0 ". - "HRULE:$mByteBandwidth#808080:\"Interface speed\: " . sprintf("%.1f", $bandwidth) . " ".$bwuom."Bit/s\\n\" ". - "HRULE:-$mByteBandwidth#808080: ". + "HRULE:$bandwidth#808080:\"Interface speed\: " . sprintf("%.1f", $bandwidth) . " ".$bwuom."Bit/s\\n\" ". + "HRULE:$warn#FFE000:\"Warning\: " . sprintf("%.1f", $warn) . " ".$bwuom."Bit/s\\n\" ". + "HRULE:$crit#FF5030:\"Critical\: " . sprintf("%.1f", $crit) . " ".$bwuom."Bit/s\\n\" ". "DEF:postbytes=$RRDFILE[1]:$DS[1]:MAX ". "DEF:dropbytes=$RRDFILE[2]:$DS[2]:MAX ". - "CDEF:postmb=postbytes,1048576,/ ". - "CDEF:dropmb=dropbytes,1048576,/ ". - "AREA:postmb#00e060:\"post \" ". - "GPRINT:postbytes:LAST:\"%5.1lf %sB/s last\" ". - "GPRINT:postbytes:AVERAGE:\"%5.1lf %sB/s avg\" ". - "GPRINT:postbytes:MAX:\"%5.1lf %sB/s max\\n\" ". - "AREA:dropmb#0080e0:\"drop \" ". - "GPRINT:dropbytes:LAST:\"%5.1lf %sB/s last\" ". - "GPRINT:dropbytes:AVERAGE:\"%5.1lf %sB/s avg\" ". - "GPRINT:dropbytes:MAX:\"%5.1lf %sB/s max\\n\" "; + "CDEF:post_traffic=postbytes,8,* ". + "CDEF:drop_traffic=dropbytes,8,* ". + "CDEF:postmbit=post_traffic,1000000,/ ". + "CDEF:dropmbit=drop_traffic,1000000,/ ". + "AREA:postmbit#00e060:\"post \" ". + "GPRINT:post_traffic:LAST:\"%5.1lf %sBit/s last\" ". + "GPRINT:post_traffic:AVERAGE:\"%5.1lf %sBit/s avg\" ". + "GPRINT:post_traffic:MAX:\"%5.1lf %sBit/s max\\n\" ". + "AREA:dropmbit#0080e0:\"drop \" ". + "GPRINT:drop_traffic:LAST:\"%5.1lf %sBit/s last\" ". + "GPRINT:drop_traffic:AVERAGE:\"%5.1lf %sBit/s avg\" ". + "GPRINT:drop_traffic:MAX:\"%5.1lf %sBit/s max\\n\" "; + + +if (isset($DS[3])) { +$def[1] .= "DEF:postbytes_avg=$RRDFILE[3]:$DS[1]:MAX ". + "DEF:dropbytes_avg=$RRDFILE[4]:$DS[2]:MAX ". + "CDEF:post_traffic_avg=postbytes_avg,8,* ". + "CDEF:drop_traffic_avg=dropbytes_avg,8,* ". + "CDEF:postmbit_avg=post_traffic_avg,1000000,/ ". + "CDEF:dropmbit_avg=drop_traffic_avg,1000000,/ ". + "LINE:postmbit_avg#3b762e:\"post avg \" ". + "GPRINT:post_traffic_avg:LAST:\"%5.1lf %sBit/s last\" ". + "GPRINT:post_traffic_avg:AVERAGE:\"%5.1lf %sBit/s avg\" ". + "GPRINT:post_traffic_avg:MAX:\"%5.1lf %sBit/s max\\n\" ". + "LINE:dropmbit_avg#1255a9:\"drop avg \" ". + "GPRINT:drop_traffic_avg:LAST:\"%5.1lf %sBit/s last\" ". + "GPRINT:drop_traffic_avg:AVERAGE:\"%5.1lf %sBit/s avg\" ". + "GPRINT:drop_traffic_avg:MAX:\"%5.1lf %sBit/s max\\n\" "; +} + ?> diff -Nru check-mk-1.2.2p3/check_mk-cisco_temp_sensor.php check-mk-1.2.6p12/check_mk-cisco_temp_sensor.php --- check-mk-1.2.2p3/check_mk-cisco_temp_sensor.php 2013-03-04 11:48:41.000000000 +0000 +++ check-mk-1.2.6p12/check_mk-cisco_temp_sensor.php 2015-06-24 09:48:39.000000000 +0000 @@ -6,7 +6,7 @@ # | | |___| | | | __/ (__| < | | | | . 
\ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. @@ -23,9 +23,7 @@ # to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, # Boston, MA 02110-1301 USA. - -$title = str_replace("_", " ", $servicedesc); -$opt[1] = "--vertical-label \"Celsius\" --title \"$title\" -h 140 "; +$opt[1] = "--vertical-label \"Celsius\" -l 0 -u 40 --title \"Temperature $servicedesc\" "; $def[1] = "DEF:var1=$RRDFILE[1]:$DS[1]:MAX "; $def[1] .= "AREA:var1#2080ff:\"Temperature\:\" "; @@ -33,6 +31,8 @@ $def[1] .= "LINE1:var1#000080:\"\" "; $def[1] .= "GPRINT:var1:MAX:\"(Max\: %2.0lfC,\" "; $def[1] .= "GPRINT:var1:AVERAGE:\"Avg\: %2.0lfC)\" "; -$def[1] .= "HRULE:$WARN[1]#FFFF00:\"Warning\: $WARN[1]C\" "; -$def[1] .= "HRULE:$CRIT[1]#FF0000:\"Critical\: $CRIT[1]C\" "; +if ($WARN[1] != "") { + $def[1] .= "HRULE:$WARN[1]#FFFF00:\"Warning\: $WARN[1]C\" "; + $def[1] .= "HRULE:$CRIT[1]#FF0000:\"Critical\: $CRIT[1]C\" "; +} ?> diff -Nru check-mk-1.2.2p3/check_mk-cisco_wlc_clients.php check-mk-1.2.6p12/check_mk-cisco_wlc_clients.php --- check-mk-1.2.2p3/check_mk-cisco_wlc_clients.php 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/check_mk-cisco_wlc_clients.php 2015-06-24 09:48:39.000000000 +0000 @@ -0,0 +1,39 @@ + diff -Nru check-mk-1.2.2p3/check_mk-citrix_licenses.php check-mk-1.2.6p12/check_mk-citrix_licenses.php --- check-mk-1.2.2p3/check_mk-citrix_licenses.php 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/check_mk-citrix_licenses.php 2015-06-24 09:48:39.000000000 +0000 @@ -0,0 +1,37 @@ + diff -Nru check-mk-1.2.2p3/check_mk-cmctc.temp.php check-mk-1.2.6p12/check_mk-cmctc.temp.php --- check-mk-1.2.2p3/check_mk-cmctc.temp.php 2013-03-04 11:48:41.000000000 +0000 +++ check-mk-1.2.6p12/check_mk-cmctc.temp.php 2015-06-24 09:48:39.000000000 +0000 @@ -6,7 +6,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. @@ -31,6 +31,8 @@ $def[1] .= "LINE1:var1#000080:\"\" "; $def[1] .= "GPRINT:var1:MAX:\"(Max\: %2.0lfC,\" "; $def[1] .= "GPRINT:var1:AVERAGE:\"Avg\: %2.0lfC)\" "; -$def[1] .= "HRULE:$WARN[1]#FFFF00:\"Warning\: $WARN[1]C\" "; -$def[1] .= "HRULE:$CRIT[1]#FF0000:\"Critical\: $CRIT[1]C\" "; +if ($WARN[1] != "") { + $def[1] .= "HRULE:$WARN[1]#FFFF00:\"Warning\: $WARN[1]C\" "; + $def[1] .= "HRULE:$CRIT[1]#FF0000:\"Critical\: $CRIT[1]C\" "; +} ?> diff -Nru check-mk-1.2.2p3/check_mk-cpu.load.php check-mk-1.2.6p12/check_mk-cpu.load.php --- check-mk-1.2.2p3/check_mk-cpu.load.php 2013-03-04 11:48:41.000000000 +0000 +++ check-mk-1.2.6p12/check_mk-cpu.load.php 2015-06-24 09:48:39.000000000 +0000 @@ -6,7 +6,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. 
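
The cisco_qos rework above switches the graph from 1024-based MB/s to 1000-based MBit/s: byte counters are multiplied by 8 and scaled by 1,000,000 in RPN, and the warn/crit rules are now derived from the byte-based thresholds of the first data source the same way. The same arithmetic in plain PHP, with a sample counter value:

<?php
$postbytes = 2500000.0;                 // sample: 2.5 MB/s of post-policy traffic
$post_traffic = $postbytes * 8;         // CDEF "postbytes,8,*"        -> 20,000,000 bit/s
$postmbit = $post_traffic / 1000000.0;  // CDEF "post_traffic,1000000,/" -> 20 MBit/s
printf("%.1f MBit/s\n", $postmbit);
?>
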
diff -Nru check-mk-1.2.2p3/check_mk-cpu.loads.php check-mk-1.2.6p12/check_mk-cpu.loads.php --- check-mk-1.2.2p3/check_mk-cpu.loads.php 2013-11-05 09:22:37.000000000 +0000 +++ check-mk-1.2.6p12/check_mk-cpu.loads.php 2015-09-21 10:59:54.000000000 +0000 @@ -6,7 +6,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. @@ -23,19 +23,49 @@ # to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, # Boston, MA 02110-1301 USA. -$opt[1] = "--vertical-label Load -l0 -u 1 --title \"CPU Load for $hostname / $servicedesc\" "; +# The number of data source various due to different +# settings (such as averaging). We rather work with names +# than with numbers. +$RRD = array(); +foreach ($NAME as $i => $n) { + $RRD[$n] = "$RRDFILE[$i]:$DS[$i]:MAX"; + $WARN[$n] = $WARN[$i]; + $CRIT[$n] = $CRIT[$i]; + $MIN[$n] = $MIN[$i]; + $MAX[$n] = $MAX[$i]; +} -$def[1] = "DEF:var1=$RRDFILE[1]:$DS[1]:MAX " ; -$def[1] .= "DEF:var2=$RRDFILE[2]:$DS[2]:MAX " ; -$def[1] .= "DEF:var3=$RRDFILE[3]:$DS[3]:MAX " ; -$def[1] .= "HRULE:$WARN[1]#FFFF00 "; -$def[1] .= "HRULE:$CRIT[1]#FF0000 "; -$def[1] .= "AREA:var1#60c0e0:\"Load average 1 min \" " ; -$def[1] .= "GPRINT:var1:LAST:\"%6.2lf last\" " ; -$def[1] .= "GPRINT:var1:AVERAGE:\"%6.2lf avg\" " ; -$def[1] .= "GPRINT:var1:MAX:\"%6.2lf max\\n\" "; -$def[1] .= "LINE:var3#004080:\"Load average 15 min \" " ; -$def[1] .= "GPRINT:var3:LAST:\"%6.2lf last\" " ; -$def[1] .= "GPRINT:var3:AVERAGE:\"%6.2lf avg\" " ; -$def[1] .= "GPRINT:var3:MAX:\"%6.2lf max\\n\" " ; +$opt[1] = "--vertical-label 'Load average' -l0 -u 1 --title \"CPU Load for $hostname\" "; + +$def[1] = "" + . "DEF:load1=$RRD[load1] " + . "AREA:load1#60c0e0:\"Load average 1 min \" " + . "GPRINT:load1:LAST:\"%6.2lf last\" " + . "GPRINT:load1:AVERAGE:\"%6.2lf avg\" " + . "GPRINT:load1:MAX:\"%6.2lf max\\n\" " + + . "DEF:load15=$RRD[load15] " + . "LINE:load15#004080:\"Load average 15 min \" " + . "GPRINT:load15:LAST:\"%6.2lf last\" " + . "GPRINT:load15:AVERAGE:\"%6.2lf avg\" " + . "GPRINT:load15:MAX:\"%6.2lf max\\n\" " + . ""; + +if ($WARN[1]) { + $def[1] .= "" + . "HRULE:$WARN[1]#FFFF00 " + . "HRULE:$CRIT[1]#FF0000 " + . ""; +} + +if ($MAX[1]) { + $def[1] .= "COMMENT:\" Number of CPUs $MAX[1]\" "; +} + +if (isset($RRD["predict_load15"])) { + $def[1] .= "" + . "DEF:predict=$RRD[predict_load15] " + . "LINE:predict#ff0000:\"Reference for prediction \\n\" " + . ""; +} ?> diff -Nru check-mk-1.2.2p3/check_mk-cpu.threads.php check-mk-1.2.6p12/check_mk-cpu.threads.php --- check-mk-1.2.2p3/check_mk-cpu.threads.php 2013-03-04 11:48:41.000000000 +0000 +++ check-mk-1.2.6p12/check_mk-cpu.threads.php 2015-06-24 09:48:39.000000000 +0000 @@ -6,7 +6,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. 
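
The cpu.loads rework above shows why the name mapping matters: optional features such as predictive levels add extra data sources, so fixed indexes break, and optional series are drawn only when their name is present. A sketch of the isset() guard, assuming a made-up mapping:

<?php
// Assumed name map as built by the foreach above; paths are made up.
$RRD = array(
    'load1'          => '/tmp/load.rrd:1:MAX',
    'load15'         => '/tmp/load.rrd:3:MAX',
    'predict_load15' => '/tmp/load.rrd:4:MAX',  // only present with prediction enabled
);
$def = "DEF:load1=$RRD[load1] DEF:load15=$RRD[load15] ";
if (isset($RRD['predict_load15'])) {
    $def .= "DEF:predict=$RRD[predict_load15] ";
    $def .= "LINE:predict#ff0000:\"Reference for prediction\" ";
}
?>
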
diff -Nru check-mk-1.2.2p3/check_mk-decru_cpu.php check-mk-1.2.6p12/check_mk-decru_cpu.php --- check-mk-1.2.2p3/check_mk-decru_cpu.php 2013-03-04 11:48:42.000000000 +0000 +++ check-mk-1.2.6p12/check_mk-decru_cpu.php 2015-09-21 10:59:54.000000000 +0000 @@ -6,7 +6,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. @@ -40,24 +40,24 @@ $def[1] .= "" . "COMMENT:Average\: " - . "AREA:system#ff6000:\"System\" " - . "GPRINT:system:AVERAGE:\"%2.1lf%% \" " - . "AREA:user#60f020:\"User\":STACK " - . "GPRINT:user:AVERAGE:\"%2.1lf%% \" " - . "AREA:wait#00b0c0:\"$thirdname\":STACK " - . "GPRINT:wait:AVERAGE:\"%2.1lf%% \" " - . "LINE:sum#004080:\"Total\" " - . "GPRINT:sum:AVERAGE:\"%2.1lf%% \\n\" " + . "AREA:system#ff6000:\"System\" " + . "GPRINT:system:AVERAGE:\"%4.1lf%% \" " + . "AREA:user#60f020:\"User\":STACK " + . "GPRINT:user:AVERAGE:\"%4.1lf%% \" " + . "AREA:wait#00b0c0:\"$thirdname\":STACK " + . "GPRINT:wait:AVERAGE:\"%4.1lf%% \" " + . "LINE:sum#004080:\"Total\" " + . "GPRINT:sum:AVERAGE:\"%4.1lf%% \\n\" " . "COMMENT:\"Last\: \" " - . "AREA:system#ff6000:\"System\" " - . "GPRINT:system:LAST:\"%2.1lf%% \" " - . "AREA:user#60f020:\"User\":STACK " - . "GPRINT:user:LAST:\"%2.1lf%% \" " - . "AREA:wait#00b0c0:\"$thirdname\":STACK " - . "GPRINT:wait:LAST:\"%2.1lf%% \" " - . "LINE:sum#004080:\"Total\" " - . "GPRINT:sum:LAST:\"%2.1lf%% \\n\" " + . "AREA:system#ff6000:\"System\" " + . "GPRINT:system:LAST:\"%4.1lf%% \" " + . "AREA:user#60f020:\"User\":STACK " + . "GPRINT:user:LAST:\"%4.1lf%% \" " + . "AREA:wait#00b0c0:\"$thirdname\":STACK " + . "GPRINT:wait:LAST:\"%4.1lf%% \" " + . "LINE:sum#004080:\"Total\" " + . "GPRINT:sum:LAST:\"%4.1lf%% \\n\" " .""; diff -Nru check-mk-1.2.2p3/check_mk-decru_temps.php check-mk-1.2.6p12/check_mk-decru_temps.php --- check-mk-1.2.2p3/check_mk-decru_temps.php 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/check_mk-decru_temps.php 2015-06-24 09:48:39.000000000 +0000 @@ -0,0 +1,38 @@ + diff -Nru check-mk-1.2.2p3/check_mk-dell_chassis_temp.php check-mk-1.2.6p12/check_mk-dell_chassis_temp.php --- check-mk-1.2.2p3/check_mk-dell_chassis_temp.php 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/check_mk-dell_chassis_temp.php 2015-06-24 09:48:39.000000000 +0000 @@ -0,0 +1,38 @@ + diff -Nru check-mk-1.2.2p3/check_mk-dell_om_sensors.php check-mk-1.2.6p12/check_mk-dell_om_sensors.php --- check-mk-1.2.2p3/check_mk-dell_om_sensors.php 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/check_mk-dell_om_sensors.php 2015-06-24 09:48:39.000000000 +0000 @@ -0,0 +1,38 @@ + diff -Nru check-mk-1.2.2p3/check_mk-dell_powerconnect_cpu.php check-mk-1.2.6p12/check_mk-dell_powerconnect_cpu.php --- check-mk-1.2.2p3/check_mk-dell_powerconnect_cpu.php 2013-03-04 11:48:41.000000000 +0000 +++ check-mk-1.2.6p12/check_mk-dell_powerconnect_cpu.php 2015-06-24 09:48:39.000000000 +0000 @@ -6,7 +6,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. @@ -51,5 +51,5 @@ "GPRINT:load3:MIN:\"Min\: %.0lf %s$UNIT[3] \" ". 
"GPRINT:load3:MAX:\"Max\: %.0lf %s$UNIT[3] \\n\" ". ""; - + ?> diff -Nru check-mk-1.2.2p3/check_mk-dell_powerconnect_temp.php check-mk-1.2.6p12/check_mk-dell_powerconnect_temp.php --- check-mk-1.2.2p3/check_mk-dell_powerconnect_temp.php 2013-03-04 11:48:42.000000000 +0000 +++ check-mk-1.2.6p12/check_mk-dell_powerconnect_temp.php 2015-06-24 09:48:39.000000000 +0000 @@ -6,7 +6,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. @@ -37,5 +37,5 @@ "GPRINT:load1:MIN:\"Min\: %.0lf C \" ". "GPRINT:load1:MAX:\"Max\: %.0lf C \\n\" ". ""; - + ?> diff -Nru check-mk-1.2.2p3/check_mk-df_netapp32.php check-mk-1.2.6p12/check_mk-df_netapp32.php --- check-mk-1.2.2p3/check_mk-df_netapp32.php 2013-11-05 09:22:37.000000000 +0000 +++ check-mk-1.2.6p12/check_mk-df_netapp32.php 2015-09-21 10:59:54.000000000 +0000 @@ -6,7 +6,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. @@ -25,10 +25,23 @@ setlocale(LC_ALL, "POSIX"); +// Make data sources available via names +$RRD = array(); +foreach ($NAME as $i => $n) { + $RRD[$n] = "$RRDFILE[$i]:$DS[$i]:MAX"; + $RRD_MIN[$n] = "$RRDFILE[$i]:$DS[$i]:MIN"; + $RRD_AVG[$n] = "$RRDFILE[$i]:$DS[$i]:AVERAGE"; + $WARN[$n] = $WARN[$i]; + $CRIT[$n] = $CRIT[$i]; + $MIN[$n] = $MIN[$i]; + $MAX[$n] = $MAX[$i]; + $ACT[$n] = $ACT[$i]; +} + # RRDtool Options #$servicedes=$NAGIOS_SERVICEDESC -$fsname = str_replace("_", "/", substr($servicedesc, 3)); +$fsname = str_replace("_", "/", substr($servicedesc,11)); $fstitle = $fsname; # Hack for windows: replace C// with C:\ @@ -47,57 +60,98 @@ $opt[1] = "--vertical-label GB -l 0 -u $maxgb --title '$hostname: Filesystem $fstitle ($sizegb GB)' "; # First graph show current filesystem usage -$def[1] = "DEF:mb=$RRDFILE[1]:$DS[1]:MAX "; +$def[1] = "DEF:mb=$RRDFILE[1]:$DS[1]:MAX "; $def[1] .= "CDEF:var1=mb,1024,/ "; -$def[1] .= "AREA:var1#00ffc6:\"used space on $fsname\\n\" "; -$def[1] .= "LINE1:var1#226600: "; +$def[1] .= "AREA:var1#00ffc6:\"used space on $fsname\\n\" "; + +# Optional uncommitted usage e.g. for esx hosts +if(isset($RRD['uncommitted'])) { + $def[1] .= "DEF:uncommitted_mb=".$RRD['uncommitted']." "; + $def[1] .= "CDEF:uncommitted_gb=uncommitted_mb,1024,/ "; + $def[1] .= "CDEF:total_gb=uncommitted_gb,var1,+ "; +} else { + $def[1] .= "CDEF:total_gb=var1 "; +} + $def[1] .= "HRULE:$maxgb#003300:\"Size ($sizegb GB) \" "; $def[1] .= "HRULE:$warngb#ffff00:\"Warning at $warngbtxt GB \" "; $def[1] .= "HRULE:$critgb#ff0000:\"Critical at $critgbtxt GB \\n\" "; $def[1] .= "GPRINT:var1:LAST:\"current\: %6.2lf GB\" "; $def[1] .= "GPRINT:var1:MAX:\"max\: %6.2lf GB \" "; -$def[1] .= "GPRINT:var1:AVERAGE:\"avg\: %6.2lf GB\" "; +$def[1] .= "GPRINT:var1:AVERAGE:\"avg\: %6.2lf GB\\n\" "; + +if(isset($RRD['uncommitted'])) { + $def[1] .= "AREA:uncommitted_gb#eeccff:\"Uncommited\":STACK "; + $def[1] .= "GPRINT:uncommitted_gb:MAX:\"%6.2lf GB\l\" "; +} + +$def[1] .= "LINE1:total_gb#226600 "; # Second graph is optional and shows trend. 
The MAX field # of the third variable contains (size of the filesystem in MB # / range in hours). From that we can compute the configured range. -if (isset($DS[2])) { - $size_mb_per_hours = floatval($MAX[3]); // this is size_mb / range(hours) +if (isset($RRD['growth'])) { + $size_mb_per_hours = floatval($MAX['trend']); // this is size_mb / range(hours) $size_mb = floatval($MAX[1]); $hours = 1.0 / ($size_mb_per_hours / $size_mb); $range = sprintf("%.0fh", $hours); - // Current growth / shrinking. This value is give as MB / 24 hours. + // Current growth / shrinking. This value is give as MB / 24 hours. // Note: This has changed in 1.1.13i3. Prior it was MB / trend_range! $opt[2] = "--vertical-label '+/- MB / 24h' -l -1 -u 1 -X0 --title '$hostname: Growth of $fstitle' "; - $def[2] = "DEF:growth_max=$RRDFILE[2]:$DS[2]:MAX "; - $def[2] .= "DEF:growth_min=$RRDFILE[2]:$DS[2]:MIN "; - $def[2] .= "DEF:trend=$RRDFILE[3]:$DS[3]:AVERAGE "; + $def[2] = "DEF:growth_max=${RRD['growth']} "; + $def[2] .= "DEF:growth_min=${RRD_MIN['growth']} "; + $def[2] .= "DEF:trend=${RRD_AVG['trend']} "; $def[2] .= "CDEF:growth_pos=growth_max,0,MAX "; $def[2] .= "CDEF:growth_neg=growth_min,0,MIN "; - $def[2] .= "CDEF:growth_minabs=0,growth_min,- "; + $def[2] .= "CDEF:growth_minabs=0,growth_min,- "; $def[2] .= "CDEF:growth=growth_minabs,growth_max,MAX "; $def[2] .= "HRULE:0#c0c0c0 "; - $def[2] .= "AREA:growth_pos#3060f0:\"Grow\" "; - $def[2] .= "AREA:growth_neg#30f060:\"Shrink \" "; + $def[2] .= "AREA:growth_pos#3060f0:\"Grow\" "; + $def[2] .= "AREA:growth_neg#30f060:\"Shrink \" "; $def[2] .= "GPRINT:growth:LAST:\"Current\: %+9.2lfMB / 24h\" "; $def[2] .= "GPRINT:growth:MAX:\"Max\: %+9.2lfMB / 24h\\n\" "; // Trend $opt[3] = "--vertical-label '+/- MB / 24h' -l -1 -u 1 -X0 --title '$hostname: Trend for $fstitle' "; - $def[3] = "DEF:trend=$RRDFILE[3]:$DS[3]:AVERAGE "; + $def[3] = "DEF:trend=${RRD_AVG['trend']} "; $def[3] .= "HRULE:0#c0c0c0 "; $def[3] .= "LINE1:trend#000000:\"Trend\:\" "; - $def[3] .= "GPRINT:trend:LAST:\"%+7.2lf MB/24h\" "; - if ($WARN[3]) { - $warn_mb = sprintf("%.2fMB", $WARN[3] * $hours / 24.0); - $def[3] .= "LINE1:$WARN[3]#ffff00:\"Warn\: $warn_mb / $range\" "; + $def[3] .= "GPRINT:trend:LAST:\"%+7.2lf MB/24h\" "; + if ($WARN['trend']) { + $warn_mb = sprintf("%.2fMB", $WARN['trend'] * $hours / 24.0); + $def[3] .= "LINE1:${WARN['trend']}#ffff00:\"Warn\: $warn_mb / $range\" "; } - if ($CRIT[3]) { - $crit_mb = sprintf("%.2fMB", $CRIT[3] * $hours / 24.0); - $def[3] .= "LINE1:$CRIT[3]#ff0000:\"Crit\: $crit_mb / $range\" "; + if ($CRIT['trend']) { + $crit_mb = sprintf("%.2fMB", $CRIT['trend'] * $hours / 24.0); + $def[3] .= "LINE1:${CRIT['trend']}#ff0000:\"Crit\: $crit_mb / $range\" "; } $def[3] .= "COMMENT:\"\\n\" "; } +if (isset($RRD['trend_hoursleft'])) { + // Trend + $opt[4] = "--vertical-label 'Days left' -l -1 -u 365 -X0 --title '$hostname: Days left for $fstitle' "; + $def[4] = "DEF:hours_left=${RRD_AVG['trend_hoursleft']} "; + $def[4] .= "DEF:hours_left_min=${RRD_MIN['trend_hoursleft']} "; + // negative hours indicate no growth + // the dataset hours_left_isneg stores this info for each point as True/False + $def[4] .= "CDEF:hours_left_isneg=hours_left_min,-1,EQ "; + $def[4] .= "CDEF:hours_left_unmon=hours_left_min,400,0,IF "; + $def[4] .= "CDEF:days_left=hours_left,24,/ "; + $def[4] .= "CDEF:days_left_cap=days_left,400,MIN "; + // Convert negative points to 400 (y-axis cap) + $def[4] .= "CDEF:days_left_cap_positive=hours_left_isneg,400,days_left_cap,IF "; + // The AREA has a rendering problem. 
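
The Grow/Shrink legend in the hunk above condenses both directions into a single magnitude: the RPN subtracts the (possibly negative) minimum from zero and keeps the larger of that and the maximum. One data point evaluated in PHP, with sample values:

<?php
$growth_max = -120.0;   // sample: both consolidations negative, i.e. shrinking
$growth_min = -180.0;
$growth_minabs = 0 - $growth_min;            // CDEF "0,growth_min,-"        -> 180
$growth = max($growth_minabs, $growth_max);  // CDEF "...,growth_max,MAX"    -> 180
printf("Legend magnitude: %+9.2f MB / 24h\n", $growth);
?>
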
Points are too far to the right + $def[4] .= "AREA:hours_left_unmon#AA2200: "; + + $def[4] .= "AREA:days_left_cap_positive#22AA44:\"Days left\:\" "; + if ($ACT[4] == -1) + { + $def[4] .= "COMMENT:\"Not growing\" "; + } + else { + $def[4] .= "GPRINT:days_left:LAST:\"%7.2lf days\" "; + } +} ?> diff -Nru check-mk-1.2.2p3/check_mk-df_netapp.php check-mk-1.2.6p12/check_mk-df_netapp.php --- check-mk-1.2.2p3/check_mk-df_netapp.php 2013-11-05 09:22:37.000000000 +0000 +++ check-mk-1.2.6p12/check_mk-df_netapp.php 2015-09-21 10:59:54.000000000 +0000 @@ -6,7 +6,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. @@ -25,10 +25,23 @@ setlocale(LC_ALL, "POSIX"); +// Make data sources available via names +$RRD = array(); +foreach ($NAME as $i => $n) { + $RRD[$n] = "$RRDFILE[$i]:$DS[$i]:MAX"; + $RRD_MIN[$n] = "$RRDFILE[$i]:$DS[$i]:MIN"; + $RRD_AVG[$n] = "$RRDFILE[$i]:$DS[$i]:AVERAGE"; + $WARN[$n] = $WARN[$i]; + $CRIT[$n] = $CRIT[$i]; + $MIN[$n] = $MIN[$i]; + $MAX[$n] = $MAX[$i]; + $ACT[$n] = $ACT[$i]; +} + # RRDtool Options #$servicedes=$NAGIOS_SERVICEDESC -$fsname = str_replace("_", "/", substr($servicedesc, 3)); +$fsname = str_replace("_", "/", substr($servicedesc,11)); $fstitle = $fsname; # Hack for windows: replace C// with C:\ @@ -47,57 +60,98 @@ $opt[1] = "--vertical-label GB -l 0 -u $maxgb --title '$hostname: Filesystem $fstitle ($sizegb GB)' "; # First graph show current filesystem usage -$def[1] = "DEF:mb=$RRDFILE[1]:$DS[1]:MAX "; +$def[1] = "DEF:mb=$RRDFILE[1]:$DS[1]:MAX "; $def[1] .= "CDEF:var1=mb,1024,/ "; -$def[1] .= "AREA:var1#00ffc6:\"used space on $fsname\\n\" "; -$def[1] .= "LINE1:var1#226600: "; +$def[1] .= "AREA:var1#00ffc6:\"used space on $fsname\\n\" "; + +# Optional uncommitted usage e.g. for esx hosts +if(isset($RRD['uncommitted'])) { + $def[1] .= "DEF:uncommitted_mb=".$RRD['uncommitted']." "; + $def[1] .= "CDEF:uncommitted_gb=uncommitted_mb,1024,/ "; + $def[1] .= "CDEF:total_gb=uncommitted_gb,var1,+ "; +} else { + $def[1] .= "CDEF:total_gb=var1 "; +} + $def[1] .= "HRULE:$maxgb#003300:\"Size ($sizegb GB) \" "; $def[1] .= "HRULE:$warngb#ffff00:\"Warning at $warngbtxt GB \" "; $def[1] .= "HRULE:$critgb#ff0000:\"Critical at $critgbtxt GB \\n\" "; $def[1] .= "GPRINT:var1:LAST:\"current\: %6.2lf GB\" "; $def[1] .= "GPRINT:var1:MAX:\"max\: %6.2lf GB \" "; -$def[1] .= "GPRINT:var1:AVERAGE:\"avg\: %6.2lf GB\" "; +$def[1] .= "GPRINT:var1:AVERAGE:\"avg\: %6.2lf GB\\n\" "; + +if(isset($RRD['uncommitted'])) { + $def[1] .= "AREA:uncommitted_gb#eeccff:\"Uncommited\":STACK "; + $def[1] .= "GPRINT:uncommitted_gb:MAX:\"%6.2lf GB\l\" "; +} + +$def[1] .= "LINE1:total_gb#226600 "; # Second graph is optional and shows trend. The MAX field # of the third variable contains (size of the filesystem in MB # / range in hours). From that we can compute the configured range. -if (isset($DS[2])) { - $size_mb_per_hours = floatval($MAX[3]); // this is size_mb / range(hours) +if (isset($RRD['growth'])) { + $size_mb_per_hours = floatval($MAX['trend']); // this is size_mb / range(hours) $size_mb = floatval($MAX[1]); $hours = 1.0 / ($size_mb_per_hours / $size_mb); $range = sprintf("%.0fh", $hours); - // Current growth / shrinking. This value is give as MB / 24 hours. + // Current growth / shrinking. This value is give as MB / 24 hours. 
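
The "days left" graph completed above leans entirely on RPN conditionals: EQ flags the marker value -1 ("not growing"), IF substitutes the 400-day y-axis cap, and MIN clamps real forecasts to the same cap. The same chain for a single data point, with sample inputs:

<?php
$hours_left     = 1200.0;   // sample forecast
$hours_left_min = 1200.0;   // a value of -1 would mean "not growing"
$isneg   = ($hours_left_min == -1) ? 1 : 0;   // CDEF "...,-1,EQ"
$unmon   = $hours_left_min ? 400 : 0;         // CDEF "...,400,0,IF"
$days    = $hours_left / 24.0;                // CDEF "...,24,/"    -> 50 days
$capped  = min($days, 400);                   // CDEF "...,400,MIN"
$plotted = $isneg ? 400 : $capped;            // final IF pins "not growing" to the cap
printf("plotted value: %.1f days\n", $plotted);
?>
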
// Note: This has changed in 1.1.13i3. Prior it was MB / trend_range! $opt[2] = "--vertical-label '+/- MB / 24h' -l -1 -u 1 -X0 --title '$hostname: Growth of $fstitle' "; - $def[2] = "DEF:growth_max=$RRDFILE[2]:$DS[2]:MAX "; - $def[2] .= "DEF:growth_min=$RRDFILE[2]:$DS[2]:MIN "; - $def[2] .= "DEF:trend=$RRDFILE[3]:$DS[3]:AVERAGE "; + $def[2] = "DEF:growth_max=${RRD['growth']} "; + $def[2] .= "DEF:growth_min=${RRD_MIN['growth']} "; + $def[2] .= "DEF:trend=${RRD_AVG['trend']} "; $def[2] .= "CDEF:growth_pos=growth_max,0,MAX "; $def[2] .= "CDEF:growth_neg=growth_min,0,MIN "; - $def[2] .= "CDEF:growth_minabs=0,growth_min,- "; + $def[2] .= "CDEF:growth_minabs=0,growth_min,- "; $def[2] .= "CDEF:growth=growth_minabs,growth_max,MAX "; $def[2] .= "HRULE:0#c0c0c0 "; - $def[2] .= "AREA:growth_pos#3060f0:\"Grow\" "; - $def[2] .= "AREA:growth_neg#30f060:\"Shrink \" "; + $def[2] .= "AREA:growth_pos#3060f0:\"Grow\" "; + $def[2] .= "AREA:growth_neg#30f060:\"Shrink \" "; $def[2] .= "GPRINT:growth:LAST:\"Current\: %+9.2lfMB / 24h\" "; $def[2] .= "GPRINT:growth:MAX:\"Max\: %+9.2lfMB / 24h\\n\" "; // Trend $opt[3] = "--vertical-label '+/- MB / 24h' -l -1 -u 1 -X0 --title '$hostname: Trend for $fstitle' "; - $def[3] = "DEF:trend=$RRDFILE[3]:$DS[3]:AVERAGE "; + $def[3] = "DEF:trend=${RRD_AVG['trend']} "; $def[3] .= "HRULE:0#c0c0c0 "; $def[3] .= "LINE1:trend#000000:\"Trend\:\" "; - $def[3] .= "GPRINT:trend:LAST:\"%+7.2lf MB/24h\" "; - if ($WARN[3]) { - $warn_mb = sprintf("%.2fMB", $WARN[3] * $hours / 24.0); - $def[3] .= "LINE1:$WARN[3]#ffff00:\"Warn\: $warn_mb / $range\" "; + $def[3] .= "GPRINT:trend:LAST:\"%+7.2lf MB/24h\" "; + if ($WARN['trend']) { + $warn_mb = sprintf("%.2fMB", $WARN['trend'] * $hours / 24.0); + $def[3] .= "LINE1:${WARN['trend']}#ffff00:\"Warn\: $warn_mb / $range\" "; } - if ($CRIT[3]) { - $crit_mb = sprintf("%.2fMB", $CRIT[3] * $hours / 24.0); - $def[3] .= "LINE1:$CRIT[3]#ff0000:\"Crit\: $crit_mb / $range\" "; + if ($CRIT['trend']) { + $crit_mb = sprintf("%.2fMB", $CRIT['trend'] * $hours / 24.0); + $def[3] .= "LINE1:${CRIT['trend']}#ff0000:\"Crit\: $crit_mb / $range\" "; } $def[3] .= "COMMENT:\"\\n\" "; } +if (isset($RRD['trend_hoursleft'])) { + // Trend + $opt[4] = "--vertical-label 'Days left' -l -1 -u 365 -X0 --title '$hostname: Days left for $fstitle' "; + $def[4] = "DEF:hours_left=${RRD_AVG['trend_hoursleft']} "; + $def[4] .= "DEF:hours_left_min=${RRD_MIN['trend_hoursleft']} "; + // negative hours indicate no growth + // the dataset hours_left_isneg stores this info for each point as True/False + $def[4] .= "CDEF:hours_left_isneg=hours_left_min,-1,EQ "; + $def[4] .= "CDEF:hours_left_unmon=hours_left_min,400,0,IF "; + $def[4] .= "CDEF:days_left=hours_left,24,/ "; + $def[4] .= "CDEF:days_left_cap=days_left,400,MIN "; + // Convert negative points to 400 (y-axis cap) + $def[4] .= "CDEF:days_left_cap_positive=hours_left_isneg,400,days_left_cap,IF "; + // The AREA has a rendering problem. Points are too far to the right + $def[4] .= "AREA:hours_left_unmon#AA2200: "; + + $def[4] .= "AREA:days_left_cap_positive#22AA44:\"Days left\:\" "; + if ($ACT[4] == -1) + { + $def[4] .= "COMMENT:\"Not growing\" "; + } + else { + $def[4] .= "GPRINT:days_left:LAST:\"%7.2lf days\" "; + } +} ?> diff -Nru check-mk-1.2.2p3/check_mk-df.php check-mk-1.2.6p12/check_mk-df.php --- check-mk-1.2.2p3/check_mk-df.php 2013-11-05 09:22:37.000000000 +0000 +++ check-mk-1.2.6p12/check_mk-df.php 2015-09-21 10:59:54.000000000 +0000 @@ -6,7 +6,7 @@ # | | |___| | | | __/ (__| < | | | | . 
\ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. @@ -25,10 +25,23 @@ setlocale(LC_ALL, "POSIX"); +// Make data sources available via names +$RRD = array(); +foreach ($NAME as $i => $n) { + $RRD[$n] = "$RRDFILE[$i]:$DS[$i]:MAX"; + $RRD_MIN[$n] = "$RRDFILE[$i]:$DS[$i]:MIN"; + $RRD_AVG[$n] = "$RRDFILE[$i]:$DS[$i]:AVERAGE"; + $WARN[$n] = $WARN[$i]; + $CRIT[$n] = $CRIT[$i]; + $MIN[$n] = $MIN[$i]; + $MAX[$n] = $MAX[$i]; + $ACT[$n] = $ACT[$i]; +} + # RRDtool Options #$servicedes=$NAGIOS_SERVICEDESC -$fsname = str_replace("_", "/", substr($servicedesc, 3)); +$fsname = str_replace("_", "/", substr($servicedesc,11)); $fstitle = $fsname; # Hack for windows: replace C// with C:\ @@ -47,57 +60,98 @@ $opt[1] = "--vertical-label GB -l 0 -u $maxgb --title '$hostname: Filesystem $fstitle ($sizegb GB)' "; # First graph show current filesystem usage -$def[1] = "DEF:mb=$RRDFILE[1]:$DS[1]:MAX "; +$def[1] = "DEF:mb=$RRDFILE[1]:$DS[1]:MAX "; $def[1] .= "CDEF:var1=mb,1024,/ "; -$def[1] .= "AREA:var1#00ffc6:\"used space on $fsname\\n\" "; -$def[1] .= "LINE1:var1#226600: "; +$def[1] .= "AREA:var1#00ffc6:\"used space on $fsname\\n\" "; + +# Optional uncommitted usage e.g. for esx hosts +if(isset($RRD['uncommitted'])) { + $def[1] .= "DEF:uncommitted_mb=".$RRD['uncommitted']." "; + $def[1] .= "CDEF:uncommitted_gb=uncommitted_mb,1024,/ "; + $def[1] .= "CDEF:total_gb=uncommitted_gb,var1,+ "; +} else { + $def[1] .= "CDEF:total_gb=var1 "; +} + $def[1] .= "HRULE:$maxgb#003300:\"Size ($sizegb GB) \" "; $def[1] .= "HRULE:$warngb#ffff00:\"Warning at $warngbtxt GB \" "; $def[1] .= "HRULE:$critgb#ff0000:\"Critical at $critgbtxt GB \\n\" "; $def[1] .= "GPRINT:var1:LAST:\"current\: %6.2lf GB\" "; $def[1] .= "GPRINT:var1:MAX:\"max\: %6.2lf GB \" "; -$def[1] .= "GPRINT:var1:AVERAGE:\"avg\: %6.2lf GB\" "; +$def[1] .= "GPRINT:var1:AVERAGE:\"avg\: %6.2lf GB\\n\" "; + +if(isset($RRD['uncommitted'])) { + $def[1] .= "AREA:uncommitted_gb#eeccff:\"Uncommited\":STACK "; + $def[1] .= "GPRINT:uncommitted_gb:MAX:\"%6.2lf GB\l\" "; +} + +$def[1] .= "LINE1:total_gb#226600 "; # Second graph is optional and shows trend. The MAX field # of the third variable contains (size of the filesystem in MB # / range in hours). From that we can compute the configured range. -if (isset($DS[2])) { - $size_mb_per_hours = floatval($MAX[3]); // this is size_mb / range(hours) +if (isset($RRD['growth'])) { + $size_mb_per_hours = floatval($MAX['trend']); // this is size_mb / range(hours) $size_mb = floatval($MAX[1]); $hours = 1.0 / ($size_mb_per_hours / $size_mb); $range = sprintf("%.0fh", $hours); - // Current growth / shrinking. This value is give as MB / 24 hours. + // Current growth / shrinking. This value is give as MB / 24 hours. // Note: This has changed in 1.1.13i3. Prior it was MB / trend_range! 
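
As the comments above note, the check exports size_mb / range_hours as the MAX of the trend variable, so dividing the filesystem size by that value recovers the configured trend range; the trend levels, given per range, are then rescaled to MB per 24h for the legend. A worked example with assumed values:

<?php
$size_mb = 102400.0;                    // assumed filesystem size: 100 GB in MB
$size_mb_per_hours = $size_mb / 24.0;   // what the check exports for a 24h trend range
$hours = 1.0 / ($size_mb_per_hours / $size_mb);   // recovers 24.0
$range = sprintf("%.0fh", $hours);                // "24h"
$warn_trend = 10240.0;                  // assumed level: 10 GB per range
printf("Warn: %.2f MB / 24h over a %s range\n", $warn_trend * $hours / 24.0, $range);
?>
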
$opt[2] = "--vertical-label '+/- MB / 24h' -l -1 -u 1 -X0 --title '$hostname: Growth of $fstitle' "; - $def[2] = "DEF:growth_max=$RRDFILE[2]:$DS[2]:MAX "; - $def[2] .= "DEF:growth_min=$RRDFILE[2]:$DS[2]:MIN "; - $def[2] .= "DEF:trend=$RRDFILE[3]:$DS[3]:AVERAGE "; + $def[2] = "DEF:growth_max=${RRD['growth']} "; + $def[2] .= "DEF:growth_min=${RRD_MIN['growth']} "; + $def[2] .= "DEF:trend=${RRD_AVG['trend']} "; $def[2] .= "CDEF:growth_pos=growth_max,0,MAX "; $def[2] .= "CDEF:growth_neg=growth_min,0,MIN "; - $def[2] .= "CDEF:growth_minabs=0,growth_min,- "; + $def[2] .= "CDEF:growth_minabs=0,growth_min,- "; $def[2] .= "CDEF:growth=growth_minabs,growth_max,MAX "; $def[2] .= "HRULE:0#c0c0c0 "; - $def[2] .= "AREA:growth_pos#3060f0:\"Grow\" "; - $def[2] .= "AREA:growth_neg#30f060:\"Shrink \" "; + $def[2] .= "AREA:growth_pos#3060f0:\"Grow\" "; + $def[2] .= "AREA:growth_neg#30f060:\"Shrink \" "; $def[2] .= "GPRINT:growth:LAST:\"Current\: %+9.2lfMB / 24h\" "; $def[2] .= "GPRINT:growth:MAX:\"Max\: %+9.2lfMB / 24h\\n\" "; // Trend $opt[3] = "--vertical-label '+/- MB / 24h' -l -1 -u 1 -X0 --title '$hostname: Trend for $fstitle' "; - $def[3] = "DEF:trend=$RRDFILE[3]:$DS[3]:AVERAGE "; + $def[3] = "DEF:trend=${RRD_AVG['trend']} "; $def[3] .= "HRULE:0#c0c0c0 "; $def[3] .= "LINE1:trend#000000:\"Trend\:\" "; - $def[3] .= "GPRINT:trend:LAST:\"%+7.2lf MB/24h\" "; - if ($WARN[3]) { - $warn_mb = sprintf("%.2fMB", $WARN[3] * $hours / 24.0); - $def[3] .= "LINE1:$WARN[3]#ffff00:\"Warn\: $warn_mb / $range\" "; + $def[3] .= "GPRINT:trend:LAST:\"%+7.2lf MB/24h\" "; + if ($WARN['trend']) { + $warn_mb = sprintf("%.2fMB", $WARN['trend'] * $hours / 24.0); + $def[3] .= "LINE1:${WARN['trend']}#ffff00:\"Warn\: $warn_mb / $range\" "; } - if ($CRIT[3]) { - $crit_mb = sprintf("%.2fMB", $CRIT[3] * $hours / 24.0); - $def[3] .= "LINE1:$CRIT[3]#ff0000:\"Crit\: $crit_mb / $range\" "; + if ($CRIT['trend']) { + $crit_mb = sprintf("%.2fMB", $CRIT['trend'] * $hours / 24.0); + $def[3] .= "LINE1:${CRIT['trend']}#ff0000:\"Crit\: $crit_mb / $range\" "; } $def[3] .= "COMMENT:\"\\n\" "; } +if (isset($RRD['trend_hoursleft'])) { + // Trend + $opt[4] = "--vertical-label 'Days left' -l -1 -u 365 -X0 --title '$hostname: Days left for $fstitle' "; + $def[4] = "DEF:hours_left=${RRD_AVG['trend_hoursleft']} "; + $def[4] .= "DEF:hours_left_min=${RRD_MIN['trend_hoursleft']} "; + // negative hours indicate no growth + // the dataset hours_left_isneg stores this info for each point as True/False + $def[4] .= "CDEF:hours_left_isneg=hours_left_min,-1,EQ "; + $def[4] .= "CDEF:hours_left_unmon=hours_left_min,400,0,IF "; + $def[4] .= "CDEF:days_left=hours_left,24,/ "; + $def[4] .= "CDEF:days_left_cap=days_left,400,MIN "; + // Convert negative points to 400 (y-axis cap) + $def[4] .= "CDEF:days_left_cap_positive=hours_left_isneg,400,days_left_cap,IF "; + // The AREA has a rendering problem. Points are too far to the right + $def[4] .= "AREA:hours_left_unmon#AA2200: "; + + $def[4] .= "AREA:days_left_cap_positive#22AA44:\"Days left\:\" "; + if ($ACT[4] == -1) + { + $def[4] .= "COMMENT:\"Not growing\" "; + } + else { + $def[4] .= "GPRINT:days_left:LAST:\"%7.2lf days\" "; + } +} ?> diff -Nru check-mk-1.2.2p3/check_mk-diskstat.php check-mk-1.2.6p12/check_mk-diskstat.php --- check-mk-1.2.2p3/check_mk-diskstat.php 2013-03-04 11:48:42.000000000 +0000 +++ check-mk-1.2.6p12/check_mk-diskstat.php 2015-09-21 10:59:54.000000000 +0000 @@ -6,7 +6,7 @@ # | | |___| | | | __/ (__| < | | | | . 
\ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. @@ -41,7 +41,7 @@ $opt[1] = "--vertical-label 'Throughput (MB/s)' -X0 --title \"Disk throughput $hostname / $disk\" "; - $def[1] = + $def[1] = "HRULE:0#a0a0a0 ". # read "DEF:read=$RRD[read] ". @@ -53,7 +53,7 @@ # read average as line in the same graph if (isset($RRD["read.avg"])) { - $def[1] .= + $def[1] .= "DEF:read_avg=${RRD['read.avg']} ". "CDEF:read_avg_mb=read_avg,1048576,/ ". "LINE:read_avg_mb#202020 "; @@ -84,7 +84,7 @@ # write average if (isset($DS["write.avg"])) { - $def[1] .= + $def[1] .= "DEF:write_avg=${RRD['write.avg']} ". "CDEF:write_avg_mb=write_avg,1048576,/ ". "CDEF:write_avg_mb_neg=write_avg_mb,-1,* ". @@ -122,14 +122,14 @@ $def[] = "" . "DEF:read=$RRD[read_ql] " . "DEF:write=$RRD[write_ql] " - . "CDEF:writen=write,-1,* " + . "CDEF:writen=write,-1,* " . "HRULE:0#a0a0a0 " . "AREA:read#669a76 " . "AREA:writen#517ba5 " ; } - + } // legacy version of diskstat diff -Nru check-mk-1.2.2p3/check_mk-emc_datadomain_temps.php check-mk-1.2.6p12/check_mk-emc_datadomain_temps.php --- check-mk-1.2.2p3/check_mk-emc_datadomain_temps.php 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/check_mk-emc_datadomain_temps.php 2015-06-24 09:48:39.000000000 +0000 @@ -0,0 +1,38 @@ + diff -Nru check-mk-1.2.2p3/check_mk-emc_isilon_iops.php check-mk-1.2.6p12/check_mk-emc_isilon_iops.php --- check-mk-1.2.2p3/check_mk-emc_isilon_iops.php 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/check_mk-emc_isilon_iops.php 2015-06-24 09:48:39.000000000 +0000 @@ -0,0 +1,48 @@ + $n) { + $RRD[$n] = "$RRDFILE[$i]:$DS[$i]:MAX"; + $WARN[$n] = $WARN[$i]; + $CRIT[$n] = $CRIT[$i]; + $MIN[$n] = $MIN[$i]; + $MAX[$n] = $MAX[$i]; +} + +$servicedesc = str_replace("_", " ", $servicedesc); + +$opt[1] = "--vertical-label 'operations/s' -X0 --title \"$hostname / $servicedesc\" "; + +$def[1] = + "HRULE:0#a0a0a0 ". + "DEF:read_blocks=$RRD[iops] ". + "AREA:read_blocks#40c080:\"operations per second \" ". + "GPRINT:read_blocks:LAST:\"%8.0lf last\" ". + "GPRINT:read_blocks:AVERAGE:\"%6.0lf avg\" ". + "GPRINT:read_blocks:MAX:\"%6.0lf max\\n\" "; +?> + diff -Nru check-mk-1.2.2p3/check_mk-emcvnx_disks.php check-mk-1.2.6p12/check_mk-emcvnx_disks.php --- check-mk-1.2.2p3/check_mk-emcvnx_disks.php 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/check_mk-emcvnx_disks.php 2015-09-21 10:59:54.000000000 +0000 @@ -0,0 +1,148 @@ + $n) { + $RRD[$n] = "$RRDFILE[$i]:$DS[$i]:MAX"; + $WARN[$n] = $WARN[$i]; + $CRIT[$n] = $CRIT[$i]; + $MIN[$n] = $MIN[$i]; + $MAX[$n] = $MAX[$i]; + } + + $parts = explode("_", $servicedesc); + $disk = $parts[2]; + + $opt[1] = "--vertical-label 'Throughput (MB/s)' -X0 --title \"Disk throughput $hostname / $disk\" "; + + $def[1] = + "HRULE:0#a0a0a0 ". + # read + "DEF:read=$RRD[read] ". + "CDEF:read_mb=read,1048576,/ ". + "AREA:read_mb#40c080:\"Read \" ". + "GPRINT:read_mb:LAST:\"%8.1lf MB/s last\" ". + "GPRINT:read_mb:AVERAGE:\"%6.1lf MB/s avg\" ". + "GPRINT:read_mb:MAX:\"%6.1lf MB/s max\\n\" "; + + # read average as line in the same graph + if (isset($RRD["read.avg"])) { + $def[1] .= + "DEF:read_avg=${RRD['read.avg']} ". + "CDEF:read_avg_mb=read_avg,1048576,/ ". + "LINE:read_avg_mb#202020 "; + } + + # write + $def[1] .= + "DEF:write=$RRD[write] ". + "CDEF:write_mb=write,1048576,/ ". 
+ "CDEF:write_mb_neg=write_mb,-1,* ". + "AREA:write_mb_neg#4080c0:\"Write \" ". + "GPRINT:write_mb:LAST:\"%6.1lf MB/s last\" ". + "GPRINT:write_mb:AVERAGE:\"%6.1lf MB/s avg\" ". + "GPRINT:write_mb:MAX:\"%6.1lf MB/s max\\n\" ". + ""; + + # show levels for read + if ($WARN['read']) { + $def[1] .= "HRULE:$WARN[read]#ffd000:\"Warning for read at " . sprintf("%6.1f", $WARN[1]) . " MB/s \" "; + $def[1] .= "HRULE:$CRIT[read]#ff0000:\"Critical for read at " . sprintf("%6.1f", $CRIT[1]) . " MB/s\\n\" "; + } + + # show levels for write + if ($WARN['write']) { + $def[1] .= "HRULE:-$WARN[write]#ffd000:\"Warning for write at " . sprintf("%6.1f", $WARN[2]) . " MB/s \" "; + $def[1] .= "HRULE:-$CRIT[write]#ff0000:\"Critical for write at " . sprintf("%6.1f", $CRIT[2]) . " MB/s\\n\" "; + } + + # write average + if (isset($DS["write.avg"])) { + $def[1] .= + "DEF:write_avg=${RRD['write.avg']} ". + "CDEF:write_avg_mb=write_avg,1048576,/ ". + "CDEF:write_avg_mb_neg=write_avg_mb,-1,* ". + "LINE:write_avg_mb_neg#202020 "; + } + + # latency + if (isset($RRD["latency"])) { + $opt[] = "--vertical-label 'Latency (ms)' -X0 --title \"Latency $hostname / $disk\" "; + $def[] = "" + . "DEF:latency=$RRD[latency] " + . "AREA:latency#aaccdd:\"Latency\" " + . "LINE:latency#7799aa " + . "GPRINT:latency:LAST:\"%6.1lf ms last\" " + . "GPRINT:latency:AVERAGE:\"%6.1lf ms avg\" " + . "GPRINT:latency:MAX:\"%6.1lf ms max\\n\" " + ; + } + + # IOs per second + if (isset($RRD["ios"])) { + $opt[] = "--vertical-label 'IO Operations / sec' -X0 --title \"IOs/sec $hostname / $disk\" "; + $def[] = "" + . "DEF:ios=$RRD[ios] " + . "AREA:ios#ddccaa:\"ios\" " + . "LINE:ios#aa9977 " + . "GPRINT:ios:LAST:\"%6.1lf/sec last\" " + . "GPRINT:ios:AVERAGE:\"%6.1lf/sec avg\" " + . "GPRINT:ios:MAX:\"%6.1lf/sec max\\n\" " + ; + } + + if (isset($RRD["read_ql"])) { + $opt[] = "--vertical-label 'Queue Length' -X0 -u5 -l-5 --title \"Queue Length $hostname / $disk\" "; + $def[] = "" + . "DEF:read=$RRD[read_ql] " + . "DEF:write=$RRD[write_ql] " + . "CDEF:writen=write,-1,* " + . "HRULE:0#a0a0a0 " + . "AREA:read#669a76 " + . "AREA:writen#517ba5 " + ; + + } + +} + +// legacy version of diskstat +else { + $opt[1] = "--vertical-label 'Througput (MByte/s)' -l0 -u 1 --title \"Disk throughput $hostname / $servicedesc\" "; + + $def[1] = "DEF:kb=$RRDFILE[1]:$DS[1]:AVERAGE " ; + $def[1] .= "CDEF:mb=kb,1024,/ " ; + $def[1] .= "AREA:mb#40c080 " ; + "HRULE:0#a0a0a0 ". + $def[1] .= "GPRINT:mb:LAST:\"%6.1lf MByte/s last\" " ; + $def[1] .= "GPRINT:mb:AVERAGE:\"%6.1lf MByte/s avg\" " ; + $def[1] .= "GPRINT:mb:MAX:\"%6.1lf MByte/s max\\n\" "; +} +?> + diff -Nru check-mk-1.2.2p3/check_mk-emcvnx_hba.php check-mk-1.2.6p12/check_mk-emcvnx_hba.php --- check-mk-1.2.2p3/check_mk-emcvnx_hba.php 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/check_mk-emcvnx_hba.php 2015-06-24 09:48:39.000000000 +0000 @@ -0,0 +1,60 @@ + $n) { + $RRD[$n] = "$RRDFILE[$i]:$DS[$i]:MAX"; + $WARN[$n] = $WARN[$i]; + $CRIT[$n] = $CRIT[$i]; + $MIN[$n] = $MIN[$i]; + $MAX[$n] = $MAX[$i]; +} + +$servicedesc = str_replace("_", " ", $servicedesc); + +$opt[1] = "--vertical-label 'I/O (Blocks/s)' -X0 --title \"iSCSI traffic $hostname / $servicedesc\" "; + +$def[1] = + "HRULE:0#a0a0a0 ". +# read + "DEF:read_blocks=$RRD[read_blocks] ". + "AREA:read_blocks#40c080:\"Read \" ". + "GPRINT:read_blocks:LAST:\"%8.1lf Blocks/s last\" ". + "GPRINT:read_blocks:AVERAGE:\"%6.1lf Blocks/s avg\" ". + "GPRINT:read_blocks:MAX:\"%6.1lf Blocks/s max\\n\" "; + +# write +$def[1] .= + "DEF:write_blocks=$RRD[write_blocks] ". 
+ "CDEF:write_blocks_neg=write_blocks,-1,* ". + "AREA:write_blocks_neg#4080c0:\"Write \" ". + "GPRINT:write_blocks:LAST:\"%6.1lf Blocks/s last\" ". + "GPRINT:write_blocks:AVERAGE:\"%6.1lf Blocks/s avg\" ". + "GPRINT:write_blocks:MAX:\"%6.1lf Blocks/s max\\n\" ". + ""; + +?> + diff -Nru check-mk-1.2.2p3/check_mk-emcvnx_raidgroups.capacity_contiguous.php check-mk-1.2.6p12/check_mk-emcvnx_raidgroups.capacity_contiguous.php --- check-mk-1.2.2p3/check_mk-emcvnx_raidgroups.capacity_contiguous.php 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/check_mk-emcvnx_raidgroups.capacity_contiguous.php 2015-06-24 09:48:39.000000000 +0000 @@ -0,0 +1,157 @@ + $n) { + $RRD[$n] = "$RRDFILE[$i]:$DS[$i]:MAX"; + $RRD_MIN[$n] = "$RRDFILE[$i]:$DS[$i]:MIN"; + $RRD_AVG[$n] = "$RRDFILE[$i]:$DS[$i]:AVERAGE"; + $WARN[$n] = $WARN[$i]; + $CRIT[$n] = $CRIT[$i]; + $MIN[$n] = $MIN[$i]; + $MAX[$n] = $MAX[$i]; +} + +# RRDtool Options +#$servicedes=$NAGIOS_SERVICEDESC + +$fsname = str_replace("_", "/", substr($servicedesc, 3)); +$fstitle = $fsname; + +# Hack for windows: replace C// with C:\ +if (strlen($fsname) == 3 && substr($fsname, 1, 2) == '//') { + $fsname = $fsname[0] . "\:\\\\"; + $fstitle = $fsname[0] . ":\\"; +} + +$sizegb = sprintf("%.1f", $MAX[1] / 1024.0); +$maxgb = $MAX[1] / 1024.0; +$warngb = $WARN[1] / 1024.0; +$critgb = $CRIT[1] / 1024.0; +$warngbtxt = sprintf("%.1f", $warngb); +$critgbtxt = sprintf("%.1f", $critgb); + +$opt[1] = "--vertical-label GB -l 0 -u $maxgb --title '$hostname: $servicedesc ($sizegb GB)' "; + +# First graph show current filesystem usage +$def[1] = "DEF:mb=$RRDFILE[1]:$DS[1]:MAX "; +$def[1] .= "CDEF:var1=mb,1024,/ "; +$def[1] .= "AREA:var1#00ffc6:\"used space on $fsname\\n\" "; + +# Optional uncommitted usage e.g. for esx hosts +if(isset($RRD['uncommitted'])) { + $def[1] .= "DEF:uncommitted_mb=".$RRD['uncommitted']." "; + $def[1] .= "CDEF:uncommitted_gb=uncommitted_mb,1024,/ "; + $def[1] .= "CDEF:total_gb=uncommitted_gb,var1,+ "; +} else { + $def[1] .= "CDEF:total_gb=var1 "; +} + +$def[1] .= "HRULE:$maxgb#003300:\"Size ($sizegb GB) \" "; +$def[1] .= "HRULE:$warngb#ffff00:\"Warning at $warngbtxt GB \" "; +$def[1] .= "HRULE:$critgb#ff0000:\"Critical at $critgbtxt GB \\n\" "; +$def[1] .= "GPRINT:var1:LAST:\"current\: %6.2lf GB\" "; +$def[1] .= "GPRINT:var1:MAX:\"max\: %6.2lf GB \" "; +$def[1] .= "GPRINT:var1:AVERAGE:\"avg\: %6.2lf GB\\n\" "; + +if(isset($RRD['uncommitted'])) { + $def[1] .= "AREA:uncommitted_gb#eeccff:\"Uncommited\":STACK "; + $def[1] .= "GPRINT:uncommitted_gb:MAX:\"%6.2lf GB\l\" "; +} + +$def[1] .= "LINE1:total_gb#226600 "; + +# Second graph is optional and shows trend. The MAX field +# of the third variable contains (size of the filesystem in MB +# / range in hours). From that we can compute the configured range. +if (isset($RRD['growth'])) { + $size_mb_per_hours = floatval($MAX['trend']); // this is size_mb / range(hours) + $size_mb = floatval($MAX[1]); + $hours = 1.0 / ($size_mb_per_hours / $size_mb); + $range = sprintf("%.0fh", $hours); + + // Current growth / shrinking. This value is give as MB / 24 hours. + // Note: This has changed in 1.1.13i3. Prior it was MB / trend_range! 
+ $opt[2] = "--vertical-label '+/- MB / 24h' -l -1 -u 1 -X0 --title '$hostname: Growth of $servicedesc' "; + $def[2] = "DEF:growth_max=${RRD['growth']} "; + $def[2] .= "DEF:growth_min=${RRD_MIN['growth']} "; + $def[2] .= "DEF:trend=${RRD_AVG['trend']} "; + $def[2] .= "CDEF:growth_pos=growth_max,0,MAX "; + $def[2] .= "CDEF:growth_neg=growth_min,0,MIN "; + $def[2] .= "CDEF:growth_minabs=0,growth_min,- "; + $def[2] .= "CDEF:growth=growth_minabs,growth_max,MAX "; + $def[2] .= "HRULE:0#c0c0c0 "; + $def[2] .= "AREA:growth_pos#3060f0:\"Grow\" "; + $def[2] .= "AREA:growth_neg#30f060:\"Shrink \" "; + $def[2] .= "GPRINT:growth:LAST:\"Current\: %+9.2lfMB / 24h\" "; + $def[2] .= "GPRINT:growth:MAX:\"Max\: %+9.2lfMB / 24h\\n\" "; + + // Trend + $opt[3] = "--vertical-label '+/- MB / 24h' -l -1 -u 1 -X0 --title '$hostname: Trend for $servicedesc' "; + $def[3] = "DEF:trend=${RRD_AVG['trend']} "; + $def[3] .= "HRULE:0#c0c0c0 "; + $def[3] .= "LINE1:trend#000000:\"Trend\:\" "; + $def[3] .= "GPRINT:trend:LAST:\"%+7.2lf MB/24h\" "; + if ($WARN['trend']) { + $warn_mb = sprintf("%.2fMB", $WARN['trend'] * $hours / 24.0); + $def[3] .= "LINE1:${WARN['trend']}#ffff00:\"Warn\: $warn_mb / $range\" "; + } + if ($CRIT['trend']) { + $crit_mb = sprintf("%.2fMB", $CRIT['trend'] * $hours / 24.0); + $def[3] .= "LINE1:${CRIT['trend']}#ff0000:\"Crit\: $crit_mb / $range\" "; + } + $def[3] .= "COMMENT:\"\\n\" "; +} + +if (isset($RRD['trend_hoursleft'])) { + // Trend + $opt[4] = "--vertical-label 'Days left' -l -1 -u 365 -X0 --title '$hostname: Days left for $servicedesc' "; + $def[4] = "DEF:hours_left=${RRD_AVG['trend_hoursleft']} "; + $def[4] .= "DEF:hours_left_min=${RRD_MIN['trend_hoursleft']} "; + // negative hours indicate no growth + // the dataset hours_left_isneg stores this info for each point as True/False + $def[4] .= "CDEF:hours_left_isneg=hours_left_min,-1,EQ "; + $def[4] .= "CDEF:hours_left_unmon=hours_left_min,400,0,IF "; + $def[4] .= "CDEF:days_left=hours_left,24,/ "; + $def[4] .= "CDEF:days_left_cap=days_left,400,MIN "; + // Convert negative points to 400 (y-axis cap) + $def[4] .= "CDEF:days_left_cap_positive=hours_left_isneg,400,days_left_cap,IF "; + // The AREA has a rendering problem. Points are too far to the right + $def[4] .= "AREA:hours_left_unmon#AA2200: "; + + $def[4] .= "AREA:days_left_cap_positive#22AA44:\"Days left\:\" "; + if ($ACT[4] == -1) + { + $def[4] .= "COMMENT:\"Not growing\" "; + } + else { + $def[4] .= "GPRINT:days_left:LAST:\"%7.2lf days\" "; + } +} + +?> diff -Nru check-mk-1.2.2p3/check_mk-emcvnx_raidgroups.capacity.php check-mk-1.2.6p12/check_mk-emcvnx_raidgroups.capacity.php --- check-mk-1.2.2p3/check_mk-emcvnx_raidgroups.capacity.php 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/check_mk-emcvnx_raidgroups.capacity.php 2015-06-24 09:48:39.000000000 +0000 @@ -0,0 +1,157 @@ + $n) { + $RRD[$n] = "$RRDFILE[$i]:$DS[$i]:MAX"; + $RRD_MIN[$n] = "$RRDFILE[$i]:$DS[$i]:MIN"; + $RRD_AVG[$n] = "$RRDFILE[$i]:$DS[$i]:AVERAGE"; + $WARN[$n] = $WARN[$i]; + $CRIT[$n] = $CRIT[$i]; + $MIN[$n] = $MIN[$i]; + $MAX[$n] = $MAX[$i]; +} + +# RRDtool Options +#$servicedes=$NAGIOS_SERVICEDESC + +$fsname = str_replace("_", "/", substr($servicedesc, 3)); +$fstitle = $fsname; + +# Hack for windows: replace C// with C:\ +if (strlen($fsname) == 3 && substr($fsname, 1, 2) == '//') { + $fsname = $fsname[0] . "\:\\\\"; + $fstitle = $fsname[0] . 
":\\"; +} + +$sizegb = sprintf("%.1f", $MAX[1] / 1024.0); +$maxgb = $MAX[1] / 1024.0; +$warngb = $WARN[1] / 1024.0; +$critgb = $CRIT[1] / 1024.0; +$warngbtxt = sprintf("%.1f", $warngb); +$critgbtxt = sprintf("%.1f", $critgb); + +$opt[1] = "--vertical-label GB -l 0 -u $maxgb --title '$hostname: $servicedesc ($sizegb GB)' "; + +# First graph show current filesystem usage +$def[1] = "DEF:mb=$RRDFILE[1]:$DS[1]:MAX "; +$def[1] .= "CDEF:var1=mb,1024,/ "; +$def[1] .= "AREA:var1#00ffc6:\"used space on $fsname\\n\" "; + +# Optional uncommitted usage e.g. for esx hosts +if(isset($RRD['uncommitted'])) { + $def[1] .= "DEF:uncommitted_mb=".$RRD['uncommitted']." "; + $def[1] .= "CDEF:uncommitted_gb=uncommitted_mb,1024,/ "; + $def[1] .= "CDEF:total_gb=uncommitted_gb,var1,+ "; +} else { + $def[1] .= "CDEF:total_gb=var1 "; +} + +$def[1] .= "HRULE:$maxgb#003300:\"Size ($sizegb GB) \" "; +$def[1] .= "HRULE:$warngb#ffff00:\"Warning at $warngbtxt GB \" "; +$def[1] .= "HRULE:$critgb#ff0000:\"Critical at $critgbtxt GB \\n\" "; +$def[1] .= "GPRINT:var1:LAST:\"current\: %6.2lf GB\" "; +$def[1] .= "GPRINT:var1:MAX:\"max\: %6.2lf GB \" "; +$def[1] .= "GPRINT:var1:AVERAGE:\"avg\: %6.2lf GB\\n\" "; + +if(isset($RRD['uncommitted'])) { + $def[1] .= "AREA:uncommitted_gb#eeccff:\"Uncommited\":STACK "; + $def[1] .= "GPRINT:uncommitted_gb:MAX:\"%6.2lf GB\l\" "; +} + +$def[1] .= "LINE1:total_gb#226600 "; + +# Second graph is optional and shows trend. The MAX field +# of the third variable contains (size of the filesystem in MB +# / range in hours). From that we can compute the configured range. +if (isset($RRD['growth'])) { + $size_mb_per_hours = floatval($MAX['trend']); // this is size_mb / range(hours) + $size_mb = floatval($MAX[1]); + $hours = 1.0 / ($size_mb_per_hours / $size_mb); + $range = sprintf("%.0fh", $hours); + + // Current growth / shrinking. This value is give as MB / 24 hours. + // Note: This has changed in 1.1.13i3. Prior it was MB / trend_range! 
+ $opt[2] = "--vertical-label '+/- MB / 24h' -l -1 -u 1 -X0 --title '$hostname: Growth of $servicedesc' "; + $def[2] = "DEF:growth_max=${RRD['growth']} "; + $def[2] .= "DEF:growth_min=${RRD_MIN['growth']} "; + $def[2] .= "DEF:trend=${RRD_AVG['trend']} "; + $def[2] .= "CDEF:growth_pos=growth_max,0,MAX "; + $def[2] .= "CDEF:growth_neg=growth_min,0,MIN "; + $def[2] .= "CDEF:growth_minabs=0,growth_min,- "; + $def[2] .= "CDEF:growth=growth_minabs,growth_max,MAX "; + $def[2] .= "HRULE:0#c0c0c0 "; + $def[2] .= "AREA:growth_pos#3060f0:\"Grow\" "; + $def[2] .= "AREA:growth_neg#30f060:\"Shrink \" "; + $def[2] .= "GPRINT:growth:LAST:\"Current\: %+9.2lfMB / 24h\" "; + $def[2] .= "GPRINT:growth:MAX:\"Max\: %+9.2lfMB / 24h\\n\" "; + + // Trend + $opt[3] = "--vertical-label '+/- MB / 24h' -l -1 -u 1 -X0 --title '$hostname: Trend for $servicedesc' "; + $def[3] = "DEF:trend=${RRD_AVG['trend']} "; + $def[3] .= "HRULE:0#c0c0c0 "; + $def[3] .= "LINE1:trend#000000:\"Trend\:\" "; + $def[3] .= "GPRINT:trend:LAST:\"%+7.2lf MB/24h\" "; + if ($WARN['trend']) { + $warn_mb = sprintf("%.2fMB", $WARN['trend'] * $hours / 24.0); + $def[3] .= "LINE1:${WARN['trend']}#ffff00:\"Warn\: $warn_mb / $range\" "; + } + if ($CRIT['trend']) { + $crit_mb = sprintf("%.2fMB", $CRIT['trend'] * $hours / 24.0); + $def[3] .= "LINE1:${CRIT['trend']}#ff0000:\"Crit\: $crit_mb / $range\" "; + } + $def[3] .= "COMMENT:\"\\n\" "; +} + +if (isset($RRD['trend_hoursleft'])) { + // Trend + $opt[4] = "--vertical-label 'Days left' -l -1 -u 365 -X0 --title '$hostname: Days left for $servicedesc' "; + $def[4] = "DEF:hours_left=${RRD_AVG['trend_hoursleft']} "; + $def[4] .= "DEF:hours_left_min=${RRD_MIN['trend_hoursleft']} "; + // negative hours indicate no growth + // the dataset hours_left_isneg stores this info for each point as True/False + $def[4] .= "CDEF:hours_left_isneg=hours_left_min,-1,EQ "; + $def[4] .= "CDEF:hours_left_unmon=hours_left_min,400,0,IF "; + $def[4] .= "CDEF:days_left=hours_left,24,/ "; + $def[4] .= "CDEF:days_left_cap=days_left,400,MIN "; + // Convert negative points to 400 (y-axis cap) + $def[4] .= "CDEF:days_left_cap_positive=hours_left_isneg,400,days_left_cap,IF "; + // The AREA has a rendering problem. 
Points are too far to the right + $def[4] .= "AREA:hours_left_unmon#AA2200: "; + + $def[4] .= "AREA:days_left_cap_positive#22AA44:\"Days left\:\" "; + if ($ACT[4] == -1) + { + $def[4] .= "COMMENT:\"Not growing\" "; + } + else { + $def[4] .= "GPRINT:days_left:LAST:\"%7.2lf days\" "; + } +} + +?> diff -Nru check-mk-1.2.2p3/check_mk-emerson_temp.php check-mk-1.2.6p12/check_mk-emerson_temp.php --- check-mk-1.2.2p3/check_mk-emerson_temp.php 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/check_mk-emerson_temp.php 2015-06-24 09:48:39.000000000 +0000 @@ -0,0 +1,38 @@ + diff -Nru check-mk-1.2.2p3/check_mk-enterasys_cpu_util.php check-mk-1.2.6p12/check_mk-enterasys_cpu_util.php --- check-mk-1.2.2p3/check_mk-enterasys_cpu_util.php 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/check_mk-enterasys_cpu_util.php 2015-06-24 09:48:39.000000000 +0000 @@ -0,0 +1,80 @@ + $n) { + $RRD[$n] = "$RRDFILE[$i]:$DS[$i]:MAX"; + $RRD_AVG[$n] = "$RRDFILE[$i]:$DS[$i]:AVERAGE"; + $WARN[$n] = $WARN[$i]; + $CRIT[$n] = $CRIT[$i]; + $MIN[$n] = $MIN[$i]; + $MAX[$n] = $MAX[$i]; +} + +$num_threads = $MAX[1]; +$warnthreads = $WARN[1] * $num_threads / 100.0; +$critthreads = $CRIT[1] * $num_threads / 100.0; +$rightscale = 100.0 / $num_threads; + +$opt[1] = "--vertical-label 'Used CPU threads' --right-axis $rightscale:0 --right-axis-format '%4.1lf%%' --right-axis-label 'Utilization %' -l0 -ru $num_threads --title \"CPU Utilization for $hostname ($num_threads CPU threads)\" "; + +$def[1] = "DEF:perc=$RRD_AVG[util] " + . "CDEF:util=perc,$num_threads,*,100,/ " + ; + +$def[1] .= "HRULE:$MAX[util]#0040d0:\"$num_threads CPU Threads\\n\" " + ; + +$def[1] .= "AREA:util#60f020:\"Utilization\:\" " + . "LINE:util#50b01a " + . "GPRINT:perc:LAST:\"%.1lf%%\" " + . "GPRINT:util:LAST:\"(%.1lf Threads) \" " + . "GPRINT:perc:MIN:\"min\: %.1lf%%,\" " + . "GPRINT:util:MIN:\"(%.1lf), \" " + . "GPRINT:perc:MAX:\"max\: %.1lf%%\" " + . "GPRINT:util:MAX:\"(%.1lf)\\n\" " + ; + + +if (isset($RRD_AVG["avg"])) { + $def[1] .= "DEF:aperc=$RRD_AVG[avg] ". + "CDEF:avg=aperc,$num_threads,*,100,/ ". + "LINE:avg#004000:\"Averaged\: \" ". + "GPRINT:aperc:LAST:\"%.1lf%%,\" ". + "GPRINT:aperc:MIN:\"min\: %.1lf%%,\" ". + "GPRINT:aperc:MAX:\"max\: %.1lf%%\\n\" ". + ""; +} + +if ($WARN['util']) { + $def[1] .= "HRULE:$warnthreads#fff000:\"Warn at $WARN[util]% \" " + . "HRULE:$critthreads#ff0000:\"Critical at $CRIT[util]%\\n\" "; +} +else { + $def[1] .= "COMMENT:\"\\n\" "; +} + +?> diff -Nru check-mk-1.2.2p3/check_mk-enterasys_temp.php check-mk-1.2.6p12/check_mk-enterasys_temp.php --- check-mk-1.2.2p3/check_mk-enterasys_temp.php 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/check_mk-enterasys_temp.php 2015-06-24 09:48:39.000000000 +0000 @@ -0,0 +1,38 @@ + diff -Nru check-mk-1.2.2p3/check_mk-esx_vsphere_counters.diskio.php check-mk-1.2.6p12/check_mk-esx_vsphere_counters.diskio.php --- check-mk-1.2.2p3/check_mk-esx_vsphere_counters.diskio.php 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/check_mk-esx_vsphere_counters.diskio.php 2015-09-21 10:59:54.000000000 +0000 @@ -0,0 +1,148 @@ + $n) { + $RRD[$n] = "$RRDFILE[$i]:$DS[$i]:MAX"; + $WARN[$n] = $WARN[$i]; + $CRIT[$n] = $CRIT[$i]; + $MIN[$n] = $MIN[$i]; + $MAX[$n] = $MAX[$i]; + } + + $parts = explode("_", $servicedesc); + $disk = $parts[2]; + + $opt[1] = "--vertical-label 'Throughput (MB/s)' -X0 --title \"Disk throughput $hostname / $disk\" "; + + $def[1] = + "HRULE:0#a0a0a0 ". + # read + "DEF:read=$RRD[read] ". + "CDEF:read_mb=read,1048576,/ ". + "AREA:read_mb#40c080:\"Read \" ". 
+ "GPRINT:read_mb:LAST:\"%8.1lf MB/s last\" ". + "GPRINT:read_mb:AVERAGE:\"%6.1lf MB/s avg\" ". + "GPRINT:read_mb:MAX:\"%6.1lf MB/s max\\n\" "; + + # read average as line in the same graph + if (isset($RRD["read.avg"])) { + $def[1] .= + "DEF:read_avg=${RRD['read.avg']} ". + "CDEF:read_avg_mb=read_avg,1048576,/ ". + "LINE:read_avg_mb#202020 "; + } + + # write + $def[1] .= + "DEF:write=$RRD[write] ". + "CDEF:write_mb=write,1048576,/ ". + "CDEF:write_mb_neg=write_mb,-1,* ". + "AREA:write_mb_neg#4080c0:\"Write \" ". + "GPRINT:write_mb:LAST:\"%6.1lf MB/s last\" ". + "GPRINT:write_mb:AVERAGE:\"%6.1lf MB/s avg\" ". + "GPRINT:write_mb:MAX:\"%6.1lf MB/s max\\n\" ". + ""; + + # show levels for read + if ($WARN['read']) { + $def[1] .= "HRULE:$WARN[read]#ffd000:\"Warning for read at " . sprintf("%6.1f", $WARN[1]) . " MB/s \" "; + $def[1] .= "HRULE:$CRIT[read]#ff0000:\"Critical for read at " . sprintf("%6.1f", $CRIT[1]) . " MB/s\\n\" "; + } + + # show levels for write + if ($WARN['write']) { + $def[1] .= "HRULE:-$WARN[write]#ffd000:\"Warning for write at " . sprintf("%6.1f", $WARN[2]) . " MB/s \" "; + $def[1] .= "HRULE:-$CRIT[write]#ff0000:\"Critical for write at " . sprintf("%6.1f", $CRIT[2]) . " MB/s\\n\" "; + } + + # write average + if (isset($DS["write.avg"])) { + $def[1] .= + "DEF:write_avg=${RRD['write.avg']} ". + "CDEF:write_avg_mb=write_avg,1048576,/ ". + "CDEF:write_avg_mb_neg=write_avg_mb,-1,* ". + "LINE:write_avg_mb_neg#202020 "; + } + + # latency + if (isset($RRD["latency"])) { + $opt[] = "--vertical-label 'Latency (ms)' -X0 --title \"Latency $hostname / $disk\" "; + $def[] = "" + . "DEF:latency=$RRD[latency] " + . "AREA:latency#aaccdd:\"Latency\" " + . "LINE:latency#7799aa " + . "GPRINT:latency:LAST:\"%6.1lf ms last\" " + . "GPRINT:latency:AVERAGE:\"%6.1lf ms avg\" " + . "GPRINT:latency:MAX:\"%6.1lf ms max\\n\" " + ; + } + + # IOs per second + if (isset($RRD["ios"])) { + $opt[] = "--vertical-label 'IO Operations / sec' -X0 --title \"IOs/sec $hostname / $disk\" "; + $def[] = "" + . "DEF:ios=$RRD[ios] " + . "AREA:ios#ddccaa:\"ios\" " + . "LINE:ios#aa9977 " + . "GPRINT:ios:LAST:\"%6.1lf/sec last\" " + . "GPRINT:ios:AVERAGE:\"%6.1lf/sec avg\" " + . "GPRINT:ios:MAX:\"%6.1lf/sec max\\n\" " + ; + } + + if (isset($RRD["read_ql"])) { + $opt[] = "--vertical-label 'Queue Length' -X0 -u5 -l-5 --title \"Queue Length $hostname / $disk\" "; + $def[] = "" + . "DEF:read=$RRD[read_ql] " + . "DEF:write=$RRD[write_ql] " + . "CDEF:writen=write,-1,* " + . "HRULE:0#a0a0a0 " + . "AREA:read#669a76 " + . "AREA:writen#517ba5 " + ; + + } + +} + +// legacy version of diskstat +else { + $opt[1] = "--vertical-label 'Througput (MByte/s)' -l0 -u 1 --title \"Disk throughput $hostname / $servicedesc\" "; + + $def[1] = "DEF:kb=$RRDFILE[1]:$DS[1]:AVERAGE " ; + $def[1] .= "CDEF:mb=kb,1024,/ " ; + $def[1] .= "AREA:mb#40c080 " ; + "HRULE:0#a0a0a0 ". 
+ $def[1] .= "GPRINT:mb:LAST:\"%6.1lf MByte/s last\" " ; + $def[1] .= "GPRINT:mb:AVERAGE:\"%6.1lf MByte/s avg\" " ; + $def[1] .= "GPRINT:mb:MAX:\"%6.1lf MByte/s max\\n\" "; +} +?> + diff -Nru check-mk-1.2.2p3/check_mk-esx_vsphere_counters.if.php check-mk-1.2.6p12/check_mk-esx_vsphere_counters.if.php --- check-mk-1.2.2p3/check_mk-esx_vsphere_counters.if.php 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/check_mk-esx_vsphere_counters.if.php 2015-06-24 09:48:39.000000000 +0000 @@ -0,0 +1,226 @@ + $base * $base * $base) { + $scale = $base * $base * $base; + $bwuom = 'G'; +} +elseif ($bandwidth > $base * $base) { + $scale = $base * $base; + $bwuom = 'M'; +} +elseif ($bandwidth > $base) { + $scale = $base; + $bwuom = 'k'; +} +else { + $scale = 1; + $bwuom = ' '; +} + +$warn /= $scale; +$crit /= $scale; +$bandwidth /= $scale; + +$vertical_label_name = $bwuom . $unit . "/sec"; + +$range = min(10, $bandwidth); + + +$bandwidthInfo = ""; +if ($bandwidth > 0){ + $bandwidthInfo = " at " . sprintf("%.1f", $bandwidth) . " ${bwuom}${unit}/s"; +} +$ds_name[1] = 'Used bandwidth'; +$opt[1] = "--vertical-label \"$vertical_label_name\" -l -$range -u $range -X0 -b 1024 --title \"Used bandwidth $hostname / $servicedesc$bandwidthInfo\" "; +$def[1] = + "HRULE:0#c0c0c0 "; +if ($bandwidth) + $def[1] .= "HRULE:$bandwidth#808080:\"Port speed\: " . sprintf("%10.1f", $bandwidth) . " ".$bwuom."$unit/s\\n\" ". + "HRULE:-$bandwidth#808080: "; +if ($warn) + $def[1] .= "HRULE:$warn#ffff00:\"Warning\: " . sprintf("%13.1f", $warn) . " ".$bwuom."$unit/s\\n\" ". + "HRULE:-$warn#ffff00: "; +if ($crit) + $def[1] .= "HRULE:$crit#ff0000:\"Critical\: " . sprintf("%13.1f", $crit) . " ".$bwuom."$unit/s\\n\" ". + "HRULE:-$crit#ff0000: "; + + $def[1] .= "". + # incoming + "DEF:inbytes=$RRDFILE[1]:$DS[1]:MAX ". + "CDEF:intraffic=inbytes,$unit_multiplier,* ". + "CDEF:inmb=intraffic,$scale,/ ". + "AREA:inmb#00e060:\"in \" ". + "GPRINT:intraffic:LAST:\"%7.1lf %s$unit/s last\" ". + "GPRINT:intraffic:AVERAGE:\"%7.1lf %s$unit/s avg\" ". + "GPRINT:intraffic:MAX:\"%7.1lf %s$unit/s max\\n\" ". + "VDEF:inperc=intraffic,95,PERCENTNAN ". + "VDEF:inpercmb=inmb,95,PERCENTNAN ". + "LINE:inpercmb#008f00:\"95% percentile\" ". + "GPRINT:inperc:\"%7.1lf %s$unit/s\\n\" ". + + # outgoing + "DEF:outbytes=$RRDFILE[6]:$DS[6]:MAX ". + "CDEF:outtraffic=outbytes,$unit_multiplier,* ". + "CDEF:minusouttraffic=outtraffic,-1,* ". + "CDEF:outmb=outtraffic,$scale,/ ". + "CDEF:minusoutmb=0,outmb,- ". + "AREA:minusoutmb#0080e0:\"out \" ". + "GPRINT:outtraffic:LAST:\"%7.1lf %s$unit/s last\" ". + "GPRINT:outtraffic:AVERAGE:\"%7.1lf %s$unit/s avg\" ". + "GPRINT:outtraffic:MAX:\"%7.1lf %s$unit/s max\\n\" ". + "VDEF:outperc=minusouttraffic,5,PERCENTNAN ". + "VDEF:outpercmb=minusoutmb,5,PERCENTNAN ". + "LINE:outpercmb#00008f:\"95% percentile\" ". + "GPRINT:outperc:\"%7.1lf %s$unit/s\\n\" ". + + ""; + +if (isset($DS[12])) { + $def[1] .= + "DEF:inbytesa=$RRDFILE[12]:$DS[12]:MAX ". + "DEF:outbytesa=$RRDFILE[13]:$DS[13]:MAX ". + "CDEF:intraffica=inbytesa,$unit_multiplier,* ". + "CDEF:outtraffica=outbytesa,$unit_multiplier,* ". + "CDEF:inmba=intraffica,1048576,/ ". + "CDEF:outmba=outtraffica,1048576,/ ". + "CDEF:minusoutmba=0,outmba,- ". + "LINE:inmba#00a060:\"in (avg) \" ". + "GPRINT:intraffica:LAST:\"%6.1lf %s$unit/s last\" ". + "GPRINT:intraffica:AVERAGE:\"%6.1lf %s$unit/s avg\" ". + "GPRINT:intraffica:MAX:\"%6.1lf %s$unit/s max\\n\" ". + "LINE:minusoutmba#0060c0:\"out (avg) \" ". + "GPRINT:outtraffica:LAST:\"%6.1lf %s$unit/s last\" ". 
+ "GPRINT:outtraffica:AVERAGE:\"%6.1lf %s$unit/s avg\" ". + "GPRINT:outtraffica:MAX:\"%6.1lf %s$unit/s max\\n\" "; +} + +# Graph 2: packets +$ds_name[2] = 'Packets'; +$opt[2] = "--vertical-label \"packets/sec\" --title \"Packets $hostname / $servicedesc\" "; +$def[2] = + # ingoing + "HRULE:0#c0c0c0 ". + "DEF:inu=$RRDFILE[2]:$DS[2]:MAX ". + "DEF:innu=$RRDFILE[3]:$DS[3]:MAX ". + "CDEF:in=inu,innu,+ ". + "AREA:inu#00ffc0:\"in unicast \" ". + "GPRINT:inu:LAST:\"%9.1lf/s last \" ". + "GPRINT:inu:AVERAGE:\"%9.1lf/s avg \" ". + "GPRINT:inu:MAX:\"%9.1lf/s max\\n\" ". + "AREA:innu#00c080:\"in broadcast/multicast \":STACK ". + "GPRINT:innu:LAST:\"%9.1lf/s last \" ". + "GPRINT:innu:AVERAGE:\"%9.1lf/s avg \" ". + "GPRINT:innu:MAX:\"%9.1lf/s max\\n\" ". + "VDEF:inperc=in,95,PERCENTNAN ". + "LINE:inperc#00cf00:\"in 95% percentile \" ". + "GPRINT:inperc:\"%9.1lf/s\\n\" ". + + # outgoing + "DEF:outu=$RRDFILE[7]:$DS[7]:MAX ". + "DEF:outnu=$RRDFILE[8]:$DS[8]:MAX ". + "CDEF:minusoutu=0,outu,- ". + "CDEF:minusoutnu=0,outnu,- ". + "CDEF:minusout=minusoutu,minusoutnu,+ ". + "AREA:minusoutu#00c0ff:\"out unicast \" ". + "GPRINT:outu:LAST:\"%9.1lf/s last \" ". + "GPRINT:outu:AVERAGE:\"%9.1lf/s avg \" ". + "GPRINT:outu:MAX:\"%9.1lf/s max\\n\" ". + "AREA:minusoutnu#0080c0:\"out broadcast/multicast\":STACK ". + "GPRINT:outnu:LAST:\"%9.1lf/s last \" ". + "GPRINT:outnu:AVERAGE:\"%9.1lf/s avg \" ". + "GPRINT:outnu:MAX:\"%9.1lf/s max\\n\" ". + "VDEF:outperc=minusout,5,PERCENTNAN ". + "LINE:outperc#0000cf:\"out 95% percentile \" ". + "GPRINT:outperc:\"%9.1lf/s\\n\" ". + ""; + +# Graph 3: errors and discards +$ds_name[3] = 'Errors and discards'; +$opt[3] = "--vertical-label \"packets/sec\" -X0 --title \"Problems $hostname / $servicedesc\" "; +$def[3] = + "HRULE:0#c0c0c0 ". + "DEF:inerr=$RRDFILE[5]:$DS[5]:MAX ". + "DEF:indisc=$RRDFILE[4]:$DS[4]:MAX ". + "AREA:inerr#ff0000:\"in errors \" ". + "GPRINT:inerr:LAST:\"%7.2lf/s last \" ". + "GPRINT:inerr:AVERAGE:\"%7.2lf/s avg \" ". + "GPRINT:inerr:MAX:\"%7.2lf/s max\\n\" ". + "AREA:indisc#ff8000:\"in discards \":STACK ". + "GPRINT:indisc:LAST:\"%7.2lf/s last \" ". + "GPRINT:indisc:AVERAGE:\"%7.2lf/s avg \" ". + "GPRINT:indisc:MAX:\"%7.2lf/s max\\n\" ". + "DEF:outerr=$RRDFILE[10]:$DS[10]:MAX ". + "DEF:outdisc=$RRDFILE[9]:$DS[9]:MAX ". + "CDEF:minusouterr=0,outerr,- ". + "CDEF:minusoutdisc=0,outdisc,- ". + "AREA:minusouterr#ff0080:\"out errors \" ". + "GPRINT:outerr:LAST:\"%7.2lf/s last \" ". + "GPRINT:outerr:AVERAGE:\"%7.2lf/s avg \" ". + "GPRINT:outerr:MAX:\"%7.2lf/s max\\n\" ". + "AREA:minusoutdisc#ff8080:\"out discards \":STACK ". + "GPRINT:outdisc:LAST:\"%7.2lf/s last \" ". + "GPRINT:outdisc:AVERAGE:\"%7.2lf/s avg \" ". 
+ "GPRINT:outdisc:MAX:\"%7.2lf/s max\\n\" "; +?> diff -Nru check-mk-1.2.2p3/check_mk-esx_vsphere_counters.ramdisk.php check-mk-1.2.6p12/check_mk-esx_vsphere_counters.ramdisk.php --- check-mk-1.2.2p3/check_mk-esx_vsphere_counters.ramdisk.php 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/check_mk-esx_vsphere_counters.ramdisk.php 2015-09-21 10:59:54.000000000 +0000 @@ -0,0 +1,157 @@ + $n) { + $RRD[$n] = "$RRDFILE[$i]:$DS[$i]:MAX"; + $RRD_MIN[$n] = "$RRDFILE[$i]:$DS[$i]:MIN"; + $RRD_AVG[$n] = "$RRDFILE[$i]:$DS[$i]:AVERAGE"; + $WARN[$n] = $WARN[$i]; + $CRIT[$n] = $CRIT[$i]; + $MIN[$n] = $MIN[$i]; + $MAX[$n] = $MAX[$i]; + $ACT[$n] = $ACT[$i]; +} + +# RRDtool Options +#$servicedes=$NAGIOS_SERVICEDESC + +$fsname = str_replace("_", "/", substr($servicedesc,11)); +$fstitle = $fsname; + +# Hack for windows: replace C// with C:\ +if (strlen($fsname) == 3 && substr($fsname, 1, 2) == '//') { + $fsname = $fsname[0] . "\:\\\\"; + $fstitle = $fsname[0] . ":\\"; +} + +$sizegb = sprintf("%.1f", $MAX[1] / 1024.0); +$maxgb = $MAX[1] / 1024.0; +$warngb = $WARN[1] / 1024.0; +$critgb = $CRIT[1] / 1024.0; +$warngbtxt = sprintf("%.1f", $warngb); +$critgbtxt = sprintf("%.1f", $critgb); + +$opt[1] = "--vertical-label GB -l 0 -u $maxgb --title '$hostname: Filesystem $fstitle ($sizegb GB)' "; + +# First graph show current filesystem usage +$def[1] = "DEF:mb=$RRDFILE[1]:$DS[1]:MAX "; +$def[1] .= "CDEF:var1=mb,1024,/ "; +$def[1] .= "AREA:var1#00ffc6:\"used space on $fsname\\n\" "; + +# Optional uncommitted usage e.g. for esx hosts +if(isset($RRD['uncommitted'])) { + $def[1] .= "DEF:uncommitted_mb=".$RRD['uncommitted']." "; + $def[1] .= "CDEF:uncommitted_gb=uncommitted_mb,1024,/ "; + $def[1] .= "CDEF:total_gb=uncommitted_gb,var1,+ "; +} else { + $def[1] .= "CDEF:total_gb=var1 "; +} + +$def[1] .= "HRULE:$maxgb#003300:\"Size ($sizegb GB) \" "; +$def[1] .= "HRULE:$warngb#ffff00:\"Warning at $warngbtxt GB \" "; +$def[1] .= "HRULE:$critgb#ff0000:\"Critical at $critgbtxt GB \\n\" "; +$def[1] .= "GPRINT:var1:LAST:\"current\: %6.2lf GB\" "; +$def[1] .= "GPRINT:var1:MAX:\"max\: %6.2lf GB \" "; +$def[1] .= "GPRINT:var1:AVERAGE:\"avg\: %6.2lf GB\\n\" "; + +if(isset($RRD['uncommitted'])) { + $def[1] .= "AREA:uncommitted_gb#eeccff:\"Uncommited\":STACK "; + $def[1] .= "GPRINT:uncommitted_gb:MAX:\"%6.2lf GB\l\" "; +} + +$def[1] .= "LINE1:total_gb#226600 "; + +# Second graph is optional and shows trend. The MAX field +# of the third variable contains (size of the filesystem in MB +# / range in hours). From that we can compute the configured range. +if (isset($RRD['growth'])) { + $size_mb_per_hours = floatval($MAX['trend']); // this is size_mb / range(hours) + $size_mb = floatval($MAX[1]); + $hours = 1.0 / ($size_mb_per_hours / $size_mb); + $range = sprintf("%.0fh", $hours); + + // Current growth / shrinking. This value is give as MB / 24 hours. + // Note: This has changed in 1.1.13i3. Prior it was MB / trend_range! 
+ $opt[2] = "--vertical-label '+/- MB / 24h' -l -1 -u 1 -X0 --title '$hostname: Growth of $fstitle' "; + $def[2] = "DEF:growth_max=${RRD['growth']} "; + $def[2] .= "DEF:growth_min=${RRD_MIN['growth']} "; + $def[2] .= "DEF:trend=${RRD_AVG['trend']} "; + $def[2] .= "CDEF:growth_pos=growth_max,0,MAX "; + $def[2] .= "CDEF:growth_neg=growth_min,0,MIN "; + $def[2] .= "CDEF:growth_minabs=0,growth_min,- "; + $def[2] .= "CDEF:growth=growth_minabs,growth_max,MAX "; + $def[2] .= "HRULE:0#c0c0c0 "; + $def[2] .= "AREA:growth_pos#3060f0:\"Grow\" "; + $def[2] .= "AREA:growth_neg#30f060:\"Shrink \" "; + $def[2] .= "GPRINT:growth:LAST:\"Current\: %+9.2lfMB / 24h\" "; + $def[2] .= "GPRINT:growth:MAX:\"Max\: %+9.2lfMB / 24h\\n\" "; + + // Trend + $opt[3] = "--vertical-label '+/- MB / 24h' -l -1 -u 1 -X0 --title '$hostname: Trend for $fstitle' "; + $def[3] = "DEF:trend=${RRD_AVG['trend']} "; + $def[3] .= "HRULE:0#c0c0c0 "; + $def[3] .= "LINE1:trend#000000:\"Trend\:\" "; + $def[3] .= "GPRINT:trend:LAST:\"%+7.2lf MB/24h\" "; + if ($WARN['trend']) { + $warn_mb = sprintf("%.2fMB", $WARN['trend'] * $hours / 24.0); + $def[3] .= "LINE1:${WARN['trend']}#ffff00:\"Warn\: $warn_mb / $range\" "; + } + if ($CRIT['trend']) { + $crit_mb = sprintf("%.2fMB", $CRIT['trend'] * $hours / 24.0); + $def[3] .= "LINE1:${CRIT['trend']}#ff0000:\"Crit\: $crit_mb / $range\" "; + } + $def[3] .= "COMMENT:\"\\n\" "; +} + +if (isset($RRD['trend_hoursleft'])) { + // Trend + $opt[4] = "--vertical-label 'Days left' -l -1 -u 365 -X0 --title '$hostname: Days left for $fstitle' "; + $def[4] = "DEF:hours_left=${RRD_AVG['trend_hoursleft']} "; + $def[4] .= "DEF:hours_left_min=${RRD_MIN['trend_hoursleft']} "; + // negative hours indicate no growth + // the dataset hours_left_isneg stores this info for each point as True/False + $def[4] .= "CDEF:hours_left_isneg=hours_left_min,-1,EQ "; + $def[4] .= "CDEF:hours_left_unmon=hours_left_min,400,0,IF "; + $def[4] .= "CDEF:days_left=hours_left,24,/ "; + $def[4] .= "CDEF:days_left_cap=days_left,400,MIN "; + // Convert negative points to 400 (y-axis cap) + $def[4] .= "CDEF:days_left_cap_positive=hours_left_isneg,400,days_left_cap,IF "; + // The AREA has a rendering problem. 
Points are too far to the right + $def[4] .= "AREA:hours_left_unmon#AA2200: "; + + $def[4] .= "AREA:days_left_cap_positive#22AA44:\"Days left\:\" "; + if ($ACT[4] == -1) + { + $def[4] .= "COMMENT:\"Not growing\" "; + } + else { + $def[4] .= "GPRINT:days_left:LAST:\"%7.2lf days\" "; + } +} +?> diff -Nru check-mk-1.2.2p3/check_mk-esx_vsphere_counters.uptime.php check-mk-1.2.6p12/check_mk-esx_vsphere_counters.uptime.php --- check-mk-1.2.2p3/check_mk-esx_vsphere_counters.uptime.php 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/check_mk-esx_vsphere_counters.uptime.php 2015-06-24 09:48:39.000000000 +0000 @@ -0,0 +1,34 @@ + diff -Nru check-mk-1.2.2p3/check_mk-esx_vsphere_datastores.php check-mk-1.2.6p12/check_mk-esx_vsphere_datastores.php --- check-mk-1.2.2p3/check_mk-esx_vsphere_datastores.php 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/check_mk-esx_vsphere_datastores.php 2015-09-21 10:59:54.000000000 +0000 @@ -0,0 +1,157 @@ + $n) { + $RRD[$n] = "$RRDFILE[$i]:$DS[$i]:MAX"; + $RRD_MIN[$n] = "$RRDFILE[$i]:$DS[$i]:MIN"; + $RRD_AVG[$n] = "$RRDFILE[$i]:$DS[$i]:AVERAGE"; + $WARN[$n] = $WARN[$i]; + $CRIT[$n] = $CRIT[$i]; + $MIN[$n] = $MIN[$i]; + $MAX[$n] = $MAX[$i]; + $ACT[$n] = $ACT[$i]; +} + +# RRDtool Options +#$servicedes=$NAGIOS_SERVICEDESC + +$fsname = str_replace("_", "/", substr($servicedesc,11)); +$fstitle = $fsname; + +# Hack for windows: replace C// with C:\ +if (strlen($fsname) == 3 && substr($fsname, 1, 2) == '//') { + $fsname = $fsname[0] . "\:\\\\"; + $fstitle = $fsname[0] . ":\\"; +} + +$sizegb = sprintf("%.1f", $MAX[1] / 1024.0); +$maxgb = $MAX[1] / 1024.0; +$warngb = $WARN[1] / 1024.0; +$critgb = $CRIT[1] / 1024.0; +$warngbtxt = sprintf("%.1f", $warngb); +$critgbtxt = sprintf("%.1f", $critgb); + +$opt[1] = "--vertical-label GB -l 0 -u $maxgb --title '$hostname: Filesystem $fstitle ($sizegb GB)' "; + +# First graph show current filesystem usage +$def[1] = "DEF:mb=$RRDFILE[1]:$DS[1]:MAX "; +$def[1] .= "CDEF:var1=mb,1024,/ "; +$def[1] .= "AREA:var1#00ffc6:\"used space on $fsname\\n\" "; + +# Optional uncommitted usage e.g. for esx hosts +if(isset($RRD['uncommitted'])) { + $def[1] .= "DEF:uncommitted_mb=".$RRD['uncommitted']." "; + $def[1] .= "CDEF:uncommitted_gb=uncommitted_mb,1024,/ "; + $def[1] .= "CDEF:total_gb=uncommitted_gb,var1,+ "; +} else { + $def[1] .= "CDEF:total_gb=var1 "; +} + +$def[1] .= "HRULE:$maxgb#003300:\"Size ($sizegb GB) \" "; +$def[1] .= "HRULE:$warngb#ffff00:\"Warning at $warngbtxt GB \" "; +$def[1] .= "HRULE:$critgb#ff0000:\"Critical at $critgbtxt GB \\n\" "; +$def[1] .= "GPRINT:var1:LAST:\"current\: %6.2lf GB\" "; +$def[1] .= "GPRINT:var1:MAX:\"max\: %6.2lf GB \" "; +$def[1] .= "GPRINT:var1:AVERAGE:\"avg\: %6.2lf GB\\n\" "; + +if(isset($RRD['uncommitted'])) { + $def[1] .= "AREA:uncommitted_gb#eeccff:\"Uncommited\":STACK "; + $def[1] .= "GPRINT:uncommitted_gb:MAX:\"%6.2lf GB\l\" "; +} + +$def[1] .= "LINE1:total_gb#226600 "; + +# Second graph is optional and shows trend. The MAX field +# of the third variable contains (size of the filesystem in MB +# / range in hours). From that we can compute the configured range. +if (isset($RRD['growth'])) { + $size_mb_per_hours = floatval($MAX['trend']); // this is size_mb / range(hours) + $size_mb = floatval($MAX[1]); + $hours = 1.0 / ($size_mb_per_hours / $size_mb); + $range = sprintf("%.0fh", $hours); + + // Current growth / shrinking. This value is give as MB / 24 hours. + // Note: This has changed in 1.1.13i3. Prior it was MB / trend_range! 
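+ // The CDEFs below split the signal: growth_pos clips growth_max to >= 0
+ // (drawn upwards), growth_neg clips growth_min to <= 0 (drawn downwards),
+ // and growth keeps the larger magnitude of the two for the legend GPRINTs.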
+ $opt[2] = "--vertical-label '+/- MB / 24h' -l -1 -u 1 -X0 --title '$hostname: Growth of $fstitle' "; + $def[2] = "DEF:growth_max=${RRD['growth']} "; + $def[2] .= "DEF:growth_min=${RRD_MIN['growth']} "; + $def[2] .= "DEF:trend=${RRD_AVG['trend']} "; + $def[2] .= "CDEF:growth_pos=growth_max,0,MAX "; + $def[2] .= "CDEF:growth_neg=growth_min,0,MIN "; + $def[2] .= "CDEF:growth_minabs=0,growth_min,- "; + $def[2] .= "CDEF:growth=growth_minabs,growth_max,MAX "; + $def[2] .= "HRULE:0#c0c0c0 "; + $def[2] .= "AREA:growth_pos#3060f0:\"Grow\" "; + $def[2] .= "AREA:growth_neg#30f060:\"Shrink \" "; + $def[2] .= "GPRINT:growth:LAST:\"Current\: %+9.2lfMB / 24h\" "; + $def[2] .= "GPRINT:growth:MAX:\"Max\: %+9.2lfMB / 24h\\n\" "; + + // Trend + $opt[3] = "--vertical-label '+/- MB / 24h' -l -1 -u 1 -X0 --title '$hostname: Trend for $fstitle' "; + $def[3] = "DEF:trend=${RRD_AVG['trend']} "; + $def[3] .= "HRULE:0#c0c0c0 "; + $def[3] .= "LINE1:trend#000000:\"Trend\:\" "; + $def[3] .= "GPRINT:trend:LAST:\"%+7.2lf MB/24h\" "; + if ($WARN['trend']) { + $warn_mb = sprintf("%.2fMB", $WARN['trend'] * $hours / 24.0); + $def[3] .= "LINE1:${WARN['trend']}#ffff00:\"Warn\: $warn_mb / $range\" "; + } + if ($CRIT['trend']) { + $crit_mb = sprintf("%.2fMB", $CRIT['trend'] * $hours / 24.0); + $def[3] .= "LINE1:${CRIT['trend']}#ff0000:\"Crit\: $crit_mb / $range\" "; + } + $def[3] .= "COMMENT:\"\\n\" "; +} + +if (isset($RRD['trend_hoursleft'])) { + // Trend + $opt[4] = "--vertical-label 'Days left' -l -1 -u 365 -X0 --title '$hostname: Days left for $fstitle' "; + $def[4] = "DEF:hours_left=${RRD_AVG['trend_hoursleft']} "; + $def[4] .= "DEF:hours_left_min=${RRD_MIN['trend_hoursleft']} "; + // negative hours indicate no growth + // the dataset hours_left_isneg stores this info for each point as True/False + $def[4] .= "CDEF:hours_left_isneg=hours_left_min,-1,EQ "; + $def[4] .= "CDEF:hours_left_unmon=hours_left_min,400,0,IF "; + $def[4] .= "CDEF:days_left=hours_left,24,/ "; + $def[4] .= "CDEF:days_left_cap=days_left,400,MIN "; + // Convert negative points to 400 (y-axis cap) + $def[4] .= "CDEF:days_left_cap_positive=hours_left_isneg,400,days_left_cap,IF "; + // The AREA has a rendering problem. Points are too far to the right + $def[4] .= "AREA:hours_left_unmon#AA2200: "; + + $def[4] .= "AREA:days_left_cap_positive#22AA44:\"Days left\:\" "; + if ($ACT[4] == -1) + { + $def[4] .= "COMMENT:\"Not growing\" "; + } + else { + $def[4] .= "GPRINT:days_left:LAST:\"%7.2lf days\" "; + } +} +?> diff -Nru check-mk-1.2.2p3/check_mk-esx_vsphere_hostsystem.cpu_usage.php check-mk-1.2.6p12/check_mk-esx_vsphere_hostsystem.cpu_usage.php --- check-mk-1.2.2p3/check_mk-esx_vsphere_hostsystem.cpu_usage.php 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/check_mk-esx_vsphere_hostsystem.cpu_usage.php 2015-06-24 09:48:39.000000000 +0000 @@ -0,0 +1,80 @@ + $n) { + $RRD[$n] = "$RRDFILE[$i]:$DS[$i]:MAX"; + $RRD_AVG[$n] = "$RRDFILE[$i]:$DS[$i]:AVERAGE"; + $WARN[$n] = $WARN[$i]; + $CRIT[$n] = $CRIT[$i]; + $MIN[$n] = $MIN[$i]; + $MAX[$n] = $MAX[$i]; +} + +$num_threads = $MAX[1]; +$warnthreads = $WARN[1] * $num_threads / 100.0; +$critthreads = $CRIT[1] * $num_threads / 100.0; +$rightscale = 100.0 / $num_threads; + +$opt[1] = "--vertical-label 'Used CPU threads' --right-axis $rightscale:0 --right-axis-format '%4.1lf%%' --right-axis-label 'Utilization %' -l0 -ru $num_threads --title \"CPU Utilization for $hostname ($num_threads CPU threads)\" "; + +$def[1] = "DEF:perc=$RRD_AVG[util] " + . 
"CDEF:util=perc,$num_threads,*,100,/ " + ; + +$def[1] .= "HRULE:$MAX[util]#0040d0:\"$num_threads CPU Threads\\n\" " + ; + +$def[1] .= "AREA:util#60f020:\"Utilization\:\" " + . "LINE:util#50b01a " + . "GPRINT:perc:LAST:\"%.1lf%%\" " + . "GPRINT:util:LAST:\"(%.1lf Threads) \" " + . "GPRINT:perc:MIN:\"min\: %.1lf%%,\" " + . "GPRINT:util:MIN:\"(%.1lf), \" " + . "GPRINT:perc:MAX:\"max\: %.1lf%%\" " + . "GPRINT:util:MAX:\"(%.1lf)\\n\" " + ; + + +if (isset($RRD_AVG["avg"])) { + $def[1] .= "DEF:aperc=$RRD_AVG[avg] ". + "CDEF:avg=aperc,$num_threads,*,100,/ ". + "LINE:avg#004000:\"Averaged\: \" ". + "GPRINT:aperc:LAST:\"%.1lf%%,\" ". + "GPRINT:aperc:MIN:\"min\: %.1lf%%,\" ". + "GPRINT:aperc:MAX:\"max\: %.1lf%%\\n\" ". + ""; +} + +if ($WARN['util']) { + $def[1] .= "HRULE:$warnthreads#fff000:\"Warn at $WARN[util]% \" " + . "HRULE:$critthreads#ff0000:\"Critical at $CRIT[util]%\\n\" "; +} +else { + $def[1] .= "COMMENT:\"\\n\" "; +} + +?> diff -Nru check-mk-1.2.2p3/check_mk-esx_vsphere_hostsystem.mem_usage.php check-mk-1.2.6p12/check_mk-esx_vsphere_hostsystem.mem_usage.php --- check-mk-1.2.2p3/check_mk-esx_vsphere_hostsystem.mem_usage.php 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/check_mk-esx_vsphere_hostsystem.mem_usage.php 2015-06-24 09:48:39.000000000 +0000 @@ -0,0 +1,49 @@ + diff -Nru check-mk-1.2.2p3/check_mk-etherbox.humidity.php check-mk-1.2.6p12/check_mk-etherbox.humidity.php --- check-mk-1.2.2p3/check_mk-etherbox.humidity.php 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/check_mk-etherbox.humidity.php 2015-06-24 09:48:39.000000000 +0000 @@ -0,0 +1,48 @@ + | +# +------------------------------------------------------------------+ + +$opt[1] = "--vertical-label \"Percent\" -l 0 -u 100 --title \"$servicedesc\" "; + +$def[1] = "DEF:var1=$RRDFILE[1]:$DS[1]:MAX "; +$def[1] .= "LINE2:var1#2080ff:\"Humidity\:\" "; +$def[1] .= "GPRINT:var1:LAST:\"%2.0lf%%\" "; +$def[1] .= "GPRINT:var1:AVERAGE:\"(Avg\: %2.0lf%%,\" "; +$def[1] .= "GPRINT:var1:MIN:\"Min\: %2.0lf%%,\" "; +$def[1] .= "GPRINT:var1:MAX:\"Max\: %2.0lf%%)\" "; +if($CRIT[1]) + $def[1] .= "HRULE:$CRIT[1]#FFFF00 "; +if($WARN[1]) + $def[1] .= "HRULE:$WARN[1]#FF0000 "; +if($MIN[1]) + $def[1] .= "HRULE:$MIN[1]#FFFF00 "; +if($MAX[1]) + $def[1] .= "HRULE:$MAX[1]#FF0000 "; +?> diff -Nru check-mk-1.2.2p3/check_mk-f5_bigip_chassis_temp.php check-mk-1.2.6p12/check_mk-f5_bigip_chassis_temp.php --- check-mk-1.2.2p3/check_mk-f5_bigip_chassis_temp.php 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/check_mk-f5_bigip_chassis_temp.php 2015-06-24 09:48:39.000000000 +0000 @@ -0,0 +1,38 @@ + diff -Nru check-mk-1.2.2p3/check_mk-f5_bigip_cpu_temp.php check-mk-1.2.6p12/check_mk-f5_bigip_cpu_temp.php --- check-mk-1.2.2p3/check_mk-f5_bigip_cpu_temp.php 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/check_mk-f5_bigip_cpu_temp.php 2015-06-24 09:48:39.000000000 +0000 @@ -0,0 +1,38 @@ + diff -Nru check-mk-1.2.2p3/check_mk-f5_bigip_interfaces.php check-mk-1.2.6p12/check_mk-f5_bigip_interfaces.php --- check-mk-1.2.2p3/check_mk-f5_bigip_interfaces.php 2013-03-04 11:48:42.000000000 +0000 +++ check-mk-1.2.6p12/check_mk-f5_bigip_interfaces.php 2015-06-24 09:48:39.000000000 +0000 @@ -6,7 +6,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. 
diff -Nru check-mk-1.2.2p3/check_mk-f5_bigip_temp.php check-mk-1.2.6p12/check_mk-f5_bigip_temp.php --- check-mk-1.2.2p3/check_mk-f5_bigip_temp.php 2013-03-04 11:48:42.000000000 +0000 +++ check-mk-1.2.6p12/check_mk-f5_bigip_temp.php 1970-01-01 00:00:00.000000000 +0000 @@ -1,36 +0,0 @@ - diff -Nru check-mk-1.2.2p3/check_mk-f5_bigip_vserver.php check-mk-1.2.6p12/check_mk-f5_bigip_vserver.php --- check-mk-1.2.2p3/check_mk-f5_bigip_vserver.php 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/check_mk-f5_bigip_vserver.php 2015-06-24 09:48:39.000000000 +0000 @@ -0,0 +1,41 @@ + diff -Nru check-mk-1.2.2p3/check_mk-fast_lta_silent_cubes.capacity.php check-mk-1.2.6p12/check_mk-fast_lta_silent_cubes.capacity.php --- check-mk-1.2.2p3/check_mk-fast_lta_silent_cubes.capacity.php 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/check_mk-fast_lta_silent_cubes.capacity.php 2015-06-24 09:48:39.000000000 +0000 @@ -0,0 +1,157 @@ + $n) { + $RRD[$n] = "$RRDFILE[$i]:$DS[$i]:MAX"; + $RRD_MIN[$n] = "$RRDFILE[$i]:$DS[$i]:MIN"; + $RRD_AVG[$n] = "$RRDFILE[$i]:$DS[$i]:AVERAGE"; + $WARN[$n] = $WARN[$i]; + $CRIT[$n] = $CRIT[$i]; + $MIN[$n] = $MIN[$i]; + $MAX[$n] = $MAX[$i]; +} + +# RRDtool Options +#$servicedes=$NAGIOS_SERVICEDESC + +$fsname = str_replace("_", "/", substr($servicedesc, 3)); +$fstitle = $fsname; + +# Hack for windows: replace C// with C:\ +if (strlen($fsname) == 3 && substr($fsname, 1, 2) == '//') { + $fsname = $fsname[0] . "\:\\\\"; + $fstitle = $fsname[0] . ":\\"; +} + +$sizegb = sprintf("%.1f", $MAX[1] / 1024.0); +$maxgb = $MAX[1] / 1024.0; +$warngb = $WARN[1] / 1024.0; +$critgb = $CRIT[1] / 1024.0; +$warngbtxt = sprintf("%.1f", $warngb); +$critgbtxt = sprintf("%.1f", $critgb); + +$opt[1] = "--vertical-label GB -l 0 -u $maxgb --title '$hostname: $servicedesc ($sizegb GB)' "; + +# First graph show current filesystem usage +$def[1] = "DEF:mb=$RRDFILE[1]:$DS[1]:MAX "; +$def[1] .= "CDEF:var1=mb,1024,/ "; +$def[1] .= "AREA:var1#00ffc6:\"used space on $fsname\\n\" "; + +# Optional uncommitted usage e.g. for esx hosts +if(isset($RRD['uncommitted'])) { + $def[1] .= "DEF:uncommitted_mb=".$RRD['uncommitted']." "; + $def[1] .= "CDEF:uncommitted_gb=uncommitted_mb,1024,/ "; + $def[1] .= "CDEF:total_gb=uncommitted_gb,var1,+ "; +} else { + $def[1] .= "CDEF:total_gb=var1 "; +} + +$def[1] .= "HRULE:$maxgb#003300:\"Size ($sizegb GB) \" "; +$def[1] .= "HRULE:$warngb#ffff00:\"Warning at $warngbtxt GB \" "; +$def[1] .= "HRULE:$critgb#ff0000:\"Critical at $critgbtxt GB \\n\" "; +$def[1] .= "GPRINT:var1:LAST:\"current\: %6.2lf GB\" "; +$def[1] .= "GPRINT:var1:MAX:\"max\: %6.2lf GB \" "; +$def[1] .= "GPRINT:var1:AVERAGE:\"avg\: %6.2lf GB\\n\" "; + +if(isset($RRD['uncommitted'])) { + $def[1] .= "AREA:uncommitted_gb#eeccff:\"Uncommited\":STACK "; + $def[1] .= "GPRINT:uncommitted_gb:MAX:\"%6.2lf GB\l\" "; +} + +$def[1] .= "LINE1:total_gb#226600 "; + +# Second graph is optional and shows trend. The MAX field +# of the third variable contains (size of the filesystem in MB +# / range in hours). From that we can compute the configured range. +if (isset($RRD['growth'])) { + $size_mb_per_hours = floatval($MAX['trend']); // this is size_mb / range(hours) + $size_mb = floatval($MAX[1]); + $hours = 1.0 / ($size_mb_per_hours / $size_mb); + $range = sprintf("%.0fh", $hours); + + // Current growth / shrinking. This value is give as MB / 24 hours. + // Note: This has changed in 1.1.13i3. Prior it was MB / trend_range! 
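+ // WARN['trend'] and CRIT['trend'] are per-24h values; the Trend graph below
+ // draws them as horizontal LINE1s at that raw value, and only the legend
+ // text is rescaled to the configured range (level * $hours / 24).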
+ $opt[2] = "--vertical-label '+/- MB / 24h' -l -1 -u 1 -X0 --title '$hostname: Growth of $servicedesc' "; + $def[2] = "DEF:growth_max=${RRD['growth']} "; + $def[2] .= "DEF:growth_min=${RRD_MIN['growth']} "; + $def[2] .= "DEF:trend=${RRD_AVG['trend']} "; + $def[2] .= "CDEF:growth_pos=growth_max,0,MAX "; + $def[2] .= "CDEF:growth_neg=growth_min,0,MIN "; + $def[2] .= "CDEF:growth_minabs=0,growth_min,- "; + $def[2] .= "CDEF:growth=growth_minabs,growth_max,MAX "; + $def[2] .= "HRULE:0#c0c0c0 "; + $def[2] .= "AREA:growth_pos#3060f0:\"Grow\" "; + $def[2] .= "AREA:growth_neg#30f060:\"Shrink \" "; + $def[2] .= "GPRINT:growth:LAST:\"Current\: %+9.2lfMB / 24h\" "; + $def[2] .= "GPRINT:growth:MAX:\"Max\: %+9.2lfMB / 24h\\n\" "; + + // Trend + $opt[3] = "--vertical-label '+/- MB / 24h' -l -1 -u 1 -X0 --title '$hostname: Trend for $servicedesc' "; + $def[3] = "DEF:trend=${RRD_AVG['trend']} "; + $def[3] .= "HRULE:0#c0c0c0 "; + $def[3] .= "LINE1:trend#000000:\"Trend\:\" "; + $def[3] .= "GPRINT:trend:LAST:\"%+7.2lf MB/24h\" "; + if ($WARN['trend']) { + $warn_mb = sprintf("%.2fMB", $WARN['trend'] * $hours / 24.0); + $def[3] .= "LINE1:${WARN['trend']}#ffff00:\"Warn\: $warn_mb / $range\" "; + } + if ($CRIT['trend']) { + $crit_mb = sprintf("%.2fMB", $CRIT['trend'] * $hours / 24.0); + $def[3] .= "LINE1:${CRIT['trend']}#ff0000:\"Crit\: $crit_mb / $range\" "; + } + $def[3] .= "COMMENT:\"\\n\" "; +} + +if (isset($RRD['trend_hoursleft'])) { + // Trend + $opt[4] = "--vertical-label 'Days left' -l -1 -u 365 -X0 --title '$hostname: Days left for $servicedesc' "; + $def[4] = "DEF:hours_left=${RRD_AVG['trend_hoursleft']} "; + $def[4] .= "DEF:hours_left_min=${RRD_MIN['trend_hoursleft']} "; + // negative hours indicate no growth + // the dataset hours_left_isneg stores this info for each point as True/False + $def[4] .= "CDEF:hours_left_isneg=hours_left_min,-1,EQ "; + $def[4] .= "CDEF:hours_left_unmon=hours_left_min,400,0,IF "; + $def[4] .= "CDEF:days_left=hours_left,24,/ "; + $def[4] .= "CDEF:days_left_cap=days_left,400,MIN "; + // Convert negative points to 400 (y-axis cap) + $def[4] .= "CDEF:days_left_cap_positive=hours_left_isneg,400,days_left_cap,IF "; + // The AREA has a rendering problem. Points are too far to the right + $def[4] .= "AREA:hours_left_unmon#AA2200: "; + + $def[4] .= "AREA:days_left_cap_positive#22AA44:\"Days left\:\" "; + if ($ACT[4] == -1) + { + $def[4] .= "COMMENT:\"Not growing\" "; + } + else { + $def[4] .= "GPRINT:days_left:LAST:\"%7.2lf days\" "; + } +} + +?> diff -Nru check-mk-1.2.2p3/check_mk-fast_lta_volumes.php check-mk-1.2.6p12/check_mk-fast_lta_volumes.php --- check-mk-1.2.2p3/check_mk-fast_lta_volumes.php 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/check_mk-fast_lta_volumes.php 2015-06-24 09:48:39.000000000 +0000 @@ -0,0 +1,157 @@ + $n) { + $RRD[$n] = "$RRDFILE[$i]:$DS[$i]:MAX"; + $RRD_MIN[$n] = "$RRDFILE[$i]:$DS[$i]:MIN"; + $RRD_AVG[$n] = "$RRDFILE[$i]:$DS[$i]:AVERAGE"; + $WARN[$n] = $WARN[$i]; + $CRIT[$n] = $CRIT[$i]; + $MIN[$n] = $MIN[$i]; + $MAX[$n] = $MAX[$i]; +} + +# RRDtool Options +#$servicedes=$NAGIOS_SERVICEDESC + +$fsname = str_replace("_", "/", substr($servicedesc, 3)); +$fstitle = $fsname; + +# Hack for windows: replace C// with C:\ +if (strlen($fsname) == 3 && substr($fsname, 1, 2) == '//') { + $fsname = $fsname[0] . "\:\\\\"; + $fstitle = $fsname[0] . 
":\\"; +} + +$sizegb = sprintf("%.1f", $MAX[1] / 1024.0); +$maxgb = $MAX[1] / 1024.0; +$warngb = $WARN[1] / 1024.0; +$critgb = $CRIT[1] / 1024.0; +$warngbtxt = sprintf("%.1f", $warngb); +$critgbtxt = sprintf("%.1f", $critgb); + +$opt[1] = "--vertical-label GB -l 0 -u $maxgb --title '$hostname: $servicedesc ($sizegb GB)' "; + +# First graph show current filesystem usage +$def[1] = "DEF:mb=$RRDFILE[1]:$DS[1]:MAX "; +$def[1] .= "CDEF:var1=mb,1024,/ "; +$def[1] .= "AREA:var1#00ffc6:\"used space on $fsname\\n\" "; + +# Optional uncommitted usage e.g. for esx hosts +if(isset($RRD['uncommitted'])) { + $def[1] .= "DEF:uncommitted_mb=".$RRD['uncommitted']." "; + $def[1] .= "CDEF:uncommitted_gb=uncommitted_mb,1024,/ "; + $def[1] .= "CDEF:total_gb=uncommitted_gb,var1,+ "; +} else { + $def[1] .= "CDEF:total_gb=var1 "; +} + +$def[1] .= "HRULE:$maxgb#003300:\"Size ($sizegb GB) \" "; +$def[1] .= "HRULE:$warngb#ffff00:\"Warning at $warngbtxt GB \" "; +$def[1] .= "HRULE:$critgb#ff0000:\"Critical at $critgbtxt GB \\n\" "; +$def[1] .= "GPRINT:var1:LAST:\"current\: %6.2lf GB\" "; +$def[1] .= "GPRINT:var1:MAX:\"max\: %6.2lf GB \" "; +$def[1] .= "GPRINT:var1:AVERAGE:\"avg\: %6.2lf GB\\n\" "; + +if(isset($RRD['uncommitted'])) { + $def[1] .= "AREA:uncommitted_gb#eeccff:\"Uncommited\":STACK "; + $def[1] .= "GPRINT:uncommitted_gb:MAX:\"%6.2lf GB\l\" "; +} + +$def[1] .= "LINE1:total_gb#226600 "; + +# Second graph is optional and shows trend. The MAX field +# of the third variable contains (size of the filesystem in MB +# / range in hours). From that we can compute the configured range. +if (isset($RRD['growth'])) { + $size_mb_per_hours = floatval($MAX['trend']); // this is size_mb / range(hours) + $size_mb = floatval($MAX[1]); + $hours = 1.0 / ($size_mb_per_hours / $size_mb); + $range = sprintf("%.0fh", $hours); + + // Current growth / shrinking. This value is give as MB / 24 hours. + // Note: This has changed in 1.1.13i3. Prior it was MB / trend_range! 
+ $opt[2] = "--vertical-label '+/- MB / 24h' -l -1 -u 1 -X0 --title '$hostname: Growth of $servicedesc' "; + $def[2] = "DEF:growth_max=${RRD['growth']} "; + $def[2] .= "DEF:growth_min=${RRD_MIN['growth']} "; + $def[2] .= "DEF:trend=${RRD_AVG['trend']} "; + $def[2] .= "CDEF:growth_pos=growth_max,0,MAX "; + $def[2] .= "CDEF:growth_neg=growth_min,0,MIN "; + $def[2] .= "CDEF:growth_minabs=0,growth_min,- "; + $def[2] .= "CDEF:growth=growth_minabs,growth_max,MAX "; + $def[2] .= "HRULE:0#c0c0c0 "; + $def[2] .= "AREA:growth_pos#3060f0:\"Grow\" "; + $def[2] .= "AREA:growth_neg#30f060:\"Shrink \" "; + $def[2] .= "GPRINT:growth:LAST:\"Current\: %+9.2lfMB / 24h\" "; + $def[2] .= "GPRINT:growth:MAX:\"Max\: %+9.2lfMB / 24h\\n\" "; + + // Trend + $opt[3] = "--vertical-label '+/- MB / 24h' -l -1 -u 1 -X0 --title '$hostname: Trend for $servicedesc' "; + $def[3] = "DEF:trend=${RRD_AVG['trend']} "; + $def[3] .= "HRULE:0#c0c0c0 "; + $def[3] .= "LINE1:trend#000000:\"Trend\:\" "; + $def[3] .= "GPRINT:trend:LAST:\"%+7.2lf MB/24h\" "; + if ($WARN['trend']) { + $warn_mb = sprintf("%.2fMB", $WARN['trend'] * $hours / 24.0); + $def[3] .= "LINE1:${WARN['trend']}#ffff00:\"Warn\: $warn_mb / $range\" "; + } + if ($CRIT['trend']) { + $crit_mb = sprintf("%.2fMB", $CRIT['trend'] * $hours / 24.0); + $def[3] .= "LINE1:${CRIT['trend']}#ff0000:\"Crit\: $crit_mb / $range\" "; + } + $def[3] .= "COMMENT:\"\\n\" "; +} + +if (isset($RRD['trend_hoursleft'])) { + // Trend + $opt[4] = "--vertical-label 'Days left' -l -1 -u 365 -X0 --title '$hostname: Days left for $servicedesc' "; + $def[4] = "DEF:hours_left=${RRD_AVG['trend_hoursleft']} "; + $def[4] .= "DEF:hours_left_min=${RRD_MIN['trend_hoursleft']} "; + // negative hours indicate no growth + // the dataset hours_left_isneg stores this info for each point as True/False + $def[4] .= "CDEF:hours_left_isneg=hours_left_min,-1,EQ "; + $def[4] .= "CDEF:hours_left_unmon=hours_left_min,400,0,IF "; + $def[4] .= "CDEF:days_left=hours_left,24,/ "; + $def[4] .= "CDEF:days_left_cap=days_left,400,MIN "; + // Convert negative points to 400 (y-axis cap) + $def[4] .= "CDEF:days_left_cap_positive=hours_left_isneg,400,days_left_cap,IF "; + // The AREA has a rendering problem. 
Points are too far to the right + $def[4] .= "AREA:hours_left_unmon#AA2200: "; + + $def[4] .= "AREA:days_left_cap_positive#22AA44:\"Days left\:\" "; + if ($ACT[4] == -1) + { + $def[4] .= "COMMENT:\"Not growing\" "; + } + else { + $def[4] .= "GPRINT:days_left:LAST:\"%7.2lf days\" "; + } +} + +?> diff -Nru check-mk-1.2.2p3/check_mk-fc_brocade_port_detailed.php check-mk-1.2.6p12/check_mk-fc_brocade_port_detailed.php --- check-mk-1.2.2p3/check_mk-fc_brocade_port_detailed.php 2013-11-05 09:22:37.000000000 +0000 +++ check-mk-1.2.6p12/check_mk-fc_brocade_port_detailed.php 1970-01-01 00:00:00.000000000 +0000 @@ -1,76 +0,0 @@ - diff -Nru check-mk-1.2.2p3/check_mk-fc_port.php check-mk-1.2.6p12/check_mk-fc_port.php --- check-mk-1.2.2p3/check_mk-fc_port.php 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/check_mk-fc_port.php 2015-06-24 09:48:39.000000000 +0000 @@ -0,0 +1,235 @@ + $base * $base * $base) { + $warn /= $base * $base * $base; + $crit /= $base * $base * $base; + $bandwidth /= $base * $base * $base; + $bwuom = 'G'; +} elseif ($bandwidth > $base * $base) { + $warn /= $base * $base; + $crit /= $base * $base; + $bandwidth /= $base * $base; + $bwuom = 'M'; +} elseif ($bandwidth > $base) { + $warn /= $base; + $crit /= $base; + $bandwidth /= $base; + $bwuom = 'k'; +} + +# The number of data source various due to different +# settings (such as averaging). We rather work with names +# than with numbers. +$RRD = array(); +foreach ($NAME as $i => $n) { + $RRD[$n] = "$RRDFILE[$i]:$DS[$i]:MAX"; + $RRDAVG[$n] = "$RRDFILE[$i]:$DS[$i]:AVERAGE"; + $WARN[$n] = $WARN[$i]; + $CRIT[$n] = $CRIT[$i]; + $MIN[$n] = $MIN[$i]; + $MAX[$n] = $MAX[$i]; +} + +if ($mBandwidthH < 10) + $range = $mBandwidthH; +else + $range = 10.0; + +$bandwidthInfo = ""; +#if ($bandwidth > 0){ +# $bandwidthInfo = " at bandwidth ${bwuom}${unit}/s"; +#} +$ds_name[1] = 'Used bandwidth'; +$opt[1] = "--vertical-label \"$vertical_label_name\" -l -$range -u $range -X0 -b 1024 --title \"Used bandwidth $hostname / $servicedesc $bandwidthInfo\" "; +$def[1] = + "HRULE:0#c0c0c0 "; + if ($mBandwidthH) + $def[1] .= "HRULE:$mBandwidthH#808080:\"Port speed\: " . sprintf("%.1f", $bandwidth) . " ".$bwuom."$unit/s\\n\" ". + "HRULE:-$mBandwidthH#808080: "; + if ($warn) + $def[1] .= "HRULE:$mWarnH#ffff00:\"Warning\: " . sprintf("%6.1f", $warn) . " ".$bwuom."$unit/s\\n\" ". + "HRULE:-$mWarnH#ffff00: "; + if ($crit) + $def[1] .= "HRULE:$mCritH#ff0000:\"Critical\: " . sprintf("%6.1f", $crit) . " ".$bwuom."$unit/s\\n\" ". + "HRULE:-$mCritH#ff0000: "; + + $def[1] .= "". + # incoming + "DEF:inbytes=$RRD[in] ". + "CDEF:intraffic=inbytes,$unit_multiplier,* ". + "CDEF:inmb=intraffic,1048576,/ ". + "AREA:inmb#00e060:\"in \" ". + "GPRINT:intraffic:LAST:\"%7.1lf %s$unit/s last\" ". + "GPRINT:intraffic:AVERAGE:\"%7.1lf %s$unit/s avg\" ". + "GPRINT:intraffic:MAX:\"%7.1lf %s$unit/s max\\n\" ". + "VDEF:inperc=intraffic,95,PERCENTNAN ". + "VDEF:inpercmb=inmb,95,PERCENTNAN ". + "LINE:inpercmb#008f00:\"95% percentile\" ". + "GPRINT:inperc:\"%7.1lf %s$unit/s\\n\" ". + + # outgoing + "DEF:outbytes=$RRD[out] ". + "CDEF:outtraffic=outbytes,$unit_multiplier,* ". + "CDEF:minusouttraffic=outtraffic,-1,* ". + "CDEF:outmb=outtraffic,1048576,/ ". + "CDEF:minusoutmb=0,outmb,- ". + "AREA:minusoutmb#0080e0:\"out \" ". + "GPRINT:outtraffic:LAST:\"%7.1lf %s$unit/s last\" ". + "GPRINT:outtraffic:AVERAGE:\"%7.1lf %s$unit/s avg\" ". + "GPRINT:outtraffic:MAX:\"%7.1lf %s$unit/s max\\n\" ". + "VDEF:outperc=outtraffic,5,PERCENTNAN ". + "VDEF:outpercmb=minusoutmb,5,PERCENTNAN ". 
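+ # PERCENTNAN, unlike plain PERCENT, excludes unknown samples from the
+ # percentile calculation, so gaps in the counter data do not skew the line.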
+ "LINE:outpercmb#00008f:\"95% percentile\" ". + "GPRINT:outperc:\"%7.1lf %s$unit/s\\n\" ". + + ""; + +# averages +if (isset($DS[9])) { + $def[1] .= + "DEF:inbytesa=$RRD[in_avg] ". + "DEF:outbytesa=$RRD[out_avg] ". + "CDEF:intraffica=inbytesa,$unit_multiplier,* ". + "CDEF:outtraffica=outbytesa,$unit_multiplier,* ". + "CDEF:inmba=intraffica,1048576,/ ". + "CDEF:outmba=outtraffica,1048576,/ ". + "CDEF:minusoutmba=0,outmba,- ". + "LINE:inmba#00a060:\"in (avg) \" ". + "GPRINT:intraffica:LAST:\"%6.1lf %s$unit/s last\" ". + "GPRINT:intraffica:AVERAGE:\"%6.1lf %s$unit/s avg\" ". + "GPRINT:intraffica:MAX:\"%6.1lf %s$unit/s max\\n\" ". + "LINE:minusoutmba#0060c0:\"out (avg) \" ". + "GPRINT:outtraffica:LAST:\"%6.1lf %s$unit/s last\" ". + "GPRINT:outtraffica:AVERAGE:\"%6.1lf %s$unit/s avg\" ". + "GPRINT:outtraffica:MAX:\"%6.1lf %s$unit/s max\\n\" "; +} + +# Graph 2: packets +$ds_name[2] = 'Objects'; +$opt[2] = "--vertical-label \"objects/sec\" --title \"Objects $hostname / $servicedesc\" "; +$def[2] = + # rxobjects + "HRULE:0#c0c0c0 ". + "DEF:inu=$RRD[rxobjects] ". + "CDEF:in=inu ". + "AREA:inu#00ffc0:\"rxobjects \" ". + "GPRINT:inu:LAST:\"%9.1lf/s last \" ". + "GPRINT:inu:AVERAGE:\"%9.1lf/s avg \" ". + "GPRINT:inu:MAX:\"%9.1lf/s max\\n\" ". + "VDEF:inperc=in,95,PERCENTNAN ". + "LINE:inperc#00cf00:\"in 95% percentile \" ". + "GPRINT:inperc:\"%9.1lf/s\\n\" ". + + # txobjects + "DEF:outu=$RRD[txobjects] ". + "CDEF:minusoutu=0,outu,- ". + "AREA:minusoutu#00c0ff:\"txobjects \" ". + "GPRINT:outu:LAST:\"%9.1lf/s last \" ". + "GPRINT:outu:AVERAGE:\"%9.1lf/s avg \" ". + "GPRINT:outu:MAX:\"%9.1lf/s max\\n\" ". + "VDEF:outperc_neg=minusoutu,5,PERCENTNAN ". + "VDEF:outperc_pos=outu,5,PERCENTNAN ". + "LINE:outperc_neg#0000cf:\"out 95% percentile\" ". + "GPRINT:outperc_pos:\"%9.1lf/s\\n\" ". + ""; + +# Graph 3: errors and discards +$ds_name[3] = 'Errors and discards'; +$opt[3] = "--vertical-label \"errors/sec\" -X0 --title \"Problems $hostname / $servicedesc\" "; +$def[3] = + "HRULE:0#c0c0c0 ". + "DEF:crcerr=$RRD[rxcrcs] ". + "DEF:encout=$RRD[rxencoutframes] ". + "AREA:crcerr#ff0000:\"crc errors \" ". + "GPRINT:crcerr:LAST:\"%7.2lf/s last \" ". + "GPRINT:crcerr:AVERAGE:\"%7.2lf/s avg \" ". + "GPRINT:crcerr:MAX:\"%7.2lf/s max\\n\" ". + "AREA:encout#ff8000:\"encout frames \":STACK ". + "GPRINT:encout:LAST:\"%7.2lf/s last \" ". + "GPRINT:encout:AVERAGE:\"%7.2lf/s avg \" ". + "GPRINT:encout:MAX:\"%7.2lf/s max\\n\" ". + "DEF:c3discards=$RRD[c3discards] ". + "DEF:notxcredits=$RRD[notxcredits] ". + "CDEF:minusc3=0,c3discards,- ". + "CDEF:minusnotxcredits=0,notxcredits,- ". + "AREA:minusc3#ff0080:\"c3 discards \" ". + "GPRINT:c3discards:LAST:\"%7.2lf/s last \" ". + "GPRINT:c3discards:AVERAGE:\"%7.2lf/s avg \" ". + "GPRINT:c3discards:MAX:\"%7.2lf/s max\\n\" ". + "AREA:minusnotxcredits#ff8080:\"no tx credits \":STACK ". + "GPRINT:notxcredits:LAST:\"%7.2lf/s last \" ". + "GPRINT:notxcredits:AVERAGE:\"%7.2lf/s avg \" ". 
+ "GPRINT:notxcredits:MAX:\"%7.2lf/s max\\n\" "; +?> diff -Nru check-mk-1.2.2p3/check_mk-fritz.uptime.php check-mk-1.2.6p12/check_mk-fritz.uptime.php --- check-mk-1.2.2p3/check_mk-fritz.uptime.php 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/check_mk-fritz.uptime.php 2015-06-24 09:48:39.000000000 +0000 @@ -0,0 +1,34 @@ + diff -Nru check-mk-1.2.2p3/check_mk-fritz.wan_if.php check-mk-1.2.6p12/check_mk-fritz.wan_if.php --- check-mk-1.2.2p3/check_mk-fritz.wan_if.php 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/check_mk-fritz.wan_if.php 2015-06-24 09:48:39.000000000 +0000 @@ -0,0 +1,226 @@ + $base * $base * $base) { + $scale = $base * $base * $base; + $bwuom = 'G'; +} +elseif ($bandwidth > $base * $base) { + $scale = $base * $base; + $bwuom = 'M'; +} +elseif ($bandwidth > $base) { + $scale = $base; + $bwuom = 'k'; +} +else { + $scale = 1; + $bwuom = ' '; +} + +$warn /= $scale; +$crit /= $scale; +$bandwidth /= $scale; + +$vertical_label_name = $bwuom . $unit . "/sec"; + +$range = min(10, $bandwidth); + + +$bandwidthInfo = ""; +if ($bandwidth > 0){ + $bandwidthInfo = " at " . sprintf("%.1f", $bandwidth) . " ${bwuom}${unit}/s"; +} +$ds_name[1] = 'Used bandwidth'; +$opt[1] = "--vertical-label \"$vertical_label_name\" -l -$range -u $range -X0 -b 1024 --title \"Used bandwidth $hostname / $servicedesc$bandwidthInfo\" "; +$def[1] = + "HRULE:0#c0c0c0 "; +if ($bandwidth) + $def[1] .= "HRULE:$bandwidth#808080:\"Port speed\: " . sprintf("%10.1f", $bandwidth) . " ".$bwuom."$unit/s\\n\" ". + "HRULE:-$bandwidth#808080: "; +if ($warn) + $def[1] .= "HRULE:$warn#ffff00:\"Warning\: " . sprintf("%13.1f", $warn) . " ".$bwuom."$unit/s\\n\" ". + "HRULE:-$warn#ffff00: "; +if ($crit) + $def[1] .= "HRULE:$crit#ff0000:\"Critical\: " . sprintf("%13.1f", $crit) . " ".$bwuom."$unit/s\\n\" ". + "HRULE:-$crit#ff0000: "; + + $def[1] .= "". + # incoming + "DEF:inbytes=$RRDFILE[1]:$DS[1]:MAX ". + "CDEF:intraffic=inbytes,$unit_multiplier,* ". + "CDEF:inmb=intraffic,$scale,/ ". + "AREA:inmb#00e060:\"in \" ". + "GPRINT:intraffic:LAST:\"%7.1lf %s$unit/s last\" ". + "GPRINT:intraffic:AVERAGE:\"%7.1lf %s$unit/s avg\" ". + "GPRINT:intraffic:MAX:\"%7.1lf %s$unit/s max\\n\" ". + "VDEF:inperc=intraffic,95,PERCENTNAN ". + "VDEF:inpercmb=inmb,95,PERCENTNAN ". + "LINE:inpercmb#008f00:\"95% percentile\" ". + "GPRINT:inperc:\"%7.1lf %s$unit/s\\n\" ". + + # outgoing + "DEF:outbytes=$RRDFILE[6]:$DS[6]:MAX ". + "CDEF:outtraffic=outbytes,$unit_multiplier,* ". + "CDEF:minusouttraffic=outtraffic,-1,* ". + "CDEF:outmb=outtraffic,$scale,/ ". + "CDEF:minusoutmb=0,outmb,- ". + "AREA:minusoutmb#0080e0:\"out \" ". + "GPRINT:outtraffic:LAST:\"%7.1lf %s$unit/s last\" ". + "GPRINT:outtraffic:AVERAGE:\"%7.1lf %s$unit/s avg\" ". + "GPRINT:outtraffic:MAX:\"%7.1lf %s$unit/s max\\n\" ". + "VDEF:outperc=minusouttraffic,5,PERCENTNAN ". + "VDEF:outpercmb=minusoutmb,5,PERCENTNAN ". + "LINE:outpercmb#00008f:\"95% percentile\" ". + "GPRINT:outperc:\"%7.1lf %s$unit/s\\n\" ". + + ""; + +if (isset($DS[12])) { + $def[1] .= + "DEF:inbytesa=$RRDFILE[12]:$DS[12]:MAX ". + "DEF:outbytesa=$RRDFILE[13]:$DS[13]:MAX ". + "CDEF:intraffica=inbytesa,$unit_multiplier,* ". + "CDEF:outtraffica=outbytesa,$unit_multiplier,* ". + "CDEF:inmba=intraffica,1048576,/ ". + "CDEF:outmba=outtraffica,1048576,/ ". + "CDEF:minusoutmba=0,outmba,- ". + "LINE:inmba#00a060:\"in (avg) \" ". + "GPRINT:intraffica:LAST:\"%6.1lf %s$unit/s last\" ". + "GPRINT:intraffica:AVERAGE:\"%6.1lf %s$unit/s avg\" ". + "GPRINT:intraffica:MAX:\"%6.1lf %s$unit/s max\\n\" ". 
+ "LINE:minusoutmba#0060c0:\"out (avg) \" ". + "GPRINT:outtraffica:LAST:\"%6.1lf %s$unit/s last\" ". + "GPRINT:outtraffica:AVERAGE:\"%6.1lf %s$unit/s avg\" ". + "GPRINT:outtraffica:MAX:\"%6.1lf %s$unit/s max\\n\" "; +} + +# Graph 2: packets +$ds_name[2] = 'Packets'; +$opt[2] = "--vertical-label \"packets/sec\" --title \"Packets $hostname / $servicedesc\" "; +$def[2] = + # ingoing + "HRULE:0#c0c0c0 ". + "DEF:inu=$RRDFILE[2]:$DS[2]:MAX ". + "DEF:innu=$RRDFILE[3]:$DS[3]:MAX ". + "CDEF:in=inu,innu,+ ". + "AREA:inu#00ffc0:\"in unicast \" ". + "GPRINT:inu:LAST:\"%9.1lf/s last \" ". + "GPRINT:inu:AVERAGE:\"%9.1lf/s avg \" ". + "GPRINT:inu:MAX:\"%9.1lf/s max\\n\" ". + "AREA:innu#00c080:\"in broadcast/multicast \":STACK ". + "GPRINT:innu:LAST:\"%9.1lf/s last \" ". + "GPRINT:innu:AVERAGE:\"%9.1lf/s avg \" ". + "GPRINT:innu:MAX:\"%9.1lf/s max\\n\" ". + "VDEF:inperc=in,95,PERCENTNAN ". + "LINE:inperc#00cf00:\"in 95% percentile \" ". + "GPRINT:inperc:\"%9.1lf/s\\n\" ". + + # outgoing + "DEF:outu=$RRDFILE[7]:$DS[7]:MAX ". + "DEF:outnu=$RRDFILE[8]:$DS[8]:MAX ". + "CDEF:minusoutu=0,outu,- ". + "CDEF:minusoutnu=0,outnu,- ". + "CDEF:minusout=minusoutu,minusoutnu,+ ". + "AREA:minusoutu#00c0ff:\"out unicast \" ". + "GPRINT:outu:LAST:\"%9.1lf/s last \" ". + "GPRINT:outu:AVERAGE:\"%9.1lf/s avg \" ". + "GPRINT:outu:MAX:\"%9.1lf/s max\\n\" ". + "AREA:minusoutnu#0080c0:\"out broadcast/multicast\":STACK ". + "GPRINT:outnu:LAST:\"%9.1lf/s last \" ". + "GPRINT:outnu:AVERAGE:\"%9.1lf/s avg \" ". + "GPRINT:outnu:MAX:\"%9.1lf/s max\\n\" ". + "VDEF:outperc=minusout,5,PERCENTNAN ". + "LINE:outperc#0000cf:\"out 95% percentile \" ". + "GPRINT:outperc:\"%9.1lf/s\\n\" ". + ""; + +# Graph 3: errors and discards +$ds_name[3] = 'Errors and discards'; +$opt[3] = "--vertical-label \"packets/sec\" -X0 --title \"Problems $hostname / $servicedesc\" "; +$def[3] = + "HRULE:0#c0c0c0 ". + "DEF:inerr=$RRDFILE[5]:$DS[5]:MAX ". + "DEF:indisc=$RRDFILE[4]:$DS[4]:MAX ". + "AREA:inerr#ff0000:\"in errors \" ". + "GPRINT:inerr:LAST:\"%7.2lf/s last \" ". + "GPRINT:inerr:AVERAGE:\"%7.2lf/s avg \" ". + "GPRINT:inerr:MAX:\"%7.2lf/s max\\n\" ". + "AREA:indisc#ff8000:\"in discards \":STACK ". + "GPRINT:indisc:LAST:\"%7.2lf/s last \" ". + "GPRINT:indisc:AVERAGE:\"%7.2lf/s avg \" ". + "GPRINT:indisc:MAX:\"%7.2lf/s max\\n\" ". + "DEF:outerr=$RRDFILE[10]:$DS[10]:MAX ". + "DEF:outdisc=$RRDFILE[9]:$DS[9]:MAX ". + "CDEF:minusouterr=0,outerr,- ". + "CDEF:minusoutdisc=0,outdisc,- ". + "AREA:minusouterr#ff0080:\"out errors \" ". + "GPRINT:outerr:LAST:\"%7.2lf/s last \" ". + "GPRINT:outerr:AVERAGE:\"%7.2lf/s avg \" ". + "GPRINT:outerr:MAX:\"%7.2lf/s max\\n\" ". + "AREA:minusoutdisc#ff8080:\"out discards \":STACK ". + "GPRINT:outdisc:LAST:\"%7.2lf/s last \" ". + "GPRINT:outdisc:AVERAGE:\"%7.2lf/s avg \" ". + "GPRINT:outdisc:MAX:\"%7.2lf/s max\\n\" "; +?> diff -Nru check-mk-1.2.2p3/check_mk-fsc_temp.php check-mk-1.2.6p12/check_mk-fsc_temp.php --- check-mk-1.2.2p3/check_mk-fsc_temp.php 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/check_mk-fsc_temp.php 2015-06-24 09:48:39.000000000 +0000 @@ -0,0 +1,38 @@ + diff -Nru check-mk-1.2.2p3/check_mk-h3c_lanswitch_cpu.php check-mk-1.2.6p12/check_mk-h3c_lanswitch_cpu.php --- check-mk-1.2.2p3/check_mk-h3c_lanswitch_cpu.php 2013-03-04 11:48:42.000000000 +0000 +++ check-mk-1.2.6p12/check_mk-h3c_lanswitch_cpu.php 2015-06-24 09:48:39.000000000 +0000 @@ -6,7 +6,7 @@ # | | |___| | | | __/ (__| < | | | | . 
\ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. @@ -27,7 +27,7 @@ $opt[1] = "--vertical-label 'CPU utilization %' -l0 -u 100 --title \"CPU Utilization $hostname $desc\" "; # -$def[1] = "DEF:util=$RRDFILE[1]:$DS[1]:MAX ". +$def[1] = "DEF:util=$RRDFILE[1]:$DS[1]:MAX ". "CDEF:ok=util,$WARN[1],MIN ". "CDEF:warn=util,$CRIT[1],MIN ". "AREA:util#c0f020 ". diff -Nru check-mk-1.2.2p3/check_mk-hitachi_hnas_cifs.php check-mk-1.2.6p12/check_mk-hitachi_hnas_cifs.php --- check-mk-1.2.2p3/check_mk-hitachi_hnas_cifs.php 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/check_mk-hitachi_hnas_cifs.php 2015-06-24 09:48:39.000000000 +0000 @@ -0,0 +1,34 @@ + diff -Nru check-mk-1.2.2p3/check_mk-hitachi_hnas_cpu.php check-mk-1.2.6p12/check_mk-hitachi_hnas_cpu.php --- check-mk-1.2.2p3/check_mk-hitachi_hnas_cpu.php 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/check_mk-hitachi_hnas_cpu.php 2015-06-24 09:48:39.000000000 +0000 @@ -0,0 +1,39 @@ + diff -Nru check-mk-1.2.2p3/check_mk-hitachi_hnas_fan.php check-mk-1.2.6p12/check_mk-hitachi_hnas_fan.php --- check-mk-1.2.2p3/check_mk-hitachi_hnas_fan.php 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/check_mk-hitachi_hnas_fan.php 2015-06-24 09:48:39.000000000 +0000 @@ -0,0 +1,34 @@ + diff -Nru check-mk-1.2.2p3/check_mk-hitachi_hnas_fc_if.php check-mk-1.2.6p12/check_mk-hitachi_hnas_fc_if.php --- check-mk-1.2.2p3/check_mk-hitachi_hnas_fc_if.php 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/check_mk-hitachi_hnas_fc_if.php 2015-06-24 09:48:39.000000000 +0000 @@ -0,0 +1,226 @@ + $base * $base * $base) { + $scale = $base * $base * $base; + $bwuom = 'G'; +} +elseif ($bandwidth > $base * $base) { + $scale = $base * $base; + $bwuom = 'M'; +} +elseif ($bandwidth > $base) { + $scale = $base; + $bwuom = 'k'; +} +else { + $scale = 1; + $bwuom = ' '; +} + +$warn /= $scale; +$crit /= $scale; +$bandwidth /= $scale; + +$vertical_label_name = $bwuom . $unit . "/sec"; + +$range = min(10, $bandwidth); + + +$bandwidthInfo = ""; +if ($bandwidth > 0){ + $bandwidthInfo = " at " . sprintf("%.1f", $bandwidth) . " ${bwuom}${unit}/s"; +} +$ds_name[1] = 'Used bandwidth'; +$opt[1] = "--vertical-label \"$vertical_label_name\" -l -$range -u $range -X0 -b 1024 --title \"Used bandwidth $hostname / $servicedesc$bandwidthInfo\" "; +$def[1] = + "HRULE:0#c0c0c0 "; +if ($bandwidth) + $def[1] .= "HRULE:$bandwidth#808080:\"Port speed\: " . sprintf("%10.1f", $bandwidth) . " ".$bwuom."$unit/s\\n\" ". + "HRULE:-$bandwidth#808080: "; +if ($warn) + $def[1] .= "HRULE:$warn#ffff00:\"Warning\: " . sprintf("%13.1f", $warn) . " ".$bwuom."$unit/s\\n\" ". + "HRULE:-$warn#ffff00: "; +if ($crit) + $def[1] .= "HRULE:$crit#ff0000:\"Critical\: " . sprintf("%13.1f", $crit) . " ".$bwuom."$unit/s\\n\" ". + "HRULE:-$crit#ff0000: "; + + $def[1] .= "". + # incoming + "DEF:inbytes=$RRDFILE[1]:$DS[1]:MAX ". + "CDEF:intraffic=inbytes,$unit_multiplier,* ". + "CDEF:inmb=intraffic,$scale,/ ". + "AREA:inmb#00e060:\"in \" ". + "GPRINT:intraffic:LAST:\"%7.1lf %s$unit/s last\" ". + "GPRINT:intraffic:AVERAGE:\"%7.1lf %s$unit/s avg\" ". + "GPRINT:intraffic:MAX:\"%7.1lf %s$unit/s max\\n\" ". + "VDEF:inperc=intraffic,95,PERCENTNAN ". + "VDEF:inpercmb=inmb,95,PERCENTNAN ". + "LINE:inpercmb#008f00:\"95% percentile\" ". + "GPRINT:inperc:\"%7.1lf %s$unit/s\\n\" ". 
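+ # the in series prints its 95th percentile above; out is negated to plot
+ # downwards, so its 5th percentile is the same magnitude mirrored below
+ # the zero line.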
+ + # outgoing + "DEF:outbytes=$RRDFILE[6]:$DS[6]:MAX ". + "CDEF:outtraffic=outbytes,$unit_multiplier,* ". + "CDEF:minusouttraffic=outtraffic,-1,* ". + "CDEF:outmb=outtraffic,$scale,/ ". + "CDEF:minusoutmb=0,outmb,- ". + "AREA:minusoutmb#0080e0:\"out \" ". + "GPRINT:outtraffic:LAST:\"%7.1lf %s$unit/s last\" ". + "GPRINT:outtraffic:AVERAGE:\"%7.1lf %s$unit/s avg\" ". + "GPRINT:outtraffic:MAX:\"%7.1lf %s$unit/s max\\n\" ". + "VDEF:outperc=minusouttraffic,5,PERCENTNAN ". + "VDEF:outpercmb=minusoutmb,5,PERCENTNAN ". + "LINE:outpercmb#00008f:\"95% percentile\" ". + "GPRINT:outperc:\"%7.1lf %s$unit/s\\n\" ". + + ""; + +if (isset($DS[12])) { + $def[1] .= + "DEF:inbytesa=$RRDFILE[12]:$DS[12]:MAX ". + "DEF:outbytesa=$RRDFILE[13]:$DS[13]:MAX ". + "CDEF:intraffica=inbytesa,$unit_multiplier,* ". + "CDEF:outtraffica=outbytesa,$unit_multiplier,* ". + "CDEF:inmba=intraffica,1048576,/ ". + "CDEF:outmba=outtraffica,1048576,/ ". + "CDEF:minusoutmba=0,outmba,- ". + "LINE:inmba#00a060:\"in (avg) \" ". + "GPRINT:intraffica:LAST:\"%6.1lf %s$unit/s last\" ". + "GPRINT:intraffica:AVERAGE:\"%6.1lf %s$unit/s avg\" ". + "GPRINT:intraffica:MAX:\"%6.1lf %s$unit/s max\\n\" ". + "LINE:minusoutmba#0060c0:\"out (avg) \" ". + "GPRINT:outtraffica:LAST:\"%6.1lf %s$unit/s last\" ". + "GPRINT:outtraffica:AVERAGE:\"%6.1lf %s$unit/s avg\" ". + "GPRINT:outtraffica:MAX:\"%6.1lf %s$unit/s max\\n\" "; +} + +# Graph 2: packets +$ds_name[2] = 'Packets'; +$opt[2] = "--vertical-label \"packets/sec\" --title \"Packets $hostname / $servicedesc\" "; +$def[2] = + # ingoing + "HRULE:0#c0c0c0 ". + "DEF:inu=$RRDFILE[2]:$DS[2]:MAX ". + "DEF:innu=$RRDFILE[3]:$DS[3]:MAX ". + "CDEF:in=inu,innu,+ ". + "AREA:inu#00ffc0:\"in unicast \" ". + "GPRINT:inu:LAST:\"%9.1lf/s last \" ". + "GPRINT:inu:AVERAGE:\"%9.1lf/s avg \" ". + "GPRINT:inu:MAX:\"%9.1lf/s max\\n\" ". + "AREA:innu#00c080:\"in broadcast/multicast \":STACK ". + "GPRINT:innu:LAST:\"%9.1lf/s last \" ". + "GPRINT:innu:AVERAGE:\"%9.1lf/s avg \" ". + "GPRINT:innu:MAX:\"%9.1lf/s max\\n\" ". + "VDEF:inperc=in,95,PERCENTNAN ". + "LINE:inperc#00cf00:\"in 95% percentile \" ". + "GPRINT:inperc:\"%9.1lf/s\\n\" ". + + # outgoing + "DEF:outu=$RRDFILE[7]:$DS[7]:MAX ". + "DEF:outnu=$RRDFILE[8]:$DS[8]:MAX ". + "CDEF:minusoutu=0,outu,- ". + "CDEF:minusoutnu=0,outnu,- ". + "CDEF:minusout=minusoutu,minusoutnu,+ ". + "AREA:minusoutu#00c0ff:\"out unicast \" ". + "GPRINT:outu:LAST:\"%9.1lf/s last \" ". + "GPRINT:outu:AVERAGE:\"%9.1lf/s avg \" ". + "GPRINT:outu:MAX:\"%9.1lf/s max\\n\" ". + "AREA:minusoutnu#0080c0:\"out broadcast/multicast\":STACK ". + "GPRINT:outnu:LAST:\"%9.1lf/s last \" ". + "GPRINT:outnu:AVERAGE:\"%9.1lf/s avg \" ". + "GPRINT:outnu:MAX:\"%9.1lf/s max\\n\" ". + "VDEF:outperc=minusout,5,PERCENTNAN ". + "LINE:outperc#0000cf:\"out 95% percentile \" ". + "GPRINT:outperc:\"%9.1lf/s\\n\" ". + ""; + +# Graph 3: errors and discards +$ds_name[3] = 'Errors and discards'; +$opt[3] = "--vertical-label \"packets/sec\" -X0 --title \"Problems $hostname / $servicedesc\" "; +$def[3] = + "HRULE:0#c0c0c0 ". + "DEF:inerr=$RRDFILE[5]:$DS[5]:MAX ". + "DEF:indisc=$RRDFILE[4]:$DS[4]:MAX ". + "AREA:inerr#ff0000:\"in errors \" ". + "GPRINT:inerr:LAST:\"%7.2lf/s last \" ". + "GPRINT:inerr:AVERAGE:\"%7.2lf/s avg \" ". + "GPRINT:inerr:MAX:\"%7.2lf/s max\\n\" ". + "AREA:indisc#ff8000:\"in discards \":STACK ". + "GPRINT:indisc:LAST:\"%7.2lf/s last \" ". + "GPRINT:indisc:AVERAGE:\"%7.2lf/s avg \" ". + "GPRINT:indisc:MAX:\"%7.2lf/s max\\n\" ". + "DEF:outerr=$RRDFILE[10]:$DS[10]:MAX ". + "DEF:outdisc=$RRDFILE[9]:$DS[9]:MAX ". 
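+ # out errors/discards are negated below (CDEF 0,x,-) so inbound problems
+ # stack above the axis and outbound ones below it.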
+ "CDEF:minusouterr=0,outerr,- ". + "CDEF:minusoutdisc=0,outdisc,- ". + "AREA:minusouterr#ff0080:\"out errors \" ". + "GPRINT:outerr:LAST:\"%7.2lf/s last \" ". + "GPRINT:outerr:AVERAGE:\"%7.2lf/s avg \" ". + "GPRINT:outerr:MAX:\"%7.2lf/s max\\n\" ". + "AREA:minusoutdisc#ff8080:\"out discards \":STACK ". + "GPRINT:outdisc:LAST:\"%7.2lf/s last \" ". + "GPRINT:outdisc:AVERAGE:\"%7.2lf/s avg \" ". + "GPRINT:outdisc:MAX:\"%7.2lf/s max\\n\" "; +?> diff -Nru check-mk-1.2.2p3/check_mk-hitachi_hnas_fpga.php check-mk-1.2.6p12/check_mk-hitachi_hnas_fpga.php --- check-mk-1.2.2p3/check_mk-hitachi_hnas_fpga.php 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/check_mk-hitachi_hnas_fpga.php 2015-06-24 09:48:39.000000000 +0000 @@ -0,0 +1,39 @@ + diff -Nru check-mk-1.2.2p3/check_mk-hitachi_hnas_span.php check-mk-1.2.6p12/check_mk-hitachi_hnas_span.php --- check-mk-1.2.2p3/check_mk-hitachi_hnas_span.php 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/check_mk-hitachi_hnas_span.php 2015-06-24 09:48:39.000000000 +0000 @@ -0,0 +1,157 @@ + $n) { + $RRD[$n] = "$RRDFILE[$i]:$DS[$i]:MAX"; + $RRD_MIN[$n] = "$RRDFILE[$i]:$DS[$i]:MIN"; + $RRD_AVG[$n] = "$RRDFILE[$i]:$DS[$i]:AVERAGE"; + $WARN[$n] = $WARN[$i]; + $CRIT[$n] = $CRIT[$i]; + $MIN[$n] = $MIN[$i]; + $MAX[$n] = $MAX[$i]; +} + +# RRDtool Options +#$servicedes=$NAGIOS_SERVICEDESC + +$fsname = str_replace("_", "/", substr($servicedesc, 3)); +$fstitle = $fsname; + +# Hack for windows: replace C// with C:\ +if (strlen($fsname) == 3 && substr($fsname, 1, 2) == '//') { + $fsname = $fsname[0] . "\:\\\\"; + $fstitle = $fsname[0] . ":\\"; +} + +$sizegb = sprintf("%.1f", $MAX[1] / 1024.0); +$maxgb = $MAX[1] / 1024.0; +$warngb = $WARN[1] / 1024.0; +$critgb = $CRIT[1] / 1024.0; +$warngbtxt = sprintf("%.1f", $warngb); +$critgbtxt = sprintf("%.1f", $critgb); + +$opt[1] = "--vertical-label GB -l 0 -u $maxgb --title '$hostname: $servicedesc ($sizegb GB)' "; + +# First graph show current filesystem usage +$def[1] = "DEF:mb=$RRDFILE[1]:$DS[1]:MAX "; +$def[1] .= "CDEF:var1=mb,1024,/ "; +$def[1] .= "AREA:var1#00ffc6:\"used space on $fsname\\n\" "; + +# Optional uncommitted usage e.g. for esx hosts +if(isset($RRD['uncommitted'])) { + $def[1] .= "DEF:uncommitted_mb=".$RRD['uncommitted']." "; + $def[1] .= "CDEF:uncommitted_gb=uncommitted_mb,1024,/ "; + $def[1] .= "CDEF:total_gb=uncommitted_gb,var1,+ "; +} else { + $def[1] .= "CDEF:total_gb=var1 "; +} + +$def[1] .= "HRULE:$maxgb#003300:\"Size ($sizegb GB) \" "; +$def[1] .= "HRULE:$warngb#ffff00:\"Warning at $warngbtxt GB \" "; +$def[1] .= "HRULE:$critgb#ff0000:\"Critical at $critgbtxt GB \\n\" "; +$def[1] .= "GPRINT:var1:LAST:\"current\: %6.2lf GB\" "; +$def[1] .= "GPRINT:var1:MAX:\"max\: %6.2lf GB \" "; +$def[1] .= "GPRINT:var1:AVERAGE:\"avg\: %6.2lf GB\\n\" "; + +if(isset($RRD['uncommitted'])) { + $def[1] .= "AREA:uncommitted_gb#eeccff:\"Uncommited\":STACK "; + $def[1] .= "GPRINT:uncommitted_gb:MAX:\"%6.2lf GB\l\" "; +} + +$def[1] .= "LINE1:total_gb#226600 "; + +# Second graph is optional and shows trend. The MAX field +# of the third variable contains (size of the filesystem in MB +# / range in hours). From that we can compute the configured range. +if (isset($RRD['growth'])) { + $size_mb_per_hours = floatval($MAX['trend']); // this is size_mb / range(hours) + $size_mb = floatval($MAX[1]); + $hours = 1.0 / ($size_mb_per_hours / $size_mb); + $range = sprintf("%.0fh", $hours); + + // Current growth / shrinking. This value is give as MB / 24 hours. + // Note: This has changed in 1.1.13i3. 
Prior it was MB / trend_range! + $opt[2] = "--vertical-label '+/- MB / 24h' -l -1 -u 1 -X0 --title '$hostname: Growth of $servicedesc' "; + $def[2] = "DEF:growth_max=${RRD['growth']} "; + $def[2] .= "DEF:growth_min=${RRD_MIN['growth']} "; + $def[2] .= "DEF:trend=${RRD_AVG['trend']} "; + $def[2] .= "CDEF:growth_pos=growth_max,0,MAX "; + $def[2] .= "CDEF:growth_neg=growth_min,0,MIN "; + $def[2] .= "CDEF:growth_minabs=0,growth_min,- "; + $def[2] .= "CDEF:growth=growth_minabs,growth_max,MAX "; + $def[2] .= "HRULE:0#c0c0c0 "; + $def[2] .= "AREA:growth_pos#3060f0:\"Grow\" "; + $def[2] .= "AREA:growth_neg#30f060:\"Shrink \" "; + $def[2] .= "GPRINT:growth:LAST:\"Current\: %+9.2lfMB / 24h\" "; + $def[2] .= "GPRINT:growth:MAX:\"Max\: %+9.2lfMB / 24h\\n\" "; + + // Trend + $opt[3] = "--vertical-label '+/- MB / 24h' -l -1 -u 1 -X0 --title '$hostname: Trend for $servicedesc' "; + $def[3] = "DEF:trend=${RRD_AVG['trend']} "; + $def[3] .= "HRULE:0#c0c0c0 "; + $def[3] .= "LINE1:trend#000000:\"Trend\:\" "; + $def[3] .= "GPRINT:trend:LAST:\"%+7.2lf MB/24h\" "; + if ($WARN['trend']) { + $warn_mb = sprintf("%.2fMB", $WARN['trend'] * $hours / 24.0); + $def[3] .= "LINE1:${WARN['trend']}#ffff00:\"Warn\: $warn_mb / $range\" "; + } + if ($CRIT['trend']) { + $crit_mb = sprintf("%.2fMB", $CRIT['trend'] * $hours / 24.0); + $def[3] .= "LINE1:${CRIT['trend']}#ff0000:\"Crit\: $crit_mb / $range\" "; + } + $def[3] .= "COMMENT:\"\\n\" "; +} + +if (isset($RRD['trend_hoursleft'])) { + // Trend + $opt[4] = "--vertical-label 'Days left' -l -1 -u 365 -X0 --title '$hostname: Days left for $servicedesc' "; + $def[4] = "DEF:hours_left=${RRD_AVG['trend_hoursleft']} "; + $def[4] .= "DEF:hours_left_min=${RRD_MIN['trend_hoursleft']} "; + // negative hours indicate no growth + // the dataset hours_left_isneg stores this info for each point as True/False + $def[4] .= "CDEF:hours_left_isneg=hours_left_min,-1,EQ "; + $def[4] .= "CDEF:hours_left_unmon=hours_left_min,400,0,IF "; + $def[4] .= "CDEF:days_left=hours_left,24,/ "; + $def[4] .= "CDEF:days_left_cap=days_left,400,MIN "; + // Convert negative points to 400 (y-axis cap) + $def[4] .= "CDEF:days_left_cap_positive=hours_left_isneg,400,days_left_cap,IF "; + // The AREA has a rendering problem. 
Points are too far to the right + $def[4] .= "AREA:hours_left_unmon#AA2200: "; + + $def[4] .= "AREA:days_left_cap_positive#22AA44:\"Days left\:\" "; + if ($ACT[4] == -1) + { + $def[4] .= "COMMENT:\"Not growing\" "; + } + else { + $def[4] .= "GPRINT:days_left:LAST:\"%7.2lf days\" "; + } +} + +?> diff -Nru check-mk-1.2.2p3/check_mk-hitachi_hnas_temp.php check-mk-1.2.6p12/check_mk-hitachi_hnas_temp.php --- check-mk-1.2.2p3/check_mk-hitachi_hnas_temp.php 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/check_mk-hitachi_hnas_temp.php 2015-06-24 09:48:39.000000000 +0000 @@ -0,0 +1,38 @@ + diff -Nru check-mk-1.2.2p3/check_mk-hitachi_hnas_volume.php check-mk-1.2.6p12/check_mk-hitachi_hnas_volume.php --- check-mk-1.2.2p3/check_mk-hitachi_hnas_volume.php 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/check_mk-hitachi_hnas_volume.php 2015-06-24 09:48:39.000000000 +0000 @@ -0,0 +1,157 @@ + $n) { + $RRD[$n] = "$RRDFILE[$i]:$DS[$i]:MAX"; + $RRD_MIN[$n] = "$RRDFILE[$i]:$DS[$i]:MIN"; + $RRD_AVG[$n] = "$RRDFILE[$i]:$DS[$i]:AVERAGE"; + $WARN[$n] = $WARN[$i]; + $CRIT[$n] = $CRIT[$i]; + $MIN[$n] = $MIN[$i]; + $MAX[$n] = $MAX[$i]; +} + +# RRDtool Options +#$servicedes=$NAGIOS_SERVICEDESC + +$fsname = str_replace("_", "/", substr($servicedesc, 3)); +$fstitle = $fsname; + +# Hack for windows: replace C// with C:\ +if (strlen($fsname) == 3 && substr($fsname, 1, 2) == '//') { + $fsname = $fsname[0] . "\:\\\\"; + $fstitle = $fsname[0] . ":\\"; +} + +$sizegb = sprintf("%.1f", $MAX[1] / 1024.0); +$maxgb = $MAX[1] / 1024.0; +$warngb = $WARN[1] / 1024.0; +$critgb = $CRIT[1] / 1024.0; +$warngbtxt = sprintf("%.1f", $warngb); +$critgbtxt = sprintf("%.1f", $critgb); + +$opt[1] = "--vertical-label GB -l 0 -u $maxgb --title '$hostname: $servicedesc ($sizegb GB)' "; + +# First graph show current filesystem usage +$def[1] = "DEF:mb=$RRDFILE[1]:$DS[1]:MAX "; +$def[1] .= "CDEF:var1=mb,1024,/ "; +$def[1] .= "AREA:var1#00ffc6:\"used space on $fsname\\n\" "; + +# Optional uncommitted usage e.g. for esx hosts +if(isset($RRD['uncommitted'])) { + $def[1] .= "DEF:uncommitted_mb=".$RRD['uncommitted']." "; + $def[1] .= "CDEF:uncommitted_gb=uncommitted_mb,1024,/ "; + $def[1] .= "CDEF:total_gb=uncommitted_gb,var1,+ "; +} else { + $def[1] .= "CDEF:total_gb=var1 "; +} + +$def[1] .= "HRULE:$maxgb#003300:\"Size ($sizegb GB) \" "; +$def[1] .= "HRULE:$warngb#ffff00:\"Warning at $warngbtxt GB \" "; +$def[1] .= "HRULE:$critgb#ff0000:\"Critical at $critgbtxt GB \\n\" "; +$def[1] .= "GPRINT:var1:LAST:\"current\: %6.2lf GB\" "; +$def[1] .= "GPRINT:var1:MAX:\"max\: %6.2lf GB \" "; +$def[1] .= "GPRINT:var1:AVERAGE:\"avg\: %6.2lf GB\\n\" "; + +if(isset($RRD['uncommitted'])) { + $def[1] .= "AREA:uncommitted_gb#eeccff:\"Uncommited\":STACK "; + $def[1] .= "GPRINT:uncommitted_gb:MAX:\"%6.2lf GB\l\" "; +} + +$def[1] .= "LINE1:total_gb#226600 "; + +# Second graph is optional and shows trend. The MAX field +# of the third variable contains (size of the filesystem in MB +# / range in hours). From that we can compute the configured range. +if (isset($RRD['growth'])) { + $size_mb_per_hours = floatval($MAX['trend']); // this is size_mb / range(hours) + $size_mb = floatval($MAX[1]); + $hours = 1.0 / ($size_mb_per_hours / $size_mb); + $range = sprintf("%.0fh", $hours); + + // Current growth / shrinking. This value is give as MB / 24 hours. + // Note: This has changed in 1.1.13i3. Prior it was MB / trend_range! 
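
For readers not fluent in RRDtool's RPN, the growth CDEFs that follow reduce to this infix form (a sketch only, not part of the patch):

// growth_pos    = growth_max,0,MAX             -> max(growth_max, 0)
// growth_neg    = growth_min,0,MIN             -> min(growth_min, 0)
// growth_minabs = 0,growth_min,-               -> -growth_min
// growth        = growth_minabs,growth_max,MAX -> max(-growth_min, growth_max)
// i.e. the band is split into its positive ("Grow") and negative ("Shrink")
// parts, and the legend prints whichever extreme has the larger magnitude.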
+ $opt[2] = "--vertical-label '+/- MB / 24h' -l -1 -u 1 -X0 --title '$hostname: Growth of $servicedesc' "; + $def[2] = "DEF:growth_max=${RRD['growth']} "; + $def[2] .= "DEF:growth_min=${RRD_MIN['growth']} "; + $def[2] .= "DEF:trend=${RRD_AVG['trend']} "; + $def[2] .= "CDEF:growth_pos=growth_max,0,MAX "; + $def[2] .= "CDEF:growth_neg=growth_min,0,MIN "; + $def[2] .= "CDEF:growth_minabs=0,growth_min,- "; + $def[2] .= "CDEF:growth=growth_minabs,growth_max,MAX "; + $def[2] .= "HRULE:0#c0c0c0 "; + $def[2] .= "AREA:growth_pos#3060f0:\"Grow\" "; + $def[2] .= "AREA:growth_neg#30f060:\"Shrink \" "; + $def[2] .= "GPRINT:growth:LAST:\"Current\: %+9.2lfMB / 24h\" "; + $def[2] .= "GPRINT:growth:MAX:\"Max\: %+9.2lfMB / 24h\\n\" "; + + // Trend + $opt[3] = "--vertical-label '+/- MB / 24h' -l -1 -u 1 -X0 --title '$hostname: Trend for $servicedesc' "; + $def[3] = "DEF:trend=${RRD_AVG['trend']} "; + $def[3] .= "HRULE:0#c0c0c0 "; + $def[3] .= "LINE1:trend#000000:\"Trend\:\" "; + $def[3] .= "GPRINT:trend:LAST:\"%+7.2lf MB/24h\" "; + if ($WARN['trend']) { + $warn_mb = sprintf("%.2fMB", $WARN['trend'] * $hours / 24.0); + $def[3] .= "LINE1:${WARN['trend']}#ffff00:\"Warn\: $warn_mb / $range\" "; + } + if ($CRIT['trend']) { + $crit_mb = sprintf("%.2fMB", $CRIT['trend'] * $hours / 24.0); + $def[3] .= "LINE1:${CRIT['trend']}#ff0000:\"Crit\: $crit_mb / $range\" "; + } + $def[3] .= "COMMENT:\"\\n\" "; +} + +if (isset($RRD['trend_hoursleft'])) { + // Trend + $opt[4] = "--vertical-label 'Days left' -l -1 -u 365 -X0 --title '$hostname: Days left for $servicedesc' "; + $def[4] = "DEF:hours_left=${RRD_AVG['trend_hoursleft']} "; + $def[4] .= "DEF:hours_left_min=${RRD_MIN['trend_hoursleft']} "; + // negative hours indicate no growth + // the dataset hours_left_isneg stores this info for each point as True/False + $def[4] .= "CDEF:hours_left_isneg=hours_left_min,-1,EQ "; + $def[4] .= "CDEF:hours_left_unmon=hours_left_min,400,0,IF "; + $def[4] .= "CDEF:days_left=hours_left,24,/ "; + $def[4] .= "CDEF:days_left_cap=days_left,400,MIN "; + // Convert negative points to 400 (y-axis cap) + $def[4] .= "CDEF:days_left_cap_positive=hours_left_isneg,400,days_left_cap,IF "; + // The AREA has a rendering problem. 
Points are too far to the right + $def[4] .= "AREA:hours_left_unmon#AA2200: "; + + $def[4] .= "AREA:days_left_cap_positive#22AA44:\"Days left\:\" "; + if ($ACT[4] == -1) + { + $def[4] .= "COMMENT:\"Not growing\" "; + } + else { + $def[4] .= "GPRINT:days_left:LAST:\"%7.2lf days\" "; + } +} + +?> diff -Nru check-mk-1.2.2p3/check_mk-hivemanager_devices.php check-mk-1.2.6p12/check_mk-hivemanager_devices.php --- check-mk-1.2.2p3/check_mk-hivemanager_devices.php 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/check_mk-hivemanager_devices.php 2015-06-24 09:48:39.000000000 +0000 @@ -0,0 +1,47 @@ + diff -Nru check-mk-1.2.2p3/check-mk-host-ping.php check-mk-1.2.6p12/check-mk-host-ping.php --- check-mk-1.2.2p3/check-mk-host-ping.php 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/check-mk-host-ping.php 2015-06-24 09:48:39.000000000 +0000 @@ -0,0 +1,59 @@ + diff -Nru check-mk-1.2.2p3/check-mk-host-tcp.php check-mk-1.2.6p12/check-mk-host-tcp.php --- check-mk-1.2.2p3/check-mk-host-tcp.php 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/check-mk-host-tcp.php 2015-06-24 09:48:39.000000000 +0000 @@ -0,0 +1,38 @@ + diff -Nru check-mk-1.2.2p3/check_mk-hp_blade_psu.php check-mk-1.2.6p12/check_mk-hp_blade_psu.php --- check-mk-1.2.2p3/check_mk-hp_blade_psu.php 2013-03-04 11:48:42.000000000 +0000 +++ check-mk-1.2.6p12/check_mk-hp_blade_psu.php 2015-06-24 09:48:39.000000000 +0000 @@ -6,7 +6,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. @@ -34,7 +34,7 @@ $files = array(); if($h = opendir($path)) { while(($file = readdir($h)) !== false) { - if(preg_match('/^PSU_[0-9]+\.rrd$/', $file, $aRet)) + if(preg_match('/^PSU_[0-9]+_output\.rrd$/', $file, $aRet)) $files[] = $aRet[0]; } natcasesort($files); diff -Nru check-mk-1.2.2p3/check_mk-hp_procurve_cpu.php check-mk-1.2.6p12/check_mk-hp_procurve_cpu.php --- check-mk-1.2.2p3/check_mk-hp_procurve_cpu.php 2013-03-04 11:48:42.000000000 +0000 +++ check-mk-1.2.6p12/check_mk-hp_procurve_cpu.php 2015-06-24 09:48:39.000000000 +0000 @@ -6,7 +6,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. @@ -27,7 +27,7 @@ $opt[1] = "--vertical-label 'CPU utilization %' -l0 -u 100 --title \"CPU Utilization $hostname $desc\" "; # -$def[1] = "DEF:util=$RRDFILE[1]:$DS[1]:MAX ". +$def[1] = "DEF:util=$RRDFILE[1]:$DS[1]:MAX ". "CDEF:ok=util,$WARN[1],MIN ". "CDEF:warn=util,$CRIT[1],MIN ". "AREA:util#c0f020 ". diff -Nru check-mk-1.2.2p3/check_mk-hpux_cpu.php check-mk-1.2.6p12/check_mk-hpux_cpu.php --- check-mk-1.2.2p3/check_mk-hpux_cpu.php 2013-11-05 09:22:37.000000000 +0000 +++ check-mk-1.2.6p12/check_mk-hpux_cpu.php 2015-09-21 10:59:54.000000000 +0000 @@ -6,7 +6,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. 
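
The hpux_cpu hunk below introduces the by-name perfdata lookup that recurs in most of the reworked templates. A minimal sketch of the idiom, assuming $NAME, $RRDFILE, $DS, $WARN and $CRIT are populated by PNP4Nagios as usual:

<?php
// Index the data sources by name so optional metrics can be probed with
// isset() instead of relying on fixed positions in the perfdata.
$RRD = array();
foreach ($NAME as $i => $n) {
    $RRD[$n]  = "$RRDFILE[$i]:$DS[$i]:MAX";
    $WARN[$n] = $WARN[$i];
    $CRIT[$n] = $CRIT[$i];
}
// Optional series then become strictly additive, e.g.:
if (isset($RRD['predict_load15'])) {
    $def[1] .= "DEF:predict=$RRD[predict_load15] "
             . "LINE:predict#ff0000:\"Reference for prediction\\n\" ";
}
?>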
@@ -23,19 +23,49 @@ # to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, # Boston, MA 02110-1301 USA. -$opt[1] = "--vertical-label Load -l0 -u 1 --title \"CPU Load for $hostname / $servicedesc\" "; +# The number of data source various due to different +# settings (such as averaging). We rather work with names +# than with numbers. +$RRD = array(); +foreach ($NAME as $i => $n) { + $RRD[$n] = "$RRDFILE[$i]:$DS[$i]:MAX"; + $WARN[$n] = $WARN[$i]; + $CRIT[$n] = $CRIT[$i]; + $MIN[$n] = $MIN[$i]; + $MAX[$n] = $MAX[$i]; +} -$def[1] = "DEF:var1=$RRDFILE[1]:$DS[1]:MAX " ; -$def[1] .= "DEF:var2=$RRDFILE[2]:$DS[2]:MAX " ; -$def[1] .= "DEF:var3=$RRDFILE[3]:$DS[3]:MAX " ; -$def[1] .= "HRULE:$WARN[1]#FFFF00 "; -$def[1] .= "HRULE:$CRIT[1]#FF0000 "; -$def[1] .= "AREA:var1#60c0e0:\"Load average 1 min \" " ; -$def[1] .= "GPRINT:var1:LAST:\"%6.2lf last\" " ; -$def[1] .= "GPRINT:var1:AVERAGE:\"%6.2lf avg\" " ; -$def[1] .= "GPRINT:var1:MAX:\"%6.2lf max\\n\" "; -$def[1] .= "LINE:var3#004080:\"Load average 15 min \" " ; -$def[1] .= "GPRINT:var3:LAST:\"%6.2lf last\" " ; -$def[1] .= "GPRINT:var3:AVERAGE:\"%6.2lf avg\" " ; -$def[1] .= "GPRINT:var3:MAX:\"%6.2lf max\\n\" " ; +$opt[1] = "--vertical-label 'Load average' -l0 -u 1 --title \"CPU Load for $hostname\" "; + +$def[1] = "" + . "DEF:load1=$RRD[load1] " + . "AREA:load1#60c0e0:\"Load average 1 min \" " + . "GPRINT:load1:LAST:\"%6.2lf last\" " + . "GPRINT:load1:AVERAGE:\"%6.2lf avg\" " + . "GPRINT:load1:MAX:\"%6.2lf max\\n\" " + + . "DEF:load15=$RRD[load15] " + . "LINE:load15#004080:\"Load average 15 min \" " + . "GPRINT:load15:LAST:\"%6.2lf last\" " + . "GPRINT:load15:AVERAGE:\"%6.2lf avg\" " + . "GPRINT:load15:MAX:\"%6.2lf max\\n\" " + . ""; + +if ($WARN[1]) { + $def[1] .= "" + . "HRULE:$WARN[1]#FFFF00 " + . "HRULE:$CRIT[1]#FF0000 " + . ""; +} + +if ($MAX[1]) { + $def[1] .= "COMMENT:\" Number of CPUs $MAX[1]\" "; +} + +if (isset($RRD["predict_load15"])) { + $def[1] .= "" + . "DEF:predict=$RRD[predict_load15] " + . "LINE:predict#ff0000:\"Reference for prediction \\n\" " + . ""; +} ?> diff -Nru check-mk-1.2.2p3/check_mk-hpux_if.php check-mk-1.2.6p12/check_mk-hpux_if.php --- check-mk-1.2.2p3/check_mk-hpux_if.php 2013-11-05 09:22:37.000000000 +0000 +++ check-mk-1.2.6p12/check_mk-hpux_if.php 2015-06-24 09:48:39.000000000 +0000 @@ -6,7 +6,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. @@ -40,87 +40,105 @@ # Graph 1: used bandwidth -# Determine if Bit or Byte. -# Change multiplier and labels -$unit = "B"; -$unit_multiplier = 1; -$vertical_label_name = "MByte/sec"; -if (strcmp($MIN[11], "0.0") == 0) { +# Determine if Bit or Byte. Bit is signalled via a min value of 0.0 +# in the 11th performance value. 
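
The unit and scale selection introduced in this hunk (replacing the fixed MByte/MBit labels) reduces to the following standalone sketch; choose_scale() and the sample port speed are illustrative, not part of the patch:

<?php
// Pick the largest SI prefix that fits the known port speed, so legends
// read e.g. "MBit/s" on a 100 MBit port and "GBit/s" on a 10 GBit port.
function choose_scale($bandwidth, $base = 1000) {
    if ($bandwidth > $base * $base * $base) return array($base * $base * $base, 'G');
    if ($bandwidth > $base * $base)         return array($base * $base, 'M');
    if ($bandwidth > $base)                 return array($base, 'k');
    return array(1, ' ');
}

list($scale, $bwuom) = choose_scale(10000000000.0);        // 10 GBit/s port
printf("%.1f %sBit/s\n", 10000000000.0 / $scale, $bwuom);  // "10.0 GBit/s"
?>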
+if (!strcmp($MIN[11], "0.0")) { $unit = "Bit"; $unit_multiplier = 8; - $vertical_label_name = "MBit/sec"; + $base = 1000; // Megabit is 1000 * 1000 } +else { + $unit = "B"; + $unit_multiplier = 1; + $base = 1000; // Megabyte is 1000 * 1000 +} + +# Convert bytes to bits if neccessary $bandwidth = $MAX[1] * $unit_multiplier; $warn = $WARN[1] * $unit_multiplier; $crit = $CRIT[1] * $unit_multiplier; -# Horizontal lines -$mega = 1024.0 * 1024.0; -$mBandwidthH = $bandwidth / $mega; -$mWarnH = $warn / $mega; -$mCritH = $crit / $mega; - -# Break down bandwidth, warn and crit +# Now choose a convenient scale, based on the known bandwith of +# the interface, and break down bandwidth, warn and crit by that +# scale. $bwuom = ' '; -$base = 1000; -if($bandwidth > $base * $base * $base) { - $warn /= $base * $base * $base; - $crit /= $base * $base * $base; - $bandwidth /= $base * $base * $base; - $bwuom = 'G'; -} elseif ($bandwidth > $base * $base) { - $warn /= $base * $base; - $crit /= $base * $base; - $bandwidth /= $base * $base; - $bwuom = 'M'; -} elseif ($bandwidth > $base) { - $warn /= $base; - $crit /= $base; - $bandwidth /= $base; - $bwuom = 'k'; -} - -if ($mBandwidthH < 10) - $range = $mBandwidthH; -else - $range = 10.0; +if ($bandwidth > $base * $base * $base) { + $scale = $base * $base * $base; + $bwuom = 'G'; +} +elseif ($bandwidth > $base * $base) { + $scale = $base * $base; + $bwuom = 'M'; +} +elseif ($bandwidth > $base) { + $scale = $base; + $bwuom = 'k'; +} +else { + $scale = 1; + $bwuom = ' '; +} + +$warn /= $scale; +$crit /= $scale; +$bandwidth /= $scale; + +$vertical_label_name = $bwuom . $unit . "/sec"; + +$range = min(10, $bandwidth); + $bandwidthInfo = ""; if ($bandwidth > 0){ - $bandwidthInfo = " at bandwidth ${bwuom}${unit}/s"; + $bandwidthInfo = " at " . sprintf("%.1f", $bandwidth) . " ${bwuom}${unit}/s"; } $ds_name[1] = 'Used bandwidth'; -$opt[1] = "--vertical-label \"$vertical_label_name\" -l -$range -u $range -X0 -b 1024 --title \"Used bandwidth $hostname / $servicedesc $bandwidthInfo\" "; -$def[1] = +$opt[1] = "--vertical-label \"$vertical_label_name\" -l -$range -u $range -X0 -b 1024 --title \"Used bandwidth $hostname / $servicedesc$bandwidthInfo\" "; +$def[1] = "HRULE:0#c0c0c0 "; - if ($mBandwidthH) - $def[1] .= "HRULE:$mBandwidthH#808080:\"Port speed\: " . sprintf("%.1f", $bandwidth) . " ".$bwuom."$unit/s\\n\" ". - "HRULE:-$mBandwidthH#808080: "; - if ($warn) - $def[1] .= "HRULE:$mWarnH#ffff00:\"Warning\: " . sprintf("%6.1f", $warn) . " ".$bwuom."$unit/s\\n\" ". - "HRULE:-$mWarnH#ffff00: "; - if ($crit) - $def[1] .= "HRULE:$mCritH#ff0000:\"Critical\: " . sprintf("%6.1f", $crit) . " ".$bwuom."$unit/s\\n\" ". - "HRULE:-$mCritH#ff0000: "; +if ($bandwidth) + $def[1] .= "HRULE:$bandwidth#808080:\"Port speed\: " . sprintf("%10.1f", $bandwidth) . " ".$bwuom."$unit/s\\n\" ". + "HRULE:-$bandwidth#808080: "; +if ($warn) + $def[1] .= "HRULE:$warn#ffff00:\"Warning\: " . sprintf("%13.1f", $warn) . " ".$bwuom."$unit/s\\n\" ". + "HRULE:-$warn#ffff00: "; +if ($crit) + $def[1] .= "HRULE:$crit#ff0000:\"Critical\: " . sprintf("%13.1f", $crit) . " ".$bwuom."$unit/s\\n\" ". + "HRULE:-$crit#ff0000: "; + + $def[1] .= "". + # incoming + "DEF:inbytes=$RRDFILE[1]:$DS[1]:MAX ". + "CDEF:intraffic=inbytes,$unit_multiplier,* ". + "CDEF:inmb=intraffic,$scale,/ ". + "AREA:inmb#00e060:\"in \" ". + "GPRINT:intraffic:LAST:\"%7.1lf %s$unit/s last\" ". + "GPRINT:intraffic:AVERAGE:\"%7.1lf %s$unit/s avg\" ". + "GPRINT:intraffic:MAX:\"%7.1lf %s$unit/s max\\n\" ". + "VDEF:inperc=intraffic,95,PERCENTNAN ". 
+ "VDEF:inpercmb=inmb,95,PERCENTNAN ". + "LINE:inpercmb#008f00:\"95% percentile\" ". + "GPRINT:inperc:\"%7.1lf %s$unit/s\\n\" ". - $def[1] .= "DEF:inbytes=$RRDFILE[1]:$DS[1]:MAX ". + # outgoing "DEF:outbytes=$RRDFILE[6]:$DS[6]:MAX ". - "CDEF:intraffic=inbytes,$unit_multiplier,* ". "CDEF:outtraffic=outbytes,$unit_multiplier,* ". - "CDEF:inmb=intraffic,1048576,/ ". - "CDEF:outmb=outtraffic,1048576,/ ". + "CDEF:minusouttraffic=outtraffic,-1,* ". + "CDEF:outmb=outtraffic,$scale,/ ". "CDEF:minusoutmb=0,outmb,- ". - "AREA:inmb#00e060:\"in \" ". - "GPRINT:intraffic:LAST:\"%6.1lf %s$unit/s last\" ". - "GPRINT:intraffic:AVERAGE:\"%6.1lf %s$unit/s avg\" ". - "GPRINT:intraffic:MAX:\"%6.1lf %s$unit/s max\\n\" ". - "AREA:minusoutmb#0080e0:\"out \" ". - "GPRINT:outtraffic:LAST:\"%6.1lf %s$unit/s last\" ". - "GPRINT:outtraffic:AVERAGE:\"%6.1lf %s$unit/s avg\" ". - "GPRINT:outtraffic:MAX:\"%6.1lf %s$unit/s max\\n\" "; + "AREA:minusoutmb#0080e0:\"out \" ". + "GPRINT:outtraffic:LAST:\"%7.1lf %s$unit/s last\" ". + "GPRINT:outtraffic:AVERAGE:\"%7.1lf %s$unit/s avg\" ". + "GPRINT:outtraffic:MAX:\"%7.1lf %s$unit/s max\\n\" ". + "VDEF:outperc=minusouttraffic,5,PERCENTNAN ". + "VDEF:outpercmb=minusoutmb,5,PERCENTNAN ". + "LINE:outpercmb#00008f:\"95% percentile\" ". + "GPRINT:outperc:\"%7.1lf %s$unit/s\\n\" ". + + ""; if (isset($DS[12])) { - $def[1] .= + $def[1] .= "DEF:inbytesa=$RRDFILE[12]:$DS[12]:MAX ". "DEF:outbytesa=$RRDFILE[13]:$DS[13]:MAX ". "CDEF:intraffica=inbytesa,$unit_multiplier,* ". @@ -142,29 +160,41 @@ $ds_name[2] = 'Packets'; $opt[2] = "--vertical-label \"packets/sec\" --title \"Packets $hostname / $servicedesc\" "; $def[2] = + # ingoing "HRULE:0#c0c0c0 ". "DEF:inu=$RRDFILE[2]:$DS[2]:MAX ". "DEF:innu=$RRDFILE[3]:$DS[3]:MAX ". - "AREA:inu#00ffc0:\"in unicast \" ". - "GPRINT:inu:LAST:\"%7.2lf/s last \" ". - "GPRINT:inu:AVERAGE:\"%7.2lf/s avg \" ". - "GPRINT:inu:MAX:\"%7.2lf/s max\\n\" ". - "AREA:innu#00c080:\"in broadcast/multicast \":STACK ". - "GPRINT:innu:LAST:\"%7.2lf/s last \" ". - "GPRINT:innu:AVERAGE:\"%7.2lf/s avg \" ". - "GPRINT:innu:MAX:\"%7.2lf/s max\\n\" ". + "CDEF:in=inu,innu,+ ". + "AREA:inu#00ffc0:\"in unicast \" ". + "GPRINT:inu:LAST:\"%9.1lf/s last \" ". + "GPRINT:inu:AVERAGE:\"%9.1lf/s avg \" ". + "GPRINT:inu:MAX:\"%9.1lf/s max\\n\" ". + "AREA:innu#00c080:\"in broadcast/multicast \":STACK ". + "GPRINT:innu:LAST:\"%9.1lf/s last \" ". + "GPRINT:innu:AVERAGE:\"%9.1lf/s avg \" ". + "GPRINT:innu:MAX:\"%9.1lf/s max\\n\" ". + "VDEF:inperc=in,95,PERCENTNAN ". + "LINE:inperc#00cf00:\"in 95% percentile \" ". + "GPRINT:inperc:\"%9.1lf/s\\n\" ". + + # outgoing "DEF:outu=$RRDFILE[7]:$DS[7]:MAX ". "DEF:outnu=$RRDFILE[8]:$DS[8]:MAX ". "CDEF:minusoutu=0,outu,- ". "CDEF:minusoutnu=0,outnu,- ". - "AREA:minusoutu#00c0ff:\"out unicast \" ". - "GPRINT:outu:LAST:\"%7.2lf/s last \" ". - "GPRINT:outu:AVERAGE:\"%7.2lf/s avg \" ". - "GPRINT:outu:MAX:\"%7.2lf/s max\\n\" ". - "AREA:minusoutnu#0080c0:\"out broadcast/multicast \":STACK ". - "GPRINT:outnu:LAST:\"%7.2lf/s last \" ". - "GPRINT:outnu:AVERAGE:\"%7.2lf/s avg \" ". - "GPRINT:outnu:MAX:\"%7.2lf/s max\\n\" "; + "CDEF:minusout=minusoutu,minusoutnu,+ ". + "AREA:minusoutu#00c0ff:\"out unicast \" ". + "GPRINT:outu:LAST:\"%9.1lf/s last \" ". + "GPRINT:outu:AVERAGE:\"%9.1lf/s avg \" ". + "GPRINT:outu:MAX:\"%9.1lf/s max\\n\" ". + "AREA:minusoutnu#0080c0:\"out broadcast/multicast\":STACK ". + "GPRINT:outnu:LAST:\"%9.1lf/s last \" ". + "GPRINT:outnu:AVERAGE:\"%9.1lf/s avg \" ". + "GPRINT:outnu:MAX:\"%9.1lf/s max\\n\" ". + "VDEF:outperc=minusout,5,PERCENTNAN ". 
+ "LINE:outperc#0000cf:\"out 95% percentile \" ". + "GPRINT:outperc:\"%9.1lf/s\\n\" ". + ""; # Graph 3: errors and discards $ds_name[3] = 'Errors and discards'; diff -Nru check-mk-1.2.2p3/check_mk-hpux_lunstats.php check-mk-1.2.6p12/check_mk-hpux_lunstats.php --- check-mk-1.2.2p3/check_mk-hpux_lunstats.php 2013-03-04 11:48:42.000000000 +0000 +++ check-mk-1.2.6p12/check_mk-hpux_lunstats.php 2015-09-21 10:59:54.000000000 +0000 @@ -6,7 +6,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. @@ -41,7 +41,7 @@ $opt[1] = "--vertical-label 'Throughput (MB/s)' -X0 --title \"Disk throughput $hostname / $disk\" "; - $def[1] = + $def[1] = "HRULE:0#a0a0a0 ". # read "DEF:read=$RRD[read] ". @@ -53,7 +53,7 @@ # read average as line in the same graph if (isset($RRD["read.avg"])) { - $def[1] .= + $def[1] .= "DEF:read_avg=${RRD['read.avg']} ". "CDEF:read_avg_mb=read_avg,1048576,/ ". "LINE:read_avg_mb#202020 "; @@ -84,7 +84,7 @@ # write average if (isset($DS["write.avg"])) { - $def[1] .= + $def[1] .= "DEF:write_avg=${RRD['write.avg']} ". "CDEF:write_avg_mb=write_avg,1048576,/ ". "CDEF:write_avg_mb_neg=write_avg_mb,-1,* ". @@ -122,14 +122,14 @@ $def[] = "" . "DEF:read=$RRD[read_ql] " . "DEF:write=$RRD[write_ql] " - . "CDEF:writen=write,-1,* " + . "CDEF:writen=write,-1,* " . "HRULE:0#a0a0a0 " . "AREA:read#669a76 " . "AREA:writen#517ba5 " ; } - + } // legacy version of diskstat diff -Nru check-mk-1.2.2p3/check_mk-hpux_snmp_cs.cpu.php check-mk-1.2.6p12/check_mk-hpux_snmp_cs.cpu.php --- check-mk-1.2.2p3/check_mk-hpux_snmp_cs.cpu.php 2013-03-04 11:48:42.000000000 +0000 +++ check-mk-1.2.6p12/check_mk-hpux_snmp_cs.cpu.php 2015-06-24 09:48:39.000000000 +0000 @@ -6,7 +6,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. diff -Nru check-mk-1.2.2p3/check_mk-hpux_tunables.maxfiles_lim.php check-mk-1.2.6p12/check_mk-hpux_tunables.maxfiles_lim.php --- check-mk-1.2.2p3/check_mk-hpux_tunables.maxfiles_lim.php 2013-03-04 11:48:42.000000000 +0000 +++ check-mk-1.2.6p12/check_mk-hpux_tunables.maxfiles_lim.php 2015-06-24 09:48:39.000000000 +0000 @@ -6,7 +6,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. diff -Nru check-mk-1.2.2p3/check_mk-hpux_tunables.nkthread.php check-mk-1.2.6p12/check_mk-hpux_tunables.nkthread.php --- check-mk-1.2.2p3/check_mk-hpux_tunables.nkthread.php 2013-03-04 11:48:42.000000000 +0000 +++ check-mk-1.2.6p12/check_mk-hpux_tunables.nkthread.php 2015-06-24 09:48:39.000000000 +0000 @@ -6,7 +6,7 @@ # | | |___| | | | __/ (__| < | | | | . 
\ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. diff -Nru check-mk-1.2.2p3/check_mk-hpux_tunables.nproc.php check-mk-1.2.6p12/check_mk-hpux_tunables.nproc.php --- check-mk-1.2.2p3/check_mk-hpux_tunables.nproc.php 2013-03-04 11:48:42.000000000 +0000 +++ check-mk-1.2.6p12/check_mk-hpux_tunables.nproc.php 2015-06-24 09:48:39.000000000 +0000 @@ -6,7 +6,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. diff -Nru check-mk-1.2.2p3/check_mk-hpux_tunables.php check-mk-1.2.6p12/check_mk-hpux_tunables.php --- check-mk-1.2.2p3/check_mk-hpux_tunables.php 2013-03-04 11:48:42.000000000 +0000 +++ check-mk-1.2.6p12/check_mk-hpux_tunables.php 2015-06-24 09:48:39.000000000 +0000 @@ -6,7 +6,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. diff -Nru check-mk-1.2.2p3/check_mk-hpux_tunables.semmni.php check-mk-1.2.6p12/check_mk-hpux_tunables.semmni.php --- check-mk-1.2.2p3/check_mk-hpux_tunables.semmni.php 2013-03-04 11:48:42.000000000 +0000 +++ check-mk-1.2.6p12/check_mk-hpux_tunables.semmni.php 2015-06-24 09:48:39.000000000 +0000 @@ -6,7 +6,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. diff -Nru check-mk-1.2.2p3/check_mk-hpux_tunables.semmns.php check-mk-1.2.6p12/check_mk-hpux_tunables.semmns.php --- check-mk-1.2.2p3/check_mk-hpux_tunables.semmns.php 2013-03-04 11:48:42.000000000 +0000 +++ check-mk-1.2.6p12/check_mk-hpux_tunables.semmns.php 2015-06-24 09:48:39.000000000 +0000 @@ -6,7 +6,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. diff -Nru check-mk-1.2.2p3/check_mk-hpux_tunables.shmseg.php check-mk-1.2.6p12/check_mk-hpux_tunables.shmseg.php --- check-mk-1.2.2p3/check_mk-hpux_tunables.shmseg.php 2013-03-04 11:48:42.000000000 +0000 +++ check-mk-1.2.6p12/check_mk-hpux_tunables.shmseg.php 2015-06-24 09:48:39.000000000 +0000 @@ -6,7 +6,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. 
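
Before the hr_fs hunk further below, it may help to see how these filesystem templates recover the mount point from the service description; PNP4Nagios typically delivers special characters as underscores, and the sample values here are illustrative assumptions:

<?php
// "Filesystem /var/log" arrives as "Filesystem__var_log"; the new code cuts
// the 11-character "Filesystem " prefix and un-escapes the underscores.
$servicedesc = "Filesystem__var_log";
$fsname = str_replace("_", "/", substr($servicedesc, 11));   // "/var/log"

// Windows hack from the template (simplified; the template additionally
// escapes the result for the RRDtool legend): "C//" becomes "C:\"
if (strlen($fsname) == 3 && substr($fsname, 1, 2) == '//') {
    $fsname = $fsname[0] . ":\\";
}
?>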
diff -Nru check-mk-1.2.2p3/check_mk-hr_cpu.php check-mk-1.2.6p12/check_mk-hr_cpu.php --- check-mk-1.2.2p3/check_mk-hr_cpu.php 2013-03-04 11:48:42.000000000 +0000 +++ check-mk-1.2.6p12/check_mk-hr_cpu.php 2015-06-24 09:48:39.000000000 +0000 @@ -6,7 +6,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. diff -Nru check-mk-1.2.2p3/check_mk-hr_fs.php check-mk-1.2.6p12/check_mk-hr_fs.php --- check-mk-1.2.2p3/check_mk-hr_fs.php 2013-11-05 09:22:37.000000000 +0000 +++ check-mk-1.2.6p12/check_mk-hr_fs.php 2015-09-21 10:59:54.000000000 +0000 @@ -6,7 +6,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. @@ -25,10 +25,23 @@ setlocale(LC_ALL, "POSIX"); +// Make data sources available via names +$RRD = array(); +foreach ($NAME as $i => $n) { + $RRD[$n] = "$RRDFILE[$i]:$DS[$i]:MAX"; + $RRD_MIN[$n] = "$RRDFILE[$i]:$DS[$i]:MIN"; + $RRD_AVG[$n] = "$RRDFILE[$i]:$DS[$i]:AVERAGE"; + $WARN[$n] = $WARN[$i]; + $CRIT[$n] = $CRIT[$i]; + $MIN[$n] = $MIN[$i]; + $MAX[$n] = $MAX[$i]; + $ACT[$n] = $ACT[$i]; +} + # RRDtool Options #$servicedes=$NAGIOS_SERVICEDESC -$fsname = str_replace("_", "/", substr($servicedesc, 3)); +$fsname = str_replace("_", "/", substr($servicedesc,11)); $fstitle = $fsname; # Hack for windows: replace C// with C:\ @@ -47,57 +60,98 @@ $opt[1] = "--vertical-label GB -l 0 -u $maxgb --title '$hostname: Filesystem $fstitle ($sizegb GB)' "; # First graph show current filesystem usage -$def[1] = "DEF:mb=$RRDFILE[1]:$DS[1]:MAX "; +$def[1] = "DEF:mb=$RRDFILE[1]:$DS[1]:MAX "; $def[1] .= "CDEF:var1=mb,1024,/ "; -$def[1] .= "AREA:var1#00ffc6:\"used space on $fsname\\n\" "; -$def[1] .= "LINE1:var1#226600: "; +$def[1] .= "AREA:var1#00ffc6:\"used space on $fsname\\n\" "; + +# Optional uncommitted usage e.g. for esx hosts +if(isset($RRD['uncommitted'])) { + $def[1] .= "DEF:uncommitted_mb=".$RRD['uncommitted']." "; + $def[1] .= "CDEF:uncommitted_gb=uncommitted_mb,1024,/ "; + $def[1] .= "CDEF:total_gb=uncommitted_gb,var1,+ "; +} else { + $def[1] .= "CDEF:total_gb=var1 "; +} + $def[1] .= "HRULE:$maxgb#003300:\"Size ($sizegb GB) \" "; $def[1] .= "HRULE:$warngb#ffff00:\"Warning at $warngbtxt GB \" "; $def[1] .= "HRULE:$critgb#ff0000:\"Critical at $critgbtxt GB \\n\" "; $def[1] .= "GPRINT:var1:LAST:\"current\: %6.2lf GB\" "; $def[1] .= "GPRINT:var1:MAX:\"max\: %6.2lf GB \" "; -$def[1] .= "GPRINT:var1:AVERAGE:\"avg\: %6.2lf GB\" "; +$def[1] .= "GPRINT:var1:AVERAGE:\"avg\: %6.2lf GB\\n\" "; + +if(isset($RRD['uncommitted'])) { + $def[1] .= "AREA:uncommitted_gb#eeccff:\"Uncommited\":STACK "; + $def[1] .= "GPRINT:uncommitted_gb:MAX:\"%6.2lf GB\l\" "; +} + +$def[1] .= "LINE1:total_gb#226600 "; # Second graph is optional and shows trend. The MAX field # of the third variable contains (size of the filesystem in MB # / range in hours). From that we can compute the configured range. 
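
Worked through with concrete numbers (illustrative, not from the patch): for a 100 GB filesystem monitored with a 24 h trend range, the exported maximum is size_mb / 24, and the configured range falls back out of the division:

<?php
$size_mb           = 102400.0;             // 100 GB filesystem
$size_mb_per_hours = $size_mb / 24.0;      // MAX of the trend variable (~4266.7)
$hours             = 1.0 / ($size_mb_per_hours / $size_mb);  // 24.0
$range             = sprintf("%.0fh", $hours);               // "24h"
?>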
-if (isset($DS[2])) { - $size_mb_per_hours = floatval($MAX[3]); // this is size_mb / range(hours) +if (isset($RRD['growth'])) { + $size_mb_per_hours = floatval($MAX['trend']); // this is size_mb / range(hours) $size_mb = floatval($MAX[1]); $hours = 1.0 / ($size_mb_per_hours / $size_mb); $range = sprintf("%.0fh", $hours); - // Current growth / shrinking. This value is give as MB / 24 hours. + // Current growth / shrinking. This value is give as MB / 24 hours. // Note: This has changed in 1.1.13i3. Prior it was MB / trend_range! $opt[2] = "--vertical-label '+/- MB / 24h' -l -1 -u 1 -X0 --title '$hostname: Growth of $fstitle' "; - $def[2] = "DEF:growth_max=$RRDFILE[2]:$DS[2]:MAX "; - $def[2] .= "DEF:growth_min=$RRDFILE[2]:$DS[2]:MIN "; - $def[2] .= "DEF:trend=$RRDFILE[3]:$DS[3]:AVERAGE "; + $def[2] = "DEF:growth_max=${RRD['growth']} "; + $def[2] .= "DEF:growth_min=${RRD_MIN['growth']} "; + $def[2] .= "DEF:trend=${RRD_AVG['trend']} "; $def[2] .= "CDEF:growth_pos=growth_max,0,MAX "; $def[2] .= "CDEF:growth_neg=growth_min,0,MIN "; - $def[2] .= "CDEF:growth_minabs=0,growth_min,- "; + $def[2] .= "CDEF:growth_minabs=0,growth_min,- "; $def[2] .= "CDEF:growth=growth_minabs,growth_max,MAX "; $def[2] .= "HRULE:0#c0c0c0 "; - $def[2] .= "AREA:growth_pos#3060f0:\"Grow\" "; - $def[2] .= "AREA:growth_neg#30f060:\"Shrink \" "; + $def[2] .= "AREA:growth_pos#3060f0:\"Grow\" "; + $def[2] .= "AREA:growth_neg#30f060:\"Shrink \" "; $def[2] .= "GPRINT:growth:LAST:\"Current\: %+9.2lfMB / 24h\" "; $def[2] .= "GPRINT:growth:MAX:\"Max\: %+9.2lfMB / 24h\\n\" "; // Trend $opt[3] = "--vertical-label '+/- MB / 24h' -l -1 -u 1 -X0 --title '$hostname: Trend for $fstitle' "; - $def[3] = "DEF:trend=$RRDFILE[3]:$DS[3]:AVERAGE "; + $def[3] = "DEF:trend=${RRD_AVG['trend']} "; $def[3] .= "HRULE:0#c0c0c0 "; $def[3] .= "LINE1:trend#000000:\"Trend\:\" "; - $def[3] .= "GPRINT:trend:LAST:\"%+7.2lf MB/24h\" "; - if ($WARN[3]) { - $warn_mb = sprintf("%.2fMB", $WARN[3] * $hours / 24.0); - $def[3] .= "LINE1:$WARN[3]#ffff00:\"Warn\: $warn_mb / $range\" "; + $def[3] .= "GPRINT:trend:LAST:\"%+7.2lf MB/24h\" "; + if ($WARN['trend']) { + $warn_mb = sprintf("%.2fMB", $WARN['trend'] * $hours / 24.0); + $def[3] .= "LINE1:${WARN['trend']}#ffff00:\"Warn\: $warn_mb / $range\" "; } - if ($CRIT[3]) { - $crit_mb = sprintf("%.2fMB", $CRIT[3] * $hours / 24.0); - $def[3] .= "LINE1:$CRIT[3]#ff0000:\"Crit\: $crit_mb / $range\" "; + if ($CRIT['trend']) { + $crit_mb = sprintf("%.2fMB", $CRIT['trend'] * $hours / 24.0); + $def[3] .= "LINE1:${CRIT['trend']}#ff0000:\"Crit\: $crit_mb / $range\" "; } $def[3] .= "COMMENT:\"\\n\" "; } +if (isset($RRD['trend_hoursleft'])) { + // Trend + $opt[4] = "--vertical-label 'Days left' -l -1 -u 365 -X0 --title '$hostname: Days left for $fstitle' "; + $def[4] = "DEF:hours_left=${RRD_AVG['trend_hoursleft']} "; + $def[4] .= "DEF:hours_left_min=${RRD_MIN['trend_hoursleft']} "; + // negative hours indicate no growth + // the dataset hours_left_isneg stores this info for each point as True/False + $def[4] .= "CDEF:hours_left_isneg=hours_left_min,-1,EQ "; + $def[4] .= "CDEF:hours_left_unmon=hours_left_min,400,0,IF "; + $def[4] .= "CDEF:days_left=hours_left,24,/ "; + $def[4] .= "CDEF:days_left_cap=days_left,400,MIN "; + // Convert negative points to 400 (y-axis cap) + $def[4] .= "CDEF:days_left_cap_positive=hours_left_isneg,400,days_left_cap,IF "; + // The AREA has a rendering problem. 
Points are too far to the right + $def[4] .= "AREA:hours_left_unmon#AA2200: "; + + $def[4] .= "AREA:days_left_cap_positive#22AA44:\"Days left\:\" "; + if ($ACT[4] == -1) + { + $def[4] .= "COMMENT:\"Not growing\" "; + } + else { + $def[4] .= "GPRINT:days_left:LAST:\"%7.2lf days\" "; + } +} ?> diff -Nru check-mk-1.2.2p3/check_mk-hr_mem.php check-mk-1.2.6p12/check_mk-hr_mem.php --- check-mk-1.2.2p3/check_mk-hr_mem.php 2013-11-05 09:22:37.000000000 +0000 +++ check-mk-1.2.6p12/check_mk-hr_mem.php 2015-06-24 09:48:39.000000000 +0000 @@ -6,7 +6,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. @@ -27,52 +27,91 @@ $maxgb = sprintf("%.1f", $MAX[1] / 1024.0); -$def[1] = "DEF:ram=$RRDFILE[1]:$DS[1]:MAX " ; -$def[1] .= "DEF:swap=$RRDFILE[2]:$DS[2]:MAX " ; -$def[1] .= "DEF:virt=$RRDFILE[3]:$DS[3]:MAX " ; -$def[1] .= "HRULE:$MAX[3]#000080:\"RAM+SWAP installed\" "; -$def[1] .= "HRULE:$MAX[1]#2040d0:\"$maxgb GB RAM installed\" "; -$def[1] .= "HRULE:$WARN[3]#FFFF00:\"Warning\" "; -$def[1] .= "HRULE:$CRIT[3]#FF0000:\"Critical\" "; - -$def[1] .= "'COMMENT:\\n' "; -$def[1] .= "AREA:ram#80ff40:\"RAM used \" " ; -$def[1] .= "GPRINT:ram:LAST:\"%6.0lf MB last\" " ; -$def[1] .= "GPRINT:ram:AVERAGE:\"%6.0lf MB avg\" " ; -$def[1] .= "GPRINT:ram:MAX:\"%6.0lf MB max\\n\" "; - -$def[1] .= "AREA:swap#008030:\"SWAP used \":STACK " ; -$def[1] .= "GPRINT:swap:LAST:\"%6.0lf MB last\" " ; -$def[1] .= "GPRINT:swap:AVERAGE:\"%6.0lf MB avg\" " ; -$def[1] .= "GPRINT:swap:MAX:\"%6.0lf MB max\\n\" " ; - -$def[1] .= "LINE:virt#000000:\"RAM+SWAP used\" " ; -$def[1] .= "GPRINT:virt:LAST:\"%6.0lf MB last\" " ; -$def[1] .= "GPRINT:virt:AVERAGE:\"%6.0lf MB avg\" " ; -$def[1] .= "GPRINT:virt:MAX:\"%6.0lf MB max\\n\" " ; - -/* HACK: Avoid error if RRD does not contain two data - sources which .XML file *does*. F..ck. This does not - work with multiple RRDs... */ -$retval = -1; -system("rrdtool info $RRDFILE[1] | fgrep -q 'ds[5]'", $retval); -if ($retval == 0) -{ - if (count($NAME) >= 4 and $NAME[4] == "mapped") { - $def[1] .= "DEF:mapped=$RRDFILE[4]:$DS[4]:MAX " ; - $def[1] .= "LINE2:mapped#8822ff:\"Memory mapped\" " ; - $def[1] .= "GPRINT:mapped:LAST:\"%6.0lf MB last\" " ; - $def[1] .= "GPRINT:mapped:AVERAGE:\"%6.0lf MB avg\" " ; - $def[1] .= "GPRINT:mapped:MAX:\"%6.0lf MB max\\n\" " ; - } - - if (count($NAME) >= 5 and $NAME[5] == "committed_as") { - $def[1] .= "DEF:committed=$RRDFILE[5]:$DS[5]:MAX " ; - $def[1] .= "LINE2:committed#cc00dd:\"Committed \" " ; - $def[1] .= "GPRINT:committed:LAST:\"%6.0lf MB last\" " ; - $def[1] .= "GPRINT:committed:AVERAGE:\"%6.0lf MB avg\" " ; - $def[1] .= "GPRINT:committed:MAX:\"%6.0lf MB max\\n\" " ; - } - } - +# For the rest of the data we rather work with names instead +# of numbers +$RRD = array(); +foreach ($NAME as $i => $n) { + $RRD[$n] = "$RRDFILE[$i]:$DS[$i]:MAX"; + $WARN[$n] = $WARN[$i]; + $CRIT[$n] = $CRIT[$i]; + $MIN[$n] = $MIN[$i]; + $MAX[$n] = $MAX[$i]; +} + +$def[1] = ""; + +if (isset($RRD['pagetables'])) { + $def[1] .= "DEF:pagetables=$RRD[pagetables] " + . "DEF:ram=$RRD[ramused] "; +} +else { + $def[1] .= "DEF:ram=$RRD[ramused] "; +} + +$def[1] .= "DEF:virt=$RRDFILE[3]:$DS[3]:MAX " + . "DEF:swap=$RRDFILE[2]:$DS[2]:MAX " + + . "HRULE:$MAX[3]#000080:\"RAM+SWAP installed\" " + . 
"HRULE:$MAX[1]#2040d0:\"$maxgb GB RAM installed\" " + . "HRULE:$WARN[3]#FFFF00:\"Warning\" " + . "HRULE:$CRIT[3]#FF0000:\"Critical\" " + + . "'COMMENT:\\n' " + . "AREA:ram#80ff40:\"RAM used \" " + . "GPRINT:ram:LAST:\"%6.0lf MB last\" " + . "GPRINT:ram:AVERAGE:\"%6.0lf MB avg\" " + . "GPRINT:ram:MAX:\"%6.0lf MB max\\n\" " + + . "AREA:swap#008030:\"SWAP used \":STACK " + . "GPRINT:swap:LAST:\"%6.0lf MB last\" " + . "GPRINT:swap:AVERAGE:\"%6.0lf MB avg\" " + . "GPRINT:swap:MAX:\"%6.0lf MB max\\n\" " + ; + + +if (isset($RRD['pagetables'])) { + $def[1] .= "" + . "AREA:pagetables#ff8800:\"Page tables \":STACK " + . "GPRINT:pagetables:LAST:\"%6.0lf MB last\" " + . "GPRINT:pagetables:AVERAGE:\"%6.0lf MB avg\" " + . "GPRINT:pagetables:MAX:\"%6.0lf MB max\\n\" " + . "LINE:virt#000000:\"RAM+SWAP+PT used\" " + . "GPRINT:virt:LAST:\"%6.0lf MB last\" " + . "GPRINT:virt:AVERAGE:\"%6.0lf MB avg\" " + . "GPRINT:virt:MAX:\"%6.0lf MB max\\n\" " + ; +} + +else { + $def[1] .= "LINE:virt#000000:\"RAM+SWAP used \" " + . "GPRINT:virt:LAST:\"%6.0lf MB last\" " + . "GPRINT:virt:AVERAGE:\"%6.0lf MB avg\" " + . "GPRINT:virt:MAX:\"%6.0lf MB max\\n\" " + ; +} + +if (isset($RRD['mapped'])) { + $def[1] .= "DEF:mapped=$RRD[mapped] " + . "LINE2:mapped#8822ff:\"Memory mapped \" " + . "GPRINT:mapped:LAST:\"%6.0lf MB last\" " + . "GPRINT:mapped:AVERAGE:\"%6.0lf MB avg\" " + . "GPRINT:mapped:MAX:\"%6.0lf MB max\\n\" " ; +} + +if (isset($RRD['committed_as'])) { + $def[1] .= "DEF:committed=$RRD[committed_as] " + . "LINE2:committed#cc00dd:\"Committed \" " + . "GPRINT:committed:LAST:\"%6.0lf MB last\" " + . "GPRINT:committed:AVERAGE:\"%6.0lf MB avg\" " + . "GPRINT:committed:MAX:\"%6.0lf MB max\\n\" " ; +} + +/* Shared memory is part of RAM. So simply overlay it */ +if (isset($RRD['shared'])) { + $def[1] .= "DEF:shared=$RRD[shared] " + . "AREA:shared#44ccff:\"Shared Memory \" " + . "GPRINT:shared:LAST:\"%6.0lf MB last\" " + . "GPRINT:shared:AVERAGE:\"%6.0lf MB avg\" " + . "GPRINT:shared:MAX:\"%6.0lf MB max\\n\" " ; +} ?> diff -Nru check-mk-1.2.2p3/check_mk-hwg_temp.php check-mk-1.2.6p12/check_mk-hwg_temp.php --- check-mk-1.2.2p3/check_mk-hwg_temp.php 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/check_mk-hwg_temp.php 2015-06-24 09:48:39.000000000 +0000 @@ -0,0 +1,38 @@ + diff -Nru check-mk-1.2.2p3/check_mk-ibm_svc_enclosurestats.power.php check-mk-1.2.6p12/check_mk-ibm_svc_enclosurestats.power.php --- check-mk-1.2.2p3/check_mk-ibm_svc_enclosurestats.power.php 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/check_mk-ibm_svc_enclosurestats.power.php 2015-06-24 09:48:39.000000000 +0000 @@ -0,0 +1,35 @@ + diff -Nru check-mk-1.2.2p3/check_mk-ibm_svc_enclosurestats.temp.php check-mk-1.2.6p12/check_mk-ibm_svc_enclosurestats.temp.php --- check-mk-1.2.2p3/check_mk-ibm_svc_enclosurestats.temp.php 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/check_mk-ibm_svc_enclosurestats.temp.php 2015-06-24 09:48:39.000000000 +0000 @@ -0,0 +1,38 @@ + diff -Nru check-mk-1.2.2p3/check_mk-ibm_svc_host.php check-mk-1.2.6p12/check_mk-ibm_svc_host.php --- check-mk-1.2.2p3/check_mk-ibm_svc_host.php 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/check_mk-ibm_svc_host.php 2015-06-24 09:48:39.000000000 +0000 @@ -0,0 +1,49 @@ +MACRO['DISP_HOSTNAME'] . ' / ' . $this->MACRO['DISP_SERVICEDESC'] . ' per State" --lower=0'; + +$def[1] = "" + . "DEF:active=$RRDFILE[1]:$DS[1]:MAX " + . "DEF:inactive=$RRDFILE[2]:$DS[2]:MAX " + . "DEF:degraded=$RRDFILE[3]:$DS[3]:MAX " + . "DEF:offline=$RRDFILE[4]:$DS[4]:MAX " + . 
"DEF:other=$RRDFILE[5]:$DS[5]:MAX " + . "AREA:active#008000:\"Active \" " + . "GPRINT:active:AVERAGE:\"% 6.0lf Hosts avg\" " + . "GPRINT:active:LAST:\"% 6.0lf Hosts last\\n\" " + . "AREA:inactive#0000FF:\"Inactive \":STACK " + . "GPRINT:inactive:AVERAGE:\"% 6.0lf Hosts avg\" " + . "GPRINT:inactive:LAST:\"% 6.0lf Hosts last\\n\" " + . "AREA:degraded#F84:\"Degraded \":STACK " + . "GPRINT:degraded:AVERAGE:\"% 6.0lf Hosts avg\" " + . "GPRINT:degraded:LAST:\"% 6.0lf Hosts last\\n\" " + . "AREA:offline#FF0000:\"Offline \":STACK " + . "GPRINT:offline:AVERAGE:\"% 6.0lf Hosts avg\" " + . "GPRINT:offline:LAST:\"% 6.0lf Hosts last\\n\" " + . "AREA:other#000:\"Other \":STACK " + . "GPRINT:other:AVERAGE:\"% 6.0lf Hosts avg\" " + . "GPRINT:other:LAST:\"% 6.0lf Hosts last\\n\" " + . ""; diff -Nru check-mk-1.2.2p3/check_mk-ibm_svc_license.php check-mk-1.2.6p12/check_mk-ibm_svc_license.php --- check-mk-1.2.2p3/check_mk-ibm_svc_license.php 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/check_mk-ibm_svc_license.php 2015-06-24 09:48:39.000000000 +0000 @@ -0,0 +1,37 @@ +MACRO['DISP_HOSTNAME'] . ' / ' . $this->MACRO['DISP_SERVICEDESC'] . ' per State" --lower=0'; + +$def[1] = "" + . "DEF:licensed=$RRDFILE[1]:$DS[1]:MAX " + . "DEF:used=$RRDFILE[2]:$DS[2]:MAX " + . "AREA:used#008000:\"Used \" " + . "GPRINT:used:AVERAGE:\"% 6.0lf avg\" " + . "GPRINT:used:LAST:\"% 6.0lf last\\n\" " + . "LINE1:licensed#0000FF:\"Licensed \" " + . "GPRINT:licensed:AVERAGE:\"% 6.0lf avg\" " + . "GPRINT:licensed:LAST:\"% 6.0lf last\\n\" " + . ""; diff -Nru check-mk-1.2.2p3/check_mk-ibm_svc_mdiskgrp.php check-mk-1.2.6p12/check_mk-ibm_svc_mdiskgrp.php --- check-mk-1.2.2p3/check_mk-ibm_svc_mdiskgrp.php 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/check_mk-ibm_svc_mdiskgrp.php 2015-06-24 09:48:39.000000000 +0000 @@ -0,0 +1,157 @@ + $n) { + $RRD[$n] = "$RRDFILE[$i]:$DS[$i]:MAX"; + $RRD_MIN[$n] = "$RRDFILE[$i]:$DS[$i]:MIN"; + $RRD_AVG[$n] = "$RRDFILE[$i]:$DS[$i]:AVERAGE"; + $WARN[$n] = $WARN[$i]; + $CRIT[$n] = $CRIT[$i]; + $MIN[$n] = $MIN[$i]; + $MAX[$n] = $MAX[$i]; +} + +# RRDtool Options +#$servicedes=$NAGIOS_SERVICEDESC + +$fsname = str_replace("_", "/", substr($servicedesc, 3)); +$fstitle = $fsname; + +# Hack for windows: replace C// with C:\ +if (strlen($fsname) == 3 && substr($fsname, 1, 2) == '//') { + $fsname = $fsname[0] . "\:\\\\"; + $fstitle = $fsname[0] . ":\\"; +} + +$sizegb = sprintf("%.1f", $MAX[1] / 1024.0); +$maxgb = $MAX[1] / 1024.0; +$warngb = $WARN[1] / 1024.0; +$critgb = $CRIT[1] / 1024.0; +$warngbtxt = sprintf("%.1f", $warngb); +$critgbtxt = sprintf("%.1f", $critgb); + +$opt[1] = "--vertical-label GB -l 0 -u $maxgb --title '$hostname: $servicedesc ($sizegb GB)' "; + +# First graph show current filesystem usage +$def[1] = "DEF:mb=$RRDFILE[1]:$DS[1]:MAX "; +$def[1] .= "CDEF:var1=mb,1024,/ "; +$def[1] .= "AREA:var1#00ffc6:\"used space on $fsname\\n\" "; + +# Optional uncommitted usage e.g. for esx hosts +if(isset($RRD['uncommitted'])) { + $def[1] .= "DEF:uncommitted_mb=".$RRD['uncommitted']." 
"; + $def[1] .= "CDEF:uncommitted_gb=uncommitted_mb,1024,/ "; + $def[1] .= "CDEF:total_gb=uncommitted_gb,var1,+ "; +} else { + $def[1] .= "CDEF:total_gb=var1 "; +} + +$def[1] .= "HRULE:$maxgb#003300:\"Size ($sizegb GB) \" "; +$def[1] .= "HRULE:$warngb#ffff00:\"Warning at $warngbtxt GB \" "; +$def[1] .= "HRULE:$critgb#ff0000:\"Critical at $critgbtxt GB \\n\" "; +$def[1] .= "GPRINT:var1:LAST:\"current\: %6.2lf GB\" "; +$def[1] .= "GPRINT:var1:MAX:\"max\: %6.2lf GB \" "; +$def[1] .= "GPRINT:var1:AVERAGE:\"avg\: %6.2lf GB\\n\" "; + +if(isset($RRD['uncommitted'])) { + $def[1] .= "AREA:uncommitted_gb#eeccff:\"Uncommited\":STACK "; + $def[1] .= "GPRINT:uncommitted_gb:MAX:\"%6.2lf GB\l\" "; +} + +$def[1] .= "LINE1:total_gb#226600 "; + +# Second graph is optional and shows trend. The MAX field +# of the third variable contains (size of the filesystem in MB +# / range in hours). From that we can compute the configured range. +if (isset($RRD['growth'])) { + $size_mb_per_hours = floatval($MAX['trend']); // this is size_mb / range(hours) + $size_mb = floatval($MAX[1]); + $hours = 1.0 / ($size_mb_per_hours / $size_mb); + $range = sprintf("%.0fh", $hours); + + // Current growth / shrinking. This value is give as MB / 24 hours. + // Note: This has changed in 1.1.13i3. Prior it was MB / trend_range! + $opt[2] = "--vertical-label '+/- MB / 24h' -l -1 -u 1 -X0 --title '$hostname: Growth of $servicedesc' "; + $def[2] = "DEF:growth_max=${RRD['growth']} "; + $def[2] .= "DEF:growth_min=${RRD_MIN['growth']} "; + $def[2] .= "DEF:trend=${RRD_AVG['trend']} "; + $def[2] .= "CDEF:growth_pos=growth_max,0,MAX "; + $def[2] .= "CDEF:growth_neg=growth_min,0,MIN "; + $def[2] .= "CDEF:growth_minabs=0,growth_min,- "; + $def[2] .= "CDEF:growth=growth_minabs,growth_max,MAX "; + $def[2] .= "HRULE:0#c0c0c0 "; + $def[2] .= "AREA:growth_pos#3060f0:\"Grow\" "; + $def[2] .= "AREA:growth_neg#30f060:\"Shrink \" "; + $def[2] .= "GPRINT:growth:LAST:\"Current\: %+9.2lfMB / 24h\" "; + $def[2] .= "GPRINT:growth:MAX:\"Max\: %+9.2lfMB / 24h\\n\" "; + + // Trend + $opt[3] = "--vertical-label '+/- MB / 24h' -l -1 -u 1 -X0 --title '$hostname: Trend for $servicedesc' "; + $def[3] = "DEF:trend=${RRD_AVG['trend']} "; + $def[3] .= "HRULE:0#c0c0c0 "; + $def[3] .= "LINE1:trend#000000:\"Trend\:\" "; + $def[3] .= "GPRINT:trend:LAST:\"%+7.2lf MB/24h\" "; + if ($WARN['trend']) { + $warn_mb = sprintf("%.2fMB", $WARN['trend'] * $hours / 24.0); + $def[3] .= "LINE1:${WARN['trend']}#ffff00:\"Warn\: $warn_mb / $range\" "; + } + if ($CRIT['trend']) { + $crit_mb = sprintf("%.2fMB", $CRIT['trend'] * $hours / 24.0); + $def[3] .= "LINE1:${CRIT['trend']}#ff0000:\"Crit\: $crit_mb / $range\" "; + } + $def[3] .= "COMMENT:\"\\n\" "; +} + +if (isset($RRD['trend_hoursleft'])) { + // Trend + $opt[4] = "--vertical-label 'Days left' -l -1 -u 365 -X0 --title '$hostname: Days left for $servicedesc' "; + $def[4] = "DEF:hours_left=${RRD_AVG['trend_hoursleft']} "; + $def[4] .= "DEF:hours_left_min=${RRD_MIN['trend_hoursleft']} "; + // negative hours indicate no growth + // the dataset hours_left_isneg stores this info for each point as True/False + $def[4] .= "CDEF:hours_left_isneg=hours_left_min,-1,EQ "; + $def[4] .= "CDEF:hours_left_unmon=hours_left_min,400,0,IF "; + $def[4] .= "CDEF:days_left=hours_left,24,/ "; + $def[4] .= "CDEF:days_left_cap=days_left,400,MIN "; + // Convert negative points to 400 (y-axis cap) + $def[4] .= "CDEF:days_left_cap_positive=hours_left_isneg,400,days_left_cap,IF "; + // The AREA has a rendering problem. 
Points are too far to the right + $def[4] .= "AREA:hours_left_unmon#AA2200: "; + + $def[4] .= "AREA:days_left_cap_positive#22AA44:\"Days left\:\" "; + if ($ACT[4] == -1) + { + $def[4] .= "COMMENT:\"Not growing\" "; + } + else { + $def[4] .= "GPRINT:days_left:LAST:\"%7.2lf days\" "; + } +} + +?> diff -Nru check-mk-1.2.2p3/check_mk-ibm_svc_nodestats.cache.php check-mk-1.2.6p12/check_mk-ibm_svc_nodestats.cache.php --- check-mk-1.2.2p3/check_mk-ibm_svc_nodestats.cache.php 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/check_mk-ibm_svc_nodestats.cache.php 2015-06-24 09:48:39.000000000 +0000 @@ -0,0 +1,37 @@ +MACRO['DISP_HOSTNAME'] . ' / ' . $this->MACRO['DISP_SERVICEDESC'] . '" --lower=0 -u 100'; + +$def[1] = "" + . "DEF:write_cache_pc=$RRDFILE[1]:$DS[1]:MAX " + . "DEF:total_cache_pc=$RRDFILE[2]:$DS[2]:MAX " + . "LINE1:write_cache_pc#008000:\"Write Cache Usage \" " + . "GPRINT:write_cache_pc:AVERAGE:\"% 6.0lf%% avg\" " + . "GPRINT:write_cache_pc:LAST:\"% 6.0lf%% last\\n\" " + . "LINE1:total_cache_pc#0000FF:\"Total Cache Usage \" " + . "GPRINT:total_cache_pc:AVERAGE:\"% 6.0lf%% avg\" " + . "GPRINT:total_cache_pc:LAST:\"% 6.0lf%% last\\n\" " + . ""; diff -Nru check-mk-1.2.2p3/check_mk-ibm_svc_nodestats.cpu_util.php check-mk-1.2.6p12/check_mk-ibm_svc_nodestats.cpu_util.php --- check-mk-1.2.2p3/check_mk-ibm_svc_nodestats.cpu_util.php 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/check_mk-ibm_svc_nodestats.cpu_util.php 2015-06-24 09:48:39.000000000 +0000 @@ -0,0 +1,70 @@ + $n) { + $RRD[$n] = "$RRDFILE[$i]:$DS[$i]:MAX"; + $WARN[$n] = $WARN[$i]; + $CRIT[$n] = $CRIT[$i]; + $MIN[$n] = $MIN[$i]; + $MAX[$n] = $MAX[$i]; +} + +$num_threads = $MAX[1]; + +$opt[1] = "--vertical-label 'Utilization %' -l0 -u 100 --title \"$hostname / $servicedesc\" "; + +$def[1] = "DEF:perc=$RRD[util] " + . "CDEF:util=perc,$num_threads,*,100,/ " + ; + +$def[1] .= "AREA:util#60f020:\"Utilization\:\" " + . "LINE:util#50b01a " + . "GPRINT:perc:LAST:\"%.1lf%%\" " + . "GPRINT:perc:MIN:\"min\: %.1lf%%\" " + . "GPRINT:perc:MAX:\"max\: %.1lf%%\\n\" " + ; + + +if (isset($RRD["avg"])) { + $def[1] .= "DEF:aperc=$RRD[avg] ". + "CDEF:avg=aperc,$num_threads,*,100,/ ". + "LINE:avg#004000:\"Averaged\: \" ". + "GPRINT:aperc:LAST:\"%.1lf%%,\" ". + "GPRINT:aperc:MIN:\"min\: %.1lf%%,\" ". + "GPRINT:aperc:MAX:\"max\: %.1lf%%\\n\" ". + ""; +} + +if ($WARN['util']) { + $def[1] .= "HRULE:$WARN[1]#fff000:\"Warn at $WARN[util]% \" " + . "HRULE:$CRIT[1]#ff0000:\"Critical at $CRIT[util]%\\n\" "; +} +else { + $def[1] .= "COMMENT:\"\\n\" "; +} + +?> diff -Nru check-mk-1.2.2p3/check_mk-ibm_svc_nodestats.diskio.php check-mk-1.2.6p12/check_mk-ibm_svc_nodestats.diskio.php --- check-mk-1.2.2p3/check_mk-ibm_svc_nodestats.diskio.php 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/check_mk-ibm_svc_nodestats.diskio.php 2015-09-21 10:59:54.000000000 +0000 @@ -0,0 +1,148 @@ + $n) { + $RRD[$n] = "$RRDFILE[$i]:$DS[$i]:MAX"; + $WARN[$n] = $WARN[$i]; + $CRIT[$n] = $CRIT[$i]; + $MIN[$n] = $MIN[$i]; + $MAX[$n] = $MAX[$i]; + } + + $parts = explode("_", $servicedesc); + $disk = $parts[2]; + + $opt[1] = "--vertical-label 'Throughput (MB/s)' -X0 --title \"Disk throughput $hostname / $disk\" "; + + $def[1] = + "HRULE:0#a0a0a0 ". + # read + "DEF:read=$RRD[read] ". + "CDEF:read_mb=read,1048576,/ ". + "AREA:read_mb#40c080:\"Read \" ". + "GPRINT:read_mb:LAST:\"%8.1lf MB/s last\" ". + "GPRINT:read_mb:AVERAGE:\"%6.1lf MB/s avg\" ". 
+ "GPRINT:read_mb:MAX:\"%6.1lf MB/s max\\n\" "; + + # read average as line in the same graph + if (isset($RRD["read.avg"])) { + $def[1] .= + "DEF:read_avg=${RRD['read.avg']} ". + "CDEF:read_avg_mb=read_avg,1048576,/ ". + "LINE:read_avg_mb#202020 "; + } + + # write + $def[1] .= + "DEF:write=$RRD[write] ". + "CDEF:write_mb=write,1048576,/ ". + "CDEF:write_mb_neg=write_mb,-1,* ". + "AREA:write_mb_neg#4080c0:\"Write \" ". + "GPRINT:write_mb:LAST:\"%6.1lf MB/s last\" ". + "GPRINT:write_mb:AVERAGE:\"%6.1lf MB/s avg\" ". + "GPRINT:write_mb:MAX:\"%6.1lf MB/s max\\n\" ". + ""; + + # show levels for read + if ($WARN['read']) { + $def[1] .= "HRULE:$WARN[read]#ffd000:\"Warning for read at " . sprintf("%6.1f", $WARN[1]) . " MB/s \" "; + $def[1] .= "HRULE:$CRIT[read]#ff0000:\"Critical for read at " . sprintf("%6.1f", $CRIT[1]) . " MB/s\\n\" "; + } + + # show levels for write + if ($WARN['write']) { + $def[1] .= "HRULE:-$WARN[write]#ffd000:\"Warning for write at " . sprintf("%6.1f", $WARN[2]) . " MB/s \" "; + $def[1] .= "HRULE:-$CRIT[write]#ff0000:\"Critical for write at " . sprintf("%6.1f", $CRIT[2]) . " MB/s\\n\" "; + } + + # write average + if (isset($DS["write.avg"])) { + $def[1] .= + "DEF:write_avg=${RRD['write.avg']} ". + "CDEF:write_avg_mb=write_avg,1048576,/ ". + "CDEF:write_avg_mb_neg=write_avg_mb,-1,* ". + "LINE:write_avg_mb_neg#202020 "; + } + + # latency + if (isset($RRD["latency"])) { + $opt[] = "--vertical-label 'Latency (ms)' -X0 --title \"Latency $hostname / $disk\" "; + $def[] = "" + . "DEF:latency=$RRD[latency] " + . "AREA:latency#aaccdd:\"Latency\" " + . "LINE:latency#7799aa " + . "GPRINT:latency:LAST:\"%6.1lf ms last\" " + . "GPRINT:latency:AVERAGE:\"%6.1lf ms avg\" " + . "GPRINT:latency:MAX:\"%6.1lf ms max\\n\" " + ; + } + + # IOs per second + if (isset($RRD["ios"])) { + $opt[] = "--vertical-label 'IO Operations / sec' -X0 --title \"IOs/sec $hostname / $disk\" "; + $def[] = "" + . "DEF:ios=$RRD[ios] " + . "AREA:ios#ddccaa:\"ios\" " + . "LINE:ios#aa9977 " + . "GPRINT:ios:LAST:\"%6.1lf/sec last\" " + . "GPRINT:ios:AVERAGE:\"%6.1lf/sec avg\" " + . "GPRINT:ios:MAX:\"%6.1lf/sec max\\n\" " + ; + } + + if (isset($RRD["read_ql"])) { + $opt[] = "--vertical-label 'Queue Length' -X0 -u5 -l-5 --title \"Queue Length $hostname / $disk\" "; + $def[] = "" + . "DEF:read=$RRD[read_ql] " + . "DEF:write=$RRD[write_ql] " + . "CDEF:writen=write,-1,* " + . "HRULE:0#a0a0a0 " + . "AREA:read#669a76 " + . "AREA:writen#517ba5 " + ; + + } + +} + +// legacy version of diskstat +else { + $opt[1] = "--vertical-label 'Througput (MByte/s)' -l0 -u 1 --title \"Disk throughput $hostname / $servicedesc\" "; + + $def[1] = "DEF:kb=$RRDFILE[1]:$DS[1]:AVERAGE " ; + $def[1] .= "CDEF:mb=kb,1024,/ " ; + $def[1] .= "AREA:mb#40c080 " ; + "HRULE:0#a0a0a0 ". 
+ $def[1] .= "GPRINT:mb:LAST:\"%6.1lf MByte/s last\" " ; + $def[1] .= "GPRINT:mb:AVERAGE:\"%6.1lf MByte/s avg\" " ; + $def[1] .= "GPRINT:mb:MAX:\"%6.1lf MByte/s max\\n\" "; +} +?> + diff -Nru check-mk-1.2.2p3/check_mk-ibm_svc_nodestats.disk_latency.php check-mk-1.2.6p12/check_mk-ibm_svc_nodestats.disk_latency.php --- check-mk-1.2.2p3/check_mk-ibm_svc_nodestats.disk_latency.php 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/check_mk-ibm_svc_nodestats.disk_latency.php 2015-06-24 09:48:39.000000000 +0000 @@ -0,0 +1,59 @@ + $n) { + $RRD[$n] = "$RRDFILE[$i]:$DS[$i]:MAX"; + $WARN[$n] = $WARN[$i]; + $CRIT[$n] = $CRIT[$i]; + $MIN[$n] = $MIN[$i]; + $MAX[$n] = $MAX[$i]; + } + + $opt[1] = "--vertical-label 'ms' -X0 --title \"$hostname / $servicedesc\" "; + + $def[1] = + "HRULE:0#a0a0a0 ". + # read + "DEF:read=$RRD[read_latency] ". + "AREA:read#40c080:\"Latency for read \" ". + "GPRINT:read:LAST:\"%8.0lf ms last\" ". + "GPRINT:read:AVERAGE:\"%6.0lf ms avg\" ". + "GPRINT:read:MAX:\"%6.0lf ms max\\n\" "; + + # write + $def[1] .= + "DEF:write=$RRD[write_latency] ". + "CDEF:write_neg=write,-1,* ". + "AREA:write_neg#4080c0:\"Latency for write \" ". + "GPRINT:write:LAST:\"%6.0lf ms last\" ". + "GPRINT:write:AVERAGE:\"%6.0lf ms avg\" ". + "GPRINT:write:MAX:\"%6.0lf ms max\\n\" ". + ""; +} + diff -Nru check-mk-1.2.2p3/check_mk-ibm_svc_nodestats.iops.php check-mk-1.2.6p12/check_mk-ibm_svc_nodestats.iops.php --- check-mk-1.2.2p3/check_mk-ibm_svc_nodestats.iops.php 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/check_mk-ibm_svc_nodestats.iops.php 2015-06-24 09:48:39.000000000 +0000 @@ -0,0 +1,82 @@ + $n) { + $RRD[$n] = "$RRDFILE[$i]:$DS[$i]:MAX"; + $WARN[$n] = $WARN[$i]; + $CRIT[$n] = $CRIT[$i]; + $MIN[$n] = $MIN[$i]; + $MAX[$n] = $MAX[$i]; + } + + $parts = explode("_", $servicedesc); + $disk = $parts[2]; + + $opt[1] = "--vertical-label 'IO/s' -X0 --title \"$hostname / $servicedesc\" "; + + $def[1] = + "HRULE:0#a0a0a0 ". + # read + "DEF:read=$RRD[read] ". + "AREA:read#40c080:\"Read \" ". + "GPRINT:read:LAST:\"%8.0lf IO/s last\" ". + "GPRINT:read:AVERAGE:\"%6.0lf IO/s avg\" ". + "GPRINT:read:MAX:\"%6.0lf IO/s max\\n\" "; + + # read average as line in the same graph + if (isset($RRD["read.avg"])) { + $def[1] .= + "DEF:read_avg=${RRD['read.avg']} ". + "LINE:read_avg#202020 "; + } + + # write + $def[1] .= + "DEF:write=$RRD[write] ". + "CDEF:write_neg=write,-1,* ". + "AREA:write_neg#4080c0:\"Write \" ". + "GPRINT:write:LAST:\"%6.0lf IO/s last\" ". + "GPRINT:write:AVERAGE:\"%6.0lf IO/s avg\" ". + "GPRINT:write:MAX:\"%6.0lf IO/s max\\n\" ". + ""; + + # show levels for read + if ($WARN['read']) { + $def[1] .= "HRULE:$WARN[read]#ffd000:\"Warning for read at " . sprintf("%6.1f", $WARN[1]) . " IO/s \" "; + $def[1] .= "HRULE:$CRIT[read]#ff0000:\"Critical for read at " . sprintf("%6.1f", $CRIT[1]) . " IO/s\\n\" "; + } + + # show levels for write + if ($WARN['write']) { + $def[1] .= "HRULE:-$WARN[write]#ffd000:\"Warning for write at " . sprintf("%6.1f", $WARN[2]) . " IO/s \" "; + $def[1] .= "HRULE:-$CRIT[write]#ff0000:\"Critical for write at " . sprintf("%6.1f", $CRIT[2]) . " IO/s\\n\" "; + } + +} + diff -Nru check-mk-1.2.2p3/check_mk-ibm_svc_systemstats.cache.php check-mk-1.2.6p12/check_mk-ibm_svc_systemstats.cache.php --- check-mk-1.2.2p3/check_mk-ibm_svc_systemstats.cache.php 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/check_mk-ibm_svc_systemstats.cache.php 2015-06-24 09:48:39.000000000 +0000 @@ -0,0 +1,37 @@ +MACRO['DISP_HOSTNAME'] . ' / ' . $this->MACRO['DISP_SERVICEDESC'] . 
'" --lower=0 -u 100'; + +$def[1] = "" + . "DEF:write_cache_pc=$RRDFILE[1]:$DS[1]:MAX " + . "DEF:total_cache_pc=$RRDFILE[2]:$DS[2]:MAX " + . "LINE1:write_cache_pc#008000:\"Write Cache Usage \" " + . "GPRINT:write_cache_pc:AVERAGE:\"% 6.0lf%% avg\" " + . "GPRINT:write_cache_pc:LAST:\"% 6.0lf%% last\\n\" " + . "LINE1:total_cache_pc#0000FF:\"Total Cache Usage \" " + . "GPRINT:total_cache_pc:AVERAGE:\"% 6.0lf%% avg\" " + . "GPRINT:total_cache_pc:LAST:\"% 6.0lf%% last\\n\" " + . ""; diff -Nru check-mk-1.2.2p3/check_mk-ibm_svc_systemstats.cpu_util.php check-mk-1.2.6p12/check_mk-ibm_svc_systemstats.cpu_util.php --- check-mk-1.2.2p3/check_mk-ibm_svc_systemstats.cpu_util.php 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/check_mk-ibm_svc_systemstats.cpu_util.php 2015-06-24 09:48:39.000000000 +0000 @@ -0,0 +1,70 @@ + $n) { + $RRD[$n] = "$RRDFILE[$i]:$DS[$i]:MAX"; + $WARN[$n] = $WARN[$i]; + $CRIT[$n] = $CRIT[$i]; + $MIN[$n] = $MIN[$i]; + $MAX[$n] = $MAX[$i]; +} + +$num_threads = $MAX[1]; + +$opt[1] = "--vertical-label 'Utilization %' -l0 -u 100 --title \"$hostname / $servicedesc\" "; + +$def[1] = "DEF:perc=$RRD[util] " + . "CDEF:util=perc,$num_threads,*,100,/ " + ; + +$def[1] .= "AREA:util#60f020:\"Utilization\:\" " + . "LINE:util#50b01a " + . "GPRINT:perc:LAST:\"%.1lf%%\" " + . "GPRINT:perc:MIN:\"min\: %.1lf%%\" " + . "GPRINT:perc:MAX:\"max\: %.1lf%%\\n\" " + ; + + +if (isset($RRD["avg"])) { + $def[1] .= "DEF:aperc=$RRD[avg] ". + "CDEF:avg=aperc,$num_threads,*,100,/ ". + "LINE:avg#004000:\"Averaged\: \" ". + "GPRINT:aperc:LAST:\"%.1lf%%,\" ". + "GPRINT:aperc:MIN:\"min\: %.1lf%%,\" ". + "GPRINT:aperc:MAX:\"max\: %.1lf%%\\n\" ". + ""; +} + +if ($WARN['util']) { + $def[1] .= "HRULE:$WARN[1]#fff000:\"Warn at $WARN[util]% \" " + . "HRULE:$CRIT[1]#ff0000:\"Critical at $CRIT[util]%\\n\" "; +} +else { + $def[1] .= "COMMENT:\"\\n\" "; +} + +?> diff -Nru check-mk-1.2.2p3/check_mk-ibm_svc_systemstats.diskio.php check-mk-1.2.6p12/check_mk-ibm_svc_systemstats.diskio.php --- check-mk-1.2.2p3/check_mk-ibm_svc_systemstats.diskio.php 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/check_mk-ibm_svc_systemstats.diskio.php 2015-09-21 10:59:54.000000000 +0000 @@ -0,0 +1,148 @@ + $n) { + $RRD[$n] = "$RRDFILE[$i]:$DS[$i]:MAX"; + $WARN[$n] = $WARN[$i]; + $CRIT[$n] = $CRIT[$i]; + $MIN[$n] = $MIN[$i]; + $MAX[$n] = $MAX[$i]; + } + + $parts = explode("_", $servicedesc); + $disk = $parts[2]; + + $opt[1] = "--vertical-label 'Throughput (MB/s)' -X0 --title \"Disk throughput $hostname / $disk\" "; + + $def[1] = + "HRULE:0#a0a0a0 ". + # read + "DEF:read=$RRD[read] ". + "CDEF:read_mb=read,1048576,/ ". + "AREA:read_mb#40c080:\"Read \" ". + "GPRINT:read_mb:LAST:\"%8.1lf MB/s last\" ". + "GPRINT:read_mb:AVERAGE:\"%6.1lf MB/s avg\" ". + "GPRINT:read_mb:MAX:\"%6.1lf MB/s max\\n\" "; + + # read average as line in the same graph + if (isset($RRD["read.avg"])) { + $def[1] .= + "DEF:read_avg=${RRD['read.avg']} ". + "CDEF:read_avg_mb=read_avg,1048576,/ ". + "LINE:read_avg_mb#202020 "; + } + + # write + $def[1] .= + "DEF:write=$RRD[write] ". + "CDEF:write_mb=write,1048576,/ ". + "CDEF:write_mb_neg=write_mb,-1,* ". + "AREA:write_mb_neg#4080c0:\"Write \" ". + "GPRINT:write_mb:LAST:\"%6.1lf MB/s last\" ". + "GPRINT:write_mb:AVERAGE:\"%6.1lf MB/s avg\" ". + "GPRINT:write_mb:MAX:\"%6.1lf MB/s max\\n\" ". + ""; + + # show levels for read + if ($WARN['read']) { + $def[1] .= "HRULE:$WARN[read]#ffd000:\"Warning for read at " . sprintf("%6.1f", $WARN[1]) . " MB/s \" "; + $def[1] .= "HRULE:$CRIT[read]#ff0000:\"Critical for read at " . 
sprintf("%6.1f", $CRIT[1]) . " MB/s\\n\" "; + } + + # show levels for write + if ($WARN['write']) { + $def[1] .= "HRULE:-$WARN[write]#ffd000:\"Warning for write at " . sprintf("%6.1f", $WARN[2]) . " MB/s \" "; + $def[1] .= "HRULE:-$CRIT[write]#ff0000:\"Critical for write at " . sprintf("%6.1f", $CRIT[2]) . " MB/s\\n\" "; + } + + # write average + if (isset($DS["write.avg"])) { + $def[1] .= + "DEF:write_avg=${RRD['write.avg']} ". + "CDEF:write_avg_mb=write_avg,1048576,/ ". + "CDEF:write_avg_mb_neg=write_avg_mb,-1,* ". + "LINE:write_avg_mb_neg#202020 "; + } + + # latency + if (isset($RRD["latency"])) { + $opt[] = "--vertical-label 'Latency (ms)' -X0 --title \"Latency $hostname / $disk\" "; + $def[] = "" + . "DEF:latency=$RRD[latency] " + . "AREA:latency#aaccdd:\"Latency\" " + . "LINE:latency#7799aa " + . "GPRINT:latency:LAST:\"%6.1lf ms last\" " + . "GPRINT:latency:AVERAGE:\"%6.1lf ms avg\" " + . "GPRINT:latency:MAX:\"%6.1lf ms max\\n\" " + ; + } + + # IOs per second + if (isset($RRD["ios"])) { + $opt[] = "--vertical-label 'IO Operations / sec' -X0 --title \"IOs/sec $hostname / $disk\" "; + $def[] = "" + . "DEF:ios=$RRD[ios] " + . "AREA:ios#ddccaa:\"ios\" " + . "LINE:ios#aa9977 " + . "GPRINT:ios:LAST:\"%6.1lf/sec last\" " + . "GPRINT:ios:AVERAGE:\"%6.1lf/sec avg\" " + . "GPRINT:ios:MAX:\"%6.1lf/sec max\\n\" " + ; + } + + if (isset($RRD["read_ql"])) { + $opt[] = "--vertical-label 'Queue Length' -X0 -u5 -l-5 --title \"Queue Length $hostname / $disk\" "; + $def[] = "" + . "DEF:read=$RRD[read_ql] " + . "DEF:write=$RRD[write_ql] " + . "CDEF:writen=write,-1,* " + . "HRULE:0#a0a0a0 " + . "AREA:read#669a76 " + . "AREA:writen#517ba5 " + ; + + } + +} + +// legacy version of diskstat +else { + $opt[1] = "--vertical-label 'Througput (MByte/s)' -l0 -u 1 --title \"Disk throughput $hostname / $servicedesc\" "; + + $def[1] = "DEF:kb=$RRDFILE[1]:$DS[1]:AVERAGE " ; + $def[1] .= "CDEF:mb=kb,1024,/ " ; + $def[1] .= "AREA:mb#40c080 " ; + "HRULE:0#a0a0a0 ". + $def[1] .= "GPRINT:mb:LAST:\"%6.1lf MByte/s last\" " ; + $def[1] .= "GPRINT:mb:AVERAGE:\"%6.1lf MByte/s avg\" " ; + $def[1] .= "GPRINT:mb:MAX:\"%6.1lf MByte/s max\\n\" "; +} +?> + diff -Nru check-mk-1.2.2p3/check_mk-ibm_svc_systemstats.disk_latency.php check-mk-1.2.6p12/check_mk-ibm_svc_systemstats.disk_latency.php --- check-mk-1.2.2p3/check_mk-ibm_svc_systemstats.disk_latency.php 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/check_mk-ibm_svc_systemstats.disk_latency.php 2015-06-24 09:48:39.000000000 +0000 @@ -0,0 +1,59 @@ + $n) { + $RRD[$n] = "$RRDFILE[$i]:$DS[$i]:MAX"; + $WARN[$n] = $WARN[$i]; + $CRIT[$n] = $CRIT[$i]; + $MIN[$n] = $MIN[$i]; + $MAX[$n] = $MAX[$i]; + } + + $opt[1] = "--vertical-label 'ms' -X0 --title \"$hostname / $servicedesc\" "; + + $def[1] = + "HRULE:0#a0a0a0 ". + # read + "DEF:read=$RRD[read_latency] ". + "AREA:read#40c080:\"Latency for read \" ". + "GPRINT:read:LAST:\"%8.0lf ms last\" ". + "GPRINT:read:AVERAGE:\"%6.0lf ms avg\" ". + "GPRINT:read:MAX:\"%6.0lf ms max\\n\" "; + + # write + $def[1] .= + "DEF:write=$RRD[write_latency] ". + "CDEF:write_neg=write,-1,* ". + "AREA:write_neg#4080c0:\"Latency for write \" ". + "GPRINT:write:LAST:\"%6.0lf ms last\" ". + "GPRINT:write:AVERAGE:\"%6.0lf ms avg\" ". + "GPRINT:write:MAX:\"%6.0lf ms max\\n\" ". 
+ ""; +} + diff -Nru check-mk-1.2.2p3/check_mk-ibm_svc_systemstats.iops.php check-mk-1.2.6p12/check_mk-ibm_svc_systemstats.iops.php --- check-mk-1.2.2p3/check_mk-ibm_svc_systemstats.iops.php 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/check_mk-ibm_svc_systemstats.iops.php 2015-06-24 09:48:39.000000000 +0000 @@ -0,0 +1,82 @@ + $n) { + $RRD[$n] = "$RRDFILE[$i]:$DS[$i]:MAX"; + $WARN[$n] = $WARN[$i]; + $CRIT[$n] = $CRIT[$i]; + $MIN[$n] = $MIN[$i]; + $MAX[$n] = $MAX[$i]; + } + + $parts = explode("_", $servicedesc); + $disk = $parts[2]; + + $opt[1] = "--vertical-label 'IO/s' -X0 --title \"$hostname / $servicedesc\" "; + + $def[1] = + "HRULE:0#a0a0a0 ". + # read + "DEF:read=$RRD[read] ". + "AREA:read#40c080:\"Read \" ". + "GPRINT:read:LAST:\"%8.0lf IO/s last\" ". + "GPRINT:read:AVERAGE:\"%6.0lf IO/s avg\" ". + "GPRINT:read:MAX:\"%6.0lf IO/s max\\n\" "; + + # read average as line in the same graph + if (isset($RRD["read.avg"])) { + $def[1] .= + "DEF:read_avg=${RRD['read.avg']} ". + "LINE:read_avg#202020 "; + } + + # write + $def[1] .= + "DEF:write=$RRD[write] ". + "CDEF:write_neg=write,-1,* ". + "AREA:write_neg#4080c0:\"Write \" ". + "GPRINT:write:LAST:\"%6.0lf IO/s last\" ". + "GPRINT:write:AVERAGE:\"%6.0lf IO/s avg\" ". + "GPRINT:write:MAX:\"%6.0lf IO/s max\\n\" ". + ""; + + # show levels for read + if ($WARN['read']) { + $def[1] .= "HRULE:$WARN[read]#ffd000:\"Warning for read at " . sprintf("%6.1f", $WARN[1]) . " IO/s \" "; + $def[1] .= "HRULE:$CRIT[read]#ff0000:\"Critical for read at " . sprintf("%6.1f", $CRIT[1]) . " IO/s\\n\" "; + } + + # show levels for write + if ($WARN['write']) { + $def[1] .= "HRULE:-$WARN[write]#ffd000:\"Warning for write at " . sprintf("%6.1f", $WARN[2]) . " IO/s \" "; + $def[1] .= "HRULE:-$CRIT[write]#ff0000:\"Critical for write at " . sprintf("%6.1f", $CRIT[2]) . " IO/s\\n\" "; + } + +} + diff -Nru check-mk-1.2.2p3/check_mk-if64.php check-mk-1.2.6p12/check_mk-if64.php --- check-mk-1.2.2p3/check_mk-if64.php 2013-11-05 09:22:37.000000000 +0000 +++ check-mk-1.2.6p12/check_mk-if64.php 2015-06-24 09:48:39.000000000 +0000 @@ -6,7 +6,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. @@ -40,87 +40,105 @@ # Graph 1: used bandwidth -# Determine if Bit or Byte. -# Change multiplier and labels -$unit = "B"; -$unit_multiplier = 1; -$vertical_label_name = "MByte/sec"; -if (strcmp($MIN[11], "0.0") == 0) { +# Determine if Bit or Byte. Bit is signalled via a min value of 0.0 +# in the 11th performance value. +if (!strcmp($MIN[11], "0.0")) { $unit = "Bit"; $unit_multiplier = 8; - $vertical_label_name = "MBit/sec"; + $base = 1000; // Megabit is 1000 * 1000 } +else { + $unit = "B"; + $unit_multiplier = 1; + $base = 1000; // Megabyte is 1000 * 1000 +} + +# Convert bytes to bits if neccessary $bandwidth = $MAX[1] * $unit_multiplier; $warn = $WARN[1] * $unit_multiplier; $crit = $CRIT[1] * $unit_multiplier; -# Horizontal lines -$mega = 1024.0 * 1024.0; -$mBandwidthH = $bandwidth / $mega; -$mWarnH = $warn / $mega; -$mCritH = $crit / $mega; - -# Break down bandwidth, warn and crit +# Now choose a convenient scale, based on the known bandwith of +# the interface, and break down bandwidth, warn and crit by that +# scale. 
$bwuom = ' '; -$base = 1000; -if($bandwidth > $base * $base * $base) { - $warn /= $base * $base * $base; - $crit /= $base * $base * $base; - $bandwidth /= $base * $base * $base; - $bwuom = 'G'; -} elseif ($bandwidth > $base * $base) { - $warn /= $base * $base; - $crit /= $base * $base; - $bandwidth /= $base * $base; - $bwuom = 'M'; -} elseif ($bandwidth > $base) { - $warn /= $base; - $crit /= $base; - $bandwidth /= $base; - $bwuom = 'k'; -} - -if ($mBandwidthH < 10) - $range = $mBandwidthH; -else - $range = 10.0; +if ($bandwidth > $base * $base * $base) { + $scale = $base * $base * $base; + $bwuom = 'G'; +} +elseif ($bandwidth > $base * $base) { + $scale = $base * $base; + $bwuom = 'M'; +} +elseif ($bandwidth > $base) { + $scale = $base; + $bwuom = 'k'; +} +else { + $scale = 1; + $bwuom = ' '; +} + +$warn /= $scale; +$crit /= $scale; +$bandwidth /= $scale; + +$vertical_label_name = $bwuom . $unit . "/sec"; + +$range = min(10, $bandwidth); + $bandwidthInfo = ""; if ($bandwidth > 0){ - $bandwidthInfo = " at bandwidth ${bwuom}${unit}/s"; + $bandwidthInfo = " at " . sprintf("%.1f", $bandwidth) . " ${bwuom}${unit}/s"; } $ds_name[1] = 'Used bandwidth'; -$opt[1] = "--vertical-label \"$vertical_label_name\" -l -$range -u $range -X0 -b 1024 --title \"Used bandwidth $hostname / $servicedesc $bandwidthInfo\" "; -$def[1] = +$opt[1] = "--vertical-label \"$vertical_label_name\" -l -$range -u $range -X0 -b 1024 --title \"Used bandwidth $hostname / $servicedesc$bandwidthInfo\" "; +$def[1] = "HRULE:0#c0c0c0 "; - if ($mBandwidthH) - $def[1] .= "HRULE:$mBandwidthH#808080:\"Port speed\: " . sprintf("%.1f", $bandwidth) . " ".$bwuom."$unit/s\\n\" ". - "HRULE:-$mBandwidthH#808080: "; - if ($warn) - $def[1] .= "HRULE:$mWarnH#ffff00:\"Warning\: " . sprintf("%6.1f", $warn) . " ".$bwuom."$unit/s\\n\" ". - "HRULE:-$mWarnH#ffff00: "; - if ($crit) - $def[1] .= "HRULE:$mCritH#ff0000:\"Critical\: " . sprintf("%6.1f", $crit) . " ".$bwuom."$unit/s\\n\" ". - "HRULE:-$mCritH#ff0000: "; +if ($bandwidth) + $def[1] .= "HRULE:$bandwidth#808080:\"Port speed\: " . sprintf("%10.1f", $bandwidth) . " ".$bwuom."$unit/s\\n\" ". + "HRULE:-$bandwidth#808080: "; +if ($warn) + $def[1] .= "HRULE:$warn#ffff00:\"Warning\: " . sprintf("%13.1f", $warn) . " ".$bwuom."$unit/s\\n\" ". + "HRULE:-$warn#ffff00: "; +if ($crit) + $def[1] .= "HRULE:$crit#ff0000:\"Critical\: " . sprintf("%13.1f", $crit) . " ".$bwuom."$unit/s\\n\" ". + "HRULE:-$crit#ff0000: "; + + $def[1] .= "". + # incoming + "DEF:inbytes=$RRDFILE[1]:$DS[1]:MAX ". + "CDEF:intraffic=inbytes,$unit_multiplier,* ". + "CDEF:inmb=intraffic,$scale,/ ". + "AREA:inmb#00e060:\"in \" ". + "GPRINT:intraffic:LAST:\"%7.1lf %s$unit/s last\" ". + "GPRINT:intraffic:AVERAGE:\"%7.1lf %s$unit/s avg\" ". + "GPRINT:intraffic:MAX:\"%7.1lf %s$unit/s max\\n\" ". + "VDEF:inperc=intraffic,95,PERCENTNAN ". + "VDEF:inpercmb=inmb,95,PERCENTNAN ". + "LINE:inpercmb#008f00:\"95% percentile\" ". + "GPRINT:inperc:\"%7.1lf %s$unit/s\\n\" ". - $def[1] .= "DEF:inbytes=$RRDFILE[1]:$DS[1]:MAX ". + # outgoing "DEF:outbytes=$RRDFILE[6]:$DS[6]:MAX ". - "CDEF:intraffic=inbytes,$unit_multiplier,* ". "CDEF:outtraffic=outbytes,$unit_multiplier,* ". - "CDEF:inmb=intraffic,1048576,/ ". - "CDEF:outmb=outtraffic,1048576,/ ". + "CDEF:minusouttraffic=outtraffic,-1,* ". + "CDEF:outmb=outtraffic,$scale,/ ". "CDEF:minusoutmb=0,outmb,- ". - "AREA:inmb#00e060:\"in \" ". - "GPRINT:intraffic:LAST:\"%6.1lf %s$unit/s last\" ". - "GPRINT:intraffic:AVERAGE:\"%6.1lf %s$unit/s avg\" ". - "GPRINT:intraffic:MAX:\"%6.1lf %s$unit/s max\\n\" ". 
- "AREA:minusoutmb#0080e0:\"out \" ". - "GPRINT:outtraffic:LAST:\"%6.1lf %s$unit/s last\" ". - "GPRINT:outtraffic:AVERAGE:\"%6.1lf %s$unit/s avg\" ". - "GPRINT:outtraffic:MAX:\"%6.1lf %s$unit/s max\\n\" "; + "AREA:minusoutmb#0080e0:\"out \" ". + "GPRINT:outtraffic:LAST:\"%7.1lf %s$unit/s last\" ". + "GPRINT:outtraffic:AVERAGE:\"%7.1lf %s$unit/s avg\" ". + "GPRINT:outtraffic:MAX:\"%7.1lf %s$unit/s max\\n\" ". + "VDEF:outperc=minusouttraffic,5,PERCENTNAN ". + "VDEF:outpercmb=minusoutmb,5,PERCENTNAN ". + "LINE:outpercmb#00008f:\"95% percentile\" ". + "GPRINT:outperc:\"%7.1lf %s$unit/s\\n\" ". + + ""; if (isset($DS[12])) { - $def[1] .= + $def[1] .= "DEF:inbytesa=$RRDFILE[12]:$DS[12]:MAX ". "DEF:outbytesa=$RRDFILE[13]:$DS[13]:MAX ". "CDEF:intraffica=inbytesa,$unit_multiplier,* ". @@ -142,29 +160,41 @@ $ds_name[2] = 'Packets'; $opt[2] = "--vertical-label \"packets/sec\" --title \"Packets $hostname / $servicedesc\" "; $def[2] = + # ingoing "HRULE:0#c0c0c0 ". "DEF:inu=$RRDFILE[2]:$DS[2]:MAX ". "DEF:innu=$RRDFILE[3]:$DS[3]:MAX ". - "AREA:inu#00ffc0:\"in unicast \" ". - "GPRINT:inu:LAST:\"%7.2lf/s last \" ". - "GPRINT:inu:AVERAGE:\"%7.2lf/s avg \" ". - "GPRINT:inu:MAX:\"%7.2lf/s max\\n\" ". - "AREA:innu#00c080:\"in broadcast/multicast \":STACK ". - "GPRINT:innu:LAST:\"%7.2lf/s last \" ". - "GPRINT:innu:AVERAGE:\"%7.2lf/s avg \" ". - "GPRINT:innu:MAX:\"%7.2lf/s max\\n\" ". + "CDEF:in=inu,innu,+ ". + "AREA:inu#00ffc0:\"in unicast \" ". + "GPRINT:inu:LAST:\"%9.1lf/s last \" ". + "GPRINT:inu:AVERAGE:\"%9.1lf/s avg \" ". + "GPRINT:inu:MAX:\"%9.1lf/s max\\n\" ". + "AREA:innu#00c080:\"in broadcast/multicast \":STACK ". + "GPRINT:innu:LAST:\"%9.1lf/s last \" ". + "GPRINT:innu:AVERAGE:\"%9.1lf/s avg \" ". + "GPRINT:innu:MAX:\"%9.1lf/s max\\n\" ". + "VDEF:inperc=in,95,PERCENTNAN ". + "LINE:inperc#00cf00:\"in 95% percentile \" ". + "GPRINT:inperc:\"%9.1lf/s\\n\" ". + + # outgoing "DEF:outu=$RRDFILE[7]:$DS[7]:MAX ". "DEF:outnu=$RRDFILE[8]:$DS[8]:MAX ". "CDEF:minusoutu=0,outu,- ". "CDEF:minusoutnu=0,outnu,- ". - "AREA:minusoutu#00c0ff:\"out unicast \" ". - "GPRINT:outu:LAST:\"%7.2lf/s last \" ". - "GPRINT:outu:AVERAGE:\"%7.2lf/s avg \" ". - "GPRINT:outu:MAX:\"%7.2lf/s max\\n\" ". - "AREA:minusoutnu#0080c0:\"out broadcast/multicast \":STACK ". - "GPRINT:outnu:LAST:\"%7.2lf/s last \" ". - "GPRINT:outnu:AVERAGE:\"%7.2lf/s avg \" ". - "GPRINT:outnu:MAX:\"%7.2lf/s max\\n\" "; + "CDEF:minusout=minusoutu,minusoutnu,+ ". + "AREA:minusoutu#00c0ff:\"out unicast \" ". + "GPRINT:outu:LAST:\"%9.1lf/s last \" ". + "GPRINT:outu:AVERAGE:\"%9.1lf/s avg \" ". + "GPRINT:outu:MAX:\"%9.1lf/s max\\n\" ". + "AREA:minusoutnu#0080c0:\"out broadcast/multicast\":STACK ". + "GPRINT:outnu:LAST:\"%9.1lf/s last \" ". + "GPRINT:outnu:AVERAGE:\"%9.1lf/s avg \" ". + "GPRINT:outnu:MAX:\"%9.1lf/s max\\n\" ". + "VDEF:outperc=minusout,5,PERCENTNAN ". + "LINE:outperc#0000cf:\"out 95% percentile \" ". + "GPRINT:outperc:\"%9.1lf/s\\n\" ". 
+ ""; # Graph 3: errors and discards $ds_name[3] = 'Errors and discards'; diff -Nru check-mk-1.2.2p3/check_mk-if64_tplink.php check-mk-1.2.6p12/check_mk-if64_tplink.php --- check-mk-1.2.2p3/check_mk-if64_tplink.php 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/check_mk-if64_tplink.php 2015-06-24 09:48:39.000000000 +0000 @@ -0,0 +1,226 @@ + $base * $base * $base) { + $scale = $base * $base * $base; + $bwuom = 'G'; +} +elseif ($bandwidth > $base * $base) { + $scale = $base * $base; + $bwuom = 'M'; +} +elseif ($bandwidth > $base) { + $scale = $base; + $bwuom = 'k'; +} +else { + $scale = 1; + $bwuom = ' '; +} + +$warn /= $scale; +$crit /= $scale; +$bandwidth /= $scale; + +$vertical_label_name = $bwuom . $unit . "/sec"; + +$range = min(10, $bandwidth); + + +$bandwidthInfo = ""; +if ($bandwidth > 0){ + $bandwidthInfo = " at " . sprintf("%.1f", $bandwidth) . " ${bwuom}${unit}/s"; +} +$ds_name[1] = 'Used bandwidth'; +$opt[1] = "--vertical-label \"$vertical_label_name\" -l -$range -u $range -X0 -b 1024 --title \"Used bandwidth $hostname / $servicedesc$bandwidthInfo\" "; +$def[1] = + "HRULE:0#c0c0c0 "; +if ($bandwidth) + $def[1] .= "HRULE:$bandwidth#808080:\"Port speed\: " . sprintf("%10.1f", $bandwidth) . " ".$bwuom."$unit/s\\n\" ". + "HRULE:-$bandwidth#808080: "; +if ($warn) + $def[1] .= "HRULE:$warn#ffff00:\"Warning\: " . sprintf("%13.1f", $warn) . " ".$bwuom."$unit/s\\n\" ". + "HRULE:-$warn#ffff00: "; +if ($crit) + $def[1] .= "HRULE:$crit#ff0000:\"Critical\: " . sprintf("%13.1f", $crit) . " ".$bwuom."$unit/s\\n\" ". + "HRULE:-$crit#ff0000: "; + + $def[1] .= "". + # incoming + "DEF:inbytes=$RRDFILE[1]:$DS[1]:MAX ". + "CDEF:intraffic=inbytes,$unit_multiplier,* ". + "CDEF:inmb=intraffic,$scale,/ ". + "AREA:inmb#00e060:\"in \" ". + "GPRINT:intraffic:LAST:\"%7.1lf %s$unit/s last\" ". + "GPRINT:intraffic:AVERAGE:\"%7.1lf %s$unit/s avg\" ". + "GPRINT:intraffic:MAX:\"%7.1lf %s$unit/s max\\n\" ". + "VDEF:inperc=intraffic,95,PERCENTNAN ". + "VDEF:inpercmb=inmb,95,PERCENTNAN ". + "LINE:inpercmb#008f00:\"95% percentile\" ". + "GPRINT:inperc:\"%7.1lf %s$unit/s\\n\" ". + + # outgoing + "DEF:outbytes=$RRDFILE[6]:$DS[6]:MAX ". + "CDEF:outtraffic=outbytes,$unit_multiplier,* ". + "CDEF:minusouttraffic=outtraffic,-1,* ". + "CDEF:outmb=outtraffic,$scale,/ ". + "CDEF:minusoutmb=0,outmb,- ". + "AREA:minusoutmb#0080e0:\"out \" ". + "GPRINT:outtraffic:LAST:\"%7.1lf %s$unit/s last\" ". + "GPRINT:outtraffic:AVERAGE:\"%7.1lf %s$unit/s avg\" ". + "GPRINT:outtraffic:MAX:\"%7.1lf %s$unit/s max\\n\" ". + "VDEF:outperc=minusouttraffic,5,PERCENTNAN ". + "VDEF:outpercmb=minusoutmb,5,PERCENTNAN ". + "LINE:outpercmb#00008f:\"95% percentile\" ". + "GPRINT:outperc:\"%7.1lf %s$unit/s\\n\" ". + + ""; + +if (isset($DS[12])) { + $def[1] .= + "DEF:inbytesa=$RRDFILE[12]:$DS[12]:MAX ". + "DEF:outbytesa=$RRDFILE[13]:$DS[13]:MAX ". + "CDEF:intraffica=inbytesa,$unit_multiplier,* ". + "CDEF:outtraffica=outbytesa,$unit_multiplier,* ". + "CDEF:inmba=intraffica,1048576,/ ". + "CDEF:outmba=outtraffica,1048576,/ ". + "CDEF:minusoutmba=0,outmba,- ". + "LINE:inmba#00a060:\"in (avg) \" ". + "GPRINT:intraffica:LAST:\"%6.1lf %s$unit/s last\" ". + "GPRINT:intraffica:AVERAGE:\"%6.1lf %s$unit/s avg\" ". + "GPRINT:intraffica:MAX:\"%6.1lf %s$unit/s max\\n\" ". + "LINE:minusoutmba#0060c0:\"out (avg) \" ". + "GPRINT:outtraffica:LAST:\"%6.1lf %s$unit/s last\" ". + "GPRINT:outtraffica:AVERAGE:\"%6.1lf %s$unit/s avg\" ". 
+ "GPRINT:outtraffica:MAX:\"%6.1lf %s$unit/s max\\n\" "; +} + +# Graph 2: packets +$ds_name[2] = 'Packets'; +$opt[2] = "--vertical-label \"packets/sec\" --title \"Packets $hostname / $servicedesc\" "; +$def[2] = + # ingoing + "HRULE:0#c0c0c0 ". + "DEF:inu=$RRDFILE[2]:$DS[2]:MAX ". + "DEF:innu=$RRDFILE[3]:$DS[3]:MAX ". + "CDEF:in=inu,innu,+ ". + "AREA:inu#00ffc0:\"in unicast \" ". + "GPRINT:inu:LAST:\"%9.1lf/s last \" ". + "GPRINT:inu:AVERAGE:\"%9.1lf/s avg \" ". + "GPRINT:inu:MAX:\"%9.1lf/s max\\n\" ". + "AREA:innu#00c080:\"in broadcast/multicast \":STACK ". + "GPRINT:innu:LAST:\"%9.1lf/s last \" ". + "GPRINT:innu:AVERAGE:\"%9.1lf/s avg \" ". + "GPRINT:innu:MAX:\"%9.1lf/s max\\n\" ". + "VDEF:inperc=in,95,PERCENTNAN ". + "LINE:inperc#00cf00:\"in 95% percentile \" ". + "GPRINT:inperc:\"%9.1lf/s\\n\" ". + + # outgoing + "DEF:outu=$RRDFILE[7]:$DS[7]:MAX ". + "DEF:outnu=$RRDFILE[8]:$DS[8]:MAX ". + "CDEF:minusoutu=0,outu,- ". + "CDEF:minusoutnu=0,outnu,- ". + "CDEF:minusout=minusoutu,minusoutnu,+ ". + "AREA:minusoutu#00c0ff:\"out unicast \" ". + "GPRINT:outu:LAST:\"%9.1lf/s last \" ". + "GPRINT:outu:AVERAGE:\"%9.1lf/s avg \" ". + "GPRINT:outu:MAX:\"%9.1lf/s max\\n\" ". + "AREA:minusoutnu#0080c0:\"out broadcast/multicast\":STACK ". + "GPRINT:outnu:LAST:\"%9.1lf/s last \" ". + "GPRINT:outnu:AVERAGE:\"%9.1lf/s avg \" ". + "GPRINT:outnu:MAX:\"%9.1lf/s max\\n\" ". + "VDEF:outperc=minusout,5,PERCENTNAN ". + "LINE:outperc#0000cf:\"out 95% percentile \" ". + "GPRINT:outperc:\"%9.1lf/s\\n\" ". + ""; + +# Graph 3: errors and discards +$ds_name[3] = 'Errors and discards'; +$opt[3] = "--vertical-label \"packets/sec\" -X0 --title \"Problems $hostname / $servicedesc\" "; +$def[3] = + "HRULE:0#c0c0c0 ". + "DEF:inerr=$RRDFILE[5]:$DS[5]:MAX ". + "DEF:indisc=$RRDFILE[4]:$DS[4]:MAX ". + "AREA:inerr#ff0000:\"in errors \" ". + "GPRINT:inerr:LAST:\"%7.2lf/s last \" ". + "GPRINT:inerr:AVERAGE:\"%7.2lf/s avg \" ". + "GPRINT:inerr:MAX:\"%7.2lf/s max\\n\" ". + "AREA:indisc#ff8000:\"in discards \":STACK ". + "GPRINT:indisc:LAST:\"%7.2lf/s last \" ". + "GPRINT:indisc:AVERAGE:\"%7.2lf/s avg \" ". + "GPRINT:indisc:MAX:\"%7.2lf/s max\\n\" ". + "DEF:outerr=$RRDFILE[10]:$DS[10]:MAX ". + "DEF:outdisc=$RRDFILE[9]:$DS[9]:MAX ". + "CDEF:minusouterr=0,outerr,- ". + "CDEF:minusoutdisc=0,outdisc,- ". + "AREA:minusouterr#ff0080:\"out errors \" ". + "GPRINT:outerr:LAST:\"%7.2lf/s last \" ". + "GPRINT:outerr:AVERAGE:\"%7.2lf/s avg \" ". + "GPRINT:outerr:MAX:\"%7.2lf/s max\\n\" ". + "AREA:minusoutdisc#ff8080:\"out discards \":STACK ". + "GPRINT:outdisc:LAST:\"%7.2lf/s last \" ". + "GPRINT:outdisc:AVERAGE:\"%7.2lf/s avg \" ". + "GPRINT:outdisc:MAX:\"%7.2lf/s max\\n\" "; +?> diff -Nru check-mk-1.2.2p3/check_mk-if_lancom.php check-mk-1.2.6p12/check_mk-if_lancom.php --- check-mk-1.2.2p3/check_mk-if_lancom.php 2013-11-05 09:22:37.000000000 +0000 +++ check-mk-1.2.6p12/check_mk-if_lancom.php 2015-06-24 09:48:39.000000000 +0000 @@ -6,7 +6,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. @@ -40,87 +40,105 @@ # Graph 1: used bandwidth -# Determine if Bit or Byte. -# Change multiplier and labels -$unit = "B"; -$unit_multiplier = 1; -$vertical_label_name = "MByte/sec"; -if (strcmp($MIN[11], "0.0") == 0) { +# Determine if Bit or Byte. 
Bit is signalled via a min value of 0.0 +# in the 11th performance value. +if (!strcmp($MIN[11], "0.0")) { $unit = "Bit"; $unit_multiplier = 8; - $vertical_label_name = "MBit/sec"; + $base = 1000; // Megabit is 1000 * 1000 } +else { + $unit = "B"; + $unit_multiplier = 1; + $base = 1000; // Megabyte is 1000 * 1000 +} + +# Convert bytes to bits if neccessary $bandwidth = $MAX[1] * $unit_multiplier; $warn = $WARN[1] * $unit_multiplier; $crit = $CRIT[1] * $unit_multiplier; -# Horizontal lines -$mega = 1024.0 * 1024.0; -$mBandwidthH = $bandwidth / $mega; -$mWarnH = $warn / $mega; -$mCritH = $crit / $mega; - -# Break down bandwidth, warn and crit +# Now choose a convenient scale, based on the known bandwith of +# the interface, and break down bandwidth, warn and crit by that +# scale. $bwuom = ' '; -$base = 1000; -if($bandwidth > $base * $base * $base) { - $warn /= $base * $base * $base; - $crit /= $base * $base * $base; - $bandwidth /= $base * $base * $base; - $bwuom = 'G'; -} elseif ($bandwidth > $base * $base) { - $warn /= $base * $base; - $crit /= $base * $base; - $bandwidth /= $base * $base; - $bwuom = 'M'; -} elseif ($bandwidth > $base) { - $warn /= $base; - $crit /= $base; - $bandwidth /= $base; - $bwuom = 'k'; -} - -if ($mBandwidthH < 10) - $range = $mBandwidthH; -else - $range = 10.0; +if ($bandwidth > $base * $base * $base) { + $scale = $base * $base * $base; + $bwuom = 'G'; +} +elseif ($bandwidth > $base * $base) { + $scale = $base * $base; + $bwuom = 'M'; +} +elseif ($bandwidth > $base) { + $scale = $base; + $bwuom = 'k'; +} +else { + $scale = 1; + $bwuom = ' '; +} + +$warn /= $scale; +$crit /= $scale; +$bandwidth /= $scale; + +$vertical_label_name = $bwuom . $unit . "/sec"; + +$range = min(10, $bandwidth); + $bandwidthInfo = ""; if ($bandwidth > 0){ - $bandwidthInfo = " at bandwidth ${bwuom}${unit}/s"; + $bandwidthInfo = " at " . sprintf("%.1f", $bandwidth) . " ${bwuom}${unit}/s"; } $ds_name[1] = 'Used bandwidth'; -$opt[1] = "--vertical-label \"$vertical_label_name\" -l -$range -u $range -X0 -b 1024 --title \"Used bandwidth $hostname / $servicedesc $bandwidthInfo\" "; -$def[1] = +$opt[1] = "--vertical-label \"$vertical_label_name\" -l -$range -u $range -X0 -b 1024 --title \"Used bandwidth $hostname / $servicedesc$bandwidthInfo\" "; +$def[1] = "HRULE:0#c0c0c0 "; - if ($mBandwidthH) - $def[1] .= "HRULE:$mBandwidthH#808080:\"Port speed\: " . sprintf("%.1f", $bandwidth) . " ".$bwuom."$unit/s\\n\" ". - "HRULE:-$mBandwidthH#808080: "; - if ($warn) - $def[1] .= "HRULE:$mWarnH#ffff00:\"Warning\: " . sprintf("%6.1f", $warn) . " ".$bwuom."$unit/s\\n\" ". - "HRULE:-$mWarnH#ffff00: "; - if ($crit) - $def[1] .= "HRULE:$mCritH#ff0000:\"Critical\: " . sprintf("%6.1f", $crit) . " ".$bwuom."$unit/s\\n\" ". - "HRULE:-$mCritH#ff0000: "; +if ($bandwidth) + $def[1] .= "HRULE:$bandwidth#808080:\"Port speed\: " . sprintf("%10.1f", $bandwidth) . " ".$bwuom."$unit/s\\n\" ". + "HRULE:-$bandwidth#808080: "; +if ($warn) + $def[1] .= "HRULE:$warn#ffff00:\"Warning\: " . sprintf("%13.1f", $warn) . " ".$bwuom."$unit/s\\n\" ". + "HRULE:-$warn#ffff00: "; +if ($crit) + $def[1] .= "HRULE:$crit#ff0000:\"Critical\: " . sprintf("%13.1f", $crit) . " ".$bwuom."$unit/s\\n\" ". + "HRULE:-$crit#ff0000: "; + + $def[1] .= "". + # incoming + "DEF:inbytes=$RRDFILE[1]:$DS[1]:MAX ". + "CDEF:intraffic=inbytes,$unit_multiplier,* ". + "CDEF:inmb=intraffic,$scale,/ ". + "AREA:inmb#00e060:\"in \" ". + "GPRINT:intraffic:LAST:\"%7.1lf %s$unit/s last\" ". + "GPRINT:intraffic:AVERAGE:\"%7.1lf %s$unit/s avg\" ". 
+ "GPRINT:intraffic:MAX:\"%7.1lf %s$unit/s max\\n\" ". + "VDEF:inperc=intraffic,95,PERCENTNAN ". + "VDEF:inpercmb=inmb,95,PERCENTNAN ". + "LINE:inpercmb#008f00:\"95% percentile\" ". + "GPRINT:inperc:\"%7.1lf %s$unit/s\\n\" ". - $def[1] .= "DEF:inbytes=$RRDFILE[1]:$DS[1]:MAX ". + # outgoing "DEF:outbytes=$RRDFILE[6]:$DS[6]:MAX ". - "CDEF:intraffic=inbytes,$unit_multiplier,* ". "CDEF:outtraffic=outbytes,$unit_multiplier,* ". - "CDEF:inmb=intraffic,1048576,/ ". - "CDEF:outmb=outtraffic,1048576,/ ". + "CDEF:minusouttraffic=outtraffic,-1,* ". + "CDEF:outmb=outtraffic,$scale,/ ". "CDEF:minusoutmb=0,outmb,- ". - "AREA:inmb#00e060:\"in \" ". - "GPRINT:intraffic:LAST:\"%6.1lf %s$unit/s last\" ". - "GPRINT:intraffic:AVERAGE:\"%6.1lf %s$unit/s avg\" ". - "GPRINT:intraffic:MAX:\"%6.1lf %s$unit/s max\\n\" ". - "AREA:minusoutmb#0080e0:\"out \" ". - "GPRINT:outtraffic:LAST:\"%6.1lf %s$unit/s last\" ". - "GPRINT:outtraffic:AVERAGE:\"%6.1lf %s$unit/s avg\" ". - "GPRINT:outtraffic:MAX:\"%6.1lf %s$unit/s max\\n\" "; + "AREA:minusoutmb#0080e0:\"out \" ". + "GPRINT:outtraffic:LAST:\"%7.1lf %s$unit/s last\" ". + "GPRINT:outtraffic:AVERAGE:\"%7.1lf %s$unit/s avg\" ". + "GPRINT:outtraffic:MAX:\"%7.1lf %s$unit/s max\\n\" ". + "VDEF:outperc=minusouttraffic,5,PERCENTNAN ". + "VDEF:outpercmb=minusoutmb,5,PERCENTNAN ". + "LINE:outpercmb#00008f:\"95% percentile\" ". + "GPRINT:outperc:\"%7.1lf %s$unit/s\\n\" ". + + ""; if (isset($DS[12])) { - $def[1] .= + $def[1] .= "DEF:inbytesa=$RRDFILE[12]:$DS[12]:MAX ". "DEF:outbytesa=$RRDFILE[13]:$DS[13]:MAX ". "CDEF:intraffica=inbytesa,$unit_multiplier,* ". @@ -142,29 +160,41 @@ $ds_name[2] = 'Packets'; $opt[2] = "--vertical-label \"packets/sec\" --title \"Packets $hostname / $servicedesc\" "; $def[2] = + # ingoing "HRULE:0#c0c0c0 ". "DEF:inu=$RRDFILE[2]:$DS[2]:MAX ". "DEF:innu=$RRDFILE[3]:$DS[3]:MAX ". - "AREA:inu#00ffc0:\"in unicast \" ". - "GPRINT:inu:LAST:\"%7.2lf/s last \" ". - "GPRINT:inu:AVERAGE:\"%7.2lf/s avg \" ". - "GPRINT:inu:MAX:\"%7.2lf/s max\\n\" ". - "AREA:innu#00c080:\"in broadcast/multicast \":STACK ". - "GPRINT:innu:LAST:\"%7.2lf/s last \" ". - "GPRINT:innu:AVERAGE:\"%7.2lf/s avg \" ". - "GPRINT:innu:MAX:\"%7.2lf/s max\\n\" ". + "CDEF:in=inu,innu,+ ". + "AREA:inu#00ffc0:\"in unicast \" ". + "GPRINT:inu:LAST:\"%9.1lf/s last \" ". + "GPRINT:inu:AVERAGE:\"%9.1lf/s avg \" ". + "GPRINT:inu:MAX:\"%9.1lf/s max\\n\" ". + "AREA:innu#00c080:\"in broadcast/multicast \":STACK ". + "GPRINT:innu:LAST:\"%9.1lf/s last \" ". + "GPRINT:innu:AVERAGE:\"%9.1lf/s avg \" ". + "GPRINT:innu:MAX:\"%9.1lf/s max\\n\" ". + "VDEF:inperc=in,95,PERCENTNAN ". + "LINE:inperc#00cf00:\"in 95% percentile \" ". + "GPRINT:inperc:\"%9.1lf/s\\n\" ". + + # outgoing "DEF:outu=$RRDFILE[7]:$DS[7]:MAX ". "DEF:outnu=$RRDFILE[8]:$DS[8]:MAX ". "CDEF:minusoutu=0,outu,- ". "CDEF:minusoutnu=0,outnu,- ". - "AREA:minusoutu#00c0ff:\"out unicast \" ". - "GPRINT:outu:LAST:\"%7.2lf/s last \" ". - "GPRINT:outu:AVERAGE:\"%7.2lf/s avg \" ". - "GPRINT:outu:MAX:\"%7.2lf/s max\\n\" ". - "AREA:minusoutnu#0080c0:\"out broadcast/multicast \":STACK ". - "GPRINT:outnu:LAST:\"%7.2lf/s last \" ". - "GPRINT:outnu:AVERAGE:\"%7.2lf/s avg \" ". - "GPRINT:outnu:MAX:\"%7.2lf/s max\\n\" "; + "CDEF:minusout=minusoutu,minusoutnu,+ ". + "AREA:minusoutu#00c0ff:\"out unicast \" ". + "GPRINT:outu:LAST:\"%9.1lf/s last \" ". + "GPRINT:outu:AVERAGE:\"%9.1lf/s avg \" ". + "GPRINT:outu:MAX:\"%9.1lf/s max\\n\" ". + "AREA:minusoutnu#0080c0:\"out broadcast/multicast\":STACK ". + "GPRINT:outnu:LAST:\"%9.1lf/s last \" ". 
+ "GPRINT:outnu:AVERAGE:\"%9.1lf/s avg \" ". + "GPRINT:outnu:MAX:\"%9.1lf/s max\\n\" ". + "VDEF:outperc=minusout,5,PERCENTNAN ". + "LINE:outperc#0000cf:\"out 95% percentile \" ". + "GPRINT:outperc:\"%9.1lf/s\\n\" ". + ""; # Graph 3: errors and discards $ds_name[3] = 'Errors and discards'; diff -Nru check-mk-1.2.2p3/check_mk-if.php check-mk-1.2.6p12/check_mk-if.php --- check-mk-1.2.2p3/check_mk-if.php 2013-11-05 09:22:37.000000000 +0000 +++ check-mk-1.2.6p12/check_mk-if.php 2015-06-24 09:48:39.000000000 +0000 @@ -6,7 +6,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. @@ -40,87 +40,105 @@ # Graph 1: used bandwidth -# Determine if Bit or Byte. -# Change multiplier and labels -$unit = "B"; -$unit_multiplier = 1; -$vertical_label_name = "MByte/sec"; -if (strcmp($MIN[11], "0.0") == 0) { +# Determine if Bit or Byte. Bit is signalled via a min value of 0.0 +# in the 11th performance value. +if (!strcmp($MIN[11], "0.0")) { $unit = "Bit"; $unit_multiplier = 8; - $vertical_label_name = "MBit/sec"; + $base = 1000; // Megabit is 1000 * 1000 } +else { + $unit = "B"; + $unit_multiplier = 1; + $base = 1000; // Megabyte is 1000 * 1000 +} + +# Convert bytes to bits if neccessary $bandwidth = $MAX[1] * $unit_multiplier; $warn = $WARN[1] * $unit_multiplier; $crit = $CRIT[1] * $unit_multiplier; -# Horizontal lines -$mega = 1024.0 * 1024.0; -$mBandwidthH = $bandwidth / $mega; -$mWarnH = $warn / $mega; -$mCritH = $crit / $mega; - -# Break down bandwidth, warn and crit +# Now choose a convenient scale, based on the known bandwith of +# the interface, and break down bandwidth, warn and crit by that +# scale. $bwuom = ' '; -$base = 1000; -if($bandwidth > $base * $base * $base) { - $warn /= $base * $base * $base; - $crit /= $base * $base * $base; - $bandwidth /= $base * $base * $base; - $bwuom = 'G'; -} elseif ($bandwidth > $base * $base) { - $warn /= $base * $base; - $crit /= $base * $base; - $bandwidth /= $base * $base; - $bwuom = 'M'; -} elseif ($bandwidth > $base) { - $warn /= $base; - $crit /= $base; - $bandwidth /= $base; - $bwuom = 'k'; -} - -if ($mBandwidthH < 10) - $range = $mBandwidthH; -else - $range = 10.0; +if ($bandwidth > $base * $base * $base) { + $scale = $base * $base * $base; + $bwuom = 'G'; +} +elseif ($bandwidth > $base * $base) { + $scale = $base * $base; + $bwuom = 'M'; +} +elseif ($bandwidth > $base) { + $scale = $base; + $bwuom = 'k'; +} +else { + $scale = 1; + $bwuom = ' '; +} + +$warn /= $scale; +$crit /= $scale; +$bandwidth /= $scale; + +$vertical_label_name = $bwuom . $unit . "/sec"; + +$range = min(10, $bandwidth); + $bandwidthInfo = ""; if ($bandwidth > 0){ - $bandwidthInfo = " at bandwidth ${bwuom}${unit}/s"; + $bandwidthInfo = " at " . sprintf("%.1f", $bandwidth) . " ${bwuom}${unit}/s"; } $ds_name[1] = 'Used bandwidth'; -$opt[1] = "--vertical-label \"$vertical_label_name\" -l -$range -u $range -X0 -b 1024 --title \"Used bandwidth $hostname / $servicedesc $bandwidthInfo\" "; -$def[1] = +$opt[1] = "--vertical-label \"$vertical_label_name\" -l -$range -u $range -X0 -b 1024 --title \"Used bandwidth $hostname / $servicedesc$bandwidthInfo\" "; +$def[1] = "HRULE:0#c0c0c0 "; - if ($mBandwidthH) - $def[1] .= "HRULE:$mBandwidthH#808080:\"Port speed\: " . sprintf("%.1f", $bandwidth) . " ".$bwuom."$unit/s\\n\" ". 
- "HRULE:-$mBandwidthH#808080: "; - if ($warn) - $def[1] .= "HRULE:$mWarnH#ffff00:\"Warning\: " . sprintf("%6.1f", $warn) . " ".$bwuom."$unit/s\\n\" ". - "HRULE:-$mWarnH#ffff00: "; - if ($crit) - $def[1] .= "HRULE:$mCritH#ff0000:\"Critical\: " . sprintf("%6.1f", $crit) . " ".$bwuom."$unit/s\\n\" ". - "HRULE:-$mCritH#ff0000: "; +if ($bandwidth) + $def[1] .= "HRULE:$bandwidth#808080:\"Port speed\: " . sprintf("%10.1f", $bandwidth) . " ".$bwuom."$unit/s\\n\" ". + "HRULE:-$bandwidth#808080: "; +if ($warn) + $def[1] .= "HRULE:$warn#ffff00:\"Warning\: " . sprintf("%13.1f", $warn) . " ".$bwuom."$unit/s\\n\" ". + "HRULE:-$warn#ffff00: "; +if ($crit) + $def[1] .= "HRULE:$crit#ff0000:\"Critical\: " . sprintf("%13.1f", $crit) . " ".$bwuom."$unit/s\\n\" ". + "HRULE:-$crit#ff0000: "; + + $def[1] .= "". + # incoming + "DEF:inbytes=$RRDFILE[1]:$DS[1]:MAX ". + "CDEF:intraffic=inbytes,$unit_multiplier,* ". + "CDEF:inmb=intraffic,$scale,/ ". + "AREA:inmb#00e060:\"in \" ". + "GPRINT:intraffic:LAST:\"%7.1lf %s$unit/s last\" ". + "GPRINT:intraffic:AVERAGE:\"%7.1lf %s$unit/s avg\" ". + "GPRINT:intraffic:MAX:\"%7.1lf %s$unit/s max\\n\" ". + "VDEF:inperc=intraffic,95,PERCENTNAN ". + "VDEF:inpercmb=inmb,95,PERCENTNAN ". + "LINE:inpercmb#008f00:\"95% percentile\" ". + "GPRINT:inperc:\"%7.1lf %s$unit/s\\n\" ". - $def[1] .= "DEF:inbytes=$RRDFILE[1]:$DS[1]:MAX ". + # outgoing "DEF:outbytes=$RRDFILE[6]:$DS[6]:MAX ". - "CDEF:intraffic=inbytes,$unit_multiplier,* ". "CDEF:outtraffic=outbytes,$unit_multiplier,* ". - "CDEF:inmb=intraffic,1048576,/ ". - "CDEF:outmb=outtraffic,1048576,/ ". + "CDEF:minusouttraffic=outtraffic,-1,* ". + "CDEF:outmb=outtraffic,$scale,/ ". "CDEF:minusoutmb=0,outmb,- ". - "AREA:inmb#00e060:\"in \" ". - "GPRINT:intraffic:LAST:\"%6.1lf %s$unit/s last\" ". - "GPRINT:intraffic:AVERAGE:\"%6.1lf %s$unit/s avg\" ". - "GPRINT:intraffic:MAX:\"%6.1lf %s$unit/s max\\n\" ". - "AREA:minusoutmb#0080e0:\"out \" ". - "GPRINT:outtraffic:LAST:\"%6.1lf %s$unit/s last\" ". - "GPRINT:outtraffic:AVERAGE:\"%6.1lf %s$unit/s avg\" ". - "GPRINT:outtraffic:MAX:\"%6.1lf %s$unit/s max\\n\" "; + "AREA:minusoutmb#0080e0:\"out \" ". + "GPRINT:outtraffic:LAST:\"%7.1lf %s$unit/s last\" ". + "GPRINT:outtraffic:AVERAGE:\"%7.1lf %s$unit/s avg\" ". + "GPRINT:outtraffic:MAX:\"%7.1lf %s$unit/s max\\n\" ". + "VDEF:outperc=minusouttraffic,5,PERCENTNAN ". + "VDEF:outpercmb=minusoutmb,5,PERCENTNAN ". + "LINE:outpercmb#00008f:\"95% percentile\" ". + "GPRINT:outperc:\"%7.1lf %s$unit/s\\n\" ". + + ""; if (isset($DS[12])) { - $def[1] .= + $def[1] .= "DEF:inbytesa=$RRDFILE[12]:$DS[12]:MAX ". "DEF:outbytesa=$RRDFILE[13]:$DS[13]:MAX ". "CDEF:intraffica=inbytesa,$unit_multiplier,* ". @@ -142,29 +160,41 @@ $ds_name[2] = 'Packets'; $opt[2] = "--vertical-label \"packets/sec\" --title \"Packets $hostname / $servicedesc\" "; $def[2] = + # ingoing "HRULE:0#c0c0c0 ". "DEF:inu=$RRDFILE[2]:$DS[2]:MAX ". "DEF:innu=$RRDFILE[3]:$DS[3]:MAX ". - "AREA:inu#00ffc0:\"in unicast \" ". - "GPRINT:inu:LAST:\"%7.2lf/s last \" ". - "GPRINT:inu:AVERAGE:\"%7.2lf/s avg \" ". - "GPRINT:inu:MAX:\"%7.2lf/s max\\n\" ". - "AREA:innu#00c080:\"in broadcast/multicast \":STACK ". - "GPRINT:innu:LAST:\"%7.2lf/s last \" ". - "GPRINT:innu:AVERAGE:\"%7.2lf/s avg \" ". - "GPRINT:innu:MAX:\"%7.2lf/s max\\n\" ". + "CDEF:in=inu,innu,+ ". + "AREA:inu#00ffc0:\"in unicast \" ". + "GPRINT:inu:LAST:\"%9.1lf/s last \" ". + "GPRINT:inu:AVERAGE:\"%9.1lf/s avg \" ". + "GPRINT:inu:MAX:\"%9.1lf/s max\\n\" ". + "AREA:innu#00c080:\"in broadcast/multicast \":STACK ". 
+ "GPRINT:innu:LAST:\"%9.1lf/s last \" ". + "GPRINT:innu:AVERAGE:\"%9.1lf/s avg \" ". + "GPRINT:innu:MAX:\"%9.1lf/s max\\n\" ". + "VDEF:inperc=in,95,PERCENTNAN ". + "LINE:inperc#00cf00:\"in 95% percentile \" ". + "GPRINT:inperc:\"%9.1lf/s\\n\" ". + + # outgoing "DEF:outu=$RRDFILE[7]:$DS[7]:MAX ". "DEF:outnu=$RRDFILE[8]:$DS[8]:MAX ". "CDEF:minusoutu=0,outu,- ". "CDEF:minusoutnu=0,outnu,- ". - "AREA:minusoutu#00c0ff:\"out unicast \" ". - "GPRINT:outu:LAST:\"%7.2lf/s last \" ". - "GPRINT:outu:AVERAGE:\"%7.2lf/s avg \" ". - "GPRINT:outu:MAX:\"%7.2lf/s max\\n\" ". - "AREA:minusoutnu#0080c0:\"out broadcast/multicast \":STACK ". - "GPRINT:outnu:LAST:\"%7.2lf/s last \" ". - "GPRINT:outnu:AVERAGE:\"%7.2lf/s avg \" ". - "GPRINT:outnu:MAX:\"%7.2lf/s max\\n\" "; + "CDEF:minusout=minusoutu,minusoutnu,+ ". + "AREA:minusoutu#00c0ff:\"out unicast \" ". + "GPRINT:outu:LAST:\"%9.1lf/s last \" ". + "GPRINT:outu:AVERAGE:\"%9.1lf/s avg \" ". + "GPRINT:outu:MAX:\"%9.1lf/s max\\n\" ". + "AREA:minusoutnu#0080c0:\"out broadcast/multicast\":STACK ". + "GPRINT:outnu:LAST:\"%9.1lf/s last \" ". + "GPRINT:outnu:AVERAGE:\"%9.1lf/s avg \" ". + "GPRINT:outnu:MAX:\"%9.1lf/s max\\n\" ". + "VDEF:outperc=minusout,5,PERCENTNAN ". + "LINE:outperc#0000cf:\"out 95% percentile \" ". + "GPRINT:outperc:\"%9.1lf/s\\n\" ". + ""; # Graph 3: errors and discards $ds_name[3] = 'Errors and discards'; diff -Nru check-mk-1.2.2p3/check_mk-innovaphone_cpu.php check-mk-1.2.6p12/check_mk-innovaphone_cpu.php --- check-mk-1.2.2p3/check_mk-innovaphone_cpu.php 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/check_mk-innovaphone_cpu.php 2015-06-24 09:48:39.000000000 +0000 @@ -0,0 +1,80 @@ + $n) { + $RRD[$n] = "$RRDFILE[$i]:$DS[$i]:MAX"; + $RRD_AVG[$n] = "$RRDFILE[$i]:$DS[$i]:AVERAGE"; + $WARN[$n] = $WARN[$i]; + $CRIT[$n] = $CRIT[$i]; + $MIN[$n] = $MIN[$i]; + $MAX[$n] = $MAX[$i]; +} + +$num_threads = $MAX[1]; +$warnthreads = $WARN[1] * $num_threads / 100.0; +$critthreads = $CRIT[1] * $num_threads / 100.0; +$rightscale = 100.0 / $num_threads; + +$opt[1] = "--vertical-label 'Used CPU threads' --right-axis $rightscale:0 --right-axis-format '%4.1lf%%' --right-axis-label 'Utilization %' -l0 -ru $num_threads --title \"CPU Utilization for $hostname ($num_threads CPU threads)\" "; + +$def[1] = "DEF:perc=$RRD_AVG[util] " + . "CDEF:util=perc,$num_threads,*,100,/ " + ; + +$def[1] .= "HRULE:$MAX[util]#0040d0:\"$num_threads CPU Threads\\n\" " + ; + +$def[1] .= "AREA:util#60f020:\"Utilization\:\" " + . "LINE:util#50b01a " + . "GPRINT:perc:LAST:\"%.1lf%%\" " + . "GPRINT:util:LAST:\"(%.1lf Threads) \" " + . "GPRINT:perc:MIN:\"min\: %.1lf%%,\" " + . "GPRINT:util:MIN:\"(%.1lf), \" " + . "GPRINT:perc:MAX:\"max\: %.1lf%%\" " + . "GPRINT:util:MAX:\"(%.1lf)\\n\" " + ; + + +if (isset($RRD_AVG["avg"])) { + $def[1] .= "DEF:aperc=$RRD_AVG[avg] ". + "CDEF:avg=aperc,$num_threads,*,100,/ ". + "LINE:avg#004000:\"Averaged\: \" ". + "GPRINT:aperc:LAST:\"%.1lf%%,\" ". + "GPRINT:aperc:MIN:\"min\: %.1lf%%,\" ". + "GPRINT:aperc:MAX:\"max\: %.1lf%%\\n\" ". + ""; +} + +if ($WARN['util']) { + $def[1] .= "HRULE:$warnthreads#fff000:\"Warn at $WARN[util]% \" " + . 
"HRULE:$critthreads#ff0000:\"Critical at $CRIT[util]%\\n\" "; +} +else { + $def[1] .= "COMMENT:\"\\n\" "; +} + +?> diff -Nru check-mk-1.2.2p3/check_mk-innovaphone_temp.php check-mk-1.2.6p12/check_mk-innovaphone_temp.php --- check-mk-1.2.2p3/check_mk-innovaphone_temp.php 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/check_mk-innovaphone_temp.php 2015-06-24 09:48:39.000000000 +0000 @@ -0,0 +1,38 @@ + diff -Nru check-mk-1.2.2p3/check_mk-ipmi.php check-mk-1.2.6p12/check_mk-ipmi.php --- check-mk-1.2.2p3/check_mk-ipmi.php 2013-11-05 09:22:41.000000000 +0000 +++ check-mk-1.2.6p12/check_mk-ipmi.php 2015-06-24 09:48:39.000000000 +0000 @@ -6,7 +6,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. diff -Nru check-mk-1.2.2p3/check_mk-ipmi_sensors.php check-mk-1.2.6p12/check_mk-ipmi_sensors.php --- check-mk-1.2.2p3/check_mk-ipmi_sensors.php 2013-03-04 11:48:42.000000000 +0000 +++ check-mk-1.2.6p12/check_mk-ipmi_sensors.php 2015-06-24 09:48:39.000000000 +0000 @@ -6,7 +6,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. @@ -27,7 +27,7 @@ $sensorname = implode(" ", $parts); /* This is obsolete. ipmi_sensors does not longer send perfdata - for fans... + for fans... if ($parts[2] == "Fan") { $opt[1] = "--vertical-label 'RPM' -X0 -l0 -u6000 --title \"$sensorname\" "; @@ -52,6 +52,6 @@ else { include("check_mk-local.php"); } - + ?> diff -Nru check-mk-1.2.2p3/check_mk-job.php check-mk-1.2.6p12/check_mk-job.php --- check-mk-1.2.2p3/check_mk-job.php 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/check_mk-job.php 2015-06-24 09:48:39.000000000 +0000 @@ -0,0 +1,121 @@ + $n) { + $RRD[$n] = "$RRDFILE[$i]:$DS[$i]:MAX"; + $WARN[$n] = $WARN[$i]; + $CRIT[$n] = $CRIT[$i]; + $MIN[$n] = $MIN[$i]; + $MAX[$n] = $MAX[$i]; +} + +# +# Excecution time +# + +$ds_name[1] = "Job Duration"; +$opt[1] = "--vertical-label 'Duration (min)' -l0 --title \"Duration for $hostname / $servicedesc\" "; + +$def[1] = "DEF:sec=".$RRD['real_time']." "; + +$def[1] .= "CDEF:total_minutes=sec,60,/ "; + +$def[1] .= "CDEF:days=sec,86400,/,FLOOR "; +$def[1] .= "CDEF:day_rest=sec,86400,% "; +$def[1] .= "CDEF:hours=day_rest,3600,/,FLOOR "; +$def[1] .= "CDEF:hour_rest=day_rest,3600,% "; +$def[1] .= "CDEF:minutes=hour_rest,60,/,FLOOR "; +$def[1] .= "CDEF:seconds=hour_rest,60,% "; + +$def[1] .= "AREA:total_minutes#80f000:\"Duration (Last)\" "; +$def[1] .= "LINE:total_minutes#408000 "; +$def[1] .= "GPRINT:days:LAST:\"%2.0lf days\g\" "; +$def[1] .= "GPRINT:hours:LAST:\"%2.0lf hours\g\" "; +$def[1] .= "GPRINT:minutes:LAST:\"%2.0lf min\g\" "; +$def[1] .= "GPRINT:seconds:LAST:\"%2.2lf sec\" "; + +# +# CPU time +# + +$ds_name[2] = "CPU Time"; +$opt[2] = "--vertical-label 'CPU Time' -l0 -u 100 --title \"CPU Time for $hostname / $servicedesc\" "; +$def[2] = "DEF:user=".$RRD['user_time']." " ; +$def[2] .= "DEF:system=".$RRD['system_time']." " ; +$def[2] .= "CDEF:sum=user,system,+ "; +$def[2] .= "CDEF:idle=100,sum,- "; + +$def[2] .= "AREA:system#ff6000:\"System\" " + . 
"GPRINT:system:LAST:\"%2.1lf%% \" " + . "AREA:user#60f020:\"User\":STACK " + . "GPRINT:user:LAST:\"%2.1lf%% \" " + . "LINE:sum#004080:\"Total\" " + . "GPRINT:sum:LAST:\"%2.1lf%% \\n\" "; + +# +# Disk IO +# + +$ds_name[3] = "Disk IO"; +$opt[3] = "--vertical-label 'Throughput (MB/s)' -X0 --title \"Disk throughput $hostname / $servicedesc\" "; +$def[3] = "HRULE:0#a0a0a0 " + . "DEF:read=".$RRD['reads']." " + . "CDEF:read_mb=read,1048576,/ " + . "AREA:read_mb#40c080:\"Read \" " + . "GPRINT:read_mb:LAST:\"%8.1lf MB/s last\" " + . "GPRINT:read_mb:AVERAGE:\"%6.1lf MB/s avg\" " + . "GPRINT:read_mb:MAX:\"%6.1lf MB/s max\\n\" "; + +$def[3] .= "DEF:write=".$RRD['writes']." " + . "CDEF:write_mb=write,1048576,/ " + . "CDEF:write_mb_neg=write_mb,-1,* " + . "AREA:write_mb_neg#4080c0:\"Write \" " + . "GPRINT:write_mb:LAST:\"%6.1lf MB/s last\" " + . "GPRINT:write_mb:AVERAGE:\"%6.1lf MB/s avg\" " + . "GPRINT:write_mb:MAX:\"%6.1lf MB/s max\\n\" "; + +# +# Context Switches +# + +$ds_name[4] = "Context Switches"; +$opt[4] = " --vertical-label \"Switches / sec\" --title \"Context Switches $hostname / $servicedesc\" "; + +$def[4] = "DEF:sec=".$RRD['real_time']." "; +$def[4] .= "DEF:vol=".$RRD['vol_context_switches']. " "; +$def[4] .= "DEF:invol=".$RRD['invol_context_switches']. " "; +$def[4] .= "CDEF:vol_persec=vol,sec,/ "; +$def[4] .= "CDEF:invol_persec=invol,sec,/ "; + +$def[4] .= "AREA:vol_persec#48C4EC:\"Voluntary\:\" "; +$def[4] .= "LINE1:vol_persec#1598C3:\"\" "; +$def[4] .= "GPRINT:vol_persec:LAST:\" %8.2lf/s last\\n\" "; + +$def[4] .= "AREA:invol_persec#7648EC:\"Involuntary\:\":STACK "; +$def[4] .= "LINE1:0#4D18E4:\"\":STACK "; +$def[4] .= "GPRINT:invol_persec:LAST:\"%8.2lf/s last\\n\" "; +?> diff -Nru check-mk-1.2.2p3/check_mk-jolokia_metrics.gc.php check-mk-1.2.6p12/check_mk-jolokia_metrics.gc.php --- check-mk-1.2.2p3/check_mk-jolokia_metrics.gc.php 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/check_mk-jolokia_metrics.gc.php 2015-06-24 09:48:39.000000000 +0000 @@ -0,0 +1,61 @@ + diff -Nru check-mk-1.2.2p3/check_mk-jolokia_metrics.mem.php check-mk-1.2.6p12/check_mk-jolokia_metrics.mem.php --- check-mk-1.2.2p3/check_mk-jolokia_metrics.mem.php 2013-03-04 11:48:42.000000000 +0000 +++ check-mk-1.2.6p12/check_mk-jolokia_metrics.mem.php 2015-06-24 09:48:39.000000000 +0000 @@ -6,7 +6,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. @@ -31,20 +31,24 @@ . "CDEF:min_nonheap=0,nonheap,- " . "CDEF:total=heap,nonheap,+ " - . "AREA:heap#00c0ff:\"Heap\" " - . "LINE1:$MAX[1]#003077:\"Heap MAX\" "; + . "AREA:heap#00c0ff:\"Heap\" "; +if ($MAX[1]) { + $def[1] .= "LINE1:$MAX[1]#003077:\"Heap MAX\" "; +} if ($CRIT[1]) { $def[1] .= "LINE1:$WARN[1]#a0ad00:\"Heap WARN\" " . "LINE1:$CRIT[1]#ad0000:\"Heap CRIT\" "; } -$def[1] .= "AREA:min_nonheap#3430bf:\"Nonheap\" " - . "LINE1:-$MAX[2]#003233:\"Nonheap MAX \" "; +$def[1] .= "AREA:min_nonheap#3430bf:\"Nonheap\" "; +if ($MAX[2]) { + $def[1] .= "LINE1:-$MAX[2]#003233:\"Nonheap MAX \" "; +} if ($CRIT[2]) { $def[1] .= "LINE1:-$WARN[2]#adfd30:\"Nonheap WARN\" " . 
"LINE1:-$CRIT[2]#ff0080:\"Nonheap CRIT\" "; -} +} $def[1] .= "GPRINT:total:LAST:\"Total %.2lfMB last\" " diff -Nru check-mk-1.2.2p3/check_mk-jolokia_metrics.threads.php check-mk-1.2.6p12/check_mk-jolokia_metrics.threads.php --- check-mk-1.2.2p3/check_mk-jolokia_metrics.threads.php 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/check_mk-jolokia_metrics.threads.php 2015-06-24 09:48:39.000000000 +0000 @@ -0,0 +1,55 @@ + diff -Nru check-mk-1.2.2p3/check_mk-jolokia_metrics.tp.php check-mk-1.2.6p12/check_mk-jolokia_metrics.tp.php --- check-mk-1.2.2p3/check_mk-jolokia_metrics.tp.php 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/check_mk-jolokia_metrics.tp.php 2015-06-24 09:48:39.000000000 +0000 @@ -0,0 +1,63 @@ + diff -Nru check-mk-1.2.2p3/check_mk-jolokia_metrics.uptime.php check-mk-1.2.6p12/check_mk-jolokia_metrics.uptime.php --- check-mk-1.2.2p3/check_mk-jolokia_metrics.uptime.php 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/check_mk-jolokia_metrics.uptime.php 2015-06-24 09:48:39.000000000 +0000 @@ -0,0 +1,36 @@ + diff -Nru check-mk-1.2.2p3/check_mk-juniper_screenos_cpu.php check-mk-1.2.6p12/check_mk-juniper_screenos_cpu.php --- check-mk-1.2.2p3/check_mk-juniper_screenos_cpu.php 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/check_mk-juniper_screenos_cpu.php 2015-06-24 09:48:39.000000000 +0000 @@ -0,0 +1,40 @@ + diff -Nru check-mk-1.2.2p3/check_mk-juniper_screenos_mem.php check-mk-1.2.6p12/check_mk-juniper_screenos_mem.php --- check-mk-1.2.2p3/check_mk-juniper_screenos_mem.php 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/check_mk-juniper_screenos_mem.php 2015-09-21 10:59:54.000000000 +0000 @@ -0,0 +1,35 @@ + diff -Nru check-mk-1.2.2p3/check_mk-juniper_screenos_temp.php check-mk-1.2.6p12/check_mk-juniper_screenos_temp.php --- check-mk-1.2.2p3/check_mk-juniper_screenos_temp.php 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/check_mk-juniper_screenos_temp.php 2015-06-24 09:48:39.000000000 +0000 @@ -0,0 +1,38 @@ + diff -Nru check-mk-1.2.2p3/check_mk-juniper_trpz_mem.php check-mk-1.2.6p12/check_mk-juniper_trpz_mem.php --- check-mk-1.2.2p3/check_mk-juniper_trpz_mem.php 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/check_mk-juniper_trpz_mem.php 2015-09-21 10:59:54.000000000 +0000 @@ -0,0 +1,35 @@ + diff -Nru check-mk-1.2.2p3/check_mk-kernel.php check-mk-1.2.6p12/check_mk-kernel.php --- check-mk-1.2.2p3/check_mk-kernel.php 2013-03-04 11:48:42.000000000 +0000 +++ check-mk-1.2.6p12/check_mk-kernel.php 2015-06-24 09:48:39.000000000 +0000 @@ -6,7 +6,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. diff -Nru check-mk-1.2.2p3/check_mk-kernel.util.php check-mk-1.2.6p12/check_mk-kernel.util.php --- check-mk-1.2.2p3/check_mk-kernel.util.php 2013-03-04 11:48:42.000000000 +0000 +++ check-mk-1.2.6p12/check_mk-kernel.util.php 2015-09-21 10:59:54.000000000 +0000 @@ -6,7 +6,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. @@ -40,24 +40,24 @@ $def[1] .= "" . "COMMENT:Average\: " - . 
"AREA:system#ff6000:\"System\" " - . "GPRINT:system:AVERAGE:\"%2.1lf%% \" " - . "AREA:user#60f020:\"User\":STACK " - . "GPRINT:user:AVERAGE:\"%2.1lf%% \" " - . "AREA:wait#00b0c0:\"$thirdname\":STACK " - . "GPRINT:wait:AVERAGE:\"%2.1lf%% \" " - . "LINE:sum#004080:\"Total\" " - . "GPRINT:sum:AVERAGE:\"%2.1lf%% \\n\" " + . "AREA:system#ff6000:\"System\" " + . "GPRINT:system:AVERAGE:\"%4.1lf%% \" " + . "AREA:user#60f020:\"User\":STACK " + . "GPRINT:user:AVERAGE:\"%4.1lf%% \" " + . "AREA:wait#00b0c0:\"$thirdname\":STACK " + . "GPRINT:wait:AVERAGE:\"%4.1lf%% \" " + . "LINE:sum#004080:\"Total\" " + . "GPRINT:sum:AVERAGE:\"%4.1lf%% \\n\" " . "COMMENT:\"Last\: \" " - . "AREA:system#ff6000:\"System\" " - . "GPRINT:system:LAST:\"%2.1lf%% \" " - . "AREA:user#60f020:\"User\":STACK " - . "GPRINT:user:LAST:\"%2.1lf%% \" " - . "AREA:wait#00b0c0:\"$thirdname\":STACK " - . "GPRINT:wait:LAST:\"%2.1lf%% \" " - . "LINE:sum#004080:\"Total\" " - . "GPRINT:sum:LAST:\"%2.1lf%% \\n\" " + . "AREA:system#ff6000:\"System\" " + . "GPRINT:system:LAST:\"%4.1lf%% \" " + . "AREA:user#60f020:\"User\":STACK " + . "GPRINT:user:LAST:\"%4.1lf%% \" " + . "AREA:wait#00b0c0:\"$thirdname\":STACK " + . "GPRINT:wait:LAST:\"%4.1lf%% \" " + . "LINE:sum#004080:\"Total\" " + . "GPRINT:sum:LAST:\"%4.1lf%% \\n\" " .""; diff -Nru check-mk-1.2.2p3/check_mk-knuerr_rms_humidity.php check-mk-1.2.6p12/check_mk-knuerr_rms_humidity.php --- check-mk-1.2.2p3/check_mk-knuerr_rms_humidity.php 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/check_mk-knuerr_rms_humidity.php 2015-06-24 09:48:39.000000000 +0000 @@ -0,0 +1,38 @@ + diff -Nru check-mk-1.2.2p3/check_mk-knuerr_rms_temp.php check-mk-1.2.6p12/check_mk-knuerr_rms_temp.php --- check-mk-1.2.2p3/check_mk-knuerr_rms_temp.php 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/check_mk-knuerr_rms_temp.php 2015-06-24 09:48:39.000000000 +0000 @@ -0,0 +1,38 @@ + diff -Nru check-mk-1.2.2p3/check_mk-libelle_business_shadow.archive_dir.php check-mk-1.2.6p12/check_mk-libelle_business_shadow.archive_dir.php --- check-mk-1.2.2p3/check_mk-libelle_business_shadow.archive_dir.php 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/check_mk-libelle_business_shadow.archive_dir.php 2015-06-24 09:48:39.000000000 +0000 @@ -0,0 +1,157 @@ + $n) { + $RRD[$n] = "$RRDFILE[$i]:$DS[$i]:MAX"; + $RRD_MIN[$n] = "$RRDFILE[$i]:$DS[$i]:MIN"; + $RRD_AVG[$n] = "$RRDFILE[$i]:$DS[$i]:AVERAGE"; + $WARN[$n] = $WARN[$i]; + $CRIT[$n] = $CRIT[$i]; + $MIN[$n] = $MIN[$i]; + $MAX[$n] = $MAX[$i]; +} + +# RRDtool Options +#$servicedes=$NAGIOS_SERVICEDESC + +$fsname = str_replace("_", "/", substr($servicedesc, 3)); +$fstitle = $fsname; + +# Hack for windows: replace C// with C:\ +if (strlen($fsname) == 3 && substr($fsname, 1, 2) == '//') { + $fsname = $fsname[0] . "\:\\\\"; + $fstitle = $fsname[0] . ":\\"; +} + +$sizegb = sprintf("%.1f", $MAX[1] / 1024.0); +$maxgb = $MAX[1] / 1024.0; +$warngb = $WARN[1] / 1024.0; +$critgb = $CRIT[1] / 1024.0; +$warngbtxt = sprintf("%.1f", $warngb); +$critgbtxt = sprintf("%.1f", $critgb); + +$opt[1] = "--vertical-label GB -l 0 -u $maxgb --title '$hostname: $servicedesc ($sizegb GB)' "; + +# First graph show current filesystem usage +$def[1] = "DEF:mb=$RRDFILE[1]:$DS[1]:MAX "; +$def[1] .= "CDEF:var1=mb,1024,/ "; +$def[1] .= "AREA:var1#00ffc6:\"used space on $fsname\\n\" "; + +# Optional uncommitted usage e.g. for esx hosts +if(isset($RRD['uncommitted'])) { + $def[1] .= "DEF:uncommitted_mb=".$RRD['uncommitted']." 
"; + $def[1] .= "CDEF:uncommitted_gb=uncommitted_mb,1024,/ "; + $def[1] .= "CDEF:total_gb=uncommitted_gb,var1,+ "; +} else { + $def[1] .= "CDEF:total_gb=var1 "; +} + +$def[1] .= "HRULE:$maxgb#003300:\"Size ($sizegb GB) \" "; +$def[1] .= "HRULE:$warngb#ffff00:\"Warning at $warngbtxt GB \" "; +$def[1] .= "HRULE:$critgb#ff0000:\"Critical at $critgbtxt GB \\n\" "; +$def[1] .= "GPRINT:var1:LAST:\"current\: %6.2lf GB\" "; +$def[1] .= "GPRINT:var1:MAX:\"max\: %6.2lf GB \" "; +$def[1] .= "GPRINT:var1:AVERAGE:\"avg\: %6.2lf GB\\n\" "; + +if(isset($RRD['uncommitted'])) { + $def[1] .= "AREA:uncommitted_gb#eeccff:\"Uncommited\":STACK "; + $def[1] .= "GPRINT:uncommitted_gb:MAX:\"%6.2lf GB\l\" "; +} + +$def[1] .= "LINE1:total_gb#226600 "; + +# Second graph is optional and shows trend. The MAX field +# of the third variable contains (size of the filesystem in MB +# / range in hours). From that we can compute the configured range. +if (isset($RRD['growth'])) { + $size_mb_per_hours = floatval($MAX['trend']); // this is size_mb / range(hours) + $size_mb = floatval($MAX[1]); + $hours = 1.0 / ($size_mb_per_hours / $size_mb); + $range = sprintf("%.0fh", $hours); + + // Current growth / shrinking. This value is give as MB / 24 hours. + // Note: This has changed in 1.1.13i3. Prior it was MB / trend_range! + $opt[2] = "--vertical-label '+/- MB / 24h' -l -1 -u 1 -X0 --title '$hostname: Growth of $servicedesc' "; + $def[2] = "DEF:growth_max=${RRD['growth']} "; + $def[2] .= "DEF:growth_min=${RRD_MIN['growth']} "; + $def[2] .= "DEF:trend=${RRD_AVG['trend']} "; + $def[2] .= "CDEF:growth_pos=growth_max,0,MAX "; + $def[2] .= "CDEF:growth_neg=growth_min,0,MIN "; + $def[2] .= "CDEF:growth_minabs=0,growth_min,- "; + $def[2] .= "CDEF:growth=growth_minabs,growth_max,MAX "; + $def[2] .= "HRULE:0#c0c0c0 "; + $def[2] .= "AREA:growth_pos#3060f0:\"Grow\" "; + $def[2] .= "AREA:growth_neg#30f060:\"Shrink \" "; + $def[2] .= "GPRINT:growth:LAST:\"Current\: %+9.2lfMB / 24h\" "; + $def[2] .= "GPRINT:growth:MAX:\"Max\: %+9.2lfMB / 24h\\n\" "; + + // Trend + $opt[3] = "--vertical-label '+/- MB / 24h' -l -1 -u 1 -X0 --title '$hostname: Trend for $servicedesc' "; + $def[3] = "DEF:trend=${RRD_AVG['trend']} "; + $def[3] .= "HRULE:0#c0c0c0 "; + $def[3] .= "LINE1:trend#000000:\"Trend\:\" "; + $def[3] .= "GPRINT:trend:LAST:\"%+7.2lf MB/24h\" "; + if ($WARN['trend']) { + $warn_mb = sprintf("%.2fMB", $WARN['trend'] * $hours / 24.0); + $def[3] .= "LINE1:${WARN['trend']}#ffff00:\"Warn\: $warn_mb / $range\" "; + } + if ($CRIT['trend']) { + $crit_mb = sprintf("%.2fMB", $CRIT['trend'] * $hours / 24.0); + $def[3] .= "LINE1:${CRIT['trend']}#ff0000:\"Crit\: $crit_mb / $range\" "; + } + $def[3] .= "COMMENT:\"\\n\" "; +} + +if (isset($RRD['trend_hoursleft'])) { + // Trend + $opt[4] = "--vertical-label 'Days left' -l -1 -u 365 -X0 --title '$hostname: Days left for $servicedesc' "; + $def[4] = "DEF:hours_left=${RRD_AVG['trend_hoursleft']} "; + $def[4] .= "DEF:hours_left_min=${RRD_MIN['trend_hoursleft']} "; + // negative hours indicate no growth + // the dataset hours_left_isneg stores this info for each point as True/False + $def[4] .= "CDEF:hours_left_isneg=hours_left_min,-1,EQ "; + $def[4] .= "CDEF:hours_left_unmon=hours_left_min,400,0,IF "; + $def[4] .= "CDEF:days_left=hours_left,24,/ "; + $def[4] .= "CDEF:days_left_cap=days_left,400,MIN "; + // Convert negative points to 400 (y-axis cap) + $def[4] .= "CDEF:days_left_cap_positive=hours_left_isneg,400,days_left_cap,IF "; + // The AREA has a rendering problem. 
Points are too far to the right + $def[4] .= "AREA:hours_left_unmon#AA2200: "; + + $def[4] .= "AREA:days_left_cap_positive#22AA44:\"Days left\:\" "; + if ($ACT[4] == -1) + { + $def[4] .= "COMMENT:\"Not growing\" "; + } + else { + $def[4] .= "GPRINT:days_left:LAST:\"%7.2lf days\" "; + } +} + +?> diff -Nru check-mk-1.2.2p3/check_mk-livestatus_status.php check-mk-1.2.6p12/check_mk-livestatus_status.php --- check-mk-1.2.2p3/check_mk-livestatus_status.php 2013-03-04 11:48:42.000000000 +0000 +++ check-mk-1.2.6p12/check_mk-livestatus_status.php 2015-06-24 09:48:39.000000000 +0000 @@ -6,7 +6,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. diff -Nru check-mk-1.2.2p3/check_mk-lnx_if.php check-mk-1.2.6p12/check_mk-lnx_if.php --- check-mk-1.2.2p3/check_mk-lnx_if.php 2013-11-05 09:22:37.000000000 +0000 +++ check-mk-1.2.6p12/check_mk-lnx_if.php 2015-06-24 09:48:39.000000000 +0000 @@ -6,7 +6,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. @@ -40,87 +40,105 @@ # Graph 1: used bandwidth -# Determine if Bit or Byte. -# Change multiplier and labels -$unit = "B"; -$unit_multiplier = 1; -$vertical_label_name = "MByte/sec"; -if (strcmp($MIN[11], "0.0") == 0) { +# Determine if Bit or Byte. Bit is signalled via a min value of 0.0 +# in the 11th performance value. +if (!strcmp($MIN[11], "0.0")) { $unit = "Bit"; $unit_multiplier = 8; - $vertical_label_name = "MBit/sec"; + $base = 1000; // Megabit is 1000 * 1000 } +else { + $unit = "B"; + $unit_multiplier = 1; + $base = 1000; // Megabyte is 1000 * 1000 +} + +# Convert bytes to bits if neccessary $bandwidth = $MAX[1] * $unit_multiplier; $warn = $WARN[1] * $unit_multiplier; $crit = $CRIT[1] * $unit_multiplier; -# Horizontal lines -$mega = 1024.0 * 1024.0; -$mBandwidthH = $bandwidth / $mega; -$mWarnH = $warn / $mega; -$mCritH = $crit / $mega; - -# Break down bandwidth, warn and crit +# Now choose a convenient scale, based on the known bandwith of +# the interface, and break down bandwidth, warn and crit by that +# scale. $bwuom = ' '; -$base = 1000; -if($bandwidth > $base * $base * $base) { - $warn /= $base * $base * $base; - $crit /= $base * $base * $base; - $bandwidth /= $base * $base * $base; - $bwuom = 'G'; -} elseif ($bandwidth > $base * $base) { - $warn /= $base * $base; - $crit /= $base * $base; - $bandwidth /= $base * $base; - $bwuom = 'M'; -} elseif ($bandwidth > $base) { - $warn /= $base; - $crit /= $base; - $bandwidth /= $base; - $bwuom = 'k'; -} - -if ($mBandwidthH < 10) - $range = $mBandwidthH; -else - $range = 10.0; +if ($bandwidth > $base * $base * $base) { + $scale = $base * $base * $base; + $bwuom = 'G'; +} +elseif ($bandwidth > $base * $base) { + $scale = $base * $base; + $bwuom = 'M'; +} +elseif ($bandwidth > $base) { + $scale = $base; + $bwuom = 'k'; +} +else { + $scale = 1; + $bwuom = ' '; +} + +$warn /= $scale; +$crit /= $scale; +$bandwidth /= $scale; + +$vertical_label_name = $bwuom . $unit . 
"/sec"; + +$range = min(10, $bandwidth); + $bandwidthInfo = ""; if ($bandwidth > 0){ - $bandwidthInfo = " at bandwidth ${bwuom}${unit}/s"; + $bandwidthInfo = " at " . sprintf("%.1f", $bandwidth) . " ${bwuom}${unit}/s"; } $ds_name[1] = 'Used bandwidth'; -$opt[1] = "--vertical-label \"$vertical_label_name\" -l -$range -u $range -X0 -b 1024 --title \"Used bandwidth $hostname / $servicedesc $bandwidthInfo\" "; -$def[1] = +$opt[1] = "--vertical-label \"$vertical_label_name\" -l -$range -u $range -X0 -b 1024 --title \"Used bandwidth $hostname / $servicedesc$bandwidthInfo\" "; +$def[1] = "HRULE:0#c0c0c0 "; - if ($mBandwidthH) - $def[1] .= "HRULE:$mBandwidthH#808080:\"Port speed\: " . sprintf("%.1f", $bandwidth) . " ".$bwuom."$unit/s\\n\" ". - "HRULE:-$mBandwidthH#808080: "; - if ($warn) - $def[1] .= "HRULE:$mWarnH#ffff00:\"Warning\: " . sprintf("%6.1f", $warn) . " ".$bwuom."$unit/s\\n\" ". - "HRULE:-$mWarnH#ffff00: "; - if ($crit) - $def[1] .= "HRULE:$mCritH#ff0000:\"Critical\: " . sprintf("%6.1f", $crit) . " ".$bwuom."$unit/s\\n\" ". - "HRULE:-$mCritH#ff0000: "; +if ($bandwidth) + $def[1] .= "HRULE:$bandwidth#808080:\"Port speed\: " . sprintf("%10.1f", $bandwidth) . " ".$bwuom."$unit/s\\n\" ". + "HRULE:-$bandwidth#808080: "; +if ($warn) + $def[1] .= "HRULE:$warn#ffff00:\"Warning\: " . sprintf("%13.1f", $warn) . " ".$bwuom."$unit/s\\n\" ". + "HRULE:-$warn#ffff00: "; +if ($crit) + $def[1] .= "HRULE:$crit#ff0000:\"Critical\: " . sprintf("%13.1f", $crit) . " ".$bwuom."$unit/s\\n\" ". + "HRULE:-$crit#ff0000: "; + + $def[1] .= "". + # incoming + "DEF:inbytes=$RRDFILE[1]:$DS[1]:MAX ". + "CDEF:intraffic=inbytes,$unit_multiplier,* ". + "CDEF:inmb=intraffic,$scale,/ ". + "AREA:inmb#00e060:\"in \" ". + "GPRINT:intraffic:LAST:\"%7.1lf %s$unit/s last\" ". + "GPRINT:intraffic:AVERAGE:\"%7.1lf %s$unit/s avg\" ". + "GPRINT:intraffic:MAX:\"%7.1lf %s$unit/s max\\n\" ". + "VDEF:inperc=intraffic,95,PERCENTNAN ". + "VDEF:inpercmb=inmb,95,PERCENTNAN ". + "LINE:inpercmb#008f00:\"95% percentile\" ". + "GPRINT:inperc:\"%7.1lf %s$unit/s\\n\" ". - $def[1] .= "DEF:inbytes=$RRDFILE[1]:$DS[1]:MAX ". + # outgoing "DEF:outbytes=$RRDFILE[6]:$DS[6]:MAX ". - "CDEF:intraffic=inbytes,$unit_multiplier,* ". "CDEF:outtraffic=outbytes,$unit_multiplier,* ". - "CDEF:inmb=intraffic,1048576,/ ". - "CDEF:outmb=outtraffic,1048576,/ ". + "CDEF:minusouttraffic=outtraffic,-1,* ". + "CDEF:outmb=outtraffic,$scale,/ ". "CDEF:minusoutmb=0,outmb,- ". - "AREA:inmb#00e060:\"in \" ". - "GPRINT:intraffic:LAST:\"%6.1lf %s$unit/s last\" ". - "GPRINT:intraffic:AVERAGE:\"%6.1lf %s$unit/s avg\" ". - "GPRINT:intraffic:MAX:\"%6.1lf %s$unit/s max\\n\" ". - "AREA:minusoutmb#0080e0:\"out \" ". - "GPRINT:outtraffic:LAST:\"%6.1lf %s$unit/s last\" ". - "GPRINT:outtraffic:AVERAGE:\"%6.1lf %s$unit/s avg\" ". - "GPRINT:outtraffic:MAX:\"%6.1lf %s$unit/s max\\n\" "; + "AREA:minusoutmb#0080e0:\"out \" ". + "GPRINT:outtraffic:LAST:\"%7.1lf %s$unit/s last\" ". + "GPRINT:outtraffic:AVERAGE:\"%7.1lf %s$unit/s avg\" ". + "GPRINT:outtraffic:MAX:\"%7.1lf %s$unit/s max\\n\" ". + "VDEF:outperc=minusouttraffic,5,PERCENTNAN ". + "VDEF:outpercmb=minusoutmb,5,PERCENTNAN ". + "LINE:outpercmb#00008f:\"95% percentile\" ". + "GPRINT:outperc:\"%7.1lf %s$unit/s\\n\" ". + + ""; if (isset($DS[12])) { - $def[1] .= + $def[1] .= "DEF:inbytesa=$RRDFILE[12]:$DS[12]:MAX ". "DEF:outbytesa=$RRDFILE[13]:$DS[13]:MAX ". "CDEF:intraffica=inbytesa,$unit_multiplier,* ". 
@@ -142,29 +160,41 @@ $ds_name[2] = 'Packets'; $opt[2] = "--vertical-label \"packets/sec\" --title \"Packets $hostname / $servicedesc\" "; $def[2] = + # ingoing "HRULE:0#c0c0c0 ". "DEF:inu=$RRDFILE[2]:$DS[2]:MAX ". "DEF:innu=$RRDFILE[3]:$DS[3]:MAX ". - "AREA:inu#00ffc0:\"in unicast \" ". - "GPRINT:inu:LAST:\"%7.2lf/s last \" ". - "GPRINT:inu:AVERAGE:\"%7.2lf/s avg \" ". - "GPRINT:inu:MAX:\"%7.2lf/s max\\n\" ". - "AREA:innu#00c080:\"in broadcast/multicast \":STACK ". - "GPRINT:innu:LAST:\"%7.2lf/s last \" ". - "GPRINT:innu:AVERAGE:\"%7.2lf/s avg \" ". - "GPRINT:innu:MAX:\"%7.2lf/s max\\n\" ". + "CDEF:in=inu,innu,+ ". + "AREA:inu#00ffc0:\"in unicast \" ". + "GPRINT:inu:LAST:\"%9.1lf/s last \" ". + "GPRINT:inu:AVERAGE:\"%9.1lf/s avg \" ". + "GPRINT:inu:MAX:\"%9.1lf/s max\\n\" ". + "AREA:innu#00c080:\"in broadcast/multicast \":STACK ". + "GPRINT:innu:LAST:\"%9.1lf/s last \" ". + "GPRINT:innu:AVERAGE:\"%9.1lf/s avg \" ". + "GPRINT:innu:MAX:\"%9.1lf/s max\\n\" ". + "VDEF:inperc=in,95,PERCENTNAN ". + "LINE:inperc#00cf00:\"in 95% percentile \" ". + "GPRINT:inperc:\"%9.1lf/s\\n\" ". + + # outgoing "DEF:outu=$RRDFILE[7]:$DS[7]:MAX ". "DEF:outnu=$RRDFILE[8]:$DS[8]:MAX ". "CDEF:minusoutu=0,outu,- ". "CDEF:minusoutnu=0,outnu,- ". - "AREA:minusoutu#00c0ff:\"out unicast \" ". - "GPRINT:outu:LAST:\"%7.2lf/s last \" ". - "GPRINT:outu:AVERAGE:\"%7.2lf/s avg \" ". - "GPRINT:outu:MAX:\"%7.2lf/s max\\n\" ". - "AREA:minusoutnu#0080c0:\"out broadcast/multicast \":STACK ". - "GPRINT:outnu:LAST:\"%7.2lf/s last \" ". - "GPRINT:outnu:AVERAGE:\"%7.2lf/s avg \" ". - "GPRINT:outnu:MAX:\"%7.2lf/s max\\n\" "; + "CDEF:minusout=minusoutu,minusoutnu,+ ". + "AREA:minusoutu#00c0ff:\"out unicast \" ". + "GPRINT:outu:LAST:\"%9.1lf/s last \" ". + "GPRINT:outu:AVERAGE:\"%9.1lf/s avg \" ". + "GPRINT:outu:MAX:\"%9.1lf/s max\\n\" ". + "AREA:minusoutnu#0080c0:\"out broadcast/multicast\":STACK ". + "GPRINT:outnu:LAST:\"%9.1lf/s last \" ". + "GPRINT:outnu:AVERAGE:\"%9.1lf/s avg \" ". + "GPRINT:outnu:MAX:\"%9.1lf/s max\\n\" ". + "VDEF:outperc=minusout,5,PERCENTNAN ". + "LINE:outperc#0000cf:\"out 95% percentile \" ". + "GPRINT:outperc:\"%9.1lf/s\\n\" ". + ""; # Graph 3: errors and discards $ds_name[3] = 'Errors and discards'; diff -Nru check-mk-1.2.2p3/check_mk-local.php check-mk-1.2.6p12/check_mk-local.php --- check-mk-1.2.2p3/check_mk-local.php 2013-03-04 11:48:42.000000000 +0000 +++ check-mk-1.2.6p12/check_mk-local.php 2015-06-24 09:48:39.000000000 +0000 @@ -6,7 +6,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. @@ -53,10 +53,10 @@ foreach ($RRDFILE as $i => $RRD) { $ii = $i % 8; $name = $NAME[$i]; - $def[$i] = "DEF:cnt=$RRDFILE[$i]:$DS[$i]:MAX "; - $def[$i] .= "AREA:cnt#$area_colors[$ii]:\"$name\" "; - $def[$i] .= "LINE1:cnt#$line_colors[$ii]: "; - + $def[$i] = "DEF:cnt=$RRDFILE[$i]:$DS[$i]:MAX "; + $def[$i] .= "AREA:cnt#$area_colors[$ii]:\"$name\" "; + $def[$i] .= "LINE1:cnt#$line_colors[$ii]: "; + $upper = ""; $lower = " -l 0"; if ($WARN[$i] != "") { @@ -73,7 +73,7 @@ $upper = " -u" . 
$MAX[$i]; $def[$i] .= "HRULE:$MAX[$i]#0000b0:\"Upper limit\" "; } - + $opt[$i] = "$lower $upper --title '$hostname: $servicedesc - $name' "; $def[$i] .= "GPRINT:cnt:LAST:\"current\: %6.2lf\" "; $def[$i] .= "GPRINT:cnt:MAX:\"max\: %6.2lf\" "; diff -Nru check-mk-1.2.2p3/check_mk-lparstat_aix.cpu_util.php check-mk-1.2.6p12/check_mk-lparstat_aix.cpu_util.php --- check-mk-1.2.2p3/check_mk-lparstat_aix.cpu_util.php 2013-03-04 11:48:42.000000000 +0000 +++ check-mk-1.2.6p12/check_mk-lparstat_aix.cpu_util.php 2015-09-21 10:59:54.000000000 +0000 @@ -6,7 +6,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. @@ -40,24 +40,24 @@ $def[1] .= "" . "COMMENT:Average\: " - . "AREA:system#ff6000:\"System\" " - . "GPRINT:system:AVERAGE:\"%2.1lf%% \" " - . "AREA:user#60f020:\"User\":STACK " - . "GPRINT:user:AVERAGE:\"%2.1lf%% \" " - . "AREA:wait#00b0c0:\"$thirdname\":STACK " - . "GPRINT:wait:AVERAGE:\"%2.1lf%% \" " - . "LINE:sum#004080:\"Total\" " - . "GPRINT:sum:AVERAGE:\"%2.1lf%% \\n\" " + . "AREA:system#ff6000:\"System\" " + . "GPRINT:system:AVERAGE:\"%4.1lf%% \" " + . "AREA:user#60f020:\"User\":STACK " + . "GPRINT:user:AVERAGE:\"%4.1lf%% \" " + . "AREA:wait#00b0c0:\"$thirdname\":STACK " + . "GPRINT:wait:AVERAGE:\"%4.1lf%% \" " + . "LINE:sum#004080:\"Total\" " + . "GPRINT:sum:AVERAGE:\"%4.1lf%% \\n\" " . "COMMENT:\"Last\: \" " - . "AREA:system#ff6000:\"System\" " - . "GPRINT:system:LAST:\"%2.1lf%% \" " - . "AREA:user#60f020:\"User\":STACK " - . "GPRINT:user:LAST:\"%2.1lf%% \" " - . "AREA:wait#00b0c0:\"$thirdname\":STACK " - . "GPRINT:wait:LAST:\"%2.1lf%% \" " - . "LINE:sum#004080:\"Total\" " - . "GPRINT:sum:LAST:\"%2.1lf%% \\n\" " + . "AREA:system#ff6000:\"System\" " + . "GPRINT:system:LAST:\"%4.1lf%% \" " + . "AREA:user#60f020:\"User\":STACK " + . "GPRINT:user:LAST:\"%4.1lf%% \" " + . "AREA:wait#00b0c0:\"$thirdname\":STACK " + . "GPRINT:wait:LAST:\"%4.1lf%% \" " + . "LINE:sum#004080:\"Total\" " + . "GPRINT:sum:LAST:\"%4.1lf%% \\n\" " .""; diff -Nru check-mk-1.2.2p3/check_mk-lparstat_aix.php check-mk-1.2.6p12/check_mk-lparstat_aix.php --- check-mk-1.2.2p3/check_mk-lparstat_aix.php 2013-03-04 11:48:42.000000000 +0000 +++ check-mk-1.2.6p12/check_mk-lparstat_aix.php 2015-06-24 09:48:39.000000000 +0000 @@ -6,7 +6,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. diff -Nru check-mk-1.2.2p3/check_mk-mcdata_fcport.php check-mk-1.2.6p12/check_mk-mcdata_fcport.php --- check-mk-1.2.2p3/check_mk-mcdata_fcport.php 2013-11-05 09:22:37.000000000 +0000 +++ check-mk-1.2.6p12/check_mk-mcdata_fcport.php 2015-06-24 09:48:39.000000000 +0000 @@ -6,7 +6,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. @@ -40,87 +40,105 @@ # Graph 1: used bandwidth -# Determine if Bit or Byte. 
-# Change multiplier and labels -$unit = "B"; -$unit_multiplier = 1; -$vertical_label_name = "MByte/sec"; -if (strcmp($MIN[11], "0.0") == 0) { +# Determine if Bit or Byte. Bit is signalled via a min value of 0.0 +# in the 11th performance value. +if (!strcmp($MIN[11], "0.0")) { $unit = "Bit"; $unit_multiplier = 8; - $vertical_label_name = "MBit/sec"; + $base = 1000; // Megabit is 1000 * 1000 } +else { + $unit = "B"; + $unit_multiplier = 1; + $base = 1000; // Megabyte is 1000 * 1000 +} + +# Convert bytes to bits if neccessary $bandwidth = $MAX[1] * $unit_multiplier; $warn = $WARN[1] * $unit_multiplier; $crit = $CRIT[1] * $unit_multiplier; -# Horizontal lines -$mega = 1024.0 * 1024.0; -$mBandwidthH = $bandwidth / $mega; -$mWarnH = $warn / $mega; -$mCritH = $crit / $mega; - -# Break down bandwidth, warn and crit +# Now choose a convenient scale, based on the known bandwith of +# the interface, and break down bandwidth, warn and crit by that +# scale. $bwuom = ' '; -$base = 1000; -if($bandwidth > $base * $base * $base) { - $warn /= $base * $base * $base; - $crit /= $base * $base * $base; - $bandwidth /= $base * $base * $base; - $bwuom = 'G'; -} elseif ($bandwidth > $base * $base) { - $warn /= $base * $base; - $crit /= $base * $base; - $bandwidth /= $base * $base; - $bwuom = 'M'; -} elseif ($bandwidth > $base) { - $warn /= $base; - $crit /= $base; - $bandwidth /= $base; - $bwuom = 'k'; -} - -if ($mBandwidthH < 10) - $range = $mBandwidthH; -else - $range = 10.0; +if ($bandwidth > $base * $base * $base) { + $scale = $base * $base * $base; + $bwuom = 'G'; +} +elseif ($bandwidth > $base * $base) { + $scale = $base * $base; + $bwuom = 'M'; +} +elseif ($bandwidth > $base) { + $scale = $base; + $bwuom = 'k'; +} +else { + $scale = 1; + $bwuom = ' '; +} + +$warn /= $scale; +$crit /= $scale; +$bandwidth /= $scale; + +$vertical_label_name = $bwuom . $unit . "/sec"; + +$range = min(10, $bandwidth); + $bandwidthInfo = ""; if ($bandwidth > 0){ - $bandwidthInfo = " at bandwidth ${bwuom}${unit}/s"; + $bandwidthInfo = " at " . sprintf("%.1f", $bandwidth) . " ${bwuom}${unit}/s"; } $ds_name[1] = 'Used bandwidth'; -$opt[1] = "--vertical-label \"$vertical_label_name\" -l -$range -u $range -X0 -b 1024 --title \"Used bandwidth $hostname / $servicedesc $bandwidthInfo\" "; -$def[1] = +$opt[1] = "--vertical-label \"$vertical_label_name\" -l -$range -u $range -X0 -b 1024 --title \"Used bandwidth $hostname / $servicedesc$bandwidthInfo\" "; +$def[1] = "HRULE:0#c0c0c0 "; - if ($mBandwidthH) - $def[1] .= "HRULE:$mBandwidthH#808080:\"Port speed\: " . sprintf("%.1f", $bandwidth) . " ".$bwuom."$unit/s\\n\" ". - "HRULE:-$mBandwidthH#808080: "; - if ($warn) - $def[1] .= "HRULE:$mWarnH#ffff00:\"Warning\: " . sprintf("%6.1f", $warn) . " ".$bwuom."$unit/s\\n\" ". - "HRULE:-$mWarnH#ffff00: "; - if ($crit) - $def[1] .= "HRULE:$mCritH#ff0000:\"Critical\: " . sprintf("%6.1f", $crit) . " ".$bwuom."$unit/s\\n\" ". - "HRULE:-$mCritH#ff0000: "; +if ($bandwidth) + $def[1] .= "HRULE:$bandwidth#808080:\"Port speed\: " . sprintf("%10.1f", $bandwidth) . " ".$bwuom."$unit/s\\n\" ". + "HRULE:-$bandwidth#808080: "; +if ($warn) + $def[1] .= "HRULE:$warn#ffff00:\"Warning\: " . sprintf("%13.1f", $warn) . " ".$bwuom."$unit/s\\n\" ". + "HRULE:-$warn#ffff00: "; +if ($crit) + $def[1] .= "HRULE:$crit#ff0000:\"Critical\: " . sprintf("%13.1f", $crit) . " ".$bwuom."$unit/s\\n\" ". + "HRULE:-$crit#ff0000: "; + + $def[1] .= "". + # incoming + "DEF:inbytes=$RRDFILE[1]:$DS[1]:MAX ". + "CDEF:intraffic=inbytes,$unit_multiplier,* ". 
+ "CDEF:inmb=intraffic,$scale,/ ". + "AREA:inmb#00e060:\"in \" ". + "GPRINT:intraffic:LAST:\"%7.1lf %s$unit/s last\" ". + "GPRINT:intraffic:AVERAGE:\"%7.1lf %s$unit/s avg\" ". + "GPRINT:intraffic:MAX:\"%7.1lf %s$unit/s max\\n\" ". + "VDEF:inperc=intraffic,95,PERCENTNAN ". + "VDEF:inpercmb=inmb,95,PERCENTNAN ". + "LINE:inpercmb#008f00:\"95% percentile\" ". + "GPRINT:inperc:\"%7.1lf %s$unit/s\\n\" ". - $def[1] .= "DEF:inbytes=$RRDFILE[1]:$DS[1]:MAX ". + # outgoing "DEF:outbytes=$RRDFILE[6]:$DS[6]:MAX ". - "CDEF:intraffic=inbytes,$unit_multiplier,* ". "CDEF:outtraffic=outbytes,$unit_multiplier,* ". - "CDEF:inmb=intraffic,1048576,/ ". - "CDEF:outmb=outtraffic,1048576,/ ". + "CDEF:minusouttraffic=outtraffic,-1,* ". + "CDEF:outmb=outtraffic,$scale,/ ". "CDEF:minusoutmb=0,outmb,- ". - "AREA:inmb#00e060:\"in \" ". - "GPRINT:intraffic:LAST:\"%6.1lf %s$unit/s last\" ". - "GPRINT:intraffic:AVERAGE:\"%6.1lf %s$unit/s avg\" ". - "GPRINT:intraffic:MAX:\"%6.1lf %s$unit/s max\\n\" ". - "AREA:minusoutmb#0080e0:\"out \" ". - "GPRINT:outtraffic:LAST:\"%6.1lf %s$unit/s last\" ". - "GPRINT:outtraffic:AVERAGE:\"%6.1lf %s$unit/s avg\" ". - "GPRINT:outtraffic:MAX:\"%6.1lf %s$unit/s max\\n\" "; + "AREA:minusoutmb#0080e0:\"out \" ". + "GPRINT:outtraffic:LAST:\"%7.1lf %s$unit/s last\" ". + "GPRINT:outtraffic:AVERAGE:\"%7.1lf %s$unit/s avg\" ". + "GPRINT:outtraffic:MAX:\"%7.1lf %s$unit/s max\\n\" ". + "VDEF:outperc=minusouttraffic,5,PERCENTNAN ". + "VDEF:outpercmb=minusoutmb,5,PERCENTNAN ". + "LINE:outpercmb#00008f:\"95% percentile\" ". + "GPRINT:outperc:\"%7.1lf %s$unit/s\\n\" ". + + ""; if (isset($DS[12])) { - $def[1] .= + $def[1] .= "DEF:inbytesa=$RRDFILE[12]:$DS[12]:MAX ". "DEF:outbytesa=$RRDFILE[13]:$DS[13]:MAX ". "CDEF:intraffica=inbytesa,$unit_multiplier,* ". @@ -142,29 +160,41 @@ $ds_name[2] = 'Packets'; $opt[2] = "--vertical-label \"packets/sec\" --title \"Packets $hostname / $servicedesc\" "; $def[2] = + # ingoing "HRULE:0#c0c0c0 ". "DEF:inu=$RRDFILE[2]:$DS[2]:MAX ". "DEF:innu=$RRDFILE[3]:$DS[3]:MAX ". - "AREA:inu#00ffc0:\"in unicast \" ". - "GPRINT:inu:LAST:\"%7.2lf/s last \" ". - "GPRINT:inu:AVERAGE:\"%7.2lf/s avg \" ". - "GPRINT:inu:MAX:\"%7.2lf/s max\\n\" ". - "AREA:innu#00c080:\"in broadcast/multicast \":STACK ". - "GPRINT:innu:LAST:\"%7.2lf/s last \" ". - "GPRINT:innu:AVERAGE:\"%7.2lf/s avg \" ". - "GPRINT:innu:MAX:\"%7.2lf/s max\\n\" ". + "CDEF:in=inu,innu,+ ". + "AREA:inu#00ffc0:\"in unicast \" ". + "GPRINT:inu:LAST:\"%9.1lf/s last \" ". + "GPRINT:inu:AVERAGE:\"%9.1lf/s avg \" ". + "GPRINT:inu:MAX:\"%9.1lf/s max\\n\" ". + "AREA:innu#00c080:\"in broadcast/multicast \":STACK ". + "GPRINT:innu:LAST:\"%9.1lf/s last \" ". + "GPRINT:innu:AVERAGE:\"%9.1lf/s avg \" ". + "GPRINT:innu:MAX:\"%9.1lf/s max\\n\" ". + "VDEF:inperc=in,95,PERCENTNAN ". + "LINE:inperc#00cf00:\"in 95% percentile \" ". + "GPRINT:inperc:\"%9.1lf/s\\n\" ". + + # outgoing "DEF:outu=$RRDFILE[7]:$DS[7]:MAX ". "DEF:outnu=$RRDFILE[8]:$DS[8]:MAX ". "CDEF:minusoutu=0,outu,- ". "CDEF:minusoutnu=0,outnu,- ". - "AREA:minusoutu#00c0ff:\"out unicast \" ". - "GPRINT:outu:LAST:\"%7.2lf/s last \" ". - "GPRINT:outu:AVERAGE:\"%7.2lf/s avg \" ". - "GPRINT:outu:MAX:\"%7.2lf/s max\\n\" ". - "AREA:minusoutnu#0080c0:\"out broadcast/multicast \":STACK ". - "GPRINT:outnu:LAST:\"%7.2lf/s last \" ". - "GPRINT:outnu:AVERAGE:\"%7.2lf/s avg \" ". - "GPRINT:outnu:MAX:\"%7.2lf/s max\\n\" "; + "CDEF:minusout=minusoutu,minusoutnu,+ ". + "AREA:minusoutu#00c0ff:\"out unicast \" ". + "GPRINT:outu:LAST:\"%9.1lf/s last \" ". + "GPRINT:outu:AVERAGE:\"%9.1lf/s avg \" ". 
+ "GPRINT:outu:MAX:\"%9.1lf/s max\\n\" ". + "AREA:minusoutnu#0080c0:\"out broadcast/multicast\":STACK ". + "GPRINT:outnu:LAST:\"%9.1lf/s last \" ". + "GPRINT:outnu:AVERAGE:\"%9.1lf/s avg \" ". + "GPRINT:outnu:MAX:\"%9.1lf/s max\\n\" ". + "VDEF:outperc=minusout,5,PERCENTNAN ". + "LINE:outperc#0000cf:\"out 95% percentile \" ". + "GPRINT:outperc:\"%9.1lf/s\\n\" ". + ""; # Graph 3: errors and discards $ds_name[3] = 'Errors and discards'; diff -Nru check-mk-1.2.2p3/check_mk-mem.used.php check-mk-1.2.6p12/check_mk-mem.used.php --- check-mk-1.2.2p3/check_mk-mem.used.php 2013-11-05 09:22:37.000000000 +0000 +++ check-mk-1.2.6p12/check_mk-mem.used.php 2015-06-24 09:48:39.000000000 +0000 @@ -6,7 +6,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. @@ -27,52 +27,91 @@ $maxgb = sprintf("%.1f", $MAX[1] / 1024.0); -$def[1] = "DEF:ram=$RRDFILE[1]:$DS[1]:MAX " ; -$def[1] .= "DEF:swap=$RRDFILE[2]:$DS[2]:MAX " ; -$def[1] .= "DEF:virt=$RRDFILE[3]:$DS[3]:MAX " ; -$def[1] .= "HRULE:$MAX[3]#000080:\"RAM+SWAP installed\" "; -$def[1] .= "HRULE:$MAX[1]#2040d0:\"$maxgb GB RAM installed\" "; -$def[1] .= "HRULE:$WARN[3]#FFFF00:\"Warning\" "; -$def[1] .= "HRULE:$CRIT[3]#FF0000:\"Critical\" "; - -$def[1] .= "'COMMENT:\\n' "; -$def[1] .= "AREA:ram#80ff40:\"RAM used \" " ; -$def[1] .= "GPRINT:ram:LAST:\"%6.0lf MB last\" " ; -$def[1] .= "GPRINT:ram:AVERAGE:\"%6.0lf MB avg\" " ; -$def[1] .= "GPRINT:ram:MAX:\"%6.0lf MB max\\n\" "; - -$def[1] .= "AREA:swap#008030:\"SWAP used \":STACK " ; -$def[1] .= "GPRINT:swap:LAST:\"%6.0lf MB last\" " ; -$def[1] .= "GPRINT:swap:AVERAGE:\"%6.0lf MB avg\" " ; -$def[1] .= "GPRINT:swap:MAX:\"%6.0lf MB max\\n\" " ; - -$def[1] .= "LINE:virt#000000:\"RAM+SWAP used\" " ; -$def[1] .= "GPRINT:virt:LAST:\"%6.0lf MB last\" " ; -$def[1] .= "GPRINT:virt:AVERAGE:\"%6.0lf MB avg\" " ; -$def[1] .= "GPRINT:virt:MAX:\"%6.0lf MB max\\n\" " ; - -/* HACK: Avoid error if RRD does not contain two data - sources which .XML file *does*. F..ck. This does not - work with multiple RRDs... */ -$retval = -1; -system("rrdtool info $RRDFILE[1] | fgrep -q 'ds[5]'", $retval); -if ($retval == 0) -{ - if (count($NAME) >= 4 and $NAME[4] == "mapped") { - $def[1] .= "DEF:mapped=$RRDFILE[4]:$DS[4]:MAX " ; - $def[1] .= "LINE2:mapped#8822ff:\"Memory mapped\" " ; - $def[1] .= "GPRINT:mapped:LAST:\"%6.0lf MB last\" " ; - $def[1] .= "GPRINT:mapped:AVERAGE:\"%6.0lf MB avg\" " ; - $def[1] .= "GPRINT:mapped:MAX:\"%6.0lf MB max\\n\" " ; - } - - if (count($NAME) >= 5 and $NAME[5] == "committed_as") { - $def[1] .= "DEF:committed=$RRDFILE[5]:$DS[5]:MAX " ; - $def[1] .= "LINE2:committed#cc00dd:\"Committed \" " ; - $def[1] .= "GPRINT:committed:LAST:\"%6.0lf MB last\" " ; - $def[1] .= "GPRINT:committed:AVERAGE:\"%6.0lf MB avg\" " ; - $def[1] .= "GPRINT:committed:MAX:\"%6.0lf MB max\\n\" " ; - } - } - +# For the rest of the data we rather work with names instead +# of numbers +$RRD = array(); +foreach ($NAME as $i => $n) { + $RRD[$n] = "$RRDFILE[$i]:$DS[$i]:MAX"; + $WARN[$n] = $WARN[$i]; + $CRIT[$n] = $CRIT[$i]; + $MIN[$n] = $MIN[$i]; + $MAX[$n] = $MAX[$i]; +} + +$def[1] = ""; + +if (isset($RRD['pagetables'])) { + $def[1] .= "DEF:pagetables=$RRD[pagetables] " + . 
"DEF:ram=$RRD[ramused] "; +} +else { + $def[1] .= "DEF:ram=$RRD[ramused] "; +} + +$def[1] .= "DEF:virt=$RRDFILE[3]:$DS[3]:MAX " + . "DEF:swap=$RRDFILE[2]:$DS[2]:MAX " + + . "HRULE:$MAX[3]#000080:\"RAM+SWAP installed\" " + . "HRULE:$MAX[1]#2040d0:\"$maxgb GB RAM installed\" " + . "HRULE:$WARN[3]#FFFF00:\"Warning\" " + . "HRULE:$CRIT[3]#FF0000:\"Critical\" " + + . "'COMMENT:\\n' " + . "AREA:ram#80ff40:\"RAM used \" " + . "GPRINT:ram:LAST:\"%6.0lf MB last\" " + . "GPRINT:ram:AVERAGE:\"%6.0lf MB avg\" " + . "GPRINT:ram:MAX:\"%6.0lf MB max\\n\" " + + . "AREA:swap#008030:\"SWAP used \":STACK " + . "GPRINT:swap:LAST:\"%6.0lf MB last\" " + . "GPRINT:swap:AVERAGE:\"%6.0lf MB avg\" " + . "GPRINT:swap:MAX:\"%6.0lf MB max\\n\" " + ; + + +if (isset($RRD['pagetables'])) { + $def[1] .= "" + . "AREA:pagetables#ff8800:\"Page tables \":STACK " + . "GPRINT:pagetables:LAST:\"%6.0lf MB last\" " + . "GPRINT:pagetables:AVERAGE:\"%6.0lf MB avg\" " + . "GPRINT:pagetables:MAX:\"%6.0lf MB max\\n\" " + . "LINE:virt#000000:\"RAM+SWAP+PT used\" " + . "GPRINT:virt:LAST:\"%6.0lf MB last\" " + . "GPRINT:virt:AVERAGE:\"%6.0lf MB avg\" " + . "GPRINT:virt:MAX:\"%6.0lf MB max\\n\" " + ; +} + +else { + $def[1] .= "LINE:virt#000000:\"RAM+SWAP used \" " + . "GPRINT:virt:LAST:\"%6.0lf MB last\" " + . "GPRINT:virt:AVERAGE:\"%6.0lf MB avg\" " + . "GPRINT:virt:MAX:\"%6.0lf MB max\\n\" " + ; +} + +if (isset($RRD['mapped'])) { + $def[1] .= "DEF:mapped=$RRD[mapped] " + . "LINE2:mapped#8822ff:\"Memory mapped \" " + . "GPRINT:mapped:LAST:\"%6.0lf MB last\" " + . "GPRINT:mapped:AVERAGE:\"%6.0lf MB avg\" " + . "GPRINT:mapped:MAX:\"%6.0lf MB max\\n\" " ; +} + +if (isset($RRD['committed_as'])) { + $def[1] .= "DEF:committed=$RRD[committed_as] " + . "LINE2:committed#cc00dd:\"Committed \" " + . "GPRINT:committed:LAST:\"%6.0lf MB last\" " + . "GPRINT:committed:AVERAGE:\"%6.0lf MB avg\" " + . "GPRINT:committed:MAX:\"%6.0lf MB max\\n\" " ; +} + +/* Shared memory is part of RAM. So simply overlay it */ +if (isset($RRD['shared'])) { + $def[1] .= "DEF:shared=$RRD[shared] " + . "AREA:shared#44ccff:\"Shared Memory \" " + . "GPRINT:shared:LAST:\"%6.0lf MB last\" " + . "GPRINT:shared:AVERAGE:\"%6.0lf MB avg\" " + . "GPRINT:shared:MAX:\"%6.0lf MB max\\n\" " ; +} ?> diff -Nru check-mk-1.2.2p3/check_mk-mem.vmalloc.php check-mk-1.2.6p12/check_mk-mem.vmalloc.php --- check-mk-1.2.2p3/check_mk-mem.vmalloc.php 2013-03-04 11:48:42.000000000 +0000 +++ check-mk-1.2.6p12/check_mk-mem.vmalloc.php 2015-06-24 09:48:39.000000000 +0000 @@ -6,7 +6,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. diff -Nru check-mk-1.2.2p3/check_mk-mem.win.php check-mk-1.2.6p12/check_mk-mem.win.php --- check-mk-1.2.2p3/check_mk-mem.win.php 2013-03-04 11:48:42.000000000 +0000 +++ check-mk-1.2.6p12/check_mk-mem.win.php 2015-06-24 09:48:39.000000000 +0000 @@ -6,7 +6,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. @@ -23,9 +23,19 @@ # to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, # Boston, MA 02110-1301 USA. 
-$maxmem = $MAX[1] / 1024.0; +// Make data sources available via names +$RRD = array(); +foreach ($NAME as $i => $n) { + $RRD[$n] = "$RRDFILE[$i]:$DS[$i]:MAX"; + $WARN[$n] = $WARN[$i]; + $CRIT[$n] = $CRIT[$i]; + $MIN[$n] = $MIN[$i]; + $MAX[$n] = $MAX[$i]; +} + +$maxmem = $MAX["memory"] / 1024.0; $maxmemprint = sprintf("%5.2f", $maxmem); -$maxpage = $MAX[2] / 1024.0; +$maxpage = $MAX["pagefile"] / 1024.0; $maxpageprint = sprintf("%5.2f", $maxpage); $opt[1] = " --vertical-label 'Gigabytes' -X0 " @@ -34,29 +44,40 @@ . " --title \"Memory and page file usage $hostname\" "; -$def[1] = "DEF:mem=$RRDFILE[1]:$DS[1]:MAX " +$def[1] = "DEF:mem=$RRD[memory] " . "CDEF:memgb=mem,1024,/ " - . "DEF:page=$RRDFILE[2]:$DS[2]:MAX " + . "DEF:page=$RRD[pagefile] " . "CDEF:pagegb=page,1024,/ " . "CDEF:mpagegb=pagegb,-1,* " - - . "AREA:$maxmem#a0f8c0:\"$maxmemprint GB RAM \" " - . "AREA:memgb#20d060 " - . "GPRINT:memgb:LAST:\"%5.2lf GB last\" " - . "GPRINT:memgb:AVERAGE:\"%5.2lf GB avg\" " - . "GPRINT:memgb:MAX:\"%5.2lf GB max\" " - . "HRULE:".($WARN[1]/1024)."#FFFF00:\"Warn\" " - . "HRULE:".($CRIT[1]/1024)."#FF0000:\"Crit\\n\" " - - . "AREA:\"-$maxpage\"#a0d0e8:\"$maxpageprint GB page file\" " - . "AREA:mpagegb#3040d0 " - . "GPRINT:pagegb:LAST:\"%5.2lf GB last\" " - . "GPRINT:pagegb:AVERAGE:\"%5.2lf GB avg\" " - . "GPRINT:pagegb:MAX:\"%5.2lf GB max\" " - . "HRULE:".(-$WARN[2]/1024)."#FFFF00:\"Warn\" " - . "HRULE:".(-$CRIT[2]/1024)."#FF0000:\"Crit\\n\" " - + . "AREA:$maxmem#b0ffe0:\"$maxmemprint GB RAM \" " + . "AREA:memgb#40f090 " + . "GPRINT:memgb:LAST:\"%5.2lf GB last\" " + . "GPRINT:memgb:AVERAGE:\"%5.2lf GB avg\" " + . "GPRINT:memgb:MAX:\"%5.2lf GB max\" " + . "HRULE:".($WARN["memory"]/1024)."#FFFF00:\"Warn\" " + . "HRULE:".($CRIT["memory"]/1024)."#FF0000:\"Crit\\n\" " + + . "AREA:\"-$maxpage\"#b0e0f0:\"$maxpageprint GB page file\" " + . "AREA:mpagegb#90b0ff " + . "GPRINT:pagegb:LAST:\"%5.2lf GB last\" " + . "GPRINT:pagegb:AVERAGE:\"%5.2lf GB avg\" " + . "GPRINT:pagegb:MAX:\"%5.2lf GB max\" " + . "HRULE:".(-$WARN["pagefile"]/1024)."#FFFF00:\"Warn\" " + . "HRULE:".(-$CRIT["pagefile"]/1024)."#FF0000:\"Crit\\n\" " + ; + +# If averaging is enabled then we get two further metrics +if (isset($RRD["memory_avg"])) { + $def[1] .= "" + . "DEF:memavg=$RRD[memory_avg] " + . "CDEF:memavggb=memavg,1024,/ " + . "LINE:memavggb#006000:\"Memory Average \" " + . "DEF:pageavg=$RRD[pagefile_avg] " + . "CDEF:pageavggb=pageavg,1024,/ " + . "CDEF:mpageavggb=pageavggb,-1,* " + . "LINE:mpageavggb#000060:\"Pagefile Average\\n\" " ; +} ?> diff -Nru check-mk-1.2.2p3/check_mk-mssql_tablespaces.php check-mk-1.2.6p12/check_mk-mssql_tablespaces.php --- check-mk-1.2.2p3/check_mk-mssql_tablespaces.php 2013-03-04 11:48:42.000000000 +0000 +++ check-mk-1.2.6p12/check_mk-mssql_tablespaces.php 2015-06-24 09:48:39.000000000 +0000 @@ -6,7 +6,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. diff -Nru check-mk-1.2.2p3/check_mk-mysql_capacity.php check-mk-1.2.6p12/check_mk-mysql_capacity.php --- check-mk-1.2.2p3/check_mk-mysql_capacity.php 2013-03-04 11:48:42.000000000 +0000 +++ check-mk-1.2.6p12/check_mk-mysql_capacity.php 2015-09-21 10:59:54.000000000 +0000 @@ -6,7 +6,7 @@ # | | |___| | | | __/ (__| < | | | | . 
\ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. diff -Nru check-mk-1.2.2p3/check_mk-mysql.innodb_io.php check-mk-1.2.6p12/check_mk-mysql.innodb_io.php --- check-mk-1.2.2p3/check_mk-mysql.innodb_io.php 2013-03-04 11:48:42.000000000 +0000 +++ check-mk-1.2.6p12/check_mk-mysql.innodb_io.php 2015-09-21 10:59:54.000000000 +0000 @@ -6,7 +6,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. @@ -41,7 +41,7 @@ $opt[1] = "--vertical-label 'Throughput (MB/s)' -X0 --title \"Disk throughput $hostname / $disk\" "; - $def[1] = + $def[1] = "HRULE:0#a0a0a0 ". # read "DEF:read=$RRD[read] ". @@ -53,7 +53,7 @@ # read average as line in the same graph if (isset($RRD["read.avg"])) { - $def[1] .= + $def[1] .= "DEF:read_avg=${RRD['read.avg']} ". "CDEF:read_avg_mb=read_avg,1048576,/ ". "LINE:read_avg_mb#202020 "; @@ -84,7 +84,7 @@ # write average if (isset($DS["write.avg"])) { - $def[1] .= + $def[1] .= "DEF:write_avg=${RRD['write.avg']} ". "CDEF:write_avg_mb=write_avg,1048576,/ ". "CDEF:write_avg_mb_neg=write_avg_mb,-1,* ". @@ -122,14 +122,14 @@ $def[] = "" . "DEF:read=$RRD[read_ql] " . "DEF:write=$RRD[write_ql] " - . "CDEF:writen=write,-1,* " + . "CDEF:writen=write,-1,* " . "HRULE:0#a0a0a0 " . "AREA:read#669a76 " . "AREA:writen#517ba5 " ; } - + } // legacy version of diskstat diff -Nru check-mk-1.2.2p3/check_mk-mysql_slave.php check-mk-1.2.6p12/check_mk-mysql_slave.php --- check-mk-1.2.2p3/check_mk-mysql_slave.php 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/check_mk-mysql_slave.php 2015-06-24 09:48:39.000000000 +0000 @@ -0,0 +1,34 @@ + diff -Nru check-mk-1.2.2p3/check_mk-netapp_api_aggr.php check-mk-1.2.6p12/check_mk-netapp_api_aggr.php --- check-mk-1.2.2p3/check_mk-netapp_api_aggr.php 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/check_mk-netapp_api_aggr.php 2015-09-21 10:59:54.000000000 +0000 @@ -0,0 +1,157 @@ + $n) { + $RRD[$n] = "$RRDFILE[$i]:$DS[$i]:MAX"; + $RRD_MIN[$n] = "$RRDFILE[$i]:$DS[$i]:MIN"; + $RRD_AVG[$n] = "$RRDFILE[$i]:$DS[$i]:AVERAGE"; + $WARN[$n] = $WARN[$i]; + $CRIT[$n] = $CRIT[$i]; + $MIN[$n] = $MIN[$i]; + $MAX[$n] = $MAX[$i]; + $ACT[$n] = $ACT[$i]; +} + +# RRDtool Options +#$servicedes=$NAGIOS_SERVICEDESC + +$fsname = str_replace("_", "/", substr($servicedesc,11)); +$fstitle = $fsname; + +# Hack for windows: replace C// with C:\ +if (strlen($fsname) == 3 && substr($fsname, 1, 2) == '//') { + $fsname = $fsname[0] . "\:\\\\"; + $fstitle = $fsname[0] . ":\\"; +} + +$sizegb = sprintf("%.1f", $MAX[1] / 1024.0); +$maxgb = $MAX[1] / 1024.0; +$warngb = $WARN[1] / 1024.0; +$critgb = $CRIT[1] / 1024.0; +$warngbtxt = sprintf("%.1f", $warngb); +$critgbtxt = sprintf("%.1f", $critgb); + +$opt[1] = "--vertical-label GB -l 0 -u $maxgb --title '$hostname: Filesystem $fstitle ($sizegb GB)' "; + +# First graph show current filesystem usage +$def[1] = "DEF:mb=$RRDFILE[1]:$DS[1]:MAX "; +$def[1] .= "CDEF:var1=mb,1024,/ "; +$def[1] .= "AREA:var1#00ffc6:\"used space on $fsname\\n\" "; + +# Optional uncommitted usage e.g. 
for esx hosts +if(isset($RRD['uncommitted'])) { + $def[1] .= "DEF:uncommitted_mb=".$RRD['uncommitted']." "; + $def[1] .= "CDEF:uncommitted_gb=uncommitted_mb,1024,/ "; + $def[1] .= "CDEF:total_gb=uncommitted_gb,var1,+ "; +} else { + $def[1] .= "CDEF:total_gb=var1 "; +} + +$def[1] .= "HRULE:$maxgb#003300:\"Size ($sizegb GB) \" "; +$def[1] .= "HRULE:$warngb#ffff00:\"Warning at $warngbtxt GB \" "; +$def[1] .= "HRULE:$critgb#ff0000:\"Critical at $critgbtxt GB \\n\" "; +$def[1] .= "GPRINT:var1:LAST:\"current\: %6.2lf GB\" "; +$def[1] .= "GPRINT:var1:MAX:\"max\: %6.2lf GB \" "; +$def[1] .= "GPRINT:var1:AVERAGE:\"avg\: %6.2lf GB\\n\" "; + +if(isset($RRD['uncommitted'])) { + $def[1] .= "AREA:uncommitted_gb#eeccff:\"Uncommited\":STACK "; + $def[1] .= "GPRINT:uncommitted_gb:MAX:\"%6.2lf GB\l\" "; +} + +$def[1] .= "LINE1:total_gb#226600 "; + +# Second graph is optional and shows trend. The MAX field +# of the third variable contains (size of the filesystem in MB +# / range in hours). From that we can compute the configured range. +if (isset($RRD['growth'])) { + $size_mb_per_hours = floatval($MAX['trend']); // this is size_mb / range(hours) + $size_mb = floatval($MAX[1]); + $hours = 1.0 / ($size_mb_per_hours / $size_mb); + $range = sprintf("%.0fh", $hours); + + // Current growth / shrinking. This value is give as MB / 24 hours. + // Note: This has changed in 1.1.13i3. Prior it was MB / trend_range! + $opt[2] = "--vertical-label '+/- MB / 24h' -l -1 -u 1 -X0 --title '$hostname: Growth of $fstitle' "; + $def[2] = "DEF:growth_max=${RRD['growth']} "; + $def[2] .= "DEF:growth_min=${RRD_MIN['growth']} "; + $def[2] .= "DEF:trend=${RRD_AVG['trend']} "; + $def[2] .= "CDEF:growth_pos=growth_max,0,MAX "; + $def[2] .= "CDEF:growth_neg=growth_min,0,MIN "; + $def[2] .= "CDEF:growth_minabs=0,growth_min,- "; + $def[2] .= "CDEF:growth=growth_minabs,growth_max,MAX "; + $def[2] .= "HRULE:0#c0c0c0 "; + $def[2] .= "AREA:growth_pos#3060f0:\"Grow\" "; + $def[2] .= "AREA:growth_neg#30f060:\"Shrink \" "; + $def[2] .= "GPRINT:growth:LAST:\"Current\: %+9.2lfMB / 24h\" "; + $def[2] .= "GPRINT:growth:MAX:\"Max\: %+9.2lfMB / 24h\\n\" "; + + // Trend + $opt[3] = "--vertical-label '+/- MB / 24h' -l -1 -u 1 -X0 --title '$hostname: Trend for $fstitle' "; + $def[3] = "DEF:trend=${RRD_AVG['trend']} "; + $def[3] .= "HRULE:0#c0c0c0 "; + $def[3] .= "LINE1:trend#000000:\"Trend\:\" "; + $def[3] .= "GPRINT:trend:LAST:\"%+7.2lf MB/24h\" "; + if ($WARN['trend']) { + $warn_mb = sprintf("%.2fMB", $WARN['trend'] * $hours / 24.0); + $def[3] .= "LINE1:${WARN['trend']}#ffff00:\"Warn\: $warn_mb / $range\" "; + } + if ($CRIT['trend']) { + $crit_mb = sprintf("%.2fMB", $CRIT['trend'] * $hours / 24.0); + $def[3] .= "LINE1:${CRIT['trend']}#ff0000:\"Crit\: $crit_mb / $range\" "; + } + $def[3] .= "COMMENT:\"\\n\" "; +} + +if (isset($RRD['trend_hoursleft'])) { + // Trend + $opt[4] = "--vertical-label 'Days left' -l -1 -u 365 -X0 --title '$hostname: Days left for $fstitle' "; + $def[4] = "DEF:hours_left=${RRD_AVG['trend_hoursleft']} "; + $def[4] .= "DEF:hours_left_min=${RRD_MIN['trend_hoursleft']} "; + // negative hours indicate no growth + // the dataset hours_left_isneg stores this info for each point as True/False + $def[4] .= "CDEF:hours_left_isneg=hours_left_min,-1,EQ "; + $def[4] .= "CDEF:hours_left_unmon=hours_left_min,400,0,IF "; + $def[4] .= "CDEF:days_left=hours_left,24,/ "; + $def[4] .= "CDEF:days_left_cap=days_left,400,MIN "; + // Convert negative points to 400 (y-axis cap) + $def[4] .= 
"CDEF:days_left_cap_positive=hours_left_isneg,400,days_left_cap,IF "; + // The AREA has a rendering problem. Points are too far to the right + $def[4] .= "AREA:hours_left_unmon#AA2200: "; + + $def[4] .= "AREA:days_left_cap_positive#22AA44:\"Days left\:\" "; + if ($ACT[4] == -1) + { + $def[4] .= "COMMENT:\"Not growing\" "; + } + else { + $def[4] .= "GPRINT:days_left:LAST:\"%7.2lf days\" "; + } +} +?> diff -Nru check-mk-1.2.2p3/check_mk-netapp_api_cpu.utilization.php check-mk-1.2.6p12/check_mk-netapp_api_cpu.utilization.php --- check-mk-1.2.2p3/check_mk-netapp_api_cpu.utilization.php 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/check_mk-netapp_api_cpu.utilization.php 2015-06-24 09:48:39.000000000 +0000 @@ -0,0 +1,44 @@ + diff -Nru check-mk-1.2.2p3/check_mk-netapp_api_disk.summary.php check-mk-1.2.6p12/check_mk-netapp_api_disk.summary.php --- check-mk-1.2.2p3/check_mk-netapp_api_disk.summary.php 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/check_mk-netapp_api_disk.summary.php 2015-06-24 09:48:39.000000000 +0000 @@ -0,0 +1,56 @@ + $n) { + $RRD[$n] = "$RRDFILE[$i]:$DS[$i]:AVERAGE"; +} + +$sizegb = sprintf("%.1f", $MAX[1]); + +$opt[1] = "--vertical-label Bytes -l 0 -b 1024 --title 'Total raw capacity of $hostname' "; +# First graph show current filesystem usage +$def[1] = "DEF:bytes=$RRD[total_space] "; +$def[1] .= "AREA:bytes#00ffc6:\"Capacity\" "; + +# read ops +$opt[2] = "--vertical-label Disks -l 0 --title 'Spare and broken disks of $hostname' "; +$def[2] = "". +"DEF:sparedisks=$RRD[spare] ". +"LINE:sparedisks#00e060:\" Spare \" ". +"GPRINT:sparedisks:LAST:\"%7.0lf last\" ". +"GPRINT:sparedisks:AVERAGE:\"%7.0lf avg\" ". +"GPRINT:sparedisks:MAX:\"%7.0lf max\\n\" ". + +"DEF:brokendisks=$RRD[broken] ". +"LINE:brokendisks#e04000:\" Broken \" ". +"GPRINT:brokendisks:LAST:\"%7.0lf last\" ". +"GPRINT:brokendisks:AVERAGE:\"%7.0lf avg\" ". +"GPRINT:brokendisks:MAX:\"%7.0lf max\\n\" "; + +?> diff -Nru check-mk-1.2.2p3/check_mk-netapp_api_if.php check-mk-1.2.6p12/check_mk-netapp_api_if.php --- check-mk-1.2.2p3/check_mk-netapp_api_if.php 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/check_mk-netapp_api_if.php 2015-06-24 09:48:39.000000000 +0000 @@ -0,0 +1,226 @@ + $base * $base * $base) { + $scale = $base * $base * $base; + $bwuom = 'G'; +} +elseif ($bandwidth > $base * $base) { + $scale = $base * $base; + $bwuom = 'M'; +} +elseif ($bandwidth > $base) { + $scale = $base; + $bwuom = 'k'; +} +else { + $scale = 1; + $bwuom = ' '; +} + +$warn /= $scale; +$crit /= $scale; +$bandwidth /= $scale; + +$vertical_label_name = $bwuom . $unit . "/sec"; + +$range = min(10, $bandwidth); + + +$bandwidthInfo = ""; +if ($bandwidth > 0){ + $bandwidthInfo = " at " . sprintf("%.1f", $bandwidth) . " ${bwuom}${unit}/s"; +} +$ds_name[1] = 'Used bandwidth'; +$opt[1] = "--vertical-label \"$vertical_label_name\" -l -$range -u $range -X0 -b 1024 --title \"Used bandwidth $hostname / $servicedesc$bandwidthInfo\" "; +$def[1] = + "HRULE:0#c0c0c0 "; +if ($bandwidth) + $def[1] .= "HRULE:$bandwidth#808080:\"Port speed\: " . sprintf("%10.1f", $bandwidth) . " ".$bwuom."$unit/s\\n\" ". + "HRULE:-$bandwidth#808080: "; +if ($warn) + $def[1] .= "HRULE:$warn#ffff00:\"Warning\: " . sprintf("%13.1f", $warn) . " ".$bwuom."$unit/s\\n\" ". + "HRULE:-$warn#ffff00: "; +if ($crit) + $def[1] .= "HRULE:$crit#ff0000:\"Critical\: " . sprintf("%13.1f", $crit) . " ".$bwuom."$unit/s\\n\" ". + "HRULE:-$crit#ff0000: "; + + $def[1] .= "". + # incoming + "DEF:inbytes=$RRDFILE[1]:$DS[1]:MAX ". 
+ "CDEF:intraffic=inbytes,$unit_multiplier,* ". + "CDEF:inmb=intraffic,$scale,/ ". + "AREA:inmb#00e060:\"in \" ". + "GPRINT:intraffic:LAST:\"%7.1lf %s$unit/s last\" ". + "GPRINT:intraffic:AVERAGE:\"%7.1lf %s$unit/s avg\" ". + "GPRINT:intraffic:MAX:\"%7.1lf %s$unit/s max\\n\" ". + "VDEF:inperc=intraffic,95,PERCENTNAN ". + "VDEF:inpercmb=inmb,95,PERCENTNAN ". + "LINE:inpercmb#008f00:\"95% percentile\" ". + "GPRINT:inperc:\"%7.1lf %s$unit/s\\n\" ". + + # outgoing + "DEF:outbytes=$RRDFILE[6]:$DS[6]:MAX ". + "CDEF:outtraffic=outbytes,$unit_multiplier,* ". + "CDEF:minusouttraffic=outtraffic,-1,* ". + "CDEF:outmb=outtraffic,$scale,/ ". + "CDEF:minusoutmb=0,outmb,- ". + "AREA:minusoutmb#0080e0:\"out \" ". + "GPRINT:outtraffic:LAST:\"%7.1lf %s$unit/s last\" ". + "GPRINT:outtraffic:AVERAGE:\"%7.1lf %s$unit/s avg\" ". + "GPRINT:outtraffic:MAX:\"%7.1lf %s$unit/s max\\n\" ". + "VDEF:outperc=minusouttraffic,5,PERCENTNAN ". + "VDEF:outpercmb=minusoutmb,5,PERCENTNAN ". + "LINE:outpercmb#00008f:\"95% percentile\" ". + "GPRINT:outperc:\"%7.1lf %s$unit/s\\n\" ". + + ""; + +if (isset($DS[12])) { + $def[1] .= + "DEF:inbytesa=$RRDFILE[12]:$DS[12]:MAX ". + "DEF:outbytesa=$RRDFILE[13]:$DS[13]:MAX ". + "CDEF:intraffica=inbytesa,$unit_multiplier,* ". + "CDEF:outtraffica=outbytesa,$unit_multiplier,* ". + "CDEF:inmba=intraffica,1048576,/ ". + "CDEF:outmba=outtraffica,1048576,/ ". + "CDEF:minusoutmba=0,outmba,- ". + "LINE:inmba#00a060:\"in (avg) \" ". + "GPRINT:intraffica:LAST:\"%6.1lf %s$unit/s last\" ". + "GPRINT:intraffica:AVERAGE:\"%6.1lf %s$unit/s avg\" ". + "GPRINT:intraffica:MAX:\"%6.1lf %s$unit/s max\\n\" ". + "LINE:minusoutmba#0060c0:\"out (avg) \" ". + "GPRINT:outtraffica:LAST:\"%6.1lf %s$unit/s last\" ". + "GPRINT:outtraffica:AVERAGE:\"%6.1lf %s$unit/s avg\" ". + "GPRINT:outtraffica:MAX:\"%6.1lf %s$unit/s max\\n\" "; +} + +# Graph 2: packets +$ds_name[2] = 'Packets'; +$opt[2] = "--vertical-label \"packets/sec\" --title \"Packets $hostname / $servicedesc\" "; +$def[2] = + # ingoing + "HRULE:0#c0c0c0 ". + "DEF:inu=$RRDFILE[2]:$DS[2]:MAX ". + "DEF:innu=$RRDFILE[3]:$DS[3]:MAX ". + "CDEF:in=inu,innu,+ ". + "AREA:inu#00ffc0:\"in unicast \" ". + "GPRINT:inu:LAST:\"%9.1lf/s last \" ". + "GPRINT:inu:AVERAGE:\"%9.1lf/s avg \" ". + "GPRINT:inu:MAX:\"%9.1lf/s max\\n\" ". + "AREA:innu#00c080:\"in broadcast/multicast \":STACK ". + "GPRINT:innu:LAST:\"%9.1lf/s last \" ". + "GPRINT:innu:AVERAGE:\"%9.1lf/s avg \" ". + "GPRINT:innu:MAX:\"%9.1lf/s max\\n\" ". + "VDEF:inperc=in,95,PERCENTNAN ". + "LINE:inperc#00cf00:\"in 95% percentile \" ". + "GPRINT:inperc:\"%9.1lf/s\\n\" ". + + # outgoing + "DEF:outu=$RRDFILE[7]:$DS[7]:MAX ". + "DEF:outnu=$RRDFILE[8]:$DS[8]:MAX ". + "CDEF:minusoutu=0,outu,- ". + "CDEF:minusoutnu=0,outnu,- ". + "CDEF:minusout=minusoutu,minusoutnu,+ ". + "AREA:minusoutu#00c0ff:\"out unicast \" ". + "GPRINT:outu:LAST:\"%9.1lf/s last \" ". + "GPRINT:outu:AVERAGE:\"%9.1lf/s avg \" ". + "GPRINT:outu:MAX:\"%9.1lf/s max\\n\" ". + "AREA:minusoutnu#0080c0:\"out broadcast/multicast\":STACK ". + "GPRINT:outnu:LAST:\"%9.1lf/s last \" ". + "GPRINT:outnu:AVERAGE:\"%9.1lf/s avg \" ". + "GPRINT:outnu:MAX:\"%9.1lf/s max\\n\" ". + "VDEF:outperc=minusout,5,PERCENTNAN ". + "LINE:outperc#0000cf:\"out 95% percentile \" ". + "GPRINT:outperc:\"%9.1lf/s\\n\" ". + ""; + +# Graph 3: errors and discards +$ds_name[3] = 'Errors and discards'; +$opt[3] = "--vertical-label \"packets/sec\" -X0 --title \"Problems $hostname / $servicedesc\" "; +$def[3] = + "HRULE:0#c0c0c0 ". + "DEF:inerr=$RRDFILE[5]:$DS[5]:MAX ". 
+ "DEF:indisc=$RRDFILE[4]:$DS[4]:MAX ". + "AREA:inerr#ff0000:\"in errors \" ". + "GPRINT:inerr:LAST:\"%7.2lf/s last \" ". + "GPRINT:inerr:AVERAGE:\"%7.2lf/s avg \" ". + "GPRINT:inerr:MAX:\"%7.2lf/s max\\n\" ". + "AREA:indisc#ff8000:\"in discards \":STACK ". + "GPRINT:indisc:LAST:\"%7.2lf/s last \" ". + "GPRINT:indisc:AVERAGE:\"%7.2lf/s avg \" ". + "GPRINT:indisc:MAX:\"%7.2lf/s max\\n\" ". + "DEF:outerr=$RRDFILE[10]:$DS[10]:MAX ". + "DEF:outdisc=$RRDFILE[9]:$DS[9]:MAX ". + "CDEF:minusouterr=0,outerr,- ". + "CDEF:minusoutdisc=0,outdisc,- ". + "AREA:minusouterr#ff0080:\"out errors \" ". + "GPRINT:outerr:LAST:\"%7.2lf/s last \" ". + "GPRINT:outerr:AVERAGE:\"%7.2lf/s avg \" ". + "GPRINT:outerr:MAX:\"%7.2lf/s max\\n\" ". + "AREA:minusoutdisc#ff8080:\"out discards \":STACK ". + "GPRINT:outdisc:LAST:\"%7.2lf/s last \" ". + "GPRINT:outdisc:AVERAGE:\"%7.2lf/s avg \" ". + "GPRINT:outdisc:MAX:\"%7.2lf/s max\\n\" "; +?> diff -Nru check-mk-1.2.2p3/check_mk-netapp_api_protocol.php check-mk-1.2.6p12/check_mk-netapp_api_protocol.php --- check-mk-1.2.2p3/check_mk-netapp_api_protocol.php 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/check_mk-netapp_api_protocol.php 2015-06-24 09:48:39.000000000 +0000 @@ -0,0 +1,50 @@ + diff -Nru check-mk-1.2.2p3/check_mk-netapp_api_temp.php check-mk-1.2.6p12/check_mk-netapp_api_temp.php --- check-mk-1.2.2p3/check_mk-netapp_api_temp.php 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/check_mk-netapp_api_temp.php 2015-06-24 09:48:39.000000000 +0000 @@ -0,0 +1,38 @@ + diff -Nru check-mk-1.2.2p3/check_mk-netapp_api_vf_stats.cpu_util.php check-mk-1.2.6p12/check_mk-netapp_api_vf_stats.cpu_util.php --- check-mk-1.2.2p3/check_mk-netapp_api_vf_stats.cpu_util.php 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/check_mk-netapp_api_vf_stats.cpu_util.php 2015-06-24 09:48:39.000000000 +0000 @@ -0,0 +1,44 @@ + diff -Nru check-mk-1.2.2p3/check_mk-netapp_api_vf_stats.traffic.php check-mk-1.2.6p12/check_mk-netapp_api_vf_stats.traffic.php --- check-mk-1.2.2p3/check_mk-netapp_api_vf_stats.traffic.php 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/check_mk-netapp_api_vf_stats.traffic.php 2015-06-24 09:48:39.000000000 +0000 @@ -0,0 +1,92 @@ + diff -Nru check-mk-1.2.2p3/check_mk-netapp_api_volumes.php check-mk-1.2.6p12/check_mk-netapp_api_volumes.php --- check-mk-1.2.2p3/check_mk-netapp_api_volumes.php 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/check_mk-netapp_api_volumes.php 2015-09-16 14:25:30.000000000 +0000 @@ -0,0 +1,196 @@ + $n) { + $RRD[$n] = "$RRDFILE[$i]:$DS[$i]:MAX"; + $RRD_MIN[$n] = "$RRDFILE[$i]:$DS[$i]:MIN"; + $RRD_AVG[$n] = "$RRDFILE[$i]:$DS[$i]:AVERAGE"; + $WARN[$n] = $WARN[$i]; + $CRIT[$n] = $CRIT[$i]; + $MIN[$n] = $MIN[$i]; + $MAX[$n] = $MAX[$i]; + $ACT[$n] = $ACT[$i]; +} + +# RRDtool Options +#$servicedes=$NAGIOS_SERVICEDESC + +$fsname = substr($servicedesc, 7); +$fstitle = $fsname; + + +$sizegb = sprintf("%.1f", $MAX[1] / 1024.0); +$maxgb = $MAX[1] / 1024.0; +$warngb = $WARN[1] / 1024.0; +$critgb = $CRIT[1] / 1024.0; +$warngbtxt = sprintf("%.1f", $warngb); +$critgbtxt = sprintf("%.1f", $critgb); + +$opt[1] = "--vertical-label GB -l 0 -u $maxgb --title '$hostname: Volume $fstitle ($sizegb GB)' "; + +# First graph show current filesystem usage +$def[1] = "DEF:mb=$RRDFILE[1]:$DS[1]:MAX "; +$def[1] .= "CDEF:var1=mb,1024,/ "; +$def[1] .= "AREA:var1#00ffc6:\"used space on $fsname\\n\" "; + +# Optional uncommitted usage e.g. for esx hosts +if(isset($RRD['uncommitted'])) { + $def[1] .= "DEF:uncommitted_mb=".$RRD['uncommitted']." 
"; + $def[1] .= "CDEF:uncommitted_gb=uncommitted_mb,1024,/ "; + $def[1] .= "CDEF:total_gb=uncommitted_gb,var1,+ "; +} else { + $def[1] .= "CDEF:total_gb=var1 "; +} + +$def[1] .= "HRULE:$maxgb#003300:\"Size ($sizegb GB) \" "; +$def[1] .= "HRULE:$warngb#ffff00:\"Warning at $warngbtxt GB \" "; +$def[1] .= "HRULE:$critgb#ff0000:\"Critical at $critgbtxt GB \\n\" "; +$def[1] .= "GPRINT:var1:LAST:\"current\: %6.2lf GB\" "; +$def[1] .= "GPRINT:var1:MAX:\"max\: %6.2lf GB \" "; +$def[1] .= "GPRINT:var1:AVERAGE:\"avg\: %6.2lf GB\\n\" "; + +if(isset($RRD['uncommitted'])) { + $def[1] .= "AREA:uncommitted_gb#eeccff:\"Uncommited\":STACK "; + $def[1] .= "GPRINT:uncommitted_gb:MAX:\"%6.2lf GB\l\" "; +} + +$def[1] .= "LINE1:total_gb#226600 "; + +# Second graph is optional and shows trend. The MAX field +# of the third variable contains (size of the filesystem in MB +# / range in hours). From that we can compute the configured range. +if (isset($RRD['growth'])) { + $size_mb_per_hours = floatval($MAX['trend']); // this is size_mb / range(hours) + $size_mb = floatval($MAX[1]); + $hours = 1.0 / ($size_mb_per_hours / $size_mb); + $range = sprintf("%.0fh", $hours); + + // Current growth / shrinking. This value is give as MB / 24 hours. + // Note: This has changed in 1.1.13i3. Prior it was MB / trend_range! + $opt[2] = "--vertical-label '+/- MB / 24h' -l -1 -u 1 -X0 --title '$hostname: Growth of $fstitle' "; + $def[2] = "DEF:growth_max=${RRD['growth']} "; + $def[2] .= "DEF:growth_min=${RRD_MIN['growth']} "; + $def[2] .= "DEF:trend=${RRD_AVG['trend']} "; + $def[2] .= "CDEF:growth_pos=growth_max,0,MAX "; + $def[2] .= "CDEF:growth_neg=growth_min,0,MIN "; + $def[2] .= "CDEF:growth_minabs=0,growth_min,- "; + $def[2] .= "CDEF:growth=growth_minabs,growth_max,MAX "; + $def[2] .= "HRULE:0#c0c0c0 "; + $def[2] .= "AREA:growth_pos#3060f0:\"Grow\" "; + $def[2] .= "AREA:growth_neg#30f060:\"Shrink \" "; + $def[2] .= "GPRINT:growth:LAST:\"Current\: %+9.2lfMB / 24h\" "; + $def[2] .= "GPRINT:growth:MAX:\"Max\: %+9.2lfMB / 24h\\n\" "; + + // Trend + $opt[3] = "--vertical-label '+/- MB / 24h' -l -1 -u 1 -X0 --title '$hostname: Trend for $fstitle' "; + $def[3] = "DEF:trend=${RRD_AVG['trend']} "; + $def[3] .= "HRULE:0#c0c0c0 "; + $def[3] .= "LINE1:trend#000000:\"Trend\:\" "; + $def[3] .= "GPRINT:trend:LAST:\"%+7.2lf MB/24h\" "; + if ($WARN['trend']) { + $warn_mb = sprintf("%.2fMB", $WARN['trend'] * $hours / 24.0); + $def[3] .= "LINE1:${WARN['trend']}#ffff00:\"Warn\: $warn_mb / $range\" "; + } + if ($CRIT['trend']) { + $crit_mb = sprintf("%.2fMB", $CRIT['trend'] * $hours / 24.0); + $def[3] .= "LINE1:${CRIT['trend']}#ff0000:\"Crit\: $crit_mb / $range\" "; + } + $def[3] .= "COMMENT:\"\\n\" "; +} + +if (isset($RRD['trend_hoursleft'])) { + // Trend + $opt[4] = "--vertical-label 'Days left' -l -1 -u 365 -X0 --title '$hostname: Days left for $fstitle' "; + $def[4] = "DEF:hours_left=${RRD_AVG['trend_hoursleft']} "; + $def[4] .= "DEF:hours_left_min=${RRD_MIN['trend_hoursleft']} "; + // negative hours indicate no growth + // the dataset hours_left_isneg stores this info for each point as True/False + $def[4] .= "CDEF:hours_left_isneg=hours_left_min,-1,EQ "; + $def[4] .= "CDEF:hours_left_unmon=hours_left_min,400,0,IF "; + $def[4] .= "CDEF:days_left=hours_left,24,/ "; + $def[4] .= "CDEF:days_left_cap=days_left,400,MIN "; + // Convert negative points to 400 (y-axis cap) + $def[4] .= "CDEF:days_left_cap_positive=hours_left_isneg,400,days_left_cap,IF "; + // The AREA has a rendering problem. 
Points are too far to the right + $def[4] .= "AREA:hours_left_unmon#AA2200: "; + + $def[4] .= "AREA:days_left_cap_positive#22AA44:\"Days left\:\" "; + if ($ACT[4] == -1) + { + $def[4] .= "COMMENT:\"Not growing\" "; + } + else { + $def[4] .= "GPRINT:days_left:LAST:\"%7.2lf days\" "; + } +} + + +$protocols = array('', 'nfs_', 'cifs_', 'san_', 'fcp_', 'iscsi_'); +$graph_offset = 5; +$mega = 1024 * 1024; +for ($i = 0; $i < count($protocols); $i++) { + if (isset($RRD[$protocols[$i].'read_data'])) { + $protocol_name = substr($protocols[$i], 0, -1); + $title = "$protocol_name read and write data of ".$hostname."/".$servicedesc; + $opt[$graph_offset + $i * 2] = "--vertical-label 'MB' -b 1024 --title \"$title\""; + + $def[$graph_offset + $i * 2] = "". + "DEF:read=${RRD[$protocols[$i].'read_data']} ". + "CDEF:readmb=read,$mega,/ ". + "LINE:readmb#00e060:\"$protocol_name Read Data \" ". + "GPRINT:readmb:LAST:\"%7.2lf MB last\" ". + "GPRINT:readmb:AVERAGE:\"%7.2lf MB avg\" ". + "GPRINT:readmb:MAX:\"%7.2lf MB max\\n\" ". + + "DEF:write=${RRD[$protocols[$i].'write_data']} ". + "CDEF:writemb=write,$mega,/ ". + "CDEF:minuswritemb=writemb,-1,* ". + "LINE:minuswritemb#0080e0:\"$protocol_name Write Data \" ". + "GPRINT:writemb:LAST:\"%7.2lf MB last\" ". + "GPRINT:writemb:AVERAGE:\"%7.2lf MB avg\" ". + "GPRINT:writemb:MAX:\"%7.2lf MB max\\n\" "; + + $title = "$protocol_name latency of ".$hostname."/".$servicedesc; + $opt[$graph_offset + $i * 2 + 1] = "--vertical-label 'ms' --title \"$title\""; + $def[$graph_offset + $i * 2 + 1] = "". + "DEF:readlat=${RRD[$protocols[$i].'read_latency']} ". + "LINE:readlat#a02000:\"$protocol_name read latency\" ". + "GPRINT:readlat:LAST:\"%7.2lf ms last\" ". + "GPRINT:readlat:AVERAGE:\"%7.2lf ms avg\" ". + "GPRINT:readlat:MAX:\"%7.2lf ms max\\n\" ". + + "DEF:writelat=${RRD[$protocols[$i].'write_latency']} ". + "LINE:writelat#20a000:\"$protocol_name write latency\" ". + "GPRINT:writelat:LAST:\"%7.2lf ms last\" ". + "GPRINT:writelat:AVERAGE:\"%7.2lf ms avg\" ". + "GPRINT:writelat:MAX:\"%7.2lf ms max\\n\" "; + } +} + +?> diff -Nru check-mk-1.2.2p3/check_mk-netapp_cpu.php check-mk-1.2.6p12/check_mk-netapp_cpu.php --- check-mk-1.2.2p3/check_mk-netapp_cpu.php 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/check_mk-netapp_cpu.php 2015-06-24 09:48:39.000000000 +0000 @@ -0,0 +1,44 @@ + diff -Nru check-mk-1.2.2p3/check_mk-netapp_fcpio.php check-mk-1.2.6p12/check_mk-netapp_fcpio.php --- check-mk-1.2.2p3/check_mk-netapp_fcpio.php 2013-03-04 11:48:42.000000000 +0000 +++ check-mk-1.2.6p12/check_mk-netapp_fcpio.php 2015-06-24 09:48:39.000000000 +0000 @@ -6,7 +6,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. @@ -39,7 +39,7 @@ $opt[1] = "--vertical-label 'Throughput (MB/s)' -X0 --title \"FC Port throughput on $hostname\" "; - $def[1] = + $def[1] = "HRULE:0#a0a0a0 ". # read "DEF:read=$RRD[read] ". @@ -51,7 +51,7 @@ # read average as line in the same graph if (isset($RRD["read.avg"])) { - $def[1] .= + $def[1] .= "DEF:read_avg=${RRD['read.avg']} ". "CDEF:read_avg_mb=read_avg,1048576,/ ". 
"LINE:read_avg_mb#202020 "; @@ -81,7 +81,7 @@ } - + } ?> diff -Nru check-mk-1.2.2p3/check_mk-netctr.combined.php check-mk-1.2.6p12/check_mk-netctr.combined.php --- check-mk-1.2.2p3/check_mk-netctr.combined.php 2013-03-04 11:48:42.000000000 +0000 +++ check-mk-1.2.6p12/check_mk-netctr.combined.php 2015-06-24 09:48:39.000000000 +0000 @@ -6,7 +6,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. @@ -32,13 +32,13 @@ # 5: rx_errors # 6: tx_errors # 7: tx_collisions - + # $x = explode("_", $servicedesc); $nic = $x[1]; $opt[1] = "--vertical-label 'Bytes/s' -l -1024 -u 1024 --title \"$hostname / NIC $nic\" "; -# -l0 -u1048576 +# -l0 -u1048576 # # $def[1] = "DEF:rx_bytes=$RRDFILE[1]:$DS[1]:AVERAGE " ; diff -Nru check-mk-1.2.2p3/check_mk-netctr.php check-mk-1.2.6p12/check_mk-netctr.php --- check-mk-1.2.2p3/check_mk-netctr.php 2013-03-04 11:48:42.000000000 +0000 +++ check-mk-1.2.6p12/check_mk-netctr.php 2015-06-24 09:48:39.000000000 +0000 @@ -6,7 +6,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. diff -Nru check-mk-1.2.2p3/check_mk-nginx_status.php check-mk-1.2.6p12/check_mk-nginx_status.php --- check-mk-1.2.2p3/check_mk-nginx_status.php 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/check_mk-nginx_status.php 2015-06-24 09:48:39.000000000 +0000 @@ -0,0 +1,109 @@ + $n) { + $RRD[$n] = "$RRDFILE[$i]:$DS[$i]:MAX"; + $WARN[$n] = $WARN[$i]; + $CRIT[$n] = $CRIT[$i]; + $MIN[$n] = $MIN[$i]; + $MAX[$n] = $MAX[$i]; + $ACT[$n] = $ACT[$i]; +} + +# +# First graph with all data +# +$ds_name[$i] = "Connections"; +$def[$i] = ""; +$opt[$i] = " --vertical-label 'Connections' --title '$hostname: $servicedesc' -l 0"; + +$def[$i] .= "DEF:active=${RRD['active']} "; +$def[$i] .= "GPRINT:active:LAST:\" Active Last %5.0lf\" "; +$def[$i] .= "GPRINT:active:MAX:\"Max %5.0lf\" "; +$def[$i] .= "GPRINT:active:AVERAGE:\"Average %5.1lf\" "; +$def[$i] .= "COMMENT:\"\\n\" "; + +foreach ($this->DS as $KEY=>$VAL) { + if (preg_match('/^(reading|writing|waiting)$/', $VAL['NAME'])) { + $def[$i] .= "DEF:var${KEY}=${VAL['RRDFILE']}:${DS[$VAL['DS']]}:AVERAGE "; + $def[$i] .= "AREA:var${KEY}".rrd::color($KEY).":\"".$VAL['NAME']."\":STACK "; + $def[$i] .= "GPRINT:var${KEY}:LAST:\"Last %5.0lf\" "; + $def[$i] .= "GPRINT:var${KEY}:MAX:\"Max %5.0lf\" "; + $def[$i] .= "GPRINT:var${KEY}:AVERAGE:\"Average %5.1lf\" "; + $def[$i] .= "COMMENT:\"\\n\" "; + } +} + +# +# Requests per Second +# +$i++; +$def[$i] = ""; +$opt[$i] = " --title '$hostname: $servicedesc Requests/sec' -l 0"; +$ds_name[$i] = "Requests/sec"; +$color = '#000000'; +foreach ($this->DS as $KEY=>$VAL) { + if($VAL['NAME'] == 'requests_per_sec') { + $def[$i] .= rrd::def ("var".$KEY, $VAL['RRDFILE'], $VAL['DS'], "AVERAGE"); + $def[$i] .= rrd::line1 ("var".$KEY, $color, rrd::cut($VAL['NAME'],16), 'STACK' ); + $def[$i] .= rrd::gprint ("var".$KEY, array("LAST","MAX","AVERAGE"), "%6.1lf/s"); + } +} + +# +# Requests per Connection +# +$i++; +$def[$i] = ""; +$opt[$i] = " --title '$hostname: $servicedesc Requests/Connection' -l 0"; 
+$ds_name[$i] = "Requests/Connection"; +$color = '#000000'; +foreach ($this->DS as $KEY=>$VAL) { + if($VAL['NAME'] == 'requests_per_conn') { + $def[$i] .= rrd::def ("var".$KEY, $VAL['RRDFILE'], $VAL['DS'], "AVERAGE"); + $def[$i] .= rrd::line1 ("var".$KEY, $color, rrd::cut($VAL['NAME'],16), 'STACK' ); + $def[$i] .= rrd::gprint ("var".$KEY, array("LAST","MAX","AVERAGE"), "%6.1lf/s"); + } +} + +# +# Connections per Second +# +$i++; +$def[$i] = ""; +$opt[$i] = " --title '$hostname: $servicedesc Connections/sec' -l 0"; +$ds_name[$i] = "Accepted/sec"; +$color = '#000000'; +foreach ($this->DS as $KEY=>$VAL) { + if($VAL['NAME'] == 'accepted_per_sec') { + $def[$i] .= rrd::def ("var".$KEY, $VAL['RRDFILE'], $VAL['DS'], "AVERAGE"); + $def[$i] .= rrd::line1 ("var".$KEY, $color, rrd::cut($VAL['NAME'],16), 'STACK' ); + $def[$i] .= rrd::gprint ("var".$KEY, array("LAST","MAX","AVERAGE"), "%6.1lf/s"); + } +} +?> diff -Nru check-mk-1.2.2p3/check_mk-ntp.php check-mk-1.2.6p12/check_mk-ntp.php --- check-mk-1.2.2p3/check_mk-ntp.php 2013-03-04 11:48:42.000000000 +0000 +++ check-mk-1.2.6p12/check_mk-ntp.php 2015-06-24 09:48:39.000000000 +0000 @@ -6,7 +6,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. @@ -29,17 +29,17 @@ $opt[1] = "--vertical-label 'offset (ms)' -l -$range -u $range --title '$hostname: NTP time offset to $peer' "; -$def[1] = "DEF:offset=$RRDFILE[1]:$DS[1]:MAX "; -$def[1] .= "DEF:jitter=$RRDFILE[2]:$DS[2]:MAX "; +$def[1] = "DEF:offset=$RRDFILE[1]:$DS[1]:MAX "; +$def[1] .= "DEF:jitter=$RRDFILE[2]:$DS[2]:MAX "; $def[1] .= "CDEF:offsetabs=offset,ABS "; -$def[1] .= "AREA:offset#4080ff:\"time offset \" "; -$def[1] .= "LINE1:offset#2060d0: "; -$def[1] .= "LINE2:jitter#10c000:jitter "; +$def[1] .= "AREA:offset#4080ff:\"time offset \" "; +$def[1] .= "LINE1:offset#2060d0: "; +$def[1] .= "LINE2:jitter#10c000:jitter "; $def[1] .= "HRULE:0#c0c0c0: "; $def[1] .= "HRULE:$WARN[1]#ffff00:\"\" "; $def[1] .= "HRULE:-$WARN[1]#ffff00:\"Warning\\: +/- $WARN[1] ms \" "; -$def[1] .= "HRULE:$CRIT[1]#ff0000:\"\" "; -$def[1] .= "HRULE:-$CRIT[1]#ff0000:\"Critical\\: +/- $CRIT[1] ms \\n\" "; +$def[1] .= "HRULE:$CRIT[1]#ff0000:\"\" "; +$def[1] .= "HRULE:-$CRIT[1]#ff0000:\"Critical\\: +/- $CRIT[1] ms \\n\" "; $def[1] .= "GPRINT:offset:LAST:\"current\: %.1lf ms\" "; $def[1] .= "GPRINT:offsetabs:MAX:\"max(+/-)\: %.1lf ms \" "; $def[1] .= "GPRINT:offsetabs:AVERAGE:\"avg(+/-)\: %.1lf ms\" "; diff -Nru check-mk-1.2.2p3/check_mk-ntp.time.php check-mk-1.2.6p12/check_mk-ntp.time.php --- check-mk-1.2.2p3/check_mk-ntp.time.php 2013-11-05 09:22:37.000000000 +0000 +++ check-mk-1.2.6p12/check_mk-ntp.time.php 2015-06-24 09:48:39.000000000 +0000 @@ -6,7 +6,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. 
@@ -25,20 +25,22 @@ $range = $CRIT[1]; -$opt[1] = "--vertical-label 'offset (ms)' -l -$range -u $range --title '$hostname: NTP time offset to preferred peer' "; +$opt[1] = "--vertical-label 'offset (ms)' -X 0 -l -$range -u $range --title '$hostname: NTP time offset to preferred peer' "; -$def[1] = "DEF:offset=$RRDFILE[1]:$DS[1]:MAX "; -$def[1] .= "DEF:jitter=$RRDFILE[2]:$DS[2]:MAX "; +$def[1] = "DEF:offset=$RRDFILE[1]:$DS[1]:MAX "; +if (isset($DS[2])) # missing for chrony + $def[1] .= "DEF:jitter=$RRDFILE[2]:$DS[2]:MAX "; $def[1] .= "CDEF:offsetabs=offset,ABS "; -$def[1] .= "AREA:offset#4080ff:\"time offset \" "; -$def[1] .= "LINE1:offset#2060d0: "; -$def[1] .= "LINE2:jitter#10c000:jitter "; +$def[1] .= "AREA:offset#4080ff:\"time offset \" "; +$def[1] .= "LINE1:offset#2060d0: "; +if (isset($DS[2])) # missing for chrony + $def[1] .= "LINE2:jitter#10c000:jitter "; $def[1] .= "HRULE:0#c0c0c0: "; $def[1] .= "HRULE:$WARN[1]#ffff00:\"\" "; $def[1] .= "HRULE:-$WARN[1]#ffff00:\"Warning\\: +/- $WARN[1] ms \" "; -$def[1] .= "HRULE:$CRIT[1]#ff0000:\"\" "; -$def[1] .= "HRULE:-$CRIT[1]#ff0000:\"Critical\\: +/- $CRIT[1] ms \\n\" "; -$def[1] .= "GPRINT:offset:LAST:\"current\: %.1lf ms\" "; -$def[1] .= "GPRINT:offsetabs:MAX:\"max(+/-)\: %.1lf ms \" "; -$def[1] .= "GPRINT:offsetabs:AVERAGE:\"avg(+/-)\: %.1lf ms\" "; +$def[1] .= "HRULE:$CRIT[1]#ff0000:\"\" "; +$def[1] .= "HRULE:-$CRIT[1]#ff0000:\"Critical\\: +/- $CRIT[1] ms \\n\" "; +$def[1] .= "GPRINT:offset:LAST:\"current\: %.4lf ms\" "; +$def[1] .= "GPRINT:offsetabs:MAX:\"max(+/-)\: %.4lf ms \" "; +$def[1] .= "GPRINT:offsetabs:AVERAGE:\"avg(+/-)\: %.4lf ms\" "; ?> diff -Nru check-mk-1.2.2p3/check_mk-nvidia.temp.php check-mk-1.2.6p12/check_mk-nvidia.temp.php --- check-mk-1.2.2p3/check_mk-nvidia.temp.php 2013-03-04 11:48:42.000000000 +0000 +++ check-mk-1.2.6p12/check_mk-nvidia.temp.php 2015-06-24 09:48:39.000000000 +0000 @@ -6,7 +6,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. @@ -23,35 +23,16 @@ # to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, # Boston, MA 02110-1301 USA. -$parts = explode("_", $servicedesc); -$sensorname = implode(" ", $parts); +$opt[1] = "--vertical-label \"Celsius\" -l 0 -u 40 --title \"Temperature $servicedesc\" "; -/* This is obsolete. ipmi_sensors does not longer send perfdata - for fans... 
-if ($parts[2] == "Fan") -{ - $opt[1] = "--vertical-label 'RPM' -X0 -l0 -u6000 --title \"$sensorname\" "; - - $def[1] = "DEF:rpm=$RRDFILE[1]:$DS[1]:MIN "; - $def[1] .= "AREA:rpm#0080a0:\"Rotations per minute\" "; - $def[1] .= "LINE:rpm#004060 "; - $def[1] .= "HRULE:$CRIT[1]#ff0000:\"Critical below $CRIT[1] RPM\" "; -} -else */ -if ($parts[2] == "Temperature") -{ - $upper = max(60, $CRIT[1] + 3); - $opt[1] = "--vertical-label '$CRIT[1] Celsius' -l0 -u$upper --title \"$sensorname\" "; - - $def[1] = "DEF:temp=$RRDFILE[1]:$DS[1]:MAX "; - $def[1] .= "AREA:temp#ffd040:\"temperature (max)\" "; - $def[1] .= "LINE:temp#ff8000 "; - $def[1] .= "HRULE:$CRIT[1]#ff0000:\"Critical at $CRIT[1] C\" "; +$def[1] = "DEF:var1=$RRDFILE[1]:$DS[1]:MAX "; +$def[1] .= "AREA:var1#2080ff:\"Temperature\:\" "; +$def[1] .= "GPRINT:var1:LAST:\"%2.0lfC\" "; +$def[1] .= "LINE1:var1#000080:\"\" "; +$def[1] .= "GPRINT:var1:MAX:\"(Max\: %2.0lfC,\" "; +$def[1] .= "GPRINT:var1:AVERAGE:\"Avg\: %2.0lfC)\" "; +if ($WARN[1] != "") { + $def[1] .= "HRULE:$WARN[1]#FFFF00:\"Warning\: $WARN[1]C\" "; + $def[1] .= "HRULE:$CRIT[1]#FF0000:\"Critical\: $CRIT[1]C\" "; } - -else { - include("check_mk-local.php"); -} - - ?> diff -Nru check-mk-1.2.2p3/check_mk.only_from check-mk-1.2.6p12/check_mk.only_from --- check-mk-1.2.2p3/check_mk.only_from 2013-11-05 09:23:07.000000000 +0000 +++ check-mk-1.2.6p12/check_mk.only_from 2015-06-24 09:48:36.000000000 +0000 @@ -1,6 +1,6 @@ title: Check IP restriction of Check_MK agent agents: linux, windows -author: Mathias Kettner +catalog: generic license: GPL distribution: check_mk description: diff -Nru check-mk-1.2.2p3/check_mk-openvpn_clients.php check-mk-1.2.6p12/check_mk-openvpn_clients.php --- check-mk-1.2.2p3/check_mk-openvpn_clients.php 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/check_mk-openvpn_clients.php 2015-06-24 09:48:39.000000000 +0000 @@ -0,0 +1,45 @@ + + diff -Nru check-mk-1.2.2p3/check_mk-oracle_asm_diskgroup.php check-mk-1.2.6p12/check_mk-oracle_asm_diskgroup.php --- check-mk-1.2.2p3/check_mk-oracle_asm_diskgroup.php 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/check_mk-oracle_asm_diskgroup.php 2015-09-21 10:59:54.000000000 +0000 @@ -0,0 +1,157 @@ + $n) { + $RRD[$n] = "$RRDFILE[$i]:$DS[$i]:MAX"; + $RRD_MIN[$n] = "$RRDFILE[$i]:$DS[$i]:MIN"; + $RRD_AVG[$n] = "$RRDFILE[$i]:$DS[$i]:AVERAGE"; + $WARN[$n] = $WARN[$i]; + $CRIT[$n] = $CRIT[$i]; + $MIN[$n] = $MIN[$i]; + $MAX[$n] = $MAX[$i]; + $ACT[$n] = $ACT[$i]; +} + +# RRDtool Options +#$servicedes=$NAGIOS_SERVICEDESC + +$fsname = str_replace("_", "/", substr($servicedesc,11)); +$fstitle = $fsname; + +# Hack for windows: replace C// with C:\ +if (strlen($fsname) == 3 && substr($fsname, 1, 2) == '//') { + $fsname = $fsname[0] . "\:\\\\"; + $fstitle = $fsname[0] . ":\\"; +} + +$sizegb = sprintf("%.1f", $MAX[1] / 1024.0); +$maxgb = $MAX[1] / 1024.0; +$warngb = $WARN[1] / 1024.0; +$critgb = $CRIT[1] / 1024.0; +$warngbtxt = sprintf("%.1f", $warngb); +$critgbtxt = sprintf("%.1f", $critgb); + +$opt[1] = "--vertical-label GB -l 0 -u $maxgb --title '$hostname: Filesystem $fstitle ($sizegb GB)' "; + +# First graph show current filesystem usage +$def[1] = "DEF:mb=$RRDFILE[1]:$DS[1]:MAX "; +$def[1] .= "CDEF:var1=mb,1024,/ "; +$def[1] .= "AREA:var1#00ffc6:\"used space on $fsname\\n\" "; + +# Optional uncommitted usage e.g. for esx hosts +if(isset($RRD['uncommitted'])) { + $def[1] .= "DEF:uncommitted_mb=".$RRD['uncommitted']." 
"; + $def[1] .= "CDEF:uncommitted_gb=uncommitted_mb,1024,/ "; + $def[1] .= "CDEF:total_gb=uncommitted_gb,var1,+ "; +} else { + $def[1] .= "CDEF:total_gb=var1 "; +} + +$def[1] .= "HRULE:$maxgb#003300:\"Size ($sizegb GB) \" "; +$def[1] .= "HRULE:$warngb#ffff00:\"Warning at $warngbtxt GB \" "; +$def[1] .= "HRULE:$critgb#ff0000:\"Critical at $critgbtxt GB \\n\" "; +$def[1] .= "GPRINT:var1:LAST:\"current\: %6.2lf GB\" "; +$def[1] .= "GPRINT:var1:MAX:\"max\: %6.2lf GB \" "; +$def[1] .= "GPRINT:var1:AVERAGE:\"avg\: %6.2lf GB\\n\" "; + +if(isset($RRD['uncommitted'])) { + $def[1] .= "AREA:uncommitted_gb#eeccff:\"Uncommited\":STACK "; + $def[1] .= "GPRINT:uncommitted_gb:MAX:\"%6.2lf GB\l\" "; +} + +$def[1] .= "LINE1:total_gb#226600 "; + +# Second graph is optional and shows trend. The MAX field +# of the third variable contains (size of the filesystem in MB +# / range in hours). From that we can compute the configured range. +if (isset($RRD['growth'])) { + $size_mb_per_hours = floatval($MAX['trend']); // this is size_mb / range(hours) + $size_mb = floatval($MAX[1]); + $hours = 1.0 / ($size_mb_per_hours / $size_mb); + $range = sprintf("%.0fh", $hours); + + // Current growth / shrinking. This value is give as MB / 24 hours. + // Note: This has changed in 1.1.13i3. Prior it was MB / trend_range! + $opt[2] = "--vertical-label '+/- MB / 24h' -l -1 -u 1 -X0 --title '$hostname: Growth of $fstitle' "; + $def[2] = "DEF:growth_max=${RRD['growth']} "; + $def[2] .= "DEF:growth_min=${RRD_MIN['growth']} "; + $def[2] .= "DEF:trend=${RRD_AVG['trend']} "; + $def[2] .= "CDEF:growth_pos=growth_max,0,MAX "; + $def[2] .= "CDEF:growth_neg=growth_min,0,MIN "; + $def[2] .= "CDEF:growth_minabs=0,growth_min,- "; + $def[2] .= "CDEF:growth=growth_minabs,growth_max,MAX "; + $def[2] .= "HRULE:0#c0c0c0 "; + $def[2] .= "AREA:growth_pos#3060f0:\"Grow\" "; + $def[2] .= "AREA:growth_neg#30f060:\"Shrink \" "; + $def[2] .= "GPRINT:growth:LAST:\"Current\: %+9.2lfMB / 24h\" "; + $def[2] .= "GPRINT:growth:MAX:\"Max\: %+9.2lfMB / 24h\\n\" "; + + // Trend + $opt[3] = "--vertical-label '+/- MB / 24h' -l -1 -u 1 -X0 --title '$hostname: Trend for $fstitle' "; + $def[3] = "DEF:trend=${RRD_AVG['trend']} "; + $def[3] .= "HRULE:0#c0c0c0 "; + $def[3] .= "LINE1:trend#000000:\"Trend\:\" "; + $def[3] .= "GPRINT:trend:LAST:\"%+7.2lf MB/24h\" "; + if ($WARN['trend']) { + $warn_mb = sprintf("%.2fMB", $WARN['trend'] * $hours / 24.0); + $def[3] .= "LINE1:${WARN['trend']}#ffff00:\"Warn\: $warn_mb / $range\" "; + } + if ($CRIT['trend']) { + $crit_mb = sprintf("%.2fMB", $CRIT['trend'] * $hours / 24.0); + $def[3] .= "LINE1:${CRIT['trend']}#ff0000:\"Crit\: $crit_mb / $range\" "; + } + $def[3] .= "COMMENT:\"\\n\" "; +} + +if (isset($RRD['trend_hoursleft'])) { + // Trend + $opt[4] = "--vertical-label 'Days left' -l -1 -u 365 -X0 --title '$hostname: Days left for $fstitle' "; + $def[4] = "DEF:hours_left=${RRD_AVG['trend_hoursleft']} "; + $def[4] .= "DEF:hours_left_min=${RRD_MIN['trend_hoursleft']} "; + // negative hours indicate no growth + // the dataset hours_left_isneg stores this info for each point as True/False + $def[4] .= "CDEF:hours_left_isneg=hours_left_min,-1,EQ "; + $def[4] .= "CDEF:hours_left_unmon=hours_left_min,400,0,IF "; + $def[4] .= "CDEF:days_left=hours_left,24,/ "; + $def[4] .= "CDEF:days_left_cap=days_left,400,MIN "; + // Convert negative points to 400 (y-axis cap) + $def[4] .= "CDEF:days_left_cap_positive=hours_left_isneg,400,days_left_cap,IF "; + // The AREA has a rendering problem. 
Points are too far to the right + $def[4] .= "AREA:hours_left_unmon#AA2200: "; + + $def[4] .= "AREA:days_left_cap_positive#22AA44:\"Days left\:\" "; + if ($ACT[4] == -1) + { + $def[4] .= "COMMENT:\"Not growing\" "; + } + else { + $def[4] .= "GPRINT:days_left:LAST:\"%7.2lf days\" "; + } +} +?> diff -Nru check-mk-1.2.2p3/check_mk-oracle_dataguard_stats.php check-mk-1.2.6p12/check_mk-oracle_dataguard_stats.php --- check-mk-1.2.2p3/check_mk-oracle_dataguard_stats.php 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/check_mk-oracle_dataguard_stats.php 2015-06-24 09:48:39.000000000 +0000 @@ -0,0 +1,37 @@ + diff -Nru check-mk-1.2.2p3/check_mk-oracle_instance.php check-mk-1.2.6p12/check_mk-oracle_instance.php --- check-mk-1.2.2p3/check_mk-oracle_instance.php 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/check_mk-oracle_instance.php 2015-06-24 09:48:39.000000000 +0000 @@ -0,0 +1,34 @@ + diff -Nru check-mk-1.2.2p3/check_mk-oracle_logswitches.php check-mk-1.2.6p12/check_mk-oracle_logswitches.php --- check-mk-1.2.2p3/check_mk-oracle_logswitches.php 2013-03-04 11:48:42.000000000 +0000 +++ check-mk-1.2.6p12/check_mk-oracle_logswitches.php 2015-06-24 09:48:39.000000000 +0000 @@ -6,7 +6,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. diff -Nru check-mk-1.2.2p3/check_mk-oracle_processes.php check-mk-1.2.6p12/check_mk-oracle_processes.php --- check-mk-1.2.2p3/check_mk-oracle_processes.php 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/check_mk-oracle_processes.php 2015-06-24 09:48:39.000000000 +0000 @@ -0,0 +1,38 @@ + diff -Nru check-mk-1.2.2p3/check_mk-oracle_recovery_status.php check-mk-1.2.6p12/check_mk-oracle_recovery_status.php --- check-mk-1.2.2p3/check_mk-oracle_recovery_status.php 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/check_mk-oracle_recovery_status.php 2015-06-24 09:48:39.000000000 +0000 @@ -0,0 +1,34 @@ + diff -Nru check-mk-1.2.2p3/check_mk-oracle_sessions.php check-mk-1.2.6p12/check_mk-oracle_sessions.php --- check-mk-1.2.2p3/check_mk-oracle_sessions.php 2013-03-04 11:48:42.000000000 +0000 +++ check-mk-1.2.6p12/check_mk-oracle_sessions.php 2015-06-24 09:48:39.000000000 +0000 @@ -6,7 +6,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. diff -Nru check-mk-1.2.2p3/check_mk-oracle_tablespaces.php check-mk-1.2.6p12/check_mk-oracle_tablespaces.php --- check-mk-1.2.2p3/check_mk-oracle_tablespaces.php 2013-03-04 11:48:42.000000000 +0000 +++ check-mk-1.2.6p12/check_mk-oracle_tablespaces.php 2015-06-24 09:48:39.000000000 +0000 @@ -6,7 +6,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. 
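The trend graphs in the filesystem template above rely on a convention of the perfdata: MAX['trend'] stores size_mb divided by the configured trend range in hours, so the range can be recovered arithmetically, and the "days left" graph caps its y-axis at 400 days, treating negative hours as "not growing". A sketch of that arithmetic (standalone helpers written for illustration, not code from the template):

    # Sketch of the trend arithmetic used above. MAX['trend'] stores
    # size_mb / range_hours, so the configured range can be recovered from it.
    # Illustrative helpers, not part of the shipped template.
    def trend_range_hours(size_mb, size_mb_per_hours):
        return size_mb / size_mb_per_hours

    def days_left_capped(hours_left, cap_days=400):
        # negative hours signal "not growing"; the graph caps the y-axis
        if hours_left < 0:
            return cap_days
        return min(hours_left / 24.0, cap_days)

    trend_range_hours(102400.0, 4266.67)   # ~24h range on a 100 GB filesystem
    days_left_capped(480.0)                # -> 20.0 days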
diff -Nru check-mk-1.2.2p3/check-mk.php check-mk-1.2.6p12/check-mk.php --- check-mk-1.2.2p3/check-mk.php 2013-11-05 09:22:41.000000000 +0000 +++ check-mk-1.2.6p12/check-mk.php 2015-06-24 09:48:39.000000000 +0000 @@ -6,7 +6,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. @@ -27,9 +27,9 @@ $opt[1] = "--vertical-label 'time (s)' -l 0 --title '$hostname: Check_MK check execution time' "; -$def[1] = "DEF:extime=$RRDFILE[1]:$DS[1]:MAX "; -$def[1] .= "AREA:extime#d080af:\"execution time \" "; -$def[1] .= "LINE1:extime#d020a0: "; +$def[1] = "DEF:extime=$RRDFILE[1]:$DS[1]:MAX "; +$def[1] .= "AREA:extime#d080af:\"execution time \" "; +$def[1] .= "LINE1:extime#d020a0: "; $def[1] .= "GPRINT:extime:LAST:\"last\: %8.2lf s\" "; $def[1] .= "GPRINT:extime:MAX:\"max\: %8.2lf s \" "; $def[1] .= "GPRINT:extime:AVERAGE:\"avg\: %8.2lf s\\n\" "; @@ -37,26 +37,26 @@ if (isset($RRDFILE[2])) { $opt[2] = "--vertical-label 'time (s)' -l 0 --title '$hostname: Check_MK process times' "; -$def[2] = "DEF:user_time=$RRDFILE[2]:$DS[1]:MAX "; -$def[2] .= "LINE1:user_time#d020a0:\"user time\" "; -$def[2] .= "GPRINT:user_time:LAST:\" last\: %8.2lf s\" "; +$def[2] = "DEF:user_time=$RRDFILE[2]:$DS[1]:MAX "; +$def[2] .= "LINE1:user_time#d020a0:\"user time\" "; +$def[2] .= "GPRINT:user_time:LAST:\" last\: %8.2lf s\" "; $def[2] .= "GPRINT:user_time:MAX:\"max\: %8.2lf s \" "; $def[2] .= "GPRINT:user_time:AVERAGE:\"avg\: %8.2lf s\\n\" "; -$def[2] .= "DEF:system_time=$RRDFILE[3]:$DS[1]:MAX "; -$def[2] .= "LINE1:system_time#d08400:\"system time\" "; +$def[2] .= "DEF:system_time=$RRDFILE[3]:$DS[1]:MAX "; +$def[2] .= "LINE1:system_time#d08400:\"system time\" "; $def[2] .= "GPRINT:system_time:LAST:\" last\: %8.2lf s\" "; $def[2] .= "GPRINT:system_time:MAX:\"max\: %8.2lf s \" "; $def[2] .= "GPRINT:system_time:AVERAGE:\"avg\: %8.2lf s\\n\" "; -$def[2] .= "DEF:children_user_time=$RRDFILE[4]:$DS[1]:MAX "; -$def[2] .= "LINE1:children_user_time#308400:\"childr. user time \" "; +$def[2] .= "DEF:children_user_time=$RRDFILE[4]:$DS[1]:MAX "; +$def[2] .= "LINE1:children_user_time#308400:\"childr. user time \" "; $def[2] .= "GPRINT:children_user_time:LAST:\" last\: %8.2lf s\" "; $def[2] .= "GPRINT:children_user_time:MAX:\"max\: %8.2lf s \" "; $def[2] .= "GPRINT:children_user_time:AVERAGE:\"avg\: %8.2lf s\\n\" "; -$def[2] .= "DEF:children_system_time=$RRDFILE[5]:$DS[1]:MAX "; -$def[2] .= "LINE1:children_system_time#303400:\"childr. system time\" "; +$def[2] .= "DEF:children_system_time=$RRDFILE[5]:$DS[1]:MAX "; +$def[2] .= "LINE1:children_system_time#303400:\"childr. system time\" "; $def[2] .= "GPRINT:children_system_time:LAST:\"last\: %8.2lf s\" "; $def[2] .= "GPRINT:children_system_time:MAX:\"max\: %8.2lf s \" "; $def[2] .= "GPRINT:children_system_time:AVERAGE:\"avg\: %8.2lf s\\n\" "; diff -Nru check-mk-1.2.2p3/check-mk-ping.php check-mk-1.2.6p12/check-mk-ping.php --- check-mk-1.2.2p3/check-mk-ping.php 2013-03-04 11:48:41.000000000 +0000 +++ check-mk-1.2.6p12/check-mk-ping.php 2015-06-24 09:48:39.000000000 +0000 @@ -6,7 +6,7 @@ # | | |___| | | | __/ (__| < | | | | . 
\ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. @@ -24,7 +24,7 @@ # Boston, MA 02110-1301 USA. $ds_name[1] = "Round Trip Averages"; -$opt[1] = "--vertical-label \"RTA (ms)\" --title \"Ping times for $hostname\" "; +$opt[1] = "--vertical-label \"RTA (ms)\" -X0 --title \"Ping times for $hostname\" "; $def[1] = "DEF:var1=$RRDFILE[1]:$DS[1]:AVERAGE " ; $def[1] .= "DEF:var2=$RRDFILE[2]:$DS[2]:MAX " ; $def[1] .= "VDEF:maxrta=var1,MAXIMUM " ; diff -Nru check-mk-1.2.2p3/check_mk-postfix_mailq.php check-mk-1.2.6p12/check_mk-postfix_mailq.php --- check-mk-1.2.2p3/check_mk-postfix_mailq.php 2013-03-04 11:48:42.000000000 +0000 +++ check-mk-1.2.6p12/check_mk-postfix_mailq.php 2015-06-24 09:48:39.000000000 +0000 @@ -6,7 +6,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. diff -Nru check-mk-1.2.2p3/check_mk-printer_pages.php check-mk-1.2.6p12/check_mk-printer_pages.php --- check-mk-1.2.2p3/check_mk-printer_pages.php 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/check_mk-printer_pages.php 2015-06-24 09:48:39.000000000 +0000 @@ -0,0 +1,32 @@ + diff -Nru check-mk-1.2.2p3/check_mk-printer_supply.php check-mk-1.2.6p12/check_mk-printer_supply.php --- check-mk-1.2.2p3/check_mk-printer_supply.php 2013-03-04 11:48:42.000000000 +0000 +++ check-mk-1.2.6p12/check_mk-printer_supply.php 2015-06-24 09:48:39.000000000 +0000 @@ -6,7 +6,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. diff -Nru check-mk-1.2.2p3/check_mk-ps.perf.php check-mk-1.2.6p12/check_mk-ps.perf.php --- check-mk-1.2.2p3/check_mk-ps.perf.php 2013-11-05 09:22:37.000000000 +0000 +++ check-mk-1.2.6p12/check_mk-ps.perf.php 2015-06-24 09:48:39.000000000 +0000 @@ -6,7 +6,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. @@ -23,48 +23,77 @@ # to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, # Boston, MA 02110-1301 USA. +# The number of data source various due to different +# settings (such as averaging). We rather work with names +# than with numbers. +$RRD = array(); +foreach ($NAME as $i => $n) { + $RRD[$n] = "$RRDFILE[$i]:$DS[$i]:MAX"; + $WARN[$n] = $WARN[$i]; + $CRIT[$n] = $CRIT[$i]; + $MIN[$n] = $MIN[$i]; + $MAX[$n] = $MAX[$i]; +} + + +# 1. 
Graph: Number of processes $vertical = "count"; $format = "%3.0lf"; -$upto = max(20, $CRIT[1]); +$upto = max(20, $CRIT["count"]); $color = "8040f0"; $line = "202060"; $opt[1] = " --vertical-label \"count\" -X0 -L5 -l 0 -u $upto --title \"Number of Processes\" "; -$def[1] = "DEF:count=$RRDFILE[1]:$DS[1]:MAX "; -$def[1] .= "AREA:count#$color:\"Processes\" "; -$def[1] .= "LINE1:count#$line:\"\" "; -$def[1] .= "GPRINT:count:LAST:\"Current\: $format\" "; -$def[1] .= "GPRINT:count:MAX:\"Maximum\: $format \" "; -$def[1] .= "HRULE:$WARN[1]#FFFF00:\"Warning at $WARN[1]\" "; -$def[1] .= "HRULE:$CRIT[1]#FF0000:\"Critical at $CRIT[1]\" "; - -if (isset($DS[2])) { - $opt[2] = " --vertical-label \"MB\" -l 0 --title \"Memory Usage per process\" "; - $def[2] = "DEF:count=$RRDFILE[1]:$DS[1]:MAX "; - $def[2] .= "DEF:vsz=$RRDFILE[2]:$DS[2]:MAX "; - $def[2] .= "DEF:rss=$RRDFILE[3]:$DS[3]:MAX "; - $def[2] .= "CDEF:vszmb=vsz,1024,/,count,/ "; - $def[2] .= "CDEF:rssmb=rss,1024,/,count,/ "; - $def[2] .= "AREA:vszmb#90a0f0:\"Virtual size \" "; - $def[2] .= "GPRINT:vszmb:LAST:\"Current\: %5.1lf MB\" "; - $def[2] .= "GPRINT:vszmb:MIN:\"Min\: %5.1lf MB\" "; - $def[2] .= "GPRINT:vszmb:MAX:\"Max\: %5.1lf MB\" "; - $def[2] .= "AREA:rssmb#2070ff:\"Resident size\" "; - $def[2] .= "GPRINT:rssmb:LAST:\"Current\: %5.1lf MB\" "; - $def[2] .= "GPRINT:rssmb:MIN:\"Min\: %5.1lf MB\" "; - $def[2] .= "GPRINT:rssmb:MAX:\"Max\: %5.1lf MB\" "; +$def[1] = "" + . "DEF:count=$RRD[count] " + . "AREA:count#$color:\"Processes\" " + . "LINE1:count#$line:\"\" " + . "GPRINT:count:LAST:\"Current\: $format\" " + . "GPRINT:count:MAX:\"Maximum\: $format \" " + . "HRULE:$WARN[count]#FFFF00:\"Warning at $WARN[count]\" " + . "HRULE:$CRIT[count]#FF0000:\"Critical at $CRIT[count]\" " + ; + +# 2. Graph: Memory usage +if (isset($RRD["vsz"])) { + $opt[2] = " --vertical-label \"MB\" -l 0 --title \"Memory Usage per process\" "; + $def[2] = "" + . "DEF:count=$RRD[count] " + . "DEF:vsz=$RRD[vsz] " + . "DEF:rss=$RRD[rss] " + . "CDEF:vszmb=vsz,1024,/,count,/ " + . "CDEF:rssmb=rss,1024,/,count,/ " + . "AREA:vszmb#90a0f0:\"Virtual size \" " + . "GPRINT:vszmb:LAST:\"Current\: %5.1lf MB\" " + . "GPRINT:vszmb:MIN:\"Min\: %5.1lf MB\" " + . "GPRINT:vszmb:MAX:\"Max\: %5.1lf MB\" " + . "AREA:rssmb#2070ff:\"Resident size\" " + . "GPRINT:rssmb:LAST:\"Current\: %5.1lf MB\" " + . "GPRINT:rssmb:MIN:\"Min\: %5.1lf MB\" " + . "GPRINT:rssmb:MAX:\"Max\: %5.1lf MB\" " + ; } -if (isset($DS[3])) { - $opt[3] = " --vertical-label \"CPU(%)\" -l 0 -u 100 --title \"CPU Usage\" "; - $def[3] = "DEF:pcpu=$RRDFILE[4]:$DS[4]:MAX "; - $def[3] .= "AREA:pcpu#30ff80:\"CPU usage (%) \" "; - $def[3] .= "LINE:pcpu#20a060:\"\" "; - $def[3] .= "GPRINT:pcpu:LAST:\"Current\: %4.1lf %%\" "; - $def[3] .= "GPRINT:pcpu:MIN:\"Min\: %4.1lf %%\" "; - $def[3] .= "GPRINT:pcpu:MAX:\"Max\: %4.1lf %%\" "; +if (isset($RRD["pcpu"])) { + $opt[3] = " --vertical-label \"CPU(%)\" -l 0 -u 100 --title \"CPU Usage\" "; + $def[3] = "" + . "DEF:pcpu=$RRD[pcpu] " + . "AREA:pcpu#30ff80:\"CPU usage (%) \" " + . "LINE:pcpu#20a060:\"\" " + . "GPRINT:pcpu:LAST:\"Current\: %4.1lf%%\" " + . "GPRINT:pcpu:MIN:\"Min\: %4.1lf%%\" " + . 
"GPRINT:pcpu:MAX:\"Max\: %4.1lf%%\\n\" "; + + if ($WARN['pcpu'] != '') + $def[3] .= "HRULE:$WARN[pcpu]#FFFF00:\"Warning at $WARN[pcpu]%\" "; + if ($CRIT['pcpu'] != '') + $def[3] .= "HRULE:$CRIT[pcpu]#FF0000:\"Critical at $CRIT[pcpu]%\" "; + + if (isset($RRD["pcpuavg"])) { + $def[3] .= "DEF:pcpuavg=$RRD[pcpuavg] "; + $def[3] .= "LINE:pcpuavg#000000:\"Average over $MAX[pcpuavg] minutes\\n\" "; + } } - ?> diff -Nru check-mk-1.2.2p3/check_mk-ps.php check-mk-1.2.6p12/check_mk-ps.php --- check-mk-1.2.2p3/check_mk-ps.php 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/check_mk-ps.php 2015-06-24 09:48:39.000000000 +0000 @@ -0,0 +1,99 @@ + $n) { + $RRD[$n] = "$RRDFILE[$i]:$DS[$i]:MAX"; + $WARN[$n] = $WARN[$i]; + $CRIT[$n] = $CRIT[$i]; + $MIN[$n] = $MIN[$i]; + $MAX[$n] = $MAX[$i]; +} + + +# 1. Graph: Number of processes +$vertical = "count"; +$format = "%3.0lf"; +$upto = max(20, $CRIT["count"]); +$color = "8040f0"; +$line = "202060"; + +$opt[1] = " --vertical-label \"count\" -X0 -L5 -l 0 -u $upto --title \"Number of Processes\" "; + +$def[1] = "" + . "DEF:count=$RRD[count] " + . "AREA:count#$color:\"Processes\" " + . "LINE1:count#$line:\"\" " + . "GPRINT:count:LAST:\"Current\: $format\" " + . "GPRINT:count:MAX:\"Maximum\: $format \" " + . "HRULE:$WARN[count]#FFFF00:\"Warning at $WARN[count]\" " + . "HRULE:$CRIT[count]#FF0000:\"Critical at $CRIT[count]\" " + ; + +# 2. Graph: Memory usage +if (isset($RRD["vsz"])) { + $opt[2] = " --vertical-label \"MB\" -l 0 --title \"Memory Usage per process\" "; + $def[2] = "" + . "DEF:count=$RRD[count] " + . "DEF:vsz=$RRD[vsz] " + . "DEF:rss=$RRD[rss] " + . "CDEF:vszmb=vsz,1024,/,count,/ " + . "CDEF:rssmb=rss,1024,/,count,/ " + . "AREA:vszmb#90a0f0:\"Virtual size \" " + . "GPRINT:vszmb:LAST:\"Current\: %5.1lf MB\" " + . "GPRINT:vszmb:MIN:\"Min\: %5.1lf MB\" " + . "GPRINT:vszmb:MAX:\"Max\: %5.1lf MB\" " + . "AREA:rssmb#2070ff:\"Resident size\" " + . "GPRINT:rssmb:LAST:\"Current\: %5.1lf MB\" " + . "GPRINT:rssmb:MIN:\"Min\: %5.1lf MB\" " + . "GPRINT:rssmb:MAX:\"Max\: %5.1lf MB\" " + ; +} + +if (isset($RRD["pcpu"])) { + $opt[3] = " --vertical-label \"CPU(%)\" -l 0 -u 100 --title \"CPU Usage\" "; + $def[3] = "" + . "DEF:pcpu=$RRD[pcpu] " + . "AREA:pcpu#30ff80:\"CPU usage (%) \" " + . "LINE:pcpu#20a060:\"\" " + . "GPRINT:pcpu:LAST:\"Current\: %4.1lf%%\" " + . "GPRINT:pcpu:MIN:\"Min\: %4.1lf%%\" " + . "GPRINT:pcpu:MAX:\"Max\: %4.1lf%%\\n\" "; + + if ($WARN['pcpu'] != '') + $def[3] .= "HRULE:$WARN[pcpu]#FFFF00:\"Warning at $WARN[pcpu]%\" "; + if ($CRIT['pcpu'] != '') + $def[3] .= "HRULE:$CRIT[pcpu]#FF0000:\"Critical at $CRIT[pcpu]%\" "; + + if (isset($RRD["pcpuavg"])) { + $def[3] .= "DEF:pcpuavg=$RRD[pcpuavg] "; + $def[3] .= "LINE:pcpuavg#000000:\"Average over $MAX[pcpuavg] minutes\\n\" "; + } +} + +?> diff -Nru check-mk-1.2.2p3/check_mk.py check-mk-1.2.6p12/check_mk.py --- check-mk-1.2.2p3/check_mk.py 2013-11-05 09:23:07.000000000 +0000 +++ check-mk-1.2.6p12/check_mk.py 2015-09-21 10:59:54.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. @@ -24,32 +24,51 @@ # to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, # Boston, MA 02110-1301 USA. -# This file is also read in by check_mk's web pages. 
In that case, -# the variable check_mk_web is set to True - import os, sys, socket, time, getopt, glob, re, stat, py_compile, urllib, inspect +import subprocess # These variable will be substituted at 'make dist' time check_mk_version = '(inofficial)' +# .--Prelude-------------------------------------------------------------. +# | ____ _ _ | +# | | _ \ _ __ ___| |_ _ __| | ___ | +# | | |_) | '__/ _ \ | | | |/ _` |/ _ \ | +# | | __/| | | __/ | |_| | (_| | __/ | +# | |_| |_| \___|_|\__,_|\__,_|\___| | +# | | +# +----------------------------------------------------------------------+ +# | Pre-Parsing of some command line options that are needed before | +# | the main function. | +# '----------------------------------------------------------------------' + # Some things have to be done before option parsing and might # want to output some verbose messages. g_profile = None g_profile_path = 'profile.out' -if __name__ == "__main__": - opt_debug = '--debug' in sys.argv[1:] - opt_verbose = opt_debug or '-v' in sys.argv[1:] or '--verbose' in sys.argv[1:] - if '--profile' in sys.argv[1:]: - import cProfile - g_profile = cProfile.Profile() - g_profile.enable() - if opt_verbose: - sys.stderr.write("Enabled profiling.\n") +opt_debug = '--debug' in sys.argv[1:] +opt_interactive = '--interactive' in sys.argv[1:] +opt_verbose = ('-v' in sys.argv[1:] or '--verbose' in sys.argv[1:]) and 1 or 0 + +if '--profile' in sys.argv[1:]: + import cProfile + g_profile = cProfile.Profile() + g_profile.enable() + if opt_verbose: + sys.stderr.write("Enabled profiling.\n") + + +#. +# .--Pathnames-----------------------------------------------------------. +# | ____ _ _ | +# | | _ \ __ _| |_| |__ _ __ __ _ _ __ ___ ___ ___ | +# | | |_) / _` | __| '_ \| '_ \ / _` | '_ ` _ \ / _ \/ __| | +# | | __/ (_| | |_| | | | | | | (_| | | | | | | __/\__ \ | +# | |_| \__,_|\__|_| |_|_| |_|\__,_|_| |_| |_|\___||___/ | +# | | +# '----------------------------------------------------------------------' -else: - opt_verbose = False - opt_debug = False # are we running OMD? If yes, honor local/ hierarchy omd_root = os.getenv("OMD_ROOT", None) @@ -57,8 +76,11 @@ local_share = omd_root + "/local/share/check_mk" local_checks_dir = local_share + "/checks" local_notifications_dir = local_share + "/notifications" + local_inventory_dir = local_share + "/inventory" local_check_manpages_dir = local_share + "/checkman" local_agents_dir = local_share + "/agents" + local_special_agents_dir = local_agents_dir + "/special" + local_mibs_dir = local_share + "/mibs" local_web_dir = local_share + "/web" local_pnp_templates_dir = local_share + "/pnp-templates" local_doc_dir = omd_root + "/local/share/doc/check_mk" @@ -66,24 +88,16 @@ else: local_checks_dir = None local_notifications_dir = None + local_inventory_dir = None local_check_manpages_dir = None local_agents_dir = None + local_special_agents_dir = None + local_mibs_dir = None local_web_dir = None local_pnp_templates_dir = None local_doc_dir = None local_locale_dir = None - - -# +----------------------------------------------------------------------+ -# | ____ _ _ | -# | | _ \ __ _| |_| |__ _ __ __ _ _ __ ___ ___ ___ | -# | | |_) / _` | __| '_ \| '_ \ / _` | '_ ` _ \ / _ \/ __| | -# | | __/ (_| | |_| | | | | | | (_| | | | | | | __/\__ \ | -# | |_| \__,_|\__|_| |_|_| |_|\__,_|_| |_| |_|\___||___/ | -# | | -# +----------------------------------------------------------------------+ - # Pathnames, directories and other settings. 
All these settings # should be overriden by /usr/share/check_mk/modules/defaults, which # is created by setup.sh. The user might override those values again @@ -93,6 +107,7 @@ check_mk_configdir = default_config_dir + "/conf.d" checks_dir = '/usr/share/check_mk/checks' notifications_dir = '/usr/share/check_mk/notifications' +inventory_dir = '/usr/share/check_mk/inventory' agents_dir = '/usr/share/check_mk/agents' check_manpages_dir = '/usr/share/doc/check_mk/checks' modules_dir = '/usr/share/check_mk/modules' @@ -111,11 +126,8 @@ nagios_binary = '/usr/sbin/nagios' nagios_config_file = '/etc/nagios/nagios.cfg' logwatch_notes_url = "/nagios/logwatch.php?host=%s&file=%s" - -def verbose(t): - if opt_verbose: - sys.stderr.write(t) - sys.stderr.flush() +rrdcached_socket = None # used by prediction.py +rrd_path = None # used by prediction.py # During setup a file called defaults is created in the modules @@ -130,11 +142,9 @@ if len(sys.argv) >= 2 and sys.argv[1] == '--defaults': defaults_path = sys.argv[2] del sys.argv[1:3] -elif __name__ == "__main__": +else: defaults_path = os.path.dirname(sys.argv[0]) + "/defaults" -if opt_debug: - sys.stderr.write("Reading default settings from %s\n" % defaults_path) try: execfile(defaults_path) except Exception, e: @@ -151,225 +161,134 @@ # 1. if present - the option '-c' specifies the path to main.mk # 2. in the default_config_dir (that path should be present in modules/defaults) +try: + i = sys.argv.index('-c') + if i > 0 and i < len(sys.argv)-1: + check_mk_configfile = sys.argv[i+1] + parts = check_mk_configfile.split('/') + if len(parts) > 1: + check_mk_basedir = check_mk_configfile.rsplit('/',1)[0] + else: + check_mk_basedir = "." # no / contained in filename -if __name__ == "__main__": - try: - i = sys.argv.index('-c') - if i > 0 and i < len(sys.argv)-1: - check_mk_configfile = sys.argv[i+1] - parts = check_mk_configfile.split('/') - if len(parts) > 1: - check_mk_basedir = check_mk_configfile.rsplit('/',1)[0] - else: - check_mk_basedir = "." 
# no / contained in filename - - if not os.path.exists(check_mk_basedir): - sys.stderr.write("Directory %s does not exist.\n" % check_mk_basedir) - sys.exit(1) + if not os.path.exists(check_mk_basedir): + sys.stderr.write("Directory %s does not exist.\n" % check_mk_basedir) + sys.exit(1) - if not os.path.exists(check_mk_configfile): - sys.stderr.write("Missing configuration file %s.\n" % check_mk_configfile) - sys.exit(1) - else: - sys.stderr.write("Missing argument to option -c.\n") + if not os.path.exists(check_mk_configfile): + sys.stderr.write("Missing configuration file %s.\n" % check_mk_configfile) sys.exit(1) - except ValueError: - if not os.path.exists(default_config_dir + "/main.mk"): - sys.stderr.write("Missing main configuration file %s/main.mk\n" % default_config_dir) - sys.exit(4) - check_mk_basedir = default_config_dir - check_mk_configfile = check_mk_basedir + "/main.mk" + # Also rewrite the location of the conf.d directory + if os.path.exists(check_mk_basedir + "/conf.d"): + check_mk_configdir = check_mk_basedir + "/conf.d" - except SystemExit, exitcode: - sys.exit(exitcode) + else: + sys.stderr.write("Missing argument to option -c.\n") + sys.exit(1) -else: +except ValueError: + if not os.path.exists(default_config_dir + "/main.mk"): + sys.stderr.write("Missing main configuration file %s/main.mk\n" % default_config_dir) + sys.exit(4) check_mk_basedir = default_config_dir - check_mk_configfile = default_config_dir + "/main.mk" + check_mk_configfile = check_mk_basedir + "/main.mk" +except SystemExit, exitcode: + sys.exit(exitcode) -# +----------------------------------------------------------------------+ -# | ____ _ ____ __ _ _ | -# | / ___| ___| |_ | _ \ ___ / _| __ _ _ _| | |_ ___ | -# | \___ \ / _ \ __| | | | |/ _ \ |_ / _` | | | | | __/ __| | -# | ___) | __/ |_ | |_| | __/ _| (_| | |_| | | |_\__ \ | -# | |____/ \___|\__| |____/ \___|_| \__,_|\__,_|_|\__|___/ | + +#. +# .--Constants-----------------------------------------------------------. +# | ____ _ _ | +# | / ___|___ _ __ ___| |_ __ _ _ __ | |_ ___ | +# | | | / _ \| '_ \/ __| __/ _` | '_ \| __/ __| | +# | | |__| (_) | | | \__ \ || (_| | | | | |_\__ \ | +# | \____\___/|_| |_|___/\__\__,_|_| |_|\__|___/ | # | | # +----------------------------------------------------------------------+ +# | Some constants to be used in the configuration and at other places | +# '----------------------------------------------------------------------' -# Before we read the configuration files we create default settings -# for all variables. The user can easily override them. - -# define magic keys for use in host extraconf lists +# Conveniance macros for host and service rules PHYSICAL_HOSTS = [ '@physical' ] # all hosts but not clusters CLUSTER_HOSTS = [ '@cluster' ] # all cluster hosts ALL_HOSTS = [ '@all' ] # physical and cluster hosts ALL_SERVICES = [ "" ] # optical replacement" NEGATE = '@negate' # negation in boolean lists -# Basic Settings -agent_port = 6556 -agent_ports = [] -snmp_ports = [] # UDP ports used for SNMP -tcp_connect_timeout = 5.0 -delay_precompile = False # delay Python compilation to Nagios execution -restart_locking = "abort" # also possible: "wait", None -check_submission = "file" # alternative: "pipe" -aggr_summary_hostname = "%s-s" -agent_min_version = 0 # warn, if plugin has not at least version -check_max_cachefile_age = 0 # per default do not use cache files when checking -cluster_max_cachefile_age = 90 # secs. 
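The option pre-parsing earlier in this hunk resolves the configuration in two steps: an explicit "-c PATH" wins (and, new in this release, a conf.d directory next to that file also replaces the default one), otherwise the packaged main.mk is used. A condensed sketch of that precedence, with error handling simplified for illustration:

    # Condensed sketch of the main.mk resolution order implemented above:
    # an explicit "-c PATH" wins, otherwise the packaged default is used.
    # Illustrative only; error handling is simplified.
    import os

    def resolve_main_mk(argv, default_config_dir):
        if "-c" in argv:
            configfile = argv[argv.index("-c") + 1]
            basedir = os.path.dirname(configfile) or "."
        else:
            basedir = default_config_dir
            configfile = basedir + "/main.mk"
        if not os.path.exists(configfile):
            raise IOError("Missing configuration file %s" % configfile)
        return basedir, configfile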
-simulation_mode = False -agent_simulator = False -perfdata_format = "pnp" # also possible: "standard" -check_mk_perfdata_with_times = False -debug_log = None -monitoring_host = None # deprecated -max_num_processes = 50 - -# SNMP communities and encoding -snmp_default_community = 'public' -snmp_communities = [] -snmp_timing = [] -snmp_character_encodings = [] - -# Inventory and inventory checks -inventory_check_interval = None # Nagios intervals (4h = 240) -inventory_check_severity = 1 # warning -inventory_max_cachefile_age = 120 # secs. -always_cleanup_autochecks = False - -# Nagios templates and other settings concerning generation -# of Nagios configuration files. No need to change these values. -# Better adopt the content of the templates -host_template = 'check_mk_host' -cluster_template = 'check_mk_cluster' -pingonly_template = 'check_mk_pingonly' -active_service_template = 'check_mk_active' -inventory_check_template = 'check_mk_inventory' -passive_service_template = 'check_mk_passive' -passive_service_template_perf = 'check_mk_passive_perf' -summary_service_template = 'check_mk_summarized' -service_dependency_template = 'check_mk' -default_host_group = 'check_mk' -generate_hostconf = True -generate_dummy_commands = True -dummy_check_commandline = 'echo "ERROR - you did an active check on this service - please disable active checks" && exit 1' -nagios_illegal_chars = '`;~!$%^&*|\'"<>?,()=' - -# Data to be defined in main.mk -checks = [] -static_checks = {} -check_parameters = [] -checkgroup_parameters = {} -legacy_checks = [] # non-WATO variant of legacy checks -active_checks = {} # WATO variant for fully formalized checks -custom_checks = [] # WATO variant for free-form custom checks without formalization -all_hosts = [] -host_paths = {} -snmp_hosts = [ (['snmp'], ALL_HOSTS) ] -tcp_hosts = [ (['tcp'], ALL_HOSTS), (NEGATE, ['snmp'], ALL_HOSTS), (['!ping'], ALL_HOSTS) ] -bulkwalk_hosts = [] -snmpv2c_hosts = [] -usewalk_hosts = [] -dyndns_hosts = [] # use host name as ip address for these hosts -ignored_checktypes = [] # exclude from inventory -ignored_services = [] # exclude from inventory -ignored_checks = [] # exclude from inventory -host_groups = [] -service_groups = [] -service_contactgroups = [] -service_notification_periods = [] # deprecated, will be removed soon. -host_notification_periods = [] # deprecated, will be removed soon. -host_contactgroups = [] -parents = [] -define_hostgroups = None -define_servicegroups = None -define_contactgroups = None -contactgroup_members = {} -contacts = {} -timeperiods = {} # needed for WATO -clusters = {} -clustered_services = [] -clustered_services_of = {} # new in 1.1.4 -datasource_programs = [] -service_aggregations = [] -service_dependencies = [] -non_aggregated_hosts = [] -aggregate_check_mk = False -aggregation_output_format = "multiline" # new in 1.1.6. 
Possible also: "multiline" -summary_host_groups = [] -summary_service_groups = [] # service groups for aggregated services -summary_service_contactgroups = [] # service contact groups for aggregated services -summary_host_notification_periods = [] -summary_service_notification_periods = [] -ipaddresses = {} # mapping from hostname to ipaddress -only_hosts = None -distributed_wato_site = None # used by distributed WATO -extra_host_conf = {} -extra_summary_host_conf = {} -extra_service_conf = {} -extra_summary_service_conf = {} -extra_nagios_conf = "" -service_descriptions = {} -donation_hosts = [] -donation_command = 'mail -r checkmk@yoursite.de -s "Host donation %s" donatehosts@mathias-kettner.de' % check_mk_version -scanparent_hosts = [ ( ALL_HOSTS ) ] -host_attributes = {} # needed by WATO, ignored by Check_MK -ping_levels = [] # special parameters for host/PING check_command -check_periods = [] - -# global variables used to cache temporary values (not needed in check_mk_base) -ip_to_hostname_cache = None +# Renaming of service descriptions while keeping backward compatibility with +# existing installations. +old_service_descriptions = { + "df" : "fs_%s", + "df_netapp" : "fs_%s", + "df_netapp32" : "fs_%s", + "esx_vsphere_datastores" : "fs_%s", + "hr_fs" : "fs_%s", + "vms_diskstat.df" : "fs_%s", + "zfsget" : "fs_%s", + "ps" : "proc_%s", + "ps.perf" : "proc_%s", + "wmic_process" : "proc_%s", + "services" : "service_%s", + "logwatch" : "LOG %s", + "hyperv_vm" : "hyperv_vms", +} -# The following data structures will be filled by the various checks -# found in the checks/ directory. -check_info = {} # all known checks -checkgroup_of = {} # groups of checks with compatible parametration -check_includes = {} # library files needed by checks -precompile_params = {} # optional functions for parameter precompilation, look at df for an example -check_default_levels = {} # dictionary-configured checks declare their default level variables here -factory_settings = {} # factory settings for dictionary-configured checks -check_config_variables = [] # variables (names) in checks/* needed for check itself -snmp_info = {} # whichs OIDs to fetch for which check (for tabular information) -snmp_scan_functions = {} # SNMP autodetection -active_check_info = {} # definitions of active "legacy" checks +#. +# .--Modules-------------------------------------------------------------. +# | __ __ _ _ | +# | | \/ | ___ __| |_ _| | ___ ___ | +# | | |\/| |/ _ \ / _` | | | | |/ _ \/ __| | +# | | | | | (_) | (_| | |_| | | __/\__ \ | +# | |_| |_|\___/ \__,_|\__,_|_|\___||___/ | +# | | +# +----------------------------------------------------------------------+ +# | Load the other modules | +# '----------------------------------------------------------------------' +known_vars = set(vars().keys()) +known_vars.add('known_vars') +execfile(modules_dir + '/config.py') +config_variable_names = set(vars().keys()).difference(known_vars) -# Now include the other modules. They contain everything that is needed -# at check time (and many of that is also needed at administration time). +# at check time (and many of what is also needed at administration time). 
try: - modules = [ 'check_mk_base', 'snmp', 'notify' ] + modules = [ 'check_mk_base', 'discovery', 'snmp', 'notify', 'prediction', 'cmc', 'inline_snmp', 'agent_bakery', 'cap' ] for module in modules: filename = modules_dir + "/" + module + ".py" - execfile(filename) + if os.path.exists(filename): + execfile(filename) except Exception, e: sys.stderr.write("Cannot read file %s: %s\n" % (filename, e)) sys.exit(5) -# +----------------------------------------------------------------------+ +#. +# .--Check helpers ------------------------------------------------------. # | ____ _ _ _ _ | # | / ___| |__ ___ ___| | __ | |__ ___| |_ __ ___ _ __ ___ | # | | | | '_ \ / _ \/ __| |/ / | '_ \ / _ \ | '_ \ / _ \ '__/ __| | # | | |___| | | | __/ (__| < | | | | __/ | |_) | __/ | \__ \ | # | \____|_| |_|\___|\___|_|\_\ |_| |_|\___|_| .__/ \___|_| |___/ | # | |_| | -# | | +# +----------------------------------------------------------------------+ # | These functions are used by some checks at administration time. | # +----------------------------------------------------------------------+ -# The function no_inventory_possible is as stub function used for +# The function no_discovery_possible is as stub function used for # those checks that do not support inventory. It must be known before # we read in all the checks -def no_inventory_possible(checkname, info): - sys.stderr.write("Sorry. No inventory possible for check type %s.\n" % checkname) - sys.exit(3) +def no_discovery_possible(check_type, info): + if opt_verbose: + sys.stdout.write("%s does not support discovery. Skipping it.\n" % check_type) + return [] -# +----------------------------------------------------------------------+ + +#. +# .--Load checks---------------------------------------------------------. # | _ _ _ _ | # | | | ___ __ _ __| | ___| |__ ___ ___| | _____ | # | | | / _ \ / _` |/ _` | / __| '_ \ / _ \/ __| |/ / __| | @@ -378,12 +297,24 @@ # | | # +----------------------------------------------------------------------+ +# The following data structures will be filled by the checks +check_info = {} # all known checks +checkgroup_of = {} # groups of checks with compatible parametration +check_includes = {} # library files needed by checks +precompile_params = {} # optional functions for parameter precompilation, look at df for an example +check_default_levels = {} # dictionary-configured checks declare their default level variables here +factory_settings = {} # factory settings for dictionary-configured checks +check_config_variables = [] # variables (names) in checks/* needed for check itself +snmp_info = {} # whichs OIDs to fetch for which check (for tabular information) +snmp_scan_functions = {} # SNMP autodetection +active_check_info = {} # definitions of active "legacy" checks +special_agent_info = {} + # Now read in all checks. Note: this is done *before* reading the # configuration, because checks define variables with default -# values. The user can override those variables in his configuration. +# values user can override those variables in his configuration. 
# Do not read in the checks if check_mk is called as module - -if __name__ == "__main__": +def load_checks(): filelist = glob.glob(checks_dir + "/*") filelist.sort() @@ -398,22 +329,35 @@ filelist = [ f for f in filelist if f.endswith(".include") ] + \ [ f for f in filelist if not f.endswith(".include") ] + varname = None + value = None + ignored_variable_types = [ type(lambda: None), type(os) ] + + known_vars = set(globals().keys()) # track new configuration variables + for f in filelist: if not f.endswith("~"): # ignore emacs-like backup files try: - execfile(f) + execfile(f, globals()) except Exception, e: sys.stderr.write("Error in plugin file %s: %s\n" % (f, e)) if opt_debug: raise sys.exit(5) + for varname, value in globals().iteritems(): + if varname[0] != '_' \ + and varname not in known_vars \ + and type(value) not in ignored_variable_types: + config_variable_names.add(varname) + # Now convert check_info to new format. convert_check_info() +load_checks() - -# +----------------------------------------------------------------------+ +#. +# .--Checks--------------------------------------------------------------. # | ____ _ _ | # | / ___| |__ ___ ___| | _____ | # | | | | '_ \ / _ \/ __| |/ / __| | @@ -423,38 +367,39 @@ # +----------------------------------------------------------------------+ def output_check_info(): - print "Available check types:" - print - print " plugin perf- in- " - print "Name type data vent. service description" - print "-------------------------------------------------------------------------" + all_check_manuals = all_manuals() + read_manpage_catalog() - checks_sorted = check_info.items() + checks_sorted = check_info.items() + active_check_info.items() checks_sorted.sort() for check_type, check in checks_sorted: + man_filename = all_check_manuals.get(check_type) try: - if check.get("has_perfdata", False): - p = tty_green + tty_bold + "yes" + tty_normal - else: - p = "no" - if check["inventory_function"] == None: - i = "no" + if 'command_line' in check: + what = 'active' + ty_color = tty_blue + elif check_uses_snmp(check_type): + what = 'snmp' + ty_color = tty_magenta else: - i = tty_blue + tty_bold + "yes" + tty_normal + what = 'tcp' + ty_color = tty_yellow - if check_uses_snmp(check_type): - typename = tty_magenta + "snmp" + tty_normal + if man_filename: + title = file(man_filename).readlines()[0].split(":", 1)[1].strip() else: - typename = tty_yellow + "tcp " + tty_normal + title = "(no man page present)" - print (tty_bold + "%-19s" + tty_normal + " %s %-3s %-3s %s") % \ - (check_type, typename, p, i, check["service_description"]) + print (tty_bold + "%-44s" + tty_normal + + ty_color + " %-6s " + tty_normal + + "%s") % \ + (check_type, what, title) except Exception, e: sys.stderr.write("ERROR in check_type %s: %s\n" % (check_type, e)) - -# +----------------------------------------------------------------------+ +#. +# .--Host tags-----------------------------------------------------------. # | _ _ _ _ | # | | | | | ___ ___| |_ | |_ __ _ __ _ ___ | # | | |_| |/ _ \/ __| __| | __/ _` |/ _` / __| | @@ -462,6 +407,8 @@ # | |_| |_|\___/|___/\__| \__\__,_|\__, |___/ | # | |___/ | # +----------------------------------------------------------------------+ +# | Helper functions for dealing with host tags | +# '----------------------------------------------------------------------' def strip_tags(host_or_list): if type(host_or_list) == list: @@ -500,7 +447,8 @@ return True -# +----------------------------------------------------------------------+ +#. 
+# .--Aggregation---------------------------------------------------------. # | _ _ _ | # | / \ __ _ __ _ _ __ ___ __ _ __ _| |_(_) ___ _ __ | # | / _ \ / _` |/ _` | '__/ _ \/ _` |/ _` | __| |/ _ \| '_ \ | @@ -508,6 +456,10 @@ # | /_/ \_\__, |\__, |_| \___|\__, |\__,_|\__|_|\___/|_| |_| | # | |___/ |___/ |___/ | # +----------------------------------------------------------------------+ +# | Service aggregations is deprecated and has been superseeded by BI. | +# | This code will dropped soon. Do not use service_aggregations any | +# | more... | +# '----------------------------------------------------------------------' # Checks if a host has service aggregations def host_is_aggregated(hostname): @@ -557,8 +509,105 @@ return aggrname return "" - +#. +# .--Helpers-------------------------------------------------------------. +# | _ _ _ | +# | | | | | ___| |_ __ ___ _ __ ___ | +# | | |_| |/ _ \ | '_ \ / _ \ '__/ __| | +# | | _ | __/ | |_) | __/ | \__ \ | +# | |_| |_|\___|_| .__/ \___|_| |___/ | +# | |_| | # +----------------------------------------------------------------------+ +# | Misc functions which do not belong to any other topic | +# '----------------------------------------------------------------------' + +def is_tcp_host(hostname): + return in_binary_hostlist(hostname, tcp_hosts) + +def is_ping_host(hostname): + return not is_snmp_host(hostname) and not is_tcp_host(hostname) and not has_piggyback_info(hostname) + +def is_dual_host(hostname): + return is_tcp_host(hostname) and is_snmp_host(hostname) + +def check_period_of(hostname, service): + periods = service_extra_conf(hostname, service, check_periods) + if periods: + period = periods[0] + if period == "24X7": + return None + else: + return period + else: + return None + +def check_interval_of(hostname, checkname): + if not check_uses_snmp(checkname): + return # no values at all for non snmp checks + for match, minutes in host_extra_conf(hostname, snmp_check_interval): + if match is None or match == checkname: + return minutes # use first match + +def agent_target_version(hostname): + agent_target_versions = host_extra_conf(hostname, check_mk_agent_target_versions) + if len(agent_target_versions) > 0: + spec = agent_target_versions[0] + if spec == "ignore": + return None + elif spec == "site": + return check_mk_version + elif type(spec) == str: + # Compatibility to old value specification format (a single version string) + return spec + elif spec[0] == 'specific': + return spec[1] + else: + return spec # return the whole spec in case of an "at least version" config + +regex_cache = {} +def regex(r): + rx = regex_cache.get(r) + if rx: + return rx + try: + rx = re.compile(r) + except Exception, e: + raise MKGeneralException("Invalid regular expression '%s': %s" % (r, e)) + regex_cache[r] = rx + return rx + +orig_check_max_cachefile_age = None +orig_cluster_max_cachefile_age = None +orig_inventory_max_cachefile_age = None + +# TODO: Why 1000000000? Can't we really clean this up to a global variable which can +# be toggled to enforce the cache usage (if available). This way we would not need +# to store the original values of the different caches and modify them etc. 
+def enforce_using_agent_cache(): + global check_max_cachefile_age, cluster_max_cachefile_age, inventory_max_cachefile_age + global orig_check_max_cachefile_age, orig_cluster_max_cachefile_age, \ + orig_inventory_max_cachefile_age + + if check_max_cachefile_age != 1000000000: + orig_check_max_cachefile_age = check_max_cachefile_age + orig_cluster_max_cachefile_age = cluster_max_cachefile_age + orig_inventory_max_cachefile_age = inventory_max_cachefile_age + + check_max_cachefile_age = 1000000000 + cluster_max_cachefile_age = 1000000000 + inventory_max_cachefile_age = 1000000000 + + +def restore_original_agent_caching_usage(): + global check_max_cachefile_age, cluster_max_cachefile_age, inventory_max_cachefile_age + if orig_check_max_cachefile_age != None: + check_max_cachefile_age = orig_check_max_cachefile_age + cluster_max_cachefile_age = orig_cluster_max_cachefile_age + inventory_max_cachefile_age = orig_inventory_max_cachefile_age + + +#. +# .--SNMP----------------------------------------------------------------. # | ____ _ _ __ __ ____ | # | / ___|| \ | | \/ | _ \ | # | \___ \| \| | |\/| | |_) | | @@ -566,6 +615,64 @@ # | |____/|_| \_|_| |_|_| | # | | # +----------------------------------------------------------------------+ +# | Some basic SNMP functions. Note: most of the SNMP related code is | +# | the separate module snmp.py. | +# '----------------------------------------------------------------------' + +# Determine SNMP community for a specific host. It the host is found +# int the map snmp_communities, that community is returned. Otherwise +# the snmp_default_community is returned (wich is preset with +# "public", but can be overridden in main.mk +def snmp_credentials_of(hostname): + try: + return explicit_snmp_communities[hostname] + except KeyError: + pass + + communities = host_extra_conf(hostname, snmp_communities) + if len(communities) > 0: + return communities[0] + + # nothing configured for this host -> use default + return snmp_default_community + +def get_snmp_character_encoding(hostname): + entries = host_extra_conf(hostname, snmp_character_encodings) + if len(entries) > 0: + return entries[0] + +def is_snmpv3_host(hostname): + return type(snmp_credentials_of(hostname)) == tuple + +def is_snmp_host(hostname): + return in_binary_hostlist(hostname, snmp_hosts) + +def is_bulkwalk_host(hostname): + if bulkwalk_hosts: + return in_binary_hostlist(hostname, bulkwalk_hosts) + else: + return False + +def is_snmpv2c_host(hostname): + return is_bulkwalk_host(hostname) or \ + in_binary_hostlist(hostname, snmpv2c_hosts) + +def is_usewalk_host(hostname): + return in_binary_hostlist(hostname, usewalk_hosts) + +def snmp_timing_of(hostname): + timing = host_extra_conf(hostname, snmp_timing) + if len(timing) > 0: + return timing[0] + else: + return {} + +def snmp_port_spec(hostname): + port = snmp_port_of(hostname) + if port == None: + return "" + else: + return ":%d" % port # Returns command lines for snmpwalk and snmpget including # options for authentication. This handles communities and @@ -573,9 +680,6 @@ def snmp_walk_command(hostname): return snmp_base_command('walk', hostname) + " -Cc" -# Constructs the basic snmp commands for a host with all important information -# like the commandname, SNMP version and credentials. -# This function also changes snmpbulkwalk to snmpwalk for snmpv1. 
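The TODO above the enforce/restore pair asks whether the saved-originals dance around the cache-age globals can be cleaned up. One possible direction (an assumption sketched here, not code from this release) is a context manager that restores the originals automatically:

    # One possible cleanup for the TODO above: scope the cache-age override
    # with a context manager instead of paired enforce/restore calls.
    # Sketch only -- not code from this release.
    import contextlib

    check_max_cachefile_age = 0
    cluster_max_cachefile_age = 90

    @contextlib.contextmanager
    def enforced_agent_cache(age=1000000000):
        global check_max_cachefile_age, cluster_max_cachefile_age
        saved = (check_max_cachefile_age, cluster_max_cachefile_age)
        check_max_cachefile_age = cluster_max_cachefile_age = age
        try:
            yield
        finally:
            check_max_cachefile_age, cluster_max_cachefile_age = saved

    with enforced_agent_cache():
        pass  # cached agent data would be accepted unconditionally here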
def snmp_base_command(what, hostname): # if the credentials are a string, we use that as community, # if it is a four-tuple, we use it as V3 auth parameters: @@ -592,11 +696,13 @@ command = 'snmpget' elif what == 'getnext': command = 'snmpgetnext -Cf' - else: + elif is_bulkwalk_host(hostname): command = 'snmpbulkwalk' + else: + command = 'snmpwalk' # Handle V1 and V2C - if type(credentials) == str: + if type(credentials) in [ str, unicode ]: if is_bulkwalk_host(hostname): options = '-v2c' else: @@ -623,78 +729,69 @@ # Configuration of timing and retries settings = snmp_timing_of(hostname) if "timeout" in settings: - options += " -t %d" % settings["timeout"] + options += " -t %0.2f" % settings["timeout"] if "retries" in settings: options += " -r %d" % settings["retries"] return command + ' ' + options - -# Determine SNMP community for a specific host. It the host is found -# int the map snmp_communities, that community is returned. Otherwise -# the snmp_default_community is returned (wich is preset with -# "public", but can be overridden in main.mk -def snmp_credentials_of(hostname): - communities = host_extra_conf(hostname, snmp_communities) - if len(communities) > 0: - return communities[0] - - # nothing configured for this host -> use default - return snmp_default_community - -def snmp_timing_of(hostname): - timing = host_extra_conf(hostname, snmp_timing) - if len(timing) > 0: - return timing[0] +def snmp_get_oid(hostname, ipaddress, oid): + if oid.endswith(".*"): + oid_prefix = oid[:-2] + commandtype = "getnext" else: - return {} - -def get_snmp_character_encoding(hostname): - entries = host_extra_conf(hostname, snmp_character_encodings) - if len(entries) > 0: - return entries[0] - -def check_uses_snmp(check_type): - return snmp_info.get(check_type.split(".")[0]) != None + oid_prefix = oid + commandtype = "get" -def is_snmp_host(hostname): - return in_binary_hostlist(hostname, snmp_hosts) + portspec = snmp_port_spec(hostname) + command = snmp_base_command(commandtype, hostname) + \ + " -On -OQ -Oe -Ot %s%s %s" % (ipaddress, portspec, oid_prefix) -def is_tcp_host(hostname): - return in_binary_hostlist(hostname, tcp_hosts) + if opt_debug: + sys.stdout.write("Running '%s'\n" % command) -def is_ping_host(hostname): - return not is_snmp_host(hostname) and not is_tcp_host(hostname) + snmp_process = subprocess.Popen(command, shell = True, stdout = subprocess.PIPE, stderr = subprocess.PIPE) + exitstatus = snmp_process.wait() + if exitstatus: + if opt_verbose: + sys.stderr.write(tty_red + tty_bold + "ERROR: " + tty_normal + "SNMP error\n") + sys.stderr.write(snmp_process.stderr.read()) + return None -def is_bulkwalk_host(hostname): - if bulkwalk_hosts: - return in_binary_hostlist(hostname, bulkwalk_hosts) - else: - return False + line = snmp_process.stdout.readline().strip() + if not line: + if opt_debug: + sys.stdout.write("Error in response to snmpget.\n") + return None -def is_snmpv2c_host(hostname): - return is_bulkwalk_host(hostname) or \ - in_binary_hostlist(hostname, snmpv2c_hosts) + item, value = line.split("=", 1) + value = value.strip() + if opt_debug: + sys.stdout.write("SNMP answer: ==> [%s]\n" % value) + if value.startswith('No more variables') or value.startswith('End of MIB') \ + or value.startswith('No Such Object available') or value.startswith('No Such Instance currently exists'): + value = None -def is_usewalk_host(hostname): - return in_binary_hostlist(hostname, usewalk_hosts) + # In case of .*, check if prefix is the one we are looking for + if commandtype == "getnext" and 
not item.startswith(oid_prefix + "."): + value = None -def check_period_of(hostname, service): - periods = service_extra_conf(hostname, service, check_periods) - if periods: - period = periods[0] - if period == "24X7": - return None - else: - return period - else: - return None + # Strip quotes + if value.startswith('"') and value.endswith('"'): + value = value[1:-1] + return value def get_single_oid(hostname, ipaddress, oid): # New in Check_MK 1.1.11: oid can end with ".*". In that case # we do a snmpgetnext and try to find an OID with the prefix # in question. The *cache* is working including the X, however. + if oid[0] != '.': + if opt_debug: + raise MKGeneralException("OID definition '%s' does not begin with a '.'" % oid) + else: + oid = '.' + oid + global g_single_oid_hostname global g_single_oid_cache @@ -712,100 +809,21 @@ else: return None - if oid.endswith(".*"): - oid_prefix = oid[:-2] - commandtype = "getnext" - else: - oid_prefix = oid - commandtype = "get" - - portspec = snmp_port_spec(hostname) - command = snmp_base_command(commandtype, hostname) + \ - " -On -OQ -Oe -Ot %s%s %s 2>/dev/null" % (ipaddress, portspec, oid_prefix) try: - if opt_debug: - sys.stdout.write("Running '%s'\n" % command) - - snmp_process = os.popen(command, "r") - line = snmp_process.readline().strip() - item, value = line.split("=", 1) - value = value.strip() - if opt_debug: - sys.stdout.write("SNMP answer: ==> [%s]\n" % value) - if value.startswith('No more variables') or value.startswith('End of MIB') \ - or value.startswith('No Such Object available') or value.startswith('No Such Instance currently exists'): - value = None - - # In case of .*, check if prefix is the one we are looking for - if commandtype == "getnext" and not item.startswith(oid_prefix + "."): - value = None - - # Strip quotes - if value.startswith('"') and value.endswith('"'): - value = value[1:-1] - # try to remove text, only keep number - # value_num = value_text.split(" ")[0] - # value_num = value_num.lstrip("+") - # value_num = value_num.rstrip("%") - # value = value_num + if has_inline_snmp and use_inline_snmp: + value = inline_snmp_get_oid(hostname, oid) + else: + value = snmp_get_oid(hostname, ipaddress, oid) except: + if opt_debug: + raise value = None g_single_oid_cache[oid] = value return value -def snmp_scan(hostname, ipaddress): - # Make hostname globally available for scan functions. - # This is rarely used, but e.g. the scan for if/if64 needs - # this to evaluate if_disabled_if64_checks. - global g_hostname - g_hostname = hostname - - if opt_verbose: - sys.stdout.write("Scanning host %s(%s) for SNMP checks..." % (hostname, ipaddress)) - sys_descr = get_single_oid(hostname, ipaddress, ".1.3.6.1.2.1.1.1.0") - if sys_descr == None: - if opt_debug: - sys.stderr.write("no SNMP answer\n") - return [] - - found = [] - for check_type, check in check_info.items(): - if check_type in ignored_checktypes: - continue - elif not check_uses_snmp(check_type): - continue - basename = check_type.split(".")[0] - # The scan function should be assigned to the basename, because - # subchecks sharing the same SNMP info of course should have - # an identical scan function. 
But some checks do not do this - # correctly - scan_function = snmp_scan_functions.get(check_type, - snmp_scan_functions.get(basename)) - if scan_function: - try: - if scan_function(lambda oid: get_single_oid(hostname, ipaddress, oid)): - found.append(check_type) - if opt_verbose: - sys.stdout.write(tty_green + tty_bold + check_type - + " " + tty_normal) - sys.stdout.flush() - except: - pass - else: - found.append(check_type) - if opt_verbose: - sys.stdout.write(tty_blue + tty_bold + check_type \ - + tty_normal + " ") - sys.stdout.flush() - - if opt_verbose: - sys.stdout.write("\n") - found.sort() - return found - - -# +----------------------------------------------------------------------+ +#. +# .--Cluster-------------------------------------------------------------. # | ____ _ _ | # | / ___| |_ _ ___| |_ ___ _ __ | # | | | | | | | / __| __/ _ \ '__| | @@ -813,6 +831,10 @@ # | \____|_|\__,_|___/\__\___|_| | # | | # +----------------------------------------------------------------------+ +# | Code dealing with clusters (virtual hosts that are used to deal with | +# | services that can move between physical nodes. | +# '----------------------------------------------------------------------' + # clusternames (keys into dictionary) might be tagged :-( # names of nodes not! @@ -835,6 +857,10 @@ if not the_clusters: return hostname + cluster_mapping = service_extra_conf(hostname, servicedesc, clustered_services_mapping) + if cluster_mapping: + return cluster_mapping[0] + # 1. New style: explicitly assigned services for cluster, conf in clustered_services_of.items(): nodes = nodes_of(cluster) @@ -852,35 +878,35 @@ return hostname - -# +----------------------------------------------------------------------+ -# | _ _ _ _ _ | -# | | | | | ___ ___| |_ ___| |__ ___ ___| | _____ | -# | | |_| |/ _ \/ __| __/ __| '_ \ / _ \/ __| |/ / __| | -# | | _ | (_) \__ \ || (__| | | | __/ (__| <\__ \ | -# | |_| |_|\___/|___/\__\___|_| |_|\___|\___|_|\_\___/ | +#. +# .--Checktable----------------------------------------------------------. +# | ____ _ _ _ _ _ | +# | / ___| |__ ___ ___| | _| |_ __ _| |__ | | ___ | +# | | | | '_ \ / _ \/ __| |/ / __/ _` | '_ \| |/ _ \ | +# | | |___| | | | __/ (__| <| || (_| | |_) | | __/ | +# | \____|_| |_|\___|\___|_|\_\\__\__,_|_.__/|_|\___| | # | | # +----------------------------------------------------------------------+ +# | Code for computing the table of checks of a host. | +# '----------------------------------------------------------------------' # Returns check table for a specific host -# Format: ( checkname, item ) -> (params, description ) +# Format: (checkname, item) -> (params, description) -# Keep a global cache of per-host-checktables, since this -# operation is quite lengthy. -g_check_table_cache = {} -# A further cache splits up all checks into single-host-entries -# and those possibly matching multiple hosts. The single host entries -# are used in the autochecks and assumed be make up the vast majority. 
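# The caches below split all configured checks into entries that name exactly
# one host and entries that may match several hosts; the single-host entries
# (the vast majority, since autochecks are written per host) can then be
# looked up in a dict instead of being scanned linearly. Roughly (a sketch,
# the exact code is elided from this hunk):
#
#     if len(entry) == 4 and type(entry[0]) == str:
#         g_singlehost_checks.setdefault(entry[0], []).append(entry)
#     else:
#         g_multihost_checks.append(entry)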
-g_singlehost_checks = None -g_multihost_checks = None -def get_check_table(hostname): +def get_check_table(hostname, remove_duplicates=False, use_cache=True, world='config', skip_autochecks=False): global g_singlehost_checks global g_multihost_checks + if is_ping_host(hostname): + skip_autochecks = True + # speed up multiple lookup of same host - if hostname in g_check_table_cache: - return g_check_table_cache[hostname] + if not skip_autochecks and use_cache and hostname in g_check_table_cache: + if remove_duplicates and is_dual_host(hostname): + return remove_duplicate_checks(g_check_table_cache[hostname]) + else: + return g_check_table_cache[hostname] check_table = {} @@ -896,7 +922,11 @@ g_multihost_checks.append(entry) def handle_entry(entry): - if len(entry) == 4: + if len(entry) == 3: # from autochecks + hostlist = hostname + checkname, item, params = entry + tags = [] + elif len(entry) == 4: hostlist, checkname, item, params = entry tags = [] elif len(entry) == 5: @@ -922,22 +952,46 @@ elif type(hostlist[0]) == str: hostlist = strip_tags(hostlist) elif hostlist != []: - raise MKGeneralException("Invalid entry '%r' in check table. Must be single hostname or list of hostnames" % hostinfolist) + raise MKGeneralException("Invalid entry '%r' in check table. Must be single hostname or list of hostnames" % hostlist) if hosttags_match_taglist(tags_of_host(hostname), tags) and \ in_extraconf_hostlist(hostlist, hostname): descr = service_description(checkname, item) + if service_ignored(hostname, checkname, descr): + return + if hostname != host_of_clustered_service(hostname, descr): + return deps = service_deps(hostname, descr) check_table[(checkname, item)] = (params, descr, deps) # Now process all entries that are specific to the host # in search (single host) or that might match the host. 
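    # For reference, the three entry layouts accepted by handle_entry()
    # above (check names and values invented for illustration):
    #
    #     ("df", "/", {})                          # 3 elements: from autochecks
    #     ("srv01", "df", "/", {})                 # 4 elements: hostlist, check, item, params
    #     (["linux"], ["srv01"], "df", "/", {})    # 5 elements: host tags first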
+ if not skip_autochecks: + for entry in read_autochecks_of(hostname, world): + handle_entry(entry) + for entry in g_singlehost_checks.get(hostname, []): handle_entry(entry) for entry in g_multihost_checks: handle_entry(entry) + # Now add checks a cluster might receive from its nodes + if is_cluster(hostname): + for node in nodes_of(hostname): + node_checks = g_singlehost_checks.get(node, []) + if not skip_autochecks: + node_checks = node_checks + read_autochecks_of(node, world) + for entry in node_checks: + if len(entry) == 4: + entry = entry[1:] # drop hostname from g_singlehost_checks + checkname, item, params = entry + descr = service_description(checkname, item) + if hostname == host_of_clustered_service(node, descr): + cluster_params = compute_check_parameters(hostname, checkname, item, params) + handle_entry((hostname, checkname, item, cluster_params)) + + # Remove dependencies to non-existing services all_descr = set([ descr for ((checkname, item), (params, descr, deps)) in check_table.items() ]) for (checkname, item), (params, descr, deps) in check_table.items(): @@ -947,16 +1001,46 @@ if d in all_descr: deps.append(d) - g_check_table_cache[hostname] = check_table - return check_table + if not skip_autochecks and use_cache: + g_check_table_cache[hostname] = check_table + + if remove_duplicates and is_dual_host(hostname): + return remove_duplicate_checks(check_table) + else: + return check_table + + +def remove_duplicate_checks(check_table): + have_with_tcp = {} + have_with_snmp = {} + without_duplicates = {} + for key, value in check_table.iteritems(): + checkname = key[0] + descr = value[1] + if check_uses_snmp(checkname): + if descr in have_with_tcp: + continue + have_with_snmp[descr] = key + else: + if descr in have_with_snmp: + snmp_key = have_with_snmp[descr] + del without_duplicates[snmp_key] + del have_with_snmp[descr] + have_with_tcp[descr] = key + without_duplicates[key] = value + return without_duplicates + -def get_sorted_check_table(hostname): +# remove_duplicates: Automatically remove SNMP based checks +# if there already is a TCP based one with the same +# description. E.g: df vs hr_fs. +def get_sorted_check_table(hostname, remove_duplicates=False, world="config"): # Convert from dictionary into simple tuple list. Then sort # it according to the service dependencies. unsorted = [ (checkname, item, params, descr, deps) for ((checkname, item), (params, descr, deps)) - in get_check_table(hostname).items() ] + in get_check_table(hostname, remove_duplicates=remove_duplicates, world=world).items() ] def cmp(a, b): if a[3] < b[3]: return -1 @@ -993,13 +1077,33 @@ # Determine, which program to call to get data. Should # be None in most cases -> to TCP connect on port 6556 + def get_datasource_program(hostname, ipaddress): + special_agents_dir = agents_dir + "/special" + + # First check WATO-style special_agent rules + for agentname, ruleset in special_agents.items(): + params = host_extra_conf(hostname, ruleset) + if params: # rule match! 
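            # A hypothetical rule that would match at this point (agent name
            # and parameters invented for illustration):
            #
            #     special_agents["vsphere"] = [
            #         ( {"user": "monitor", "password": "secret"}, ["esx01"] ),
            #     ]
            #
            # The command line built below then becomes
            # .../special/agent_vsphere followed by whatever arguments
            # special_agent_info["vsphere"](params[0], hostname, ipaddress)
            # returns.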
+            # Create command line using the special_agent_info
+            cmd_arguments = special_agent_info[agentname](params[0], hostname, ipaddress)
+            if local_special_agents_dir and \
+                os.path.exists(local_special_agents_dir + "/agent_" + agentname):
+                path = local_special_agents_dir + "/agent_" + agentname
+            else:
+                path = special_agents_dir + "/agent_" + agentname
+            return path + " " + cmd_arguments
+
     programs = host_extra_conf(hostname, datasource_programs)
     if len(programs) == 0:
         return None
     else:
         return programs[0].replace("<IP>", ipaddress).replace("<HOST>", hostname)

+# Variables needed during the renaming of hosts (see automation.py)
+ignore_ip_lookup_failures = False
+failed_ip_lookups = []
+
 # Determine the IP address of a host
 def lookup_ipaddress(hostname):
     # Quick hack, where all IP addresses are faked (--fake-dns)
@@ -1025,14 +1129,82 @@
     if hostname in g_dns_cache:
         return g_dns_cache[hostname]

-    # No do the actual DNS lookup
+    # Prepare file based fall-back DNS cache in case resolution fails
+    init_ip_lookup_cache()
+
+    cached_ip = g_ip_lookup_cache.get(hostname)
+    if cached_ip and use_dns_cache:
+        g_dns_cache[hostname] = cached_ip
+        return cached_ip
+
+    # Now do the actual DNS lookup
     try:
         ipa = socket.gethostbyname(hostname)
+
+        # Update our cached address if that has changed or was missing
+        if ipa != cached_ip:
+            if opt_verbose:
+                print "Updating DNS cache for %s: %s" % (hostname, ipa)
+            g_ip_lookup_cache[hostname] = ipa
+            write_ip_lookup_cache()
+
+        g_dns_cache[hostname] = ipa # Update in-memory-cache
+        return ipa
+
     except:
-        g_dns_cache[hostname] = None
-        raise
-    g_dns_cache[hostname] = ipa
-    return ipa
+        # DNS failed. Use cached IP address if present, even if caching
+        # is disabled.
+        if cached_ip:
+            g_dns_cache[hostname] = cached_ip
+            return cached_ip
+        else:
+            g_dns_cache[hostname] = None
+            raise
+
+def init_ip_lookup_cache():
+    global g_ip_lookup_cache
+    if g_ip_lookup_cache is None:
+        try:
+            g_ip_lookup_cache = eval(file(var_dir + '/ipaddresses.cache').read())
+        except:
+            g_ip_lookup_cache = {}
+
+
+def write_ip_lookup_cache():
+    suffix = "." + str(os.getpid())
+    file(var_dir + '/ipaddresses.cache' + suffix, 'w').write(repr(g_ip_lookup_cache))
+    os.rename(var_dir + '/ipaddresses.cache' + suffix, var_dir + '/ipaddresses.cache')
+
+
+def do_update_dns_cache():
+    # Temporarily disable *use* of cache, we want to force an update
+    global use_dns_cache
+    use_dns_cache = False
+    updated = 0
+    failed = []
+
+    if opt_verbose:
+        print "Updating DNS cache..."
+    for hostname in all_active_hosts() + all_active_clusters():
+        if opt_verbose:
+            sys.stdout.write("%s..." % hostname)
+            sys.stdout.flush()
+        # Use the regular lookup logic. This prevents DNS lookups for hosts
+        # with statically configured addresses, etc.
+        try:
+            ip = lookup_ipaddress(hostname)
+            if opt_verbose:
+                sys.stdout.write("%s\n" % ip)
+            updated += 1
+        except Exception, e:
+            failed.append(hostname)
+            if opt_verbose:
+                sys.stdout.write("lookup failed: %s\n" % e)
+            if opt_debug:
+                raise
+            continue
+
+    return updated, failed

 def agent_port_of(hostname):
     ports = host_extra_conf(hostname, agent_ports)
@@ -1048,12 +1220,17 @@
     else:
         return ports[0]

-def snmp_port_spec(hostname):
-    port = snmp_port_of(hostname)
-    if port == None:
-        return ""
-    else:
-        return ":%d" % port
+def exit_code_spec(hostname):
+    spec = {}
+    specs = host_extra_conf(hostname, check_mk_exit_status)
+    for entry in specs[::-1]:
+        spec.update(entry)
+    return spec
+
+
+# Remove illegal characters from a service description
+def sanitize_service_description(descr):
+    return "".join([ c for c in descr if c not in nagios_illegal_chars ]).rstrip("\\")

 def service_description(check_type, item):
@@ -1069,7 +1246,12 @@
     # use user-supplied service description, if available
     descr_format = service_descriptions.get(check_type)
     if not descr_format:
-        descr_format = check_info[check_type]["service_description"]
+        # handle renaming for backward compatibility
+        if check_type in old_service_descriptions and \
+            check_type not in use_new_descriptions_for:
+            descr_format = old_service_descriptions[check_type]
+        else:
+            descr_format = check_info[check_type]["service_description"]

     # Note: we strip the service description (remove spaces).
     # One check defines "Pages %s" as a description, but the item
@@ -1078,7 +1260,7 @@
     if type(item) == str:
         # Remove characters from item name that are banned by Nagios
-        item_safe = "".join([ c for c in item if c not in nagios_illegal_chars ])
+        item_safe = sanitize_service_description(item)
         if "%s" not in descr_format:
             descr_format += " %s"
         return (descr_format % (item_safe,)).strip()
@@ -1089,14 +1271,27 @@
     else:
         return descr_format.strip()

+
+# Get rules for piggyback translation for that hostname
+def get_piggyback_translation(hostname):
+    rules = host_extra_conf(hostname, piggyback_translation)
+    translations = {}
+    for rule in rules[::-1]:
+        translations.update(rule)
+    return translations
+
+
+#.
+# .--Config Output-------------------------------------------------------.
+# | ____ __ _ ___ _ _ |
+# | / ___|___ _ __ / _(_) __ _ / _ \ _ _ _ __ | |_ _ _| |_ |
+# | | | / _ \| '_ \| |_| |/ _` | | | | | | | | '_ \| __| | | | __| |
+# | | |__| (_) | | | | _| | (_| | | |_| | |_| | |_) | |_| |_| | |_ |
+# | \____\___/|_| |_|_| |_|\__, | \___/ \__,_| .__/ \__|\__,_|\__| |
+# | |___/ |_| |
# +----------------------------------------------------------------------+
-# | ____ __ _ _ _ |
-# | / ___|___ _ __ / _(_) __ _ ___ _ _| |_ _ __ _ _| |_ |
-# | | | / _ \| '_ \| |_| |/ _` | / _ \| | | | __| '_ \| | | | __| |
-# | | |__| (_) | | | | _| | (_| | | (_) | |_| | |_| |_) | |_| | |_ |
-# | \____\___/|_| |_|_| |_|\__, | \___/ \__,_|\__| .__/ \__,_|\__| |
-# | |___/ |_| |
-# +----------------------------------------------------------------------+
+# | Output an ASCII configuration file for the monitoring core. |
+# '----------------------------------------------------------------------'

 def make_utf8(x):
     if type(x) == unicode:
         return x.encode('utf-8')
     else:
         return x
@@ -1111,6 +1306,12 @@
 """)

+# Returns a list of all host names, regardless of whether they are
+# currently disabled or monitored on a remote site. Does not return
+# cluster hosts.
+def all_configured_physical_hosts():
+    return strip_tags(all_hosts)
+
 def all_active_hosts():
     return filter_active_hosts(all_hosts)
@@ -1139,8 +1340,13 @@
     # hosts without a site: tag belong to all sites
     return True

-def parse_hostname_list(args):
-    valid_hosts = all_active_hosts() + all_active_clusters()
+def parse_hostname_list(args, with_clusters = True, with_foreign_hosts = False):
+    if with_foreign_hosts:
+        valid_hosts = all_configured_physical_hosts()
+    else:
+        valid_hosts = all_active_hosts()
+    if with_clusters:
+        valid_hosts += all_active_clusters()
     hostlist = []
     for arg in args:
         if arg[0] != '@' and arg in valid_hosts:
@@ -1161,6 +1367,17 @@
             sys.exit(1)
     return hostlist

+def alias_of(hostname, fallback):
+    aliases = host_extra_conf(hostname, extra_host_conf.get("alias", []))
+    if len(aliases) == 0:
+        if fallback:
+            return fallback
+        else:
+            return hostname
+    else:
+        return aliases[0]
+
+
 def hostgroups_of(hostname):
     return host_extra_conf(hostname, host_groups)
@@ -1182,14 +1399,10 @@
             first_list = False
         else:
             cgrs.append(entry)
+    if monitoring_core == "nagios" and enable_rulebased_notifications:
+        cgrs.append("check-mk-notify")
     return list(set(cgrs))

-def host_contactgroups_nag(hostlist):
-    cgrs = host_contactgroups_of(hostlist)
-    if len(cgrs) > 0:
-        return " contact_groups " + ",".join(cgrs) + "\n"
-    else:
-        return ""

 def parents_of(hostname):
     par = host_extra_conf(hostname, parents)
@@ -1219,6 +1432,8 @@
     sercgr = service_extra_conf(hostname, description, service_contactgroups)
     contactgroups_to_define.update(sercgr)
     if len(sercgr) > 0:
+        if enable_rulebased_notifications:
+            sercgr.append("check-mk-notify") # not necessary if no explicit groups are defined
         conf += "  contact_groups\t\t" + ",".join(sercgr) + "\n"

     sergr = service_extra_conf(hostname, description, service_groups)
@@ -1244,7 +1459,61 @@
         result += format % (key, values[0])
     return result

-def check_icmp_arguments(hostname):
+def host_check_command(hostname, ip, is_clust):
+    # Check dedicated host check command
+    values = host_extra_conf(hostname, host_check_commands)
+    if values:
+        value = values[0]
+    elif monitoring_core == "cmc":
+        value = "smart"
+    else:
+        value = "ping"
+
+    if monitoring_core != "cmc" and value == "smart":
+        value = "ping" # avoid problems when switching back to nagios core
+
+    if value == "smart" and not is_clust:
+        return "check-mk-host-smart"
+
+    elif value in [ "ping", "smart" ]:
+        ping_args = check_icmp_arguments_of(hostname)
+        if is_clust and ip: # Do check cluster IP address if one is there
+            return "check-mk-host-ping!%s" % ping_args
+        elif ping_args and is_clust: # use check_icmp in cluster mode
+            return "check-mk-host-ping-cluster!%s" % ping_args
+        elif ping_args: # use special arguments
+            return "check-mk-host-ping!%s" % ping_args
+        else:
+            return None
+
+    elif value == "ok":
+        return "check-mk-host-ok"
+
+    elif value == "agent" or value[0] == "service":
+        service = value == "agent" and "Check_MK" or value[1]
+        if monitoring_core == "cmc":
+            return "check-mk-host-service!" + service
+        command = "check-mk-host-custom-%d" % (len(hostcheck_commands_to_define) + 1)
+        hostcheck_commands_to_define.append((command,
+            'echo "$SERVICEOUTPUT:%s:%s$" && exit $SERVICESTATEID:%s:%s$' % (hostname, service, hostname, service)))
+        return command
+
+    elif value[0] == "tcp":
+        return "check-mk-host-tcp!" + str(value[1])
+
+    elif value[0] == "custom":
+        try:
+            custom_commands_to_define.add("check-mk-custom")
+        except:
+            pass # not needed and not available with CMC
+        return "check-mk-custom!" + autodetect_plugin(value[1])
+
+    raise MKGeneralException("Invalid value %r for host_check_command of host %s." % (
+        value, hostname))
+
+
+
+def check_icmp_arguments_of(hostname):
     values = host_extra_conf(hostname, ping_levels)
     levels = {}
     for value in values[::-1]: # make first rules have precedence
@@ -1320,6 +1589,8 @@
         else:
             raise MKGeneralException("Invalid entry '%r' in host configuration list: must have 2 or 3 entries" % (entry,))

+        # Note: hostname may be True. This is an unknown generic host, that has
+        # no tags and that does not match any positive criteria in any rule.
         if hosttags_match_taglist(tags_of_host(hostname), tags) and \
            in_extraconf_hostlist(hostlist, hostname):
             items.append(item)
@@ -1371,6 +1642,7 @@
     return False

+
 # Pick out the last element of an entry if it is a dictionary.
 # This is a new feature (1.2.0p3) that allows to add options
 # to rules. Currently only the option "disabled" is being
@@ -1382,34 +1654,105 @@
     return entry, {}

-# Compute list of service_groups or contact_groups of service
-# conf is either service_groups or service_contactgroups
-def service_extra_conf(hostname, service, conf):
-    entries = []
-    for entry in conf:
-        entry, rule_options = get_rule_options(entry)
+def all_matching_hosts(tags, hostlist):
+    matching = set([])
+    for taggedhost in all_hosts + clusters.keys():
+        parts = taggedhost.split("|")
+        hostname = parts[0]
+        hosttags = parts[1:]
+
+        if hosttags_match_taglist(hosttags, tags) and \
+           in_extraconf_hostlist(hostlist, hostname):
+            matching.add(hostname)
+    return matching
+
+g_converted_rulesets_cache = {}
+
+def convert_service_ruleset(ruleset):
+    new_rules = []
+    for rule in ruleset:
+        rule, rule_options = get_rule_options(rule) # this could be converted once and then kept that way (8 sec instead of 137)
         if rule_options.get("disabled"):
             continue

-        if len(entry) == 3:
-            item, hostlist, servlist = entry
+        if len(rule) == 3:
+            item, hostlist, servlist = rule
             tags = []
-        elif len(entry) == 4:
-            item, tags, hostlist, servlist = entry
+        elif len(rule) == 4:
+            item, tags, hostlist, servlist = rule
         else:
-            raise MKGeneralException("Invalid entry '%r' in service configuration list: must have 3 or 4 elements" % (entry,))
+            raise MKGeneralException("Invalid rule '%r' in service configuration list: must have 3 or 4 elements" % (rule,))

-        if hosttags_match_taglist(tags_of_host(hostname), tags) and \
-           in_extraconf_hostlist(hostlist, hostname) and \
-           in_extraconf_servicelist(servlist, service):
+        # Directly compute set of all matching hosts here, this
+        # will avoid recomputation later
+        hosts = all_matching_hosts(tags, hostlist)
+        new_rules.append((item, hosts, servlist))
+
+    g_converted_rulesets_cache[id(ruleset)] = new_rules
+
+
+def serviceruleset_is_converted(ruleset):
+    return id(ruleset) in g_converted_rulesets_cache
+
+
+# Compute outcome of a service rule set that has an item
+def service_extra_conf(hostname, service, ruleset):
+    if not serviceruleset_is_converted(ruleset):
+        convert_service_ruleset(ruleset)
+
+    entries = []
+    for item, hosts, servlist in g_converted_rulesets_cache[id(ruleset)]:
+        if hostname in hosts and in_extraconf_servicelist(servlist, service):
             entries.append(item)
     return entries

+def convert_boolean_service_ruleset(ruleset):
+    new_rules = []
+    for rule in ruleset:
+        entry, rule_options = get_rule_options(rule)
+        if rule_options.get("disabled"):
+            continue
+
+        if entry[0] == NEGATE: # this entry is logically negated
+            negate = True
+            entry = entry[1:]
+        else:
+            negate = False
+
+        if len(entry) == 2:
+            hostlist, servlist = entry
+            tags = []
+        elif len(entry) == 3:
+            tags, hostlist, servlist = entry
+        else:
+            raise MKGeneralException("Invalid entry '%r' in configuration: must have 2 or 3 elements" % (entry,))
+
+        # Directly compute set of all matching hosts here, this
+        # will avoid recomputation later
+        hosts = all_matching_hosts(tags, hostlist)
+        new_rules.append((negate, hosts, servlist))
+
+    g_converted_rulesets_cache[id(ruleset)] = new_rules
+
+
+# Compute outcome of a service rule set that just says yes/no
+def in_boolean_serviceconf_list(hostname, service_description, ruleset):
+    if not serviceruleset_is_converted(ruleset):
+        convert_boolean_service_ruleset(ruleset)
+
+    for negate, hosts, servlist in g_converted_rulesets_cache[id(ruleset)]:
+        if hostname in hosts and \
+           in_extraconf_servicelist(servlist, service_description):
+            return not negate
+    return False # no match. Do not ignore
+
+
 # Entries in list are (tagged) hostnames that must equal the
 # (untagged) hostname. Expressions beginning with ! are negated: if
-# they match, the item is excluded from the list. Also the three
+# they match, the item is excluded from the list. Expressions beginning
+# with ~ are treated as regular expressions. Also the three
 # special tags '@all', '@clusters', '@physical' are allowed.
 def in_extraconf_hostlist(hostlist, hostname):

@@ -1420,6 +1763,8 @@
     for hostentry in hostlist:
         if len(hostentry) == 0:
             raise MKGeneralException('Empty hostname in host list %r' % hostlist)
+        negate = False
+        use_regex = False
         if hostentry[0] == '@':
             if hostentry == '@all':
                 return True
@@ -1430,17 +1775,29 @@
                 return True

         # Allow negation of hostentry with prefix '!'
-        elif hostentry[0] == '!':
-            hostentry = hostentry[1:]
-            negate = True
         else:
-            negate = False
+            if hostentry[0] == '!':
+                hostentry = hostentry[1:]
+                negate = True
+            # Allow regex with prefix '~'
+            if hostentry[0] == '~':
+                hostentry = hostentry[1:]
+                use_regex = True

-        if hostname == strip_tags(hostentry):
-            return not negate
+        hostentry = strip_tags(hostentry)
+        try:
+            if not use_regex and hostname == hostentry:
+                return not negate
+            # Handle Regex. Note: hostname == True -> generic unknown host
+            elif use_regex and hostname != True and regex(hostentry).match(hostname):
+                return not negate
+        except MKGeneralException:
+            if opt_debug:
+                raise

     return False

+
 def in_extraconf_servicelist(list, item):
     for pattern in list:
         # Allow negation of pattern with prefix '!'
@@ -1461,7 +1818,6 @@
     return False

-# NEW IMPLEMENTATION
 def create_nagios_config(outfile = sys.stdout, hostnames = None):
     global hostgroups_to_define
     hostgroups_to_define = set([])
@@ -1475,6 +1831,8 @@
     active_checks_to_define = set([])
     global custom_commands_to_define
     custom_commands_to_define = set([])
+    global hostcheck_commands_to_define
+    hostcheck_commands_to_define = []

     if host_notification_periods != []:
         raise MKGeneralException("host_notification_periods is no longer supported. Please use extra_host_conf['notification_period'] instead.")
@@ -1488,11 +1846,14 @@
     if summary_service_notification_periods != []:
         raise MKGeneralException("summary_service_notification_periods is no longer supported. Please use extra_summary_service_conf['notification_period'] instead.")

-    if filesystem_levels != []:
-        raise MKGeneralException("filesystem_levels is no longer supported.\n"
-                                 "Please use check_parameters instead.\n"
-                                 "Please refer to documentation:\n"
-                                 " --> http://mathias-kettner.de/checkmk_check_parameters.html\n")
+    # Map service_period to _SERVICE_PERIOD. This field does not exist in Nagios/Icinga.
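        # The fall-back cache consulted by lookup_ipaddress() is a plain
        # Python literal maintained by init_ip_lookup_cache() and
        # write_ip_lookup_cache() above; the file content looks like
        # (addresses invented):
        #
        #     {'srv01': '10.1.2.3', 'dc1-switch01': '10.1.2.4'}
        #
        # write_ip_lookup_cache() writes to a PID-suffixed file first and
        # then renames it, so concurrent readers never see a partial file.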
+ # The CMC has this field natively. + if "service_period" in extra_host_conf: + extra_host_conf["_SERVICE_PERIOD"] = extra_host_conf["service_period"] + del extra_host_conf["service_period"] + if "service_period" in extra_service_conf: + extra_service_conf["_SERVICE_PERIOD"] = extra_service_conf["service_period"] + del extra_service_conf["service_period"] output_conf_header(outfile) if hostnames == None: @@ -1501,12 +1862,12 @@ for hostname in hostnames: create_nagios_config_host(outfile, hostname) + create_nagios_config_contacts(outfile, hostnames) create_nagios_config_hostgroups(outfile) create_nagios_config_servicegroups(outfile) create_nagios_config_contactgroups(outfile) create_nagios_config_commands(outfile) create_nagios_config_timeperiods(outfile) - create_nagios_config_contacts(outfile) if extra_nagios_conf: outfile.write("\n# extra_nagios_conf\n\n") @@ -1529,7 +1890,10 @@ ip = lookup_ipaddress(hostname) except: if not is_clust: - raise MKGeneralException("Cannot determine ip address of %s. Please add to ipaddresses." % hostname) + if ignore_ip_lookup_failures: + failed_ip_lookups.append(hostname) + else: + raise MKGeneralException("Cannot determine ip address of %s. Please add to ipaddresses." % hostname) ip = None # _ @@ -1545,15 +1909,10 @@ outfile.write(" address\t\t\t%s\n" % (ip and make_utf8(ip) or "0.0.0.0")) outfile.write(" _TAGS\t\t\t\t%s\n" % " ".join(tags_of_host(hostname))) - # Levels for host check - ping_args = check_icmp_arguments(hostname) - if is_clust and ip: # Do check cluster IP address if one is there - outfile.write(" check_command\t\t\tcheck-mk-ping!%s\n" % ping_args) - elif ping_args and is_clust: # use check_icmp in cluster mode - outfile.write(" check_command\t\t\tcheck-mk-ping-cluster!%s\n" % ping_args) - elif ping_args: # use special arguments - outfile.write(" check_command\t\t\tcheck-mk-ping!%s\n" % ping_args) - + # Host check command might differ from default + command = host_check_command(hostname, ip, is_clust) + if command: + outfile.write(" check_command\t\t\t%s\n" % command) # WATO folder path path = host_paths.get(hostname) @@ -1600,16 +1959,12 @@ if not extra_conf_parents: outfile.write(" parents\t\t\t%s\n" % ",".join(nodes)) - # Host check uses (service-) IP address if available - if ip: - outfile.write(" check_command\t\t\tcheck-mk-ping\n") - - # Output alias, but only if it's not define in extra_host_conf - aliases = host_extra_conf(hostname, extra_host_conf.get("alias", [])) - if len(aliases) == 0: + # Output alias, but only if it's not defined in extra_host_conf + alias = alias_of(hostname, None) + if alias == None: outfile.write(" alias\t\t\t\t%s\n" % alias) else: - alias = make_utf8(aliases[0]) + alias = make_utf8(alias) # Custom configuration last -> user may override all other values @@ -1662,13 +2017,39 @@ # ___) | # |____/ 3. 
Services - host_checks = get_check_table(hostname).items() + + def do_omit_service(hostname, description): + if service_ignored(hostname, None, description): + return True + if hostname != host_of_clustered_service(hostname, description): + return True + return False + + def get_dependencies(hostname,servicedesc): + result = "" + for dep in service_deps(hostname, servicedesc): + result += """ +define servicedependency { + use\t\t\t\t%s + host_name\t\t\t%s + service_description\t%s + dependent_host_name\t%s + dependent_service_description %s +}\n +""" % (service_dependency_template, hostname, dep, hostname, servicedesc) + + return result + + host_checks = get_check_table(hostname, remove_duplicates=True).items() host_checks.sort() # Create deterministic order aggregated_services_conf = set([]) do_aggregation = host_is_aggregated(hostname) have_at_least_one_service = False used_descriptions = {} for ((checkname, item), (params, description, deps)) in host_checks: + if checkname not in check_info: + continue # simply ignore missing checks + # Make sure, the service description is unique on this host if description in used_descriptions: cn, it = used_descriptions[description] @@ -1711,14 +2092,29 @@ if asn != "": aggregated_services_conf.add(asn) + # Add the check interval of either the Check_MK service or + # (if configured) the snmp_check_interval for snmp based checks + check_interval = 1 # default hardcoded interval + # Customized interval of Check_MK service + values = service_extra_conf(hostname, "Check_MK", extra_service_conf.get('check_interval', [])) + if values: + try: + check_interval = int(values[0]) + except: + check_interval = float(values[0]) + value = check_interval_of(hostname, checkname) + if value is not None: + check_interval = value + outfile.write("""define service { use\t\t\t\t%s host_name\t\t\t%s service_description\t\t%s + check_interval\t\t%d %s%s check_command\t\t\tcheck_mk-%s } -""" % ( template, hostname, description, logwatch, +""" % ( template, hostname, description, check_interval, logwatch, extra_service_conf_of(hostname, description), checkname )) checknames_to_define.add(checkname) @@ -1782,33 +2178,16 @@ %s service_description\t\tCheck_MK } """ % (active_service_template, hostname, extra_service_conf_of(hostname, "Check_MK"))) - # Inventory checks - if user has configured them. Not for clusters. 
- if inventory_check_interval and not is_cluster(hostname): - outfile.write(""" -define service { - use\t\t\t\t%s - host_name\t\t\t%s - normal_check_interval\t\t%d - retry_check_interval\t\t%d -%s service_description\t\tCheck_MK inventory -} - -define servicedependency { - use\t\t\t\t%s - host_name\t\t\t%s - service_description\t\tCheck_MK - dependent_host_name\t\t%s - dependent_service_description\tCheck_MK inventory -} -""" % (inventory_check_template, hostname, inventory_check_interval, inventory_check_interval, - extra_service_conf_of(hostname, "Check_MK inventory"), - service_dependency_template, hostname, hostname)) # legacy checks via legacy_checks legchecks = host_extra_conf(hostname, legacy_checks) if len(legchecks) > 0: outfile.write("\n\n# Legacy checks\n") for command, description, has_perfdata in legchecks: + description = sanitize_service_description(description) + if do_omit_service(hostname, description): + continue + if description in used_descriptions: cn, it = used_descriptions[description] raise MKGeneralException( @@ -1835,6 +2214,9 @@ %s} """ % (template, hostname, make_utf8(description), simulate_command(command), extraconf)) + # write service dependencies for legacy checks + outfile.write(get_dependencies(hostname,description)) + # legacy checks via active_checks actchecks = [] needed_commands = [] @@ -1849,13 +2231,29 @@ if actchecks: outfile.write("\n\n# Active checks\n") for acttype, act_info, params in actchecks: + # Make hostname available as global variable in argument functions + global g_hostname + g_hostname = hostname + has_perfdata = act_info.get('has_perfdata', False) - description = act_info["service_description"](params) + description = sanitize_service_description( + act_info["service_description"](params) + .replace('$HOSTNAME$', g_hostname)) + + if do_omit_service(hostname, description): + continue + # compute argument, and quote ! and \ for Nagios args = act_info["argument_function"](params).replace("\\", "\\\\").replace("!", "\\!") if description in used_descriptions: cn, it = used_descriptions[description] + # If we have the same active check again with the same description, + # then we do not regard this as an error, but simply ignore the + # second one. That way one can override a check with other settings. + if cn == "active(%s)" % acttype: + continue + raise MKGeneralException( "ERROR: Duplicate service description (active check) '%s' for host '%s'!\n" " - 1st occurrance: checktype = %s, item = %r\n" @@ -1876,8 +2274,10 @@ check_command\t\t\t%s active_checks_enabled\t\t1 %s} -""" % (template, hostname, make_utf8(description), simulate_command(command), extraconf)) +""" % (template, hostname, make_utf8(description), make_utf8(simulate_command(command)), extraconf)) + # write service dependencies for active checks + outfile.write(get_dependencies(hostname,description)) # Legacy checks via custom_checks custchecks = host_extra_conf(hostname, custom_checks) @@ -1891,22 +2291,16 @@ # "command_name" (optional) Name of Monitoring command to define. 
If missing, # we use "check-mk-custom" # "has_perfdata" (optional) If present and True, we activate perf_data - description = entry["service_description"] + description = sanitize_service_description(entry["service_description"]) has_perfdata = entry.get("has_perfdata", False) command_name = entry.get("command_name", "check-mk-custom") command_line = entry.get("command_line", "") + if do_omit_service(hostname, description): + continue + if command_line: - plugin_name = command_line.split()[0] - if command_line[0] not in [ '$', '/' ]: - try: - for dir in [ "/local", "" ]: - path = omd_root + dir + "/lib/nagios/plugins/" - if os.path.exists(path + plugin_name): - command_line = path + command_line - break - except: - pass + command_line = autodetect_plugin(command_line).replace("\\", "\\\\") if "freshness" in entry: freshness = " check_freshness\t\t1\n" + \ @@ -1916,11 +2310,15 @@ else: freshness = "" - custom_commands_to_define.add(command_name) if description in used_descriptions: cn, it = used_descriptions[description] + # If we have the same active check again with the same description, + # then we do not regard this as an error, but simply ignore the + # second one. + if cn == "custom(%s)" % command_name: + continue raise MKGeneralException( "ERROR: Duplicate service description (custom check) '%s' for host '%s'!\n" " - 1st occurrance: checktype = %s, item = %r\n" @@ -1943,6 +2341,42 @@ """ % (template, hostname, make_utf8(description), simulate_command(command), (command_line and not freshness) and 1 or 0, extraconf, freshness)) + # write service dependencies for custom checks + outfile.write(get_dependencies(hostname,description)) + + # FIXME: Remove old name one day + service_discovery_name = 'Check_MK inventory' + if 'cmk-inventory' in use_new_descriptions_for: + service_discovery_name = 'Check_MK Discovery' + + # Inventory checks - if user has configured them. + if inventory_check_interval \ + and not service_ignored(hostname, None, service_discovery_name) \ + and not "ping" in tags_of_host(hostname): # FIXME/TODO: Why not user is_ping_host()? 
+ outfile.write(""" +define service { + use\t\t\t\t%s + host_name\t\t\t%s + normal_check_interval\t\t%d + retry_check_interval\t\t%d +%s service_description\t\t%s +} +""" % (inventory_check_template, hostname, inventory_check_interval, + inventory_check_interval, + extra_service_conf_of(hostname, service_discovery_name), + service_discovery_name)) + + if have_at_least_one_service: + outfile.write(""" +define servicedependency { + use\t\t\t\t%s + host_name\t\t\t%s + service_description\t\tCheck_MK + dependent_host_name\t\t%s + dependent_service_description\t%s +} +""" % (service_dependency_template, hostname, hostname, service_discovery_name)) + # Levels for host check if is_cluster(hostname): ping_command = 'check-mk-ping-cluster' @@ -1958,7 +2392,20 @@ %s host_name\t\t\t%s } -""" % (pingonly_template, ping_command, check_icmp_arguments(hostname), extra_service_conf_of(hostname, "PING"), hostname)) +""" % (pingonly_template, ping_command, check_icmp_arguments_of(hostname), extra_service_conf_of(hostname, "PING"), hostname)) + +def autodetect_plugin(command_line): + plugin_name = command_line.split()[0] + if command_line[0] not in [ '$', '/' ]: + try: + for dir in [ "/local", "" ]: + path = omd_root + dir + "/lib/nagios/plugins/" + if os.path.exists(path + plugin_name): + command_line = path + command_line + break + except: + pass + return command_line def simulate_command(command): if simulation_mode: @@ -1989,7 +2436,7 @@ # No creation of host groups but we need to define # default host group elif default_host_group in hostgroups_to_define: - outfile.write(""" + outfile.write(""" define hostgroup { hostgroup_name\t\t%s alias\t\t\t\tCheck_MK default hostgroup @@ -2069,6 +2516,15 @@ """ % command_name) + # custom host checks + for command_name, command_line in hostcheck_commands_to_define: + outfile.write("""define command { + command_name\t\t\t%s + command_line\t\t\t%s +} + +""" % (command_name, command_line)) + def create_nagios_config_timeperiods(outfile): if len(timeperiods) > 0: @@ -2091,7 +2547,7 @@ outfile.write(" exclude\t\t\t%s\n" % ",".join(tp["exclude"])) outfile.write("}\n\n") -def create_nagios_config_contacts(outfile): +def create_nagios_config_contacts(outfile, hostnames): if len(contacts) > 0: outfile.write("\n# ------------------------------------------------------------\n") outfile.write("# Contact definitions (controlled by variable 'contacts')\n") @@ -2100,6 +2556,10 @@ cnames.sort() for cname in cnames: contact = contacts[cname] + # Create contact groups in nagios, even when they are empty. 
This is needed + # for RBN to work correctly when using contactgroups as recipients which are + # not assigned to any host + contactgroups_to_define.update(contact.get("contactgroups", [])) # If the contact is in no contact group or all of the contact groups # of the contact have neither hosts nor services assigned - in other # words if the contact is not assigned to any host or service, then @@ -2116,7 +2576,11 @@ outfile.write(" email\t\t\t\t%s\n" % contact["email"]) if "pager" in contact: outfile.write(" pager\t\t\t\t%s\n" % contact["pager"]) - not_enabled = contact.get("notifications_enabled", True) + if enable_rulebased_notifications: + not_enabled = False + else: + not_enabled = contact.get("notifications_enabled", True) + for what in [ "host", "service" ]: no = contact.get(what + "_notification_options", "") if not no or not not_enabled: @@ -2125,10 +2589,28 @@ outfile.write(" %s_notification_options\t%s\n" % (what, ",".join(list(no)))) outfile.write(" %s_notification_period\t%s\n" % (what, contact.get("notification_period", "24X7"))) outfile.write(" %s_notification_commands\t%s\n" % (what, contact.get("%s_notification_commands" % what, "check-mk-notify"))) + # Add custom macros + for macro in [ m for m in contact.keys() if m.startswith('_') ]: + outfile.write(" %s\t%s\n" % ( macro, contact[macro] )) outfile.write(" contactgroups\t\t\t%s\n" % ", ".join(cgrs)) outfile.write("}\n\n") + if enable_rulebased_notifications and hostnames: + outfile.write( + "# Needed for rule based notifications\n" + "define contact {\n" + " contact_name\t\t\tcheck-mk-notify\n" + " alias\t\t\t\tContact for rule based notifications\n" + " host_notification_options\td,u,r,f,s\n" + " service_notification_options\tu,c,w,r,f,s\n" + " host_notification_period\t24X7\n" + " service_notification_period\t24X7\n" + " host_notification_commands\tcheck-mk-notify\n" + " service_notification_commands\tcheck-mk-notify\n" + " contactgroups\t\t\tcheck-mk-notify\n" + "}\n\n"); + # Quote string for use in a nagios command execution. # Please note that also quoting for ! and \ vor Nagios @@ -2136,405 +2618,8 @@ def quote_nagios_string(s): return "'" + s.replace('\\', '\\\\').replace("'", "'\"'\"'").replace('!', '\\!') + "'" - - - -# +----------------------------------------------------------------------+ -# | ___ _ | -# | |_ _|_ ____ _____ _ __ | |_ ___ _ __ _ _ | -# | | || '_ \ \ / / _ \ '_ \| __/ _ \| '__| | | | | -# | | || | | \ V / __/ | | | || (_) | | | |_| | | -# | |___|_| |_|\_/ \___|_| |_|\__\___/|_| \__, | | -# | |___/ | -# +----------------------------------------------------------------------+ - - -def inventorable_checktypes(what): # snmp, tcp, all - checknames = [ k for k in check_info.keys() - if check_info[k]["inventory_function"] != None - and (what == "all" - or check_uses_snmp(k) == (what == "snmp")) - ] - checknames.sort() - return checknames - -def checktype_ignored_for_host(host, checktype): - if checktype in ignored_checktypes: - return True - ignored = host_extra_conf(host, ignored_checks) - for e in ignored: - if checktype == e or (type(e) == list and checktype in e): - return True - return False - -def do_snmp_scan(hostnamelist, check_only=False, include_state=False): - if hostnamelist == []: - hostnamelist = all_hosts_untagged - - result = [] - for hostname in hostnamelist: - if not is_snmp_host(hostname): - continue - try: - ipaddress = lookup_ipaddress(hostname) - except: - sys.stdout.write("Cannot resolve %s into IP address. 
Skipping.\n" % hostname) - continue - checknames = snmp_scan(hostname, ipaddress) - for checkname in checknames: - if opt_debug: - sys.stdout.write("Trying inventory for %s on %s\n" % (checkname, hostname)) - result += make_inventory(checkname, [hostname], check_only, include_state) - return result - - - -def make_inventory(checkname, hostnamelist, check_only=False, include_state=False): - try: - inventory_function = check_info[checkname]["inventory_function"] - if inventory_function == None: - inventory_function = no_inventory_possible - except KeyError: - sys.stderr.write("No such check type '%s'. Try check_mk -L.\n" % checkname) - sys.exit(1) - - is_snmp_check = check_uses_snmp(checkname) - - newchecks = [] - newitems = [] # used by inventory check to display unchecked items - count_new = 0 - checked_hosts = [] - - # if no hostnamelist is specified, we use all hosts - if not hostnamelist or len(hostnamelist) == 0: - global opt_use_cachefile - opt_use_cachefile = True - hostnamelist = all_hosts_untagged - - try: - for host in hostnamelist: - - # Skip SNMP checks on non-SNMP hosts - if is_snmp_check and not is_snmp_host(host): - continue - - # Skip TCP checks on non-TCP hosts - if not is_snmp_check and not is_tcp_host(host): - continue - - # Skip checktypes which are generally ignored for this host - # DONE LATER: if checktype_ignored_for_host(host, checkname): - # continue - - if is_cluster(host): - sys.stderr.write("%s is a cluster host and cannot be inventorized.\n" % host) - continue - - # host is either hostname or "hostname/ipaddress" - s = host.split("/") - hostname = s[0] - if len(s) == 2: - ipaddress = s[1] - else: - # try to resolve name into ip address - if not opt_no_tcp: - try: - ipaddress = lookup_ipaddress(hostname) - except: - sys.stderr.write("Cannot resolve %s into IP address.\n" % hostname) - continue - else: - ipaddress = None # not needed, not TCP used - - # Make hostname available as global variable in inventory functions - # (used e.g. by ps-inventory) - global g_hostname - g_hostname = hostname - - # On --no-tcp option skip hosts without cache file - if opt_no_tcp: - if opt_no_cache: - sys.stderr.write("You allowed me neither TCP nor cache. Bailing out.\n") - sys.exit(4) - - cachefile = tcp_cache_dir + "/" + hostname - if not os.path.exists(cachefile): - if opt_verbose: - sys.stderr.write("No cachefile %s. Skipping this host.\n" % cachefile) - continue - - checked_hosts.append(hostname) - - checkname_base = checkname.split('.')[0] # make e.g. 'lsi' from 'lsi.arrays' - try: - info = get_realhost_info(hostname, ipaddress, checkname_base, inventory_max_cachefile_age) - # Add information about nodes if check wants this - if check_info[checkname]["node_info"]: - if clusters_of(hostname): - add_host = hostname - else: - add_host = None - info = [ [add_host] + line for line in info ] - except MKAgentError, e: - # This special handling is needed for the inventory check. It needs special - # handling for WATO. - if check_only and not include_state and str(e): - raise - elif not include_state and str(e): - sys.stderr.write("Host '%s': %s\n" % (hostname, str(e))) - elif include_state and str(e): # WATO automation. Abort - raise - continue - except MKSNMPError, e: - # This special handling is needed for the inventory check. It needs special - # handling for WATO. 
- if check_only and not include_state and str(e): - raise - elif not include_state and str(e): - sys.stderr.write("Host '%s': %s\n" % (hostname, str(e))) - continue - except Exception, e: - if check_only or opt_debug: - raise - sys.stderr.write("Cannot get information from host '%s': %s\n" % (hostname, e)) - continue - - if info == None: # No data for this check type - continue - try: - # Check number of arguments of inventory function - if len(inspect.getargspec(inventory_function)[0]) == 2: - inventory = inventory_function(checkname, info) # inventory is a list of pairs (item, current_value) - else: - # New preferred style since 1.1.11i3: only one argument: info - inventory = inventory_function(info) - - if inventory == None: # tolerate if function does no explicit return - inventory = [] - except Exception, e: - if opt_debug: - sys.stderr.write("Exception in inventory function of check type %s\n" % checkname) - raise - if opt_verbose: - sys.stderr.write("%s: Invalid output from agent or invalid configuration: %s\n" % (hostname, e)) - continue - - if not isinstance(inventory, list): - sys.stderr.write("%s: Check %s returned invalid inventory data: %s\n" % - (hostname, checkname, repr(inventory))) - continue - - for entry in inventory: - state_type = "new" # assume new, change later if wrong - - if not isinstance(entry, tuple): - sys.stderr.write("%s: Check %s returned invalid inventory data (entry not a tuple): %s\n" % - (hostname, checkname, repr(inventory))) - continue - - if len(entry) == 2: # comment is now obsolete - item, paramstring = entry - else: - try: - item, comment, paramstring = entry - except ValueError: - sys.stderr.write("%s: Check %s returned invalid inventory data (not 2 or 3 elements): %s\n" % - (hostname, checkname, repr(inventory))) - continue - - description = service_description(checkname, item) - # make sanity check - if len(description) == 0: - sys.stderr.write("%s: Check %s returned empty service description - ignoring it.\n" % - (hostname, checkname)) - continue - - - # Find logical host this check belongs to. The service might belong to a cluster. - hn = host_of_clustered_service(hostname, description) - - # Now compare with already known checks for this host (from - # previous inventory or explicit checks). Also drop services - # the user wants to ignore via 'ignored_services'. - checktable = get_check_table(hn) - checked_items = [ i for ( (cn, i), (par, descr, deps) ) \ - in checktable.items() if cn == checkname ] - if item in checked_items: - if include_state: - state_type = "old" - else: - continue # we have that already - - if service_ignored(hn, checkname, description): - if include_state: - if state_type == "old": - state_type = "obsolete" - else: - state_type = "ignored" - else: - continue # user does not want this item to be checked - - newcheck = ' ("%s", "%s", %r, %s),' % (hn, checkname, item, paramstring) - newcheck += "\n" - if newcheck not in newchecks: # avoid duplicates if inventory outputs item twice - newchecks.append(newcheck) - if include_state: - newitems.append( (hn, checkname, item, paramstring, state_type) ) - else: - newitems.append( (hn, checkname, item) ) - count_new += 1 - - - except KeyboardInterrupt: - sys.stderr.write('\n') - - - if not check_only: - if newchecks != []: - filename = autochecksdir + "/" + checkname + "-" + time.strftime("%Y-%m-%d_%H.%M.%S") - while os.path.exists(filename + ".mk"): # in case of more than one file per second and checktype... 
- filename += ".x" - filename += ".mk" - if not os.path.exists(autochecksdir): - os.makedirs(autochecksdir) - file(filename, "w").write('# %s\n[\n%s]\n' % (filename, ''.join(newchecks))) - sys.stdout.write('%-30s ' % (tty_cyan + tty_bold + checkname + tty_normal)) - sys.stdout.write('%s%d new checks%s\n' % (tty_bold + tty_green, count_new, tty_normal)) - - return newitems - - -def check_inventory(hostname): - newchecks = [] - newitems = [] - total_count = 0 - is_snmp = is_snmp_host(hostname) - is_tcp = is_tcp_host(hostname) - check_table = get_check_table(hostname) - hosts_checktypes = set([ ct for (ct, item), params in check_table.items() ]) - try: - for ct in inventorable_checktypes("all"): - if check_uses_snmp(ct) and not is_snmp: - continue # Skip SNMP checks on non-SNMP hosts - elif check_uses_snmp(ct) and ct not in hosts_checktypes: - continue # Do not look for new SNMP services (maybe change in future) - elif not check_uses_snmp(ct) and not is_tcp: - continue # Skip TCP checks on non-TCP hosts - - new = make_inventory(ct, [hostname], True) - newitems += new - count = len(new) - if count > 0: - newchecks.append((ct, count)) - total_count += count - if total_count > 0: - info = ", ".join([ "%s:%d" % (ct, count) for ct,count in newchecks ]) - statustext = { 0 : "OK", 1: "WARNING", 2:"CRITICAL" }.get(inventory_check_severity, "UNKNOWN") - sys.stdout.write("%s - %d unchecked services (%s)\n" % (statustext, total_count, info)) - # Put detailed list into long plugin output - for hostname, checkname, item in newitems: - sys.stdout.write("%s: %s\n" % (checkname, service_description(checkname, item))) - sys.exit(inventory_check_severity) - else: - sys.stdout.write("OK - no unchecked services found\n") - sys.exit(0) - except SystemExit, e: - raise e - except Exception, e: - if opt_debug: - raise - sys.stdout.write("UNKNOWN - %s\n" % (e,)) - sys.exit(3) - - -def service_ignored(hostname, checktype, service_description): - if checktype in ignored_checktypes: - return True - if in_boolean_serviceconf_list(hostname, service_description, ignored_services): - return True - if checktype_ignored_for_host(hostname, checktype): - return True - return False - - -def in_boolean_serviceconf_list(hostname, service_description, conflist): - for entry in conflist: - entry, rule_options = get_rule_options(entry) - if rule_options.get("disabled"): - continue - - if entry[0] == NEGATE: # this entry is logically negated - negate = True - entry = entry[1:] - else: - negate = False - - if len(entry) == 2: - hostlist, servlist = entry - tags = [] - elif len(entry) == 3: - tags, hostlist, servlist = entry - else: - raise MKGeneralException("Invalid entry '%r' in configuration: must have 2 or 3 elements" % (entry,)) - - if hosttags_match_taglist(tags_of_host(hostname), tags) and \ - in_extraconf_hostlist(hostlist, hostname) and \ - in_extraconf_servicelist(servlist, service_description): - if opt_verbose: - print "Ignoring service '%s' on host %s." % (service_description, hostname) - return not negate - return False # no match. 
Do not ignore
-
-
-# Remove all autochecks of certain types of a certain host
-def remove_autochecks_of(hostname, checktypes = None): # None = all
-    removed = 0
-    for fn in glob.glob(autochecksdir + "/*.mk"):
-        if opt_debug:
-            sys.stdout.write("Scanning %s...\n" % fn)
-        lines = []
-        count = 0
-        for line in file(fn):
-            # hostname and check type can be quoted with ' or with "
-            double_quoted = line.replace("'", '"').lstrip()
-            if double_quoted.startswith('("'):
-                count += 1
-                splitted = double_quoted.split('"')
-                if splitted[1] != hostname or (checktypes != None and splitted[3] not in checktypes):
-                    if splitted[3] not in check_info:
-                        sys.stderr.write('Removing unimplemented check %s\n' % splitted[3])
-                        continue
-                    lines.append(line)
-                else:
-                    removed += 1
-        if len(lines) == 0:
-            if opt_verbose:
-                sys.stdout.write("Deleting %s.\n" % fn)
-            os.remove(fn)
-        elif count > len(lines):
-            if opt_verbose:
-                sys.stdout.write("Removing %d checks from %s.\n" % (count - len(lines), fn))
-            f = file(fn, "w+")
-            f.write("[\n")
-            for line in lines:
-                f.write(line)
-            f.write("]\n")
-
-    return removed
-
-def remove_all_autochecks():
-    for f in glob.glob(autochecksdir + '/*.mk'):
-        if opt_verbose:
-            sys.stdout.write("Deleting %s.\n" % f)
-        os.remove(f)
-
-def reread_autochecks():
-    global checks
-    checks = checks[len(autochecks):]
-    read_all_autochecks()
-    checks = autochecks + checks
-
-# +----------------------------------------------------------------------+
+#.
+# .--Precompile----------------------------------------------------------.
 # | ____ _ _ |
 # | | _ \ _ __ ___ ___ ___ _ __ ___ _ __ (_) | ___ |
 # | | |_) | '__/ _ \/ __/ _ \| '_ ` _ \| '_ \| | |/ _ \ |
@@ -2542,6 +2627,13 @@
 # | |_| |_| \___|\___\___/|_| |_| |_| .__/|_|_|\___| |
 # | |_| |
 # +----------------------------------------------------------------------+
+# | Precompiling creates one dedicated Python file per host, which just |
+# | contains the code and information that is needed for executing all |
+# | checks of that host. Also static data that cannot change during the |
+# | normal monitoring process is being precomputed and hard coded. This |
+# | all saves substantial CPU resources as opposed to running Check_MK |
+# | in ad-hoc mode (about 75%). |
+# '----------------------------------------------------------------------'

 # Find files to be included in precompile host check for a certain
 # check (for example df or mem.used). In case of checks with a period
@@ -2568,7 +2660,7 @@
     return paths

 def get_precompiled_check_table(hostname):
-    host_checks = get_sorted_check_table(hostname)
+    host_checks = get_sorted_check_table(hostname, remove_duplicates=True)
     precomp_table = []
     for checktype, item, params, description, deps in host_checks:
         aggr_name = aggregated_service_name(hostname, description)

@@ -2642,14 +2734,18 @@
     output.write(stripped_python_file(modules_dir + "/check_mk_base.py"))
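
The Precompile banner above names the key idea: every config lookup that cannot
change at runtime is evaluated once, at compile time, and written into the
generated file as a literal. A minimal stand-alone sketch of that idea
(compile_host_check and the tiny check table are invented for illustration; the
real generated files contain far more):

    def compile_host_check(hostname, ipaddress, check_table, path):
        out = open(path, "w")
        out.write("#!/usr/bin/python\n")
        # config lookups are done now and hard coded as literals, so the
        # compiled check parses no configuration at runtime
        out.write("hostname = %r\n" % hostname)
        out.write("ipaddress = %r\n" % ipaddress)
        out.write("def get_sorted_check_table(hostname):\n    return %r\n" % check_table)
        out.write("for ct, item, params in get_sorted_check_table(hostname):\n")
        out.write("    print 'would run %s/%s' % (ct, item)\n")
        out.close()

    compile_host_check("myhost", "10.1.1.1", [("df", "/", {})], "/tmp/check_myhost.py")

+    # TODO: can we avoid adding this module if no predictive monitoring
+    # is being used?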
+    output.write(stripped_python_file(modules_dir + "/prediction.py"))
+
     # initialize global variables
     output.write("""
 # very simple commandline parsing: only -v and -d are supported
-opt_verbose = '-v' in sys.argv
+opt_verbose = ('-v' in sys.argv) and 1 or 0
 opt_debug = '-d' in sys.argv

 # make sure these names are defined (even if never needed)
-no_inventory_possible = None
+no_discovery_possible = None
 """)

     # Compile in all necessary global variables
@@ -2657,17 +2753,20 @@
     for var in [ 'check_mk_version', 'tcp_connect_timeout', 'agent_min_version',
                  'perfdata_format', 'aggregation_output_format',
                  'aggr_summary_hostname', 'nagios_command_pipe_path',
-                 'check_result_path', 'check_submission',
-                 'var_dir', 'counters_directory', 'tcp_cache_dir',
-                 'snmpwalks_dir', 'check_mk_basedir', 'nagios_user',
+                 'check_result_path', 'check_submission', 'monitoring_core',
+                 'var_dir', 'counters_directory', 'tcp_cache_dir', 'tmp_dir', 'log_dir',
+                 'snmpwalks_dir', 'check_mk_basedir', 'nagios_user', 'rrd_path', 'rrdcached_socket',
+                 'omd_root', 'www_group',
                  'cluster_max_cachefile_age', 'check_max_cachefile_age',
-                 'simulation_mode', 'agent_simulator', 'aggregate_check_mk', 'debug_log',
+                 'piggyback_max_cachefile_age',
+                 'simulation_mode', 'agent_simulator', 'aggregate_check_mk',
                  'check_mk_perfdata_with_times', 'livestatus_unix_socket',
+                 'use_inline_snmp', 'record_inline_snmp_stats',
                  ]:
         output.write("%s = %r\n" % (var, globals()[var]))

     output.write("\n# Checks for %s\n\n" % hostname)
-    output.write("def get_sorted_check_table(hostname):\n    return %r\n\n" % check_table)
+    output.write("def get_sorted_check_table(hostname, remove_duplicates=False, world='config'):\n    return %r\n\n" % check_table)

     # Do we need to load the SNMP module? This is the case if the host
     # has at least one SNMP based check. 
Also collect the needed check @@ -2676,6 +2775,7 @@ needed_check_types = set([]) needed_sections = set([]) service_timeperiods = {} + check_intervals = {} for check_type, item, param, descr, aggr in check_table: if check_type not in check_info: sys.stderr.write('Warning: Ignoring missing check %s.\n' % check_type) @@ -2683,18 +2783,31 @@ period = check_period_of(hostname, descr) if period: service_timeperiods[descr] = period + interval = check_interval_of(hostname, check_type) + if interval is not None: + check_intervals[check_type] = interval needed_sections.add(check_type.split(".")[0]) needed_check_types.add(check_type) if check_uses_snmp(check_type): need_snmp_module = True + output.write("precompiled_check_intervals = %r\n" % check_intervals) + output.write("def check_interval_of(hostname, checktype):\n return precompiled_check_intervals.get(checktype)\n\n") output.write("precompiled_service_timeperiods = %r\n" % service_timeperiods) output.write("def check_period_of(hostname, service):\n return precompiled_service_timeperiods.get(service)\n\n") if need_snmp_module: output.write(stripped_python_file(modules_dir + "/snmp.py")) + if has_inline_snmp and use_inline_snmp: + output.write(stripped_python_file(modules_dir + "/inline_snmp.py")) + output.write("\ndef oid_range_limits_of(hostname):\n return %r\n" % oid_range_limits_of(hostname)) + else: + output.write("has_inline_snmp = False\n") + else: + output.write("has_inline_snmp = False\n") + if agent_simulator: output.write(stripped_python_file(modules_dir + "/agent_simulator.py")) @@ -2754,9 +2867,18 @@ # snmp hosts output.write("def is_snmp_host(hostname):\n return %r\n\n" % is_snmp_host(hostname)) + output.write("def is_snmpv3_host(hostname):\n return % r\n\n" % is_snmpv3_host(hostname)) output.write("def is_tcp_host(hostname):\n return %r\n\n" % is_tcp_host(hostname)) - output.write("def snmp_walk_command(hostname):\n return %r\n\n" % snmp_walk_command(hostname)) output.write("def is_usewalk_host(hostname):\n return %r\n\n" % is_usewalk_host(hostname)) + if has_inline_snmp and use_inline_snmp: + output.write("def is_snmpv2c_host(hostname):\n return %r\n\n" % is_snmpv2c_host(hostname)) + output.write("def is_bulkwalk_host(hostname):\n return %r\n\n" % is_bulkwalk_host(hostname)) + output.write("def snmp_timing_of(hostname):\n return %r\n\n" % snmp_timing_of(hostname)) + output.write("def snmp_credentials_of(hostname):\n return %s\n\n" % pprint.pformat(snmp_credentials_of(hostname))) + output.write("def snmp_port_of(hostname):\n return %r\n\n" % snmp_port_of(hostname)) + else: + output.write("def snmp_port_spec(hostname):\n return %r\n\n" % snmp_port_spec(hostname)) + output.write("def snmp_walk_command(hostname):\n return %r\n\n" % snmp_walk_command(hostname)) # IP addresses needed_ipaddresses = {} @@ -2795,7 +2917,15 @@ # TCP and SNMP port of agent output.write("def agent_port_of(hostname):\n return %d\n\n" % agent_port_of(hostname)) - output.write("def snmp_port_spec(hostname):\n return %r\n\n" % snmp_port_spec(hostname)) + + # Exit code of Check_MK in case of various errors + output.write("def exit_code_spec(hostname):\n return %r\n\n" % exit_code_spec(hostname)) + + # Piggyback translations + output.write("def get_piggyback_translation(hostname):\n return %r\n\n" % get_piggyback_translation(hostname)) + + # Expected agent version + output.write("def agent_target_version(hostname):\n return %r\n\n" % (agent_target_version(hostname),)) # SNMP character encoding output.write("def get_snmp_character_encoding(hostname):\n return 
%r\n\n" % get_snmp_character_encoding(hostname))

@@ -2816,7 +2946,7 @@
     # perform actual check with a general exception handler
     output.write("try:\n")
-    output.write("    do_check(%r, %r)\n" % (hostname, ipaddress))
+    output.write("    sys.exit(do_check(%r, %r))\n" % (hostname, ipaddress))
     output.write("except SystemExit, e:\n")
     output.write("    sys.exit(e.code)\n")
     output.write("except Exception, e:\n")
@@ -2829,18 +2959,18 @@
     output.write("        sys.stdout.write(\"Traceback: %s\\n\" % traceback.format_exc())\n")

     # debug logging
-    output.write("    if debug_log:\n")
-    output.write("        l = file(debug_log, \"a\")\n")
-    output.write("        l.write((\"Exception in precompiled check:\\n\"\n")
-    output.write("                 \"  Check_MK Version: %s\\n\"\n")
-    output.write("                 \"  Date: %s\\n\"\n")
-    output.write("                 \"  Host: %s\\n\"\n")
-    output.write("                 \"  %s\\n\") % (\n")
-    output.write("                 check_mk_version,\n")
-    output.write("                 time.strftime(\"%Y-%d-%m %H:%M:%S\"),\n")
-    output.write("                 \"%s\",\n" % hostname)
-    output.write("                 traceback.format_exc().replace('\\n', '\\n      ')))\n")
-    output.write("        l.close()\n")
+    output.write("\n")
+    output.write("    l = file(log_dir + \"/crashed-checks.log\", \"a\")\n")
+    output.write("    l.write((\"Exception in precompiled check:\\n\"\n")
+    output.write("             \"  Check_MK Version: %s\\n\"\n")
+    output.write("             \"  Date: %s\\n\"\n")
+    output.write("             \"  Host: %s\\n\"\n")
+    output.write("             \"  %s\\n\") % (\n")
+    output.write("             check_mk_version,\n")
+    output.write("             time.strftime(\"%Y-%m-%d %H:%M:%S\"),\n")
+    output.write("             \"%s\",\n" % hostname)
+    output.write("             traceback.format_exc().replace('\\n', '\\n      ')))\n")
+    output.write("    l.close()\n")
     output.write("    sys.exit(3)\n")
     output.close()
@@ -2869,17 +2999,117 @@
     if opt_verbose:
         sys.stderr.write(" ==> %s.\n" % compiled_filename)
-
-# +----------------------------------------------------------------------+
-# | __ __ _ |
-# | | \/ | __ _ _ __ _ _ __ _| | |
-# | | |\/| |/ _` | '_ \| | | |/ _` | |
-# | | | | | (_| | | | | |_| | (_| | |
-# | |_| |_|\__,_|_| |_|\__,_|\__,_|_| |
-# | |
+#.
+# .--Pack config---------------------------------------------------------.
+# | ____ _ __ _ |
+# | | _ \ __ _ ___| | __ ___ ___ _ __ / _(_) __ _ |
+# | | |_) / _` |/ __| |/ / / __/ _ \| '_ \| |_| |/ _` | |
+# | | __/ (_| | (__| < | (_| (_) | | | | _| | (_| | |
+# | |_| \__,_|\___|_|\_\ \___\___/|_| |_|_| |_|\__, | |
+# | |___/ |
 # +----------------------------------------------------------------------+
+# | Create packaged and precompiled config for keepalive mode |
+# '----------------------------------------------------------------------'
+
+# Create a packed version of the configuration (main.mk and friends)
+# and put that to var/check_mk/core/config.mk. Also create a copy
+# of all autochecks files. The check helpers of the running core just
+# use those files, so that changes in the actual config do not harm
+# the running system.
+
+derived_config_variable_names = [ "hosttags" ]
+
+# These variables are part of the Check_MK configuration, but are not needed
+# by the Check_MK keepalive mode, so exclude them from the packed config
+skipped_config_variable_names = [
+    "define_contactgroups",
+    "define_hostgroups",
+    "define_servicegroups",
+    "service_contactgroups",
+    "host_contactgroups",
+    "service_groups",
+    "host_groups",
+    "contacts",
+    "host_paths",
+    "timeperiods",
+    "host_attributes",
+    "all_hosts_untagged",
+    "extra_service_conf",
+    "extra_host_conf",
+    "extra_nagios_conf",
+]
+
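
The helper that pack_config() defines next decides packability with a repr/eval
round trip: a value may go into config.mk only if dumping it with repr() yields
Python that can be read back. A small stand-alone sketch of that round trip
(the sample values are invented for illustration):

    def is_packable(val):
        if isinstance(val, (int, str, bool)) or not val:
            return True
        try:
            eval(repr(val))            # only literal-representable values survive
            return True
        except Exception:
            return False

    print is_packable({"levels": (80.0, 90.0)})   # True  -> written to config.mk
    print is_packable(lambda h: h)                # False -> silently skipped

+def pack_config():
+    # Checks whether or not a variable can be written to the config.mk
+    # and read again from it. 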
+ def packable(varname, val): + if type(val) in [ int, str, unicode, bool ] or not val: + return True + + try: + eval(repr(val)) + return True + except: + return False + + filepath = var_dir + "/core/config.mk" + out = file(filepath + ".new", "w") + out.write("#!/usr/bin/python\n" + "# encoding: utf-8\n" + "# Created by Check_MK. Dump of the currently active configuration\n\n") + for varname in list(config_variable_names) + derived_config_variable_names: + if varname not in skipped_config_variable_names: + val = globals()[varname] + if packable(varname, val): + out.write("\n%s = %r\n" % (varname, val)) + + for varname, factory_setting in factory_settings.items(): + if varname in globals(): + out.write("\n%s = %r\n" % (varname, globals()[varname])) + else: # remove explicit setting from previous packed config! + out.write("\nif %r in globals():\n del %s\n" % (varname, varname)) + + out.close() + os.rename(filepath + ".new", filepath) -opt_nowiki = False +def pack_autochecks(): + dstpath = var_dir + "/core/autochecks" + if not os.path.exists(dstpath): + os.makedirs(dstpath) + srcpath = autochecksdir + needed = set([]) + + # hardlink used files + for f in os.listdir(srcpath): + if f.endswith(".mk"): + d = dstpath + "/" + f + if os.path.exists(d): + os.remove(d) + os.link(srcpath + "/" + f, d) + needed.add(f) + + # Remove obsolete files + for f in os.listdir(dstpath): + if f not in needed: + os.remove(dstpath + "/" + f) + +def read_packed_config(): + filepath = var_dir + "/core/config.mk" + execfile(filepath, globals()) + +#. +# .--Man-Pages-----------------------------------------------------------. +# | __ __ ____ | +# | | \/ | __ _ _ __ | _ \ __ _ __ _ ___ ___ | +# | | |\/| |/ _` | '_ \ _____| |_) / _` |/ _` |/ _ \/ __| | +# | | | | | (_| | | | |_____| __/ (_| | (_| | __/\__ \ | +# | |_| |_|\__,_|_| |_| |_| \__,_|\__, |\___||___/ | +# | |___/ | +# +----------------------------------------------------------------------+ +# | Each Check has a man page. Here is that code for displaying that in- | +# | line documentation and also some code for outputting it in a format | +# | that is used by the official Check_MK documentation ("nowiki"). 
| +# '----------------------------------------------------------------------' def get_tty_size(): import termios,struct,fcntl @@ -2901,23 +3131,183 @@ for fn in os.listdir(local_check_manpages_dir)])) return entries -def list_all_manuals(): - table = [] - for filename, path in all_manuals().items(): - if filename.endswith("~"): - continue +def list_all_manuals(): + table = [] + for filename, path in all_manuals().items(): + if filename.endswith("~"): + continue + + try: + for line in file(path): + if line.startswith("title:"): + table.append((filename, line.split(":", 1)[1].strip())) + except: + pass + + table.sort() + print_table(['Check type', 'Title'], [tty_bold, tty_normal], table) + +def read_manpage_catalog(): + global g_manpage_catalog + g_manpage_catalog = {} + for checkname, path in all_manuals().items(): + # Skip .* file (.f12) + a, filename = os.path.split(path) + if filename.startswith("."): + continue + try: + parsed = parse_man_header(checkname, path) + except Exception, e: + if opt_debug: + raise + sys.stderr.write('ERROR: Skipping invalid manpage: %s: %s\n' % (checkname, e)) + continue + + try: + cat = parsed["catalog"] + except KeyError: + if opt_debug: + raise + sys.stderr.write('ERROR: Skipping invalid manpage: %s (Catalog info missing)\n' % checkname) + continue + + if not cat: + cat = [ "unsorted" ] + + if cat[0] == "os": + for agent in parsed["agents"]: + acat = [cat[0]] + [agent] + cat[1:] + g_manpage_catalog.setdefault(tuple(acat), []).append(parsed) + else: + g_manpage_catalog.setdefault(tuple(cat), []).append(parsed) + +def manpage_browser(cat = ()): + read_manpage_catalog() + entries = [] + subtrees = set([]) + for c, e in g_manpage_catalog.items(): + if c[:len(cat)] == cat: + if len(c) > len(cat): + subtrees.add(c[len(cat)]) + else: # leaf node + entries = e + break + + if entries and subtrees: + sys.stderr.write("ERROR: Catalog path %s contains man pages and subfolders.\n" % ("/".join(cat))) + if entries: + manpage_browse_entries(cat, entries) + elif subtrees: + manpage_browser_folder(cat, subtrees) + +def manpage_num_entries(cat): + num = 0 + for c, e in g_manpage_catalog.items(): + if c[:len(cat)] == cat: + num += len(e) + return num + + +def manpage_browser_folder(cat, subtrees): + execfile(modules_dir + "/catalog.py", globals()) + titles = [] + for e in subtrees: + title = manpage_catalog_titles.get(e,e) + count = manpage_num_entries(cat + (e,)) + if count: + title += " (%d)" % count + titles.append((title, e)) + titles.sort() + choices = [ (str(n+1), t[0]) for n,t in enumerate(titles) ] + + while True: + x = dialog_menu("Man Page Browser", manpage_display_header(cat), choices, "0", "Enter", cat and "Back" or "Quit") + if x[0] == True: + index = int(x[1]) + subcat = titles[index-1][1] + manpage_browser(cat + (subcat,)) + else: + break + + +def manpage_browse_entries(cat, entries): + checks = [] + for e in entries: + checks.append((e["title"], e["name"])) + checks.sort() + choices = [ (str(n+1), c[0]) for n,c in enumerate(checks) ] + while True: + x = dialog_menu("Man Page Browser", manpage_display_header(cat), choices, "0", "Show Manpage", "Back") + if x[0] == True: + index = int(x[1])-1 + checkname = checks[index][1] + show_check_manual(checkname) + else: + break + +def manpage_display_header(cat): + return " -> ".join([manpage_catalog_titles.get(e,e) for e in cat ]) + +def run_dialog(args): + env = { + "TERM": os.getenv("TERM", "linux"), + "LANG": "de_DE.UTF-8" + } + p = subprocess.Popen(["dialog", "--shadow"] + args, env = env, stderr = subprocess.PIPE) 
+ response = p.stderr.read() + return 0 == os.waitpid(p.pid, 0)[1], response + + +def dialog_menu(title, text, choices, defvalue, oktext, canceltext): + args = [ "--ok-label", oktext, "--cancel-label", canceltext ] + if defvalue != None: + args += [ "--default-item", defvalue ] + args += [ "--title", title, "--menu", text, "0", "0", "0" ] # "20", "60", "17" ] + for text, value in choices: + args += [ text, value ] + return run_dialog(args) + + +def parse_man_header(checkname, path): + parsed = {} + parsed["name"] = checkname + parsed["path"] = path + key = None + lineno = 0 + for line in file(path): + line = line.rstrip() + lineno += 1 + try: + if not line: + parsed[key] += "\n\n" + elif line[0] == ' ': + parsed[key] += "\n" + line.lstrip() + elif line[0] == '[': + break # End of header + else: + key, rest = line.split(":", 1) + parsed[key] = rest.lstrip() + except Exception, e: + if opt_debug: + raise + sys.stderr.write("Invalid line %d in man page %s\n%s" % ( + lineno, path, line)) + break + + if "agents" not in parsed: + raise Exception("Section agents missing in man page of %s\n" % (checkname)) + else: + parsed["agents"] = parsed["agents"].replace(" ","").split(",") - try: - for line in file(path): - if line.startswith("title:"): - table.append((filename, line.split(":", 1)[1].strip())) - except: - pass + if parsed.get("catalog"): + parsed["catalog"] = parsed["catalog"].split("/") + + return parsed - table.sort() - print_table(['Check type', 'Title'], [tty_bold, tty_normal], table) def show_check_manual(checkname): + filename = all_manuals().get(checkname) + bg_color = 4 fg_color = 7 bold_color = tty_white + tty_bold @@ -2930,7 +3320,6 @@ parameters_color = tty(6,4,1) examples_color = tty(6,4,1) - filename = all_manuals().get(checkname) if not filename: sys.stdout.write("No manpage for %s. 
Sorry.\n" % checkname) return @@ -2993,8 +3382,8 @@ # preserve the inner { and } in double braces and then replace the braces left return line.replace('{{', '{{').replace('}}', '}}').replace("{", "").replace("}", "") - def print_sectionheader(line, ignored): - print "H1:" + line + def print_sectionheader(line, title): + print "H1:" + title def print_subheader(line): print "H2:" + line @@ -3030,7 +3419,7 @@ return re.sub('(?%s[check_%s|%s]\n" % (checkname, checkname, header['title'])) - print_splitline(header_color_left, "Author: ", header_color_right, header['author']) - print_splitline(header_color_left, "License: ", header_color_right, header['license']) - distro = header['distribution'] - if distro == 'check_mk': - distro = "official part of Check_MK" - print_splitline(header_color_left, "Distribution: ", header_color_right, distro) ags = [] for agent in header['agents'].split(","): agent = agent.strip() ags.append({ "vms" : "VMS", "linux":"Linux", "aix": "AIX", "solaris":"Solaris", "windows":"Windows", "snmp":"SNMP", - "openvms" : "OpenVMS" } + "openvms" : "OpenVMS", "vsphere" : "vSphere" } .get(agent, agent.upper())) - print_splitline(header_color_left, "Supported Agents: ", header_color_right, ", ".join(ags)) + print_splitline(header_color_left, "Supported Agents: ", header_color_right, ", ".join(ags)) + distro = header['distribution'] + if distro == 'check_mk': + distro = "official part of Check_MK" + print_splitline(header_color_left, "Distribution: ", header_color_right, distro) + print_splitline(header_color_left, "License: ", header_color_right, header['license']) empty_line() print_textbody(header['description']) @@ -3237,14 +3625,20 @@ except Exception, e: print "Invalid check manpage %s: missing %s" % (filename, e) + +#. +# .--Backup & Restore----------------------------------------------------. +# | ____ _ ___ ____ _ | +# | | __ ) __ _ ___| | ___ _ _ __ ( _ ) | _ \ ___ ___| |_ | +# | | _ \ / _` |/ __| |/ / | | | '_ \ / _ \/\ | |_) / _ \/ __| __| | +# | | |_) | (_| | (__| <| |_| | |_) | | (_> < | _ < __/\__ \ |_ _ | +# | |____/ \__,_|\___|_|\_\\__,_| .__/ \___/\/ |_| \_\___||___/\__(_) | +# | |_| | # +----------------------------------------------------------------------+ -# | ____ _ | -# | | __ ) __ _ ___| | ___ _ _ __ | -# | | _ \ / _` |/ __| |/ / | | | '_ \ | -# | | |_) | (_| | (__| <| |_| | |_) | | -# | |____/ \__,_|\___|_|\_\\__,_| .__/ | -# | |_| | -# +----------------------------------------------------------------------+ +# | Check_MK comes with a simple backup and restore of the current con- | +# | figuration and cache files (cmk --backup and cmk --restore). This is | +# | implemented here. 
| +# '----------------------------------------------------------------------' class fake_file: def __init__(self, content): @@ -3409,6 +3803,12 @@ sys.stdout.write(tty_bold + tty_green + " cache(%d)" % d) sys.stdout.flush() + # piggy files from this as source host + d = remove_piggyback_info_from(host) + if d: + sys.stdout.write(tty_bold + tty_magenta + " piggyback(%d)" % d) + + # logfiles dir = logwatch_dir + "/" + host if os.path.exists(dir): @@ -3425,26 +3825,33 @@ sys.stdout.write(tty_bold + tty_magenta + " logfiles(%d)" % d) # autochecks - d = remove_autochecks_of(host) - if d > 0: + count = remove_autochecks_of(host) + if count: flushed = True - sys.stdout.write(tty_bold + tty_cyan + " autochecks(%d)" % d) + sys.stdout.write(tty_bold + tty_cyan + " autochecks(%d)" % count) + + # inventory + path = var_dir + "/inventory/" + host + if os.path.exists(path): + os.remove(path) + sys.stdout.write(tty_bold + tty_yellow + " inventory") if not flushed: sys.stdout.write("(nothing)") - sys.stdout.write(tty_normal + "\n") - -# +----------------------------------------------------------------------+ -# | __ __ _ __ _ _ | -# | | \/ | __ _(_)_ __ / _|_ _ _ __ ___| |_(_) ___ _ __ ___ | -# | | |\/| |/ _` | | '_ \| |_| | | | '_ \ / __| __| |/ _ \| '_ \/ __| | -# | | | | | (_| | | | | | _| |_| | | | | (__| |_| | (_) | | | \__ \ | -# | |_| |_|\__,_|_|_| |_|_| \__,_|_| |_|\___|\__|_|\___/|_| |_|___/ | +#. +# .--Main Functions------------------------------------------------------. +# | __ __ _ _____ _ _ | +# || \/ | __ _(_)_ __ | ___| _ _ __ ___| |_(_) ___ _ __ ___ | +# || |\/| |/ _` | | '_ \ | |_ | | | | '_ \ / __| __| |/ _ \| '_ \/ __| | +# || | | | (_| | | | | | | _|| |_| | | | | (__| |_| | (_) | | | \__ \ | +# ||_| |_|\__,_|_|_| |_| |_| \__,_|_| |_|\___|\__|_|\___/|_| |_|___/ | # | | # +----------------------------------------------------------------------+ +# | Implementation of some of the toplevel functions. | +# '----------------------------------------------------------------------' # Create a list of all hosts of a certain hostgroup. 
Needed only for
 # option --list-hosts

@@ -3476,22 +3883,86 @@
     if info:
         sys.stdout.write(info)
         return
-    try:
-        ipaddress = lookup_ipaddress(hostname)
-        sys.stdout.write(get_agent_info(hostname, ipaddress, 0))
-    except MKAgentError, e:
-        sys.stderr.write("Problem contacting agent: %s\n" % (e,))
-        sys.exit(3)
-    except MKGeneralException, e:
-        sys.stderr.write("General problem: %s\n" % (e,))
-        sys.exit(3)
-    except socket.gaierror, e:
-        sys.stderr.write("Network error: %s\n" % e)
-    except Exception, e:
-        sys.stderr.write("Unexpected exception: %s\n" % (e,))
-        sys.exit(3)
+    if is_tcp_host(hostname):
+        try:
+            ipaddress = lookup_ipaddress(hostname)
+            sys.stdout.write(get_agent_info(hostname, ipaddress, 0))
+        except MKAgentError, e:
+            sys.stderr.write("Problem contacting agent: %s\n" % (e,))
+            sys.exit(3)
+        except MKGeneralException, e:
+            sys.stderr.write("General problem: %s\n" % (e,))
+            sys.exit(3)
+        except socket.gaierror, e:
+            sys.stderr.write("Network error: %s\n" % e)
+        except Exception, e:
+            sys.stderr.write("Unexpected exception: %s\n" % (e,))
+            sys.exit(3)
+
+    sys.stdout.write(get_piggyback_info(hostname))
+
+def do_snmptranslate(args):
+    if not args:
+        raise MKGeneralException("Please provide the name of an SNMP walk file")
+    walk_filename = args[0]
+
+    walk_path = "%s/%s" % (snmpwalks_dir, walk_filename)
+    if not os.path.exists(walk_path):
+        raise MKGeneralException("Walk does not exist")
+
+    def translate(lines):
+        result_lines = []
+        try:
+            oids_for_command = []
+            for line in lines:
+                oids_for_command.append(line.split(" ")[0])
+
+            extra_mib_path = ""
+            if local_mibs_dir:
+                extra_mib_path = " -M+%s" % local_mibs_dir
+            command = "snmptranslate -m ALL%s %s 2>/dev/null" % (extra_mib_path, " ".join(oids_for_command))
+            process = os.popen(command, "r")
+            output = process.read()
+            result = output.split("\n")[0::2]
+            for idx, line in enumerate(result):
+                result_lines.append((line, lines[idx]))
+
+        except Exception, e:
+            print e
+
+        return result_lines
+
+
+    # Translate n OIDs per cycle
+    entries_per_cycle = 500
+    translated_lines = []
+
+    walk_lines = file(walk_path).readlines()
+    sys.stderr.write("Processing %d lines.\n" % len(walk_lines))
+
+    i = 0
+    while i < len(walk_lines):
+        sys.stderr.write("\r%d to go... " % (len(walk_lines) - i))
+        sys.stderr.flush()
+        process_lines = walk_lines[i:i+entries_per_cycle]
+        translated = translate(process_lines)
+        i += len(translated)
+        translated_lines += translated
+    sys.stderr.write("\rfinished. 
\n") + + # Output formatted + longest_translation = 40 + for translation, line in translated_lines: + longest_translation = max(longest_translation, len(translation)) + + format_string = "%%-%ds %%s" % longest_translation + for translation, line in translated_lines: + sys.stdout.write(format_string % (translation, line)) def do_snmpwalk(hostnames): + if opt_oids and opt_extra_oids: + raise MKGeneralException("You cannot specify --oid and --extraoid at the same time.") + if len(hostnames) == 0: sys.stderr.write("Please specify host names to walk on.\n") return @@ -3506,48 +3977,31 @@ raise def do_snmpwalk_on(hostname, filename): - if opt_verbose: - sys.stdout.write("%s:\n" % hostname) + verbose("%s:\n" % hostname) ip = lookup_ipaddress(hostname) - portspec = snmp_port_spec(hostname) - cmd = snmp_walk_command(hostname) + " -On -Ob -OQ -Ot %s%s " % (ip, portspec) - if opt_debug: - print 'Executing: %s' % cmd + out = file(filename, "w") - for oid in [ "", "1.3.6.1.4.1" ]: # SNMPv2-SMI::enterprises - oids = [] - values = [] - if opt_verbose: - sys.stdout.write("%s..." % (cmd + oid)) - sys.stdout.flush() - count = 0 - f = os.popen(cmd + oid) - while True: - line = f.readline() - if not line: - break - parts = line.split("=", 1) - if len(parts) != 2: - continue - oid, value = parts - value = value.rstrip("\n") - if value.lstrip().startswith('"'): - while value[-1] != '"': - value += f.readline().rstrip("\n") - - if not oid.startswith("."): - oid = "." + oid - oids.append(oid) - values.append(value) - for oid, value in zip(oids, values): - out.write("%s %s\n" % (oid, value.strip())) - count += 1 - if opt_verbose: - sys.stdout.write("%d variables.\n" % count) + oids_to_walk = opt_oids + if not opt_oids: + oids_to_walk = [ + ".1.3.6.1.2.1", # SNMPv2-SMI::mib-2 + ".1.3.6.1.4.1" # SNMPv2-SMI::enterprises + ] + opt_extra_oids + + for oid in oids_to_walk: + try: + verbose("Walk on \"%s\"..." 
% oid) + + results = snmpwalk_on_suboid(hostname, ip, oid, hex_plain = True) + for oid, value in results: + out.write("%s %s\n" % (oid, value)) + verbose("%d variables.\n" % len(results)) + except: + if opt_debug: + raise out.close() - if opt_verbose: - sys.stdout.write("Successfully Wrote %s%s%s.\n" % (tty_bold, filename, tty_normal)) + verbose("Successfully Wrote %s%s%s.\n" % (tty_bold, filename, tty_normal)) def do_snmpget(oid, hostnames): if len(hostnames) == 0: @@ -3574,15 +4028,21 @@ ( modules_dir, dir, inst, "Main components of check_mk"), ( checks_dir, dir, inst, "Checks"), ( notifications_dir, dir, inst, "Notification scripts"), + ( inventory_dir, dir, inst, "Inventory plugins"), ( agents_dir, dir, inst, "Agents for operating systems"), ( doc_dir, dir, inst, "Documentation files"), ( web_dir, dir, inst, "Check_MK's web pages"), ( check_manpages_dir, dir, inst, "Check manpages (for check_mk -M)"), ( lib_dir, dir, inst, "Binary plugins (architecture specific)"), ( pnp_templates_dir, dir, inst, "Templates for PNP4Nagios"), - ( nagios_startscript, fil, inst, "Startscript for Nagios daemon"), - ( nagios_binary, fil, inst, "Path to Nagios executable"), + ] + if monitoring_core == "nagios": + paths += [ + ( nagios_startscript, fil, inst, "Startscript for Nagios daemon"), + ( nagios_binary, fil, inst, "Path to Nagios executable"), + ] + paths += [ ( default_config_dir, dir, conf, "Directory that contains main.mk"), ( check_mk_configdir, dir, conf, "Directory containing further *.mk files"), ( nagios_config_file, fil, conf, "Main configuration file of Nagios"), @@ -3609,6 +4069,7 @@ paths += [ ( local_checks_dir, dir, local, "Locally installed checks"), ( local_notifications_dir, dir, local, "Locally installed notification scripts"), + ( local_inventory_dir, dir, local, "Locally installed inventory plugins"), ( local_check_manpages_dir, dir, local, "Locally installed check man pages"), ( local_agents_dir, dir, local, "Locally installed agents and plugins"), ( local_web_dir, dir, local, "Locally installed Multisite addons"), @@ -3650,8 +4111,8 @@ add_txt = " (cluster of " + (",".join(nodes_of(hostname))) + ")" try: ipaddress = lookup_ipaddress(hostname) - except: - ipaddress = "0.0.0.0" + except: + ipaddress = "0.0.0.0" else: color = tty_bgblue try: @@ -3670,8 +4131,8 @@ parents_list = parents_of(hostname) if len(parents_list) > 0: print tty_yellow + "Parents: " + tty_normal + ", ".join(parents_list) - print tty_yellow + "Host groups: " + tty_normal + ", ".join(hostgroups_of(hostname)) - print tty_yellow + "Contact groups: " + tty_normal + ", ".join(host_contactgroups_of([hostname])) + print tty_yellow + "Host groups: " + tty_normal + make_utf8(", ".join(hostgroups_of(hostname))) + print tty_yellow + "Contact groups: " + tty_normal + make_utf8(", ".join(host_contactgroups_of([hostname]))) agenttypes = [] if is_tcp_host(hostname): @@ -3685,15 +4146,28 @@ if is_usewalk_host(hostname): agenttypes.append("SNMP (use stored walk)") else: + if has_inline_snmp and use_inline_snmp: + inline = "yes" + else: + inline = "no" + credentials = snmp_credentials_of(hostname) - if is_bulkwalk_host(hostname): + if type(credentials) in [ str, unicode ]: + cred = "community: \'%s\'" % credentials + else: + cred = "credentials: '%s'" % ", ".join(credentials) + + if is_snmpv3_host(hostname) or is_bulkwalk_host(hostname): bulk = "yes" else: bulk = "no" + portinfo = snmp_port_of(hostname) if portinfo == None: portinfo = 'default' - agenttypes.append("SNMP (community: '%s', bulk walk: %s, port: %s)" % (credentials, 
bulk, portinfo)) + + agenttypes.append("SNMP (%s, bulk walk: %s, port: %s, inline: %s)" % + (cred, bulk, portinfo, inline)) if is_ping_host(hostname): agenttypes.append('PING only') @@ -3780,40 +4254,51 @@ def usage(): print """WAYS TO CALL: - check_mk [-n] [-v] [-p] HOST [IPADDRESS] check all services on HOST - check_mk [-u] -I [HOST ..] inventory - find new services - check_mk [-u] -II ... renew inventory, drop old services - check_mk -u, --cleanup-autochecks reorder autochecks files - check_mk -N [HOSTS...] output Nagios configuration - check_mk -C, --compile precompile host checks - check_mk -U, --update precompile + create Nagios config - check_mk -O, --reload precompile + config + Nagios reload - check_mk -R, --restart precompile + config + Nagios restart - check_mk -D, --dump [H1 H2 ..] dump all or some hosts - check_mk -d HOSTNAME|IPADDRESS show raw information from agent - check_mk --check-inventory HOSTNAME check for items not yet checked - check_mk --list-hosts [G1 G2 ...] print list of hosts - check_mk --list-tag TAG1 TAG2 ... list hosts having certain tags - check_mk -L, --list-checks list all available check types - check_mk -M, --man [CHECKTYPE] show manpage for check CHECKTYPE - check_mk --paths list all pathnames and directories - check_mk -X, --check-config check configuration for invalid vars - check_mk --backup BACKUPFILE.tar.gz make backup of configuration and data - check_mk --restore BACKUPFILE.tar.gz restore configuration and data - check_mk --flush [HOST1 HOST2...] flush all data of some or all hosts - check_mk --donate Email data of configured hosts to MK - check_mk --snmpwalk HOST1 HOST2 ... Do snmpwalk on host - check_mk --snmpget OID HOST1 HOST2 ... Fetch single OIDs and output them - check_mk --scan-parents [HOST1 HOST2...] autoscan parents, create conf.d/parents.mk - check_mk -P, --package COMMAND do package operations - check_mk --localize COMMAND do localization operations - check_mk -V, --version print version - check_mk -h, --help print this help + cmk [-n] [-v] [-p] HOST [IPADDRESS] check all services on HOST + cmk -I [HOST ..] inventory - find new services + cmk -II ... renew inventory, drop old services + cmk -N [HOSTS...] output Nagios configuration + cmk -B create configuration for core + cmk -C, --compile precompile host checks + cmk -U, --update precompile + create config for core + cmk -O, --reload precompile + config + core reload + cmk -R, --restart precompile + config + core restart + cmk -D, --dump [H1 H2 ..] dump all or some hosts + cmk -d HOSTNAME|IPADDRESS show raw information from agent + cmk --check-discovery HOSTNAME check for items not yet checked + cmk --update-dns-cache update IP address lookup cache + cmk -l, --list-hosts [G1 G2 ...] print list of all hosts + cmk --list-tag TAG1 TAG2 ... list hosts having certain tags + cmk -L, --list-checks list all available check types + cmk -M, --man [CHECKTYPE] show manpage for check CHECKTYPE + cmk -m, --browse-man open interactive manpage browser + cmk --paths list all pathnames and directories + cmk -X, --check-config check configuration for invalid vars + cmk --backup BACKUPFILE.tar.gz make backup of configuration and data + cmk --restore BACKUPFILE.tar.gz restore configuration and data + cmk --flush [HOST1 HOST2...] flush all data of some or all hosts + cmk --donate Email data of configured hosts to MK + cmk --snmpwalk HOST1 HOST2 ... Do snmpwalk on one or more hosts + cmk --snmptranslate HOST Do snmptranslate on walk + cmk --snmpget OID HOST1 HOST2 ... 
Fetch single OIDs and output them
+   cmk --scan-parents [HOST1 HOST2...] autoscan parents, create conf.d/parents.mk
+   cmk -P, --package COMMAND            do package operations
+   cmk --localize COMMAND               do localization operations
+   cmk --notify                         used to send notifications from core
+   cmk --create-rrd [--keepalive|SPEC]  create round robin database (only CMC)
+   cmk --convert-rrds [--split] [H...]  convert existing RRDs to new format (only CMC)
+   cmk -i, --inventory [HOST1 HOST2...] Do a HW/SW-Inventory of some or all hosts
+   cmk --inventory-as-check HOST        Do HW/SW-Inventory, behave like check plugin
+   cmk -A, --bake-agents [-f] [H1 H2..] Bake agents for hosts (not in all versions)
+   cmk --cap pack|unpack|list FILE.cap  Pack/unpack agent packages (not in all versions)
+   cmk --show-snmp-stats                Analyzes recorded Inline SNMP statistics
+   cmk -V, --version                    print version
+   cmk -h, --help                       print this help

 OPTIONS:
   -v             show what's going on
   -p             also show performance data (use with -v)
-  -n             do not submit results to Nagios, do not save counters
+  -n             do not submit results to core, do not save counters
   -c FILE        read config file FILE instead of %s
   --cache        read info from cache file if present and fresh, use TCP
                  only if cache file is absent or too old
@@ -3824,8 +4309,18 @@
                  prevents DNS lookups.
   --usewalk      use snmpwalk stored with --snmpwalk
   --debug        never catch Python exceptions
+  --interactive  Some errors are only reported in interactive mode, i.e. if stdout
+                 is a TTY. This option forces interactive mode even if the output
+                 is directed into a pipe or file.
   --procs N      start up to N processes in parallel during --scan-parents
   --checks A,..  restrict checks/inventory to specified checks (tcp/snmp/check type)
+  --keepalive    used by the Check_MK Micro Core: run check and --notify in continuous
+                 mode. Read data from stdin instead of from cmd line and environment
+  --cmc-file=X   relative filename for CMC config file (used by -B/-U)
+  --extraoid A   Do --snmpwalk also on this OID, in addition to mib-2 and enterprises.
+                 You can specify this option multiple times.
+  --oid A        Do --snmpwalk on this OID instead of mib-2 and enterprises.
+                 You can specify this option multiple times.

 NOTES:
   -I can be restricted to certain check types. Write '--checks df -I' if you
@@ -3836,10 +4331,6 @@
   -II does the same as -I but deletes all existing checks of the specified
   types and hosts.

-  -u, --cleanup-autochecks resorts all checks found by inventory
-  into per-host files. It can be used as an options to -I or as
-  a standalone operation.
-
   -N outputs the Nagios configuration. You may optionally add a list
   of hosts. In that case the configuration is generated only for
   those hosts (useful for debugging).
@@ -3854,7 +4345,7 @@
   -d does not work on clusters (as defined in main.mk) but only on
   real hosts.

-  --check-inventory make check_mk behave as Nagios plugins that
+  --check-discovery make check_mk behave as a monitoring plugin that
   checks if an inventory would find new services for the host.

   --list-hosts called without argument lists all hosts. You may
@@ -3872,9 +4363,9 @@
   compressed tar file. --restore *erases* the current configuration
   and data and replaces it with that from the backup file.

-  --flush deletes all runtime data belonging to a host (not
-  inventory data). This includes the state of performance counters,
-  cached agent output, and logfiles. Precompiled host checks
+  --flush deletes all runtime data belonging to a host. This includes
+  the inventorized checks, the state of performance counters,
+  cached agent output, and logfiles. 
Precompiled host checks
+  are not deleted.

   -P, --package brings you into packager mode. Packages are
@@ -3896,32 +4387,73 @@
   Check_MK and developing checks by donating hosts. This is completely
   voluntary and turned off by default.

-  --snmpwalk does a complete snmpwalk for the specifies hosts both
+  --snmpwalk does a complete snmpwalk for the specified hosts both
   on the standard MIB and the enterprises MIB and stores the
-  result in the directory %s.
+  result in the directory %s. Use the option --oid one or several
+  times in order to specify alternative OIDs to walk. You need to
+  specify numeric OIDs. If you want to keep the two standard OIDs
+  .1.3.6.1.2.1 and .1.3.6.1.4.1 then use --extraoid for just adding
+  additional OIDs to walk.
+
+  --snmptranslate does not contact the host again, but reuses the host's
+  walk from the directory %s.%s

   --scan-parents uses traceroute in order to automatically detect
   hosts' parents. It creates the file conf.d/parents.mk which
   defines gateway hosts and parent declarations.

-  Nagios can call check_mk without options and the hostname and its IP
-  address as arguments. Much faster is using precompiled host checks,
-  though.
+  -A, --bake-agents creates RPM/DEB/MSI packages with host-specific
+  monitoring agents. If you add the option -f, --force then all
+  agents are renewed, even if an up-to-date version for a configuration
+  already exists. Note: baking agents is only contained in the
+  subscription version of Check_MK.
+
+  --show-snmp-stats analyzes and shows a summary of the Inline SNMP
+  statistics which might have been recorded on your system before.
+  Note: This is only contained in the subscription version of Check_MK.
+
+  --convert-rrds converts the internal structure of existing RRDs
+  to the new structure as configured via the rulesets cmc_host_rrd_config
+  and cmc_service_rrd_config. If you do not specify hosts, then all
+  RRDs will be converted. Conversion only takes place if the configuration
+  of the RRDs has changed. The option --split will activate conversion
+  from existing RRDs in PNP storage type SINGLE to MULTIPLE.
+
+  -i, --inventory does a HW/SW-Inventory for all, one or several
+  hosts. If you add the option -f, --force then persisted sections
+  will be used even if they are outdated.

 """ % (check_mk_configfile,
        precompiled_hostchecks_dir,
        snmpwalks_dir,
+       snmpwalks_dir,
+       local_mibs_dir and ("\n  You can add further MIBs to %s" % local_mibs_dir) or "",
        )

-def do_create_config():
-    out = file(nagios_objects_file, "w")
-    sys.stdout.write("Generating Nagios configuration...")
+def do_create_config(with_agents=True):
+    sys.stdout.write("Generating configuration for core (type %s)..."
% monitoring_core) sys.stdout.flush() - create_nagios_config(out) + if monitoring_core == "cmc": + do_create_cmc_config(opt_cmc_relfilename, False) # do not use rushed ahead config + else: + out = file(nagios_objects_file, "w") + create_nagios_config(out) sys.stdout.write(tty_ok + "\n") + if bake_agents_on_restart and with_agents and 'do_bake_agents' in globals(): + sys.stdout.write("Baking agents...") + sys.stdout.flush() + try: + do_bake_agents() + sys.stdout.write(tty_ok + "\n") + except Exception, e: + if opt_debug: + raise + sys.stdout.write("Error: %s\n" % e) + + def do_output_nagios_conf(args): if len(args) == 0: args = None @@ -3933,15 +4465,22 @@ precompile_hostchecks() sys.stdout.write(tty_ok + "\n") +def do_pack_config(): + sys.stdout.write("Packing config...") + sys.stdout.flush() + pack_config() + pack_autochecks() + sys.stdout.write(tty_ok + "\n") + -def do_update(): +def do_update(with_precompile): try: - do_create_config() - do_precompile_hostchecks() - sys.stdout.write(("Successfully created Nagios configuration file %s%s%s.\n\n" + - "Please make sure that file will be read by Nagios.\n" + - "You need to restart Nagios in order to activate " + - "the changes.\n") % (tty_green + tty_bold, nagios_objects_file, tty_normal)) + do_create_config(with_agents=with_precompile) + if with_precompile: + if monitoring_core == "cmc": + do_pack_config() + else: + do_precompile_hostchecks() except Exception, e: sys.stderr.write("Configuration Error: %s\n" % e) @@ -3951,38 +4490,56 @@ def do_check_nagiosconfig(): - command = nagios_binary + " -vp " + nagios_config_file + " 2>&1" - sys.stdout.write("Validating Nagios configuration...") - if opt_verbose: - sys.stderr.write("Running '%s'" % command) - sys.stderr.flush() + if monitoring_core == 'nagios': + command = nagios_binary + " -vp " + nagios_config_file + " 2>&1" + sys.stdout.write("Validating Nagios configuration...") + if opt_verbose: + sys.stderr.write("Running '%s'" % command) + sys.stderr.flush() - process = os.popen(command, "r") - output = process.read() - exit_status = process.close() - if not exit_status: - sys.stdout.write(tty_ok + "\n") - return True + process = os.popen(command, "r") + output = process.read() + exit_status = process.close() + if not exit_status: + sys.stdout.write(tty_ok + "\n") + return True + else: + sys.stdout.write("ERROR:\n") + sys.stderr.write(output) + return False else: - sys.stdout.write("ERROR:\n") - sys.stderr.write(output) - return False + return True -def do_restart_nagios(only_reload): - action = only_reload and "load" or "start" - sys.stdout.write("Re%sing Nagios..." % action) - sys.stdout.flush() - os.putenv("CORE_NOVERIFY", "yes") - command = nagios_startscript + " re%s 2>&1" % action +# Action can be restart, reload, start or stop +def do_core_action(action, quiet=False): + if not quiet: + sys.stdout.write("%sing monitoring core..." 
% action.title()) + sys.stdout.flush() + if monitoring_core == "nagios": + os.putenv("CORE_NOVERIFY", "yes") + command = nagios_startscript + " %s 2>&1" % action + else: + command = "omd %s cmc 2>&1" % action process = os.popen(command, "r") output = process.read() if process.close(): - sys.stdout.write("ERROR: %s\n" % output) - raise MKGeneralException("Cannot re%s the monitoring core: %s" % (action, output)) + if not quiet: + sys.stdout.write("ERROR: %s\n" % output) + raise MKGeneralException("Cannot %s the monitoring core: %s" % (action, output)) + else: + if not quiet: + sys.stdout.write(tty_ok + "\n") + +def core_is_running(): + if monitoring_core == "nagios": + command = nagios_startscript + " status >/dev/null 2>&1" else: - sys.stdout.write(tty_ok + "\n") + command = "omd status cmc >/dev/null 2>&1" + code = os.system(command) + return not code + def do_reload(): do_restart(True) @@ -3991,7 +4548,7 @@ try: backup_path = None - if not lock_nagios_objects_file(): + if not lock_objects_file(): sys.stderr.write("Other restart currently in progress. Aborting.\n") sys.exit(1) @@ -4005,10 +4562,11 @@ backup_path = None try: - do_create_config() + do_create_config(with_agents=True) except Exception, e: sys.stderr.write("Error creating configuration: %s\n" % e) - os.rename(backup_path, nagios_objects_file) + if backup_path: + os.rename(backup_path, nagios_objects_file) if opt_debug: raise sys.exit(1) @@ -4016,8 +4574,11 @@ if do_check_nagiosconfig(): if backup_path: os.remove(backup_path) - do_precompile_hostchecks() - do_restart_nagios(only_reload) + if monitoring_core == "cmc": + do_pack_config() + else: + do_precompile_hostchecks() + do_core_action(only_reload and "reload" or "restart") else: sys.stderr.write("Nagios configuration is invalid. Rolling back.\n") if backup_path: @@ -4038,7 +4599,7 @@ sys.exit(1) restart_lock_fd = None -def lock_nagios_objects_file(): +def lock_objects_file(): global restart_lock_fd # In some bizarr cases (as cmk -RR) we need to avoid duplicate locking! if restart_locking and restart_lock_fd == None: @@ -4066,6 +4627,10 @@ for f in cache_files: if f == host or f.startswith("%s." % host): donate.append(f) + if not donate: + sys.stderr.write("No hosts specified. You need to set donation_hosts in main.mk.\n") + sys.exit(1) + if opt_verbose: print "Donating files %s" % " ".join(cache_files) import base64 @@ -4078,46 +4643,6 @@ output.write('\n') indata = indata[64:] -def do_cleanup_autochecks(): - # 1. Read in existing autochecks - hostdata = {} - os.chdir(autochecksdir) - checks = 0 - for fn in glob.glob("*.mk"): - if opt_debug: - sys.stdout.write("Scanning %s...\n" % fn) - for line in file(fn): - testline = line.lstrip().replace("'", '"') - if testline.startswith('("'): - splitted = testline.split('"') - hostname = splitted[1] - hostchecks = hostdata.get(hostname, []) - hostchecks.append(line) - checks += 1 - hostdata[hostname] = hostchecks - if opt_verbose: - sys.stdout.write("Found %d checks from %d hosts.\n" % (checks, len(hostdata))) - - # 2. Write out new autochecks. - newfiles = set([]) - for host, lines in hostdata.items(): - lines.sort() - fn = host.replace(":","_") + ".mk" - if opt_verbose: - sys.stdout.write("Writing %s: %d checks\n" % (fn, len(lines))) - newfiles.add(fn) - f = file(fn, "w+") - f.write("[\n") - for line in lines: - f.write(line) - f.write("]\n") - - # 3. 
Remove obsolete files
-    for f in glob.glob("*.mk"):
-        if f not in newfiles:
-            if opt_verbose:
-                sys.stdout.write("Deleting %s\n" % f)
-            os.remove(f)

 def find_bin_in_path(prog):
     for path in os.environ['PATH'].split(os.pathsep):
@@ -4374,6 +4899,8 @@
     # reverse DNS but the Check_MK mechanisms, since we do not
     # want to find the DNS name but the name of a matching host
     # from all_hosts
+
+ip_to_hostname_cache = None
 def ip_to_hostname(ip):
     global ip_to_hostname_cache
     if ip_to_hostname_cache == None:
@@ -4392,16 +4919,284 @@
     except:
         return None

+def config_timestamp():
+    mtime = 0
+    for dirpath, dirnames, filenames in os.walk(check_mk_configdir):
+        for f in filenames:
+            mtime = max(mtime, os.stat(dirpath + "/" + f).st_mtime)
+    mtime = max(mtime, os.stat(default_config_dir + "/main.mk").st_mtime)
+    try:
+        mtime = max(mtime, os.stat(default_config_dir + "/final.mk").st_mtime)
+    except:
+        pass
+    try:
+        mtime = max(mtime, os.stat(default_config_dir + "/local.mk").st_mtime)
+    except:
+        pass
+    return mtime

-# +----------------------------------------------------------------------+
-# | ____ _ __ _ |
-# | | _ \ ___ __ _ __| | ___ ___ _ __ / _(_) __ _ |
-# | | |_) / _ \/ _` |/ _` | / __/ _ \| '_ \| |_| |/ _` | |
-# | | _ < __/ (_| | (_| | | (_| (_) | | | | _| | (_| | |
-# | |_| \_\___|\__,_|\__,_| \___\___/|_| |_|_| |_|\__, | |
+
+# Reset some global variables to their original value. This
+# is needed in keepalive mode.
+# We could in fact do some positive caching in keepalive
+# mode - e.g. the counters of the hosts could be saved in memory.
+def cleanup_globals():
+    global g_agent_already_contacted
+    g_agent_already_contacted = {}
+    global g_hostname
+    g_hostname = "unknown"
+    global g_counters
+    g_counters = {}
+    global g_infocache
+    g_infocache = {}
+    global g_broken_agent_hosts
+    g_broken_agent_hosts = set([])
+    global g_broken_snmp_hosts
+    g_broken_snmp_hosts = set([])
+    global g_inactive_timerperiods
+    g_inactive_timerperiods = None
+    global g_walk_cache
+    g_walk_cache = {}
+    global g_timeout
+    g_timeout = None
+
+    if 'g_snmp_sessions' in globals():
+        global g_snmp_sessions
+        g_snmp_sessions = {}
+
+
+# Diagnostic function for detecting global variables that have
+# changed during checking. This is slow and cannot be used
+# in production mode.
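
copy_globals() below snapshots (nearly) all module globals so that a second
snapshot taken after a check cycle can be diffed against it. A stand-alone
sketch of the same technique (g_cache and the leaked entry are invented for
illustration):

    import copy

    def snapshot_globals():
        # shallow-copy every global that is not a function or a module
        return dict((name, copy.copy(value))
                    for name, value in globals().items()
                    if type(value).__name__ not in ("function", "module"))

    g_cache = {}
    before = snapshot_globals()
    g_cache["myhost"] = 42        # a check cycle "leaks" state into a global
    after = snapshot_globals()
    for name, value in before.items():
        if value != after.get(name):
            print "WARNING: global %s changed: %r -> %r" % (name, value, after[name])

+def copy_globals():
+    import copy
+    global_saved = {}
+    for varname, value in globals().items():
+        # Some global caches are allowed to change. 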
+        if varname not in [ "g_service_description", "g_multihost_checks",
+                            "g_check_table_cache", "g_singlehost_checks",
+                            "g_nodesof_cache", "compiled_regexes", "vars_before_config",
+                            "g_initial_times", "g_keepalive_initial_memusage",
+                            "g_dns_cache", "g_ip_lookup_cache", "g_converted_rulesets_cache" ] \
+           and type(value).__name__ not in [ "function", "module", "SRE_Pattern" ]:
+            global_saved[varname] = copy.copy(value)
+    return global_saved
+
+
+# Determine current (VmSize, VmRSS) in Bytes
+def current_memory_usage():
+    parts = file('/proc/self/stat').read().split()
+    vsize = int(parts[22])        # in Bytes
+    rss   = int(parts[23]) * 4096 # in Pages of 4096 Bytes each
+    return (vsize, rss)
+
+keepalive_memcheck_cycle = 20
+g_keepalive_initial_memusage = None
+def keepalive_check_memory(num_checks, keepalive_fd):
+    if num_checks % keepalive_memcheck_cycle != 0: # Only do this every keepalive_memcheck_cycle checks
+        return
+
+    global g_keepalive_initial_memusage
+    if not g_keepalive_initial_memusage:
+        g_keepalive_initial_memusage = current_memory_usage()
+    else:
+        usage = current_memory_usage()
+        # Allow VM size to grow by at most 50%
+        if usage[0] > 1.5 * g_keepalive_initial_memusage[0]:
+            sys.stderr.write("memory usage increased from %s to %s after %d check cycles. Restarting.\n" % (
+                get_bytes_human_readable(g_keepalive_initial_memusage[0]),
+                get_bytes_human_readable(usage[0]), num_checks))
+            restart_myself(keepalive_fd)
+
+
+def restart_myself(keepalive_fd):
+    sys.argv = [ x for x in sys.argv if not x.startswith('--keepalive-fd=') ]
+    os.execvp("cmk", sys.argv + [ "--keepalive-fd=%d" % keepalive_fd ])
+
+
+def do_check_keepalive():
+    global g_initial_times, g_timeout
+
+    def check_timeout(signum, frame):
+        raise MKCheckTimeout()
+
+    signal.signal(signal.SIGALRM, signal.SIG_IGN) # Prevent ALRM from CheckHelper.cc
+
+    # Protect against plugins that output debug information (but shouldn't).
+    # Their stdout will interfere with communication with the Micro Core.
+    # So we simply redirect stdout to stderr, which will appear in the cmc.log,
+    # with the following trick:
+    # 1. move the filedescriptor 1 to a parking position
+    # 2. dup the stderr channel to stdout (2 to 1)
+    # 3. Send our answers to the Micro Core with the parked FD.
+    # BEWARE: this must not happen after we have execve'd ourselves!
+    if opt_keepalive_fd:
+        keepalive_fd = opt_keepalive_fd
+    else:
+        keepalive_fd = os.dup(1)
+        os.dup2(2, 1)  # Send stuff that is written to stdout instead to stderr
+
+    num_checks = 0 # count total number of check cycles
+
+    read_packed_config()
+    global vars_before_config
+    vars_before_config = set([])
+
+    global total_check_output
+    total_check_output = ""
+    if opt_debug:
+        before = copy_globals()
+
+    ipaddress_cache = {}
+
+    while True:
+        cleanup_globals()
+        cmdline = keepalive_read_line()
+        g_initial_times = os.times()
+
+        cmdline = cmdline.strip()
+        if cmdline == "*":
+            read_packed_config()
+            cleanup_globals()
+            reset_global_caches()
+            before = copy_globals()
+            continue
+
+        elif not cmdline:
+            break
+
+        # Always cleanup the total check output var before handling a new task
+        total_check_output = ""
+
+        num_checks += 1
+
+        g_timeout = int(keepalive_read_line())
+        try: # catch non-timeout exceptions
+            try: # catch timeouts
+                signal.signal(signal.SIGALRM, check_timeout)
+                signal.alarm(g_timeout)
+
+                # The CMC always provides arguments. This is the only use case for CMC. The last
+                # two arguments are the hostname and the ipaddress of the host to be asked for. 
+ # The other arguments might be different parameters to configure the actions to + # be done + args = cmdline.split() + if '--cache' in args: + args.remove('--cache') + enforce_using_agent_cache() + + # FIXME: remove obsolete check-inventory + if '--check-inventory' in args: + args.remove('--check-inventory') + mode_function = check_discovery + elif '--check-discovery' in args: + args.remove('--check-discovery') + mode_function = check_discovery + else: + mode_function = do_check + + if len(args) >= 2: + hostname, ipaddress = args[:2] + else: + hostname = args[0] + ipaddress = None + + if ipaddress == None: + if hostname in ipaddress_cache: + ipaddress = ipaddress_cache[hostname] + else: + if is_cluster(hostname): + ipaddress = None + else: + try: + ipaddress = lookup_ipaddress(hostname) + except: + raise MKGeneralException("Cannot resolve hostname %s into IP address" % hostname) + ipaddress_cache[hostname] = ipaddress + + status = mode_function(hostname, ipaddress) + signal.signal(signal.SIGALRM, signal.SIG_IGN) # Prevent ALRM from CheckHelper.cc + signal.alarm(0) + + except MKCheckTimeout: + signal.signal(signal.SIGALRM, signal.SIG_IGN) # Prevent ALRM from CheckHelper.cc + spec = exit_code_spec(hostname) + status = spec.get("timeout", 2) + total_check_output = "%s - Check_MK timed out after %d seconds\n" % ( + nagios_state_names[status], g_timeout) + + os.write(keepalive_fd, "%03d\n%08d\n%s" % + (status, len(total_check_output), total_check_output)) + total_check_output = "" + + except Exception, e: + signal.signal(signal.SIGALRM, signal.SIG_IGN) # Prevent ALRM from CheckHelper.cc + signal.alarm(0) + if opt_debug: + raise + output = "UNKNOWN - %s\n" % e + os.write(keepalive_fd, "%03d\n%08d\n%s" % (3, len(output), output)) + + # Flush file descriptors of stdout and stderr, so that diagnostic + # messages arrive in time in cmc.log + sys.stdout.flush() + sys.stderr.flush() + + cleanup_globals() # Prepare for next check + restore_original_agent_caching_usage() + + # Check if all global variables are clean, but only in debug mode + if opt_debug: + after = copy_globals() + for varname, value in before.items(): + if value != after[varname]: + sys.stderr.write("WARNING: global variable %s has changed: %r ==> %s\n" + % (varname, value, repr(after[varname])[:50])) + new_vars = set(after.keys()).difference(set(before.keys())) + if (new_vars): + sys.stderr.write("WARNING: new variable appeared: %s\n" % ", ".join(new_vars)) + sys.stderr.flush() + + keepalive_check_memory(num_checks, keepalive_fd) + # In case of profiling do just this one cycle and end afterwards + if g_profile: + output_profile() + sys.exit(0) + + # end of while True:... + + +# Just one lines from stdin. But: make sure that +# nothing more is read - not even into some internal +# buffer of sys.stdin! We do this by reading every +# single byte. I know that this is not performant, +# but we just read hostnames - not much data. + +def keepalive_read_line(): + line = "" + while True: + byte = os.read(0, 1) + if byte == '\n': + return line + elif not byte: # EOF + return '' + else: + line += byte + + +#. +# .--Read Config---------------------------------------------------------. 
+# | ____ _ ____ __ _ | +# | | _ \ ___ __ _ __| | / ___|___ _ __ / _(_) __ _ | +# | | |_) / _ \/ _` |/ _` | | | / _ \| '_ \| |_| |/ _` | | +# | | _ < __/ (_| | (_| | | |__| (_) | | | | _| | (_| | | +# | |_| \_\___|\__,_|\__,_| \____\___/|_| |_|_| |_|\__, | | # | |___/ | # +----------------------------------------------------------------------+ +# | Code for reading the configuration files. | +# '----------------------------------------------------------------------' + # Now - at last - we can read in the user's configuration files def all_nonfunction_vars(): @@ -4433,8 +5228,13 @@ cmp(len(pa), len(pb)) or \ cmp(pa, pb) +# Abort after an error, but only in interactive mode. +def interactive_abort(error): + if sys.stdout.isatty() or opt_interactive: + sys.stderr.write(error + "\n") + sys.exit(1) -def read_config_files(with_autochecks=True, with_conf_d=True): +def read_config_files(with_conf_d=True, validate_hosts=True): global vars_before_config, final_mk, local_mk, checks # Initialize dictionary-type default levels variables @@ -4471,8 +5271,6 @@ if '--scan-parents' in sys.argv and _f.endswith("/parents.mk"): continue try: - if opt_debug: - sys.stderr.write("Reading config file %s...\n" % _f) _old_all_hosts = all_hosts[:] _old_clusters = clusters.keys() # Make the config path available as a global variable to @@ -4488,11 +5286,10 @@ marks_hosts_with_path(_old_all_hosts, all_hosts, _f) marks_hosts_with_path(_old_clusters, clusters.keys(), _f) except Exception, e: - sys.stderr.write("Cannot read in configuration file %s:\n%s\n" % (_f, e)) - if __name__ == "__main__": - sys.exit(3) - else: + if opt_debug: raise + else: + interactive_abort("Cannot read in configuration file %s: %s" % (_f, e)) # Strip off host tags from the list of all_hosts. Host tags can be # appended to the hostnames in all_hosts, separated by pipe symbols, @@ -4505,13 +5302,14 @@ hosttags[parts[0]] = parts[1:] all_hosts_untagged = all_active_hosts() - # Sanity check for duplicate hostnames - seen_hostnames = set([]) - for hostname in strip_tags(all_hosts + clusters.keys()): - if hostname in seen_hostnames: - sys.stderr.write("Error in configuration: duplicate host '%s'\n" % hostname) - sys.exit(4) - seen_hostnames.add(hostname) + if validate_hosts: + # Sanity check for duplicate hostnames + seen_hostnames = set([]) + for hostname in strip_tags(all_hosts + clusters.keys()): + if hostname in seen_hostnames: + sys.stderr.write("Error in configuration: duplicate host '%s'\n" % hostname) + sys.exit(3) + seen_hostnames.add(hostname) # Add WATO-configured explicit checks to (possibly empty) checks # statically defined in checks. @@ -4544,16 +5342,22 @@ params[key] = value static.append((taglist, hostlist, checktype, item, params)) - checks = static + checks - # Read autochecks and append them to explicit checks - if with_autochecks: - read_all_autochecks() - checks = autochecks + checks + # Note: We need to reverse the order of the static_checks. This is because + # users assume that earlier rules have precedence over later ones. For static + # checks that is important if there are two rules for a host with the same + # combination of check type and item. When the variable 'checks' is evaluated, + # *later* rules have precedence. This is not consistent with the rest, but a + # result of this "historic implementation". + static.reverse() + + # Now prepend to checks. That makes that checks variable have precedence + # over WATO. 
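+    # (Editorial example of the resulting precedence; the rule names are made
+    # up for illustration: with static (already reversed) = [ wato_rule_2,
+    # wato_rule_1 ] and checks (from main.mk) = [ user_rule ], the
+    # concatenation below yields [ wato_rule_2, wato_rule_1, user_rule ].
+    # Since later entries win on evaluation, user_rule overrides WATO, and
+    # wato_rule_1 - defined first in WATO - overrides wato_rule_2.)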
+ checks = static + checks # Check for invalid configuration variables vars_after_config = all_nonfunction_vars() - ignored_variables = set(['vars_before_config', 'autochecks', 'parts', + ignored_variables = set(['vars_before_config', 'parts', 'hosttags' ,'seen_hostnames', 'all_hosts_untagged' ,'taggedhost' ,'hostname']) errors = 0 @@ -4572,17 +5376,6 @@ sys.stderr.write("If you use own helper variables, please prefix them with _.\n") sys.exit(1) - # Convert www_group into numeric id - global www_group - if type(www_group) == str: - try: - import grp - www_group = grp.getgrnam(www_group)[2] - except Exception, e: - sys.stderr.write("Cannot convert group '%s' into group id: %s\n" % (www_group, e)) - sys.stderr.write("Please set www_group to an existing group in main.mk.\n") - sys.exit(3) - # Prepare information for --backup and --restore global backup_paths backup_paths = [ @@ -4597,8 +5390,7 @@ ] # Load agent simulator if enabled in configuration - if agent_simulator: - execfile(modules_dir + "/agent_simulator.py", globals(), globals()) + execfile(modules_dir + "/agent_simulator.py", globals(), globals()) # Compute parameters for a check honoring factory settings, @@ -4660,6 +5452,12 @@ if type(params) == dict and type(entry) == dict: params.update(entry) else: + if type(entry) == dict: + # The entry still has the reference from the rule.. + # If we don't make a deepcopy the rule might be modified by + # a followup params.update(...) + import copy + entry = copy.deepcopy(entry) params = entry return params @@ -4677,32 +5475,6 @@ return service_extra_conf(host, str(item), rules) -# read automatically generated checks. They are prepended to the check -# table: explicit user defined checks override automatically generated -# ones. Do not read in autochecks, if check_mk is called as module. -def read_all_autochecks(): - global autochecks - autochecks = [] - for f in glob.glob(autochecksdir + '/*.mk'): - try: - autochecks += eval(file(f).read()) - except SyntaxError,e: - if opt_verbose: - sys.stderr.write("Syntax error in file %s: %s\n" % (f, e)) - if opt_debug: - sys.exit(3) - except Exception, e: - if opt_verbose: - sys.stderr.write("Error in file %s:\n%s\n" % (f, e)) - if opt_debug: - sys.exit(3) - - # Exchange inventorized check parameters with those configured by - # the user. Also merge with default levels for modern dictionary based checks. - autochecks = [ (host, ct, it, compute_check_parameters(host, ct, it, par)) - for (host, ct, it, par) in autochecks ] - - def output_profile(): if g_profile: g_profile.dump_stats(g_profile_path) @@ -4716,7 +5488,9 @@ sys.stderr.write("Profile '%s' written. Please run %s.\n" % (g_profile_path, show_profile)) -# +----------------------------------------------------------------------+ + +#. +# .--Main----------------------------------------------------------------. # | __ __ _ | # | | \/ | __ _(_)_ __ | # | | |\/| |/ _` | | '_ \ | @@ -4724,283 +5498,299 @@ # | |_| |_|\__,_|_|_| |_| | # | | # +----------------------------------------------------------------------+ +# | Main entry point and option parsing. Here is where all begins. 
| +# '----------------------------------------------------------------------' - +opt_nowiki = False +opt_split_rrds = False # Do option parsing and execute main function - -# if check_mk is not called as module -if __name__ == "__main__": - short_options = 'SHVLCURODMd:Ic:nhvpXPuN' - long_options = [ "help", "version", "verbose", "compile", "debug", - "list-checks", "list-hosts", "list-tag", "no-tcp", "cache", - "flush", "package", "localize", "donate", "snmpwalk", "usewalk", - "scan-parents", "procs=", "automation=", "notify", - "snmpget=", "profile", - "no-cache", "update", "restart", "reload", "dump", "fake-dns=", - "man", "nowiki", "config-check", "backup=", "restore=", - "check-inventory=", "paths", "cleanup-autochecks", "checks=" ] - - non_config_options = ['-L', '--list-checks', '-P', '--package', '-M', '--notify', - '--man', '-V', '--version' ,'-h', '--help', '--automation', ] +short_options = 'ASHVLCURODMmd:Ic:nhvpXPNBilf' +long_options = [ "help", "version", "verbose", "compile", "debug", "interactive", + "list-checks", "list-hosts", "list-tag", "no-tcp", "cache", + "flush", "package", "localize", "donate", "snmpwalk", "oid=", "extraoid=", + "snmptranslate", "bake-agents", "force", "show-snmp-stats", + "usewalk", "scan-parents", "procs=", "automation=", "notify", + "snmpget=", "profile", "keepalive", "keepalive-fd=", "create-rrd", + "convert-rrds", "split-rrds", + "no-cache", "update", "restart", "reload", "dump", "fake-dns=", + "man", "nowiki", "config-check", "backup=", "restore=", + "check-inventory=", "check-discovery=", "paths", + "checks=", "inventory", "inventory-as-check=", + "cmc-file=", "browse-man", "list-man", "update-dns-cache", "cap" ] + +non_config_options = ['-L', '--list-checks', '-P', '--package', '-M', '--notify', + '--man', '-V', '--version' ,'-h', '--help', '--automation', + '--create-rrd', '--convert-rrds', '--keepalive', '--cap' ] - try: - opts, args = getopt.getopt(sys.argv[1:], short_options, long_options) - except getopt.GetoptError, err: - print str(err) - sys.exit(1) - - # Read the configuration files (main.mk, autochecks, etc.), but not for - # certain operation modes that does not need them and should not be harmed - # by a broken configuration - if len(set.intersection(set(non_config_options), [o[0] for o in opts])) == 0: - read_config_files() - - done = False - seen_I = 0 - inventory_checks = None - # Scan modifying options first (makes use independent of option order) - for o,a in opts: - if o in [ '-v', '--verbose' ]: - opt_verbose = True - elif o == '-c': - check_mk_configfile = a - elif o == '--cache': - opt_use_cachefile = True - check_max_cachefile_age = 1000000000 - inventory_max_cachefile_age = 1000000000 - elif o == '--no-tcp': - opt_no_tcp = True - elif o == '--no-cache': - opt_no_cache = True - elif o == '-p': - opt_showperfdata = True - elif o == '-n': - opt_dont_submit = True - elif o in [ '-u', '--cleanup-autochecks' ]: - opt_cleanup_autochecks = True - elif o == '--fake-dns': - fake_dns = a - elif o == '--usewalk': - opt_use_snmp_walk = True - elif o == '--procs': - max_num_processes = int(a) - elif o == '--nowiki': - opt_nowiki = True - elif o == '--debug': - opt_debug = True - elif o == '-I': - seen_I += 1 - elif o == "--checks": - inventory_checks = a - - # Perform actions (major modes) - try: - for o, a in opts: - if o in [ '-h', '--help' ]: - usage() - done = True - elif o in [ '-V', '--version' ]: - print_version() - done = True - elif o in [ '-X', '--config-check' ]: - done = True - elif o in [ '-S', '-H' ]: - 
sys.stderr.write(tty_bold + tty_red + "ERROR" + tty_normal + "\n") - sys.stderr.write("The options -S and -H have been replaced with the option -N. If you \n") - sys.stderr.write("want to generate only the service definitions, please set \n") - sys.stderr.write("'generate_hostconf = False' in main.mk.\n") - done = True - elif o == '-N': - do_output_nagios_conf(args) - done = True - elif o in [ '-C', '--compile' ]: - precompile_hostchecks() - done = True - elif o in [ '-U', '--update' ] : - do_update() - done = True - elif o in [ '-R', '--restart' ] : - do_restart() - done = True - elif o in [ '-O', '--reload' ] : - do_reload() - done = True - elif o in [ '-D', '--dump' ]: - dump_all_hosts(args) - done = True - elif o == '--backup': - do_backup(a) - done = True - elif o == '--restore': - do_restore(a) - done = True - elif o == '--flush': - do_flush(args) - done = True - elif o == '--paths': - show_paths() - done = True - elif o in ['-P', '--package']: - execfile(modules_dir + "/packaging.py") - do_packaging(args) - done = True - elif o in ['--localize']: - execfile(modules_dir + "/localize.py") - do_localize(args) - done = True - elif o == '--donate': - do_donation() - done = True - elif o == '--snmpwalk': - do_snmpwalk(args) - done = True - elif o == '--snmpget': - do_snmpget(a, args) - done = True - elif o in [ '-M', '--man' ]: - if len(args) > 0: - show_check_manual(args[0]) - else: - list_all_manuals() - done = True - elif o == '--list-hosts': - l = list_all_hosts(args) - sys.stdout.write("\n".join(l)) - if l != []: - sys.stdout.write("\n") - done = True - elif o == '--list-tag': - l = list_all_hosts_with_tags(args) - sys.stdout.write("\n".join(l)) - if l != []: - sys.stdout.write("\n") - done = True - elif o in [ '-L', '--list-checks' ]: - output_check_info() - done = True - elif o == '-d': - output_plain_hostinfo(a) - done = True - elif o == '--check-inventory': - check_inventory(a) - done = True - elif o == '--scan-parents': - do_scan_parents(args) - done = True - elif o == '--automation': - execfile(modules_dir + "/automation.py") - do_automation(a, args) - done = True - elif o == '--notify': - read_config_files(False, True) - do_notify(args) - done = True - - - except MKGeneralException, e: - sys.stderr.write("%s\n" % e) - if opt_debug: - raise - sys.exit(3) - - if not done and seen_I > 0: - - hostnames = parse_hostname_list(args) - # For clusters add their nodes to the list - nodes = [] - for h in hostnames: - nodes = nodes_of(h) - if nodes: - hostnames += nodes - - # Then remove clusters and make list unique - hostnames = list(set([ h for h in hostnames if not is_cluster(h) ])) - hostnames.sort() +try: + opts, args = getopt.getopt(sys.argv[1:], short_options, long_options) +except getopt.GetoptError, err: + print str(err) + sys.exit(1) + +# Read the configuration files (main.mk, autochecks, etc.), but not for +# certain operation modes that does not need them and should not be harmed +# by a broken configuration +if len(set.intersection(set(non_config_options), [o[0] for o in opts])) == 0: + read_config_files() + +done = False +seen_I = 0 +check_types = None +exit_status = 0 +opt_verbose = 0 # start again from 0, was already faked at the beginning + +# Scan modifying options first (makes use independent of option order) +for o,a in opts: + if o in [ '-v', '--verbose' ]: + opt_verbose += 1 + elif o in [ '-f', '--force' ]: + opt_force = True + elif o == '-c': + if check_mk_configfile != a: + sys.stderr.write("Please use the option -c separated by the other options.\n") + 
sys.exit(1) + elif o == '--cache': + opt_use_cachefile = True + enforce_using_agent_cache() + elif o == '--no-tcp': + opt_no_tcp = True + elif o == '--no-cache': + opt_no_cache = True + elif o == '-p': + opt_showperfdata = True + elif o == '-n': + opt_dont_submit = True + elif o == '--fake-dns': + fake_dns = a + elif o == '--keepalive': + opt_keepalive = True + elif o == '--keepalive-fd': + opt_keepalive_fd = int(a) + elif o == '--usewalk': + opt_use_snmp_walk = True + elif o == '--oid': + opt_oids.append(a) + elif o == '--extraoid': + opt_extra_oids.append(a) + elif o == '--procs': + max_num_processes = int(a) + elif o == '--nowiki': + opt_nowiki = True + elif o == '--debug': + opt_debug = True + elif o == '--interactive': + opt_interactive = True + elif o == '-I': + seen_I += 1 + elif o == "--checks": + check_types = a.split(",") + elif o == "--cmc-file": + opt_cmc_relfilename = a + elif o == "--split-rrds": + opt_split_rrds = True - if opt_verbose: - if len(hostnames) > 0: - sys.stdout.write("Inventorizing %s.\n" % ", ".join(hostnames)) +# Perform actions (major modes) +try: + for o, a in opts: + if o in [ '-h', '--help' ]: + usage() + done = True + elif o in [ '-V', '--version' ]: + print_version() + done = True + elif o in [ '-X', '--config-check' ]: + done = True + elif o in [ '-S', '-H' ]: + sys.stderr.write(tty_bold + tty_red + "ERROR" + tty_normal + "\n") + sys.stderr.write("The options -S and -H have been replaced with the option -N. If you \n") + sys.stderr.write("want to generate only the service definitions, please set \n") + sys.stderr.write("'generate_hostconf = False' in main.mk.\n") + done = True + elif o == '-N': + do_output_nagios_conf(args) + done = True + elif o == '-B': + do_update(with_precompile=False) + done = True + elif o in [ '-C', '--compile' ]: + precompile_hostchecks() + done = True + elif o in [ '-U', '--update' ] : + do_update(with_precompile=True) + done = True + elif o in [ '-R', '--restart' ] : + do_restart() + done = True + elif o in [ '-O', '--reload' ] : + do_reload() + done = True + elif o in [ '-D', '--dump' ]: + dump_all_hosts(args) + done = True + elif o == '--backup': + do_backup(a) + done = True + elif o == '--restore': + do_restore(a) + done = True + elif o == '--flush': + do_flush(args) + done = True + elif o == '--paths': + show_paths() + done = True + elif o in ['-P', '--package']: + execfile(modules_dir + "/packaging.py") + do_packaging(args) + done = True + elif o in ['--localize']: + execfile(modules_dir + "/localize.py") + do_localize(args) + done = True + elif o == '--donate': + do_donation() + done = True + elif o == '--update-dns-cache': + do_update_dns_cache() + done = True + elif o == '--snmpwalk': + do_snmpwalk(args) + done = True + elif o == '--snmptranslate': + do_snmptranslate(args) + done = True + elif o == '--snmpget': + do_snmpget(a, args) + done = True + elif o in [ '-M', '--man' ]: + if len(args) > 0: + show_check_manual(args[0]) else: - sys.stdout.write("Inventorizing all hosts.\n") - - if inventory_checks: - checknames = inventory_checks.split(",") - - # remove existing checks, if option -I is used twice - if seen_I > 1: - if inventory_checks == None: - checknames = inventorable_checktypes("all") - if len(hostnames) > 0: - # Entries in hostnames that are either prefixed with @ - # or are no valid hostnames are considered to be tags. - for host in hostnames: - remove_autochecks_of(host, checknames) - # If all nodes of a cluster are contained in the list, then - # also remove the autochecks of that cluster. 
Beware: a host - # can be part more multiple clusters - for clust in clusters_of(host): - missing = [] # collect nodes missing on the command line - for node in nodes_of(clust): - if node not in hostnames: - missing.append(node) - - if len(missing) == 0: - if opt_verbose: - sys.stdout.write("All nodes of %s specified, dropping checks of %s, too.\n" % (clust, node)) - remove_autochecks_of(clust, checknames) - - else: - sys.stdout.write("Warning: %s is part of cluster %s, but you didn't specify %s as well.\nChecks on %s will be kept.\n" % - (host, clust, ",".join(missing), clust)) - + list_all_manuals() + done = True + elif o in [ '--list-man' ]: + read_manpage_catalog() + print pprint.pformat(g_manpage_catalog) + done = True + elif o in [ '-m', '--browse-man' ]: + manpage_browser() + done = True + elif o in [ '-l', '--list-hosts' ]: + l = list_all_hosts(args) + sys.stdout.write("\n".join(l)) + if l != []: + sys.stdout.write("\n") + done = True + elif o == '--list-tag': + l = list_all_hosts_with_tags(args) + sys.stdout.write("\n".join(l)) + if l != []: + sys.stdout.write("\n") + done = True + elif o in [ '-L', '--list-checks' ]: + output_check_info() + done = True + elif o == '-d': + output_plain_hostinfo(a) + done = True + elif o in [ '--check-discovery', '--check-inventory' ]: + check_discovery(a) + done = True + elif o == '--scan-parents': + do_scan_parents(args) + done = True + elif o == '--automation': + execfile(modules_dir + "/automation.py") + do_automation(a, args) + done = True + elif o in [ '-i', '--inventory' ]: + execfile(modules_dir + "/inventory.py") + if args: + hostnames = parse_hostname_list(args, with_clusters = False) else: - for host in all_active_hosts() + all_active_clusters(): - remove_autochecks_of(host, checknames) - reread_autochecks() - - if inventory_checks == None: - do_snmp_scan(hostnames) - checknames = inventorable_checktypes("tcp") - - for checkname in checknames: - make_inventory(checkname, hostnames, False) - - # -u, --cleanup-autochecks called in stand alone mode - if opt_cleanup_autochecks or always_cleanup_autochecks: - do_cleanup_autochecks() - done = True + hostnames = None + do_inv(hostnames) + done = True + elif o == '--inventory-as-check': + execfile(modules_dir + "/inventory.py") + do_inv_check(a) + done = True + elif o == '--notify': + read_config_files(with_conf_d=True, validate_hosts=False) + sys.exit(do_notify(args)) + elif o == '--create-rrd': + read_config_files(with_conf_d=True) + execfile(modules_dir + "/rrd.py") + do_create_rrd(args) + done = True + elif o == '--convert-rrds': + read_config_files(with_conf_d=True) + execfile(modules_dir + "/rrd.py") + do_convert_rrds(args) + done = True + elif o in [ '-A', '--bake-agents' ]: + if 'do_bake_agents' not in globals(): + bail_out("Agent baking is not implemented in your version of Check_MK. Sorry.") + if args: + hostnames = parse_hostname_list(args, with_clusters = False, with_foreign_hosts = True) + else: + hostnames = None + do_bake_agents(hostnames) + done = True + + elif o == '--cap': + if 'do_cap' not in globals(): + bail_out("Agent packages are not supported by your version of Check_MK.") + do_cap(args) + done = True + + elif o in [ '--show-snmp-stats' ]: + if 'do_show_snmp_stats' not in globals(): + sys.stderr.write("Handling of SNMP statistics is not implemented in your version of Check_MK. 
Sorry.\n") + sys.exit(1) + do_show_snmp_stats() + done = True - if not done and opt_cleanup_autochecks: # -u as standalone option - do_cleanup_autochecks() + # handle -I / -II + if not done and seen_I > 0: + hostnames = parse_hostname_list(args) + do_discovery(hostnames, check_types, seen_I == 1) done = True + if not done: + if (len(args) == 0 and not opt_keepalive) or len(args) > 2: + usage() + sys.exit(1) - if done: - output_profile() - sys.exit(0) - elif len(args) == 0 or len(args) > 2: - usage() - sys.exit(1) - else: - - hostname = args[0] - if len(args) == 2: - ipaddress = args[1] - else: - if is_cluster(hostname): - ipaddress = None + # handle --keepalive + elif opt_keepalive: + do_check_keepalive() + + # handle adhoc-check + else: + hostname = args[0] + if len(args) == 2: + ipaddress = args[1] else: - try: - ipaddress = lookup_ipaddress(hostname) - except: - print "Cannot resolve hostname '%s'." % hostname - sys.exit(2) + if is_cluster(hostname): + ipaddress = None + else: + try: + ipaddress = lookup_ipaddress(hostname) + except: + print "Cannot resolve hostname '%s'." % hostname + sys.exit(2) - # honor --checks= also when checking (makes testing easier) - if inventory_checks: - check_types = inventory_checks.split(",") - else: - check_types = None + exit_status = do_check(hostname, ipaddress, check_types) - do_check(hostname, ipaddress, check_types) + output_profile() + sys.exit(exit_status) + +except (MKGeneralException, MKBailOut), e: + sys.stderr.write("%s\n" % e) + if opt_debug: + raise + sys.exit(3) diff -Nru check-mk-1.2.2p3/check_mk-qlogic_fcport.php check-mk-1.2.6p12/check_mk-qlogic_fcport.php --- check-mk-1.2.2p3/check_mk-qlogic_fcport.php 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/check_mk-qlogic_fcport.php 2015-06-24 09:48:39.000000000 +0000 @@ -0,0 +1,172 @@ + $n) { + $RRD[$n] = "$RRDFILE[$i]:$DS[$i]:MAX"; + $RRDAVG[$n] = "$RRDFILE[$i]:$DS[$i]:AVERAGE"; + $WARN[$n] = $WARN[$i]; + $CRIT[$n] = $CRIT[$i]; + $MIN[$n] = $MIN[$i]; + $MAX[$n] = $MAX[$i]; +} + + +# 1. GRAPH: THROUGHPUT IN MB/s + +$ds_name[1] = 'Traffic'; +$opt[1] = "--vertical-label \"MByte/sec\" -X0 -b 1024 --title \"Traffic for $hostname / $servicedesc\" "; + +$def[1] = "" + . "HRULE:0#c0c0c0 " + . "DEF:in=$RRD[in] " + . "DEF:out=$RRD[out] " + . "CDEF:inmb=in,1048576,/ " + . "CDEF:outmb=out,1048576,/ " + . "DEF:inavg=$RRDAVG[in] " + . "DEF:outavg=$RRDAVG[out] " + . "CDEF:inmbavg=inavg,1048576,/ " + . "CDEF:outmbavg=outavg,1048576,/ " + . "AREA:inmb#60a020:\"in \" " + . "GPRINT:inmb:LAST:\"%5.3lf MB/s last\" " + . "GPRINT:inmbavg:AVERAGE:\"%5.3lf MB/s avg\" " + . "GPRINT:inmb:MAX:\"%5.3lf MB/s max\\n\" " + . "CDEF:out_draw=outmb,-1,* " + . "AREA:out_draw#2060a0:\"out \" " + . "GPRINT:outmb:LAST:\"%5.3lf MB/s last\" " + . "GPRINT:outmbavg:AVERAGE:\"%5.3lf MB/s avg\" " + . "GPRINT:outmb:MAX:\"%5.3lf MB/s max\\n\" " + ; + +if (isset($RRD['in_avg'])) { +$def[1] .= "" + . "DEF:inaverage=$RRD[in_avg] " + . "DEF:outaverage=$RRD[out_avg] " + . "CDEF:inaveragemb=inaverage,1048576,/ " + . "CDEF:outaveragemb=outaverage,1048576,/ " + . "DEF:inaverage_avg=$RRDAVG[in_avg] " + . "DEF:outaverage_avg=$RRDAVG[out_avg] " + . "CDEF:inaveragemb_avg=inaverage_avg,1048576,/ " + . "CDEF:outaveragemb_avg=outaverage_avg,1048576,/ " + . "CDEF:outaveragemb_draw=outaverage,-1048576,/ " + . "LINE:inaveragemb_avg#a0d040:\"in (avg) \" " + . "GPRINT:inaveragemb:LAST:\"%5.3lf MB/s last\" " + . "GPRINT:inaveragemb_avg:AVERAGE:\"%5.3lf MB/s avg\" " + . "GPRINT:inaveragemb:MAX:\"%5.3lf MB/s max\\n\" " + . 
"LINE:outaveragemb_draw#40a0d0:\"out (avg)\" " + . "GPRINT:outaveragemb:LAST:\"%5.3lf MB/s last\" " + . "GPRINT:outaveragemb_avg:AVERAGE:\"%5.3lf MB/s avg\" " + . "GPRINT:outaveragemb:MAX:\"%5.3lf MB/s max\\n\" " + ; +} + +if ($WARN['in']) { + $def[1] .= "HRULE:$WARN[in]#ffff00:\"Warning (in)\" "; + $def[1] .= "HRULE:-$WARN[out]#ffff00:\"Warning (out)\" "; +} +if ($CRIT['in']) { + $def[1] .= "HRULE:$CRIT[in]#ff0000:\"Critical (in)\" "; + $def[1] .= "HRULE:-$CRIT[out]#ff0000:\"Critical (out)\" "; +} +if ($MAX['in']) { + $speedmb = $MAX['in'] / 1048576.0; + $speedtxt = sprintf("%.1f MB/s", $speedmb); + $def[1] .= "HRULE:$speedmb#ff80c0:\"Portspeed\: $speedtxt\" "; + $def[1] .= "HRULE:-$speedmb#ff80c0 "; + # $opt[1] .= " -u $speedmb -l -$speedmb"; +} + +# 2. GRAPH: FRAMES +$ds_name[2] = 'Frames'; +$opt[2] = "--vertical-label \"Frames/sec\" -b 1024 --title \"Frames per second\" "; +$def[2] = "" + . "HRULE:0#c0c0c0 " + . "DEF:in=$RRD[rxframes] " + . "DEF:out=$RRD[txframes] " + . "DEF:inavg=$RRDAVG[rxframes] " + . "DEF:outavg=$RRDAVG[txframes] " + . "AREA:in#a0d040:\"in \" " + . "GPRINT:in:LAST:\"%5.1lf/s last\" " + . "GPRINT:inavg:AVERAGE:\"%5.1lf/s avg\" " + . "GPRINT:in:MAX:\"%5.1lf/s max\\n\" " + . "CDEF:out_draw=out,-1,* " + . "AREA:out_draw#40a0d0:\"out \" " + . "GPRINT:out:LAST:\"%5.1lf/s last\" " + . "GPRINT:outavg:AVERAGE:\"%5.1lf/s avg\" " + . "GPRINT:out:MAX:\"%5.1lf/s max\\n\" " + ; + +# 3. GRAPH: ERRORS + +$ds_name[3] = 'Error counter'; +$opt[3] = "--vertical-label \"Error counter\" --title \"Problems\" "; +$def[3] = "" + . "DEF:link_failures=$RRD[link_failures] " + . "DEF:sync_losses=$RRD[sync_losses] " + . "DEF:prim_seq_proto_errors=$RRD[prim_seq_proto_errors] " + . "DEF:invalid_tx_words=$RRD[invalid_tx_words] " + . "DEF:invalid_crcs=$RRD[invalid_crcs] " + . "DEF:address_id_errors=$RRD[address_id_errors] " + . "DEF:link_reset_ins=$RRD[link_reset_ins] " + . "DEF:link_reset_outs=$RRD[link_reset_outs] " + . "DEF:ols_ins=$RRD[ols_ins] " + . "DEF:ols_outs=$RRD[ols_outs] " + . "DEF:discards=$RRD[discards] " + . "DEF:c2_fbsy_frames=$RRD[c2_fbsy_frames] " + . "DEF:c2_frjt_frames=$RRD[c2_frjt_frames] " + . "LINE1:link_failures#c00000:\"Link Failures \" " + . "GPRINT:link_failures:LAST:\"last\: %4.0lf/s \\n\" " + . "LINE1:sync_losses#ff8000:\"Sync Losses \" " + . "GPRINT:sync_losses:LAST:\"last\: %4.0lf/s\\n\" " + . "LINE1:prim_seq_proto_errors#ff0080:\"PrimitSeqErrors \" " + . "GPRINT:prim_seq_proto_errors:LAST:\"last\: %4.0lf/s \\n\" " + . "LINE1:invalid_tx_words#ffa0a0:\"Invalid TX Words \" " + . "GPRINT:invalid_tx_words:LAST:\"last\: %4.0lf/s\\n\" " + . "LINE1:invalid_crcs#0080FF:\"Invalid CRCs \" " + . "GPRINT:invalid_crcs:LAST:\"last\: %4.0lf/s \\n\" " + . "LINE1:address_id_errors#8080FF:\"Address ID Errors \" " + . "GPRINT:address_id_errors:LAST:\"last\: %4.0lf/s\\n\" " + . "LINE1:link_reset_ins#0000A0:\"Link Resets In \" " + . "GPRINT:link_reset_ins:LAST:\"last\: %4.0lf/s \\n\" " + . "LINE1:link_reset_outs#400080:\"Link Resets Out \" " + . "GPRINT:link_reset_outs:LAST:\"last\: %4.0lf/s\\n\" " + . "LINE1:ols_ins#800000:\"Offline Sequences In \" " + . "GPRINT:ols_ins:LAST:\"last\: %4.0lf/s\\n\" " + . "LINE1:ols_outs#FF0000:\"Offline Sequences Out\" " + . "GPRINT:ols_outs:LAST:\"last\: %4.0lf/s \\n\" " + . "LINE1:discards#800080:\"Discards \" " + . "GPRINT:discards:LAST:\"last\: %4.0lf/s\\n\" " + . "LINE1:c2_fbsy_frames#0000FF:\"F_BSY frames \" " + . "GPRINT:c2_fbsy_frames:LAST:\"last\: %4.0lf/s \\n\" " + . "LINE1:c2_frjt_frames#408080:\"F_RJT frames \" " + . 
"GPRINT:c2_frjt_frames:LAST:\"last\: %4.0lf/s\\n\" " + ; +?> + + diff -Nru check-mk-1.2.2p3/check_mk-qlogic_sanbox.temp.php check-mk-1.2.6p12/check_mk-qlogic_sanbox.temp.php --- check-mk-1.2.2p3/check_mk-qlogic_sanbox.temp.php 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/check_mk-qlogic_sanbox.temp.php 2015-06-24 09:48:39.000000000 +0000 @@ -0,0 +1,38 @@ + diff -Nru check-mk-1.2.2p3/check_mk-raritan_pdu_inlet.php check-mk-1.2.6p12/check_mk-raritan_pdu_inlet.php --- check-mk-1.2.2p3/check_mk-raritan_pdu_inlet.php 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/check_mk-raritan_pdu_inlet.php 2015-06-24 09:48:39.000000000 +0000 @@ -0,0 +1,114 @@ +DS as $KEY=>$VAL) { + + $maximum = ""; + $minimum = ""; + $critical = ""; + $crit_min = ""; + $crit_max = ""; + $warning = ""; + $warn_max = ""; + $warn_min = ""; + $vlabel = " "; + $lower = ""; + $upper = ""; + + if ($VAL['WARN'] != "" && is_numeric($VAL['WARN']) ){ + $warning = $VAL['WARN']; + } + if ($VAL['WARN_MAX'] != "" && is_numeric($VAL['WARN_MAX']) ) { + $warn_max = $VAL['WARN_MAX']; + } + if ( $VAL['WARN_MIN'] != "" && is_numeric($VAL['WARN_MIN']) ) { + $warn_min = $VAL['WARN_MIN']; + } + if ( $VAL['CRIT'] != "" && is_numeric($VAL['CRIT']) ) { + $critical = $VAL['CRIT']; + } + if ( $VAL['CRIT_MAX'] != "" && is_numeric($VAL['CRIT_MAX']) ) { + $crit_max = $VAL['CRIT_MAX']; + } + if ( $VAL['CRIT_MIN'] != "" && is_numeric($VAL['CRIT_MIN']) ) { + $crit_min = $VAL['CRIT_MIN']; + } + if ( $VAL['MIN'] != "" && is_numeric($VAL['MIN']) ) { + $lower = " --lower=" . $VAL['MIN']; + $minimum = $VAL['MIN']; + } + if ( $VAL['MAX'] != "" && is_numeric($VAL['MAX']) ) { + $maximum = $VAL['MAX']; + } + if ($VAL['UNIT'] == "%%") { + $vlabel = "%"; + $upper = " --upper=101 "; + $lower = " --lower=0 "; + } + else { + $vlabel = $VAL['UNIT']; + } + + $opt[$KEY] = '--vertical-label "' . $vlabel . '" --title "' . $this->MACRO['DISP_HOSTNAME'] . ' / ' . $this->MACRO['DISP_SERVICEDESC'] . '"' . $upper . 
$lower; + $ds_name[$KEY] = $VAL['LABEL']; + $def[$KEY] = rrd::def ("var1", $VAL['RRDFILE'], $VAL['DS'], "AVERAGE"); + $def[$KEY] .= rrd::gradient("var1", "3152A5", "BDC6DE", rrd::cut($VAL['NAME'],16), 20); + $def[$KEY] .= rrd::line1 ("var1", $_LINE ); + $def[$KEY] .= rrd::gprint ("var1", array("LAST","MAX","AVERAGE"), "%3.2lf %S".$VAL['UNIT']); + if ($warning != "") { + $def[$KEY] .= rrd::hrule($warning, $_WARNRULE, "Warning $warning \\n"); + } + if ($warn_min != "") { + $def[$KEY] .= rrd::hrule($warn_min, $_WARNRULE, "Warning (min) $warn_min \\n"); + } + if ($warn_max != "") { + $def[$KEY] .= rrd::hrule($warn_max, $_WARNRULE, "Warning (max) $warn_max \\n"); + } + if ($critical != "") { + $def[$KEY] .= rrd::hrule($critical, $_CRITRULE, "Critical $critical \\n"); + } + if ($crit_min != "") { + $def[$KEY] .= rrd::hrule($crit_min, $_CRITRULE, "Critical (min) $crit_min \\n"); + } + if ($crit_max != "") { + $def[$KEY] .= rrd::hrule($crit_max, $_CRITRULE, "Critical (max) $crit_max \\n"); + } +} +?> diff -Nru check-mk-1.2.2p3/check_mk-raritan_pdu_outletcount.php check-mk-1.2.6p12/check_mk-raritan_pdu_outletcount.php --- check-mk-1.2.2p3/check_mk-raritan_pdu_outletcount.php 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/check_mk-raritan_pdu_outletcount.php 2015-06-24 09:48:39.000000000 +0000 @@ -0,0 +1,114 @@ +DS as $KEY=>$VAL) { + + $maximum = ""; + $minimum = ""; + $critical = ""; + $crit_min = ""; + $crit_max = ""; + $warning = ""; + $warn_max = ""; + $warn_min = ""; + $vlabel = " "; + $lower = ""; + $upper = ""; + + if ($VAL['WARN'] != "" && is_numeric($VAL['WARN']) ){ + $warning = $VAL['WARN']; + } + if ($VAL['WARN_MAX'] != "" && is_numeric($VAL['WARN_MAX']) ) { + $warn_max = $VAL['WARN_MAX']; + } + if ( $VAL['WARN_MIN'] != "" && is_numeric($VAL['WARN_MIN']) ) { + $warn_min = $VAL['WARN_MIN']; + } + if ( $VAL['CRIT'] != "" && is_numeric($VAL['CRIT']) ) { + $critical = $VAL['CRIT']; + } + if ( $VAL['CRIT_MAX'] != "" && is_numeric($VAL['CRIT_MAX']) ) { + $crit_max = $VAL['CRIT_MAX']; + } + if ( $VAL['CRIT_MIN'] != "" && is_numeric($VAL['CRIT_MIN']) ) { + $crit_min = $VAL['CRIT_MIN']; + } + if ( $VAL['MIN'] != "" && is_numeric($VAL['MIN']) ) { + $lower = " --lower=" . $VAL['MIN']; + $minimum = $VAL['MIN']; + } + if ( $VAL['MAX'] != "" && is_numeric($VAL['MAX']) ) { + $maximum = $VAL['MAX']; + } + if ($VAL['UNIT'] == "%%") { + $vlabel = "%"; + $upper = " --upper=101 "; + $lower = " --lower=0 "; + } + else { + $vlabel = $VAL['UNIT']; + } + + $opt[$KEY] = '--vertical-label "' . $vlabel . '" --title "' . $this->MACRO['DISP_HOSTNAME'] . ' / ' . $this->MACRO['DISP_SERVICEDESC'] . '"' . $upper . 
$lower; + $ds_name[$KEY] = $VAL['LABEL']; + $def[$KEY] = rrd::def ("var1", $VAL['RRDFILE'], $VAL['DS'], "AVERAGE"); + $def[$KEY] .= rrd::gradient("var1", "3152A5", "BDC6DE", rrd::cut($VAL['NAME'],16), 20); + $def[$KEY] .= rrd::line1 ("var1", $_LINE ); + $def[$KEY] .= rrd::gprint ("var1", array("LAST","MAX","AVERAGE"), "%3.0lf %S".$VAL['UNIT']); + if ($warning != "") { + $def[$KEY] .= rrd::hrule($warning, $_WARNRULE, "Warning $warning \\n"); + } + if ($warn_min != "") { + $def[$KEY] .= rrd::hrule($warn_min, $_WARNRULE, "Warning (min) $warn_min \\n"); + } + if ($warn_max != "") { + $def[$KEY] .= rrd::hrule($warn_max, $_WARNRULE, "Warning (max) $warn_max \\n"); + } + if ($critical != "") { + $def[$KEY] .= rrd::hrule($critical, $_CRITRULE, "Critical $critical \\n"); + } + if ($crit_min != "") { + $def[$KEY] .= rrd::hrule($crit_min, $_CRITRULE, "Critical (min) $crit_min \\n"); + } + if ($crit_max != "") { + $def[$KEY] .= rrd::hrule($crit_max, $_CRITRULE, "Critical (max) $crit_max \\n"); + } +} +?> diff -Nru check-mk-1.2.2p3/check_mk-rmon_stats.php check-mk-1.2.6p12/check_mk-rmon_stats.php --- check-mk-1.2.2p3/check_mk-rmon_stats.php 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/check_mk-rmon_stats.php 2015-06-24 09:48:39.000000000 +0000 @@ -0,0 +1,47 @@ + diff -Nru check-mk-1.2.2p3/check_mk-sensatronics_temp.php check-mk-1.2.6p12/check_mk-sensatronics_temp.php --- check-mk-1.2.2p3/check_mk-sensatronics_temp.php 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/check_mk-sensatronics_temp.php 2015-06-24 09:48:39.000000000 +0000 @@ -0,0 +1,38 @@ + diff -Nru check-mk-1.2.2p3/check_mk-smart.stats.php check-mk-1.2.6p12/check_mk-smart.stats.php --- check-mk-1.2.2p3/check_mk-smart.stats.php 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/check_mk-smart.stats.php 2015-06-24 09:48:39.000000000 +0000 @@ -0,0 +1,84 @@ +config->conf['template_dirs'])) { + $template_dirs = $this->config->conf['template_dirs']; +} +$descr = str_replace("/", "_", $servicedesc); +foreach ($template_dirs as $template_dir) { + $found = 0; + for ($i = strlen($descr); $i > 0; $i--) + { + $tryname = $template_dir . '/' . substr($descr, 0, $i) . '.php'; + if (file_exists($tryname) && include($tryname)) { + $found = 1; + break; + } + } + if ($found) { + break; + } +} + +# Use another color for each graph. After eight graphs colors wrap around. +$area_colors = array( "beff5f", "5fffef", "5faaff", "cc5fff", "ff5fe2", "ff5f6c", "ff975f", "ffec5f"); +$line_colors = array( "5f7a2f", "2f8077", "2f5580", "662f80", "802f71", "802f36", "804b2f", "80762f"); + +if (!$found) { + foreach ($RRDFILE as $i => $RRD) { + $ii = $i % 8; + $name = $NAME[$i]; + $def[$i] = "DEF:cnt=$RRDFILE[$i]:$DS[$i]:MAX "; + $def[$i] .= "AREA:cnt#$area_colors[$ii]:\"$name\" "; + $def[$i] .= "LINE1:cnt#$line_colors[$ii]: "; + + $upper = ""; + $lower = " -l 0"; + if ($WARN[$i] != "") { + $def[$i] .= "HRULE:$WARN[$i]#ffff00:\"Warning\" "; + } + if ($CRIT[$i] != "") { + $def[$i] .= "HRULE:$CRIT[$i]#ff0000:\"Critical\" "; + } + if ($MIN[$i] != "") { + $lower = " -l " . $MIN[$i]; + $minimum = $MIN[$i]; + } + if ($MAX[$i] != "") { + $upper = " -u" . 
$MAX[$i]; + $def[$i] .= "HRULE:$MAX[$i]#0000b0:\"Upper limit\" "; + } + + $opt[$i] = "$lower $upper --title '$hostname: $servicedesc - $name' "; + $def[$i] .= "GPRINT:cnt:LAST:\"current\: %6.2lf\" "; + $def[$i] .= "GPRINT:cnt:MAX:\"max\: %6.2lf\" "; + $def[$i] .= "GPRINT:cnt:AVERAGE:\"avg\: %6.2lf\" "; + } +} + +?> diff -Nru check-mk-1.2.2p3/check_mk-smart.temp.php check-mk-1.2.6p12/check_mk-smart.temp.php --- check-mk-1.2.2p3/check_mk-smart.temp.php 2013-03-04 11:48:41.000000000 +0000 +++ check-mk-1.2.6p12/check_mk-smart.temp.php 2015-06-24 09:48:39.000000000 +0000 @@ -6,7 +6,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. @@ -31,6 +31,8 @@ $def[1] .= "LINE1:var1#000080:\"\" "; $def[1] .= "GPRINT:var1:MAX:\"(Max\: %2.0lfC,\" "; $def[1] .= "GPRINT:var1:AVERAGE:\"Avg\: %2.0lfC)\" "; -$def[1] .= "HRULE:$WARN[1]#FFFF00:\"Warning\: $WARN[1]C\" "; -$def[1] .= "HRULE:$CRIT[1]#FF0000:\"Critical\: $CRIT[1]C\" "; +if ($WARN[1] != "") { + $def[1] .= "HRULE:$WARN[1]#FFFF00:\"Warning\: $WARN[1]C\" "; + $def[1] .= "HRULE:$CRIT[1]#FF0000:\"Critical\: $CRIT[1]C\" "; +} ?> diff -Nru check-mk-1.2.2p3/check_mk-sni_octopuse_cpu.php check-mk-1.2.6p12/check_mk-sni_octopuse_cpu.php --- check-mk-1.2.2p3/check_mk-sni_octopuse_cpu.php 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/check_mk-sni_octopuse_cpu.php 2015-09-21 10:59:54.000000000 +0000 @@ -0,0 +1,33 @@ + diff -Nru check-mk-1.2.2p3/check_mk-snmp_uptime.php check-mk-1.2.6p12/check_mk-snmp_uptime.php --- check-mk-1.2.2p3/check_mk-snmp_uptime.php 2013-03-04 11:48:42.000000000 +0000 +++ check-mk-1.2.6p12/check_mk-snmp_uptime.php 2015-06-24 09:48:39.000000000 +0000 @@ -6,7 +6,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. diff -Nru check-mk-1.2.2p3/check_mk-statgrab_cpu.php check-mk-1.2.6p12/check_mk-statgrab_cpu.php --- check-mk-1.2.2p3/check_mk-statgrab_cpu.php 2013-03-04 11:48:42.000000000 +0000 +++ check-mk-1.2.6p12/check_mk-statgrab_cpu.php 2015-06-24 09:48:39.000000000 +0000 @@ -6,7 +6,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. diff -Nru check-mk-1.2.2p3/check_mk-statgrab_disk.php check-mk-1.2.6p12/check_mk-statgrab_disk.php --- check-mk-1.2.2p3/check_mk-statgrab_disk.php 2013-03-04 11:48:42.000000000 +0000 +++ check-mk-1.2.6p12/check_mk-statgrab_disk.php 2015-06-24 09:48:39.000000000 +0000 @@ -6,7 +6,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. 
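[Editorial note on the hunk below: the statgrab_disk check now delivers its
throughput value in bytes instead of kBytes, so the RRDtool CDEF that derives
MBytes has to divide by 1048576 instead of 1024. In RRDtool's reverse Polish
notation the corrected conversion reads:

    CDEF:mb=bytes,1048576,/        (i.e. mb = bytes / 1048576)

where "bytes" and "mb" are the variable names used in the hunk.]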
@@ -25,8 +25,8 @@
 
 $opt[1] = "--vertical-label 'Throughput (MByte/s)' -l0  -u 1 --title \"Disk throughput $hostname / $servicedesc\" ";
 
-$def[1] = "DEF:kb=$RRDFILE[1]:$DS[1]:AVERAGE " ;
-$def[1] .= "CDEF:mb=kb,1024,/ " ;
+$def[1] = "DEF:bytes=$RRDFILE[1]:$DS[1]:AVERAGE " ;
+$def[1] .= "CDEF:mb=bytes,1048576,/ " ;
 $def[1] .= "AREA:mb#40c080 " ;
 $def[1] .= "GPRINT:mb:LAST:\"%6.1lf MByte/s last\" " ;
 $def[1] .= "GPRINT:mb:AVERAGE:\"%6.1lf MByte/s avg\" " ;
diff -Nru check-mk-1.2.2p3/check_mk-statgrab_load.php check-mk-1.2.6p12/check_mk-statgrab_load.php
--- check-mk-1.2.2p3/check_mk-statgrab_load.php	2013-11-05 09:22:37.000000000 +0000
+++ check-mk-1.2.6p12/check_mk-statgrab_load.php	2015-09-21 10:59:54.000000000 +0000
@@ -6,7 +6,7 @@
 # |           | |___| | | |  __/ (__|   <    | |  | | . \            |
 # |            \____|_| |_|\___|\___|_|\_\___|_|  |_|_|\_\           |
 # |                                                                  |
-# | Copyright Mathias Kettner 2013             mk@mathias-kettner.de |
+# | Copyright Mathias Kettner 2014             mk@mathias-kettner.de |
 # +------------------------------------------------------------------+
 #
 # This file is part of Check_MK.
@@ -23,19 +23,49 @@
 # to the Free Software Foundation, Inc., 51 Franklin St,  Fifth Floor,
 # Boston, MA 02110-1301 USA.
 
-$opt[1] = "--vertical-label Load -l0 -u 1 --title \"CPU Load for $hostname / $servicedesc\" ";
+# The number of data sources varies due to different
+# settings (such as averaging). We therefore work with
+# names rather than with numbers.
+$RRD = array();
+foreach ($NAME as $i => $n) {
+    $RRD[$n]  = "$RRDFILE[$i]:$DS[$i]:MAX";
+    $WARN[$n] = $WARN[$i];
+    $CRIT[$n] = $CRIT[$i];
+    $MIN[$n]  = $MIN[$i];
+    $MAX[$n]  = $MAX[$i];
+}
 
-$def[1] = "DEF:var1=$RRDFILE[1]:$DS[1]:MAX " ;
-$def[1] .= "DEF:var2=$RRDFILE[2]:$DS[2]:MAX " ;
-$def[1] .= "DEF:var3=$RRDFILE[3]:$DS[3]:MAX " ;
-$def[1] .= "HRULE:$WARN[1]#FFFF00 ";
-$def[1] .= "HRULE:$CRIT[1]#FF0000 ";
-$def[1] .= "AREA:var1#60c0e0:\"Load average  1 min \" " ;
-$def[1] .= "GPRINT:var1:LAST:\"%6.2lf last\" " ;
-$def[1] .= "GPRINT:var1:AVERAGE:\"%6.2lf avg\" " ;
-$def[1] .= "GPRINT:var1:MAX:\"%6.2lf max\\n\" ";
-$def[1] .= "LINE:var3#004080:\"Load average 15 min \" " ;
-$def[1] .= "GPRINT:var3:LAST:\"%6.2lf last\" " ;
-$def[1] .= "GPRINT:var3:AVERAGE:\"%6.2lf avg\" " ;
-$def[1] .= "GPRINT:var3:MAX:\"%6.2lf max\\n\" " ;
+$opt[1] = "--vertical-label 'Load average' -l0  -u 1 --title \"CPU Load for $hostname\" ";
+
+$def[1] = ""
+   . "DEF:load1=$RRD[load1] "
+   . "AREA:load1#60c0e0:\"Load average  1 min \" "
+   . "GPRINT:load1:LAST:\"%6.2lf last\" "
+   . "GPRINT:load1:AVERAGE:\"%6.2lf avg\" "
+   . "GPRINT:load1:MAX:\"%6.2lf max\\n\" "
+
+   . "DEF:load15=$RRD[load15] "
+   . "LINE:load15#004080:\"Load average 15 min \" "
+   . "GPRINT:load15:LAST:\"%6.2lf last\" "
+   . "GPRINT:load15:AVERAGE:\"%6.2lf avg\" "
+   . "GPRINT:load15:MAX:\"%6.2lf max\\n\" "
+   . "";
+
+if ($WARN[1]) {
+    $def[1] .= ""
+       . "HRULE:$WARN[1]#FFFF00 "
+       . "HRULE:$CRIT[1]#FF0000 "
+       . "";
+}
+
+if ($MAX[1]) {
+    $def[1] .= "COMMENT:\"  Number of CPUs $MAX[1]\" ";
+}
+
+if (isset($RRD["predict_load15"])) {
+    $def[1] .= ""
+       . "DEF:predict=$RRD[predict_load15] "
+       . "LINE:predict#ff0000:\"Reference for prediction \\n\" "
+       . "";
+}
 ?>
diff -Nru check-mk-1.2.2p3/check_mk-statgrab_mem.php check-mk-1.2.6p12/check_mk-statgrab_mem.php
--- check-mk-1.2.2p3/check_mk-statgrab_mem.php	2013-11-05 09:22:37.000000000 +0000
+++ check-mk-1.2.6p12/check_mk-statgrab_mem.php	2015-06-24 09:48:39.000000000 +0000
@@ -6,7 +6,7 @@
 # |           | |___| | | |  __/ (__|   <    | |  | | . 
\ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. @@ -25,14 +25,17 @@ $opt[1] = "--vertical-label 'MEMORY(MB)' --upper-limit " . ($MAX[1] * 120 / 100) . " -l0 --title \"Memory usage $hostname\" "; +$maxgb = sprintf("%.1f", $MAX[1] / 1024.0); + $def[1] = "DEF:ram=$RRDFILE[1]:$DS[1]:AVERAGE " ; $def[1] .= "DEF:swap=$RRDFILE[2]:$DS[2]:AVERAGE " ; $def[1] .= "DEF:virt=$RRDFILE[3]:$DS[3]:AVERAGE " ; $def[1] .= "HRULE:$MAX[3]#000080:\"RAM+SWAP installed\" "; -$def[1] .= "HRULE:$MAX[1]#2040d0:\"RAM installed\" "; +$def[1] .= "HRULE:$MAX[1]#2040d0:\"$maxgb GB RAM installed\" "; $def[1] .= "HRULE:$WARN[3]#FFFF00:\"Warning\" "; $def[1] .= "HRULE:$CRIT[3]#FF0000:\"Critical\\n\" "; +$def[1] .= "'COMMENT:\\n' " ; $def[1] .= "AREA:ram#80ff40:\"RAM used \" " ; $def[1] .= "GPRINT:ram:LAST:\"%6.0lf MB last\" " ; $def[1] .= "GPRINT:ram:AVERAGE:\"%6.0lf MB avg\" " ; diff -Nru check-mk-1.2.2p3/check_mk-statgrab_net.ctr.php check-mk-1.2.6p12/check_mk-statgrab_net.ctr.php --- check-mk-1.2.2p3/check_mk-statgrab_net.ctr.php 2013-03-04 11:48:42.000000000 +0000 +++ check-mk-1.2.6p12/check_mk-statgrab_net.ctr.php 2015-06-24 09:48:39.000000000 +0000 @@ -6,7 +6,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. @@ -32,13 +32,13 @@ # 5: rx_errors # 6: tx_errors # 7: tx_collisions - + # $x = explode("_", $servicedesc); $nic = $x[1]; $opt[1] = "--vertical-label 'Bytes/s' -l -1024 -u 1024 --title \"$hostname / NIC $nic\" "; -# -l0 -u1048576 +# -l0 -u1048576 # # $def[1] = "DEF:rx_bytes=$RRDFILE[1]:$DS[1]:AVERAGE " ; diff -Nru check-mk-1.2.2p3/check_mk-steelhead_connections.php check-mk-1.2.6p12/check_mk-steelhead_connections.php --- check-mk-1.2.2p3/check_mk-steelhead_connections.php 2013-03-04 11:48:42.000000000 +0000 +++ check-mk-1.2.6p12/check_mk-steelhead_connections.php 2015-06-24 09:48:39.000000000 +0000 @@ -6,7 +6,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. diff -Nru check-mk-1.2.2p3/check_mk-stulz_humidity.php check-mk-1.2.6p12/check_mk-stulz_humidity.php --- check-mk-1.2.2p3/check_mk-stulz_humidity.php 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/check_mk-stulz_humidity.php 2015-06-24 09:48:39.000000000 +0000 @@ -0,0 +1,38 @@ + diff -Nru check-mk-1.2.2p3/check_mk-stulz_temp.php check-mk-1.2.6p12/check_mk-stulz_temp.php --- check-mk-1.2.2p3/check_mk-stulz_temp.php 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/check_mk-stulz_temp.php 2015-06-24 09:48:39.000000000 +0000 @@ -0,0 +1,38 @@ + diff -Nru check-mk-1.2.2p3/check_mk-sylo.php check-mk-1.2.6p12/check_mk-sylo.php --- check-mk-1.2.2p3/check_mk-sylo.php 2013-03-04 11:48:42.000000000 +0000 +++ check-mk-1.2.6p12/check_mk-sylo.php 2015-06-24 09:48:39.000000000 +0000 @@ -6,7 +6,7 @@ # | | |___| | | | __/ (__| < | | | | . 
\ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. diff -Nru check-mk-1.2.2p3/check_mk-systemtime.php check-mk-1.2.6p12/check_mk-systemtime.php --- check-mk-1.2.2p3/check_mk-systemtime.php 2013-03-04 11:48:42.000000000 +0000 +++ check-mk-1.2.6p12/check_mk-systemtime.php 2015-06-24 09:48:39.000000000 +0000 @@ -6,7 +6,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. @@ -35,15 +35,15 @@ "CDEF:offsetabs_min=offset_min,ABS ". "CDEF:offsetabs_max=offset_max,ABS ". "CDEF:offsetabs=offset_min,offset_max,MAX ". - "AREA:offmax#4080ff:\"time offset \" ". - "AREA:offmin#4080ff ". - "LINE1:offmin#2060d0: ". - "LINE1:offmax#2060d0: ". + "AREA:offmax#4080ff:\"time offset \" ". + "AREA:offmin#4080ff ". + "LINE1:offmin#2060d0: ". + "LINE1:offmax#2060d0: ". "HRULE:0#c0c0c0: ". "HRULE:$WARN[1]#ffff00:\"\" ". "HRULE:-$WARN[1]#ffff00:\"Warning\\: +/- $WARN[1] s \" ". - "HRULE:$CRIT[1]#ff0000:\"\" ". - "HRULE:-$CRIT[1]#ff0000:\"Critical\\: +/- $CRIT[1] s \\n\" ". + "HRULE:$CRIT[1]#ff0000:\"\" ". + "HRULE:-$CRIT[1]#ff0000:\"Critical\\: +/- $CRIT[1] s \\n\" ". "GPRINT:offset_avg:LAST:\"current\: %.1lf s\" ". "GPRINT:offsetabs:MAX:\"max(+/-)\: %.1lf s \" ". "GPRINT:offsetabs:AVERAGE:\"avg(+/-)\: %.1lf s\" ". diff -Nru check-mk-1.2.2p3/check_mk-tcp_conn_stats.php check-mk-1.2.6p12/check_mk-tcp_conn_stats.php --- check-mk-1.2.2p3/check_mk-tcp_conn_stats.php 2013-03-04 11:48:42.000000000 +0000 +++ check-mk-1.2.6p12/check_mk-tcp_conn_stats.php 2015-06-24 09:48:39.000000000 +0000 @@ -6,7 +6,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. diff -Nru check-mk-1.2.2p3/check_mk_templates.cfg check-mk-1.2.6p12/check_mk_templates.cfg --- check-mk-1.2.2p3/check_mk_templates.cfg 2013-11-05 09:22:37.000000000 +0000 +++ check-mk-1.2.6p12/check_mk_templates.cfg 2015-09-21 10:59:54.000000000 +0000 @@ -5,7 +5,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. 
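[Editorial note on the hunks below: host checks no longer reuse the service
ping commands; dedicated check-mk-host-* commands are introduced, including an
always-OK dummy and a TCP based host check. A host definition could reference
the TCP variant roughly like this - host name and port are made up for
illustration, 6556 being the usual Check_MK agent port:

    define host {
        host_name      myhost
        check_command  check-mk-host-tcp!6556
    }

In addition, the notification command exports a number of new NOTIFY_*
environment variables to check_mk --notify.]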
@@ -49,7 +49,7 @@ # | | / _ \| '_ \| __/ _` |/ __| __/ __| # | |__| (_) | | | | || (_| | (__| |_\__ \ # \____\___/|_| |_|\__\__,_|\___|\__|___/ -# +# # This contact is only needed while you have not configured contacts # for your hosts and services with the variables @@ -95,7 +95,7 @@ retain_status_information 1 retain_nonstatus_information 1 process_perf_data 0 - check_command check-mk-ping + check_command check-mk-host-ping check_interval 1 check_period 24X7 max_check_attempts 1 @@ -133,7 +133,7 @@ define host { name check_mk_cluster use check_mk_default - check_command check-mk-ping-cluster + check_command check-mk-host-ping-cluster register 0 } @@ -151,7 +151,7 @@ # \___ \ / _ \ '__\ \ / / |/ __/ _ \/ __| # ___) | __/ | \ V /| | (_| __/\__ \ # |____/ \___|_| \_/ |_|\___\___||___/ -# +# # Template used by all other check_mk templates define service { @@ -230,7 +230,7 @@ } # This template is used for aggregated services (on the -# summary hosts). The never have performance data. A +# summary hosts). The never have performance data. A # check command must be defined - even if never called. # Notifications for aggregated services are disabled. # Otherwise you would get them twice. @@ -267,7 +267,7 @@ # This template is used by service dependencies created via # the configuration variable service_dependencies. Since we -# only deal with passive checks the dependencies are not +# only deal with passive checks the dependencies are not # used to suppress service checks. But they are very useful # for suppressing notifications. If you set inherits_parent # to 1, then if A depends on B and B depends on C, A will @@ -288,7 +288,7 @@ # | | / _ \| '_ ` _ \| '_ ` _ \ / _` | '_ \ / _` / __| # | |__| (_) | | | | | | | | | | | (_| | | | | (_| \__ \ # \____\___/|_| |_| |_|_| |_| |_|\__,_|_| |_|\__,_|___/ -# +# # Calling check_mk with precompiled checks @@ -301,7 +301,7 @@ # checks (which is not recommended): # define command { # command_name check-mk -# command_line @BINDIR@/check_mk $HOSTNAME$ $HOSTADDRESS$ +# command_line @BINDIR@/check_mk $HOSTNAME$ $HOSTADDRESS$ #} # Inventory check @@ -315,17 +315,39 @@ command_line echo "DUMMY - Always OK" } +# Commands for services of PING-only hosts define command { command_name check-mk-ping command_line @CHECK_ICMP@ $ARG1$ $HOSTADDRESS$ } -# Check for clusters: it is UP if at least one node is up define command { command_name check-mk-ping-cluster command_line @CHECK_ICMP@ -m 1 $ARG1$ $_HOSTNODEIPS$ } +# Host check commands +define command { + command_name check-mk-host-ping + command_line @CHECK_ICMP@ $ARG1$ $HOSTADDRESS$ +} + +define command { + command_name check-mk-host-ping-cluster + command_line @CHECK_ICMP@ -m 1 $ARG1$ $_HOSTNODEIPS$ +} + +define command { + command_name check-mk-host-ok + command_line echo "OK - Host is always assumed to be up" +} + +define command { + command_name check-mk-host-tcp + command_line $USER1$/check_tcp -H $HOSTADDRESS$ -p $ARG1$ +} + + # General notification script. Details can be configured # in main.mk via notification_... 
options define command { @@ -342,23 +364,29 @@ NOTIFY_HOSTNAME='$HOSTNAME$' \ NOTIFY_HOSTALIAS='$HOSTALIAS$' \ NOTIFY_HOSTADDRESS='$HOSTADDRESS$' \ + NOTIFY_HOSTATTEMPT='$HOSTATTEMPT$' \ NOTIFY_LASTHOSTSTATE='$LASTHOSTSTATE$' \ + NOTIFY_LASTHOSTSTATEID='$LASTHOSTSTATEID$' \ NOTIFY_LASTHOSTSTATECHANGE='$LASTHOSTSTATECHANGE$' \ + NOTIFY_LASTHOSTUP='$LASTHOSTUP$' \ NOTIFY_HOSTSTATE='$HOSTSTATE$' \ NOTIFY_HOSTSTATEID='$HOSTSTATEID$' \ - NOTIFY_HOSTCHECKCOMMAND='$HOSTCHECKCOMMAND$' \ + NOTIFY_HOSTCHECKCOMMAND="$HOSTCHECKCOMMAND$" \ NOTIFY_HOSTOUTPUT='$HOSTOUTPUT$' \ NOTIFY_HOSTPERFDATA='$HOSTPERFDATA$' \ NOTIFY_LONGHOSTOUTPUT='$LONGHOSTOUTPUT$' \ NOTIFY_SERVICEDESC='$SERVICEDESC$' \ NOTIFY_LASTSERVICESTATE='$LASTSERVICESTATE$' \ + NOTIFY_LASTSERVICESTATEID='$LASTSERVICESTATEID$' \ NOTIFY_LASTSERVICESTATECHANGE='$LASTSERVICESTATECHANGE$' \ + NOTIFY_LASTSERVICEOK='$LASTSERVICEOK$' \ + NOTIFY_SERVICEATTEMPT='$SERVICEATTEMPT$' \ NOTIFY_SERVICESTATE='$SERVICESTATE$' \ NOTIFY_SERVICESTATEID='$SERVICESTATEID$' \ NOTIFY_SERVICEOUTPUT='$SERVICEOUTPUT$' \ NOTIFY_LONGSERVICEOUTPUT='$LONGSERVICEOUTPUT$' \ NOTIFY_SERVICEPERFDATA='$SERVICEPERFDATA$' \ - NOTIFY_SERVICECHECKCOMMAND='$SERVICECHECKCOMMAND$' \ + NOTIFY_SERVICECHECKCOMMAND="$SERVICECHECKCOMMAND$" \ NOTIFY_DATE='$DATE$' \ NOTIFY_SHORTDATETIME='$SHORTDATETIME$' \ NOTIFY_LONGDATETIME='$LONGDATETIME$' \ @@ -367,8 +395,19 @@ NOTIFY_NOTIFICATIONAUTHOR='$NOTIFICATIONAUTHOR$' \ NOTIFY_NOTIFICATIONAUTHORNAME='$NOTIFICATIONAUTHORNAME$' \ NOTIFY_NOTIFICATIONAUTHORALIAS='$NOTIFICATIONAUTHORALIAS$' \ + NOTIFY_SERVICEACKAUTHOR='$SERVICEACKAUTHOR$' \ + NOTIFY_SERVICEACKCOMMENT='$SERVICEACKCOMMENT$' \ + NOTIFY_SERVICEGROUPNAMES='$SERVICEGROUPNAMES$' \ + NOTIFY_HOSTACKAUTHOR='$HOSTACKAUTHOR$' \ + NOTIFY_HOSTACKCOMMENT='$HOSTACKCOMMENT$' \ + NOTIFY_HOSTGROUPNAMES='$HOSTGROUPNAMES$' \ NOTIFY_HOSTTAGS='$_HOSTTAGS$' \ - @BINDIR@/check_mk --notify + NOTIFY_HOST_SL='$_HOSTEC_SL$' \ + NOTIFY_SVC_SL='$_SERVICEEC_SL$' \ + NOTIFY_SERVICE_SL='$_SERVICEEC_SL$' \ + NOTIFY_HOST_EC_CONTACT='$_HOSTEC_CONTACT$' \ + NOTIFY_SERVICE_EC_CONTACT='$_SERVICEEC_CONTACT$' \ + @BINDIR@/check_mk --notify } @@ -378,7 +417,7 @@ # | | | | | | | | | __/ |_) | __/ | | | (_) | (_| \__ \ # |_| |_|_| |_| |_|\___| .__/ \___|_| |_|\___/ \__,_|___/ # |_| -# +# # Make sure, timeperiod used in default template is available diff -Nru check-mk-1.2.2p3/check_mk-tsm_stagingpools.php check-mk-1.2.6p12/check_mk-tsm_stagingpools.php --- check-mk-1.2.2p3/check_mk-tsm_stagingpools.php 2013-03-04 11:48:42.000000000 +0000 +++ check-mk-1.2.6p12/check_mk-tsm_stagingpools.php 2015-06-24 09:48:39.000000000 +0000 @@ -6,7 +6,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. diff -Nru check-mk-1.2.2p3/check_mk-ucd_cpu_load.php check-mk-1.2.6p12/check_mk-ucd_cpu_load.php --- check-mk-1.2.2p3/check_mk-ucd_cpu_load.php 2013-11-05 09:22:37.000000000 +0000 +++ check-mk-1.2.6p12/check_mk-ucd_cpu_load.php 2015-09-21 10:59:54.000000000 +0000 @@ -6,7 +6,7 @@ # | | |___| | | | __/ (__| < | | | | . 
\ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. @@ -23,19 +23,49 @@ # to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, # Boston, MA 02110-1301 USA. -$opt[1] = "--vertical-label Load -l0 -u 1 --title \"CPU Load for $hostname / $servicedesc\" "; +# The number of data source various due to different +# settings (such as averaging). We rather work with names +# than with numbers. +$RRD = array(); +foreach ($NAME as $i => $n) { + $RRD[$n] = "$RRDFILE[$i]:$DS[$i]:MAX"; + $WARN[$n] = $WARN[$i]; + $CRIT[$n] = $CRIT[$i]; + $MIN[$n] = $MIN[$i]; + $MAX[$n] = $MAX[$i]; +} -$def[1] = "DEF:var1=$RRDFILE[1]:$DS[1]:MAX " ; -$def[1] .= "DEF:var2=$RRDFILE[2]:$DS[2]:MAX " ; -$def[1] .= "DEF:var3=$RRDFILE[3]:$DS[3]:MAX " ; -$def[1] .= "HRULE:$WARN[1]#FFFF00 "; -$def[1] .= "HRULE:$CRIT[1]#FF0000 "; -$def[1] .= "AREA:var1#60c0e0:\"Load average 1 min \" " ; -$def[1] .= "GPRINT:var1:LAST:\"%6.2lf last\" " ; -$def[1] .= "GPRINT:var1:AVERAGE:\"%6.2lf avg\" " ; -$def[1] .= "GPRINT:var1:MAX:\"%6.2lf max\\n\" "; -$def[1] .= "LINE:var3#004080:\"Load average 15 min \" " ; -$def[1] .= "GPRINT:var3:LAST:\"%6.2lf last\" " ; -$def[1] .= "GPRINT:var3:AVERAGE:\"%6.2lf avg\" " ; -$def[1] .= "GPRINT:var3:MAX:\"%6.2lf max\\n\" " ; +$opt[1] = "--vertical-label 'Load average' -l0 -u 1 --title \"CPU Load for $hostname\" "; + +$def[1] = "" + . "DEF:load1=$RRD[load1] " + . "AREA:load1#60c0e0:\"Load average 1 min \" " + . "GPRINT:load1:LAST:\"%6.2lf last\" " + . "GPRINT:load1:AVERAGE:\"%6.2lf avg\" " + . "GPRINT:load1:MAX:\"%6.2lf max\\n\" " + + . "DEF:load15=$RRD[load15] " + . "LINE:load15#004080:\"Load average 15 min \" " + . "GPRINT:load15:LAST:\"%6.2lf last\" " + . "GPRINT:load15:AVERAGE:\"%6.2lf avg\" " + . "GPRINT:load15:MAX:\"%6.2lf max\\n\" " + . ""; + +if ($WARN[1]) { + $def[1] .= "" + . "HRULE:$WARN[1]#FFFF00 " + . "HRULE:$CRIT[1]#FF0000 " + . ""; +} + +if ($MAX[1]) { + $def[1] .= "COMMENT:\" Number of CPUs $MAX[1]\" "; +} + +if (isset($RRD["predict_load15"])) { + $def[1] .= "" + . "DEF:predict=$RRD[predict_load15] " + . "LINE:predict#ff0000:\"Reference for prediction \\n\" " + . ""; +} ?> diff -Nru check-mk-1.2.2p3/check_mk-ucd_cpu_util.php check-mk-1.2.6p12/check_mk-ucd_cpu_util.php --- check-mk-1.2.2p3/check_mk-ucd_cpu_util.php 2013-03-04 11:48:42.000000000 +0000 +++ check-mk-1.2.6p12/check_mk-ucd_cpu_util.php 2015-09-21 10:59:54.000000000 +0000 @@ -6,7 +6,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. @@ -40,24 +40,24 @@ $def[1] .= "" . "COMMENT:Average\: " - . "AREA:system#ff6000:\"System\" " - . "GPRINT:system:AVERAGE:\"%2.1lf%% \" " - . "AREA:user#60f020:\"User\":STACK " - . "GPRINT:user:AVERAGE:\"%2.1lf%% \" " - . "AREA:wait#00b0c0:\"$thirdname\":STACK " - . "GPRINT:wait:AVERAGE:\"%2.1lf%% \" " - . "LINE:sum#004080:\"Total\" " - . "GPRINT:sum:AVERAGE:\"%2.1lf%% \\n\" " + . "AREA:system#ff6000:\"System\" " + . "GPRINT:system:AVERAGE:\"%4.1lf%% \" " + . "AREA:user#60f020:\"User\":STACK " + . "GPRINT:user:AVERAGE:\"%4.1lf%% \" " + . "AREA:wait#00b0c0:\"$thirdname\":STACK " + . 
"GPRINT:wait:AVERAGE:\"%4.1lf%% \" " + . "LINE:sum#004080:\"Total\" " + . "GPRINT:sum:AVERAGE:\"%4.1lf%% \\n\" " . "COMMENT:\"Last\: \" " - . "AREA:system#ff6000:\"System\" " - . "GPRINT:system:LAST:\"%2.1lf%% \" " - . "AREA:user#60f020:\"User\":STACK " - . "GPRINT:user:LAST:\"%2.1lf%% \" " - . "AREA:wait#00b0c0:\"$thirdname\":STACK " - . "GPRINT:wait:LAST:\"%2.1lf%% \" " - . "LINE:sum#004080:\"Total\" " - . "GPRINT:sum:LAST:\"%2.1lf%% \\n\" " + . "AREA:system#ff6000:\"System\" " + . "GPRINT:system:LAST:\"%4.1lf%% \" " + . "AREA:user#60f020:\"User\":STACK " + . "GPRINT:user:LAST:\"%4.1lf%% \" " + . "AREA:wait#00b0c0:\"$thirdname\":STACK " + . "GPRINT:wait:LAST:\"%4.1lf%% \" " + . "LINE:sum#004080:\"Total\" " + . "GPRINT:sum:LAST:\"%4.1lf%% \\n\" " .""; diff -Nru check-mk-1.2.2p3/check_mk-ucs_bladecenter_fans.temp.php check-mk-1.2.6p12/check_mk-ucs_bladecenter_fans.temp.php --- check-mk-1.2.2p3/check_mk-ucs_bladecenter_fans.temp.php 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/check_mk-ucs_bladecenter_fans.temp.php 2015-06-24 09:48:39.000000000 +0000 @@ -0,0 +1,38 @@ + diff -Nru check-mk-1.2.2p3/check_mk-ucs_bladecenter_if.php check-mk-1.2.6p12/check_mk-ucs_bladecenter_if.php --- check-mk-1.2.2p3/check_mk-ucs_bladecenter_if.php 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/check_mk-ucs_bladecenter_if.php 2015-06-24 09:48:39.000000000 +0000 @@ -0,0 +1,226 @@ + $base * $base * $base) { + $scale = $base * $base * $base; + $bwuom = 'G'; +} +elseif ($bandwidth > $base * $base) { + $scale = $base * $base; + $bwuom = 'M'; +} +elseif ($bandwidth > $base) { + $scale = $base; + $bwuom = 'k'; +} +else { + $scale = 1; + $bwuom = ' '; +} + +$warn /= $scale; +$crit /= $scale; +$bandwidth /= $scale; + +$vertical_label_name = $bwuom . $unit . "/sec"; + +$range = min(10, $bandwidth); + + +$bandwidthInfo = ""; +if ($bandwidth > 0){ + $bandwidthInfo = " at " . sprintf("%.1f", $bandwidth) . " ${bwuom}${unit}/s"; +} +$ds_name[1] = 'Used bandwidth'; +$opt[1] = "--vertical-label \"$vertical_label_name\" -l -$range -u $range -X0 -b 1024 --title \"Used bandwidth $hostname / $servicedesc$bandwidthInfo\" "; +$def[1] = + "HRULE:0#c0c0c0 "; +if ($bandwidth) + $def[1] .= "HRULE:$bandwidth#808080:\"Port speed\: " . sprintf("%10.1f", $bandwidth) . " ".$bwuom."$unit/s\\n\" ". + "HRULE:-$bandwidth#808080: "; +if ($warn) + $def[1] .= "HRULE:$warn#ffff00:\"Warning\: " . sprintf("%13.1f", $warn) . " ".$bwuom."$unit/s\\n\" ". + "HRULE:-$warn#ffff00: "; +if ($crit) + $def[1] .= "HRULE:$crit#ff0000:\"Critical\: " . sprintf("%13.1f", $crit) . " ".$bwuom."$unit/s\\n\" ". + "HRULE:-$crit#ff0000: "; + + $def[1] .= "". + # incoming + "DEF:inbytes=$RRDFILE[1]:$DS[1]:MAX ". + "CDEF:intraffic=inbytes,$unit_multiplier,* ". + "CDEF:inmb=intraffic,$scale,/ ". + "AREA:inmb#00e060:\"in \" ". + "GPRINT:intraffic:LAST:\"%7.1lf %s$unit/s last\" ". + "GPRINT:intraffic:AVERAGE:\"%7.1lf %s$unit/s avg\" ". + "GPRINT:intraffic:MAX:\"%7.1lf %s$unit/s max\\n\" ". + "VDEF:inperc=intraffic,95,PERCENTNAN ". + "VDEF:inpercmb=inmb,95,PERCENTNAN ". + "LINE:inpercmb#008f00:\"95% percentile\" ". + "GPRINT:inperc:\"%7.1lf %s$unit/s\\n\" ". + + # outgoing + "DEF:outbytes=$RRDFILE[6]:$DS[6]:MAX ". + "CDEF:outtraffic=outbytes,$unit_multiplier,* ". + "CDEF:minusouttraffic=outtraffic,-1,* ". + "CDEF:outmb=outtraffic,$scale,/ ". + "CDEF:minusoutmb=0,outmb,- ". + "AREA:minusoutmb#0080e0:\"out \" ". + "GPRINT:outtraffic:LAST:\"%7.1lf %s$unit/s last\" ". + "GPRINT:outtraffic:AVERAGE:\"%7.1lf %s$unit/s avg\" ". 
+ "GPRINT:outtraffic:MAX:\"%7.1lf %s$unit/s max\\n\" ". + "VDEF:outperc=minusouttraffic,5,PERCENTNAN ". + "VDEF:outpercmb=minusoutmb,5,PERCENTNAN ". + "LINE:outpercmb#00008f:\"95% percentile\" ". + "GPRINT:outperc:\"%7.1lf %s$unit/s\\n\" ". + + ""; + +if (isset($DS[12])) { + $def[1] .= + "DEF:inbytesa=$RRDFILE[12]:$DS[12]:MAX ". + "DEF:outbytesa=$RRDFILE[13]:$DS[13]:MAX ". + "CDEF:intraffica=inbytesa,$unit_multiplier,* ". + "CDEF:outtraffica=outbytesa,$unit_multiplier,* ". + "CDEF:inmba=intraffica,1048576,/ ". + "CDEF:outmba=outtraffica,1048576,/ ". + "CDEF:minusoutmba=0,outmba,- ". + "LINE:inmba#00a060:\"in (avg) \" ". + "GPRINT:intraffica:LAST:\"%6.1lf %s$unit/s last\" ". + "GPRINT:intraffica:AVERAGE:\"%6.1lf %s$unit/s avg\" ". + "GPRINT:intraffica:MAX:\"%6.1lf %s$unit/s max\\n\" ". + "LINE:minusoutmba#0060c0:\"out (avg) \" ". + "GPRINT:outtraffica:LAST:\"%6.1lf %s$unit/s last\" ". + "GPRINT:outtraffica:AVERAGE:\"%6.1lf %s$unit/s avg\" ". + "GPRINT:outtraffica:MAX:\"%6.1lf %s$unit/s max\\n\" "; +} + +# Graph 2: packets +$ds_name[2] = 'Packets'; +$opt[2] = "--vertical-label \"packets/sec\" --title \"Packets $hostname / $servicedesc\" "; +$def[2] = + # ingoing + "HRULE:0#c0c0c0 ". + "DEF:inu=$RRDFILE[2]:$DS[2]:MAX ". + "DEF:innu=$RRDFILE[3]:$DS[3]:MAX ". + "CDEF:in=inu,innu,+ ". + "AREA:inu#00ffc0:\"in unicast \" ". + "GPRINT:inu:LAST:\"%9.1lf/s last \" ". + "GPRINT:inu:AVERAGE:\"%9.1lf/s avg \" ". + "GPRINT:inu:MAX:\"%9.1lf/s max\\n\" ". + "AREA:innu#00c080:\"in broadcast/multicast \":STACK ". + "GPRINT:innu:LAST:\"%9.1lf/s last \" ". + "GPRINT:innu:AVERAGE:\"%9.1lf/s avg \" ". + "GPRINT:innu:MAX:\"%9.1lf/s max\\n\" ". + "VDEF:inperc=in,95,PERCENTNAN ". + "LINE:inperc#00cf00:\"in 95% percentile \" ". + "GPRINT:inperc:\"%9.1lf/s\\n\" ". + + # outgoing + "DEF:outu=$RRDFILE[7]:$DS[7]:MAX ". + "DEF:outnu=$RRDFILE[8]:$DS[8]:MAX ". + "CDEF:minusoutu=0,outu,- ". + "CDEF:minusoutnu=0,outnu,- ". + "CDEF:minusout=minusoutu,minusoutnu,+ ". + "AREA:minusoutu#00c0ff:\"out unicast \" ". + "GPRINT:outu:LAST:\"%9.1lf/s last \" ". + "GPRINT:outu:AVERAGE:\"%9.1lf/s avg \" ". + "GPRINT:outu:MAX:\"%9.1lf/s max\\n\" ". + "AREA:minusoutnu#0080c0:\"out broadcast/multicast\":STACK ". + "GPRINT:outnu:LAST:\"%9.1lf/s last \" ". + "GPRINT:outnu:AVERAGE:\"%9.1lf/s avg \" ". + "GPRINT:outnu:MAX:\"%9.1lf/s max\\n\" ". + "VDEF:outperc=minusout,5,PERCENTNAN ". + "LINE:outperc#0000cf:\"out 95% percentile \" ". + "GPRINT:outperc:\"%9.1lf/s\\n\" ". + ""; + +# Graph 3: errors and discards +$ds_name[3] = 'Errors and discards'; +$opt[3] = "--vertical-label \"packets/sec\" -X0 --title \"Problems $hostname / $servicedesc\" "; +$def[3] = + "HRULE:0#c0c0c0 ". + "DEF:inerr=$RRDFILE[5]:$DS[5]:MAX ". + "DEF:indisc=$RRDFILE[4]:$DS[4]:MAX ". + "AREA:inerr#ff0000:\"in errors \" ". + "GPRINT:inerr:LAST:\"%7.2lf/s last \" ". + "GPRINT:inerr:AVERAGE:\"%7.2lf/s avg \" ". + "GPRINT:inerr:MAX:\"%7.2lf/s max\\n\" ". + "AREA:indisc#ff8000:\"in discards \":STACK ". + "GPRINT:indisc:LAST:\"%7.2lf/s last \" ". + "GPRINT:indisc:AVERAGE:\"%7.2lf/s avg \" ". + "GPRINT:indisc:MAX:\"%7.2lf/s max\\n\" ". + "DEF:outerr=$RRDFILE[10]:$DS[10]:MAX ". + "DEF:outdisc=$RRDFILE[9]:$DS[9]:MAX ". + "CDEF:minusouterr=0,outerr,- ". + "CDEF:minusoutdisc=0,outdisc,- ". + "AREA:minusouterr#ff0080:\"out errors \" ". + "GPRINT:outerr:LAST:\"%7.2lf/s last \" ". + "GPRINT:outerr:AVERAGE:\"%7.2lf/s avg \" ". + "GPRINT:outerr:MAX:\"%7.2lf/s max\\n\" ". + "AREA:minusoutdisc#ff8080:\"out discards \":STACK ". + "GPRINT:outdisc:LAST:\"%7.2lf/s last \" ". 
+ "GPRINT:outdisc:AVERAGE:\"%7.2lf/s avg \" ". + "GPRINT:outdisc:MAX:\"%7.2lf/s max\\n\" "; +?> diff -Nru check-mk-1.2.2p3/check_mk-ucs_bladecenter_psu.chassis_temp.php check-mk-1.2.6p12/check_mk-ucs_bladecenter_psu.chassis_temp.php --- check-mk-1.2.2p3/check_mk-ucs_bladecenter_psu.chassis_temp.php 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/check_mk-ucs_bladecenter_psu.chassis_temp.php 2015-06-24 09:48:39.000000000 +0000 @@ -0,0 +1,38 @@ + diff -Nru check-mk-1.2.2p3/check_mk-ucs_bladecenter_psu.switch_power.php check-mk-1.2.6p12/check_mk-ucs_bladecenter_psu.switch_power.php --- check-mk-1.2.2p3/check_mk-ucs_bladecenter_psu.switch_power.php 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/check_mk-ucs_bladecenter_psu.switch_power.php 2015-06-24 09:48:39.000000000 +0000 @@ -0,0 +1,114 @@ +DS as $KEY=>$VAL) { + + $maximum = ""; + $minimum = ""; + $critical = ""; + $crit_min = ""; + $crit_max = ""; + $warning = ""; + $warn_max = ""; + $warn_min = ""; + $vlabel = " "; + $lower = ""; + $upper = ""; + + if ($VAL['WARN'] != "" && is_numeric($VAL['WARN']) ){ + $warning = $VAL['WARN']; + } + if ($VAL['WARN_MAX'] != "" && is_numeric($VAL['WARN_MAX']) ) { + $warn_max = $VAL['WARN_MAX']; + } + if ( $VAL['WARN_MIN'] != "" && is_numeric($VAL['WARN_MIN']) ) { + $warn_min = $VAL['WARN_MIN']; + } + if ( $VAL['CRIT'] != "" && is_numeric($VAL['CRIT']) ) { + $critical = $VAL['CRIT']; + } + if ( $VAL['CRIT_MAX'] != "" && is_numeric($VAL['CRIT_MAX']) ) { + $crit_max = $VAL['CRIT_MAX']; + } + if ( $VAL['CRIT_MIN'] != "" && is_numeric($VAL['CRIT_MIN']) ) { + $crit_min = $VAL['CRIT_MIN']; + } + if ( $VAL['MIN'] != "" && is_numeric($VAL['MIN']) ) { + $lower = " --lower=" . $VAL['MIN']; + $minimum = $VAL['MIN']; + } + if ( $VAL['MAX'] != "" && is_numeric($VAL['MAX']) ) { + $maximum = $VAL['MAX']; + } + if ($VAL['UNIT'] == "%%") { + $vlabel = "%"; + $upper = " --upper=101 "; + $lower = " --lower=0 "; + } + else { + $vlabel = $VAL['UNIT']; + } + + $opt[$KEY] = '--vertical-label "' . $vlabel . '" --title "' . $this->MACRO['DISP_HOSTNAME'] . ' / ' . $this->MACRO['DISP_SERVICEDESC'] . '"' . $upper . 
$lower; + $ds_name[$KEY] = $VAL['LABEL']; + $def[$KEY] = rrd::def ("var1", $VAL['RRDFILE'], $VAL['DS'], "AVERAGE"); + $def[$KEY] .= rrd::gradient("var1", "3152A5", "BDC6DE", rrd::cut($VAL['NAME'],16), 20); + $def[$KEY] .= rrd::line1 ("var1", $_LINE ); + $def[$KEY] .= rrd::gprint ("var1", array("LAST","MAX","AVERAGE"), "%3.2lf %S".$VAL['UNIT']); + if ($warning != "") { + $def[$KEY] .= rrd::hrule($warning, $_WARNRULE, "Warning $warning \\n"); + } + if ($warn_min != "") { + $def[$KEY] .= rrd::hrule($warn_min, $_WARNRULE, "Warning (min) $warn_min \\n"); + } + if ($warn_max != "") { + $def[$KEY] .= rrd::hrule($warn_max, $_WARNRULE, "Warning (max) $warn_max \\n"); + } + if ($critical != "") { + $def[$KEY] .= rrd::hrule($critical, $_CRITRULE, "Critical $critical \\n"); + } + if ($crit_min != "") { + $def[$KEY] .= rrd::hrule($crit_min, $_CRITRULE, "Critical (min) $crit_min \\n"); + } + if ($crit_max != "") { + $def[$KEY] .= rrd::hrule($crit_max, $_CRITRULE, "Critical (max) $crit_max \\n"); + } +} +?> diff -Nru check-mk-1.2.2p3/check_mk-ups_bat_temp.php check-mk-1.2.6p12/check_mk-ups_bat_temp.php --- check-mk-1.2.2p3/check_mk-ups_bat_temp.php 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/check_mk-ups_bat_temp.php 2015-06-24 09:48:39.000000000 +0000 @@ -0,0 +1,38 @@ + diff -Nru check-mk-1.2.2p3/check_mk-ups_outphase.php check-mk-1.2.6p12/check_mk-ups_outphase.php --- check-mk-1.2.2p3/check_mk-ups_outphase.php 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/check_mk-ups_outphase.php 2015-06-24 09:48:39.000000000 +0000 @@ -0,0 +1,88 @@ + $n) { + $RRD[$n] = $RRDFILE[$i].":".$DS[$i].":MAX"; + $WARN[$n] = $WARN[$i]; + $CRIT[$n] = $CRIT[$i]; + $MIN[$n] = $MIN[$i]; + $MAX[$n] = $MAX[$i]; +} + +$nr = 0; + +# Paint graph for voltage, if check supports this + +if (isset($RRD["voltage"])) { + $nr++; + $opt[$nr] = "--vertical-label 'Voltage (V)' --title \"Output voltage for $hostname / $servicedesc\" "; + + $def[$nr] = "" + . "DEF:voltage=$RRD[voltage] " + . "LINE:voltage#003377:\"Output voltage\" " + . "GPRINT:voltage:LAST:\"%6.0lf V last\" " + . "GPRINT:voltage:AVERAGE:\"%6.0lf V avg\" " + . "GPRINT:voltage:MAX:\"%6.0lf V max\\n\" " + . "HRULE:$WARN[voltage]#FFFF00:\"Warning\: $WARN[voltage] V\" " + . "HRULE:$CRIT[voltage]#FF0000:\"Critical\: $CRIT[voltage] V\\n\" " + . ""; +} + +# Paint graph for current, if check supports this + +if (isset($RRD["current"])) { + $nr++; + $opt[$nr] = "--vertical-label 'Current (A)' --title \"Output current for $hostname / $servicedesc\" "; + + $def[$nr] = "" + . "DEF:current=$RRD[current] " + . "LINE:current#007733:\"Output current\" " + . "GPRINT:current:LAST:\"%6.0lf A last\" " + . "GPRINT:current:AVERAGE:\"%6.0lf A avg\" " + . "GPRINT:current:MAX:\"%6.0lf A max\\n\" " + . ""; +} + +# Paint graph for percentual load, if check supports this + +if (isset($RRD["load"])) { + $nr++; + $opt[$nr] = "--vertical-label 'Load (%)' -l0 -u100 --title \"Output load for $hostname / $servicedesc\" "; + + $def[$nr] = "" + . "DEF:load=$RRD[load] " + . "AREA:load#8050ff:\"Output load\" " + . "LINE:load#5030aa " + . "GPRINT:load:LAST:\"%6.0lf %% last\" " + . "GPRINT:load:AVERAGE:\"%6.0lf %% avg\" " + . "GPRINT:load:MAX:\"%6.2lf %% max\\n\" " + . "HRULE:$WARN[load]#FFFF00:\"Warning\: $WARN[load] %\" " + . "HRULE:$CRIT[load]#FF0000:\"Critical\: $CRIT[load] %\\n\" " + . 
""; +} diff -Nru check-mk-1.2.2p3/check_mk-ups_socomec_outphase.php check-mk-1.2.6p12/check_mk-ups_socomec_outphase.php --- check-mk-1.2.2p3/check_mk-ups_socomec_outphase.php 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/check_mk-ups_socomec_outphase.php 2015-06-24 09:48:39.000000000 +0000 @@ -0,0 +1,88 @@ + $n) { + $RRD[$n] = $RRDFILE[$i].":".$DS[$i].":MAX"; + $WARN[$n] = $WARN[$i]; + $CRIT[$n] = $CRIT[$i]; + $MIN[$n] = $MIN[$i]; + $MAX[$n] = $MAX[$i]; +} + +$nr = 0; + +# Paint graph for voltage, if check supports this + +if (isset($RRD["voltage"])) { + $nr++; + $opt[$nr] = "--vertical-label 'Voltage (V)' --title \"Output voltage for $hostname / $servicedesc\" "; + + $def[$nr] = "" + . "DEF:voltage=$RRD[voltage] " + . "LINE:voltage#003377:\"Output voltage\" " + . "GPRINT:voltage:LAST:\"%6.0lf V last\" " + . "GPRINT:voltage:AVERAGE:\"%6.0lf V avg\" " + . "GPRINT:voltage:MAX:\"%6.0lf V max\\n\" " + . "HRULE:$WARN[voltage]#FFFF00:\"Warning\: $WARN[voltage] V\" " + . "HRULE:$CRIT[voltage]#FF0000:\"Critical\: $CRIT[voltage] V\\n\" " + . ""; +} + +# Paint graph for current, if check supports this + +if (isset($RRD["current"])) { + $nr++; + $opt[$nr] = "--vertical-label 'Current (A)' --title \"Output current for $hostname / $servicedesc\" "; + + $def[$nr] = "" + . "DEF:current=$RRD[current] " + . "LINE:current#007733:\"Output current\" " + . "GPRINT:current:LAST:\"%6.0lf A last\" " + . "GPRINT:current:AVERAGE:\"%6.0lf A avg\" " + . "GPRINT:current:MAX:\"%6.0lf A max\\n\" " + . ""; +} + +# Paint graph for percentual load, if check supports this + +if (isset($RRD["load"])) { + $nr++; + $opt[$nr] = "--vertical-label 'Load (%)' -l0 -u100 --title \"Output load for $hostname / $servicedesc\" "; + + $def[$nr] = "" + . "DEF:load=$RRD[load] " + . "AREA:load#8050ff:\"Output load\" " + . "LINE:load#5030aa " + . "GPRINT:load:LAST:\"%6.0lf %% last\" " + . "GPRINT:load:AVERAGE:\"%6.0lf %% avg\" " + . "GPRINT:load:MAX:\"%6.2lf %% max\\n\" " + . "HRULE:$WARN[load]#FFFF00:\"Warning\: $WARN[load] %\" " + . "HRULE:$CRIT[load]#FF0000:\"Critical\: $CRIT[load] %\\n\" " + . ""; +} diff -Nru check-mk-1.2.2p3/check_mk-uptime.php check-mk-1.2.6p12/check_mk-uptime.php --- check-mk-1.2.2p3/check_mk-uptime.php 2013-03-04 11:48:42.000000000 +0000 +++ check-mk-1.2.6p12/check_mk-uptime.php 2015-06-24 09:48:39.000000000 +0000 @@ -6,7 +6,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. diff -Nru check-mk-1.2.2p3/check_mk-veeam_client.php check-mk-1.2.6p12/check_mk-veeam_client.php --- check-mk-1.2.2p3/check_mk-veeam_client.php 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/check_mk-veeam_client.php 2015-06-24 09:48:39.000000000 +0000 @@ -0,0 +1,52 @@ + diff -Nru check-mk-1.2.2p3/check_mk-viprinet_temp.php check-mk-1.2.6p12/check_mk-viprinet_temp.php --- check-mk-1.2.2p3/check_mk-viprinet_temp.php 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/check_mk-viprinet_temp.php 2015-06-24 09:48:39.000000000 +0000 @@ -0,0 +1,38 @@ + diff -Nru check-mk-1.2.2p3/check_mk-vms_cpu.php check-mk-1.2.6p12/check_mk-vms_cpu.php --- check-mk-1.2.2p3/check_mk-vms_cpu.php 2013-03-04 11:48:42.000000000 +0000 +++ check-mk-1.2.6p12/check_mk-vms_cpu.php 2015-09-21 10:59:54.000000000 +0000 @@ -6,7 +6,7 @@ # | | |___| | | | __/ (__| < | | | | . 
\ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. @@ -40,24 +40,24 @@ $def[1] .= "" . "COMMENT:Average\: " - . "AREA:system#ff6000:\"System\" " - . "GPRINT:system:AVERAGE:\"%2.1lf%% \" " - . "AREA:user#60f020:\"User\":STACK " - . "GPRINT:user:AVERAGE:\"%2.1lf%% \" " - . "AREA:wait#00b0c0:\"$thirdname\":STACK " - . "GPRINT:wait:AVERAGE:\"%2.1lf%% \" " - . "LINE:sum#004080:\"Total\" " - . "GPRINT:sum:AVERAGE:\"%2.1lf%% \\n\" " + . "AREA:system#ff6000:\"System\" " + . "GPRINT:system:AVERAGE:\"%4.1lf%% \" " + . "AREA:user#60f020:\"User\":STACK " + . "GPRINT:user:AVERAGE:\"%4.1lf%% \" " + . "AREA:wait#00b0c0:\"$thirdname\":STACK " + . "GPRINT:wait:AVERAGE:\"%4.1lf%% \" " + . "LINE:sum#004080:\"Total\" " + . "GPRINT:sum:AVERAGE:\"%4.1lf%% \\n\" " . "COMMENT:\"Last\: \" " - . "AREA:system#ff6000:\"System\" " - . "GPRINT:system:LAST:\"%2.1lf%% \" " - . "AREA:user#60f020:\"User\":STACK " - . "GPRINT:user:LAST:\"%2.1lf%% \" " - . "AREA:wait#00b0c0:\"$thirdname\":STACK " - . "GPRINT:wait:LAST:\"%2.1lf%% \" " - . "LINE:sum#004080:\"Total\" " - . "GPRINT:sum:LAST:\"%2.1lf%% \\n\" " + . "AREA:system#ff6000:\"System\" " + . "GPRINT:system:LAST:\"%4.1lf%% \" " + . "AREA:user#60f020:\"User\":STACK " + . "GPRINT:user:LAST:\"%4.1lf%% \" " + . "AREA:wait#00b0c0:\"$thirdname\":STACK " + . "GPRINT:wait:LAST:\"%4.1lf%% \" " + . "LINE:sum#004080:\"Total\" " + . "GPRINT:sum:LAST:\"%4.1lf%% \\n\" " .""; diff -Nru check-mk-1.2.2p3/check_mk-vms_df.php check-mk-1.2.6p12/check_mk-vms_df.php --- check-mk-1.2.2p3/check_mk-vms_df.php 2013-03-04 11:48:42.000000000 +0000 +++ check-mk-1.2.6p12/check_mk-vms_df.php 2015-06-24 09:48:39.000000000 +0000 @@ -6,7 +6,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. 
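The hunk below, like several later ones in this patch, replaces fixed perfdata indices with a name-based lookup built from $NAME. A minimal self-contained sketch of that pattern; $NAME, $RRDFILE and $DS are normally supplied by PNP4Nagios, and the sample values here are invented for illustration only:

<?php
// Sketch of the name-based data-source lookup this patch introduces.
// The three input arrays are ordinarily populated by PNP4Nagios; the
// values below are invented for illustration.
$NAME    = array(1 => 'load1', 2 => 'load5', 3 => 'load15');
$RRDFILE = array(1 => '/omd/data/host/CPU_load.rrd',
                 2 => '/omd/data/host/CPU_load.rrd',
                 3 => '/omd/data/host/CPU_load.rrd');
$DS      = array(1 => '1', 2 => '2', 3 => '3');

$RRD = array();
foreach ($NAME as $i => $n) {
    // 'load15' now resolves to "<rrdfile>:<ds>:MAX" regardless of the
    // index at which the check happens to export it.
    $RRD[$n] = "$RRDFILE[$i]:$DS[$i]:MAX";
}

echo "DEF:load15=" . $RRD['load15'] . "\n";
?>

This is why the rewritten templates can tolerate optional data sources (averaging, prediction) shifting the numeric positions around.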
@@ -35,13 +35,13 @@ # disk utilization $opt[1] = "--vertical-label GB -l 0 -u $maxgb --title \"$hostname: Filesystem $fsname ($sizegb GB)\" "; -$def[1] = "DEF:mb=$RRDFILE[1]:$DS[1]:MAX "; +$def[1] = "DEF:mb=$RRDFILE[1]:$DS[1]:MAX "; $def[1] .= "CDEF:var1=mb,1024,/ "; -$def[1] .= "AREA:var1#00ffc6:\"used space on $fsname\\n\" "; -$def[1] .= "LINE1:var1#226600: "; +$def[1] .= "AREA:var1#00ffc6:\"used space on $fsname\\n\" "; +$def[1] .= "LINE1:var1#226600: "; $def[1] .= "HRULE:$maxgb#003300:\"Size ($sizegb GB) \" "; $def[1] .= "HRULE:$warngb#ffff00:\"Warning at $warngbtxt GB \" "; -$def[1] .= "HRULE:$critgb#ff0000:\"Critical at $critgbtxt GB \\n\" "; +$def[1] .= "HRULE:$critgb#ff0000:\"Critical at $critgbtxt GB \\n\" "; $def[1] .= "GPRINT:var1:LAST:\"current\: %6.2lf GB\" "; $def[1] .= "GPRINT:var1:MAX:\"max\: %6.2lf GB \" "; $def[1] .= "GPRINT:var1:AVERAGE:\"avg\: %6.2lf GB\" "; @@ -50,7 +50,7 @@ $opt[2] = "--vertical-label 'IO ops/sec' --title \"$hostname: IO operations / sec\" "; $def[2] = "DEF:iops=$RRDFILE[2]:$DS[2]:MAX " ; -$def[2] .= "LINE1:iops#00ff00: "; +$def[2] .= "LINE1:iops#00ff00: "; ?> diff -Nru check-mk-1.2.2p3/check_mk-vms_diskstat.df.php check-mk-1.2.6p12/check_mk-vms_diskstat.df.php --- check-mk-1.2.2p3/check_mk-vms_diskstat.df.php 2013-11-05 09:22:37.000000000 +0000 +++ check-mk-1.2.6p12/check_mk-vms_diskstat.df.php 2015-09-21 10:59:54.000000000 +0000 @@ -6,7 +6,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. @@ -25,10 +25,23 @@ setlocale(LC_ALL, "POSIX"); +// Make data sources available via names +$RRD = array(); +foreach ($NAME as $i => $n) { + $RRD[$n] = "$RRDFILE[$i]:$DS[$i]:MAX"; + $RRD_MIN[$n] = "$RRDFILE[$i]:$DS[$i]:MIN"; + $RRD_AVG[$n] = "$RRDFILE[$i]:$DS[$i]:AVERAGE"; + $WARN[$n] = $WARN[$i]; + $CRIT[$n] = $CRIT[$i]; + $MIN[$n] = $MIN[$i]; + $MAX[$n] = $MAX[$i]; + $ACT[$n] = $ACT[$i]; +} + # RRDtool Options #$servicedes=$NAGIOS_SERVICEDESC -$fsname = str_replace("_", "/", substr($servicedesc, 3)); +$fsname = str_replace("_", "/", substr($servicedesc,11)); $fstitle = $fsname; # Hack for windows: replace C// with C:\ @@ -47,57 +60,98 @@ $opt[1] = "--vertical-label GB -l 0 -u $maxgb --title '$hostname: Filesystem $fstitle ($sizegb GB)' "; # First graph show current filesystem usage -$def[1] = "DEF:mb=$RRDFILE[1]:$DS[1]:MAX "; +$def[1] = "DEF:mb=$RRDFILE[1]:$DS[1]:MAX "; $def[1] .= "CDEF:var1=mb,1024,/ "; -$def[1] .= "AREA:var1#00ffc6:\"used space on $fsname\\n\" "; -$def[1] .= "LINE1:var1#226600: "; +$def[1] .= "AREA:var1#00ffc6:\"used space on $fsname\\n\" "; + +# Optional uncommitted usage e.g. for esx hosts +if(isset($RRD['uncommitted'])) { + $def[1] .= "DEF:uncommitted_mb=".$RRD['uncommitted']." 
"; + $def[1] .= "CDEF:uncommitted_gb=uncommitted_mb,1024,/ "; + $def[1] .= "CDEF:total_gb=uncommitted_gb,var1,+ "; +} else { + $def[1] .= "CDEF:total_gb=var1 "; +} + $def[1] .= "HRULE:$maxgb#003300:\"Size ($sizegb GB) \" "; $def[1] .= "HRULE:$warngb#ffff00:\"Warning at $warngbtxt GB \" "; $def[1] .= "HRULE:$critgb#ff0000:\"Critical at $critgbtxt GB \\n\" "; $def[1] .= "GPRINT:var1:LAST:\"current\: %6.2lf GB\" "; $def[1] .= "GPRINT:var1:MAX:\"max\: %6.2lf GB \" "; -$def[1] .= "GPRINT:var1:AVERAGE:\"avg\: %6.2lf GB\" "; +$def[1] .= "GPRINT:var1:AVERAGE:\"avg\: %6.2lf GB\\n\" "; + +if(isset($RRD['uncommitted'])) { + $def[1] .= "AREA:uncommitted_gb#eeccff:\"Uncommited\":STACK "; + $def[1] .= "GPRINT:uncommitted_gb:MAX:\"%6.2lf GB\l\" "; +} + +$def[1] .= "LINE1:total_gb#226600 "; # Second graph is optional and shows trend. The MAX field # of the third variable contains (size of the filesystem in MB # / range in hours). From that we can compute the configured range. -if (isset($DS[2])) { - $size_mb_per_hours = floatval($MAX[3]); // this is size_mb / range(hours) +if (isset($RRD['growth'])) { + $size_mb_per_hours = floatval($MAX['trend']); // this is size_mb / range(hours) $size_mb = floatval($MAX[1]); $hours = 1.0 / ($size_mb_per_hours / $size_mb); $range = sprintf("%.0fh", $hours); - // Current growth / shrinking. This value is give as MB / 24 hours. + // Current growth / shrinking. This value is give as MB / 24 hours. // Note: This has changed in 1.1.13i3. Prior it was MB / trend_range! $opt[2] = "--vertical-label '+/- MB / 24h' -l -1 -u 1 -X0 --title '$hostname: Growth of $fstitle' "; - $def[2] = "DEF:growth_max=$RRDFILE[2]:$DS[2]:MAX "; - $def[2] .= "DEF:growth_min=$RRDFILE[2]:$DS[2]:MIN "; - $def[2] .= "DEF:trend=$RRDFILE[3]:$DS[3]:AVERAGE "; + $def[2] = "DEF:growth_max=${RRD['growth']} "; + $def[2] .= "DEF:growth_min=${RRD_MIN['growth']} "; + $def[2] .= "DEF:trend=${RRD_AVG['trend']} "; $def[2] .= "CDEF:growth_pos=growth_max,0,MAX "; $def[2] .= "CDEF:growth_neg=growth_min,0,MIN "; - $def[2] .= "CDEF:growth_minabs=0,growth_min,- "; + $def[2] .= "CDEF:growth_minabs=0,growth_min,- "; $def[2] .= "CDEF:growth=growth_minabs,growth_max,MAX "; $def[2] .= "HRULE:0#c0c0c0 "; - $def[2] .= "AREA:growth_pos#3060f0:\"Grow\" "; - $def[2] .= "AREA:growth_neg#30f060:\"Shrink \" "; + $def[2] .= "AREA:growth_pos#3060f0:\"Grow\" "; + $def[2] .= "AREA:growth_neg#30f060:\"Shrink \" "; $def[2] .= "GPRINT:growth:LAST:\"Current\: %+9.2lfMB / 24h\" "; $def[2] .= "GPRINT:growth:MAX:\"Max\: %+9.2lfMB / 24h\\n\" "; // Trend $opt[3] = "--vertical-label '+/- MB / 24h' -l -1 -u 1 -X0 --title '$hostname: Trend for $fstitle' "; - $def[3] = "DEF:trend=$RRDFILE[3]:$DS[3]:AVERAGE "; + $def[3] = "DEF:trend=${RRD_AVG['trend']} "; $def[3] .= "HRULE:0#c0c0c0 "; $def[3] .= "LINE1:trend#000000:\"Trend\:\" "; - $def[3] .= "GPRINT:trend:LAST:\"%+7.2lf MB/24h\" "; - if ($WARN[3]) { - $warn_mb = sprintf("%.2fMB", $WARN[3] * $hours / 24.0); - $def[3] .= "LINE1:$WARN[3]#ffff00:\"Warn\: $warn_mb / $range\" "; + $def[3] .= "GPRINT:trend:LAST:\"%+7.2lf MB/24h\" "; + if ($WARN['trend']) { + $warn_mb = sprintf("%.2fMB", $WARN['trend'] * $hours / 24.0); + $def[3] .= "LINE1:${WARN['trend']}#ffff00:\"Warn\: $warn_mb / $range\" "; } - if ($CRIT[3]) { - $crit_mb = sprintf("%.2fMB", $CRIT[3] * $hours / 24.0); - $def[3] .= "LINE1:$CRIT[3]#ff0000:\"Crit\: $crit_mb / $range\" "; + if ($CRIT['trend']) { + $crit_mb = sprintf("%.2fMB", $CRIT['trend'] * $hours / 24.0); + $def[3] .= "LINE1:${CRIT['trend']}#ff0000:\"Crit\: $crit_mb / $range\" "; } $def[3] 
.= "COMMENT:\"\\n\" "; } +if (isset($RRD['trend_hoursleft'])) { + // Trend + $opt[4] = "--vertical-label 'Days left' -l -1 -u 365 -X0 --title '$hostname: Days left for $fstitle' "; + $def[4] = "DEF:hours_left=${RRD_AVG['trend_hoursleft']} "; + $def[4] .= "DEF:hours_left_min=${RRD_MIN['trend_hoursleft']} "; + // negative hours indicate no growth + // the dataset hours_left_isneg stores this info for each point as True/False + $def[4] .= "CDEF:hours_left_isneg=hours_left_min,-1,EQ "; + $def[4] .= "CDEF:hours_left_unmon=hours_left_min,400,0,IF "; + $def[4] .= "CDEF:days_left=hours_left,24,/ "; + $def[4] .= "CDEF:days_left_cap=days_left,400,MIN "; + // Convert negative points to 400 (y-axis cap) + $def[4] .= "CDEF:days_left_cap_positive=hours_left_isneg,400,days_left_cap,IF "; + // The AREA has a rendering problem. Points are too far to the right + $def[4] .= "AREA:hours_left_unmon#AA2200: "; + + $def[4] .= "AREA:days_left_cap_positive#22AA44:\"Days left\:\" "; + if ($ACT[4] == -1) + { + $def[4] .= "COMMENT:\"Not growing\" "; + } + else { + $def[4] .= "GPRINT:days_left:LAST:\"%7.2lf days\" "; + } +} ?> diff -Nru check-mk-1.2.2p3/check_mk-vms_if.php check-mk-1.2.6p12/check_mk-vms_if.php --- check-mk-1.2.2p3/check_mk-vms_if.php 2013-11-05 09:22:37.000000000 +0000 +++ check-mk-1.2.6p12/check_mk-vms_if.php 2015-06-24 09:48:39.000000000 +0000 @@ -6,7 +6,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. @@ -40,87 +40,105 @@ # Graph 1: used bandwidth -# Determine if Bit or Byte. -# Change multiplier and labels -$unit = "B"; -$unit_multiplier = 1; -$vertical_label_name = "MByte/sec"; -if (strcmp($MIN[11], "0.0") == 0) { +# Determine if Bit or Byte. Bit is signalled via a min value of 0.0 +# in the 11th performance value. +if (!strcmp($MIN[11], "0.0")) { $unit = "Bit"; $unit_multiplier = 8; - $vertical_label_name = "MBit/sec"; + $base = 1000; // Megabit is 1000 * 1000 } +else { + $unit = "B"; + $unit_multiplier = 1; + $base = 1000; // Megabyte is 1000 * 1000 +} + +# Convert bytes to bits if neccessary $bandwidth = $MAX[1] * $unit_multiplier; $warn = $WARN[1] * $unit_multiplier; $crit = $CRIT[1] * $unit_multiplier; -# Horizontal lines -$mega = 1024.0 * 1024.0; -$mBandwidthH = $bandwidth / $mega; -$mWarnH = $warn / $mega; -$mCritH = $crit / $mega; - -# Break down bandwidth, warn and crit +# Now choose a convenient scale, based on the known bandwith of +# the interface, and break down bandwidth, warn and crit by that +# scale. 
$bwuom = ' '; -$base = 1000; -if($bandwidth > $base * $base * $base) { - $warn /= $base * $base * $base; - $crit /= $base * $base * $base; - $bandwidth /= $base * $base * $base; - $bwuom = 'G'; -} elseif ($bandwidth > $base * $base) { - $warn /= $base * $base; - $crit /= $base * $base; - $bandwidth /= $base * $base; - $bwuom = 'M'; -} elseif ($bandwidth > $base) { - $warn /= $base; - $crit /= $base; - $bandwidth /= $base; - $bwuom = 'k'; -} - -if ($mBandwidthH < 10) - $range = $mBandwidthH; -else - $range = 10.0; +if ($bandwidth > $base * $base * $base) { + $scale = $base * $base * $base; + $bwuom = 'G'; +} +elseif ($bandwidth > $base * $base) { + $scale = $base * $base; + $bwuom = 'M'; +} +elseif ($bandwidth > $base) { + $scale = $base; + $bwuom = 'k'; +} +else { + $scale = 1; + $bwuom = ' '; +} + +$warn /= $scale; +$crit /= $scale; +$bandwidth /= $scale; + +$vertical_label_name = $bwuom . $unit . "/sec"; + +$range = min(10, $bandwidth); + $bandwidthInfo = ""; if ($bandwidth > 0){ - $bandwidthInfo = " at bandwidth ${bwuom}${unit}/s"; + $bandwidthInfo = " at " . sprintf("%.1f", $bandwidth) . " ${bwuom}${unit}/s"; } $ds_name[1] = 'Used bandwidth'; -$opt[1] = "--vertical-label \"$vertical_label_name\" -l -$range -u $range -X0 -b 1024 --title \"Used bandwidth $hostname / $servicedesc $bandwidthInfo\" "; -$def[1] = +$opt[1] = "--vertical-label \"$vertical_label_name\" -l -$range -u $range -X0 -b 1024 --title \"Used bandwidth $hostname / $servicedesc$bandwidthInfo\" "; +$def[1] = "HRULE:0#c0c0c0 "; - if ($mBandwidthH) - $def[1] .= "HRULE:$mBandwidthH#808080:\"Port speed\: " . sprintf("%.1f", $bandwidth) . " ".$bwuom."$unit/s\\n\" ". - "HRULE:-$mBandwidthH#808080: "; - if ($warn) - $def[1] .= "HRULE:$mWarnH#ffff00:\"Warning\: " . sprintf("%6.1f", $warn) . " ".$bwuom."$unit/s\\n\" ". - "HRULE:-$mWarnH#ffff00: "; - if ($crit) - $def[1] .= "HRULE:$mCritH#ff0000:\"Critical\: " . sprintf("%6.1f", $crit) . " ".$bwuom."$unit/s\\n\" ". - "HRULE:-$mCritH#ff0000: "; +if ($bandwidth) + $def[1] .= "HRULE:$bandwidth#808080:\"Port speed\: " . sprintf("%10.1f", $bandwidth) . " ".$bwuom."$unit/s\\n\" ". + "HRULE:-$bandwidth#808080: "; +if ($warn) + $def[1] .= "HRULE:$warn#ffff00:\"Warning\: " . sprintf("%13.1f", $warn) . " ".$bwuom."$unit/s\\n\" ". + "HRULE:-$warn#ffff00: "; +if ($crit) + $def[1] .= "HRULE:$crit#ff0000:\"Critical\: " . sprintf("%13.1f", $crit) . " ".$bwuom."$unit/s\\n\" ". + "HRULE:-$crit#ff0000: "; + + $def[1] .= "". + # incoming + "DEF:inbytes=$RRDFILE[1]:$DS[1]:MAX ". + "CDEF:intraffic=inbytes,$unit_multiplier,* ". + "CDEF:inmb=intraffic,$scale,/ ". + "AREA:inmb#00e060:\"in \" ". + "GPRINT:intraffic:LAST:\"%7.1lf %s$unit/s last\" ". + "GPRINT:intraffic:AVERAGE:\"%7.1lf %s$unit/s avg\" ". + "GPRINT:intraffic:MAX:\"%7.1lf %s$unit/s max\\n\" ". + "VDEF:inperc=intraffic,95,PERCENTNAN ". + "VDEF:inpercmb=inmb,95,PERCENTNAN ". + "LINE:inpercmb#008f00:\"95% percentile\" ". + "GPRINT:inperc:\"%7.1lf %s$unit/s\\n\" ". - $def[1] .= "DEF:inbytes=$RRDFILE[1]:$DS[1]:MAX ". + # outgoing "DEF:outbytes=$RRDFILE[6]:$DS[6]:MAX ". - "CDEF:intraffic=inbytes,$unit_multiplier,* ". "CDEF:outtraffic=outbytes,$unit_multiplier,* ". - "CDEF:inmb=intraffic,1048576,/ ". - "CDEF:outmb=outtraffic,1048576,/ ". + "CDEF:minusouttraffic=outtraffic,-1,* ". + "CDEF:outmb=outtraffic,$scale,/ ". "CDEF:minusoutmb=0,outmb,- ". - "AREA:inmb#00e060:\"in \" ". - "GPRINT:intraffic:LAST:\"%6.1lf %s$unit/s last\" ". - "GPRINT:intraffic:AVERAGE:\"%6.1lf %s$unit/s avg\" ". - "GPRINT:intraffic:MAX:\"%6.1lf %s$unit/s max\\n\" ". 
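The scale selection that follows is compact but easy to misread, so here is a standalone restatement under stated assumptions: choose_scale() is an invented helper name, not part of the patch, and note that the comparisons are strict, so a link of exactly 1 GBit/s is still rendered in MBit/s.

<?php
// Standalone restatement of the bandwidth scale selection below.
// choose_scale() is an invented name for illustration only.
function choose_scale($bandwidth, $base = 1000) {
    if ($bandwidth > $base * $base * $base)
        return array($base * $base * $base, 'G');
    elseif ($bandwidth > $base * $base)
        return array($base * $base, 'M');
    elseif ($bandwidth > $base)
        return array($base, 'k');
    else
        return array(1, ' ');
}

// A 10 GBit/s port: perfdata reports 1250000000 bytes/s, times 8 for bits.
$bits = 1250000000 * 8;
list($scale, $bwuom) = choose_scale($bits);
printf("%.1f %sBit/s\n", $bits / $scale, $bwuom); // prints "10.0 GBit/s"
?>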
- "AREA:minusoutmb#0080e0:\"out \" ". - "GPRINT:outtraffic:LAST:\"%6.1lf %s$unit/s last\" ". - "GPRINT:outtraffic:AVERAGE:\"%6.1lf %s$unit/s avg\" ". - "GPRINT:outtraffic:MAX:\"%6.1lf %s$unit/s max\\n\" "; + "AREA:minusoutmb#0080e0:\"out \" ". + "GPRINT:outtraffic:LAST:\"%7.1lf %s$unit/s last\" ". + "GPRINT:outtraffic:AVERAGE:\"%7.1lf %s$unit/s avg\" ". + "GPRINT:outtraffic:MAX:\"%7.1lf %s$unit/s max\\n\" ". + "VDEF:outperc=minusouttraffic,5,PERCENTNAN ". + "VDEF:outpercmb=minusoutmb,5,PERCENTNAN ". + "LINE:outpercmb#00008f:\"95% percentile\" ". + "GPRINT:outperc:\"%7.1lf %s$unit/s\\n\" ". + + ""; if (isset($DS[12])) { - $def[1] .= + $def[1] .= "DEF:inbytesa=$RRDFILE[12]:$DS[12]:MAX ". "DEF:outbytesa=$RRDFILE[13]:$DS[13]:MAX ". "CDEF:intraffica=inbytesa,$unit_multiplier,* ". @@ -142,29 +160,41 @@ $ds_name[2] = 'Packets'; $opt[2] = "--vertical-label \"packets/sec\" --title \"Packets $hostname / $servicedesc\" "; $def[2] = + # ingoing "HRULE:0#c0c0c0 ". "DEF:inu=$RRDFILE[2]:$DS[2]:MAX ". "DEF:innu=$RRDFILE[3]:$DS[3]:MAX ". - "AREA:inu#00ffc0:\"in unicast \" ". - "GPRINT:inu:LAST:\"%7.2lf/s last \" ". - "GPRINT:inu:AVERAGE:\"%7.2lf/s avg \" ". - "GPRINT:inu:MAX:\"%7.2lf/s max\\n\" ". - "AREA:innu#00c080:\"in broadcast/multicast \":STACK ". - "GPRINT:innu:LAST:\"%7.2lf/s last \" ". - "GPRINT:innu:AVERAGE:\"%7.2lf/s avg \" ". - "GPRINT:innu:MAX:\"%7.2lf/s max\\n\" ". + "CDEF:in=inu,innu,+ ". + "AREA:inu#00ffc0:\"in unicast \" ". + "GPRINT:inu:LAST:\"%9.1lf/s last \" ". + "GPRINT:inu:AVERAGE:\"%9.1lf/s avg \" ". + "GPRINT:inu:MAX:\"%9.1lf/s max\\n\" ". + "AREA:innu#00c080:\"in broadcast/multicast \":STACK ". + "GPRINT:innu:LAST:\"%9.1lf/s last \" ". + "GPRINT:innu:AVERAGE:\"%9.1lf/s avg \" ". + "GPRINT:innu:MAX:\"%9.1lf/s max\\n\" ". + "VDEF:inperc=in,95,PERCENTNAN ". + "LINE:inperc#00cf00:\"in 95% percentile \" ". + "GPRINT:inperc:\"%9.1lf/s\\n\" ". + + # outgoing "DEF:outu=$RRDFILE[7]:$DS[7]:MAX ". "DEF:outnu=$RRDFILE[8]:$DS[8]:MAX ". "CDEF:minusoutu=0,outu,- ". "CDEF:minusoutnu=0,outnu,- ". - "AREA:minusoutu#00c0ff:\"out unicast \" ". - "GPRINT:outu:LAST:\"%7.2lf/s last \" ". - "GPRINT:outu:AVERAGE:\"%7.2lf/s avg \" ". - "GPRINT:outu:MAX:\"%7.2lf/s max\\n\" ". - "AREA:minusoutnu#0080c0:\"out broadcast/multicast \":STACK ". - "GPRINT:outnu:LAST:\"%7.2lf/s last \" ". - "GPRINT:outnu:AVERAGE:\"%7.2lf/s avg \" ". - "GPRINT:outnu:MAX:\"%7.2lf/s max\\n\" "; + "CDEF:minusout=minusoutu,minusoutnu,+ ". + "AREA:minusoutu#00c0ff:\"out unicast \" ". + "GPRINT:outu:LAST:\"%9.1lf/s last \" ". + "GPRINT:outu:AVERAGE:\"%9.1lf/s avg \" ". + "GPRINT:outu:MAX:\"%9.1lf/s max\\n\" ". + "AREA:minusoutnu#0080c0:\"out broadcast/multicast\":STACK ". + "GPRINT:outnu:LAST:\"%9.1lf/s last \" ". + "GPRINT:outnu:AVERAGE:\"%9.1lf/s avg \" ". + "GPRINT:outnu:MAX:\"%9.1lf/s max\\n\" ". + "VDEF:outperc=minusout,5,PERCENTNAN ". + "LINE:outperc#0000cf:\"out 95% percentile \" ". + "GPRINT:outperc:\"%9.1lf/s\\n\" ". + ""; # Graph 3: errors and discards $ds_name[3] = 'Errors and discards'; diff -Nru check-mk-1.2.2p3/check_mk-vms_system.ios.php check-mk-1.2.6p12/check_mk-vms_system.ios.php --- check-mk-1.2.2p3/check_mk-vms_system.ios.php 2013-03-04 11:48:42.000000000 +0000 +++ check-mk-1.2.6p12/check_mk-vms_system.ios.php 2015-06-24 09:48:39.000000000 +0000 @@ -6,7 +6,7 @@ # | | |___| | | | __/ (__| < | | | | . 
\ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. diff -Nru check-mk-1.2.2p3/check_mk-vms_system.procs.php check-mk-1.2.6p12/check_mk-vms_system.procs.php --- check-mk-1.2.2p3/check_mk-vms_system.procs.php 2013-03-04 11:48:41.000000000 +0000 +++ check-mk-1.2.6p12/check_mk-vms_system.procs.php 2015-06-24 09:48:39.000000000 +0000 @@ -6,7 +6,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. diff -Nru check-mk-1.2.2p3/check_mk-vms_sys.util.php check-mk-1.2.6p12/check_mk-vms_sys.util.php --- check-mk-1.2.2p3/check_mk-vms_sys.util.php 2013-03-04 11:48:42.000000000 +0000 +++ check-mk-1.2.6p12/check_mk-vms_sys.util.php 2015-06-24 09:48:39.000000000 +0000 @@ -6,7 +6,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. diff -Nru check-mk-1.2.2p3/check_mk-wagner_titanus_topsense.airflow_deviation.php check-mk-1.2.6p12/check_mk-wagner_titanus_topsense.airflow_deviation.php --- check-mk-1.2.2p3/check_mk-wagner_titanus_topsense.airflow_deviation.php 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/check_mk-wagner_titanus_topsense.airflow_deviation.php 2015-06-24 09:48:39.000000000 +0000 @@ -0,0 +1,36 @@ + diff -Nru check-mk-1.2.2p3/check_mk-wagner_titanus_topsense.chamber_deviation.php check-mk-1.2.6p12/check_mk-wagner_titanus_topsense.chamber_deviation.php --- check-mk-1.2.2p3/check_mk-wagner_titanus_topsense.chamber_deviation.php 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/check_mk-wagner_titanus_topsense.chamber_deviation.php 2015-06-24 09:48:39.000000000 +0000 @@ -0,0 +1,34 @@ + diff -Nru check-mk-1.2.2p3/check_mk-wagner_titanus_topsense.smoke.php check-mk-1.2.6p12/check_mk-wagner_titanus_topsense.smoke.php --- check-mk-1.2.2p3/check_mk-wagner_titanus_topsense.smoke.php 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/check_mk-wagner_titanus_topsense.smoke.php 2015-06-24 09:48:39.000000000 +0000 @@ -0,0 +1,34 @@ + diff -Nru check-mk-1.2.2p3/check_mk-wagner_titanus_topsense.temp.php check-mk-1.2.6p12/check_mk-wagner_titanus_topsense.temp.php --- check-mk-1.2.2p3/check_mk-wagner_titanus_topsense.temp.php 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/check_mk-wagner_titanus_topsense.temp.php 2015-06-24 09:48:39.000000000 +0000 @@ -0,0 +1,38 @@ + diff -Nru check-mk-1.2.2p3/check_mk-win_dhcp_pools.php check-mk-1.2.6p12/check_mk-win_dhcp_pools.php --- check-mk-1.2.2p3/check_mk-win_dhcp_pools.php 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/check_mk-win_dhcp_pools.php 2015-06-24 09:48:39.000000000 +0000 @@ -0,0 +1,51 @@ + diff -Nru check-mk-1.2.2p3/check_mk-winperf.cpuusage.php check-mk-1.2.6p12/check_mk-winperf.cpuusage.php --- check-mk-1.2.2p3/check_mk-winperf.cpuusage.php 2013-03-04 11:48:42.000000000 +0000 +++ check-mk-1.2.6p12/check_mk-winperf.cpuusage.php 2015-09-21 
10:59:54.000000000 +0000 @@ -6,7 +6,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. @@ -26,7 +26,7 @@ $opt[1] = "--vertical-label 'Percent' -l0 -u100 --title \"CPU Utilization of $hostname\" "; -$def[1] = "DEF:usage=$RRDFILE[1]:$DS[1]:MAX "; +$def[1] = "DEF:usage=$RRDFILE[1]:$DS[1]:AVERAGE "; $def[1] .= "AREA:usage#60f020:\"CPU utilization\" "; $def[1] .= "LINE:usage#40d010 "; diff -Nru check-mk-1.2.2p3/check_mk-winperf_if.php check-mk-1.2.6p12/check_mk-winperf_if.php --- check-mk-1.2.2p3/check_mk-winperf_if.php 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/check_mk-winperf_if.php 2015-06-24 09:48:39.000000000 +0000 @@ -0,0 +1,226 @@ + $base * $base * $base) { + $scale = $base * $base * $base; + $bwuom = 'G'; +} +elseif ($bandwidth > $base * $base) { + $scale = $base * $base; + $bwuom = 'M'; +} +elseif ($bandwidth > $base) { + $scale = $base; + $bwuom = 'k'; +} +else { + $scale = 1; + $bwuom = ' '; +} + +$warn /= $scale; +$crit /= $scale; +$bandwidth /= $scale; + +$vertical_label_name = $bwuom . $unit . "/sec"; + +$range = min(10, $bandwidth); + + +$bandwidthInfo = ""; +if ($bandwidth > 0){ + $bandwidthInfo = " at " . sprintf("%.1f", $bandwidth) . " ${bwuom}${unit}/s"; +} +$ds_name[1] = 'Used bandwidth'; +$opt[1] = "--vertical-label \"$vertical_label_name\" -l -$range -u $range -X0 -b 1024 --title \"Used bandwidth $hostname / $servicedesc$bandwidthInfo\" "; +$def[1] = + "HRULE:0#c0c0c0 "; +if ($bandwidth) + $def[1] .= "HRULE:$bandwidth#808080:\"Port speed\: " . sprintf("%10.1f", $bandwidth) . " ".$bwuom."$unit/s\\n\" ". + "HRULE:-$bandwidth#808080: "; +if ($warn) + $def[1] .= "HRULE:$warn#ffff00:\"Warning\: " . sprintf("%13.1f", $warn) . " ".$bwuom."$unit/s\\n\" ". + "HRULE:-$warn#ffff00: "; +if ($crit) + $def[1] .= "HRULE:$crit#ff0000:\"Critical\: " . sprintf("%13.1f", $crit) . " ".$bwuom."$unit/s\\n\" ". + "HRULE:-$crit#ff0000: "; + + $def[1] .= "". + # incoming + "DEF:inbytes=$RRDFILE[1]:$DS[1]:MAX ". + "CDEF:intraffic=inbytes,$unit_multiplier,* ". + "CDEF:inmb=intraffic,$scale,/ ". + "AREA:inmb#00e060:\"in \" ". + "GPRINT:intraffic:LAST:\"%7.1lf %s$unit/s last\" ". + "GPRINT:intraffic:AVERAGE:\"%7.1lf %s$unit/s avg\" ". + "GPRINT:intraffic:MAX:\"%7.1lf %s$unit/s max\\n\" ". + "VDEF:inperc=intraffic,95,PERCENTNAN ". + "VDEF:inpercmb=inmb,95,PERCENTNAN ". + "LINE:inpercmb#008f00:\"95% percentile\" ". + "GPRINT:inperc:\"%7.1lf %s$unit/s\\n\" ". + + # outgoing + "DEF:outbytes=$RRDFILE[6]:$DS[6]:MAX ". + "CDEF:outtraffic=outbytes,$unit_multiplier,* ". + "CDEF:minusouttraffic=outtraffic,-1,* ". + "CDEF:outmb=outtraffic,$scale,/ ". + "CDEF:minusoutmb=0,outmb,- ". + "AREA:minusoutmb#0080e0:\"out \" ". + "GPRINT:outtraffic:LAST:\"%7.1lf %s$unit/s last\" ". + "GPRINT:outtraffic:AVERAGE:\"%7.1lf %s$unit/s avg\" ". + "GPRINT:outtraffic:MAX:\"%7.1lf %s$unit/s max\\n\" ". + "VDEF:outperc=minusouttraffic,5,PERCENTNAN ". + "VDEF:outpercmb=minusoutmb,5,PERCENTNAN ". + "LINE:outpercmb#00008f:\"95% percentile\" ". + "GPRINT:outperc:\"%7.1lf %s$unit/s\\n\" ". + + ""; + +if (isset($DS[12])) { + $def[1] .= + "DEF:inbytesa=$RRDFILE[12]:$DS[12]:MAX ". + "DEF:outbytesa=$RRDFILE[13]:$DS[13]:MAX ". + "CDEF:intraffica=inbytesa,$unit_multiplier,* ". + "CDEF:outtraffica=outbytesa,$unit_multiplier,* ". 
+ "CDEF:inmba=intraffica,1048576,/ ". + "CDEF:outmba=outtraffica,1048576,/ ". + "CDEF:minusoutmba=0,outmba,- ". + "LINE:inmba#00a060:\"in (avg) \" ". + "GPRINT:intraffica:LAST:\"%6.1lf %s$unit/s last\" ". + "GPRINT:intraffica:AVERAGE:\"%6.1lf %s$unit/s avg\" ". + "GPRINT:intraffica:MAX:\"%6.1lf %s$unit/s max\\n\" ". + "LINE:minusoutmba#0060c0:\"out (avg) \" ". + "GPRINT:outtraffica:LAST:\"%6.1lf %s$unit/s last\" ". + "GPRINT:outtraffica:AVERAGE:\"%6.1lf %s$unit/s avg\" ". + "GPRINT:outtraffica:MAX:\"%6.1lf %s$unit/s max\\n\" "; +} + +# Graph 2: packets +$ds_name[2] = 'Packets'; +$opt[2] = "--vertical-label \"packets/sec\" --title \"Packets $hostname / $servicedesc\" "; +$def[2] = + # ingoing + "HRULE:0#c0c0c0 ". + "DEF:inu=$RRDFILE[2]:$DS[2]:MAX ". + "DEF:innu=$RRDFILE[3]:$DS[3]:MAX ". + "CDEF:in=inu,innu,+ ". + "AREA:inu#00ffc0:\"in unicast \" ". + "GPRINT:inu:LAST:\"%9.1lf/s last \" ". + "GPRINT:inu:AVERAGE:\"%9.1lf/s avg \" ". + "GPRINT:inu:MAX:\"%9.1lf/s max\\n\" ". + "AREA:innu#00c080:\"in broadcast/multicast \":STACK ". + "GPRINT:innu:LAST:\"%9.1lf/s last \" ". + "GPRINT:innu:AVERAGE:\"%9.1lf/s avg \" ". + "GPRINT:innu:MAX:\"%9.1lf/s max\\n\" ". + "VDEF:inperc=in,95,PERCENTNAN ". + "LINE:inperc#00cf00:\"in 95% percentile \" ". + "GPRINT:inperc:\"%9.1lf/s\\n\" ". + + # outgoing + "DEF:outu=$RRDFILE[7]:$DS[7]:MAX ". + "DEF:outnu=$RRDFILE[8]:$DS[8]:MAX ". + "CDEF:minusoutu=0,outu,- ". + "CDEF:minusoutnu=0,outnu,- ". + "CDEF:minusout=minusoutu,minusoutnu,+ ". + "AREA:minusoutu#00c0ff:\"out unicast \" ". + "GPRINT:outu:LAST:\"%9.1lf/s last \" ". + "GPRINT:outu:AVERAGE:\"%9.1lf/s avg \" ". + "GPRINT:outu:MAX:\"%9.1lf/s max\\n\" ". + "AREA:minusoutnu#0080c0:\"out broadcast/multicast\":STACK ". + "GPRINT:outnu:LAST:\"%9.1lf/s last \" ". + "GPRINT:outnu:AVERAGE:\"%9.1lf/s avg \" ". + "GPRINT:outnu:MAX:\"%9.1lf/s max\\n\" ". + "VDEF:outperc=minusout,5,PERCENTNAN ". + "LINE:outperc#0000cf:\"out 95% percentile \" ". + "GPRINT:outperc:\"%9.1lf/s\\n\" ". + ""; + +# Graph 3: errors and discards +$ds_name[3] = 'Errors and discards'; +$opt[3] = "--vertical-label \"packets/sec\" -X0 --title \"Problems $hostname / $servicedesc\" "; +$def[3] = + "HRULE:0#c0c0c0 ". + "DEF:inerr=$RRDFILE[5]:$DS[5]:MAX ". + "DEF:indisc=$RRDFILE[4]:$DS[4]:MAX ". + "AREA:inerr#ff0000:\"in errors \" ". + "GPRINT:inerr:LAST:\"%7.2lf/s last \" ". + "GPRINT:inerr:AVERAGE:\"%7.2lf/s avg \" ". + "GPRINT:inerr:MAX:\"%7.2lf/s max\\n\" ". + "AREA:indisc#ff8000:\"in discards \":STACK ". + "GPRINT:indisc:LAST:\"%7.2lf/s last \" ". + "GPRINT:indisc:AVERAGE:\"%7.2lf/s avg \" ". + "GPRINT:indisc:MAX:\"%7.2lf/s max\\n\" ". + "DEF:outerr=$RRDFILE[10]:$DS[10]:MAX ". + "DEF:outdisc=$RRDFILE[9]:$DS[9]:MAX ". + "CDEF:minusouterr=0,outerr,- ". + "CDEF:minusoutdisc=0,outdisc,- ". + "AREA:minusouterr#ff0080:\"out errors \" ". + "GPRINT:outerr:LAST:\"%7.2lf/s last \" ". + "GPRINT:outerr:AVERAGE:\"%7.2lf/s avg \" ". + "GPRINT:outerr:MAX:\"%7.2lf/s max\\n\" ". + "AREA:minusoutdisc#ff8080:\"out discards \":STACK ". + "GPRINT:outdisc:LAST:\"%7.2lf/s last \" ". + "GPRINT:outdisc:AVERAGE:\"%7.2lf/s avg \" ". + "GPRINT:outdisc:MAX:\"%7.2lf/s max\\n\" "; +?> diff -Nru check-mk-1.2.2p3/check_mk-winperf_msx_queues.php check-mk-1.2.6p12/check_mk-winperf_msx_queues.php --- check-mk-1.2.2p3/check_mk-winperf_msx_queues.php 2013-03-04 11:48:42.000000000 +0000 +++ check-mk-1.2.6p12/check_mk-winperf_msx_queues.php 2015-06-24 09:48:39.000000000 +0000 @@ -6,7 +6,7 @@ # | | |___| | | | __/ (__| < | | | | . 
\ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. diff -Nru check-mk-1.2.2p3/check_mk-winperf_phydisk.php check-mk-1.2.6p12/check_mk-winperf_phydisk.php --- check-mk-1.2.2p3/check_mk-winperf_phydisk.php 2013-03-04 11:48:42.000000000 +0000 +++ check-mk-1.2.6p12/check_mk-winperf_phydisk.php 2015-09-21 10:59:54.000000000 +0000 @@ -6,7 +6,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. @@ -41,7 +41,7 @@ $opt[1] = "--vertical-label 'Throughput (MB/s)' -X0 --title \"Disk throughput $hostname / $disk\" "; - $def[1] = + $def[1] = "HRULE:0#a0a0a0 ". # read "DEF:read=$RRD[read] ". @@ -53,7 +53,7 @@ # read average as line in the same graph if (isset($RRD["read.avg"])) { - $def[1] .= + $def[1] .= "DEF:read_avg=${RRD['read.avg']} ". "CDEF:read_avg_mb=read_avg,1048576,/ ". "LINE:read_avg_mb#202020 "; @@ -84,7 +84,7 @@ # write average if (isset($DS["write.avg"])) { - $def[1] .= + $def[1] .= "DEF:write_avg=${RRD['write.avg']} ". "CDEF:write_avg_mb=write_avg,1048576,/ ". "CDEF:write_avg_mb_neg=write_avg_mb,-1,* ". @@ -122,14 +122,14 @@ $def[] = "" . "DEF:read=$RRD[read_ql] " . "DEF:write=$RRD[write_ql] " - . "CDEF:writen=write,-1,* " + . "CDEF:writen=write,-1,* " . "HRULE:0#a0a0a0 " . "AREA:read#669a76 " . "AREA:writen#517ba5 " ; } - + } // legacy version of diskstat diff -Nru check-mk-1.2.2p3/check_mk-winperf_processor.util.php check-mk-1.2.6p12/check_mk-winperf_processor.util.php --- check-mk-1.2.2p3/check_mk-winperf_processor.util.php 2013-03-04 11:48:42.000000000 +0000 +++ check-mk-1.2.6p12/check_mk-winperf_processor.util.php 2015-06-24 09:48:39.000000000 +0000 @@ -6,7 +6,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. @@ -23,22 +23,58 @@ # to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, # Boston, MA 02110-1301 USA. -$desc = str_replace("_", " ", $servicedesc); +# Do not depend on numbers, use names +$RRD = array(); +foreach ($NAME as $i => $n) { + $RRD[$n] = "$RRDFILE[$i]:$DS[$i]:MAX"; + $RRD_AVG[$n] = "$RRDFILE[$i]:$DS[$i]:AVERAGE"; + $WARN[$n] = $WARN[$i]; + $CRIT[$n] = $CRIT[$i]; + $MIN[$n] = $MIN[$i]; + $MAX[$n] = $MAX[$i]; +} -$opt[1] = "--vertical-label 'CPU utilization %' -l0 -u 100 --title \"CPU Utilization $hostname $desc\" "; -# -$def[1] = "DEF:util=$RRDFILE[1]:$DS[1]:MAX ". - "CDEF:ok=util,$WARN[1],MIN ". - "CDEF:warn=util,$CRIT[1],MIN ". - "AREA:util#c0f020 ". - "AREA:warn#90f020 ". - "AREA:ok#60f020:\"Utilization\:\" ". - "LINE:util#40a018 ". - "GPRINT:util:LAST:\"%.0lf%%,\" ". - "GPRINT:util:MIN:\"min\: %.0lf%%,\" ". - "GPRINT:util:MAX:\"max\: %.0lf%%\" ". - "HRULE:$WARN[1]#ffe000:\"Warning at $WARN[1]%\" ". - "HRULE:$CRIT[1]#ff0000:\"Critical at $CRIT[1]%\\n\" ". 
- ""; +$num_threads = $MAX[1]; +$warnthreads = $WARN[1] * $num_threads / 100.0; +$critthreads = $CRIT[1] * $num_threads / 100.0; +$rightscale = 100.0 / $num_threads; + +$opt[1] = "--vertical-label 'Used CPU threads' --right-axis $rightscale:0 --right-axis-format '%4.1lf%%' --right-axis-label 'Utilization %' -l0 -ru $num_threads --title \"CPU Utilization for $hostname ($num_threads CPU threads)\" "; + +$def[1] = "DEF:perc=$RRD_AVG[util] " + . "CDEF:util=perc,$num_threads,*,100,/ " + ; + +$def[1] .= "HRULE:$MAX[util]#0040d0:\"$num_threads CPU Threads\\n\" " + ; + +$def[1] .= "AREA:util#60f020:\"Utilization\:\" " + . "LINE:util#50b01a " + . "GPRINT:perc:LAST:\"%.1lf%%\" " + . "GPRINT:util:LAST:\"(%.1lf Threads) \" " + . "GPRINT:perc:MIN:\"min\: %.1lf%%,\" " + . "GPRINT:util:MIN:\"(%.1lf), \" " + . "GPRINT:perc:MAX:\"max\: %.1lf%%\" " + . "GPRINT:util:MAX:\"(%.1lf)\\n\" " + ; + + +if (isset($RRD_AVG["avg"])) { + $def[1] .= "DEF:aperc=$RRD_AVG[avg] ". + "CDEF:avg=aperc,$num_threads,*,100,/ ". + "LINE:avg#004000:\"Averaged\: \" ". + "GPRINT:aperc:LAST:\"%.1lf%%,\" ". + "GPRINT:aperc:MIN:\"min\: %.1lf%%,\" ". + "GPRINT:aperc:MAX:\"max\: %.1lf%%\\n\" ". + ""; +} + +if ($WARN['util']) { + $def[1] .= "HRULE:$warnthreads#fff000:\"Warn at $WARN[util]% \" " + . "HRULE:$critthreads#ff0000:\"Critical at $CRIT[util]%\\n\" "; +} +else { + $def[1] .= "COMMENT:\"\\n\" "; +} ?> diff -Nru check-mk-1.2.2p3/check_mk-wut_webtherm.php check-mk-1.2.6p12/check_mk-wut_webtherm.php --- check-mk-1.2.2p3/check_mk-wut_webtherm.php 2013-03-04 11:48:41.000000000 +0000 +++ check-mk-1.2.6p12/check_mk-wut_webtherm.php 2015-06-24 09:48:39.000000000 +0000 @@ -6,7 +6,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. @@ -31,6 +31,8 @@ $def[1] .= "LINE1:var1#000080:\"\" "; $def[1] .= "GPRINT:var1:MAX:\"(Max\: %2.0lfC,\" "; $def[1] .= "GPRINT:var1:AVERAGE:\"Avg\: %2.0lfC)\" "; -$def[1] .= "HRULE:$WARN[1]#FFFF00:\"Warning\: $WARN[1]C\" "; -$def[1] .= "HRULE:$CRIT[1]#FF0000:\"Critical\: $CRIT[1]C\" "; +if ($WARN[1] != "") { + $def[1] .= "HRULE:$WARN[1]#FFFF00:\"Warning\: $WARN[1]C\" "; + $def[1] .= "HRULE:$CRIT[1]#FF0000:\"Critical\: $CRIT[1]C\" "; +} ?> diff -Nru check-mk-1.2.2p3/check_mk-zfs_arc_cache.l2.php check-mk-1.2.6p12/check_mk-zfs_arc_cache.l2.php --- check-mk-1.2.2p3/check_mk-zfs_arc_cache.l2.php 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/check_mk-zfs_arc_cache.l2.php 2015-06-24 09:48:39.000000000 +0000 @@ -0,0 +1,63 @@ + $n) { + $RRD[$n] = "$RRDFILE[$i]:$DS[$i]:MAX"; + $WARN[$n] = $WARN[$i]; + $CRIT[$n] = $CRIT[$i]; + $MIN[$n] = $MIN[$i]; + $MAX[$n] = $MAX[$i]; +} + +# +# hit_ratio +# + +$ds_name[1] = "L2 Cache Hit Ratio"; +$opt[1] = "--vertical-label '%' -l0 --title \"L2 Cache Hit Ratio for $hostname / $servicedesc\" "; +$def[1] = "DEF:hit_ratio=".$RRD['l2_hit_ratio']." 
"; +$def[1] .= "LINE:hit_ratio#408000:\"L2 Cache Hit Ratio \" "; +$def[1] .= "GPRINT:hit_ratio:LAST:\"last %2.2lf %%\" "; +$def[1] .= "GPRINT:hit_ratio:AVERAGE:\"avg %2.2lf %%\" "; +$def[1] .= "GPRINT:hit_ratio:MIN:\"min %2.2lf %%\" "; +$def[1] .= "GPRINT:hit_ratio:MAX:\"max %2.2lf %%\\n\" "; + +# +# size +# + +$ds_name[2] = "L2 Cache Size"; +$opt[2] = "--vertical-label 'Bytes' -l0 --title \"L2 Cache Size for $hostname / $servicedesc\" "; +$def[2] = "DEF:size=".$RRD['l2_size']." "; +$def[2] .= "AREA:size#408000:\"L2 Cache Size\" "; +$def[2] .= "LINE:size#000000 "; +$def[2] .= "GPRINT:size:LAST:\"last %2.0lf Bytes\" "; +$def[2] .= "GPRINT:size:AVERAGE:\"avg %2.0lf Bytes\\n\" "; +$def[2] .= "GPRINT:size:MIN:\" min %2.0lf Bytes\" "; +$def[2] .= "GPRINT:size:MAX:\"max %2.0lf Bytes\\n\" "; + +?> diff -Nru check-mk-1.2.2p3/check_mk-zfs_arc_cache.php check-mk-1.2.6p12/check_mk-zfs_arc_cache.php --- check-mk-1.2.2p3/check_mk-zfs_arc_cache.php 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/check_mk-zfs_arc_cache.php 2015-06-24 09:48:39.000000000 +0000 @@ -0,0 +1,96 @@ + $n) { + $RRD[$n] = "$RRDFILE[$i]:$DS[$i]:MAX"; + $WARN[$n] = $WARN[$i]; + $CRIT[$n] = $CRIT[$i]; + $MIN[$n] = $MIN[$i]; + $MAX[$n] = $MAX[$i]; +} + +# +# hit_ratio +# + +$ds_name[1] = "Cache Hit Ratio"; +$opt[1] = "--vertical-label '%' -l0 --title \"Cache Hit Ratio for $hostname / $servicedesc\" "; +$def[1] = "DEF:hit_ratio=".$RRD['hit_ratio']." "; +$def[1] .= "DEF:prefetch_data_hit_ratio=".$RRD['prefetch_data_hit_ratio']." "; +$def[1] .= "DEF:prefetch_metadata_hit_ratio=".$RRD['prefetch_metadata_hit_ratio']." "; +$def[1] .= "LINE:hit_ratio#408000:\"Hit Ratio \" "; +$def[1] .= "GPRINT:hit_ratio:LAST:\"last %2.2lf %%\" "; +$def[1] .= "GPRINT:hit_ratio:AVERAGE:\"avg %2.2lf %%\" "; +$def[1] .= "GPRINT:hit_ratio:MIN:\"min %2.2lf %%\" "; +$def[1] .= "GPRINT:hit_ratio:MAX:\"max %2.2lf %%\\n\" "; +$def[1] .= "LINE:prefetch_data_hit_ratio#000080:\"Prefetch Data \" "; +$def[1] .= "GPRINT:prefetch_data_hit_ratio:LAST:\"last %2.2lf %%\" "; +$def[1] .= "GPRINT:prefetch_data_hit_ratio:AVERAGE:\"avg %2.2lf %%\" "; +$def[1] .= "GPRINT:prefetch_data_hit_ratio:MIN:\"min %2.2lf %%\" "; +$def[1] .= "GPRINT:prefetch_data_hit_ratio:MAX:\"max %2.2lf %%\\n\" "; +$def[1] .= "LINE:prefetch_metadata_hit_ratio#800000:\"Prefetch Metadata\" "; +$def[1] .= "GPRINT:prefetch_metadata_hit_ratio:LAST:\"last %2.2lf %%\" "; +$def[1] .= "GPRINT:prefetch_metadata_hit_ratio:AVERAGE:\"avg %2.2lf %%\" "; +$def[1] .= "GPRINT:prefetch_metadata_hit_ratio:MIN:\"min %2.2lf %%\" "; +$def[1] .= "GPRINT:prefetch_metadata_hit_ratio:MAX:\"max %2.2lf %%\\n\" "; + +# +# size +# + +$ds_name[2] = "Cache Size"; +$opt[2] = "--vertical-label 'Bytes' -l0 --title \"Cache Size for $hostname / $servicedesc\" "; +$def[2] = "DEF:size=".$RRD['size']." "; +$def[2] .= "AREA:size#408000:\"Cache Size\" "; +$def[2] .= "LINE:size#000000 "; +$def[2] .= "GPRINT:size:LAST:\"last %2.0lf Bytes\" "; +$def[2] .= "GPRINT:size:AVERAGE:\"avg %2.0lf Bytes\\n\" "; +$def[2] .= "GPRINT:size:MIN:\" min %2.0lf Bytes\" "; +$def[2] .= "GPRINT:size:MAX:\"max %2.0lf Bytes\\n\" "; + +# +# arc meta +# + +if( isset($RRD['arc_meta_used']) and isset($RRD['arc_meta_limit']) and isset($RRD['arc_meta_max'])) { + $ds_name[3] = "Arc Meta"; + $opt[3] = "--vertical-label 'Bytes' -l0 --title \"Arc Meta for $hostname / $servicedesc\" "; + $def[3] = "DEF:arc_meta_used=".$RRD['arc_meta_used']." "; + $def[3] .= "DEF:arc_meta_limit=".$RRD['arc_meta_limit']." "; + $def[3] .= "DEF:arc_meta_max=".$RRD['arc_meta_max']." 
"; + $def[3] .= "LINE:arc_meta_used#408000:\"used \" "; + $def[3] .= "GPRINT:arc_meta_used:LAST:\"last %2.0lf Bytes\" "; + $def[3] .= "GPRINT:arc_meta_used:AVERAGE:\"avg %2.0lf Bytes\\n\" "; + $def[3] .= "LINE:arc_meta_limit#000080:\"limit \" "; + $def[3] .= "GPRINT:arc_meta_limit:LAST:\"last %2.0lf Bytes\" "; + $def[3] .= "GPRINT:arc_meta_limit:AVERAGE:\"avg %2.0lf Bytes\\n\" "; + $def[3] .= "LINE:arc_meta_max#800000:\"max \" "; + $def[3] .= "GPRINT:arc_meta_max:LAST:\"last %2.0lf Bytes\" "; + $def[3] .= "GPRINT:arc_meta_max:AVERAGE:\"avg %2.0lf Bytes\\n\" "; +} + +?> diff -Nru check-mk-1.2.2p3/check_mk-zfsget.php check-mk-1.2.6p12/check_mk-zfsget.php --- check-mk-1.2.2p3/check_mk-zfsget.php 2013-11-05 09:22:37.000000000 +0000 +++ check-mk-1.2.6p12/check_mk-zfsget.php 2015-09-21 10:59:54.000000000 +0000 @@ -6,7 +6,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. @@ -25,10 +25,23 @@ setlocale(LC_ALL, "POSIX"); +// Make data sources available via names +$RRD = array(); +foreach ($NAME as $i => $n) { + $RRD[$n] = "$RRDFILE[$i]:$DS[$i]:MAX"; + $RRD_MIN[$n] = "$RRDFILE[$i]:$DS[$i]:MIN"; + $RRD_AVG[$n] = "$RRDFILE[$i]:$DS[$i]:AVERAGE"; + $WARN[$n] = $WARN[$i]; + $CRIT[$n] = $CRIT[$i]; + $MIN[$n] = $MIN[$i]; + $MAX[$n] = $MAX[$i]; + $ACT[$n] = $ACT[$i]; +} + # RRDtool Options #$servicedes=$NAGIOS_SERVICEDESC -$fsname = str_replace("_", "/", substr($servicedesc, 3)); +$fsname = str_replace("_", "/", substr($servicedesc,11)); $fstitle = $fsname; # Hack for windows: replace C// with C:\ @@ -47,57 +60,98 @@ $opt[1] = "--vertical-label GB -l 0 -u $maxgb --title '$hostname: Filesystem $fstitle ($sizegb GB)' "; # First graph show current filesystem usage -$def[1] = "DEF:mb=$RRDFILE[1]:$DS[1]:MAX "; +$def[1] = "DEF:mb=$RRDFILE[1]:$DS[1]:MAX "; $def[1] .= "CDEF:var1=mb,1024,/ "; -$def[1] .= "AREA:var1#00ffc6:\"used space on $fsname\\n\" "; -$def[1] .= "LINE1:var1#226600: "; +$def[1] .= "AREA:var1#00ffc6:\"used space on $fsname\\n\" "; + +# Optional uncommitted usage e.g. for esx hosts +if(isset($RRD['uncommitted'])) { + $def[1] .= "DEF:uncommitted_mb=".$RRD['uncommitted']." "; + $def[1] .= "CDEF:uncommitted_gb=uncommitted_mb,1024,/ "; + $def[1] .= "CDEF:total_gb=uncommitted_gb,var1,+ "; +} else { + $def[1] .= "CDEF:total_gb=var1 "; +} + $def[1] .= "HRULE:$maxgb#003300:\"Size ($sizegb GB) \" "; $def[1] .= "HRULE:$warngb#ffff00:\"Warning at $warngbtxt GB \" "; $def[1] .= "HRULE:$critgb#ff0000:\"Critical at $critgbtxt GB \\n\" "; $def[1] .= "GPRINT:var1:LAST:\"current\: %6.2lf GB\" "; $def[1] .= "GPRINT:var1:MAX:\"max\: %6.2lf GB \" "; -$def[1] .= "GPRINT:var1:AVERAGE:\"avg\: %6.2lf GB\" "; +$def[1] .= "GPRINT:var1:AVERAGE:\"avg\: %6.2lf GB\\n\" "; + +if(isset($RRD['uncommitted'])) { + $def[1] .= "AREA:uncommitted_gb#eeccff:\"Uncommited\":STACK "; + $def[1] .= "GPRINT:uncommitted_gb:MAX:\"%6.2lf GB\l\" "; +} + +$def[1] .= "LINE1:total_gb#226600 "; # Second graph is optional and shows trend. The MAX field # of the third variable contains (size of the filesystem in MB # / range in hours). From that we can compute the configured range. 
-if (isset($DS[2])) {
-    $size_mb_per_hours = floatval($MAX[3]); // this is size_mb / range(hours)
+if (isset($RRD['growth'])) {
+    $size_mb_per_hours = floatval($MAX['trend']); // this is size_mb / range(hours)
     $size_mb = floatval($MAX[1]);
     $hours = 1.0 / ($size_mb_per_hours / $size_mb);
     $range = sprintf("%.0fh", $hours);
 
-    // Current growth / shrinking. This value is give as MB / 24 hours.
+    // Current growth / shrinking. This value is given as MB / 24 hours.
     // Note: This has changed in 1.1.13i3. Prior it was MB / trend_range!
     $opt[2] = "--vertical-label '+/- MB / 24h' -l -1 -u 1 -X0 --title '$hostname: Growth of $fstitle' ";
-    $def[2] = "DEF:growth_max=$RRDFILE[2]:$DS[2]:MAX ";
-    $def[2] .= "DEF:growth_min=$RRDFILE[2]:$DS[2]:MIN ";
-    $def[2] .= "DEF:trend=$RRDFILE[3]:$DS[3]:AVERAGE ";
+    $def[2] = "DEF:growth_max=${RRD['growth']} ";
+    $def[2] .= "DEF:growth_min=${RRD_MIN['growth']} ";
+    $def[2] .= "DEF:trend=${RRD_AVG['trend']} ";
     $def[2] .= "CDEF:growth_pos=growth_max,0,MAX ";
     $def[2] .= "CDEF:growth_neg=growth_min,0,MIN ";
-    $def[2] .= "CDEF:growth_minabs=0,growth_min,- ";
+    $def[2] .= "CDEF:growth_minabs=0,growth_min,- ";
     $def[2] .= "CDEF:growth=growth_minabs,growth_max,MAX ";
     $def[2] .= "HRULE:0#c0c0c0 ";
-    $def[2] .= "AREA:growth_pos#3060f0:\"Grow\" ";
-    $def[2] .= "AREA:growth_neg#30f060:\"Shrink \" ";
+    $def[2] .= "AREA:growth_pos#3060f0:\"Grow\" ";
+    $def[2] .= "AREA:growth_neg#30f060:\"Shrink \" ";
     $def[2] .= "GPRINT:growth:LAST:\"Current\: %+9.2lfMB / 24h\" ";
     $def[2] .= "GPRINT:growth:MAX:\"Max\: %+9.2lfMB / 24h\\n\" ";
 
     // Trend
     $opt[3] = "--vertical-label '+/- MB / 24h' -l -1 -u 1 -X0 --title '$hostname: Trend for $fstitle' ";
-    $def[3] = "DEF:trend=$RRDFILE[3]:$DS[3]:AVERAGE ";
+    $def[3] = "DEF:trend=${RRD_AVG['trend']} ";
     $def[3] .= "HRULE:0#c0c0c0 ";
     $def[3] .= "LINE1:trend#000000:\"Trend\:\" ";
-    $def[3] .= "GPRINT:trend:LAST:\"%+7.2lf MB/24h\" ";
-    if ($WARN[3]) {
-        $warn_mb = sprintf("%.2fMB", $WARN[3] * $hours / 24.0);
-        $def[3] .= "LINE1:$WARN[3]#ffff00:\"Warn\: $warn_mb / $range\" ";
+    $def[3] .= "GPRINT:trend:LAST:\"%+7.2lf MB/24h\" ";
+    if ($WARN['trend']) {
+        $warn_mb = sprintf("%.2fMB", $WARN['trend'] * $hours / 24.0);
+        $def[3] .= "LINE1:${WARN['trend']}#ffff00:\"Warn\: $warn_mb / $range\" ";
     }
-    if ($CRIT[3]) {
-        $crit_mb = sprintf("%.2fMB", $CRIT[3] * $hours / 24.0);
-        $def[3] .= "LINE1:$CRIT[3]#ff0000:\"Crit\: $crit_mb / $range\" ";
+    if ($CRIT['trend']) {
+        $crit_mb = sprintf("%.2fMB", $CRIT['trend'] * $hours / 24.0);
+        $def[3] .= "LINE1:${CRIT['trend']}#ff0000:\"Crit\: $crit_mb / $range\" ";
     }
     $def[3] .= "COMMENT:\"\\n\" ";
 }
 
+if (isset($RRD['trend_hoursleft'])) {
+    // Trend
+    $opt[4] = "--vertical-label 'Days left' -l -1 -u 365 -X0 --title '$hostname: Days left for $fstitle' ";
+    $def[4] = "DEF:hours_left=${RRD_AVG['trend_hoursleft']} ";
+    $def[4] .= "DEF:hours_left_min=${RRD_MIN['trend_hoursleft']} ";
+    // negative hours indicate no growth
+    // the dataset hours_left_isneg stores this info for each point as True/False
+    $def[4] .= "CDEF:hours_left_isneg=hours_left_min,-1,EQ ";
+    $def[4] .= "CDEF:hours_left_unmon=hours_left_min,400,0,IF ";
+    $def[4] .= "CDEF:days_left=hours_left,24,/ ";
+    $def[4] .= "CDEF:days_left_cap=days_left,400,MIN ";
+    // Convert negative points to 400 (y-axis cap)
+    $def[4] .= "CDEF:days_left_cap_positive=hours_left_isneg,400,days_left_cap,IF ";
+    // The AREA has a rendering problem. 
Points are too far to the right + $def[4] .= "AREA:hours_left_unmon#AA2200: "; + + $def[4] .= "AREA:days_left_cap_positive#22AA44:\"Days left\:\" "; + if ($ACT[4] == -1) + { + $def[4] .= "COMMENT:\"Not growing\" "; + } + else { + $def[4] .= "GPRINT:days_left:LAST:\"%7.2lf days\" "; + } +} ?> diff -Nru check-mk-1.2.2p3/check_notify_count check-mk-1.2.6p12/check_notify_count --- check-mk-1.2.2p3/check_notify_count 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/check_notify_count 2015-06-24 09:48:36.000000000 +0000 @@ -0,0 +1,43 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. + +def check_notify_count_arguments(params): + description, interval, settings = params + args = '-r %d' % interval + + if "num_per_contact" in settings: + args += ' -w %s -c %s' % settings['num_per_contact'] + + return args + + +active_check_info['notify_count'] = { + "command_line" : '$USER1$/check_notify_count $ARG1$', + "argument_function" : check_notify_count_arguments, + "service_description" : lambda params: "Notify %s" % params[0], + "has_perfdata" : True, +} + diff -Nru check-mk-1.2.2p3/checkpoint_connections check-mk-1.2.6p12/checkpoint_connections --- check-mk-1.2.2p3/checkpoint_connections 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/checkpoint_connections 2015-09-21 10:59:54.000000000 +0000 @@ -0,0 +1,56 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. 
See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. + +# .1.3.6.1.2.1.1.1.0 Linux gateway1 2.6.18-92cp #1 SMP Tue Dec 4 21:44:22 IST 2012 i686 +# .1.3.6.1.4.1.2620.1.1.25.3.0 19190 + +checkpoint_connections_default_levels = (40000, 50000) + +def check_checkpoint_connections(item, params, info): + warn, crit = params + current = saveint(info[0][0]) + state = 0 + icon = '' + if current >= warn: + state = 1 + if current >= crit: + state = 2 + + perfdata = [("connections", current, warn, crit)] + infotext = "%d Current Connections (levels at %d/%d)" % (current, warn, crit) + yield state, infotext, perfdata + +check_info["checkpoint_connections"] = { + "check_function" : check_checkpoint_connections, + "inventory_function" : lambda info: [(None, "checkpoint_connections_default_levels")], + "service_description" : "Connections", + "has_perfdata" : True, + "group" : "checkpoint_connections", + "snmp_scan_function" : scan_checkpoint, + "snmp_info" : ( ".1.3.6.1.4.1.2620.1.1.25", [ 3 ]), + "includes" : [ "checkpoint.include" ], +} + diff -Nru check-mk-1.2.2p3/checkpoint.include check-mk-1.2.6p12/checkpoint.include --- check-mk-1.2.2p3/checkpoint.include 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/checkpoint.include 2015-06-24 09:48:36.000000000 +0000 @@ -0,0 +1,37 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. 
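
A minimal sketch (not part of the patch) of what check_checkpoint_connections above yields for the sample SNMP value in its header comment. saveint is stubbed here with the int-or-zero behaviour the Check_MK core normally provides to checks.

# Stand-in for the Check_MK helper: int() that falls back to 0.
def saveint(value):
    try:
        return int(value)
    except (TypeError, ValueError):
        return 0

info = [["19190"]]  # value of .1.3.6.1.4.1.2620.1.1.25.3.0
for state, infotext, perfdata in check_checkpoint_connections(None, (40000, 50000), info):
    print state, infotext, perfdata
# 0 19190 Current Connections (levels at 40000/50000) [('connections', 19190, 40000, 50000)]
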
+ + +def scan_checkpoint(oid): + sys_descr = oid(".1.3.6.1.2.1.1.1.0") + return ( + # Entry in sysDescr, varies a bit + (len(sys_descr.split(" ")) >= 3 and sys_descr.split(" ")[2].endswith("cp")) or + sys_descr.startswith("IPSO ") or + (sys_descr.startswith("Linux") and "cpx" in + sys_descr)) \ + and \ + oid(".1.3.6.1.4.1.2620.1.1.21.0").lower().startswith('firewall') diff -Nru check-mk-1.2.2p3/checkpoint_packets check-mk-1.2.6p12/checkpoint_packets --- check-mk-1.2.2p3/checkpoint_packets 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/checkpoint_packets 2015-09-21 10:59:54.000000000 +0000 @@ -0,0 +1,83 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. 
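
How the scan function above decides that a device is a Check Point firewall, sketched with oid() stubbed by a dict lookup. The sysDescr value is the sample from the checkpoint_connections comment; the product string for the second OID is an assumed example, the code only requires it to start with "firewall".

values = {
    ".1.3.6.1.2.1.1.1.0": "Linux gateway1 2.6.18-92cp #1 SMP Tue Dec 4 21:44:22 IST 2012 i686",
    ".1.3.6.1.4.1.2620.1.1.21.0": "FireWall-1",  # assumed product name
}
oid = lambda o: values.get(o, "")
# True: the third sysDescr word "2.6.18-92cp" ends in "cp" and the
# product name starts with "firewall" (compared case-insensitively)
print bool(scan_checkpoint(oid))
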
+ +# .1.3.6.1.2.1.1.1.0 Linux gateway1 2.6.18-92cp #1 SMP Tue Dec 4 21:44:22 IST 2012 i686 +# .1.3.6.1.4.1.2620.1.1.4.0 131645 +# .1.3.6.1.4.1.2620.1.1.5.0 0 +# .1.3.6.1.4.1.2620.1.1.6.0 1495 +# .1.3.6.1.4.1.2620.1.1.7.0 16297 + +factory_settings["checkpoint_packets_default_levels"] = { + "accepted": (100000, 200000), + "rejected": (100000, 200000), + "dropped": (100000, 200000), + "logged": (100000, 200000), +} + +def inventory_checkpoint_packets(info): + if len(info[0]) == 4: + return [ (None, "checkpoint_packets_default_levels" ) ] + +def check_checkpoint_packets(item, params, info): + if info and len(info[0]) == 4: + value = {} + value["accepted"] = int(info[0][0]) + value["rejected"] = int(info[0][1]) + value["dropped"] = int(info[0][2]) + value["logged"] = int(info[0][3]) + + this_time = time.time() + state = 0 + + for name in value.keys(): + warn, crit = params.get(name, (None, None)) + rate = get_rate(name, this_time, value[name]) + infotext = "%s: %.1f pkts/s" % ( name, rate ) + if rate >= crit: + state = 2 + elif rate >= warn: + state = 1 + perfdata = [ (name, rate, warn, crit, 0) ] + yield state, infotext, perfdata + + +check_info["checkpoint_packets"] = { + "check_function" : check_checkpoint_packets, + "inventory_function" : inventory_checkpoint_packets, + "service_description" : "Packet Statistics", + "has_perfdata" : True, + "group" : "checkpoint_packets", + "snmp_scan_function" : scan_checkpoint, + "default_levels_variable" : "checkpoint_packets_default_levels", + "snmp_info" : ( ".1.3.6.1.4.1.2620.1.1", + [ 4, # fwAccepted + 5, # fwRejected + 6, # fwDropped + 7, # fwLogged + ]), + "includes" : [ "checkpoint.include" ], +} + diff -Nru check-mk-1.2.2p3/check_smtp check-mk-1.2.6p12/check_smtp --- check-mk-1.2.2p3/check_smtp 2013-11-05 09:23:07.000000000 +0000 +++ check-mk-1.2.6p12/check_smtp 2015-09-21 10:59:54.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. @@ -32,7 +32,8 @@ args += ' -e %s' % quote_shell_string(settings["expect"]) if "port" in settings: - args += ' -p %s' % quote_shell_string(settings["port"]) + port = int(settings["port"]) # ValueSpec was broken, convert to int + args += ' -p %d' % port if "ip_version" in settings: if settings['ip_version'] == 'ipv4': diff -Nru check-mk-1.2.2p3/check_sql check-mk-1.2.6p12/check_sql --- check-mk-1.2.2p3/check_sql 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/check_sql 2015-06-24 09:48:36.000000000 +0000 @@ -0,0 +1,74 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. 
check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. + +#active_checks['sql'] = [ +# ( {'dbms': 'postgres', 'description': u'SQL Test', 'user': 'golla', 'sql': 'testerer', 'password': 'toor', 'procedure': {'useprocs': True, 'input': '1223,456'}, 'name': 'messpc'}, [], ALL_HOSTS ), +# ] + active_checks['sql'] + +def check_sql_arguments(params): + args = " --hostname '$HOSTADDRESS$'" + args += " --dbms %s" % quote_shell_string(params["dbms"]) + args += " --name %s" % quote_shell_string(params["name"]) + args += " --user '%s'" % params["user"] + args += " --password '%s'" % params["password"] + + if "port" in params: + args += " --port %s" % params["port"] + + if "procedure" in params: + if "procedure" in params and "useprocs" in params["procedure"]: + args += " --procedure" + if "input" in params["procedure"]: + args += " --inputvars %s" \ + % quote_shell_string(params["procedure"]["input"]) + + if "levels" in params: + upper = params["levels"] + else: + upper = "", "" + + if "levels_low" in params: + lower = params["levels_low"] + else: + lower = "", "" + + if "levels" in params or "levels_low" in params: + args += " -w %s:%s" % (lower[0], upper[0]) + args += " -c %s:%s" % (lower[1], upper[1]) + + sql_tmp = params["sql"].replace("\n", r"\n").replace(";", "\;") + args += " %s" % quote_shell_string(sql_tmp) + + return args + + +active_check_info['sql'] = { + "command_line" : '$USER1$/check_sql $ARG1$', + "argument_function" : check_sql_arguments, + "service_description" : lambda args: args["description"], + "has_perfdata" : True, +} + diff -Nru check-mk-1.2.2p3/check_ssh check-mk-1.2.6p12/check_ssh --- check-mk-1.2.2p3/check_ssh 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/check_ssh 2015-06-24 09:48:36.000000000 +0000 @@ -0,0 +1,48 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. 
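
A rough example of the command line the check_sql argument builder above produces. The params dict is invented for illustration, and quote_shell_string is stubbed with a simplified single-quoting stand-in for the helper Check_MK provides.

def quote_shell_string(s):  # simplified stand-in
    return "'%s'" % s.replace("'", "'\"'\"'")

params = {"dbms": "postgres", "name": "mydb", "user": "monitor",
          "password": "secret", "sql": "SELECT count(*) FROM t;",
          "description": "SQL Test", "levels": (10, 20)}
print check_sql_arguments(params)
#  --hostname '$HOSTADDRESS$' --dbms 'postgres' --name 'mydb' --user 'monitor'
#  --password 'secret' -w :10 -c :20 'SELECT count(*) FROM t\;'
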
+ +def check_ssh_arguments(params): + args = [] + if 'timeout' in params: + args.append("-t " + str(params['timeout'])) + if 'port' in params: + args.append("-p " + str(params['port'])) + if 'remote_version' in params: + args.append("-r " + quote_shell_string(params['remote_version'])) + if 'remote_protocol' in params: + args.append("-P " + quote_shell_string(params['remote_protocol'])) + + args.append("$HOSTADDRESS$") + + return " ".join(args) + +active_check_info['ssh'] = { + "command_line" : '$USER1$/check_ssh $ARG1$', + "argument_function" : check_ssh_arguments, + "service_description" : lambda args: "SSH", + "has_perfdata" : True, +} + Binary files /tmp/tmpz1J_py/xquijR6Ryw/check-mk-1.2.2p3/checks.tar.gz and /tmp/tmpz1J_py/GXS8RRKTxu/check-mk-1.2.6p12/checks.tar.gz differ diff -Nru check-mk-1.2.2p3/check_tcp check-mk-1.2.6p12/check_tcp --- check-mk-1.2.2p3/check_tcp 2013-11-05 09:23:07.000000000 +0000 +++ check-mk-1.2.6p12/check_tcp 2015-06-24 09:48:36.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. @@ -39,12 +39,12 @@ if "refuse_state" in settings: args += ' -r %s' % settings["refuse_state"] - if "send_string" in settings: - args += ' -s %s' % quote_shell_string(settings["send_string"]) - if settings.get("escape_send_string"): args += ' --escape' + if "send_string" in settings: + args += ' -s %s' % quote_shell_string(settings["send_string"]) + if "expect" in settings: for s in settings["expect"]: args += ' -e %s' % quote_shell_string(s) @@ -76,7 +76,7 @@ if "hostname" in settings: args += ' -H %s' % quote_shell_string(settings["hostname"]) else: - args += ' -H $HOSTADDRESS$' + args += " -H '$HOSTADDRESS$'" return args @@ -84,7 +84,7 @@ active_check_info['tcp'] = { "command_line" : '$USER1$/check_tcp $ARG1$', "argument_function" : check_tcp_arguments, - "service_description" : lambda args: "TCP Port %d" % args[0], + "service_description" : lambda args: args[1].get("svc_description", "TCP Port %d" % args[0]), "has_perfdata" : True, } diff -Nru check-mk-1.2.2p3/check_traceroute check-mk-1.2.6p12/check_traceroute --- check-mk-1.2.2p3/check_traceroute 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/check_traceroute 2015-06-24 09:48:36.000000000 +0000 @@ -0,0 +1,50 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. 
You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. + +def check_traceroute_arguments(params): + args = "" + if not params["dns"]: + args += "-n " + method = params["method"] + if method == "icmp": + args += "-I " + elif method == "tcp": + args += "-T " + # else: None -> default method + + for router, state in params["routers"]: + args += "-%s %s " % (state, quote_shell_string(router)) + args += '"$HOSTADDRESS$"' + return args + + +active_check_info['traceroute'] = { + "command_line" : '$USER1$/check_traceroute $ARG1$', + "argument_function" : check_traceroute_arguments, + "service_description" : lambda params: "Routing", + "has_perfdata" : False, +} + diff -Nru check-mk-1.2.2p3/check_uniserv check-mk-1.2.6p12/check_uniserv --- check-mk-1.2.2p3/check_uniserv 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/check_uniserv 2015-06-24 09:48:36.000000000 +0000 @@ -0,0 +1,63 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. 
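
A sketch of check_traceroute_arguments above with an invented ruleset; the single-letter router states ("W"/"C" here) are only an assumption about what the WATO rule stores, and quote_shell_string is again stubbed as plain single-quoting.

def quote_shell_string(s):  # simplified stand-in
    return "'%s'" % s

params = {"dns": False, "method": "icmp",
          "routers": [("10.1.1.1", "W"), ("10.1.1.2", "C")]}
print check_traceroute_arguments(params)
# -n -I -W '10.1.1.1' -C '10.1.1.2' "$HOSTADDRESS$"
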
+ +def check_uniserv_arguments(params): + args = " $HOSTADDRESS$" + args += " " + str(params['port']) + args += " " + quote_shell_string(params['service']) + if type(params['job']) == tuple: + job = params['job'][0] + else: + job = params['job'] + if job == "version": + args += " VERSION" + else: + address = params['job'][1] + args += " ADDRESS" + args += " " + quote_shell_string(address['street']) + args += " " + str(address['street_no']) + args += " " + quote_shell_string(address['city']) + args += " " + quote_shell_string(address['search_regex']) + + return args + +def check_uniserv_desc(params): + job = params['job'] + if type(job) == tuple: + job = job[0] + + if job == "version": + return "Uniserv %s Version" % params['service'] + else: + return "Uniserv %s Address %s " % (params['service'], params['job'][1]['city']) + + +active_check_info['uniserv'] = { + "command_line" : '$USER1$/check_uniserv $ARG1$', + "argument_function" : check_uniserv_arguments, + "service_description" : check_uniserv_desc, +} + diff -Nru check-mk-1.2.2p3/chrony check-mk-1.2.6p12/chrony --- check-mk-1.2.2p3/chrony 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/chrony 2015-09-21 10:59:54.000000000 +0000 @@ -0,0 +1,96 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. 
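
The two job forms check_uniserv_desc above distinguishes, sketched with invented params; the tuple tag "address" is an assumption, since the code only special-cases "version".

print check_uniserv_desc({"service": "UDS", "job": "version"})
# Uniserv UDS Version
print check_uniserv_desc({"service": "UDS",
                          "job": ("address", {"street": "Musterweg", "street_no": 1,
                                              "city": "Muenchen", "search_regex": "M.*"})})
# Uniserv UDS Address Muenchen
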
+
+ntp_default_levels = (10, 200.0, 500.0) # stratum, ms offset
+
+# Example output from agent:
+# <<<chrony>>>
+# Reference ID : 212.18.3.18 (ntp1.m-online.net)
+# Stratum : 3
+# Ref time (UTC) : Tue Aug 19 16:56:21 2014
+# System time : 0.000000353 seconds fast of NTP time
+# Frequency : 10.725 ppm slow
+# Residual freq : 195.475 ppm
+# Skew : 10.639 ppm
+# Root delay : 0.027455 seconds
+# Root dispersion : 0.024512 seconds
+
+def parse_chrony(info):
+    parsed = {}
+    for line in info:
+        varname, value = " ".join(line).split(":", 1)
+        parsed[varname.strip()] = value.strip()
+    return parsed
+
+# We monitor all servers we have reached at least once
+def inventory_chrony(info):
+    parsed = parse_chrony(info)
+    if parsed:
+        return [(None, "ntp_default_levels")]
+
+
+def check_chrony(_no_item, params, info):
+    parsed = parse_chrony(info)
+    if not parsed:
+        yield 2, "No status information, chronyd probably not running"
+        return
+
+    # Prepare parameters
+    crit_stratum, warn, crit = params
+
+    # Check offset and stratum, output a few info texts
+    offset = float(parsed["System time"].split()[0]) * 1000 # converted to ms
+    stratum = int(parsed["Stratum"])
+
+    # Check stratum
+    infotext = "stratum %d" % stratum
+    if stratum >= crit_stratum:
+        yield 2, infotext + " (maximum allowed is %d)" % (crit_stratum - 1)
+    else:
+        yield 0, infotext
+
+    # Check offset
+    status = 0
+    infotext = "offset %.4f ms" % offset
+    if abs(offset) >= crit:
+        status = 2
+    elif abs(offset) >= warn:
+        status = 1
+    if status:
+        infotext += " (levels at %.4f/%.4f ms)" % (warn, crit)
+    yield status, infotext, [ ("offset", offset, warn, crit, 0, None) ]
+
+    # Show additional information
+    yield 0, "reference: %s" % parsed["Reference ID"]
+
+
+check_info["chrony"] = {
+    'check_function': check_chrony,
+    'inventory_function': inventory_chrony,
+    'service_description': 'NTP Time',
+    'has_perfdata': True,
+    'group': 'ntp_time',
+}
diff -Nru check-mk-1.2.2p3/cifsmounts check-mk-1.2.6p12/cifsmounts
--- check-mk-1.2.2p3/cifsmounts 1970-01-01 00:00:00.000000000 +0000
+++ check-mk-1.2.6p12/cifsmounts 2015-06-24 09:48:36.000000000 +0000
@@ -0,0 +1,39 @@
+#!/usr/bin/python
+# -*- encoding: utf-8; py-indent-offset: 4 -*-
+# +------------------------------------------------------------------+
+# | ____ _ _ __ __ _ __ |
+# | / ___| |__ ___ ___| | __ | \/ | |/ / |
+# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / |
+# | | |___| | | | __/ (__| < | | | | . \ |
+# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ |
+# | |
+# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de |
+# +------------------------------------------------------------------+
+#
+# This file is part of Check_MK.
+# The official homepage is at http://mathias-kettner.de/check_mk.
+#
+# check_mk is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by
+# the Free Software Foundation in version 2. check_mk is distributed
+# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with-
+# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE. See the GNU General Public License for more de-
+# ails. You should have received a copy of the GNU General Public
+# License along with GNU Make; see the file COPYING. If not, write
+# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
+# Boston, MA 02110-1301 USA.
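
What parse_chrony above makes of two agent lines: Check_MK hands the <<<chrony>>> section to the check pre-split on whitespace, so every line arrives as a list of words that the parser re-joins and splits once at the first colon.

info = [["Stratum", ":", "3"],
        ["System", "time", ":", "0.000000353", "seconds", "fast", "of", "NTP", "time"]]
print parse_chrony(info)
# {'Stratum': '3', 'System time': '0.000000353 seconds fast of NTP time'}
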
+
+# Example output from agent:
+# <<<cifsmounts>>>
+# /foobar hanging 0 0 0 0
+# /with spaces ok 217492 123563 112515 524288
+
+
+check_info["cifsmounts"] = {
+    'check_function': check_network_fs_mounts,
+    'inventory_function': inventory_network_fs_mounts,
+    'service_description': 'CIFS mount %s',
+    'group': 'network_fs',
+    'includes': [ 'network_fs.include' ],
+}
diff -Nru check-mk-1.2.2p3/cisco_asa_failover check-mk-1.2.6p12/cisco_asa_failover
--- check-mk-1.2.2p3/cisco_asa_failover 2013-11-05 09:23:07.000000000 +0000
+++ check-mk-1.2.6p12/cisco_asa_failover 2015-09-21 10:59:54.000000000 +0000
@@ -7,7 +7,7 @@
 # | | |___| | | | __/ (__| < | | | | . \ |
 # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ |
 # | |
-# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de |
+# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de |
 # +------------------------------------------------------------------+
 #
 # This file is part of Check_MK.
@@ -46,12 +46,22 @@
         if "this device" in deviceentry[0]:
             # Return the Cluster role ID of the device.
             return [ (None, int(info[1][1])) ]
-    return []
 
 def check_cisco_asa_failover(item, params, info):
-    asa_state_names = { 9: "active", 10 : "standby" }
+    asa_state_names = {
+        1 : "other",
+        2 : "up",
+        3 : "down",
+        4 : "error",
+        5 : "overTemp",
+        6 : "busy",
+        7 : "noMedia",
+        8 : "backup",
+        9 : "active",
+        10 : "standby",
+    }
 
     for deviceentry in info[-2:]:
 
@@ -73,13 +83,10 @@
             state = 1
             errtxt = " expecting to be %s" % asa_state_names[def_role]
 
-        msgtxt = nagios_state_names[state] + " - Device is the %s" % deviceentry[2] + errtxt + state * "!"
+        msgtxt = "Device is the %s" % deviceentry[2] + errtxt + state * "!"
         return (state, msgtxt)
 
-    return (3, "UNKNOWN - Data not in SNMP output")
-
-
 check_info["cisco_asa_failover"] = {
     "check_function" : check_cisco_asa_failover,
     "inventory_function" : inventory_cisco_asa_failover,
diff -Nru check-mk-1.2.2p3/cisco_cpu check-mk-1.2.6p12/cisco_cpu
--- check-mk-1.2.2p3/cisco_cpu 2013-11-05 09:23:07.000000000 +0000
+++ check-mk-1.2.6p12/cisco_cpu 2015-06-24 09:48:36.000000000 +0000
@@ -7,7 +7,7 @@
 # | | |___| | | | __/ (__| < | | | | . \ |
 # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ |
 # | |
-# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de |
+# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de |
 # +------------------------------------------------------------------+
 #
 # This file is part of Check_MK.
@@ -24,27 +24,41 @@
 # to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
 # Boston, MA 02110-1301 USA.
+# oid(".1.3.6.1.4.1.9.9.109.1.1.1.1.5.1") is depreceated by +# oid(".1.3.6.1.4.1.9.9.109.1.1.1.1.8.1"), we recognize both for now + cisco_cpu_default_levels = (80.0, 90.0) +def inventory_cisco_cpu(info): + if info and info[0] != [None, None]: + return [( None, 'cisco_cpu_default_levels') ] + def check_cisco_cpu(item, params, info): - util = float(info[0][0]) - infotext = " - %2.1f%% utilization in last 5minutes" % util + if info[0] == [None, None]: + return 3, 'No information about the CPU utilization available' + + if info[0][1]: + util = float(info[0][1]) + else: + util = float(info[0][0]) + infotext = "%2.1f%% utilization in the last 5 minutes" % util warn, crit = params perfdata = [("util", util, warn, crit, 0, 100)] if util >= crit: - return (2, "CRIT" + infotext + " (critical at %d%%)" % crit, perfdata) + return (2, infotext + " (critical at %d%%)" % crit, perfdata) elif util >= warn: - return (1, "WARN" + infotext + " (warning at %d%%)" % warn, perfdata) + return (1, infotext + " (warning at %d%%)" % warn, perfdata) else: - return (0, "OK" + infotext, perfdata) + return (0, infotext, perfdata) check_info["cisco_cpu"] = { "check_function" : check_cisco_cpu, - "inventory_function" : lambda info: [(None, "cisco_cpu_default_levels")], + "inventory_function" : inventory_cisco_cpu, "service_description" : "CPU utilization", "has_perfdata" : True, "group" : "cpu_utilization", - "snmp_scan_function" : lambda oid: oid(".1.3.6.1.4.1.9.9.109.1.1.1.1.5.1"), - "snmp_info" : ( ".1.3.6.1.4.1.9.9.109.1.1.1.1.5", [ 1 ]), + "snmp_scan_function" : lambda oid: "cisco" in oid(".1.3.6.1.2.1.1.1.0").lower() \ + and oid(".1.3.6.1.4.1.9.9.109.1.1.1.1.*"), + "snmp_info" : ( ".1.3.6.1.4.1.9.9.109.1.1.1.1", [ 5, 8 ]), } diff -Nru check-mk-1.2.2p3/cisco_fan check-mk-1.2.6p12/cisco_fan --- check-mk-1.2.2p3/cisco_fan 2013-11-05 09:23:07.000000000 +0000 +++ check-mk-1.2.6p12/cisco_fan 2015-06-24 09:48:36.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. 
@@ -28,21 +28,27 @@ 'shutdown', 'notPresent', 'notFunctioning') def inventory_cisco_fan(info): - return [ (line[0], None) for line in info if line[1] != '5' ] + return [ (cisco_sensor_item(line[0],line[-1]), None) for line in info if line[1] != '5' ] def check_cisco_fan(item, params, info): - for line in info: - if line[0] == item: - state = saveint(line[1]) + for statustext, state, oid_end in info: + if cisco_sensor_item(statustext, oid_end) == item: + state = int(state) if state == 1: - return (0, "OK (State is: %s (%d))" % (cisco_fan_states[state], state)) + return (0, "State is: %s (%d)" % (cisco_fan_states[state], state)) elif state == 2: - return (1, "WARNING (state is %s (%d))" % (cisco_fan_states[state], state)) + return (1, "State is %s (%d)" % (cisco_fan_states[state], state)) else: - return (2, "CRITICAL (state is %s (%d))" % (cisco_fan_states[state], state)) - return (3, "UNKNOWN - item not found in snmp data") + return (2, "State is %s (%d)" % (cisco_fan_states[state], state)) + return (3, "item not found in snmp data") -check_info['cisco_fan'] = (check_cisco_fan, "FAN %s", 0, inventory_cisco_fan) -snmp_info['cisco_fan'] = ( ".1.3.6.1.4.1.9.9.13.1.4.1", [ "2", "3" ] ) -snmp_scan_functions['cisco_fan'] = \ - lambda oid: "cisco" in oid(".1.3.6.1.2.1.1.1.0").lower() + +check_info["cisco_fan"] = { + 'check_function': check_cisco_fan, + 'inventory_function': inventory_cisco_fan, + 'service_description': 'FAN %s', + 'snmp_info': ('.1.3.6.1.4.1.9.9.13.1.4.1', ['2', '3', OID_END]), + 'snmp_scan_function': \ + lambda oid: "cisco" in oid(".1.3.6.1.2.1.1.1.0").lower(), + "includes" : ['cisco_sensor_item.include'], +} diff -Nru check-mk-1.2.2p3/cisco_fantray check-mk-1.2.6p12/cisco_fantray --- check-mk-1.2.2p3/cisco_fantray 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/cisco_fantray 2015-06-24 09:48:36.000000000 +0000 @@ -0,0 +1,60 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. 
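
A sketch of the new item handling in cisco_fan above. cisco_sensor_item comes from the include file whose diff is truncated at the end of this section; it is stubbed here to return the status description unchanged, which is an assumption, not its real behaviour.

def cisco_sensor_item(statustext, oid_end):  # simplified stand-in
    return statustext

info = [["Fan 1", "1", "1014"], ["Fan 2", "3", "1015"]]
print inventory_cisco_fan(info)
# [('Fan 1', None), ('Fan 2', None)]
print check_cisco_fan("Fan 2", None, info)
# (2, 'State is critical (3)')
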
+ + +cisco_fantray_state_names = { + '1': (3, "status is reported as 'unknown'"), + '2': (0, "powered on"), + '3': (2, "powered down"), + '4': (2, "partial failure, needs replacement as soon as possible.") +} + +def inventory_cisco_fantray(info): + inventory = [] + for fan_id, state in info: + if state != '3': # skip fans reported as down (might be missing) + inventory.append((fan_id, None)) + return inventory + +def check_cisco_fantray(item, _no_params, info): + for fan_id, state in info: + if fan_id == item: + result, state_name = \ + cisco_fantray_state_names.get(state, (3, "unexpected(%s)" % state)) + return result, state_name + + return (3, "No information about fan with this ID") + + +check_info["cisco_fantray"] = { + 'check_function' : check_cisco_fantray, + 'inventory_function' : inventory_cisco_fantray, + 'service_description' : 'FAN %s', + 'snmp_info' : ('.1.3.6.1.4.1.9.9.117.1.4.1.1.1', [ OID_END, '' ]), + 'snmp_scan_function' : lambda oid: "cisco" in oid(".1.3.6.1.2.1.1.1.0").lower() \ + # Exclude cisco_fan check: + and not oid(".1.3.6.1.4.1.9.9.13.1.4.1.2.*"), +} diff -Nru check-mk-1.2.2p3/cisco_fru_power check-mk-1.2.6p12/cisco_fru_power --- check-mk-1.2.2p3/cisco_fru_power 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/cisco_fru_power 2015-06-24 09:48:36.000000000 +0000 @@ -0,0 +1,92 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. 
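
How the cisco_fantray pair above behaves, sketched with made-up OID ends: trays reported as down ("3") are skipped at discovery time but are still evaluated if an item for them already exists.

info = [["1009", "2"], ["1010", "4"], ["1011", "3"]]
print inventory_cisco_fantray(info)
# [('1009', None), ('1010', None)]
print check_cisco_fantray("1010", None, info)
# (2, 'partial failure, needs replacement as soon as possible.')
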
+
+# FRU = Field Replaceable Unit
+
+cisco_fru_poweradmin_types = {
+    '1': ( 'on', 0 ),
+    '2': ( 'off', 2 ),
+    '3': ( 'inlineAuto', 1 ),
+    '4': ( 'inlineOn', 1 ),
+    '5': ( 'powerCycle', 1 ),
+}
+
+cisco_fru_poweroper_types = {
+    '1' : ('offEnvOther', 1 ),
+    '2' : ('on', 0 ),
+    '3' : ('offAdmin', 1 ),
+    '4' : ('offDenied', 2 ),
+    '5' : ('offEnvPower', 2 ),
+    '6' : ('offEnvTemp', 2 ),
+    '7' : ('offEnvFan', 2 ),
+    '8' : ('failed', 2 ),
+    '9' : ('onButFanFail', 1 ),
+    '10': ('offCooling', 1 ),
+    '11': ('offConnectorRating', 1 ),
+    '12': ('onButInlinePowerFail', 2 ),
+}
+
+
+def inventory_cisco_fru_power(info):
+    # Monitor all devices except those
+    # - with incomplete SNMP data
+    # - in state 1:offEnvOther
+    # - in state 5:offEnvPower
+    return [ (line[0], None)
+             for line in info
+             if line[2] not in ('', '0', '1', '5') ]
+
+
+def check_cisco_fru_power(item, _no_params, info):
+    for oid_end, admin_state, oper_state in info:
+        if oid_end == item:
+            worst_state = 0
+            infotexts = []
+
+            for (name, state), title in [
+                ( cisco_fru_poweroper_types[oper_state], "Operational state" ),
+                ( cisco_fru_poweradmin_types[admin_state], "Administrative state" )]:
+
+                text = title + ": " + name
+                worst_state = max(state, worst_state)
+                if state == 1:
+                    text += '(!)'
+                elif state == 2:
+                    text += '(!!)'
+                infotexts.append(text)
+
+            return worst_state, ", ".join(infotexts)
+
+    return 3, "No FRU with this id found"
+
+
+check_info["cisco_fru_power"] = {
+    'check_function' : check_cisco_fru_power,
+    'inventory_function' : inventory_cisco_fru_power,
+    'service_description' : 'FRU Power %s',
+    'snmp_info' : ('.1.3.6.1.4.1.9.9.117.1.1.2.1', [ OID_END, 1, 2 ] ),
+    'snmp_scan_function' : lambda oid: "cisco" in oid(".1.3.6.1.2.1.1.1.0").lower(),
+}
diff -Nru check-mk-1.2.2p3/cisco_hsrp check-mk-1.2.6p12/cisco_hsrp
--- check-mk-1.2.2p3/cisco_hsrp 2013-11-05 09:23:07.000000000 +0000
+++ check-mk-1.2.6p12/cisco_hsrp 2015-06-24 09:48:36.000000000 +0000
@@ -7,7 +7,7 @@
 # | | |___| | | | __/ (__| < | | | | . \ |
 # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ |
 # | |
-# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de |
+# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de |
 # +------------------------------------------------------------------+
 #
 # This file is part of Check_MK.
@@ -75,7 +75,6 @@
 
 def inventory_cisco_hsrp(info):
-
     inventory = []
     for line in info:
         hsrp_grp_entry, vip, actrouter, sbrouter, hsrp_state, vmac = line
@@ -84,7 +83,8 @@
         # if the group is in a working state (both routers see and talk to each other),
         # inventorize HSRP group name+IP and the standby state as seen from "this" box.
         if hsrp_state in [ 5, 6 ]:
-            inventory.append( (vip, (hsrp_grp, hsrp_state)) )
+            vip_grp = "%s-%s" % ( vip, hsrp_grp )
+            inventory.append( (vip_grp, (hsrp_grp, hsrp_state)) )
 
     return inventory
 
@@ -98,17 +98,22 @@
         interface_index, hsrp_grp = hsrp_grp_entry.split(".")
         hsrp_state = int(hsrp_state)
 
-        if vip == item:
+        if "-" in item:
+            vip_grp = "%s-%s" % ( vip, hsrp_grp )
+        else:
+            vip_grp = vip
+
+        if vip_grp == item:
             # FIXME: This should be shorter.
             # Validate that the inventorized state is a "good one"
             # if it's also the one we have now, then we're fine.
if hsrp_state_wanted == 5 and hsrp_state == hsrp_state_wanted: state = 0 - msgtxt = "Redundancy Group %s is OK" % hsrp_grp + msgtxt = "Redundancy Group %s is OK" % vip_grp elif hsrp_state_wanted == 6 and hsrp_state == hsrp_state_wanted: state = 0 - msgtxt = "Redundancy Group %s is OK" % hsrp_grp + msgtxt = "Redundancy Group %s is OK" % vip_grp # otherwise if it's a good one, but flipped, then we are in a failover elif hsrp_state == 5 or hsrp_state == 6: state = 1 @@ -118,9 +123,9 @@ state = 2 msgtxt = "Redundancy Group %s has status %s" % ( hsrp_grp, hsrp_states[hsrp_state]) - return (state, nagios_state_names[state] + " - " + msgtxt) + return (state, msgtxt) - return (3, "UNKNOWN - HSRP Group %s not found in Agent output" % hsrp_grp_wanted ) + return (3, "HSRP Group not found in Agent output" ) diff -Nru check-mk-1.2.2p3/cisco_locif check-mk-1.2.6p12/cisco_locif --- check-mk-1.2.2p3/cisco_locif 2013-11-05 09:23:07.000000000 +0000 +++ check-mk-1.2.6p12/cisco_locif 1970-01-01 00:00:00.000000000 +0000 @@ -1,43 +0,0 @@ -#!/usr/bin/python -# -*- encoding: utf-8; py-indent-offset: 4 -*- -# +------------------------------------------------------------------+ -# | ____ _ _ __ __ _ __ | -# | / ___| |__ ___ ___| | __ | \/ | |/ / | -# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | -# | | |___| | | | __/ (__| < | | | | . \ | -# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | -# | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | -# +------------------------------------------------------------------+ -# -# This file is part of Check_MK. -# The official homepage is at http://mathias-kettner.de/check_mk. -# -# check_mk is free software; you can redistribute it and/or modify it -# under the terms of the GNU General Public License as published by -# the Free Software Foundation in version 2. check_mk is distributed -# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- -# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A -# PARTICULAR PURPOSE. See the GNU General Public License for more de- -# ails. You should have received a copy of the GNU General Public -# License along with GNU Make; see the file COPYING. If not, write -# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, -# Boston, MA 02110-1301 USA. - - -def inventory_cisco_locif(info): - pass - -def check_cisco_locif(item, params, info): - return (3, 'UNKNOWN - The cisco_locif check has been removed. Please switch to either if or if64 checks') - -check_info['cisco_locif'] = (check_cisco_locif, "Port %s", 1, inventory_cisco_locif) -snmp_info['cisco_locif'] = ( ".1.3.6.1.4.1.9", - [ "9.23.1.1.1.1.6", # CISCO-CDP-MIB::cdpInterfaceName - "2.2.1.1.2", # OLD-CISCO-INTERFACES-MIB::locIfLineProt - "2.2.1.1.6", # OLD-CISCO-INTERFACES-MIB::locIfInBitsSec - "2.2.1.1.8", # OLD-CISCO-INTERFACES-MIB::locIfOutBitsSec - "2.2.1.1.28", # OLD-CISCO-INTERFACES-MIB::locIfDescr - OID_END ] ) - -snmp_scan_functions['cisco_locif'] = lambda oid: False diff -Nru check-mk-1.2.2p3/cisco_mem check-mk-1.2.6p12/cisco_mem --- check-mk-1.2.2p3/cisco_mem 2013-11-05 09:23:07.000000000 +0000 +++ check-mk-1.2.6p12/cisco_mem 2015-09-21 10:59:54.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. 
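
The item matching in check_cisco_hsrp above, reduced to its core: a dash in the configured item selects the new "<VIP>-<group>" key, while plain items from inventories made before this change keep working.

vip, hsrp_grp = "10.10.10.1", "12"   # invented sample values
for item in ("10.10.10.1-12", "10.10.10.1"):
    if "-" in item:
        vip_grp = "%s-%s" % (vip, hsrp_grp)
    else:
        vip_grp = vip
    print item, vip_grp == item   # True in both cases
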
@@ -40,8 +40,9 @@ if line[0] == item: mem_free = saveint(line[2]) mem_used = saveint(line[1]) - mem_total = mem_free + mem_used - return check_cisco_mem_sub(params, mem_used, mem_total) + mem_total = mem_free + mem_used + return check_cisco_mem_sub(params, mem_used, mem_total) + return 3, "Error getting information. Try a reinventory" def check_cisco_mem_sub(params, mem_used, mem_total): perc_used = 100 * (float(mem_used) / float(mem_total)) @@ -49,23 +50,23 @@ perfdata = [("mem_used", perc_used, warn, crit, 0, 100)] if type(warn) == float: - infotext = " - %2.1f%% (%s) of %s used" % \ - (perc_used, get_filesize_human_readable(mem_used), get_filesize_human_readable(mem_total)) + infotext = "%2.1f%% (%s) of %s used" % \ + (perc_used, get_bytes_human_readable(mem_used), get_bytes_human_readable(mem_total)) if perc_used >= crit: - return (2, "CRIT" + infotext + " (critical at %d%%)" % crit, perfdata) + return (2, infotext + " (critical at %d%%)" % crit, perfdata) elif perc_used >= warn: - return (1, "WARN" + infotext + " (warning at %d%%)" % warn, perfdata) + return (1, infotext + " (warning at %d%%)" % warn, perfdata) else: - return (0, "OK" + infotext, perfdata) + return (0, infotext, perfdata) else: - infotext = " - %s (%2.1f%%) of %s used" % \ - (get_filesize_human_readable(mem_used), perc_used, get_filesize_human_readable(mem_total)) + infotext = "%s (%2.1f%%) of %s used" % \ + (get_bytes_human_readable(mem_used), perc_used, get_bytes_human_readable(mem_total)) if mem_used >= crit: - return (2, "CRIT" + infotext + " (critical at %s MB)" % crit, perfdata) + return (2, infotext + " (critical at %s MB)" % crit, perfdata) elif mem_used >= warn: - return (1, "WARN" + infotext + " (warning at %s MB)" % warn, perfdata) + return (1, infotext + " (warning at %s MB)" % warn, perfdata) else: - return (0, "OK" + infotext, perfdata) + return (0, infotext, perfdata) check_info["cisco_mem"] = { diff -Nru check-mk-1.2.2p3/cisco_power check-mk-1.2.6p12/cisco_power --- check-mk-1.2.2p3/cisco_power 2013-11-05 09:23:07.000000000 +0000 +++ check-mk-1.2.6p12/cisco_power 2015-09-21 10:59:54.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. @@ -31,29 +31,69 @@ # .1.3.6.1.4.1.9.9.13.1.5.1.4.1 1 # .1.3.6.1.4.1.9.9.13.1.5.1.4.2 2 -cisco_power_states = ('', 'normal', 'warning', 'critical', - 'shutdown', 'notPresent', 'notFunctioning') -cisco_power_source = ( '', 'unknown', 'ac', 'dc', 'externalPowerSupply', 'internalRedundant') +cisco_power_states = ( + '', + 'normal', + 'warning', + 'critical', + 'shutdown', + 'not present', + 'not functioning', +) + +cisco_power_sources = ( + '', + 'unknown', + 'AC', + 'DC', + 'external power supply', + 'internal redundant', +) def inventory_cisco_power(info): - # 5 in line[1] means cisco_power_states = notPresent - return [ (line[0], '', '""') for line in info if 'RPS NotExist' not in line[0] and line[1] != '5' ] - -def check_cisco_power(item, params, info): - for line in info: - if line[0] == item: - state, source = map(saveint, line[1:3]) - output = 'State: %s, Source: %s' % (cisco_power_states[state], cisco_power_source[source]) + # Note: the name of the power supply is not unique. We have seen + # a Cisco with four entries in the MIB. 
So we force uniqueness + # by appending a "/4" for ID 4 if the name is not unique + discovered = {} + for sid, textinfo, state, source in info: + if state != '5': + name = cisco_sensor_item(textinfo, sid) + discovered.setdefault(name, []).append(sid) + + for name, entries in discovered.items(): + if len(entries) == 1: + yield name, None + else: + for entry in entries: + yield ("%s/%s" % (name, entry)), None + + +def check_cisco_power(item, no_params, info): + if "/" in item: + split_item = item.split("/")[1] + else: + split_item = None + for sid, textinfo, state, source in info: + if sid == item or sid == split_item or cisco_sensor_item(textinfo, sid) == item: + state = int(state) + source = int(source) + output = 'state: %s, source: %s' % \ + (cisco_power_states[state], cisco_power_sources[source]) if state == 1: - return (0, "OK - %s" % output) + return 0, "%s" % output elif state == 2: - return (1, "WARN - %s" % output) + return 1, "%s" % output else: - return (2, "CRIT - %s" % output) - return (3, "UNKNOWN - item not found in snmp data") + return 2, "%s" % output + -check_info['cisco_power'] = (check_cisco_power, "%s", 0, inventory_cisco_power) -snmp_info['cisco_power'] = ( ".1.3.6.1.4.1.9.9.13.1.5.1", [ 2, 3, 4 ] ) # CISCO-SMI -snmp_scan_functions['cisco_power'] = \ - lambda oid: "cisco" in oid(".1.3.6.1.2.1.1.1.0").lower() +check_info["cisco_power"] = { + 'check_function': check_cisco_power, + 'inventory_function': inventory_cisco_power, + 'service_description': 'Power %s', + 'snmp_info': ('.1.3.6.1.4.1.9.9.13.1.5.1', [OID_END, 2, 3, 4]), + 'snmp_scan_function': \ + lambda oid: "cisco" in oid(".1.3.6.1.2.1.1.1.0").lower(), + "includes" : ['cisco_sensor_item.include'], +} diff -Nru check-mk-1.2.2p3/cisco_qos check-mk-1.2.6p12/cisco_qos --- check-mk-1.2.2p3/cisco_qos 2013-11-05 09:23:07.000000000 +0000 +++ check-mk-1.2.6p12/cisco_qos 2015-09-21 10:59:54.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. 
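
A sketch of the de-duplication in the cisco_power discovery above, with cisco_sensor_item stubbed to return the description unchanged (an assumption; the real include also normalizes the text): supplies sharing a name get their sensor ID appended, unique ones keep the plain name.

def cisco_sensor_item(textinfo, sid):  # simplified stand-in
    return textinfo

info = [["1", "PS1", "1", "2"],
        ["2", "PS1", "1", "2"],
        ["3", "PS2", "1", "2"]]
print sorted(inventory_cisco_power(info))
# [('PS1/1', None), ('PS1/2', None), ('PS2', None)]
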
@@ -45,20 +45,20 @@ # TEST: # # search class table: -# .1.3.6.1.4.1.9.9.166.1.7.1.1.1.284945 "AF1" -# class_id = 284945 +# .1.3.6.1.4.1.9.9.166.1.7.1.1.1.284945 (cbQosCMName) "AF1" +# class_id = 284945 (cbQosConfigIndex) # # search config table for matching value # .1.3.6.1.4.1.9.9.166.1.5.1.1.2.144.5256 284945 -# key = 144.5256 +# key = 144.5256 (cbQosPolicyIndex: 144, cbQosObjectsIndex: 5256) # # search if table for matchin if_id: 144 -# .1.3.6.1.4.1.9.9.166.1.1.1.1.4.144 9 -# if_policy = 9 +# .1.3.6.1.4.1.9.9.166.1.1.1.1.4.144 (cbQosIfIndex) 9 +# if_policy = 9 (ifIndex -> standard mib) # -# get policy_id from config table using if_id.if_id 144.144 -# .1.3.6.1.4.1.9.9.166.1.5.1.1.2.144.144 6208592 -# policy_index = 6208592 +# get config_id from config table using if_id.if_id 144.144 +# .1.3.6.1.4.1.9.9.166.1.5.1.1.2.144.144 (cbQosConfigIndex) 6208592 +# config_index = 6208592 # # get policy name using the policy_index # .1.3.6.1.4.1.9.9.166.1.6.1.1.1.6208592 "ingress-map" @@ -113,14 +113,20 @@ # Parent ID: # .1.3.6.1.4.1.9.9.166.1.5.1.1.4.258.6184 258 -# post_warn, post_crit, drop warn, drop crit + +# get cbQosQueueingCfgBandwidth +# .1.3.6.1.4.1.9.9.166.1.9.1.1.1.1608 3094 + cisco_qos_default_levels = (None, None, 0.01, 0.01) +#factory_settings["cisco_qos_default_values"] = { +# "drop" : (0.01, 0.01) +#} -def cisco_qos_get_ifs_by_class_id(config, class_id): +def cisco_qos_get_config_entries_by_class_id(config, class_id): return [ if_index.split('.') for if_index, value in config.iteritems() if value == class_id ] def inventory_cisco_qos(info): - if len(info) == 8: + if len(info) == 11: ifs = dict(info[0]) config = dict([ ('.'.join(oid.split('.')[-2:]), value) for oid, value in info[3] ]) if_names = dict(info[6]) @@ -129,14 +135,29 @@ items = [] for class_id, class_name in info[2]: # Get interface ids which use this qos class - for policy_if_id, policy_if_id2 in cisco_qos_get_ifs_by_class_id(config, class_id): - if_name = if_names[ifs[policy_if_id]] - items += [ ('%s: %s' % (if_name, class_name), 'cisco_qos_default_levels') ] + for policy_id, objects_id in cisco_qos_get_config_entries_by_class_id(config, class_id): + if ifs.get(policy_id) in if_names: + if_name = if_names[ifs[policy_id]] + items += [ ('%s: %s' % (if_name, class_name), {}) ] return items def check_cisco_qos(item, params, info): - post_warn, post_crit, drop_warn, drop_crit = params + # Convert old params definitions + # Note: the float values of the post levels are converted to int + had_legacy_params = False + if type(params) == tuple: + params = { + "post": tuple(map(lambda x: x and int(x) or None, params[0:2])), + "drop": params[2:4] + } + had_legacy_params = True + + unit = params.get("unit", "bit") + average = params.get("average") + post_warn, post_crit = params.get("post",(None, None)) + drop_warn, drop_crit = params.get("drop",(None, None)) + # Load values and format them ifs = dict(info[0]) @@ -147,6 +168,9 @@ drop_bytes = dict([ ('.'.join(oid.split('.')[-2:]), value) for oid, value in info[5] ]) if_names = dict(info[6]) if_speeds = dict(info[7]) + parents = dict(info[8]) + if_qos_bandwidth = dict(info[9]) + object_types = dict(info[10]) if_name, class_name = item.split(': ') @@ -165,59 +189,139 @@ break if not if_id or not class_id: - return (3, "UNKNOWN - QoS class not found for that interface") + return (3, "QoS class not found for that interface") + + policy_id, objects_id, policy_map_id, policy_name = None, None, None, None + for this_policy_id, this_objects_id in 
cisco_qos_get_config_entries_by_class_id(config, class_id): + if if_id != ifs[this_policy_id]: + continue # skip the ones of other interfaces + + # Get the policy_map_id. To retrieve this get one of the config entries + # of type "policy map" from the config table. All of this type should have + # the same value, which is then the policy_map_id. + for key in object_types.keys(): + if key.startswith(this_policy_id+'.') and object_types[key] == '1': + policy_map_id = config[key] + break + + if policy_map_id is None: + return 3, 'Invalid policy map id' + + policy_name = policies.get(policy_map_id) + policy_id = this_policy_id + objects_id = this_objects_id - # Gather information for this object - policy_if_id, policy_if_id2 = cisco_qos_get_ifs_by_class_id(config, class_id)[0] - try: - policy_id = config[policy_if_id+'.'+policy_if_id] - except KeyError: - # Be compatible with newer IOS-XE releases where the last digit is pinned - # to "1" instead of the plicy_if_id - policy_id = config[policy_if_id+'.1'] - policy_name = policies[policy_id] - post_b = post_bytes[policy_if_id+'.'+policy_if_id2] - drop_b = drop_bytes[policy_if_id+'.'+policy_if_id2] + if policy_id is None or objects_id is None: + return 3, 'Could not find policy_id or objects_id' + + post_b = post_bytes.get(policy_id+'.'+objects_id, 0) + drop_b = drop_bytes.get(policy_id+'.'+objects_id, 0) speed = saveint(if_speeds[if_id]) + + parent_value_cache = {} + for a_key, a_value in config.items(): + parent_value_cache.update({ a_value : a_key.split(".")[1] }) + + # if a_value == class_id: + # parent_value = a_key.split(".")[1] + for b_key, b_value in parents.items(): + if parent_value_cache[class_id] == b_value: + if object_types[b_key] == "4": + try: + speed = saveint(if_qos_bandwidth[config[b_key]]) * 1000 + break + except KeyError: + pass + # Bandwidth needs to be in bytes for later calculations - bw = speed / 8.0 + bw = speed / 8.0 + + # Determine post warn/crit levels + if type(post_warn) == float: + post_warn = bw / 100.0 * post_warn + post_crit = bw / 100.0 * post_crit + elif type(post_warn) == int: + if unit == 'bit': + post_warn = post_warn / 8 + post_crit = post_crit / 8 + + # Determine drop warn/crit levels + if type(drop_warn) == float and not had_legacy_params: + drop_warn = bw / 100.0 * drop_warn + drop_crit = bw / 100.0 * drop_crit + # Convert the drop levels to byte + elif unit == "bit": + # But only if our params where already provided in the new format + if not had_legacy_params: + if type(drop_warn) == int: + drop_warn = drop_warn / 8.0 + if type(drop_crit) == int: + drop_crit = drop_crit / 8.0 + # Handle counter values state = 0 infotext = '' this_time = time.time() rates = [] - wrapped = False perfdata = [] - for name, counter, warn, crit, min, max in [ ( "post", post_b, post_warn, post_crit, 0, bw), - ( "drop", drop_b, drop_warn, drop_crit, 0, bw) ]: + perfdata_avg = [] - try: - timedif, rate = get_counter("cisco_qos.%s.%s" % (name, item), this_time, saveint(counter)) - rates.append(rate) - perfdata.append( (name, rate, warn, crit, min, max) ) - except MKCounterWrapped, e: - wrapped = True - - # if at least one counter wrapped, we do not handle the counters at all - if wrapped: - perfdata = [] + min_value = ("0", "0.0")[unit == 'bit'] + for name, counter, warn, crit, min_val, max_val in [ + ( "post", post_b, post_warn, post_crit, min_value, bw), + ( "drop", drop_b, drop_warn, drop_crit, min_value, bw), + ]: + rate = get_rate("cisco_qos.%s.%s" % (name, item), this_time, saveint(counter)) + rates.append(rate) + 
perfdata.append( (name, rate, warn, crit, min_val, max_val) ) + + if average: + avg_value = get_average("cisco_qos.%s.%s.avg" % (name, item), this_time, rate, average) + rates.append(avg_value) + perfdata_avg.append( ("%s_avg_%d" % (name, average), avg_value, warn, crit, min_val, max_val) ) + + perfdata.extend(perfdata_avg) + def format_value(value): + if unit == "bit": + value = value * 8 + return get_nic_speed_human_readable(value) + else: + return "%s/s" % get_bytes_human_readable(value) + + if average: + post_rate = rates[1] + drop_rate = rates[3] else: - post_rate, drop_rate = rates - for what, rate, warn, crit in [ ("post", rates[0], post_warn, post_crit), - ("drop", rates[1], drop_warn, drop_crit) ]: - infotext += ', %s: %s/s' % (what, get_bytes_human_readable(rate)) - if crit is not None and rate >= crit: - state = 2 - infotext += '(!!)' - elif warn is not None and rate >= warn: - state = 1 - infotext += '(!)' + post_rate = rates[0] + drop_rate = rates[1] + + for what, rate, warn, crit in [ ("post", post_rate, post_warn, post_crit), + ("drop", drop_rate, drop_warn, drop_crit) ]: + infotext += ', %s: %s' % (what, format_value(rate)) + if crit is not None and rate >= crit: + state = max(2, state) + infotext += '(!!)' + elif warn is not None and rate >= warn: + state = max(1, state) + infotext += '(!)' + + if policy_name: + infotext += ', Policy-Name: %s, Int-Bandwidth: %s' % (policy_name, format_value(bw)) + else: + infotext += ', Policy-Map-ID: %s, Int-Bandwidth: %s' % (policy_map_id, format_value(bw)) + return (state, infotext.lstrip(', '), perfdata) + - infotext += ', Policy-Name: %s, Int-Bandwidth: %sits/s' % (policy_name, get_bytes_human_readable(speed, 1000)) - return (state, "%s - %s" % (nagios_state_names[state], infotext.lstrip(', ')), perfdata) +check_info["cisco_qos"] = { + "service_description" : "QoS %s", + "check_function" : check_cisco_qos, + "inventory_function" : inventory_cisco_qos, + "has_perfdata" : True, + "group" : "cisco_qos", + "default_levels_variable" : "cisco_qos_default_" +} -check_info['cisco_qos'] = (check_cisco_qos, "QoS %s", 1, inventory_cisco_qos) snmp_info['cisco_qos'] = [ ( '.1.3.6.1.4.1.9.9.166.1', [ OID_END, '1.1.1.4' ] ), # qosIfIndex ( '.1.3.6.1.4.1.9.9.166.1', [ OID_END, '6.1.1.1' ] ), # qosPolicies ( '.1.3.6.1.4.1.9.9.166.1', [ OID_END, '7.1.1.1' ] ), # qosClasses @@ -226,6 +330,9 @@ ( '.1.3.6.1.4.1.9.9.166.1', [ OID_STRING, '15.1.1.16' ] ), # qosDropBytes ( '.1.3.6.1.2.1.2.2.1', [ OID_END, '2' ]), # ifNames ( '.1.3.6.1.2.1.2.2.1', [ OID_END, '5' ]), # ifSpeeds + ( '.1.3.6.1.4.1.9.9.166.1', [ OID_END, '5.1.1.4' ]), # cbQosParentObjectsIndex + ( '.1.3.6.1.4.1.9.9.166.1', [ OID_END, '9.1.1.1' ]), # qosQueueingConfigBandwidth + ( '.1.3.6.1.4.1.9.9.166.1', [ OID_END, '5.1.1.3' ]), # cbQosObjectsType ] snmp_scan_functions['cisco_qos'] = lambda oid: "cisco" in oid(".1.3.6.1.2.1.1.1.0").lower() and \ oid(".1.3.6.1.4.1.9.9.166.1.1.1.1.4.*") diff -Nru check-mk-1.2.2p3/cisco_secure check-mk-1.2.6p12/cisco_secure --- check-mk-1.2.2p3/cisco_secure 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/cisco_secure 2015-06-24 09:48:36.000000000 +0000 @@ -0,0 +1,92 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . 
\ |
+# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ |
+# | |
+# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de |
+# +------------------------------------------------------------------+
+#
+# This file is part of Check_MK.
+# The official homepage is at http://mathias-kettner.de/check_mk.
+#
+# check_mk is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by
+# the Free Software Foundation in version 2. check_mk is distributed
+# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with-
+# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE. See the GNU General Public License for more de-
+# tails. You should have received a copy of the GNU General Public
+# License along with GNU Make; see the file COPYING. If not, write
+# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
+# Boston, MA 02110-1301 USA.
+
+
+def parse_cisco_secure(info):
+    parsed = []
+    # l[1] = Name, l[2] = Portstate
+    names = dict([ (l[0], ( l[1], l[2] )) for l in info[0]] )
+    for num, enabled, status, violationCount, lastmac in info[1]:
+        mac = ":".join(["%02s" % hex(ord(m))[2:] for m in lastmac]).replace(' ', '0')
+        # violationCount is initialized with 0 when security is enabled. When not, the
+        # value is reported as empty string. saveint() makes life easier here.
+        parsed.append((names[num][0], int(names[num][1]), int(enabled), int(status), saveint(violationCount), mac))
+    return parsed
+
+
+def inventory_cisco_secure(parsed):
+    # search for at least one port with security
+    for name, op_state, enabled, status, violationCount, lastmac in parsed:
+        # if port security is enabled and the port is up, OR there currently is a security issue
+        if ( enabled == 1 and op_state == 1) or status == 3:
+            return [ (None, None) ]
+
+
+def check_cisco_secure(item, params, parsed):
+    secure_states = {
+        1 : "fully operational",
+        2 : "could not be enabled due to certain reasons",
+        3 : "shutdown due to security violation"
+    }
+
+    at_least_one_problem = False
+    for name, op_state, enabled, status, violationCount, lastmac in parsed:
+        message = "Port %s: %s (Violation Count: %s, Last Mac: %s)" % \
+            ( name, secure_states[status], violationCount, lastmac )
+
+        # If the port can't be enabled, is up and has violations -> WARN
+        if status == 2 and op_state == 1 and int(violationCount) > 0:
+            yield 1, message
+            at_least_one_problem = True
+        # Security issue -> CRIT
+        elif status == 3:
+            yield 2, message
+            at_least_one_problem = True
+
+    if not at_least_one_problem:
+        yield 0, "No port security violation"
+
+
+check_info["cisco_secure"] = {
+    "parse_function"      : parse_cisco_secure,
+    "check_function"      : check_cisco_secure,
+    "inventory_function"  : inventory_cisco_secure,
+    "service_description" : "Port Security",
+    "snmp_scan_function"  : lambda oid: "cisco" in oid(".1.3.6.1.2.1.1.1.0").lower() and \
+                                        oid(".1.3.6.1.4.1.9.9.315.1.2.1.1.1.*"),
+    "snmp_info"           : [ (".1.3.6.1.2.1.2.2.1", [OID_END, 2, 8 ] ),
+                              ( ".1.3.6.1.4.1.9.9.315.1.2.1.1",
+                                [
+                                  OID_END,
+                                  "1",  # cpsIfPortSecurityEnable
+                                  "2",  # cpsIfPortSecurityStatus
+                                  "9",  # cpsIfViolationCount
+                                  "10", # cpsIfSecureLastMacAddress
+                                ] ),
+                            ]
+}
+
diff -Nru check-mk-1.2.2p3/cisco_sensor_item.include check-mk-1.2.6p12/cisco_sensor_item.include
--- check-mk-1.2.2p3/cisco_sensor_item.include 1970-01-01 00:00:00.000000000 +0000
+++ check-mk-1.2.6p12/cisco_sensor_item.include 2015-09-21 10:59:54.000000000 +0000
@@ -0,0 +1,56 @@
+#!/usr/bin/python
+# -*- encoding: utf-8; py-indent-offset: 4 -*-
+# +------------------------------------------------------------------+
+# | ____ _ _ __ __ _ __ |
+# | / ___| |__ ___ ___| | __ | \/ | |/ / |
+# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / |
+# | | |___| | | | __/ (__| < | | | | . \ |
+# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ |
+# | |
+# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de |
+# +------------------------------------------------------------------+
+#
+# This file is part of Check_MK.
+# The official homepage is at http://mathias-kettner.de/check_mk.
+#
+# check_mk is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by
+# the Free Software Foundation in version 2. check_mk is distributed
+# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with-
+# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE. See the GNU General Public License for more de-
+# tails. You should have received a copy of the GNU General Public
+# License along with GNU Make; see the file COPYING. If not, write
+# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
+# Boston, MA 02110-1301 USA.
+
+def cisco_sensor_item(trial_string, fall_back):
+    # trial_string can be:
+    # - empty
+    # - a single string
+    # - a string separated by commas, with status information
+    # -> depends on the device model
+    try:
+        # Try to handle all cases found in mkzeug/walks
+        parts = [ x.strip() for x in trial_string.split(',')]
+        if len(parts) == 1:
+            item = trial_string
+        # If the last part contains # or Power, take the complete string
+        elif '#' in parts[-1] or 'Power' in parts[-1]:
+            item = " ".join(parts)
+        # If the second-to-last part starts with Status or PS1, remove it and the last part
+        elif parts[-2].startswith("Status") or parts[-2].startswith("PS1"):
+            item = " ".join(parts[:-2])
+        # Otherwise take everything but the last part
+        else:
+            item = " ".join(parts[:-1])
+        # There may be multiple items with exactly the same string.
+        # Try to prevent that by appending the fall_back number.
+        if not item[-1].isdigit():
+            item += " " + fall_back
+        # Replace unwanted chars and return
+        return item.replace('#', ' ')
+    except:
+        return fall_back
+
+
diff -Nru check-mk-1.2.2p3/cisco_sys_mem check-mk-1.2.6p12/cisco_sys_mem
--- check-mk-1.2.2p3/cisco_sys_mem 1970-01-01 00:00:00.000000000 +0000
+++ check-mk-1.2.6p12/cisco_sys_mem 2015-06-24 09:48:36.000000000 +0000
@@ -0,0 +1,60 @@
+#!/usr/bin/python
+# -*- encoding: utf-8; py-indent-offset: 4 -*-
+# +------------------------------------------------------------------+
+# | ____ _ _ __ __ _ __ |
+# | / ___| |__ ___ ___| | __ | \/ | |/ / |
+# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / |
+# | | |___| | | | __/ (__| < | | | | . \ |
+# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ |
+# | |
+# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de |
+# +------------------------------------------------------------------+
+#
+# This file is part of Check_MK.
+# The official homepage is at http://mathias-kettner.de/check_mk.
+#
+# check_mk is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by
+# the Free Software Foundation in version 2. check_mk is distributed
+# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with-
+# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE. See the GNU General Public License for more de-
+# tails. You should have received a copy of the GNU General Public
+# License along with GNU Make; see the file COPYING. If not, write
+# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
+# Boston, MA 02110-1301 USA.
+
+# cseSysMemoryUtilization .1.3.6.1.4.1.9.9.305.1.1.2.0
+#
+
+cisco_sys_mem_default_levels = (80.0, 90.0)
+
+def inventory_cisco_sys_mem(info):
+    if info:
+        return [ (None, "cisco_sys_mem_default_levels") ]
+    else:
+        return []
+
+def check_cisco_sys_mem(_no_item, params, info):
+    warn, crit = params
+    if info[0][0]:
+        mem_used = float(info[0][0])
+        perfdata = [("mem_used", mem_used, warn, crit, 0, 100)]
+        infotext = "%2.1f%% of supervisor memory used" % mem_used
+        if mem_used >= crit:
+            return (2, infotext + " (critical at %2.1f%%)" % crit, perfdata)
+        elif mem_used >= warn:
+            return (1, infotext + " (warning at %2.1f%%)" % warn, perfdata)
+        else:
+            return (0, infotext, perfdata)
+
+
+check_info["cisco_sys_mem"] = {
+    "check_function"      : check_cisco_sys_mem,
+    "inventory_function"  : inventory_cisco_sys_mem,
+    "service_description" : "Supervisor Mem Used",
+    "has_perfdata"        : True,
+    "group"               : "cisco_supervisor_mem", # separate group since only percentage
+    "snmp_scan_function"  : lambda oid: oid(".1.3.6.1.2.1.1.1.0").startswith("Cisco NX-OS"),
+    "snmp_info"           : ( ".1.3.6.1.4.1.9.9.305.1.1.2", "0" ),
+}
diff -Nru check-mk-1.2.2p3/cisco_temp check-mk-1.2.6p12/cisco_temp
--- check-mk-1.2.2p3/cisco_temp 2013-11-05 09:23:07.000000000 +0000
+++ check-mk-1.2.6p12/cisco_temp 2015-09-21 10:59:54.000000000 +0000
@@ -7,7 +7,7 @@
 # | | |___| | | | __/ (__| < | | | | . \ |
 # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ |
 # | |
-# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de |
+# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de |
 # +------------------------------------------------------------------+
 #
 # This file is part of Check_MK.
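Note: the following hunk removes the hand-written "OK -", "WARN -", "CRIT -" and "UNKNOWN -" prefixes from the check output. Newer Check_MK versions prepend the state marker themselves, so the checks now return only the state code and the plain message. A minimal sketch (not part of the patch) of the same mapping in table-driven form, using the state codes from the hunk:

    # Sketch only: table-driven variant of the if/elif chain in check_cisco_temp
    cisco_temp_states = {
        '1': (0, "status is OK"),
        '2': (1, "status is WARNING"),
        '3': (2, "status is CRITICAL"),
        '4': (2, "status is SHUTDOWN"),
        '5': (3, "sensor not present"),
        '6': (3, "sensor value out of range"),
    }

    def cisco_temp_state(state):
        return cisco_temp_states.get(state, (3, "invalid state '%s'" % state))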
@@ -58,29 +58,29 @@ for name, state in info: if name == item: if state == '1': - return (0, "OK - status is OK") + return (0, "status is OK") elif state == '2': - return (1, "WARN - status is WARNING") + return (1, "status is WARNING") elif state == '3': - return (2, "CRIT - status is CRITICAL") + return (2, "status is CRITICAL") elif state == '4': - return (2, "CRIT - status is SHUTDOWN") + return (2, "status is SHUTDOWN") elif state == '5': - return (3, "UNKNOWN - sensor not present") + return (3, "sensor not present") elif state == '6': - return (3, "UNKNOWN - sensor value out of range") + return (3, "sensor value out of range") else: - return (3, "UNKNOWN - invalid state '%s'" % state) + return (3, "invalid state '%s'" % state) - return (3, "UNKNOWN - sensor not found in SNMP output") + return (3, "sensor not found in SNMP output") -check_info["cisco_temp"] = ( check_cisco_temp, "Temperature %s", 0, inventory_cisco_temp ) - -snmp_info["cisco_temp"] = \ - ( ".1.3.6.1.4.1.9.9.13.1.3.1", [ "2", "6" ] ) - -snmp_scan_functions["cisco_temp"] = \ - lambda oid: "cisco" in oid(".1.3.6.1.2.1.1.1.0").lower() and \ - not oid(".1.3.6.1.4.1.9.9.13.1.3.1.3.*") - -checkgroup_of["cisco_temp"] = "temperature_auto" +check_info["cisco_temp"] = { + 'check_function': check_cisco_temp, + 'inventory_function': inventory_cisco_temp, + 'service_description': 'Temperature %s', + 'snmp_info': ('.1.3.6.1.4.1.9.9.13.1.3.1', ['2', '6']), + 'snmp_scan_function': \ + lambda oid: "cisco" in oid(".1.3.6.1.2.1.1.1.0").lower() and \ + not oid(".1.3.6.1.4.1.9.9.13.1.3.1.3.*"), + 'group': 'temperature_auto', +} diff -Nru check-mk-1.2.2p3/cisco_temperature check-mk-1.2.6p12/cisco_temperature --- check-mk-1.2.2p3/cisco_temperature 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/cisco_temperature 2015-09-21 10:59:54.000000000 +0000 @@ -0,0 +1,211 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2015 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. + + +# NOTE: Devices of type 3850 with firmware versions 3.2.0SE, 3.2.1, 3.2.2 +# have been observed to display a tenth of the actual temperature value. +# A firmware update on the device fixes this. 
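Note: in the CISCO-ENTITY-SENSOR-MIB data parsed below, a raw entSensorValue must be combined with entSensorScale (mapped to base-10 exponents by the table that follows) and entSensorPrecision (read into the `magnitude` variable) to recover the physical reading. A minimal sketch of the conversion performed in parse_cisco_temperature, assuming the cisco_entity_exponents mapping defined below:

    # Sketch only: entSensorValue is reported as reading * 10**precision / 10**exponent,
    # so both factors are inverted to get the reading back
    def scale_ent_sensor_value(raw_value, scalecode, precision):
        exponent = cisco_entity_exponents[scalecode]   # e.g. "8" (milli) -> -3
        return float(raw_value) * 10 ** exponent / 10 ** int(precision)

    # e.g. scalecode "8" (milli), precision "0", raw value 38000 -> 38.0 degrees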
+ +# CISCO-ENTITY-SENSOR-MIB::entSensorScale + +cisco_entity_exponents = { + "1" : -24, # 1:yocto + "2" : -21, # 2:zepto + "3" : -18, # 3:atto + "4" : -15, # 4:femto + "5" : -12, # 5:pico + "6" : -9, # 6:nano + "7" : -6, # 7:micro + "8" : -3, # 8:milli + "9" : 0, # 9:units + "10" : 3, # 10:kilo + "11" : 6, # 11:mega + "12" : 9, # 12:giga + "13" : 12, # 13:tera + "14" : 18, # 14:exa + "15" : 15, # 15:peta + "16" : 21, # 16:zetta + "17" : 24, # 17:yotta +} + +# CISCO-ENTITY-SENSOR-MIB::entSensorStatus +# 1:ok +# 2:unavailable +# 3:nonoperational + + +def parse_cisco_temperature(info): + description_info, state_info, levels_info, perfstuff = info + + # Parse OIDs described by CISCO-ENTITY-SENSOR-MIB + entity_parsed = {} + + # Create dict of sensor descriptions + descriptions = dict(description_info) + + # Create dict with thresholds + thresholds = {} + for sensor_id, sensortype, scalecode, magnitude, value, sensorstate in state_info: + thresholds.setdefault(sensor_id, []) + + for endoid, level in levels_info: + # endoid is e.g. 21549.9 or 21459.10 + sensor_id, subid = endoid.split('.') + thresholds.setdefault(sensor_id, []).append(level) + + for sensor_id, sensortype, scalecode, magnitude, value, sensorstate in state_info: + if sensor_id in descriptions: + descr = descriptions[sensor_id] + else: + descr = sensor_id + + if descr and sensortype == '8': # only care about temperature sensors + + if sensorstate == '2': + entity_parsed[descr] = (3, "Data from sensor currently not available") + elif sensorstate == '3': + entity_parsed[descr] = (3, "Sensor is broken") + else: + entity_parsed[descr] = {} + scale = 10**int(magnitude) + + scale *= 10 ** (-1 * cisco_entity_exponents[scalecode]) + + entity_parsed[descr]['temp'] = float(value) / scale + + if len(thresholds[sensor_id]) in [ 2, 4 ]: + warnraw, critraw = thresholds[sensor_id][0:2] + # Some devices deliver these values in the wrong order + dev_levels = ( min(float(warnraw)/scale, float(critraw)/scale), + max(float(warnraw)/scale, float(critraw)/scale) ) + else: + dev_levels = None + entity_parsed[descr]['dev_levels'] = dev_levels + + entity_parsed[descr]['dev_status'] = None + + # Now parse OIDs described by CISCO-ENVMON-MIB + envmon_states = { + '1': "normal", + '2': "warning", + '3': "critical", + '4': "shutdown", + '5': "not present", + '6': "not functioning" + } + + parsed = {} + for statustext, temp, max_temp, state, oid_end in perfstuff: + item = cisco_sensor_item(statustext, oid_end) + if state in ['5', '6']: + parsed[item] = (3, "Sensor %s" % envmon_states[state]) + elif int(temp) == 0: + if state in ('123'): + parsed[item] = ( int(state) - 1, "Sensor reports %s state" % envmon_states[state] ) + else: + parsed[item] = (3, "Sensor defect") + else: + parsed[item] = {} + parsed[item]['temp'] = int(temp) + if max_temp and int(max_temp): + parsed[item]['dev_status'] = None + parsed[item]['dev_levels'] = (int(max_temp), int(max_temp)) + else: + parsed[item]['dev_levels'] = None + if state == '1': + parsed[item]['dev_status'] = 0 + elif state == '2': + parsed[item]['dev_status'] = 1 + elif state in '34': + parsed[item]['dev_status'] = 2 + else: + parsed[item] = (3, "Sensor reports unknown status code") + + # Merge the two dicts, preferring keys generated from ENTITY data + parsed.update(entity_parsed) + + return parsed + + +def inventory_cisco_temperature(parsed): + for item in parsed.keys(): + yield item, {} + + +def check_cisco_temperature(item, params, parsed): + if item in parsed: + if type(parsed[item]) == tuple: + return parsed[item] + 
else: + return check_temperature(parsed[item]['temp'], params, + dev_levels = parsed[item]['dev_levels'], + dev_status = parsed[item]['dev_status']) + + +check_info['cisco_temperature'] = { + "parse_function" : parse_cisco_temperature, + "inventory_function" : inventory_cisco_temperature, + "check_function" : check_cisco_temperature, + "service_description": "Temperature %s", + "group" : "temperature", + "has_perfdata" : True, + "snmp_scan_function" : lambda oid: "cisco" in oid(".1.3.6.1.2.1.1.1.0").lower() and \ + ( oid(".1.3.6.1.4.1.9.9.91.1.1.1.1.*") != None or + oid(".1.3.6.1.4.1.9.9.13.1.3.1.3.*") != None ), + "snmp_info" : [ + # cisco_temp_sensor data + ( ".1.3.6.1.2.1.47.1.1.1.1", [ + OID_END, + 2, # Description of the sensor + ]), + + # Type and current state + ( ".1.3.6.1.4.1.9.9.91.1.1.1.1", [ + OID_END, + 1, # CISCO-ENTITY-SENSOR-MIB::entSensorType + 2, # CISCO-ENTITY-SENSOR-MIB::entSensorScale + 3, # CISCO-ENTITY-SENSOR-MIB::entSensorPrecision + 4, # CISCO-ENTITY-SENSOR-MIB::entSensorValue + 5, # CISCO-ENTITY-SENSOR-MIB::entSensorStatus + ]), + + # Threshold + ( ".1.3.6.1.4.1.9.9.91.1.2.1.1", [ + OID_END, + 4, # Thresholds + ]), + + # cisco_temp_perf data + ( ".1.3.6.1.4.1.9.9.13.1.3.1", [ # CISCO-SMI + 2, # ciscoEnvMonTemperatureStatusDescr + 3, # ciscoEnvMonTemperatureStatusValue + 4, # ciscoEnvMonTemperatureThreshold + 6, # ciscoEnvMonTemperatureState + OID_END + ]), + ], + "includes" : [ "temperature.include", 'cisco_sensor_item.include' ], +} diff -Nru check-mk-1.2.2p3/cisco_temp_perf check-mk-1.2.6p12/cisco_temp_perf --- check-mk-1.2.2p3/cisco_temp_perf 2013-11-05 09:23:07.000000000 +0000 +++ check-mk-1.2.6p12/cisco_temp_perf 2015-06-24 09:48:36.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. @@ -24,27 +24,22 @@ # to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, # Boston, MA 02110-1301 USA. -# normal(1): the environment is good, such as low -# temperature. -# -# warning(2): the environment is bad, such as temperature -# above normal operation range but not too -# high. -# -# critical(3): the environment is very bad, such as -# temperature much higher than normal -# operation limit. -# -# shutdown(4): the environment is the worst, the system -# should be shutdown immediately. -# -# notPresent(5): the environmental monitor is not present, -# such as temperature sensors do not exist. -# -# notFunctioning(6): the environmental monitor does not -# function properly, such as a temperature -# sensor generates a abnormal data like -# 1000 C. +# normal(1): the environment is good, such as low +# temperature. +# warning(2): the environment is bad, such as temperature +# above normal operation range but not too +# high. +# critical(3): the environment is very bad, such as +# temperature much higher than normal +# operation limit. +# shutdown(4): the environment is the worst, the system +# should be shutdown immediately. +# notPresent(5): the environmental monitor is not present, +# such as temperature sensors do not exist. +# notFunctioning(6): the environmental monitor does not +# function properly, such as a temperature +# sensor generates a abnormal data like +# 1000 C. 
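Note: the comment block above lists the six ciscoEnvMonState values; the dictionary that follows keeps their names, and the legacy logic further below (now commented out) mapped them onto monitoring states. That mapping, restated as a table (a sketch, with values taken from the old code):

    # Sketch only: ciscoEnvMonState -> monitoring state
    envmon_to_nagios = {
        1: 0,  # normal         -> OK
        2: 1,  # warning        -> WARN
        3: 2,  # critical       -> CRIT
        4: 2,  # shutdown       -> CRIT
        5: 3,  # notPresent     -> UNKNOWN
        6: 3,  # notFunctioning -> UNKNOWN
    }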
cisco_temp_perf_envmon_states = { 1: "normal", @@ -56,41 +51,54 @@ } def inventory_cisco_temp_perf(info): - inventory = [] - for line in info: - name = line[0] - if name == "": - name = line[4] - inventory.append((name, None)) - return inventory +# for line in info: +# yield cisco_sensor_item(line[0], line[4]), None + return [] def check_cisco_temp_perf(item, _no_params, info): - for line in info: - if line[0] == item or line[4] == item: - temp = saveint(line[1]) - if temp != 0: - perfdata = [("temp", temp, None, saveint(line[2]) )] - temptext = ", %d degrees (critical at %d)" % (temp, saveint(line[2])) - else: - perfdata = [] - temptext = "" - - state = int(line[3]) - statename = cisco_temp_perf_envmon_states.get(state, "(invalid)") - if state == 1: - return (0, "OK - state is normal%s" % temptext, perfdata) - elif state == 2: - return (1, "WARN - state is %s%s" % (statename, temptext), perfdata) - elif state in [ 5, 6 ]: - return (3, "UNKNOWN - state is %s" % statename) - else: - return (2, "CRIT - state is %s%s" % (statename, temptext), perfdata) - return (3, "UNKNOWN - Item %s not found in SNMP data" % item) - -check_info['cisco_temp_perf'] = (check_cisco_temp_perf, "Temperature %s", 1, inventory_cisco_temp_perf) -snmp_info['cisco_temp_perf'] = ( ".1.3.6.1.4.1.9.9.13.1.3.1", [ 2, 3, 4, 6, OID_END ] ) # CISCO-SMI -snmp_scan_functions['cisco_temp_perf'] = \ - lambda oid: "cisco" in oid(".1.3.6.1.2.1.1.1.0").lower() and \ - oid(".1.3.6.1.4.1.9.9.13.1.3.1.3.*") != None and \ - oid(".1.3.6.1.4.1.9.9.91.1.1.1.1.*") == None # the using cisco_sensor_temp -checkgroup_of["cisco_temp_perf"] = "temperature_auto" + for statustext, temp, max_temp, state, oid_end in info: + if cisco_sensor_item(statustext, oid_end) == item: +# # FIXME saveint needed here? (See if condition) +# temp = saveint(temp) +# if temp != 0: +# max_temp = max_temp and int(max_temp) or None +# perfdata = [("temp", temp, None, max_temp )] +# temptext = ", Temperature: %d °C" % temp +# if max_temp != None: +# temptext += " (critical at %d °C)" % max_temp +# else: +# perfdata = [] +# temptext = "" +# +# state = int(state) +# statename = cisco_temp_perf_envmon_states.get(state, "(invalid)") +# if state == 1: +# return (0, "state is normal%s" % temptext, perfdata) +# elif state == 2: +# return (1, "state is %s%s" % (statename, temptext), perfdata) +# elif state in [ 5, 6 ]: +# return (3, "state is %s" % statename) +# else: +# return (2, "state is %s%s" % (statename, temptext), perfdata) + return 3, "This check is obsolete, please re-inventorize this host" + + +check_info['cisco_temp_perf'] = { + "check_function" : check_cisco_temp_perf, + "inventory_function" : inventory_cisco_temp_perf, + "service_description" : "Temperature %s", + "has_perfdata" : True, + "snmp_scan_function" : + lambda oid: "cisco" in oid(".1.3.6.1.2.1.1.1.0").lower() \ + and oid(".1.3.6.1.4.1.9.9.13.1.3.1.3.*") != None \ + and oid(".1.3.6.1.4.1.9.9.91.1.1.1.1.*") == None, + "snmp_info" : (".1.3.6.1.4.1.9.9.13.1.3.1", [ # CISCO-SMI + 2, # ciscoEnvMonTemperatureStatusDescr + 3, # ciscoEnvMonTemperatureStatusValue + 4, # ciscoEnvMonTemperatureThreshold + 6, # ciscoEnvMonTemperatureState + OID_END + ]), + "group" : "temperature_auto", + "includes" : ['cisco_sensor_item.include'], +} diff -Nru check-mk-1.2.2p3/cisco_temp_sensor check-mk-1.2.6p12/cisco_temp_sensor --- check-mk-1.2.2p3/cisco_temp_sensor 2013-11-05 09:23:07.000000000 +0000 +++ check-mk-1.2.6p12/cisco_temp_sensor 2015-09-21 10:59:54.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . 
\ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. @@ -116,75 +116,89 @@ # Create dict with thresholds thresholds = {} - for id, sensortype, value, sensorstate in state_info: - thresholds.setdefault(id, []) + for id, sensortype, precision, value, sensorstate in state_info: + thresholds.setdefault(id, []) for endoid, level in levels_info: - # endoid is e.g. 21549.9 or 21459.10 - id, subid = endoid.split('.') - thresholds.setdefault(id, []).append(saveint(level)) + level = float(level)/(10**int(precision)) + # endoid is e.g. 21549.9 or 21459.10 + id, subid = endoid.split('.') + thresholds.setdefault(id, []).append(level) # Create main dictionary (only of temperature sensors) sensors = [] - for id, sensortype, value, sensorstate in state_info: + for id, sensortype, precision, value, sensorstate in state_info: + value = float(value)/(10**int(precision)) sensors.append( ( id, descriptions.get(id), sensortype, - saveint(value), sensorstate, thresholds[id] ) ) + value, sensorstate, thresholds[id] ) ) return sensors - def inventory_cisco_temp_sensor(info): - sensors = parse_cisco_temp_sensor(info) - # Use all temperature sensors with a non-empty description and valid threshold - return [ (entry[1], None) for entry - in sensors if entry[1] != None - and entry[2] == '8' - and len(entry[5]) == 2 ] +# sensors = parse_cisco_temp_sensor(info) + inventory = [] +# for id, descr, sensortype, scale, value, sensorstate, levels in sensors: +# # Use all temperature sensors with a non-empty description and valid threshold +# if descr != None and sensortype == '8' and len(levels) in [ 2, 4 ]: +# warn, crit = saveint(levels[0]), saveint(levels[1]) +# inventory.append(( descr, (warn, crit) )) + return inventory -def check_cisco_temp_sensor(item, _no_params, info): +def check_cisco_temp_sensor(item, params, info): sensors = parse_cisco_temp_sensor(info) - for id, descr, sensortype, value, sensorstate, levels in sensors: + for id, descr, sensortype, temp, sensorstate, levels in sensors: if item == descr: - warn, crit = levels - if sensorstate == "2": - return (3, "UNKNOWN - data from sensor currently not available") - elif sensorstate == "3": - return (3, "UNKNOWN - sensor is broken") - if value >= crit: - state = 2 - elif value >= warn: - state = 1 - else: - state = 0 - return (state, nagios_state_names[state] + " - %dC (levels at %d/%d)" % (value, warn, crit), [ - ( "temperature", value, warn, crit ) ]) - return (3, "UNKNOWN - sensor not found in SNMP data") - -check_info['cisco_temp_sensor'] = (check_cisco_temp_sensor, "Temperature %s", 1, inventory_cisco_temp_sensor) - -snmp_info['cisco_temp_sensor'] = [ - # Description of sensors - ( ".1.3.6.1.2.1.47.1.1.1.1", [ - OID_END, - 2, # Description of the sensor - ]), - - # Type and current state - ( ".1.3.6.1.4.1.9.9.91.1.1.1.1", [ - OID_END, - 1, # Type (see above), 8 = Celsius, 12 = truth value - 4, # Most recent measurement - 5, # Status of the sensor 1 == ok, 2 == cannot report, 3 == broken - ]), - - # Threshold - ( ".1.3.6.1.4.1.9.9.91.1.2.1.1", [ - OID_END, - 4, # Thresholds - ]), -] - -snmp_scan_functions['cisco_temp_sensor'] = \ - lambda oid: "cisco" in oid(".1.3.6.1.2.1.1.1.0").lower() and \ - oid(".1.3.6.1.4.1.9.9.91.1.1.1.1.*") != None +# # Use built in levels if no levels are configured +# if params: +# warn, crit = params +# 
else: +# warn, crit = levels[0:2] +# # convert threshold milli values to native unit +# if scale == "8": +# temp = temp/1000 +# warn = warn/1000 +# +# # convert milli temperature to native unit +# if scale == "8": +# crit = crit/1000 +# +# if sensorstate == "2": +# return (3, "Data from sensor currently not available") +# elif sensorstate == "3": +# return (3, "Sensor is broken") +# +# return check_temperature(temp, (warn, crit)) + return 3, "This check is obsolete, please re-inventorize this host" + + +check_info['cisco_temp_sensor'] = { + "check_function" : check_cisco_temp_sensor, + "inventory_function" : inventory_cisco_temp_sensor, + "service_description": "Temperature %s", + "group" : "hw_temperature", + "has_perfdata" : True, + "snmp_scan_function" : lambda oid: "cisco" in oid(".1.3.6.1.2.1.1.1.0").lower() and \ + oid(".1.3.6.1.4.1.9.9.91.1.1.1.1.*") != None, + "snmp_info" : [ + ( ".1.3.6.1.2.1.47.1.1.1.1", [ + OID_END, + 2, # Description of the sensor + ]), + + # Type and current state + ( ".1.3.6.1.4.1.9.9.91.1.1.1.1", [ + OID_END, + 1, # Type (see above), 8 = Celsius, 12 = truth value + 3, # Precision + 4, # Most recent measurement + 5, # Status of the sensor 1 == ok, 2 == cannot report, 3 == broken + ]), + + # Threshold + ( ".1.3.6.1.4.1.9.9.91.1.2.1.1", [ + OID_END, + 4, # Thresholds + ]), + ], + "includes" : [ "temperature.include" ] +} -checkgroup_of["cisco_temp_sensor"] = "temperature_auto" diff -Nru check-mk-1.2.2p3/cisco_vpn_tunnel check-mk-1.2.6p12/cisco_vpn_tunnel --- check-mk-1.2.2p3/cisco_vpn_tunnel 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/cisco_vpn_tunnel 2015-09-21 10:59:54.000000000 +0000 @@ -0,0 +1,65 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. + +# OID: 1.3.6.1.4.1.9.9.171.1.2.3.1.7.### +# cikeTunRemoteValue +# Description +# "The value of the remote peer identity. +# If the remote peer type is an IP Address, then this +# is the IP Address used to identify the remote peer. +# If the remote peer type is a host name, then +# this is the host name used to identify the +# remote peer." 
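Note: the check below supports a global fallback state plus per-tunnel overrides. A hypothetical example of the params structure it expects (addresses and aliases invented for illustration):

    # Hypothetical rule value for the vpn_tunnel group; each tuple is
    # (remote peer IP, alias, state to report when the tunnel is missing)
    example_vpn_tunnel_params = {
        "state"   : 2,   # default: a missing tunnel is CRIT
        "tunnels" : [
            ("10.1.1.1", "branch office A", 1),   # missing -> WARN
            ("10.2.2.2", "branch office B", 0),   # missing -> OK
        ],
    }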
+ +def inventory_cisco_vpn_tunnel(info): + return [(item[0], {}) for item in info] + + +def check_cisco_vpn_tunnel(item, params, info): + tunnel_not_found_state = params.get('state', 2) # default error state + + # Try to find individual error states by tunnel IP/name + alias = "" + for tunnel_ip, tunnel_alias, not_found_state in params.get('tunnels', []): + if item == tunnel_ip: + alias = "[%s] " % tunnel_alias + tunnel_not_found_state = not_found_state + + if [item] not in info: + return tunnel_not_found_state, "%sTunnel is missing" % alias + else: + return 0, "%sTunnel is OK" % alias + + +check_info["cisco_vpn_tunnel"] = { + "check_function" : check_cisco_vpn_tunnel, + "inventory_function" : inventory_cisco_vpn_tunnel, + "service_description" : "VPN Tunnel %s", + "group" : "vpn_tunnel", + "snmp_scan_function" : lambda oid: oid(".1.3.6.1.2.1.1.1.0").lower().startswith("cisco adaptive security") \ + or "vpn 3000 concentrator" in oid(".1.3.6.1.2.1.1.1.0").lower(), + "snmp_info" : ( ".1.3.6.1.4.1.9.9.171.1.2.3.1", [ 7 ] ), +} diff -Nru check-mk-1.2.2p3/cisco_vss check-mk-1.2.6p12/cisco_vss --- check-mk-1.2.2p3/cisco_vss 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/cisco_vss 2015-06-24 09:48:36.000000000 +0000 @@ -0,0 +1,136 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. 
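Note: check_cisco_vss below is a generator that yields one (state, text) subresult per chassis and per VSL connection. Check_MK combines such subresults into one service result; a minimal sketch of that aggregation (an assumption about the core, not code from this patch):

    # Sketch only: worst-state aggregation over yielded subresults
    # (simplified: treats UNKNOWN=3 as worse than CRIT=2, which the real
    # core does not; cisco_vss only yields 0 and 2, so it does not matter here)
    def combine_subresults(subresults):
        subresults = list(subresults)
        state = max(s for s, _text in subresults)
        return state, ", ".join(text for _s, text in subresults)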
+ +# Example SNMP walk (extract) +# All names OIDs are prefixed with CISCO-VIRTUAL-SWITCH-MIB:: +# All numeric OIDs are prefixed with .1.3.6.1.4.1.9.9.388 + +# cvsDomain.0 .1.1.1.0 10 +# cvsSwitchID.0 .1.1.2.0 1 +# cvsSwitchCapability.0 .1.1.3.0 "C0 " +# cvsSwitchMode.0 .1.1.4.0 2 +# cvsSwitchConvertingStatus.0 .1.1.5.0 2 +# cvsVSLChangeNotifEnable.0 .1.1.6.0 2 +# cvsCoreSwitchPriority.1 .1.2.1.1.2.1 100 +# cvsCoreSwitchPriority.2 .1.2.1.1.2.2 100 +# cvsCoreSwitchPreempt.1 .1.2.1.1.3.1 2 +# cvsCoreSwitchPreempt.2 .1.2.1.1.3.2 2 +# cvsCoreSwitchLocation.1 .1.2.1.1.4.1 +# cvsCoreSwitchLocation.2 .1.2.1.1.4.2 +# cvsChassisSwitchID.2 .1.2.2.1.1.2 1 +# cvsChassisSwitchID.500 .1.2.2.1.1.500 2 +# cvsChassisRole.2 .1.2.2.1.2.2 2 +# cvsChassisRole.500 .1.2.2.1.2.500 3 +# cvsChassisUpTime.2 .1.2.2.1.3.2 184371004 +# cvsChassisUpTime.500 .1.2.2.1.3.500 184371004 +# cvsVSLCoreSwitchID.41 .1.3.1.1.2.41 1 +# cvsVSLCoreSwitchID.42 .1.3.1.1.2.42 2 +# cvsVSLConnectOperStatus.41 .1.3.1.1.3.41 1 +# cvsVSLConnectOperStatus.42 .1.3.1.1.3.42 1 +# cvsVSLLastConnectionStateChange.41 .1.3.1.1.4.41 "07 DE 07 18 01 12 22 00 " +# cvsVSLLastConnectionStateChange.42 .1.3.1.1.4.42 "07 DE 07 18 01 12 22 00 " +# cvsVSLConfiguredPortCount.41 .1.3.1.1.5.41 2 +# cvsVSLConfiguredPortCount.42 .1.3.1.1.5.42 2 +# cvsVSLOperationalPortCount.41 .1.3.1.1.6.41 2 +# cvsVSLOperationalPortCount.42 .1.3.1.1.6.42 2 +# cvsVSLConnectionRowStatus.41 .1.3.1.1.7.41 1 +# cvsVSLConnectionRowStatus.42 .1.3.1.1.7.42 1 +# cvsModuleVSSupported.1000 .1.4.1.1.1.1000 1 +# cvsModuleVSSupported.11000 .1.4.1.1.1.11000 1 +# cvsModuleVSLCapable.1000 .1.4.1.1.2.1000 1 +# cvsModuleVSLCapable.11000 .1.4.1.1.2.11000 1 +# cvsModuleSlotNumber.1000 .1.4.1.1.3.1000 1 +# cvsModuleSlotNumber.11000 .1.4.1.1.3.11000 11 +# cvsModuleRprWarm.1000 .1.4.1.1.4.1000 1 +# cvsModuleRprWarm.11000 .1.4.1.1.4.11000 1 +# cvsDualActiveDetectionNotifEnable.0 .1.5.1.0 2 + +cisco_vss_role_names = { + '1' : 'standalone', + '2' : 'active', + '3' : 'standby', +} + +cisco_vss_operstatus_names = { + '1' : 'up', + '2' : 'down', +} + +def inventory_cisco_vss(info): + for switch_id, chassis_role in info[0]: + if chassis_role in [ '2', '3' ]: # active, standby + return [ (None, None) ] + +def check_cisco_vss(item, params, info): + chassis, ports = info + for switch_id, chassis_role in chassis: + if chassis_role == '1': + state = 2 + else: + state = 0 + yield state, "chassis %s: %s" % (switch_id, cisco_vss_role_names[chassis_role]) + + yield 0, "%d VSL connections configured" % len(ports) + + for core_switch_id, operstatus, conf_portcount, op_portcount in ports: + if operstatus == '1': + state = 0 + else: + state = 2 + yield state, "core switch %s: VSL %s" % (core_switch_id, cisco_vss_operstatus_names[operstatus]) + + if conf_portcount == op_portcount: + state = 0 + else: + state = 2 + yield state, "%s/%s ports operational" % (op_portcount, conf_portcount) + + +check_info["cisco_vss"] = { + "check_function" : check_cisco_vss, + "inventory_function" : inventory_cisco_vss, + "service_description": "VSS Status", + "snmp_scan_function" : lambda oid: ( + "Catalyst 45" in oid(".1.3.6.1.2.1.1.1.0") or + "Catalyst 65" in oid(".1.3.6.1.2.1.1.1.0")) and \ + oid(".1.3.6.1.4.1.9.9.388.1.1.1.0"), + "snmp_info" : [ + ( ".1.3.6.1.4.1.9.9.388.1.2.2.1", + [ + 1, # cvsChassisSwitchID + 2, # cvsChassisRole: standalone(1), active(2), standby(3) + ] + ), + ( ".1.3.6.1.4.1.9.9.388.1.3.1.1", + [ + 2, # cvsVSLCoreSwitchID + 3, # cvsVSLConnectOperStatus: up(1), down(2) + 5, # cvsVSLConfiguredPortCount + 6, # 
cvsVSLOperationalPortCount + ] + ), + ], +} diff -Nru check-mk-1.2.2p3/cisco_wlc check-mk-1.2.6p12/cisco_wlc --- check-mk-1.2.2p3/cisco_wlc 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/cisco_wlc 2015-09-21 10:59:54.000000000 +0000 @@ -0,0 +1,88 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. + +# States: +# 1: OK, 2: Crit, 3: Warn + +def cisco_wlc_convert(info): + data = [] + + for j in range(0, len(info), 4): + for i in range(1, len(info[j])): + if len(info[j][i][1]) > 0: + data.append((info[j][i][1], info[j+1][i][1], info[j+2][i][1], info[j+3][i][1], info[j][0] )) + + return data + +def inventory_cisco_wlc(info): + data = cisco_wlc_convert(info) + return [ (x[0], None) for x in data ] + +def check_cisco_wlc(item, params, info): + data = cisco_wlc_convert(info) + for name, mac, state, model, node in data: + if name == item: + if node == None: + node = '' + else: + node = ' (connected to %s)' % node + + state = saveint(state) + + if state == 1: + return 0, "Accesspoint online" + node + if state == 3: + return 1, "Accesspoint state Warning" + node + if state == 2: + return 2, "State Critical" + node + return 3, "Unknown state (%s) " % state + + # Special treatment if this device is missing + if params: + for ap_name, ap_state in params.get("ap_name", []): + if item.startswith(ap_name): + return ap_state, "Accesspoint 1 not found (State set to %s by rule)" % nagios_state_names[ap_state] + + return 2, "Accesspoint not found" + +check_info["cisco_wlc"] = { + "check_function" : check_cisco_wlc, + "inventory_function" : inventory_cisco_wlc, + "group" : "cisco_wlc", + "service_description" : "AP %s", + "node_info" : True, + "snmp_scan_function" : lambda oid: oid('.1.3.6.1.2.1.1.2.0') in [".1.3.6.1.4.1.9.1.1069", + ".1.3.6.1.4.1.14179.1.1.4.3", + ".1.3.6.1.4.1.9.1.1645", + ".1.3.6.1.4.1.9.1.1631", + ".1.3.6.1.4.1.9.1.1279"], + "snmp_info" : [( ".1.3.6.1.4.1.14179.2.2.1.1.3", [ OID_END, '' ]), + ( ".1.3.6.1.4.1.14179.2.2.1.1.1", [ OID_END, '' ]), + ( ".1.3.6.1.4.1.14179.2.2.1.1.6", [ OID_END, '' ]), + ( ".1.3.6.1.4.1.14179.2.2.1.1.16", [ OID_END, '' ]), + ], +} + diff -Nru check-mk-1.2.2p3/cisco_wlc_clients check-mk-1.2.6p12/cisco_wlc_clients --- check-mk-1.2.2p3/cisco_wlc_clients 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/cisco_wlc_clients 2015-09-16 14:25:30.000000000 +0000 @@ -0,0 
+1,82 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. + +# Example output from agent: +# 2: Wlan Name, 42: Interface Name, 38, Connectet Clients + +def inventory_cisco_wlc_clients(info): + if len(info) > 0: + return [ ('Summary', None ) ] + [ (x[0], None) for x in info ] + +def check_cisco_wlc_clients(item, params, info): + state = 0 + found = False + for line in info: + if item == "Summary": + found = True + interface = "Summary" + num_of_clients = 0 + for l in info: + num_of_clients += saveint(l[2]) + break + + if line[0] == item: + interface = line[1] + num_of_clients = saveint(line[2]) + found = True + break + + if found: + if params: + crit_low, warn_low, warn_high, crit_high = params + if num_of_clients < crit_low: + state = 2 + elif num_of_clients < warn_low: + state = 1 + elif num_of_clients > crit_high: + state = 2 + elif num_of_clients > warn_high: + state = 1 + + perf = [ ("clients", num_of_clients, None, None) ] + return state, "%d connections (%s)" % (num_of_clients, interface), perf + + return 3, "WiFi not found" + +check_info["cisco_wlc_clients"] = { + "check_function" : check_cisco_wlc_clients, + "inventory_function" : inventory_cisco_wlc_clients, + "service_description" : "Clients", + "group" : "wlc_clients", + "has_perfdata" : True, + "snmp_scan_function" : lambda oid: oid('.1.3.6.1.2.1.1.2.0') in [".1.3.6.1.4.1.9.1.1069", + ".1.3.6.1.4.1.9.1.1645", + ".1.3.6.1.4.1.9.1.1631", + ".1.3.6.1.4.1.14179.1.1.4.3", + ".1.3.6.1.4.1.9.1.1279"], + "snmp_info" : ( ".1.3.6.1.4.1.14179.2.1.1.1", [ 2, 42, 38 ]), +} + diff -Nru check-mk-1.2.2p3/citrix_licenses check-mk-1.2.6p12/citrix_licenses --- check-mk-1.2.2p3/citrix_licenses 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/citrix_licenses 2015-06-24 09:48:36.000000000 +0000 @@ -0,0 +1,63 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. 
+# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. + +# Example output from plugin: +# <<>> +# PVS_STD_CCS 80 0 +# PVS_STD_CCS 22 0 +# CEHV_ENT_CCS 22 0 +# MPS_ENT_CCU 2160 1636 +# MPS_ENT_CCU 22 22 +# XDT_ENT_UD 22 18 +# XDS_ENT_CCS 22 0 +# PVSD_STD_CCS 42 0 + +def inventory_citrix_licenses(info): + license_types = set([]) + for license_type, have, used in info: + license_types.add(license_type) + return [ (lt, None) for lt in license_types ] + +def check_citrix_licenses(item, params, info): + have = 0 + used = 0 + for license_type, h, u in info: + if item == license_type: + have += int(h) + used += int(u) + if not have: + return 3, "No licenses of that type found" + + return license_check_levels(have, used, params) + +check_info["citrix_licenses"] = { + 'check_function' : check_citrix_licenses, + 'inventory_function' : inventory_citrix_licenses, + 'service_description' : 'Citrix Licenses %s', + 'has_perfdata' : True, + 'group' : "citrix_licenses", + 'includes' : [ "license.include" ] +} diff -Nru check-mk-1.2.2p3/citrix_serverload check-mk-1.2.6p12/citrix_serverload --- check-mk-1.2.2p3/citrix_serverload 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/citrix_serverload 2015-09-21 10:59:54.000000000 +0000 @@ -0,0 +1,56 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. 
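Note on citrix_licenses above: the agent may report the same license type on several lines, so the check sums `have` and `used` over all matching lines before handing the totals to license_check_levels from license.include. A sketch of that aggregation step:

    # Sketch only: sum license counts per type, as check_citrix_licenses does
    def sum_citrix_licenses(info):
        totals = {}
        for license_type, have, used in info:
            h, u = totals.get(license_type, (0, 0))
            totals[license_type] = (h + int(have), u + int(used))
        return totals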
+ +#<<>> +#100 + +citrix_serverload_default_levels = ( 8500, 9500 ) + +def inventory_citrix_serverload(info): + return [ ( None, 'citrix_serverload_default_levels' )] + +def check_citrix_serverload(_no_item, params, info): + try: + load = int(info[0][0]) + except: + return 3, "Load information not found" + + warn, crit = params + state = 0 + if load > crit: + state = 2 + elif load > warn: + state = 1 + return state, "Current Citrix Load is: " + str(load), [ ('perf', load, warn, crit ) ] + +check_info["citrix_serverload"] = { + "group" : "citrix_load", + "check_function" : check_citrix_serverload, + "inventory_function" : inventory_citrix_serverload, + "service_description" : "Citrix Serverload", + "has_perfdata" : True, +} + diff -Nru check-mk-1.2.2p3/citrix_sessions check-mk-1.2.6p12/citrix_sessions --- check-mk-1.2.2p3/citrix_sessions 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/citrix_sessions 2015-06-24 09:48:36.000000000 +0000 @@ -0,0 +1,67 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. 
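Note on citrix_serverload above: the value is the Citrix load evaluator index, which ranges from 0 (idle) to 10000 (fully loaded); hence the defaults of 8500/9500. A sketch of the threshold logic:

    # Sketch only: same comparison as check_citrix_serverload
    def citrix_load_state(load, warn=8500, crit=9500):
        if load > crit:
            return 2   # CRIT
        elif load > warn:
            return 1   # WARN
        return 0       # OK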
+ +#<<>> +#sessions 1 +#active_sessions 1 +#inactive_sessions 0 + +citrix_sessions_default_levels = { + "total" : ( 60, 65 ), + "active" : ( 60, 65 ), + "inactive" : ( 10, 15 ), +} +def inventory_citrix_sessions(info): + return [ ( None, "citrix_sessions_default_levels" )] + +def check_citrix_sessions(_no_item, params, info): + session = {} + session['total'], session['active'], session['inactive'] = \ + map(int, (info[0][1], info[1][1], info[2][1])) + state = 0 + messages = [] + perf = [] + for what in [ 'total', 'active', 'inactive' ]: + warn, crit = params.get(what, (None, None)) + perf.append(( what, session[what], warn, crit )) + if crit != None and session[what] > crit: + messages.append("%s: %s(!!)" % ( what, session[what] )) + state = 2 + elif warn != None and session[what] > warn: + messages.append("%s: %s(!)" % ( what, session[what] )) + state = max(state, 1) + else: + messages.append("%s: %s" % ( what, session[what] )) + + return state, ", ".join(messages), perf + +check_info["citrix_sessions"] = { + "group" : "citrix_sessions", + "check_function" : check_citrix_sessions, + "inventory_function" : inventory_citrix_sessions, + "service_description" : "Citrix Sessions", + "has_perfdata" : True, +} diff -Nru check-mk-1.2.2p3/climaveneta_alarm check-mk-1.2.6p12/climaveneta_alarm --- check-mk-1.2.2p3/climaveneta_alarm 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/climaveneta_alarm 2015-06-24 09:48:36.000000000 +0000 @@ -0,0 +1,107 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. 
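Note on citrix_sessions above: total, active and inactive sessions are each compared against their own upper (warn, crit) levels, and breaches are flagged with the usual (!) / (!!) markers in the info text. A sketch of the per-metric comparison:

    # Sketch only: one session counter against (warn, crit) upper levels
    def session_level_state(value, levels):
        warn, crit = levels
        if crit is not None and value > crit:
            return 2
        elif warn is not None and value > warn:
            return 1
        return 0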
+
+climaveneta_alarms = {
+    #20 : "Global (general)",
+    21 : "Maintenance Status",
+    22 : "Password",
+    23 : "High water 1 temperature",
+    24 : "High water 2 temperature",
+    25 : "Low room humidity",
+    26 : "High room humidity",
+    27 : "Low room temperature",
+    28 : "High room temperature",
+    29 : "High air inlet temperature",
+    30 : "High air outlet temperature",
+    31 : "Room humid probe",
+    32 : "Room probe",
+    33 : "Inlet 1 probe",
+    34 : "Inlet 2 probe",
+    35 : "Inlet 3 probe",
+    36 : "Inlet 4 probe",
+    37 : "Outlet 1 probe",
+    38 : "Outlet 2 probe",
+    39 : "Outlet 3 probe",
+    40 : "Outlet 4 probe",
+    41 : "Water 1 temperature probe",
+    42 : "Water 2 temperature probe",
+    43 : "Door open",
+    44 : "EEPROM",
+    45 : "Fan 1 disconnected",
+    46 : "Fan 2 disconnected",
+    47 : "Fan 3 disconnected",
+    48 : "Fan 4 disconnected",
+    49 : "Dew point",
+    50 : "Flooding",
+    51 : "LAN",
+    52 : "Dirty filter",
+    53 : "Electronic thermostatic valve",
+    54 : "Low pressure",
+    55 : "High pressure",
+    56 : "Air flow",
+    57 : "Fire smoke",
+    58 : "I/O expansion",
+    59 : "Inverter",
+    60 : "Envelope",
+    61 : "Polygon inconsistent",
+    62 : "Delta pressure for inverter compressor",
+    63 : "Primary power supply",
+    64 : "Energy management",
+    65 : "Low current humidifier",
+    66 : "No water humidifier",
+    67 : "High current humidifier",
+    68 : "Humidifier Board Offline",
+    69 : "Life timer expired Reset/Clean cylinder",
+    70 : "Humidifier Drain",
+    71 : "Generic Humidifier",
+    72 : "Electric heater",
+}
+
+
+def inventory_climaveneta_alarm(info):
+    return [(None, None)]
+
+
+def check_climaveneta_alarm(item, params, info):
+    hit = False
+    for oid_id, status in info:
+        alarm_id = int(oid_id.split('.')[0])
+        if alarm_id in climaveneta_alarms.keys():
+            if status != '0':
+                hit = True
+                yield 2, "Alarm: %s" % climaveneta_alarms[alarm_id]
+    if not hit:
+        yield 0, "No alarm state"
+
+
+check_info["climaveneta_alarm"] = {
+    "check_function"      : check_climaveneta_alarm,
+    "inventory_function"  : inventory_climaveneta_alarm,
+    "service_description" : "Alarm Status",
+    "snmp_scan_function"  : lambda oid: oid(".1.3.6.1.2.1.1.1.0") == "pCO Gateway",
+    "snmp_info"           : (".1.3.6.1.4.1.9839.2.1", [ OID_END, 1 ] ),
+}
+
diff -Nru check-mk-1.2.2p3/climaveneta_fan check-mk-1.2.6p12/climaveneta_fan
--- check-mk-1.2.2p3/climaveneta_fan 1970-01-01 00:00:00.000000000 +0000
+++ check-mk-1.2.6p12/climaveneta_fan 2015-09-21 10:59:54.000000000 +0000
@@ -0,0 +1,63 @@
+#!/usr/bin/python
+# -*- encoding: utf-8; py-indent-offset: 4 -*-
+# +------------------------------------------------------------------+
+# | ____ _ _ __ __ _ __ |
+# | / ___| |__ ___ ___| | __ | \/ | |/ / |
+# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / |
+# | | |___| | | | __/ (__| < | | | | . \ |
+# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ |
+# | |
+# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de |
+# +------------------------------------------------------------------+
+#
+# This file is part of Check_MK.
+# The official homepage is at http://mathias-kettner.de/check_mk.
+#
+# check_mk is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by
+# the Free Software Foundation in version 2. check_mk is distributed
+# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with-
+# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE. See the GNU General Public License for more de-
+# tails. You should have received a copy of the GNU General Public
+# License along with GNU Make; see the file COPYING.
If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. + + +factory_settings["climaveneta_fan_default_levels"] = { + "lower" : ( 200, 100 ), + "upper" : ( 700, 800 ), +} + + +def inventory_climaveneta_fan(info): + if len(info[0]) == 2: + return [ (1, {}), (2, {}) ] + + +def check_climaveneta_fan(item, params, info): + rpm = int(info[0][item - 1]) + l_warn, l_crit = params['lower'] + u_warn, u_crit = params['upper'] + perfdata = [("rpm", rpm )] + message = "Speed at %d RPM" % rpm + if rpm <= l_crit or rpm >= u_crit: + return 2, message, perfdata + elif rpm <= l_warn or rpm >= u_warn: + return 1, message, perfdata + else: + return 0, message, perfdata + + +check_info["climaveneta_fan"] = { + "check_function" : check_climaveneta_fan, + "inventory_function" : inventory_climaveneta_fan, + "service_description" : "Fan %s", + "has_perfdata" : True, + "snmp_scan_function" : lambda oid: oid(".1.3.6.1.2.1.1.1.0") == "pCO Gateway", + "snmp_info" : (".1.3.6.1.4.1.9839.2.1.2", [ 42, 43 ] ), + "group" : "hw_fans", + "default_levels_variable" : "climaveneta_fan_default_levels", +} + diff -Nru check-mk-1.2.2p3/climaveneta_temp check-mk-1.2.6p12/climaveneta_temp --- check-mk-1.2.2p3/climaveneta_temp 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/climaveneta_temp 2015-06-24 09:48:36.000000000 +0000 @@ -0,0 +1,74 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. 
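
The climaveneta_fan check above keeps a fixed band per fan from its factory settings: critical at or outside 100/800 RPM, warning at or outside 200/700 RPM. A minimal sketch of that banding, with hypothetical sample speeds:

    def fan_state(rpm, lower=(200, 100), upper=(700, 800)):
        l_warn, l_crit = lower
        u_warn, u_crit = upper
        if rpm <= l_crit or rpm >= u_crit:
            return 2  # CRIT: outside the hard band
        if rpm <= l_warn or rpm >= u_warn:
            return 1  # WARN: outside the soft band
        return 0      # OK

    assert fan_state(450) == 0
    assert fan_state(150) == 1
    assert fan_state(820) == 2
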
+ +climaveneta_sensors = { + 1 : "Room", + 3 : "Outlet Air 1", + 4 : "Outlet Air 2", + 5 : "Outlet Air 3", + 6 : "Outlet Air 4", + 7 : "Intlet Air 1", + 8 : "Intlet Air 2", + 9 : "Intlet Air 3", + 10 : "Intlet Air 4", + 11 : "Coil 1 Inlet Water", + 12 : "Coil 2 Inlet Water", + 13 : "Coil 1 Outlet Water", + 14 : "Coil 2 Outlet Water", + 23 : "Regulation Valve/Compressor", + 24 : "Regulation Fan 1", + 25 : "Regulation Fan 2", + 28 : "Suction", +} + +climaveneta_temp_default_levels = (28, 30) + +def inventory_climaveneta_temp(info): + for sensor_id, value in info: + sensor_id = int(sensor_id.split('.')[0]) + if sensor_id in climaveneta_sensors.keys() and int(value) > 0: + yield climaveneta_sensors[sensor_id], 'climaveneta_temp_default_levels' + + +def check_climaveneta_temp(item, params, info): + for sensor_id, sensor_value in info: + sensor_id = int(sensor_id.split('.')[0]) + if climaveneta_sensors.get(sensor_id) == item: + sensor_value = int(sensor_value) / 10.0 + return check_temperature(sensor_value, params) + + +check_info["climaveneta_temp"] = { + "check_function" : check_climaveneta_temp, + "inventory_function" : inventory_climaveneta_temp, + "service_description" : "Temperature %s", + "has_perfdata" : True, + "snmp_scan_function" : lambda oid: oid(".1.3.6.1.2.1.1.1.0") == "pCO Gateway", + "snmp_info" : (".1.3.6.1.4.1.9839.2.1", [ OID_END, 2 ] ), + "group" : "room_temperature", + "includes" : [ "temperature.include" ], +} + diff -Nru check-mk-1.2.2p3/cmciii check-mk-1.2.6p12/cmciii --- check-mk-1.2.2p3/cmciii 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/cmciii 2015-09-21 10:59:54.000000000 +0000 @@ -0,0 +1,727 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. 
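
The climaveneta_temp check above keys each sensor by the first component of OID_END and treats the raw SNMP value as tenths of a degree. A small sketch of that decoding (the row is a hypothetical sample, the sensor table an excerpt):

    sensors = {7: "Intlet Air 1"}           # excerpt from climaveneta_sensors
    row = ("7.0", "235")                    # hypothetical (OID_END, value) pair
    sensor_id = int(row[0].split(".")[0])   # "7.0" -> 7
    celsius = int(row[1]) / 10.0            # device reports tenths of a degree
    print("%s %.1f" % (sensors[sensor_id], celsius))  # -> Intlet Air 1 23.5
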
+ + +# Example SNMP walk: +# .1.3.6.1.4.1.2606.7.4.2.2.1.3.1.1 Temperature.DescName +# .1.3.6.1.4.1.2606.7.4.2.2.1.3.1.2 Temperature.Value +# .1.3.6.1.4.1.2606.7.4.2.2.1.3.1.3 Temperature.Offset +# .1.3.6.1.4.1.2606.7.4.2.2.1.3.1.4 Temperature.SetPtHighAlarm +# .1.3.6.1.4.1.2606.7.4.2.2.1.3.1.5 Temperature.SetPtHighWarning +# .1.3.6.1.4.1.2606.7.4.2.2.1.3.1.6 Temperature.SetPtLowWarning +# .1.3.6.1.4.1.2606.7.4.2.2.1.3.1.7 Temperature.SetPtLowAlarm +# .1.3.6.1.4.1.2606.7.4.2.2.1.3.1.8 Temperature.Hysteresis +# .1.3.6.1.4.1.2606.7.4.2.2.1.3.1.9 Temperature.Status +# .1.3.6.1.4.1.2606.7.4.2.2.1.3.1.10 Temperature.Category +# .1.3.6.1.4.1.2606.7.4.2.2.1.3.1.11 Access.DescName +# .1.3.6.1.4.1.2606.7.4.2.2.1.3.1.12 Access.Value +# .1.3.6.1.4.1.2606.7.4.2.2.1.3.1.13 Access.Sensitivity +# .1.3.6.1.4.1.2606.7.4.2.2.1.3.1.14 Access.Delay +# .1.3.6.1.4.1.2606.7.4.2.2.1.3.1.15 Access.Status +# .1.3.6.1.4.1.2606.7.4.2.2.1.3.1.16 Access.Category +# .1.3.6.1.4.1.2606.7.4.2.2.1.3.1.17 Input 1.DescName +# .1.3.6.1.4.1.2606.7.4.2.2.1.3.1.18 Input 1.Value +# .1.3.6.1.4.1.2606.7.4.2.2.1.3.1.19 Input 1.Logic +# .1.3.6.1.4.1.2606.7.4.2.2.1.3.1.20 Input 1.Delay +# .1.3.6.1.4.1.2606.7.4.2.2.1.3.1.21 Input 1.Status +# .1.3.6.1.4.1.2606.7.4.2.2.1.3.1.22 Input 1.Category +# .1.3.6.1.4.1.2606.7.4.2.2.1.3.1.23 Input 2.DescName +# .1.3.6.1.4.1.2606.7.4.2.2.1.3.1.24 Input 2.Value +# .1.3.6.1.4.1.2606.7.4.2.2.1.3.1.25 Input 2.Logic +# .1.3.6.1.4.1.2606.7.4.2.2.1.3.1.26 Input 2.Delay +# .1.3.6.1.4.1.2606.7.4.2.2.1.3.1.27 Input 2.Status +# ... +# .1.3.6.1.4.1.2606.7.4.2.2.1.10.1.1 Temperature +# .1.3.6.1.4.1.2606.7.4.2.2.1.10.1.2 31.00 C +# .1.3.6.1.4.1.2606.7.4.2.2.1.10.1.3 -3.70 C +# .1.3.6.1.4.1.2606.7.4.2.2.1.10.1.4 50.00 C +# .1.3.6.1.4.1.2606.7.4.2.2.1.10.1.5 40.00 C +# .1.3.6.1.4.1.2606.7.4.2.2.1.10.1.6 10.00 C +# .1.3.6.1.4.1.2606.7.4.2.2.1.10.1.7 5.00 C +# .1.3.6.1.4.1.2606.7.4.2.2.1.10.1.8 0.10 % +# .1.3.6.1.4.1.2606.7.4.2.2.1.10.1.9 OK +# .1.3.6.1.4.1.2606.7.4.2.2.1.10.1.10 0 +# .1.3.6.1.4.1.2606.7.4.2.2.1.10.1.11 Door +# .1.3.6.1.4.1.2606.7.4.2.2.1.10.1.12 0 +# .1.3.6.1.4.1.2606.7.4.2.2.1.10.1.13 0 +# .1.3.6.1.4.1.2606.7.4.2.2.1.10.1.14 10 s +# .1.3.6.1.4.1.2606.7.4.2.2.1.10.1.15 Closed +# .1.3.6.1.4.1.2606.7.4.2.2.1.10.1.16 0 +# .1.3.6.1.4.1.2606.7.4.2.2.1.10.1.17 Input_1 +# .1.3.6.1.4.1.2606.7.4.2.2.1.10.1.18 1 +# .1.3.6.1.4.1.2606.7.4.2.2.1.10.1.19 0:Off / 1:On +# .1.3.6.1.4.1.2606.7.4.2.2.1.10.1.20 0.5 s +# .1.3.6.1.4.1.2606.7.4.2.2.1.10.1.21 On +# .1.3.6.1.4.1.2606.7.4.2.2.1.10.1.22 0 +# .1.3.6.1.4.1.2606.7.4.2.2.1.10.1.23 Input_2 +# .1.3.6.1.4.1.2606.7.4.2.2.1.10.1.24 0 +# .1.3.6.1.4.1.2606.7.4.2.2.1.10.1.25 0:Off / 1:On +# .1.3.6.1.4.1.2606.7.4.2.2.1.10.1.26 0.5 s +# .1.3.6.1.4.1.2606.7.4.2.2.1.10.1.27 Off + +# .1.3.6.1.4.1.2606.7.4.1.2.1.2.1 CMCIII-PU +# .1.3.6.1.4.1.2606.7.4.1.2.1.2.2 CMCIII-GAT +# .1.3.6.1.4.1.2606.7.4.1.2.1.2.3 CMCIII-GAT +# .1.3.6.1.4.1.2606.7.4.1.2.1.2.4 CMCIII-IO3 +# .1.3.6.1.4.1.2606.7.4.1.2.1.2.5 CMCIII-SEN +# .1.3.6.1.4.1.2606.7.4.1.2.1.2.6 CMCIII-SEN +# .1.3.6.1.4.1.2606.7.4.1.2.1.2.7 CMCIII-SEN +# .1.3.6.1.4.1.2606.7.4.1.2.1.2.8 CMCIII-ACC +# .1.3.6.1.4.1.2606.7.4.1.2.1.2.9 CMCIII-ACC +# .1.3.6.1.4.1.2606.7.4.1.2.1.2.10 CMCIII-ACC +# .1.3.6.1.4.1.2606.7.4.1.2.1.2.11 CMCIII-ACC +# .1.3.6.1.4.1.2606.7.4.1.2.1.3.1 CMCIII-PU +# .1.3.6.1.4.1.2606.7.4.1.2.1.3.2 CAN_BUS_UNIT_I +# .1.3.6.1.4.1.2606.7.4.1.2.1.3.3 CAN_BUS_UNIT_II +# .1.3.6.1.4.1.2606.7.4.1.2.1.3.4 CMCIII-IO3 +# .1.3.6.1.4.1.2606.7.4.1.2.1.3.5 Access Rack_1 +# .1.3.6.1.4.1.2606.7.4.1.2.1.3.6 Access Rack_2 +# 
.1.3.6.1.4.1.2606.7.4.1.2.1.3.7 Access Rack_3
+# .1.3.6.1.4.1.2606.7.4.1.2.1.3.8 Left Door
+# .1.3.6.1.4.1.2606.7.4.1.2.1.3.9 Right Door
+# .1.3.6.1.4.1.2606.7.4.1.2.1.3.10 Side Exit
+# .1.3.6.1.4.1.2606.7.4.1.2.1.3.11 Emergency Exit
+#
+
+# Example for info:
+# [['1.1', 'Temperature.DescName', 'Temperature'],
+#  ['1.2', 'Temperature.Value', '31.00 \xb0C'],
+#  ['1.3', 'Temperature.Offset', '-3.70 \xb0C'],
+#  ['1.4', 'Temperature.SetPtHighAlarm', '50.00 \xb0C'],
+#  ['1.5', 'Temperature.SetPtHighWarning', '40.00 \xb0C'],
+#  ['1.6', 'Temperature.SetPtLowWarning', '10.00 \xb0C'],
+#  ['1.7', 'Temperature.SetPtLowAlarm', '5.00 \xb0C'],
+#  ['1.8', 'Temperature.Hysteresis', '0.10 %'],
+#  ['1.9', 'Temperature.Status', 'OK'],
+#  ['1.10', 'Temperature.Category', '0'],
+#  ['1.11', 'Access.DescName', 'Door'],
+#  ['1.12', 'Access.Value', '0'],
+#  ['1.13', 'Access.Sensitivity', '0'],
+#  ['1.14', 'Access.Delay', '10 s'],
+#  ['1.15', 'Access.Status', 'Closed'],
+#  ['1.25', 'Input 2.Logic', '0:Off / 1:On'],
+#  ['1.26', 'Input 2.Delay', '0.5 s'],
+#  ['1.27', 'Input 2.Status', 'Off'],
+#  ['1.28', 'Input 2.Category', '0'],
+#  ['1.29', 'Output.DescName', 'Alarm Relay'],
+#  ['1.30', 'Output.Relay', 'Off'],
+#  ['1.31', 'Output.Logic', '0:Off / 1:On'],
+#  ['1.32', 'Output.Status', 'Off'],
+#  ['1.33', 'Output.Category', '0'],
+#  ['1.34', 'System.V24 Port.DescName', 'V24 Unit'],
+#  ['1.35', 'System.V24 Port.Device', 'NONE'],
+#  ['1.36', 'System.V24 Port.Message', '--'],
+
+# Converted info dictionary:
+
+# {('1', 'Access'): {'Category': '0',
+#                    'Delay': '10 s',
+#                    'DescName': 'Door',
+#                    'Sensitivity': '0',
+#                    'Status': 'Closed',
+#                    'Value': '0'},
+#  ('1', 'Input 1'): {'Category': '0',
+#                     'Delay': '0.5 s',
+#                     'DescName': 'Input_1',
+#                     'Logic': '0:Off / 1:On',
+#                     'Status': 'On',
+#                     'Value': '1'},
+#  ('1', 'Input 2'): {'Category': '0',
+#                     'Delay': '0.5 s',
+#                     'DescName': 'Input_2',
+#                     'Logic': '0:Off / 1:On',
+#                     'Status': 'Off',
+#                     'Value': '0'},
+
+
+def parse_cmciii_inputs(info):
+    parsed = {}
+    for endoid, varname, value in info:
+        unit, eid = endoid.split(".") # "1.8" -> "1"
+        item_name, key = varname.rsplit(".", 1)
+        item = (unit, item_name)
+        parsed.setdefault(item, {})[key] = value
+    return parsed
+
+def parse_units(info):
+    units = []
+    num = 0
+    for unit_type, descname, state in info:
+        num += 1
+        # no blanks in names since we use blanks in items
+        # later to split between unit_name and item_name
+        descname = re.sub(" ", "_", descname)
+        if descname == '':
+            descname = unit_type + "-" + str(num)
+        units.append((unit_type, descname, state))
+    return units
+
+def get_unit_number(units, unit_name):
+    namelist = map(list, zip(*units))[1]
+    if unit_name in namelist:
+        return str(namelist.index(unit_name) + 1)
+
+cmciii_snmp_info = [
+    [ ".1.3.6.1.4.1.2606.7.4.2.2.1", [
+        OID_END,
+        3,  # Variable names in this subtree, e.g. Temperature.SetPtHighAlarm
+        10, # Actual values in this subtree
+      ]
+    ],
+    [ ".1.3.6.1.4.1.2606.7.4.1.2.1", [
+        2, # Type of unit
+        3, # Descriptive name of unit
+        6, # State
+      ]
+    ]
+]
+
+cmciii_scan = lambda oid: ".1.3.6.1.4.1.2606.7" in oid(".1.3.6.1.2.1.1.2.0")
+
+# .--psm current---------------------------------------------------------.
+# | _ | +# | _ __ ___ _ __ ___ ___ _ _ _ __ _ __ ___ _ __ | |_ | +# | | '_ \/ __| '_ ` _ \ / __| | | | '__| '__/ _ \ '_ \| __| | +# | | |_) \__ \ | | | | | | (__| |_| | | | | | __/ | | | |_ | +# | | .__/|___/_| |_| |_| \___|\__,_|_| |_| \___|_| |_|\__| | +# | |_| | +# +----------------------------------------------------------------------+ + + +def inventory_cmciii_psm_current(info): + inventory = [] + parsed = parse_cmciii_inputs(info[0]) + units = parse_units(info[1]) + for (unit, item_name), entry in parsed.iteritems(): + unit_type, unit_name = units[int(unit)-1][0:2] + if item_name.startswith("PSM_") and item_name.endswith(".Unit"): + item_name = re.sub(r'Unit$','Current',item_name) + inventory.append( ("%s %s" % (unit_name, item_name), None) ) + return inventory + +def check_cmciii_psm_current(item, params, info): + unit_name, item_name = item.split(" ", 1) + itemtemp = re.sub(r'Current$','Unit',item_name) + parsed = parse_cmciii_inputs(info[0]) + unit = get_unit_number(parse_units(info[1]), unit_name) + entry = parsed.get((unit, itemtemp)) + if not entry: + return 3, "No such PSM found" + + descr = entry.get('DescName') + typ = entry.get('Unit Type') + status = entry.get('Status') + current = entry.get('Value') + serial = entry.get('Serial Number') + position = entry.get('Mounting Position') + max_current = entry.get('SetPtHighAlarm') + min_current = entry.get('SetPtLowAlarm') + if status == "OK": + state = 0 + statind = "" + else: + state = 2 + statind = status+" " + strom, einheit = current.split(" ") + max = max_current.split(" ")[0] + min = min_current.split(" ")[0] + + perfdata = [ ( "current", strom+einheit, 0, 0, min, max) ] + + infotext = "%s%s: Current %s (%s/%s), Type %s, Serial %s, Position %s" \ + % ( statind, descr, current, min_current, max_current, typ, serial, position) + + return (state, infotext, perfdata) + +check_info['cmciii.psm_current'] = { + "check_function" : check_cmciii_psm_current, + "inventory_function" : inventory_cmciii_psm_current, + "has_perfdata" : True, + "service_description" : "%s", + "snmp_scan_function" : cmciii_scan, + "snmp_info" : cmciii_snmp_info, +} + +#. +# .--psm plugs-----------------------------------------------------------. 
+# | _ | +# | _ __ ___ _ __ ___ _ __ | |_ _ __ _ ___ | +# | | '_ \/ __| '_ ` _ \ | '_ \| | | | |/ _` / __| | +# | | |_) \__ \ | | | | | | |_) | | |_| | (_| \__ \ | +# | | .__/|___/_| |_| |_| | .__/|_|\__,_|\__, |___/ | +# | |_| |_| |___/ | +# +----------------------------------------------------------------------+ + +def inventory_cmciii_psm_plugs(info): + inventory = [] + parsed = parse_cmciii_inputs(info[0]) + units = parse_units(info[1]) + for (unit, item_name), entry in parsed.iteritems(): + unit_type, unit_name = units[int(unit)-1][0:2] + if item_name.startswith("PSM_") and re.search(r'\.Plug\d$',item_name): + inventory.append( ("%s %s" % (unit_name, item_name), None) ) + return inventory + +def check_cmciii_psm_plugs(item, params, info): + unit_name, item_name = item.split(" ", 1) + parsed = parse_cmciii_inputs(info[0]) + unit = get_unit_number(parse_units(info[1]), unit_name) + entry = parsed.get((unit, item_name)) + if not entry: + return 3, "No such PSM_Plug found" + + descr = entry.get('DescName') + status = entry.get('Status') + if status == "OK": + state = 0 + else: + state = 2 + + infotext = "%s: %s" % ( descr, status) + + return (state, infotext) + +check_info['cmciii.psm_plugs'] = { + "check_function" : check_cmciii_psm_plugs, + "inventory_function" : inventory_cmciii_psm_plugs, + "service_description" : "%s", + "snmp_scan_function" : cmciii_scan, + "snmp_info" : cmciii_snmp_info, +} + +#. +# .--IO------------------------------------------------------------------. +# | ___ ___ | +# | |_ _/ _ \ | +# | | | | | | | +# | | | |_| | | +# | |___\___/ | +# | | +# +----------------------------------------------------------------------+ + +def inventory_cmciii_io(info): + inventory = [] + parsed = parse_cmciii_inputs(info[0]) + units = parse_units(info[1]) + for (unit, item_name), entry in parsed.iteritems(): + unit_type, unit_name = units[int(unit)-1][0:2] + if re.match(r'(Input|Output)',item_name) and re.match(r'CMCIII-IO', unit_type ): + inventory.append( ("%s %s" % (unit_name, item_name), None) ) + + return inventory + +def check_cmciii_io(item, params, info): + unit_name, item_name = item.split(" ", 1) + parsed = parse_cmciii_inputs(info[0]) + unit = get_unit_number(parse_units(info[1]), unit_name) + entry = parsed.get((unit, item_name)) + if not entry: + return 3, "No such IO channel found" + + descr = entry.get('DescName') + status = entry.get('Status') + value = entry.get('Value') + logic = entry.get('Logic') + relay = entry.get('Relay') + delay = entry.get('Delay') + grouping = entry.get('Grouping') + + if relay: # output relay + if status == "OK": + state = 0 + sym = "" + else: + state = 2 + sym = "(!!)" + infotext = "%s: %s%s, Relay %s, Grouping %s, Logic %s" % \ + ( descr, status, sym, relay, grouping, logic) + else: # input relay + if status == "OK": + state = 0 + sym = "" + elif status == "Off": + state = 0 + sym = "" + elif status == "On": + state = 1 + sym = "(!)" + else: + state = 2 + sym = "(!!)" + infotext = "%s: %s, Status %s%s, Logic %s, Delay %s" % \ + ( descr, status, value, sym, logic, delay) + + return (state, infotext) + +check_info['cmciii.io'] = { + "check_function" : check_cmciii_io, + "inventory_function" : inventory_cmciii_io, + "service_description" : "%s", + "snmp_scan_function" : cmciii_scan, + "snmp_info" : cmciii_snmp_info, +} + +#. +# .--access--------------------------------------------------------------. 
+# | | +# | __ _ ___ ___ ___ ___ ___ | +# | / _` |/ __/ __/ _ \/ __/ __| | +# | | (_| | (_| (_| __/\__ \__ \ | +# | \__,_|\___\___\___||___/___/ | +# | | +# +----------------------------------------------------------------------+ + +def inventory_cmciii_access(info): + inventory = [] + parsed = parse_cmciii_inputs(info[0]) + units = parse_units(info[1]) + for (unit, item_name), entry in parsed.iteritems(): + unit_type, unit_name = units[int(unit)-1][0:2] + if item_name == "Access": + inventory.append( (unit_name+" Access", None) ) + return inventory + +def check_cmciii_access(item, params, info): + parsed = parse_cmciii_inputs(info[0]) + unit_name = re.sub(r' Access','',item) + unit = get_unit_number(parse_units(info[1]), unit_name) + entry = parsed.get((unit, "Access")) + if not entry: + return 3, "No such Access data found" + + descr = entry.get('DescName') + status = entry.get('Status') + value = entry.get('Value') + delay = entry.get('Delay') + sensitivity = entry.get('Sensitivity') + + if status == "Closed": + state = 0 + sym = "" + else: + state = 2 + sym = "(!!)" + + infotext = "%s %s%s, Value %s, Delay %s, Sens. %s " % \ + ( descr, status, sym, value, delay, sensitivity) + + return (state, infotext) + +check_info['cmciii.access'] = { + "check_function" : check_cmciii_access, + "inventory_function" : inventory_cmciii_access, + "service_description" : "%s", + "snmp_scan_function" : cmciii_scan, + "snmp_info" : cmciii_snmp_info, +} + +#. +# .--temp----------------------------------------------------------------. +# | _ | +# | | |_ ___ _ __ ___ _ __ | +# | | __/ _ \ '_ ` _ \| '_ \ | +# | | || __/ | | | | | |_) | | +# | \__\___|_| |_| |_| .__/ | +# | |_| | +# +----------------------------------------------------------------------+ + +def inventory_cmciii_temp(info): + inventory = [] + parsed = parse_cmciii_inputs(info[0]) + units = parse_units(info[1]) + for (unit, item_name), entry in parsed.iteritems(): + unit_type, unit_name = units[int(unit)-1][0:2] + if "Temperature" in item_name and "Air." 
not in item_name: + inventory.append( ("%s %s" % (unit_name, item_name), None) ) + return inventory + +def parse_temp(val): + if re.search(" B0 ", val): + # is encoded string + val = re.sub(" ","", val).decode("hex") + val, t_unit = val.split(" ", 1) + # omit special characters in unit, because they + # cause problems in rrdtools + t_unit = re.sub("[^\w]","", t_unit) + return val, t_unit + +def check_cmciii_temp(item, params, info): + unit_name, item_name = item.split(" ", 1) + parsed = parse_cmciii_inputs(info[0]) + unit = get_unit_number(parse_units(info[1]), unit_name) + entry = parsed.get((unit, item_name)) + if not entry: + return 3, "No such temperature data found" + + descr = entry.get('DescName') + status = entry.get('Status') + offset = entry.get('Offset') + + value, t_unit = parse_temp(entry.get('Value')) + highalarm = parse_temp(entry.get('SetPtHighAlarm'))[0] + highwarning = parse_temp(entry.get('SetPtHighWarning'))[0] + lowalarm = parse_temp(entry.get('SetPtLowAlarm'))[0] + lowwarning = parse_temp(entry.get('SetPtLowWarning'))[0] + + if status == "OK": + state = 0 + status = "" + else: + state = 2 + + perfdata = [ ("temp", value+t_unit, 0, 0 ) ] + + infotext = "%s %s %s - Limits low %s/%s high %s/%s, Offset %s" % \ + ( descr, value, status, lowalarm, lowwarning, highwarning, highalarm, offset ) + + return (state, infotext, perfdata) + +check_info['cmciii.temp'] = { + "check_function" : check_cmciii_temp, + "inventory_function" : inventory_cmciii_temp, + "service_description" : "%s", + "has_perfdata" : True, + "snmp_scan_function" : cmciii_scan, + "snmp_info" : cmciii_snmp_info, +} + +#. +# .--can current---------------------------------------------------------. +# | _ | +# | ___ __ _ _ __ ___ _ _ _ __ _ __ ___ _ __ | |_ | +# | / __/ _` | '_ \ / __| | | | '__| '__/ _ \ '_ \| __| | +# | | (_| (_| | | | | | (__| |_| | | | | | __/ | | | |_ | +# | \___\__,_|_| |_| \___|\__,_|_| |_| \___|_| |_|\__| | +# | | +# +----------------------------------------------------------------------+ +# | | +# '----------------------------------------------------------------------' +def inventory_cmciii_can_current(info): + inventory = [] + parsed = parse_cmciii_inputs(info[0]) + units = parse_units(info[1]) + for (unit, item_name), entry in parsed.iteritems(): + unit_type, unit_name = units[int(unit)-1][0:2] + if item_name.startswith('System.CAN') and item_name.endswith('Current'): + inventory.append( ("%s %s" % (unit_name, item_name), None) ) + return inventory + +def check_cmciii_can_current(item, params, info): + unit_name, item_name = item.split(" ", 1) + parsed = parse_cmciii_inputs(info[0]) + unit = get_unit_number(parse_units(info[1]), unit_name) + entry = parsed.get((unit, item_name)) + if not entry: + return 3, "No such temperature data found" + + descr = entry.get('DescName') + status = entry.get('Status') + value = re.sub(r' ','',entry.get('Value')) + hysteresis = entry.get('Hysteresis') + highalarm = entry.get('SetPtHighAlarm').split(" ")[0] + highwarning = entry.get('SetPtHighWarning').split(" ")[0] + + if status == "OK": + state = 0 + status = "" + else: + state = 2 + + perfdata = [ ("current", value, highwarning, highalarm ) ] + + infotext = "%s %s %s - Limits %s/%s, Hysteresis %s" % \ + ( descr, value, status, highwarning, highalarm, hysteresis) + + return (state, infotext, perfdata) + +check_info['cmciii.can_current'] = { + "check_function" : check_cmciii_can_current, + "inventory_function" : inventory_cmciii_can_current, + "service_description" : "%s", + "has_perfdata" : True, + 
"snmp_scan_function" : cmciii_scan, + "snmp_info" : cmciii_snmp_info, +} + +#. +# .--sensor--------------------------------------------------------------. +# | | +# | ___ ___ _ __ ___ ___ _ __ | +# | / __|/ _ \ '_ \/ __|/ _ \| '__| | +# | \__ \ __/ | | \__ \ (_) | | | +# | |___/\___|_| |_|___/\___/|_| | +# | | +# +----------------------------------------------------------------------+ +# | | +# '----------------------------------------------------------------------' + +def inventory_cmciii_sensor(info): + inventory = [] + parsed = parse_cmciii_inputs(info[0]) + units = parse_units(info[1]) + for (unit, item_name), entry in parsed.iteritems(): + unit_type, unit_name = units[int(unit)-1][0:2] + if re.match(r'CMCIII-SEN', unit_type ): + inventory.append((unit_name+" Sensor", None)) + return inventory + +def check_cmciii_sensor(item, params, info): + parsed = parse_cmciii_inputs(info[0]) + unit_name = re.sub(r' Sensor','',item) + unit = get_unit_number(parse_units(info[1]), unit_name) + entry = parsed.get((unit, "Input")) + if not entry: + return 3, "No such sensor unit found" + + status = entry.get('Status') + + if status == "Closed": + state = 0 + else: + state = 2 + + return (state, status) + +check_info['cmciii.sensor'] = { + "check_function" : check_cmciii_sensor, + "inventory_function" : inventory_cmciii_sensor, + "service_description" : "%s", + "snmp_scan_function" : cmciii_scan, + "snmp_info" : cmciii_snmp_info, +} + +#. +# .--state---------------------------------------------------------------. +# | _ _ | +# | ___| |_ __ _| |_ ___ | +# | / __| __/ _` | __/ _ \ | +# | \__ \ || (_| | || __/ | +# | |___/\__\__,_|\__\___| | +# | | +# +----------------------------------------------------------------------+ +# | | +# '----------------------------------------------------------------------' + +def inventory_cmciii_state(info): + inventory = [] + units = parse_units(info[1]) + for unit_type, unit_name, state in units: + inventory.append((unit_name+" State", None)) + return inventory + +def check_cmciii_state(item, params, info): + unit_name = re.sub(r' State','',item) + units = parse_units(info[1]) + unit_number = get_unit_number(units, unit_name) + entry = units[int(unit_number)-1] + stati = { + 1 : ( "not available", 3 ), + 2 : ( "OK", 0 ), + 3 : ( "detect", 1), + 4 : ( "lost", 2), + 5 : ( "changed", 0), + 6 : ( "error", 2), + } + status = stati.get(saveint(entry[2]), ( "unknown", 3 ) ) + + infotext = "Device returns internal state \"%s\"" % status[0] + return (int(status[1]), infotext ) + + +check_info['cmciii.state'] = { + "check_function" : check_cmciii_state, + "inventory_function" : inventory_cmciii_state, + "service_description" : "%s", + "snmp_scan_function" : cmciii_scan, + "snmp_info" : cmciii_snmp_info, +} + +#. +# .--humidity------------------------------------------------------------. 
+# | _ _ _ _ _ | +# | | |__ _ _ _ __ ___ (_) __| (_) |_ _ _ | +# | | '_ \| | | | '_ ` _ \| |/ _` | | __| | | | | +# | | | | | |_| | | | | | | | (_| | | |_| |_| | | +# | |_| |_|\__,_|_| |_| |_|_|\__,_|_|\__|\__, | | +# | |___/ | +# +----------------------------------------------------------------------+ +# | | +# '----------------------------------------------------------------------' + +def inventory_cmciii_humidity(info): + inventory = [] + parsed = parse_cmciii_inputs(info[0]) + units = parse_units(info[1]) + for (unit, item_name), entry in parsed.iteritems(): + unit_type, unit_name = units[int(unit)-1][0:2] + if item_name == 'Humidity' and unit_type == 'CMCIII-HUM': + inventory.append( ("%s %s" % (unit_name, item_name), None) ) + + return inventory + +def parse_hum(val): + return val.split(" ", 1) + +def check_cmciii_humidity(item, params, info): + unit_name, item_name = item.split(" ", 1) + parsed = parse_cmciii_inputs(info[0]) + unit = get_unit_number(parse_units(info[1]), unit_name) + entry = parsed.get((unit, item_name)) + if not entry: + return 3, "No such temperature data found" + + descr = entry.get('DescName') + status = entry.get('Status') + offset = entry.get('Offset') + + value = parse_hum(entry.get('Value'))[0] + highalarm = parse_hum(entry.get('SetPtHighAlarm'))[0] + highwarning = parse_hum(entry.get('SetPtHighWarning'))[0] + lowalarm = parse_hum(entry.get('SetPtLowAlarm'))[0] + lowwarning = parse_hum(entry.get('SetPtLowWarning'))[0] + + if status == "OK": + state = 0 + status = "" + else: + state = 2 + + perfdata = [ ("hum", value, 0, 0 ) ] + + infotext = "%s %s%% %s - Limits low %s/%s high %s/%s, Offset %s" % \ + ( descr, value, status, lowalarm, lowwarning, highwarning, highalarm, offset ) + + return (state, infotext, perfdata) + +check_info['cmciii.humidity'] = { + "check_function" : check_cmciii_humidity, + "inventory_function" : inventory_cmciii_humidity, + "service_description" : "%s", + "snmp_scan_function" : cmciii_scan, + "snmp_info" : cmciii_snmp_info, + "has_perfdata" : True, +} +#. diff -Nru check-mk-1.2.2p3/cmciii.access check-mk-1.2.6p12/cmciii.access --- check-mk-1.2.2p3/cmciii.access 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/cmciii.access 2015-06-24 09:48:36.000000000 +0000 @@ -0,0 +1,22 @@ +title: Rittal CMC-III Access Modules: Status +agents: snmp +catalog: hw/environment/rittal +license: GPL +distribution: check_mk +description: + This check monitors the status of the access modules of the + Rittal CMC-III units. + No limits are set in the check, since limits are configured in the + Rittal device itself. The state given by the Rittal device is + taken as the state of the check as follows: + If the Rittal device returns {Closed}, the check is {OK}. Otherwise + the check is {CRIT}. + +item: + None + +perfdata: + None + +inventory: + None diff -Nru check-mk-1.2.2p3/cmciii.can_current check-mk-1.2.6p12/cmciii.can_current --- check-mk-1.2.2p3/cmciii.can_current 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/cmciii.can_current 2015-06-24 09:48:36.000000000 +0000 @@ -0,0 +1,22 @@ +title: Rittal CMC-III PU: Canbus and Current +agents: snmp +catalog: hw/environment/rittal +license: GPL +distribution: check_mk +description: + This check monitors the state and current of the canbus ports of the + Rittal CMC-III PU unit. + No limits are set in the check, since limits are configured in the + Rittal device itself. 
The state given by the Rittal device is + taken as the state of the check as follows: + If the Rittal device returns {OK}, the check is {OK}. Otherwise + the check is {CRIT}. + +item: + The unit number and the internal name of the canbus port + +perfdata: + One variable: the current on the canbus port + +inventory: + All canbus ports of the device are inventorized diff -Nru check-mk-1.2.2p3/cmciii.humidity check-mk-1.2.6p12/cmciii.humidity --- check-mk-1.2.2p3/cmciii.humidity 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/cmciii.humidity 2015-06-24 09:48:36.000000000 +0000 @@ -0,0 +1,22 @@ +title: Rittal CMC-III Units: humidity +agents: snmp +catalog: hw/environment/rittal +license: GPL +distribution: check_mk +description: + This check monitors the humidity measured by various + Rittal CMC-III units. + No limits are set in the check, since limits are configured in the + Rittal device itself. The state given by the Rittal device is + taken as the state of the check as follows: + If the Rittal device returns {OK}, the check is {OK}. Otherwise + the check is {CRIT}. + +item: + The unit name and the name of the humidity sensor + +perfdata: + One variable: the humidity + +inventory: + All humidity sensors on all cmciii units are detected diff -Nru check-mk-1.2.2p3/cmciii.include check-mk-1.2.6p12/cmciii.include --- check-mk-1.2.2p3/cmciii.include 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/cmciii.include 2015-09-18 13:28:27.000000000 +0000 @@ -0,0 +1,36 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. + +def cmciii_extract_temps(liste): + miste = [] + for l in liste: + try: + miste.append(float(l.split(" ")[0])) + except ValueError: + # Dirty hack fix for a dirty check. Ignore all values that can not be + # converted to floats + pass + return miste diff -Nru check-mk-1.2.2p3/cmciii.io check-mk-1.2.6p12/cmciii.io --- check-mk-1.2.2p3/cmciii.io 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/cmciii.io 2015-06-24 09:48:36.000000000 +0000 @@ -0,0 +1,22 @@ +title: Rittal CMC-III IO Units: Status of Input and Output Ports +agents: snmp +catalog: hw/environment/rittal +license: GPL +distribution: check_mk +description: + This check monitors the status of the Input and Output Ports of + the Rittal CMC-III IO Units, + No limits are set in the check. 
The state given by the Rittal device is + taken as the state of the check as follows: + For input ports the state is {OK} when the value at the port is {0}. + For output ports the state is {OK} when the status of the port is {Off}. + Otherwise the checks are {WARN}. + +item: + The internal name of the input or output ports + +perfdata: + None + +inventory: + All input and output ports are inventorized diff -Nru check-mk-1.2.2p3/cmciii_lcp_airin check-mk-1.2.6p12/cmciii_lcp_airin --- check-mk-1.2.2p3/cmciii_lcp_airin 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/cmciii_lcp_airin 2015-09-21 10:59:54.000000000 +0000 @@ -0,0 +1,102 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. 
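
The cmciii_extract_temps helper from cmciii.include above strips units such as "40.00 C" down to bare floats and silently drops unparseable fields; the LCP checks below then index the resulting list positionally. A standalone demonstration with made-up values:

    def cmciii_extract_temps(liste):
        miste = []
        for l in liste:
            try:
                miste.append(float(l.split(" ")[0]))
            except ValueError:
                pass  # ignore fields that carry no number
        return miste

    print(cmciii_extract_temps(["31.00 C", "n/a", "5.00 C"]))  # -> [31.0, 5.0]

Note that a dropped field shifts the positions of everything after it, which is exactly the risk the "dirty hack" comment in the helper alludes to.
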
+ +def inventory_cmciii_lcp_airin(info): + if info: + return [( None, None )] + +def check_cmciii_lcp_airin(item, params, info): + unit_desc, unit_status, desc, status = info[0][0:4] + temps = cmciii_extract_temps(info[0][4:]) + + state = 0 + sym = "" + if status.lower() != "ok" or unit_status.lower() != "ok": + if status.lower() in ( "ok", "warning" ) and \ + unit_status.lower() in ( "ok", "warning" ): + state = 1 + else: + state = 2 + if temps[4] < temps[0] or temps[4] > temps[3] : + state = 2 + sym = "(!!)" + elif temps[4] < temps[1] or temps[4] > temps[2]: + state = max(state, 1) + sym = "(!)" + else: + state = max(state, 0) + + info_text = "%s %s %s %s, Temp.Average: %.1f°C%s, Top/Mid/Bottom: %.1f/%.1f/%.1f" % \ + (unit_desc, unit_status, desc, status, temps[4], sym, temps[5], temps[6], temps[7]) + + levels_text = ", lowcrit/lowwarn/highwarn/highcrit: %.1f/%.1f/%.1f/%.1f" % ( temps[0], temps[1], temps[2], temps[3] ) + + perfdata = [ ("temp", temps[4], str(temps[1])+":"+str(temps[2]), str(temps[0])+":"+str(temps[3]), 0 ) ] + + return (state, info_text + levels_text, perfdata) + +check_info['cmciii_lcp_airin'] = { + "check_function" : check_cmciii_lcp_airin, + "inventory_function" : inventory_cmciii_lcp_airin, + "has_perfdata" : True, + "service_description" : "LCP Fanunit Air IN", + "snmp_scan_function" : lambda oid: oid(".1.3.6.1.2.1.1.1.0").startswith("Rittal LCP") and \ + oid(".1.3.6.1.4.1.2606.7.4.2.2.1.3.2.6").startswith("Air.Temperature.DescName"), + "snmp_info" : ( '.1.3.6.1.4.1.2606.7.4.2.2.1.10', + [ '2.6', '2.13', '2.15', '2.23', '2.21', '2.20', '2.19', '2.18', '2.17', '2.7', '2.8', '2.9' ] + ), + "includes" : [ 'cmciii.include' ], +} +# +# .1.3.6.1.4.1.2606.7.4.2.2.1.3.2.6 Air.Temperature.DescName +# .1.3.6.1.4.1.2606.7.4.2.2.1.3.2.7 Air.Temperature.In-Top +# .1.3.6.1.4.1.2606.7.4.2.2.1.3.2.8 Air.Temperature.In-Mid +# .1.3.6.1.4.1.2606.7.4.2.2.1.3.2.9 Air.Temperature.In-Bot +# .1.3.6.1.4.1.2606.7.4.2.2.1.3.2.10 Air.Temperature.Out-Top +# .1.3.6.1.4.1.2606.7.4.2.2.1.3.2.11 Air.Temperature.Out-Mid +# .1.3.6.1.4.1.2606.7.4.2.2.1.3.2.12 Air.Temperature.Out-Bot +# .1.3.6.1.4.1.2606.7.4.2.2.1.3.2.13 Air.Temperature.Status +# .1.3.6.1.4.1.2606.7.4.2.2.1.3.2.14 Air.Temperature.Category +# .1.3.6.1.4.1.2606.7.4.2.2.1.3.2.15 Air.Server-In.DescName +# .1.3.6.1.4.1.2606.7.4.2.2.1.3.2.16 Air.Server-In.Setpoint +# .1.3.6.1.4.1.2606.7.4.2.2.1.3.2.17 Air.Server-In.Average +# .1.3.6.1.4.1.2606.7.4.2.2.1.3.2.18 Air.Server-In.SetPtHighAlarm +# .1.3.6.1.4.1.2606.7.4.2.2.1.3.2.19 Air.Server-In.SetPtHighWarning +# .1.3.6.1.4.1.2606.7.4.2.2.1.3.2.20 Air.Server-In.SetPtLowWarning +# .1.3.6.1.4.1.2606.7.4.2.2.1.3.2.21 Air.Server-In.SetPtLowAlarm +# .1.3.6.1.4.1.2606.7.4.2.2.1.3.2.22 Air.Server-In.Hysteresis +# .1.3.6.1.4.1.2606.7.4.2.2.1.3.2.23 Air.Server-In.Status +# .1.3.6.1.4.1.2606.7.4.2.2.1.3.2.24 Air.Server-In.Category +# .1.3.6.1.4.1.2606.7.4.2.2.1.3.2.25 Air.Server-Out.DescName +# .1.3.6.1.4.1.2606.7.4.2.2.1.3.2.26 Air.Server-Out.Average +# .1.3.6.1.4.1.2606.7.4.2.2.1.3.2.27 Air.Server-Out.SetPtHighAlarm +# .1.3.6.1.4.1.2606.7.4.2.2.1.3.2.28 Air.Server-Out.SetPtHighWarning +# .1.3.6.1.4.1.2606.7.4.2.2.1.3.2.29 Air.Server-Out.SetPtLowWarning +# .1.3.6.1.4.1.2606.7.4.2.2.1.3.2.30 Air.Server-Out.SetPtLowAlarm +# .1.3.6.1.4.1.2606.7.4.2.2.1.3.2.31 Air.Server-Out.Hysteresis +# .1.3.6.1.4.1.2606.7.4.2.2.1.3.2.32 Air.Server-Out.Status +# .1.3.6.1.4.1.2606.7.4.2.2.1.3.2.33 Air.Server-Out.Category +# diff -Nru check-mk-1.2.2p3/cmciii_lcp_airout check-mk-1.2.6p12/cmciii_lcp_airout --- 
check-mk-1.2.2p3/cmciii_lcp_airout 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/cmciii_lcp_airout 2015-09-21 10:59:54.000000000 +0000 @@ -0,0 +1,72 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. + +def inventory_cmciii_lcp_airout(info): + if info: + return [( None, None )] + +def check_cmciii_lcp_airout(item, params, info): + unit_desc, unit_status, desc, status = info[0][0:4] + temps = cmciii_extract_temps(info[0][4:]) + + state = 0 + sym = "" + if status.lower() != "ok" or unit_status.lower() != "ok": + if status.lower() in ( "ok", "warning" ) and \ + unit_status.lower() in ( "ok", "warning" ): + state = 1 + else: + state = 2 + if temps[4] < temps[0] or temps[4] > temps[3] : + state = 2 + sym = "(!!)" + elif temps[4] < temps[1] or temps[4] > temps[2]: + state = max(state, 1) + sym = "(!)" + else: + state = max(state, 0) + + info_text = "%s %s %s %s, Temp.Average: %.1f°C%s, Top/Mid/Bottom: %.1f/%.1f/%.1f" % \ + (unit_desc, unit_status, desc, status, temps[4], sym, temps[5], temps[6], temps[7]) + + levels_text = ", lowcrit/lowwarn/highwarn/highcrit: %.1f/%.1f/%.1f/%.1f" % ( temps[0], temps[1], temps[2], temps[3] ) + + perfdata = [ ("temp", temps[4], str(temps[1])+":"+str(temps[2]), str(temps[0])+":"+str(temps[3]), 0 ) ] + + return (state, info_text + levels_text, perfdata) + +check_info['cmciii_lcp_airout'] = { + "check_function" : check_cmciii_lcp_airout, + "inventory_function" : inventory_cmciii_lcp_airout, + "has_perfdata" : True, + "service_description" : "LCP Fanunit Air OUT", + "snmp_scan_function" : lambda oid: oid(".1.3.6.1.2.1.1.1.0").startswith("Rittal LCP") and \ + oid(".1.3.6.1.4.1.2606.7.4.2.2.1.3.2.6").startswith("Air.Temperature.DescName"), + "snmp_info" : ( '.1.3.6.1.4.1.2606.7.4.2.2.1.10', + [ '2.6', '2.13', '2.25', '2.32', '2.30', '2.29', '2.28', '2.27', '2.26', '2.10', '2.11', '2.12' ] + ), + "includes" : [ 'cmciii.include' ], +} diff -Nru check-mk-1.2.2p3/cmciii_lcp_fans check-mk-1.2.6p12/cmciii_lcp_fans --- check-mk-1.2.2p3/cmciii_lcp_fans 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/cmciii_lcp_fans 2015-06-24 09:48:36.000000000 +0000 @@ -0,0 +1,77 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | 
+# |           | |   | '_ \ / _ \/ __| |/ /   | |\/| | ' /            |
+# |           | |___| | | |  __/ (__|   <    | |  | | . \            |
+# |            \____|_| |_|\___|\___|_|\_\___|_|  |_|_|\_\           |
+# |                                                                  |
+# | Copyright Mathias Kettner 2014             mk@mathias-kettner.de |
+# +------------------------------------------------------------------+
+#
+# This file is part of Check_MK.
+# The official homepage is at http://mathias-kettner.de/check_mk.
+#
+# check_mk is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by
+# the Free Software Foundation in version 2. check_mk is distributed
+# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with-
+# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE. See the GNU General Public License for more de-
+# ails. You should have received a copy of the GNU General Public
+# License along with GNU Make; see the file COPYING. If not, write
+# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
+# Boston, MA 02110-1301 USA.
+
+def inventory_cmciii_lcp_fans(info):
+    inventory = []
+    # FAN infos have 4 elements. Split the single info line we get
+    # into even sized chunks of 4 elements. In some cases there might
+    # be non-fan information in the resulting data like infos about
+    # water cooling. Filter them out.
+    parts = [ info[0][x+1:x+4] for x in range(0, len(info[0]), 4) ]
+    for i, (name, value, status) in enumerate(parts):
+        if status != "off" and 'FAN' in name:
+            # FIXME: Why not use the unique name? Maybe recode
+            inventory.append((i+1, None))
+    return inventory
+
+def check_cmciii_lcp_fans(item, params, info):
+
+    lowlevel = int(re.sub(" .*$", "", info[0][0])) # global low warning
+
+    parts = [ info[0][x+1:x+4] for x in range(0, len(info[0]), 4) ]
+    for i, (name, value, status) in enumerate(parts):
+        if item == i + 1:
+            rpm, unit = value.split(" ", 1)
+            rpm = int(rpm)
+
+            sym = ""
+            if status == "OK" and rpm >= lowlevel:
+                state = 0
+            elif status == "OK" and rpm < lowlevel:
+                state = 1
+                sym = "(!)"
+            else:
+                state = 2
+                sym = "(!!)"
+
+            info_text = "%s RPM: %d%s (limit %d%s)%s, Status %s" \
+                % (name, rpm, unit, lowlevel, unit, sym, status)
+
+            perfdata = [ ("rpm", str(rpm)+unit, str(lowlevel)+":", 0, 0 ) ]
+
+            return (state, info_text, perfdata)
+
+    return (3, "no SNMP data found")
+
+check_info['cmciii_lcp_fans'] = {
+    "check_function"      : check_cmciii_lcp_fans,
+    "inventory_function"  : inventory_cmciii_lcp_fans,
+    "has_perfdata"        : True,
+    "service_description" : "LCP Fanunit FAN %s",
+    "snmp_scan_function"  : lambda oid: oid(".1.3.6.1.2.1.1.1.0").startswith("Rittal LCP") and \
+                                oid(".1.3.6.1.4.1.2606.7.4.2.2.1.3.2.6").startswith("Air.Temperature.DescName"),
+    "snmp_info"           : ( '.1.3.6.1.4.1.2606.7.4.2.2.1.10.2', range(34, 58)),
+}
diff -Nru check-mk-1.2.2p3/cmciii_lcp_waterflow check-mk-1.2.6p12/cmciii_lcp_waterflow
--- check-mk-1.2.2p3/cmciii_lcp_waterflow 1970-01-01 00:00:00.000000000 +0000
+++ check-mk-1.2.6p12/cmciii_lcp_waterflow 2015-06-24 09:48:36.000000000 +0000
@@ -0,0 +1,71 @@
+#!/usr/bin/python
+# -*- encoding: utf-8; py-indent-offset: 4 -*-
+# +------------------------------------------------------------------+
+# |             ____ _               _        __  __ _  __           |
+# |            / ___| |__   ___  ___| | __   |  \/  | |/ /           |
+# |           | |   | '_ \ / _ \/ __| |/ /   | |\/| | ' /            |
+# |           | |___| | | |  __/ (__|   <    | |  | | .
\ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. + +def inventory_cmciii_lcp_waterflow(info): + if info: + return [(None, None)] + +def check_cmciii_lcp_waterflow(item, params, info): + if info[0]: + # We have a list of values where no item has a fixed index. We + # try to detect the starting index for the needed values now. + try: + index = info[0].index('Waterflow') + name, flow, maxflow, minflow, status = info[0][index:index+5] + except ValueError: + return 3, 'Waterflow information not found' + + unit = flow.split(" ", 1)[1] + flow = float(flow.split(" ", 1)[0]) + minflow = float(minflow.split(" ", 1)[0]) + maxflow = float(maxflow.split(" ", 1)[0]) + + sym = "" + state = 0 + if status != "OK": + state = 2 + sym = "(!!)" + elif flow < minflow or flow > maxflow: + state = 1 + sym = "(!)" + + info_text = "%s Status: %s Flow: %.1f%s, MinFlow: %.1f, MaxFLow: %.1f" \ + % (name, status, flow, sym, minflow, maxflow) + + perfdata = [ ("flow", str(flow)+unit , str(minflow)+":"+str(maxflow), 0, 0 ) ] + + return state, info_text, perfdata + + return (3, "no SNMP data found") + +check_info['cmciii_lcp_waterflow'] = { + "check_function" : check_cmciii_lcp_waterflow, + "inventory_function" : inventory_cmciii_lcp_waterflow, + "has_perfdata" : True, + "service_description" : "LCP Fanunit WATER FLOW", + "snmp_scan_function" : lambda oid: oid(".1.3.6.1.2.1.1.1.0").startswith("Rittal LCP"), + "snmp_info" : ( '.1.3.6.1.4.1.2606.7.4.2.2.1.10.2', range(74, 87)), +} diff -Nru check-mk-1.2.2p3/cmciii_lcp_waterin check-mk-1.2.6p12/cmciii_lcp_waterin --- check-mk-1.2.2p3/cmciii_lcp_waterin 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/cmciii_lcp_waterin 2015-09-21 10:59:54.000000000 +0000 @@ -0,0 +1,74 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. 
check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. + +def inventory_cmciii_lcp_waterin(info): + if info: + return [( None, None )] + +def check_cmciii_lcp_waterin(item, params, info): + limits = () + unit_desc, unit_status, desc = info[0][0:3] + limits = cmciii_extract_temps(info[0][3:8]) + temp = limits.pop(0) + status = info[0][9] + + state = 0 + sym = "" + if status.lower() != "ok" or unit_status.lower() != "ok": + if status.lower() in ( "ok", "warning" ) and \ + unit_status.lower() in ( "ok", "warning" ): + state = 1 + else: + state = 2 + if temp < limits[3] or temp > limits[0] : + state = 2 + sym = "(!!)" + elif temp < limits[2] or temp > limits[1]: + state = max(state, 1) + sym = "(!)" + else: + state = max(state, 0) + + info_text = "%s %s, %s %s, Temperature: %.1f°C%s" \ + % (unit_desc, unit_status, desc, status, temp, sym) + + levels_text = ", lowcrit/lowwarn/highwarn/highcrit: %.1f/%.1f/%.1f/%.1f" \ + % ( limits[3], limits[2], limits[1], limits[0] ) + + perfdata = [ ("temp", temp, str(limits[2])+":"+str(limits[1]), str(limits[3])+":"+str(limits[0]), 0 ) ] + + return (state, info_text + levels_text, perfdata) + +check_info['cmciii_lcp_waterin'] = { + "check_function" : check_cmciii_lcp_waterin, + "inventory_function" : inventory_cmciii_lcp_waterin, + "has_perfdata" : True, + "service_description" : "LCP Fanunit Water IN", + "snmp_scan_function" : lambda oid: oid(".1.3.6.1.2.1.1.1.0").startswith("Rittal LCP") and \ + oid(".1.3.6.1.4.1.2606.7.4.2.2.1.3.2.6").startswith("Air.Temperature.DescName"), + "snmp_info" : ( '.1.3.6.1.4.1.2606.7.4.2.2.1.10.2', [59, 62] + range(64,72) ), + "includes" : [ 'cmciii.include' ], +} diff -Nru check-mk-1.2.2p3/cmciii_lcp_waterout check-mk-1.2.6p12/cmciii_lcp_waterout --- check-mk-1.2.2p3/cmciii_lcp_waterout 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/cmciii_lcp_waterout 2015-09-21 10:59:54.000000000 +0000 @@ -0,0 +1,74 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. 
If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. + +def inventory_cmciii_lcp_waterout(info): + if info: + return [( None, None )] + +def check_cmciii_lcp_waterout(item, params, info): + limits = () + unit_desc, unit_status, desc = info[0][0:3] + limits = cmciii_extract_temps(info[0][3:8]) + temp = limits.pop(0) + status = info[0][9] + + state = 0 + sym = "" + if status.lower() != "ok" or unit_status.lower() != "ok": + if status.lower() in ( "ok", "warning" ) and \ + unit_status.lower() in ( "ok", "warning" ): + state = 1 + else: + state = 2 + if temp < limits[3] or temp > limits[0] : + state = 2 + sym = "(!!)" + elif temp < limits[2] or temp > limits[1]: + state = max(state, 1) + sym = "(!)" + else: + state = max(state, 0) + + info_text = "%s %s, %s %s, Temperature: %.1f°C%s" \ + % (unit_desc, unit_status, desc, status, temp, sym) + + levels_text = ", lowcrit/lowwarn/highwarn/highcrit: %.1f/%.1f/%.1f/%.1f" \ + % ( limits[3], limits[2], limits[1], limits[0] ) + + perfdata = [ ("temp", temp, str(limits[2])+":"+str(limits[1]), str(limits[3])+":"+str(limits[0]), 0 ) ] + + return (state, info_text + levels_text, perfdata) + +check_info['cmciii_lcp_waterout'] = { + "check_function" : check_cmciii_lcp_waterout, + "inventory_function" : inventory_cmciii_lcp_waterout, + "has_perfdata" : True, + "service_description" : "LCP Fanunit Water OUT", + "snmp_scan_function" : lambda oid: oid(".1.3.6.1.2.1.1.1.0").startswith("Rittal LCP") and \ + oid(".1.3.6.1.4.1.2606.7.4.2.2.1.3.2.6").startswith("Air.Temperature.DescName"), + "snmp_info" : ( '.1.3.6.1.4.1.2606.7.4.2.2.1.10.2', [59, 62] + range(73,81) ), + "includes" : [ 'cmciii.include' ], +} diff -Nru check-mk-1.2.2p3/cmciii.psm_current check-mk-1.2.6p12/cmciii.psm_current --- check-mk-1.2.2p3/cmciii.psm_current 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/cmciii.psm_current 2015-06-24 09:48:36.000000000 +0000 @@ -0,0 +1,23 @@ +title: Rittal CMC-III PSM: Currents +agents: snmp +catalog: hw/environment/rittal +license: GPL +distribution: check_mk +description: + This check monitors the currents detected on the power system modules of the + Rittal CMC-III units. + No limits are set in the check, since limits are configured in the + Rittal device itself. The state given by the Rittal device is + taken as the state of the check as follows: + If the Rittal device returns {OK}, the check is {OK}. Otherwise + the check is {CRIT}. + +item: + The name of the power system module found, comprised of the unit number on the bus, + the PSM name, and the unit on the PSM + +perfdata: + One variable: the current + +inventory: + For each power system module found, an entry is created diff -Nru check-mk-1.2.2p3/cmciii.psm_plugs check-mk-1.2.6p12/cmciii.psm_plugs --- check-mk-1.2.2p3/cmciii.psm_plugs 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/cmciii.psm_plugs 2015-06-24 09:48:36.000000000 +0000 @@ -0,0 +1,21 @@ +title: Rittal CMC-III PSM: Plugs +agents: snmp +catalog: hw/environment/rittal +license: GPL +distribution: check_mk +description: + This check monitors the status of the power plugs of the + Rittal CMC-III power supply modules (PSM). + No limits are set in the check. The state given by the Rittal device is + taken as the state of the check as follows: + If the Rittal device returns {OK}, the check is {OK}. Otherwise + the check is {CRIT}. 
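
In the cmciii_lcp_waterin and cmciii_lcp_waterout checks above, the first extracted value is the measured temperature and the remaining four limits, as the code indexes them, are ordered high-alarm, high-warning, low-warning, low-alarm. A compact sketch of that banding with hypothetical limits:

    def water_state(temp, limits):
        hi_crit, hi_warn, lo_warn, lo_crit = limits
        if temp < lo_crit or temp > hi_crit:
            return 2  # CRIT: outside the alarm band
        if temp < lo_warn or temp > hi_warn:
            return 1  # WARN: outside the warning band
        return 0      # OK

    assert water_state(17.0, [50.0, 40.0, 10.0, 5.0]) == 0
    assert water_state(42.0, [50.0, 40.0, 10.0, 5.0]) == 1
    assert water_state(3.0,  [50.0, 40.0, 10.0, 5.0]) == 2
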
+
+item:
+ None
+
+perfdata:
+ None
+
+inventory:
+ None
diff -Nru check-mk-1.2.2p3/cmciii.sensor check-mk-1.2.6p12/cmciii.sensor
--- check-mk-1.2.2p3/cmciii.sensor 1970-01-01 00:00:00.000000000 +0000
+++ check-mk-1.2.6p12/cmciii.sensor 2015-06-24 09:48:36.000000000 +0000
@@ -0,0 +1,22 @@
+title: Rittal CMC-III Sensor Modules: Status
+agents: snmp
+catalog: hw/environment/rittal
+license: GPL
+distribution: check_mk
+description:
+ This check monitors the status of the sensor modules of the
+ Rittal CMC-III units.
+ No limits are set in the check, since limits are configured in the
+ Rittal device itself. The state given by the Rittal device is
+ taken as the state of the check as follows:
+ If the Rittal device returns {Closed}, the check is {OK}. Otherwise
+ the check is {CRIT}.
+
+item:
+ None
+
+perfdata:
+ None
+
+inventory:
+ None
diff -Nru check-mk-1.2.2p3/cmciii.state check-mk-1.2.6p12/cmciii.state
--- check-mk-1.2.2p3/cmciii.state 1970-01-01 00:00:00.000000000 +0000
+++ check-mk-1.2.6p12/cmciii.state 2015-06-24 09:48:36.000000000 +0000
@@ -0,0 +1,22 @@
+title: Rittal CMC-III Units: General State
+agents: snmp
+catalog: hw/environment/rittal
+license: GPL
+distribution: check_mk
+description:
+ This check monitors the general status of the units attached to the
+ Rittal CMC-III. The state given by the system is
+ taken as the state of the check as follows:
+ If the device returns {lost} or {error}, the check is {CRIT}.
+ If the device returns {detect}, the check is {WARN}.
+ If the device returns {changed} or {OK}, the check is {OK}.
+ Otherwise the check is {UNKNOWN}.
+
+item:
+ None
+
+perfdata:
+ None
+
+inventory:
+ None
diff -Nru check-mk-1.2.2p3/cmciii.temp check-mk-1.2.6p12/cmciii.temp
--- check-mk-1.2.2p3/cmciii.temp 1970-01-01 00:00:00.000000000 +0000
+++ check-mk-1.2.6p12/cmciii.temp 2015-06-24 09:48:36.000000000 +0000
@@ -0,0 +1,22 @@
+title: Rittal CMC-III Units: Temperatures
+agents: snmp
+catalog: hw/environment/rittal
+license: GPL
+distribution: check_mk
+description:
+ This check monitors the temperature measured by various
+ Rittal CMC-III units.
+ No limits are set in the check, since limits are configured in the
+ Rittal device itself. The state given by the Rittal device is
+ taken as the state of the check as follows:
+ If the Rittal device returns {OK}, the check is {OK}. Otherwise
+ the check is {CRIT}.
+
+item:
+ The unit name and the name of the temperature sensor
+
+perfdata:
+ One variable: the temperature
+
+inventory:
+ All temperature sensors on all cmciii units are detected
diff -Nru check-mk-1.2.2p3/cmctc check-mk-1.2.6p12/cmctc
--- check-mk-1.2.2p3/cmctc 2013-11-05 09:23:07.000000000 +0000
+++ check-mk-1.2.6p12/cmctc 2015-06-24 09:48:36.000000000 +0000
@@ -7,7 +7,7 @@
 # |           | |___| | | |  __/ (__|   <    | |  | | . \            |
 # |            \____|_| |_|\___|\___|_|\_\___|_|  |_|_|\_\           |
 # |                                                                  |
-# | Copyright Mathias Kettner 2013             mk@mathias-kettner.de |
+# | Copyright Mathias Kettner 2014             mk@mathias-kettner.de |
 # +------------------------------------------------------------------+
 #
 # This file is part of Check_MK.
@@ -55,7 +55,7 @@ } status, current, crit, low, warn = map(int, line[2:7]) descr = line[7] - info_text = " - %s at %d°C" % (descr, current) + info_text = "%s at %d°C" % (descr, current) if params: warn, crit = params if current >= crit: @@ -66,15 +66,15 @@ status_is = ", status is %s" % status_text.get(status, "UNKNOWN") perfdata = [ ("temp", current, warn, crit, 0 ) ] if status == 7: - return (1, "WARNING" + info_text + levels_text, perfdata) + return (1, info_text + levels_text, perfdata) elif status == 8: - return (1, "WARNING" + info_text + ": too low (below %sC)" % low, perfdata) + return (1, info_text + ": too low (below %sC)" % low, perfdata) elif status == 4: - return (0, "OK" + info_text, perfdata) + return (0, info_text, perfdata) else: - return (2, "CRIT" + info_text + levels_text + status_is, perfdata) + return (2, info_text + levels_text + status_is, perfdata) - return (3, "UNKNOWN - Sensor %s not found in SNMP data" % item) + return (3, "Sensor %s not found in SNMP data" % item) check_info['cmctc.temp'] = { "check_function" : check_cmctc_temp, @@ -82,7 +82,8 @@ "has_perfdata" : True, "service_description" : "Temperature %s", "group" : "room_temperature", - "snmp_scan_function" : lambda oid: "CMC-TC" in oid(".1.3.6.1.2.1.1.1.0"), + "snmp_scan_function" : lambda oid: "CMC-TC" in oid(".1.3.6.1.2.1.1.1.0") or \ + "Rittal CMC" in oid(".1.3.6.1.2.1.1.1.0"), "snmp_info" : ( # Base to all IO units ".1.3.6.1.4.1.2606.4.2", diff -Nru check-mk-1.2.2p3/cmctc_lcp check-mk-1.2.6p12/cmctc_lcp --- check-mk-1.2.2p3/cmctc_lcp 2013-11-05 09:23:07.000000000 +0000 +++ check-mk-1.2.6p12/cmctc_lcp 2015-09-21 10:59:54.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. @@ -84,7 +84,7 @@ if itemindex == index: unit = cmctc_lcp_sensortypes[sensortype][0] value = int(value) - infotext = " - " + infotext = "" if description: infotext += description + ", " infotext += "%d%s" % (value, unit) @@ -106,8 +106,8 @@ status = 1 else: status = 2 - return (status, nagios_state_names[status] + infotext, perfdata) - return 3, "UNKNOWN - Sensor not found in SNMP output" + return (status, infotext, perfdata) + return 3, "Sensor not found in SNMP output" snmp_scan_functions["cmctc_lcp"] = \ lambda oid: "CMC-TC" in oid(".1.3.6.1.2.1.1.1.0") @@ -125,12 +125,12 @@ ) -for s, i in cmctc_lcp_sensortypes.items(): - check_info['cmctc_lcp.' + s] = { - "check_function" : (lambda st: lambda item,params,info: check_cmctc_lcp(item, params, info, st))(s), - "inventory_function" : (lambda st: lambda info: inventory_cmctc_lcp(info, st))(s), +for _s, _i in cmctc_lcp_sensortypes.items(): + check_info['cmctc_lcp.' 
+ _s] = { + "check_function" : (lambda st: lambda item,params,info: check_cmctc_lcp(item, params, info, st))(_s), + "inventory_function" : (lambda st: lambda info: inventory_cmctc_lcp(info, st))(_s), "has_perfdata" : True, - "service_description" : i[1], + "service_description" : _i[1], } # Set WATO check group for temperature checks diff -Nru check-mk-1.2.2p3/cmctc_lcp.blower check-mk-1.2.6p12/cmctc_lcp.blower --- check-mk-1.2.2p3/cmctc_lcp.blower 2013-11-05 09:23:07.000000000 +0000 +++ check-mk-1.2.6p12/cmctc_lcp.blower 2015-06-24 09:48:36.000000000 +0000 @@ -1,6 +1,6 @@ -title: Rittal CMC-TC LCP Blowers +title: Rittal CMC-TC LCP: Blowers agents: snmp -author: Mathias Kettner +catalog: hw/environment/rittal license: GPL distribution: check_mk description: diff -Nru check-mk-1.2.2p3/cmctc_lcp.blowergrade check-mk-1.2.6p12/cmctc_lcp.blowergrade --- check-mk-1.2.2p3/cmctc_lcp.blowergrade 2013-11-05 09:23:07.000000000 +0000 +++ check-mk-1.2.6p12/cmctc_lcp.blowergrade 2015-06-24 09:48:36.000000000 +0000 @@ -1,6 +1,6 @@ -title: Rittal CMC-TC LCP Blower total state +title: Rittal CMC-TC LCP: Blower total state agents: snmp -author: Mathias Kettner +catalog: hw/environment/rittal license: GPL distribution: check_mk description: diff -Nru check-mk-1.2.2p3/cmctc_lcp.flow check-mk-1.2.6p12/cmctc_lcp.flow --- check-mk-1.2.2p3/cmctc_lcp.flow 2013-11-05 09:23:07.000000000 +0000 +++ check-mk-1.2.6p12/cmctc_lcp.flow 2015-06-24 09:48:36.000000000 +0000 @@ -1,6 +1,6 @@ -title: Rittal CMC-TC LCP Water flow +title: Rittal CMC-TC LCP: Water flow agents: snmp -author: Mathias Kettner +catalog: hw/environment/rittal license: GPL distribution: check_mk description: diff -Nru check-mk-1.2.2p3/cmctc_lcp.regulator check-mk-1.2.6p12/cmctc_lcp.regulator --- check-mk-1.2.2p3/cmctc_lcp.regulator 2013-11-05 09:23:07.000000000 +0000 +++ check-mk-1.2.6p12/cmctc_lcp.regulator 2015-06-24 09:48:36.000000000 +0000 @@ -1,6 +1,6 @@ -title: Rittal CMC-TC LCP Regulator +title: Rittal CMC-TC LCP: Regulator agents: snmp -author: Mathias Kettner +catalog: hw/environment/rittal license: GPL distribution: check_mk description: diff -Nru check-mk-1.2.2p3/cmctc_lcp.temp check-mk-1.2.6p12/cmctc_lcp.temp --- check-mk-1.2.2p3/cmctc_lcp.temp 2013-11-05 09:23:07.000000000 +0000 +++ check-mk-1.2.6p12/cmctc_lcp.temp 2015-06-24 09:48:36.000000000 +0000 @@ -1,6 +1,6 @@ -title: Rittal CMC-TC LCP Temperature sensors +title: Rittal CMC-TC LCP: Temperature agents: snmp -author: Mathias Kettner +catalog: hw/environment/rittal license: GPL distribution: check_mk description: diff -Nru check-mk-1.2.2p3/cmctc_psm_m check-mk-1.2.6p12/cmctc_psm_m --- check-mk-1.2.2p3/cmctc_psm_m 2013-11-05 09:23:07.000000000 +0000 +++ check-mk-1.2.6p12/cmctc_psm_m 2015-06-24 09:48:36.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. 
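A note on the (lambda st: ...)(_s) construction in the cmctc_lcp registration loop above: the immediately-applied outer lambda is what freezes the loop variable. Python closures bind names late, so without it every generated check function would end up using the sensor type of the last loop iteration. A standalone demonstration (not from the diff):

    # Late binding: all three closures share the loop variable i
    # and therefore all return 2 once the loop has finished.
    late = [lambda: i for i in range(3)]
    print([f() for f in late])     # [2, 2, 2]

    # Immediately applying an outer lambda captures the current value,
    # exactly as the cmctc_lcp loop does with _s above.
    early = [(lambda n: lambda: n)(i) for i in range(3)]
    print([f() for f in early])    # [0, 1, 2]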
@@ -57,45 +57,43 @@ sensor_type = saveint(line[1]) unit = cmctc_pcm_m_sensor_types[sensor_type] current_val = saveint(line[3]) / 10 - output = " - %s at %d%s " % (line[7], current_val, unit) + output = "%s at %d%s " % (line[7], current_val, unit) perf = [(unit, current_val,"","","","")] if int(line[2]) == 4: - return (0, "OK"+output,perf) + return (0, output, perf) else: - return (2, "CRIT"+output,perf) + return (2, output, perf) return (3, "Item not found in SNMP tree") -check_info['cmctc_psm_m'] = (check_cmctc_psm_m, "CMC %s", 1, inventory_cmctc_psm_m) - - - - -snmp_info['cmctc_psm_m'] = ( - # Base to all IO units - ".1.3.6.1.4.1.2606.4.2", - # Each of the up to 4 units has its own subtree - ["3", "4", "5", "6"], - [ - # sensors index (1-4) - "5.2.1.1", - # sensor type (10 = temperature) - "5.2.1.2", - # unit status: notAvail(1), lost(2), changed(3), ok(4), off(5), on(6), warning(7), tooLow(8), tooHigh(9) - "5.2.1.4", - # current value - "5.2.1.5", - # high value (used for critical state) - "5.2.1.6", - # low value (used for warning, if temp falls below this value) - "5.2.1.7", - # warn value (used for warning state) - "5.2.1.8", - #Port Desct - "5.2.1.3", - ] - ) - -snmp_scan_functions["cmctc_psm_m"] = \ - lambda oid: "CMC-TC" in oid(".1.3.6.1.2.1.1.1.0") - +check_info["cmctc_psm_m"] = { + 'check_function': check_cmctc_psm_m, + 'inventory_function': inventory_cmctc_psm_m, + 'service_description': 'CMC %s', + 'has_perfdata': True, + 'snmp_info': ( + # Base to all IO units + ".1.3.6.1.4.1.2606.4.2", + # Each of the up to 4 units has its own subtree + ["3", "4", "5", "6"], + [ + # sensors index (1-4) + "5.2.1.1", + # sensor type (10 = temperature) + "5.2.1.2", + # unit status: notAvail(1), lost(2), changed(3), ok(4), off(5), on(6), warning(7), tooLow(8), tooHigh(9) + "5.2.1.4", + # current value + "5.2.1.5", + # high value (used for critical state) + "5.2.1.6", + # low value (used for warning, if temp falls below this value) + "5.2.1.7", + # warn value (used for warning state) + "5.2.1.8", + # Port Descr + "5.2.1.3", + ]), + 'snmp_scan_function': \ + lambda oid: "CMC-TC" in oid(".1.3.6.1.2.1.1.1.0"), +} diff -Nru check-mk-1.2.2p3/cmctc.temp check-mk-1.2.6p12/cmctc.temp --- check-mk-1.2.2p3/cmctc.temp 2013-11-05 09:23:07.000000000 +0000 +++ check-mk-1.2.6p12/cmctc.temp 2015-06-24 09:48:36.000000000 +0000 @@ -1,6 +1,6 @@ -title: Rittal CMC TC Temperature sensors +title: Rittal CMC-TC: Temperature agents: snmp -author: Mathias Kettner +catalog: hw/environment/rittal license: GPL distribution: check_mk description: diff -Nru check-mk-1.2.2p3/config.py check-mk-1.2.6p12/config.py --- check-mk-1.2.2p3/config.py 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/config.py 2015-09-21 10:59:54.000000000 +0000 @@ -0,0 +1,170 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk.
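The cmctc_psm_m hunk above shows the conversion pattern that runs through this whole release: the positional check_info tuple and the separate snmp_info / snmp_scan_functions tables are folded into one dict per check. Going by that hunk, the old tuple fields map as in this sketch ('example' and the stub functions are hypothetical):

    def inventory_example(info):
        return []

    def check_example(item, params, info):
        return (3, "stub")

    check_info = {}

    # Old style: (check_function, service_description, has_perfdata, inventory_function)
    check_info['example'] = (check_example, "Example %s", 1, inventory_example)

    # New style: one self-contained dict; snmp_info and snmp_scan_function
    # become keys here instead of separate global tables.
    check_info['example'] = {
        'check_function':      check_example,
        'inventory_function':  inventory_example,
        'service_description': 'Example %s',
        'has_perfdata':        True,   # replaces the 0/1 tuple field
    }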
+# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# tails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. + +# This file contains the default settings for almost all configuration +# variables that can be overridden in main.mk. Some configuration +# variables are preset in checks/* as well. + +monitoring_core = "nagios" # other option: "cmc" +agent_port = 6556 +agent_ports = [] +snmp_ports = [] # UDP ports used for SNMP +tcp_connect_timeout = 5.0 +use_dns_cache = True # prevent DNS lookups by using our own cache file +delay_precompile = False # delay Python compilation to Nagios execution +restart_locking = "abort" # also possible: "wait", None +check_submission = "file" # alternative: "pipe" +aggr_summary_hostname = "%s-s" +agent_min_version = 0 # warn if the plugin does not have at least this version +check_max_cachefile_age = 0 # by default do not use cache files when checking +cluster_max_cachefile_age = 90 # secs. +piggyback_max_cachefile_age = 3600 # secs +piggyback_translation = [] # Ruleset for translating piggyback host names +simulation_mode = False +agent_simulator = False +perfdata_format = "pnp" # also possible: "standard" +check_mk_perfdata_with_times = True +debug_log = False # deprecated +monitoring_host = None # deprecated +max_num_processes = 50 + +# SNMP communities and encoding +has_inline_snmp = False # is set to True by inline_snmp module, when available +use_inline_snmp = True +snmp_limit_oid_range = [] # Ruleset to reduce fetched OIDs of a check, only inline SNMP +record_inline_snmp_stats = False +snmp_default_community = 'public' +snmp_communities = [] +snmp_timing = [] +snmp_character_encodings = [] +explicit_snmp_communities = {} # override the rule-based configuration + +# RRD creation (only with CMC) +cmc_log_rrdcreation = None # also: "terse", "full" +cmc_host_rrd_config = [] # Rule for per-host configuration of RRDs +cmc_service_rrd_config = [] # Rule for per-service configuration of RRDs + +# Inventory and inventory checks +inventory_check_interval = None # Nagios intervals (4h = 240) +inventory_check_severity = 1 # warning +inventory_check_do_scan = True # include SNMP scan for SNMP devices +inventory_max_cachefile_age = 120 # seconds +inventory_check_autotrigger = True # Automatically trigger inv-check after automation-inventory +always_cleanup_autochecks = None # For compatibility with old configuration + +# Nagios templates and other settings concerning generation +# of Nagios configuration files. No need to change these values.
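Since config.py above only supplies defaults, any of these names can simply be reassigned in main.mk, which Check_MK executes as ordinary Python. A hypothetical main.mk overriding a few of the defaults listed above (host names and values are made up):

    all_hosts = [ "myserver1", "myserver2|snmp" ]

    snmp_default_community  = "not-public"   # default: 'public'
    tcp_connect_timeout     = 10.0           # default: 5.0
    check_max_cachefile_age = 90             # default: 0 (no caching)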
+# Better adapt the content of the templates +host_template = 'check_mk_host' +cluster_template = 'check_mk_cluster' +pingonly_template = 'check_mk_pingonly' +active_service_template = 'check_mk_active' +inventory_check_template = 'check_mk_inventory' +passive_service_template = 'check_mk_passive' +passive_service_template_perf = 'check_mk_passive_perf' +summary_service_template = 'check_mk_summarized' +service_dependency_template = 'check_mk' +default_host_group = 'check_mk' +generate_hostconf = True +generate_dummy_commands = True +dummy_check_commandline = 'echo "ERROR - you did an active check on this service - please disable active checks" && exit 1' +nagios_illegal_chars = '`;~!$%^&*|\'"<>?,()=' + +# Data to be defined in main.mk +checks = [] +static_checks = {} +check_parameters = [] +checkgroup_parameters = {} +legacy_checks = [] # non-WATO variant of legacy checks +active_checks = {} # WATO variant for fully formalized checks +special_agents = {} # WATO variant for datasource_programs +custom_checks = [] # WATO variant for free-form custom checks without formalization +all_hosts = [] +host_paths = {} +snmp_hosts = [ (['snmp'], ALL_HOSTS) ] +tcp_hosts = [ (['tcp'], ALL_HOSTS), (NEGATE, ['snmp'], ALL_HOSTS), (['!ping'], ALL_HOSTS) ] +bulkwalk_hosts = [] +snmpv2c_hosts = [] +snmp_without_sys_descr = [] +usewalk_hosts = [] +dyndns_hosts = [] # use the host name as the IP address for these hosts +ignored_checktypes = [] # exclude from inventory +ignored_services = [] # exclude from inventory +ignored_checks = [] # exclude from inventory +host_groups = [] +service_groups = [] +service_contactgroups = [] +service_notification_periods = [] # deprecated, will be removed soon. +host_notification_periods = [] # deprecated, will be removed soon. +host_contactgroups = [] +parents = [] +define_hostgroups = None +define_servicegroups = None +define_contactgroups = None +contactgroup_members = {} +contacts = {} +timeperiods = {} # needed for WATO +clusters = {} +clustered_services = [] +clustered_services_of = {} # new in 1.1.4 +clustered_services_mapping = [] # new for 1.2.5i1 WATO rule +datasource_programs = [] +service_aggregations = [] +service_dependencies = [] +non_aggregated_hosts = [] +aggregate_check_mk = False +aggregation_output_format = "multiline" # new in 1.1.6.
Possible also: "multiline" +summary_host_groups = [] +summary_service_groups = [] # service groups for aggregated services +summary_service_contactgroups = [] # service contact groups for aggregated services +summary_host_notification_periods = [] +summary_service_notification_periods = [] +ipaddresses = {} # mapping from hostname to ipaddress +only_hosts = None +distributed_wato_site = None # used by distributed WATO +extra_host_conf = {} +extra_summary_host_conf = {} +extra_service_conf = {} +extra_summary_service_conf = {} +extra_nagios_conf = "" +service_descriptions = {} +donation_hosts = [] +donation_command = 'mail -r checkmk@yoursite.de -s "Host donation %s" donatehosts@mathias-kettner.de' % check_mk_version +scanparent_hosts = [ ( ALL_HOSTS ) ] +host_attributes = {} # needed by WATO, ignored by Check_MK +ping_levels = [] # special parameters for host/PING check_command +host_check_commands = [] # alternative host check instead of check_icmp +check_mk_exit_status = [] # Rule for specifying CMK's exit status in case of various errors +check_mk_agent_target_versions = [] # Rule for defining expected version for agents +check_periods = [] +snmp_check_interval = [] +inv_exports = {} # Rulesets for inventory export hooks +notification_parameters = {} # Rulesets for parameters of notification scripts +use_new_descriptions_for = [] + +# Rulesets for agent bakery +agent_config = {} +bake_agents_on_restart = False diff -Nru check-mk-1.2.2p3/configure check-mk-1.2.6p12/configure --- check-mk-1.2.2p3/configure 2013-11-05 09:42:55.000000000 +0000 +++ check-mk-1.2.6p12/configure 2015-09-21 11:01:32.000000000 +0000 @@ -1,13 +1,11 @@ #! /bin/sh # Guess values for system-dependent variables and create Makefiles. -# Generated by GNU Autoconf 2.67 for MK Livestatus 1.2.2p3. +# Generated by GNU Autoconf 2.69 for MK Livestatus 1.2.6p12. # # Report bugs to . # # -# Copyright (C) 1992, 1993, 1994, 1995, 1996, 1998, 1999, 2000, 2001, -# 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 Free Software -# Foundation, Inc. +# Copyright (C) 1992-1996, 1998-2012 Free Software Foundation, Inc. # # # This configure script is free software; the Free Software Foundation @@ -91,6 +89,7 @@ IFS=" "" $as_nl" # Find who we are. Look in the path if we contain no directory separator. +as_myself= case $0 in #(( *[\\/]* ) as_myself=$0 ;; *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR @@ -135,6 +134,31 @@ # CDPATH. (unset CDPATH) >/dev/null 2>&1 && unset CDPATH +# Use a proper internal environment variable to ensure we don't fall + # into an infinite loop, continuously re-executing ourselves. + if test x"${_as_can_reexec}" != xno && test "x$CONFIG_SHELL" != x; then + _as_can_reexec=no; export _as_can_reexec; + # We cannot yet assume a decent shell, so we have to provide a +# neutralization value for shells without unset; and this also +# works around shells that cannot unset nonexistent variables. +# Preserve -v and -x to the replacement shell. +BASH_ENV=/dev/null +ENV=/dev/null +(unset BASH_ENV) >/dev/null 2>&1 && unset BASH_ENV ENV +case $- in # (((( + *v*x* | *x*v* ) as_opts=-vx ;; + *v* ) as_opts=-v ;; + *x* ) as_opts=-x ;; + * ) as_opts= ;; +esac +exec $CONFIG_SHELL $as_opts "$as_myself" ${1+"$@"} +# Admittedly, this is quite paranoid, since all the known shells bail +# out after a failed `exec'. +$as_echo "$0: could not re-execute with $CONFIG_SHELL" >&2 +as_fn_exit 255 + fi + # We don't want this to propagate to other subprocesses. 
+ { _as_can_reexec=; unset _as_can_reexec;} if test "x$CONFIG_SHELL" = x; then as_bourne_compatible="if test -n \"\${ZSH_VERSION+set}\" && (emulate sh) >/dev/null 2>&1; then : emulate sh @@ -168,7 +192,8 @@ else exitcode=1; echo positional parameters were not saved. fi -test x\$exitcode = x0 || exit 1" +test x\$exitcode = x0 || exit 1 +test -x / || exit 1" as_suggested=" as_lineno_1=";as_suggested=$as_suggested$LINENO;as_suggested=$as_suggested" as_lineno_1a=\$LINENO as_lineno_2=";as_suggested=$as_suggested$LINENO;as_suggested=$as_suggested" as_lineno_2a=\$LINENO eval 'test \"x\$as_lineno_1'\$as_run'\" != \"x\$as_lineno_2'\$as_run'\" && @@ -213,14 +238,25 @@ if test "x$CONFIG_SHELL" != x; then : - # We cannot yet assume a decent shell, so we have to provide a - # neutralization value for shells without unset; and this also - # works around shells that cannot unset nonexistent variables. - BASH_ENV=/dev/null - ENV=/dev/null - (unset BASH_ENV) >/dev/null 2>&1 && unset BASH_ENV ENV - export CONFIG_SHELL - exec "$CONFIG_SHELL" "$as_myself" ${1+"$@"} + export CONFIG_SHELL + # We cannot yet assume a decent shell, so we have to provide a +# neutralization value for shells without unset; and this also +# works around shells that cannot unset nonexistent variables. +# Preserve -v and -x to the replacement shell. +BASH_ENV=/dev/null +ENV=/dev/null +(unset BASH_ENV) >/dev/null 2>&1 && unset BASH_ENV ENV +case $- in # (((( + *v*x* | *x*v* ) as_opts=-vx ;; + *v* ) as_opts=-v ;; + *x* ) as_opts=-x ;; + * ) as_opts= ;; +esac +exec $CONFIG_SHELL $as_opts "$as_myself" ${1+"$@"} +# Admittedly, this is quite paranoid, since all the known shells bail +# out after a failed `exec'. +$as_echo "$0: could not re-execute with $CONFIG_SHELL" >&2 +exit 255 fi if test x$as_have_required = xno; then : @@ -323,6 +359,14 @@ } # as_fn_mkdir_p + +# as_fn_executable_p FILE +# ----------------------- +# Test if FILE is an executable regular file. +as_fn_executable_p () +{ + test -f "$1" && test -x "$1" +} # as_fn_executable_p # as_fn_append VAR VALUE # ---------------------- # Append the text in VALUE to the end of the definition contained in VAR. Take @@ -444,6 +488,10 @@ chmod +x "$as_me.lineno" || { $as_echo "$as_me: error: cannot create $as_me.lineno; rerun with a POSIX shell" >&2; as_fn_exit 1; } + # If we had to re-execute with $CONFIG_SHELL, we're ensured to have + # already done that, so ensure we don't try to do so again and fall + # in an infinite loop. This has already happened in practice. + _as_can_reexec=no; export _as_can_reexec # Don't try to exec as it changes $[0], causing all sort of problems # (the dirname of $[0] is not the place where we might find the # original and so on. Autoconf is especially sensitive to this). @@ -478,16 +526,16 @@ # ... but there are two gotchas: # 1) On MSYS, both `ln -s file dir' and `ln file dir' fail. # 2) DJGPP < 2.04 has no symlinks; `ln -s' creates a wrapper executable. - # In both cases, we have to default to `cp -p'. + # In both cases, we have to default to `cp -pR'. ln -s conf$$.file conf$$.dir 2>/dev/null && test ! 
-f conf$$.exe || - as_ln_s='cp -p' + as_ln_s='cp -pR' elif ln conf$$.file conf$$ 2>/dev/null; then as_ln_s=ln else - as_ln_s='cp -p' + as_ln_s='cp -pR' fi else - as_ln_s='cp -p' + as_ln_s='cp -pR' fi rm -f conf$$ conf$$.exe conf$$.dir/conf$$.file conf$$.file rmdir conf$$.dir 2>/dev/null @@ -499,28 +547,8 @@ as_mkdir_p=false fi -if test -x / >/dev/null 2>&1; then - as_test_x='test -x' -else - if ls -dL / >/dev/null 2>&1; then - as_ls_L_option=L - else - as_ls_L_option= - fi - as_test_x=' - eval sh -c '\'' - if test -d "$1"; then - test -d "$1/."; - else - case $1 in #( - -*)set "./$1";; - esac; - case `ls -ld'$as_ls_L_option' "$1" 2>/dev/null` in #(( - ???[sx]*):;;*)false;;esac;fi - '\'' sh - ' -fi -as_executable_p=$as_test_x +as_test_x='test -x' +as_executable_p=as_fn_executable_p # Sed expression to map a string onto a valid CPP name. as_tr_cpp="eval sed 'y%*$as_cr_letters%P$as_cr_LETTERS%;s%[^_$as_cr_alnum]%_%g'" @@ -552,8 +580,8 @@ # Identity of this package. PACKAGE_NAME='MK Livestatus' PACKAGE_TARNAME='mk-livestatus' -PACKAGE_VERSION='1.2.2p3' -PACKAGE_STRING='MK Livestatus 1.2.2p3' +PACKAGE_VERSION='1.2.6p12' +PACKAGE_STRING='MK Livestatus 1.2.6p12' PACKAGE_BUGREPORT='mk@mathias-kettner.de' PACKAGE_URL='' @@ -597,6 +625,7 @@ ac_subst_vars='am__EXEEXT_FALSE am__EXEEXT_TRUE LTLIBOBJS +nagios_headers LIBOBJS EGREP GREP @@ -613,6 +642,7 @@ am__fastdepCXX_FALSE am__fastdepCXX_TRUE CXXDEPMODE +am__nodep AMDEPBACKSLASH AMDEP_FALSE AMDEP_TRUE @@ -691,6 +721,7 @@ ac_user_opts=' enable_option_checking enable_dependency_tracking +with_nagios4 ' ac_precious_vars='build_alias host_alias @@ -1108,7 +1139,7 @@ $as_echo "$as_me: WARNING: you should use --build, --host, --target" >&2 expr "x$ac_option" : ".*[^-._$as_cr_alnum]" >/dev/null && $as_echo "$as_me: WARNING: invalid host type: $ac_option" >&2 - : ${build_alias=$ac_option} ${host_alias=$ac_option} ${target_alias=$ac_option} + : "${build_alias=$ac_option} ${host_alias=$ac_option} ${target_alias=$ac_option}" ;; esac @@ -1159,8 +1190,6 @@ if test "x$host_alias" != x; then if test "x$build_alias" = x; then cross_compiling=maybe - $as_echo "$as_me: WARNING: if you wanted to set the --build type, don't use --host. - If a cross compiler is detected then cross compile mode will be used" >&2 elif test "x$build_alias" != "x$host_alias"; then cross_compiling=yes fi @@ -1246,7 +1275,7 @@ # Omit some internal or obsolete options to make the list less imposing. # This message is too long to be a string in the A/UX 3.1 sh. cat <<_ACEOF -\`configure' configures MK Livestatus 1.2.2p3 to adapt to many kinds of systems. +\`configure' configures MK Livestatus 1.2.6p12 to adapt to many kinds of systems. Usage: $0 [OPTION]... [VAR=VALUE]... 
@@ -1312,7 +1341,7 @@ if test -n "$ac_init_help"; then case $ac_init_help in - short | recursive ) echo "Configuration of MK Livestatus 1.2.2p3:";; + short | recursive ) echo "Configuration of MK Livestatus 1.2.6p12:";; esac cat <<\_ACEOF @@ -1323,6 +1352,11 @@ --disable-dependency-tracking speeds up one-time build --enable-dependency-tracking do not reject slow dependency extractors +Optional Packages: + --with-PACKAGE[=ARG] use PACKAGE [ARG=yes] + --without-PACKAGE do not use PACKAGE (same as --with-PACKAGE=no) + --with-nagios4 enabled compilation for nagios 4 + Some influential environment variables: CXX C++ compiler command CXXFLAGS C++ compiler flags @@ -1401,10 +1435,10 @@ test -n "$ac_init_help" && exit $ac_status if $ac_init_version; then cat <<\_ACEOF -MK Livestatus configure 1.2.2p3 -generated by GNU Autoconf 2.67 +MK Livestatus configure 1.2.6p12 +generated by GNU Autoconf 2.69 -Copyright (C) 2010 Free Software Foundation, Inc. +Copyright (C) 2012 Free Software Foundation, Inc. This configure script is free software; the Free Software Foundation gives unlimited permission to copy, distribute and modify it. _ACEOF @@ -1448,7 +1482,7 @@ ac_retval=1 fi - eval $as_lineno_stack; test "x$as_lineno_stack" = x && { as_lineno=; unset as_lineno;} + eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno as_fn_set_status $ac_retval } # ac_fn_cxx_try_compile @@ -1486,7 +1520,7 @@ ac_retval=1 fi - eval $as_lineno_stack; test "x$as_lineno_stack" = x && { as_lineno=; unset as_lineno;} + eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno as_fn_set_status $ac_retval } # ac_fn_c_try_compile @@ -1518,7 +1552,7 @@ test ! -s conftest.err } && test -s conftest$ac_exeext && { test "$cross_compiling" = yes || - $as_test_x conftest$ac_exeext + test -x conftest$ac_exeext }; then : ac_retval=0 else @@ -1532,7 +1566,7 @@ # interfere with the next link command; also delete a directory that is # left behind by Apple's compiler. We do this before executing the actions. rm -rf conftest.dSYM conftest_ipa8_conftest.oo - eval $as_lineno_stack; test "x$as_lineno_stack" = x && { as_lineno=; unset as_lineno;} + eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno as_fn_set_status $ac_retval } # ac_fn_c_try_link @@ -1569,7 +1603,7 @@ ac_retval=1 fi - eval $as_lineno_stack; test "x$as_lineno_stack" = x && { as_lineno=; unset as_lineno;} + eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno as_fn_set_status $ac_retval } # ac_fn_c_try_cpp @@ -1611,7 +1645,7 @@ ac_retval=$ac_status fi rm -rf conftest.dSYM conftest_ipa8_conftest.oo - eval $as_lineno_stack; test "x$as_lineno_stack" = x && { as_lineno=; unset as_lineno;} + eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno as_fn_set_status $ac_retval } # ac_fn_c_try_run @@ -1624,10 +1658,10 @@ ac_fn_c_check_header_mongrel () { as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack - if eval "test \"\${$3+set}\"" = set; then : + if eval \${$3+:} false; then : { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5 $as_echo_n "checking for $2... " >&6; } -if eval "test \"\${$3+set}\"" = set; then : +if eval \${$3+:} false; then : $as_echo_n "(cached) " >&6 fi eval ac_res=\$$3 @@ -1694,7 +1728,7 @@ esac { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5 $as_echo_n "checking for $2... 
" >&6; } -if eval "test \"\${$3+set}\"" = set; then : +if eval \${$3+:} false; then : $as_echo_n "(cached) " >&6 else eval "$3=\$ac_header_compiler" @@ -1703,7 +1737,7 @@ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 $as_echo "$ac_res" >&6; } fi - eval $as_lineno_stack; test "x$as_lineno_stack" = x && { as_lineno=; unset as_lineno;} + eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno } # ac_fn_c_check_header_mongrel @@ -1716,7 +1750,7 @@ as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5 $as_echo_n "checking for $2... " >&6; } -if eval "test \"\${$3+set}\"" = set; then : +if eval \${$3+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext @@ -1734,7 +1768,7 @@ eval ac_res=\$$3 { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 $as_echo "$ac_res" >&6; } - eval $as_lineno_stack; test "x$as_lineno_stack" = x && { as_lineno=; unset as_lineno;} + eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno } # ac_fn_c_check_header_compile @@ -1747,7 +1781,7 @@ as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5 $as_echo_n "checking for $2... " >&6; } -if eval "test \"\${$3+set}\"" = set; then : +if eval \${$3+:} false; then : $as_echo_n "(cached) " >&6 else eval "$3=no" @@ -1788,7 +1822,7 @@ eval ac_res=\$$3 { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 $as_echo "$ac_res" >&6; } - eval $as_lineno_stack; test "x$as_lineno_stack" = x && { as_lineno=; unset as_lineno;} + eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno } # ac_fn_c_check_type @@ -1801,7 +1835,7 @@ as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack { $as_echo "$as_me:${as_lineno-$LINENO}: checking for int$2_t" >&5 $as_echo_n "checking for int$2_t... " >&6; } -if eval "test \"\${$3+set}\"" = set; then : +if eval \${$3+:} false; then : $as_echo_n "(cached) " >&6 else eval "$3=no" @@ -1817,7 +1851,8 @@ main () { static int test_array [1 - 2 * !(0 < ($ac_type) ((((($ac_type) 1 << N) << N) - 1) * 2 + 1))]; -test_array [0] = 0 +test_array [0] = 0; +return test_array [0]; ; return 0; @@ -1833,7 +1868,8 @@ { static int test_array [1 - 2 * !(($ac_type) ((((($ac_type) 1 << N) << N) - 1) * 2 + 1) < ($ac_type) ((((($ac_type) 1 << N) << N) - 1) * 2 + 2))]; -test_array [0] = 0 +test_array [0] = 0; +return test_array [0]; ; return 0; @@ -1862,7 +1898,7 @@ eval ac_res=\$$3 { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 $as_echo "$ac_res" >&6; } - eval $as_lineno_stack; test "x$as_lineno_stack" = x && { as_lineno=; unset as_lineno;} + eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno } # ac_fn_c_find_intX_t @@ -1875,7 +1911,7 @@ as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack { $as_echo "$as_me:${as_lineno-$LINENO}: checking for uint$2_t" >&5 $as_echo_n "checking for uint$2_t... 
" >&6; } -if eval "test \"\${$3+set}\"" = set; then : +if eval \${$3+:} false; then : $as_echo_n "(cached) " >&6 else eval "$3=no" @@ -1890,7 +1926,8 @@ main () { static int test_array [1 - 2 * !((($ac_type) -1 >> ($2 / 2 - 1)) >> ($2 / 2 - 1) == 3)]; -test_array [0] = 0 +test_array [0] = 0; +return test_array [0]; ; return 0; @@ -1915,7 +1952,7 @@ eval ac_res=\$$3 { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 $as_echo "$ac_res" >&6; } - eval $as_lineno_stack; test "x$as_lineno_stack" = x && { as_lineno=; unset as_lineno;} + eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno } # ac_fn_c_find_uintX_t @@ -1927,7 +1964,7 @@ as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5 $as_echo_n "checking for $2... " >&6; } -if eval "test \"\${$3+set}\"" = set; then : +if eval \${$3+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext @@ -1982,15 +2019,15 @@ eval ac_res=\$$3 { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 $as_echo "$ac_res" >&6; } - eval $as_lineno_stack; test "x$as_lineno_stack" = x && { as_lineno=; unset as_lineno;} + eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno } # ac_fn_c_check_func cat >config.log <<_ACEOF This file contains any messages produced by compilers while running configure, to aid debugging if configure makes a mistake. -It was created by MK Livestatus $as_me 1.2.2p3, which was -generated by GNU Autoconf 2.67. Invocation command line was +It was created by MK Livestatus $as_me 1.2.6p12, which was +generated by GNU Autoconf 2.69. Invocation command line was $ $0 $@ @@ -2248,7 +2285,7 @@ || { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 $as_echo "$as_me: error: in \`$ac_pwd':" >&2;} as_fn_error $? "failed to load site script $ac_site_file -See \`config.log' for more details" "$LINENO" 5 ; } +See \`config.log' for more details" "$LINENO" 5; } fi done @@ -2385,7 +2422,7 @@ { $as_echo "$as_me:${as_lineno-$LINENO}: checking for a BSD-compatible install" >&5 $as_echo_n "checking for a BSD-compatible install... " >&6; } if test -z "$INSTALL"; then -if test "${ac_cv_path_install+set}" = set; then : +if ${ac_cv_path_install+:} false; then : $as_echo_n "(cached) " >&6 else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR @@ -2405,7 +2442,7 @@ # by default. for ac_prog in ginstall scoinst install; do for ac_exec_ext in '' $ac_executable_extensions; do - if { test -f "$as_dir/$ac_prog$ac_exec_ext" && $as_test_x "$as_dir/$ac_prog$ac_exec_ext"; }; then + if as_fn_executable_p "$as_dir/$ac_prog$ac_exec_ext"; then if test $ac_prog = install && grep dspmsg "$as_dir/$ac_prog$ac_exec_ext" >/dev/null 2>&1; then # AIX install. It has an incompatible calling convention. @@ -2472,11 +2509,11 @@ ' case `pwd` in *[\\\"\#\$\&\'\`$am_lf]*) - as_fn_error $? "unsafe absolute working directory name" "$LINENO" 5 ;; + as_fn_error $? "unsafe absolute working directory name" "$LINENO" 5;; esac case $srcdir in *[\\\"\#\$\&\'\`$am_lf\ \ ]*) - as_fn_error $? "unsafe srcdir value: \`$srcdir'" "$LINENO" 5 ;; + as_fn_error $? "unsafe srcdir value: \`$srcdir'" "$LINENO" 5;; esac # Do `set' in a subshell so we don't clobber the current shell's @@ -2562,7 +2599,7 @@ set dummy ${ac_tool_prefix}strip; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... 
" >&6; } -if test "${ac_cv_prog_STRIP+set}" = set; then : +if ${ac_cv_prog_STRIP+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$STRIP"; then @@ -2574,7 +2611,7 @@ IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do - if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_STRIP="${ac_tool_prefix}strip" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 @@ -2602,7 +2639,7 @@ set dummy strip; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } -if test "${ac_cv_prog_ac_ct_STRIP+set}" = set; then : +if ${ac_cv_prog_ac_ct_STRIP+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$ac_ct_STRIP"; then @@ -2614,7 +2651,7 @@ IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do - if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_ac_ct_STRIP="strip" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 @@ -2655,7 +2692,7 @@ { $as_echo "$as_me:${as_lineno-$LINENO}: checking for a thread-safe mkdir -p" >&5 $as_echo_n "checking for a thread-safe mkdir -p... " >&6; } if test -z "$MKDIR_P"; then - if test "${ac_cv_path_mkdir+set}" = set; then : + if ${ac_cv_path_mkdir+:} false; then : $as_echo_n "(cached) " >&6 else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR @@ -2665,7 +2702,7 @@ test -z "$as_dir" && as_dir=. for ac_prog in mkdir gmkdir; do for ac_exec_ext in '' $ac_executable_extensions; do - { test -f "$as_dir/$ac_prog$ac_exec_ext" && $as_test_x "$as_dir/$ac_prog$ac_exec_ext"; } || continue + as_fn_executable_p "$as_dir/$ac_prog$ac_exec_ext" || continue case `"$as_dir/$ac_prog$ac_exec_ext" --version 2>&1` in #( 'mkdir (GNU coreutils) '* | \ 'mkdir (coreutils) '* | \ @@ -2706,7 +2743,7 @@ set dummy $ac_prog; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } -if test "${ac_cv_prog_AWK+set}" = set; then : +if ${ac_cv_prog_AWK+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$AWK"; then @@ -2718,7 +2755,7 @@ IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do - if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_AWK="$ac_prog" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 @@ -2746,7 +2783,7 @@ $as_echo_n "checking whether ${MAKE-make} sets \$(MAKE)... " >&6; } set x ${MAKE-make} ac_make=`$as_echo "$2" | sed 's/+/p/g; s/[^a-zA-Z0-9_]/_/g'` -if eval "test \"\${ac_cv_prog_make_${ac_make}_set+set}\"" = set; then : +if eval \${ac_cv_prog_make_${ac_make}_set+:} false; then : $as_echo_n "(cached) " >&6 else cat >conftest.make <<\_ACEOF @@ -2804,7 +2841,7 @@ # Define the identity of the package. PACKAGE='mk-livestatus' - VERSION='1.2.2p3' + VERSION='1.2.6p12' cat >>confdefs.h <<_ACEOF @@ -2834,11 +2871,11 @@ # We need awk for the "check" target. The system "awk" is bad on # some platforms. -# Always define AMTAR for backward compatibility. - -AMTAR=${AMTAR-"${am_missing_run}tar"} +# Always define AMTAR for backward compatibility. 
Yes, it's still used +# in the wild :-( We should find a proper way to deprecate it ... +AMTAR='$${TAR-tar}' -am__tar='${AMTAR} chof - "$$tardir"'; am__untar='${AMTAR} xf -' +am__tar='$${TAR-tar} chof - "$$tardir"' am__untar='$${TAR-tar} xf -' @@ -2865,7 +2902,7 @@ set dummy $ac_tool_prefix$ac_prog; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } -if test "${ac_cv_prog_CXX+set}" = set; then : +if ${ac_cv_prog_CXX+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$CXX"; then @@ -2877,7 +2914,7 @@ IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do - if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_CXX="$ac_tool_prefix$ac_prog" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 @@ -2909,7 +2946,7 @@ set dummy $ac_prog; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } -if test "${ac_cv_prog_ac_ct_CXX+set}" = set; then : +if ${ac_cv_prog_ac_ct_CXX+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$ac_ct_CXX"; then @@ -2921,7 +2958,7 @@ IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do - if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_ac_ct_CXX="$ac_prog" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 @@ -3073,7 +3110,7 @@ { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 $as_echo "$as_me: error: in \`$ac_pwd':" >&2;} as_fn_error 77 "C++ compiler cannot create executables -See \`config.log' for more details" "$LINENO" 5 ; } +See \`config.log' for more details" "$LINENO" 5; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } @@ -3116,7 +3153,7 @@ { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 $as_echo "$as_me: error: in \`$ac_pwd':" >&2;} as_fn_error $? "cannot compute suffix of executables: cannot compile and link -See \`config.log' for more details" "$LINENO" 5 ; } +See \`config.log' for more details" "$LINENO" 5; } fi rm -f conftest conftest$ac_cv_exeext { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_exeext" >&5 @@ -3175,7 +3212,7 @@ $as_echo "$as_me: error: in \`$ac_pwd':" >&2;} as_fn_error $? "cannot run C++ compiled programs. If you meant to cross compile, use \`--host'. -See \`config.log' for more details" "$LINENO" 5 ; } +See \`config.log' for more details" "$LINENO" 5; } fi fi fi @@ -3186,7 +3223,7 @@ ac_clean_files=$ac_clean_files_save { $as_echo "$as_me:${as_lineno-$LINENO}: checking for suffix of object files" >&5 $as_echo_n "checking for suffix of object files... " >&6; } -if test "${ac_cv_objext+set}" = set; then : +if ${ac_cv_objext+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext @@ -3227,7 +3264,7 @@ { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 $as_echo "$as_me: error: in \`$ac_pwd':" >&2;} as_fn_error $? 
"cannot compute suffix of object files: cannot compile -See \`config.log' for more details" "$LINENO" 5 ; } +See \`config.log' for more details" "$LINENO" 5; } fi rm -f conftest.$ac_cv_objext conftest.$ac_ext fi @@ -3237,7 +3274,7 @@ ac_objext=$OBJEXT { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether we are using the GNU C++ compiler" >&5 $as_echo_n "checking whether we are using the GNU C++ compiler... " >&6; } -if test "${ac_cv_cxx_compiler_gnu+set}" = set; then : +if ${ac_cv_cxx_compiler_gnu+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext @@ -3274,7 +3311,7 @@ ac_save_CXXFLAGS=$CXXFLAGS { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether $CXX accepts -g" >&5 $as_echo_n "checking whether $CXX accepts -g... " >&6; } -if test "${ac_cv_prog_cxx_g+set}" = set; then : +if ${ac_cv_prog_cxx_g+:} false; then : $as_echo_n "(cached) " >&6 else ac_save_cxx_werror_flag=$ac_cxx_werror_flag @@ -3407,6 +3444,7 @@ if test "x$enable_dependency_tracking" != xno; then am_depcomp="$ac_aux_dir/depcomp" AMDEPBACKSLASH='\' + am__nodep='_no' fi if test "x$enable_dependency_tracking" != xno; then AMDEP_TRUE= @@ -3422,7 +3460,7 @@ { $as_echo "$as_me:${as_lineno-$LINENO}: checking dependency style of $depcc" >&5 $as_echo_n "checking dependency style of $depcc... " >&6; } -if test "${am_cv_CXX_dependencies_compiler_type+set}" = set; then : +if ${am_cv_CXX_dependencies_compiler_type+:} false; then : $as_echo_n "(cached) " >&6 else if test -z "$AMDEP_TRUE" && test -f "$am_depcomp"; then @@ -3431,6 +3469,7 @@ # instance it was reported that on HP-UX the gcc test will end up # making a dummy file named `D' -- because `-MD' means `put the output # in D'. + rm -rf conftest.dir mkdir conftest.dir # Copy depcomp to subdir because otherwise we won't find it if we're # using a relative directory. @@ -3490,7 +3529,7 @@ break fi ;; - msvisualcpp | msvcmsys) + msvc7 | msvc7msys | msvisualcpp | msvcmsys) # This compiler won't grok `-c -o', but also, the minuso test has # not run yet. These depmodes are late enough in the game, and # so weak that their functioning should not be impacted. @@ -3555,7 +3594,7 @@ set dummy ${ac_tool_prefix}gcc; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } -if test "${ac_cv_prog_CC+set}" = set; then : +if ${ac_cv_prog_CC+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$CC"; then @@ -3567,7 +3606,7 @@ IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do - if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_CC="${ac_tool_prefix}gcc" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 @@ -3595,7 +3634,7 @@ set dummy gcc; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } -if test "${ac_cv_prog_ac_ct_CC+set}" = set; then : +if ${ac_cv_prog_ac_ct_CC+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$ac_ct_CC"; then @@ -3607,7 +3646,7 @@ IFS=$as_save_IFS test -z "$as_dir" && as_dir=. 
for ac_exec_ext in '' $ac_executable_extensions; do - if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_ac_ct_CC="gcc" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 @@ -3648,7 +3687,7 @@ set dummy ${ac_tool_prefix}cc; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } -if test "${ac_cv_prog_CC+set}" = set; then : +if ${ac_cv_prog_CC+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$CC"; then @@ -3660,7 +3699,7 @@ IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do - if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_CC="${ac_tool_prefix}cc" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 @@ -3688,7 +3727,7 @@ set dummy cc; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } -if test "${ac_cv_prog_CC+set}" = set; then : +if ${ac_cv_prog_CC+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$CC"; then @@ -3701,7 +3740,7 @@ IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do - if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then if test "$as_dir/$ac_word$ac_exec_ext" = "/usr/ucb/cc"; then ac_prog_rejected=yes continue @@ -3747,7 +3786,7 @@ set dummy $ac_tool_prefix$ac_prog; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } -if test "${ac_cv_prog_CC+set}" = set; then : +if ${ac_cv_prog_CC+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$CC"; then @@ -3759,7 +3798,7 @@ IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do - if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_CC="$ac_tool_prefix$ac_prog" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 @@ -3791,7 +3830,7 @@ set dummy $ac_prog; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } -if test "${ac_cv_prog_ac_ct_CC+set}" = set; then : +if ${ac_cv_prog_ac_ct_CC+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$ac_ct_CC"; then @@ -3803,7 +3842,7 @@ IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do - if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_ac_ct_CC="$ac_prog" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 @@ -3846,7 +3885,7 @@ test -z "$CC" && { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 $as_echo "$as_me: error: in \`$ac_pwd':" >&2;} as_fn_error $? "no acceptable C compiler found in \$PATH -See \`config.log' for more details" "$LINENO" 5 ; } +See \`config.log' for more details" "$LINENO" 5; } # Provide some information about the compiler. 
$as_echo "$as_me:${as_lineno-$LINENO}: checking for C compiler version" >&5 @@ -3875,7 +3914,7 @@ { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether we are using the GNU C compiler" >&5 $as_echo_n "checking whether we are using the GNU C compiler... " >&6; } -if test "${ac_cv_c_compiler_gnu+set}" = set; then : +if ${ac_cv_c_compiler_gnu+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext @@ -3912,7 +3951,7 @@ ac_save_CFLAGS=$CFLAGS { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether $CC accepts -g" >&5 $as_echo_n "checking whether $CC accepts -g... " >&6; } -if test "${ac_cv_prog_cc_g+set}" = set; then : +if ${ac_cv_prog_cc_g+:} false; then : $as_echo_n "(cached) " >&6 else ac_save_c_werror_flag=$ac_c_werror_flag @@ -3990,7 +4029,7 @@ fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $CC option to accept ISO C89" >&5 $as_echo_n "checking for $CC option to accept ISO C89... " >&6; } -if test "${ac_cv_prog_cc_c89+set}" = set; then : +if ${ac_cv_prog_cc_c89+:} false; then : $as_echo_n "(cached) " >&6 else ac_cv_prog_cc_c89=no @@ -3999,8 +4038,7 @@ /* end confdefs.h. */ #include #include -#include -#include +struct stat; /* Most of the following tests are stolen from RCS 5.7's src/conf.sh. */ struct buf { int x; }; FILE * (*rcsopen) (struct buf *, struct stat *, int); @@ -4089,7 +4127,7 @@ { $as_echo "$as_me:${as_lineno-$LINENO}: checking dependency style of $depcc" >&5 $as_echo_n "checking dependency style of $depcc... " >&6; } -if test "${am_cv_CC_dependencies_compiler_type+set}" = set; then : +if ${am_cv_CC_dependencies_compiler_type+:} false; then : $as_echo_n "(cached) " >&6 else if test -z "$AMDEP_TRUE" && test -f "$am_depcomp"; then @@ -4098,6 +4136,7 @@ # instance it was reported that on HP-UX the gcc test will end up # making a dummy file named `D' -- because `-MD' means `put the output # in D'. + rm -rf conftest.dir mkdir conftest.dir # Copy depcomp to subdir because otherwise we won't find it if we're # using a relative directory. @@ -4157,7 +4196,7 @@ break fi ;; - msvisualcpp | msvcmsys) + msvc7 | msvc7msys | msvisualcpp | msvcmsys) # This compiler won't grok `-c -o', but also, the minuso test has # not run yet. These depmodes are late enough in the game, and # so weak that their functioning should not be impacted. @@ -4217,7 +4256,7 @@ set dummy ${ac_tool_prefix}ranlib; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } -if test "${ac_cv_prog_RANLIB+set}" = set; then : +if ${ac_cv_prog_RANLIB+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$RANLIB"; then @@ -4229,7 +4268,7 @@ IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do - if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_RANLIB="${ac_tool_prefix}ranlib" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 @@ -4257,7 +4296,7 @@ set dummy ranlib; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } -if test "${ac_cv_prog_ac_ct_RANLIB+set}" = set; then : +if ${ac_cv_prog_ac_ct_RANLIB+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$ac_ct_RANLIB"; then @@ -4269,7 +4308,7 @@ IFS=$as_save_IFS test -z "$as_dir" && as_dir=. 
for ac_exec_ext in '' $ac_executable_extensions; do - if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_ac_ct_RANLIB="ranlib" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 @@ -4317,7 +4356,7 @@ { $as_echo "$as_me:${as_lineno-$LINENO}: checking for socket in -lsocket" >&5 $as_echo_n "checking for socket in -lsocket... " >&6; } -if test "${ac_cv_lib_socket_socket+set}" = set; then : +if ${ac_cv_lib_socket_socket+:} false; then : $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS @@ -4351,7 +4390,7 @@ fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_socket_socket" >&5 $as_echo "$ac_cv_lib_socket_socket" >&6; } -if test "x$ac_cv_lib_socket_socket" = x""yes; then : +if test "x$ac_cv_lib_socket_socket" = xyes; then : cat >>confdefs.h <<_ACEOF #define HAVE_LIBSOCKET 1 _ACEOF @@ -4362,7 +4401,7 @@ { $as_echo "$as_me:${as_lineno-$LINENO}: checking for connect in -lsocket" >&5 $as_echo_n "checking for connect in -lsocket... " >&6; } -if test "${ac_cv_lib_socket_connect+set}" = set; then : +if ${ac_cv_lib_socket_connect+:} false; then : $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS @@ -4396,7 +4435,7 @@ fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_socket_connect" >&5 $as_echo "$ac_cv_lib_socket_connect" >&6; } -if test "x$ac_cv_lib_socket_connect" = x""yes; then : +if test "x$ac_cv_lib_socket_connect" = xyes; then : cat >>confdefs.h <<_ACEOF #define HAVE_LIBSOCKET 1 _ACEOF @@ -4407,7 +4446,7 @@ { $as_echo "$as_me:${as_lineno-$LINENO}: checking for shutdown in -lsocket" >&5 $as_echo_n "checking for shutdown in -lsocket... " >&6; } -if test "${ac_cv_lib_socket_shutdown+set}" = set; then : +if ${ac_cv_lib_socket_shutdown+:} false; then : $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS @@ -4441,7 +4480,7 @@ fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_socket_shutdown" >&5 $as_echo "$ac_cv_lib_socket_shutdown" >&6; } -if test "x$ac_cv_lib_socket_shutdown" = x""yes; then : +if test "x$ac_cv_lib_socket_shutdown" = xyes; then : cat >>confdefs.h <<_ACEOF #define HAVE_LIBSOCKET 1 _ACEOF @@ -4457,7 +4496,7 @@ as_ac_Header=`$as_echo "ac_cv_header_dirent_$ac_hdr" | $as_tr_sh` { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_hdr that defines DIR" >&5 $as_echo_n "checking for $ac_hdr that defines DIR... " >&6; } -if eval "test \"\${$as_ac_Header+set}\"" = set; then : +if eval \${$as_ac_Header+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext @@ -4497,7 +4536,7 @@ if test $ac_header_dirent = dirent.h; then { $as_echo "$as_me:${as_lineno-$LINENO}: checking for library containing opendir" >&5 $as_echo_n "checking for library containing opendir... " >&6; } -if test "${ac_cv_search_opendir+set}" = set; then : +if ${ac_cv_search_opendir+:} false; then : $as_echo_n "(cached) " >&6 else ac_func_search_save_LIBS=$LIBS @@ -4531,11 +4570,11 @@ fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext - if test "${ac_cv_search_opendir+set}" = set; then : + if ${ac_cv_search_opendir+:} false; then : break fi done -if test "${ac_cv_search_opendir+set}" = set; then : +if ${ac_cv_search_opendir+:} false; then : else ac_cv_search_opendir=no @@ -4554,7 +4593,7 @@ else { $as_echo "$as_me:${as_lineno-$LINENO}: checking for library containing opendir" >&5 $as_echo_n "checking for library containing opendir... 
" >&6; } -if test "${ac_cv_search_opendir+set}" = set; then : +if ${ac_cv_search_opendir+:} false; then : $as_echo_n "(cached) " >&6 else ac_func_search_save_LIBS=$LIBS @@ -4588,11 +4627,11 @@ fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext - if test "${ac_cv_search_opendir+set}" = set; then : + if ${ac_cv_search_opendir+:} false; then : break fi done -if test "${ac_cv_search_opendir+set}" = set; then : +if ${ac_cv_search_opendir+:} false; then : else ac_cv_search_opendir=no @@ -4622,7 +4661,7 @@ CPP= fi if test -z "$CPP"; then - if test "${ac_cv_prog_CPP+set}" = set; then : + if ${ac_cv_prog_CPP+:} false; then : $as_echo_n "(cached) " >&6 else # Double quotes because CPP needs to be expanded @@ -4738,7 +4777,7 @@ { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 $as_echo "$as_me: error: in \`$ac_pwd':" >&2;} as_fn_error $? "C preprocessor \"$CPP\" fails sanity check -See \`config.log' for more details" "$LINENO" 5 ; } +See \`config.log' for more details" "$LINENO" 5; } fi ac_ext=c @@ -4750,7 +4789,7 @@ { $as_echo "$as_me:${as_lineno-$LINENO}: checking for grep that handles long lines and -e" >&5 $as_echo_n "checking for grep that handles long lines and -e... " >&6; } -if test "${ac_cv_path_GREP+set}" = set; then : +if ${ac_cv_path_GREP+:} false; then : $as_echo_n "(cached) " >&6 else if test -z "$GREP"; then @@ -4764,7 +4803,7 @@ for ac_prog in grep ggrep; do for ac_exec_ext in '' $ac_executable_extensions; do ac_path_GREP="$as_dir/$ac_prog$ac_exec_ext" - { test -f "$ac_path_GREP" && $as_test_x "$ac_path_GREP"; } || continue + as_fn_executable_p "$ac_path_GREP" || continue # Check for GNU ac_path_GREP and select it if it is found. # Check for GNU $ac_path_GREP case `"$ac_path_GREP" --version 2>&1` in @@ -4813,7 +4852,7 @@ { $as_echo "$as_me:${as_lineno-$LINENO}: checking for egrep" >&5 $as_echo_n "checking for egrep... " >&6; } -if test "${ac_cv_path_EGREP+set}" = set; then : +if ${ac_cv_path_EGREP+:} false; then : $as_echo_n "(cached) " >&6 else if echo a | $GREP -E '(a|b)' >/dev/null 2>&1 @@ -4830,7 +4869,7 @@ for ac_prog in egrep; do for ac_exec_ext in '' $ac_executable_extensions; do ac_path_EGREP="$as_dir/$ac_prog$ac_exec_ext" - { test -f "$ac_path_EGREP" && $as_test_x "$ac_path_EGREP"; } || continue + as_fn_executable_p "$ac_path_EGREP" || continue # Check for GNU ac_path_EGREP and select it if it is found. # Check for GNU $ac_path_EGREP case `"$ac_path_EGREP" --version 2>&1` in @@ -4880,7 +4919,7 @@ { $as_echo "$as_me:${as_lineno-$LINENO}: checking for ANSI C header files" >&5 $as_echo_n "checking for ANSI C header files... " >&6; } -if test "${ac_cv_header_stdc+set}" = set; then : +if ${ac_cv_header_stdc+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext @@ -4992,7 +5031,7 @@ { $as_echo "$as_me:${as_lineno-$LINENO}: checking for sys/wait.h that is POSIX.1 compatible" >&5 $as_echo_n "checking for sys/wait.h that is POSIX.1 compatible... " >&6; } -if test "${ac_cv_header_sys_wait_h+set}" = set; then : +if ${ac_cv_header_sys_wait_h+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext @@ -5065,84 +5104,66 @@ # Checks for typedefs, structures, and compiler characteristics. { $as_echo "$as_me:${as_lineno-$LINENO}: checking for stdbool.h that conforms to C99" >&5 $as_echo_n "checking for stdbool.h that conforms to C99... 
" >&6; } -if test "${ac_cv_header_stdbool_h+set}" = set; then : +if ${ac_cv_header_stdbool_h+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ -#include -#ifndef bool - "error: bool is not defined" -#endif -#ifndef false - "error: false is not defined" -#endif -#if false - "error: false is not 0" -#endif -#ifndef true - "error: true is not defined" -#endif -#if true != 1 - "error: true is not 1" -#endif -#ifndef __bool_true_false_are_defined - "error: __bool_true_false_are_defined is not defined" -#endif - - struct s { _Bool s: 1; _Bool t; } s; - - char a[true == 1 ? 1 : -1]; - char b[false == 0 ? 1 : -1]; - char c[__bool_true_false_are_defined == 1 ? 1 : -1]; - char d[(bool) 0.5 == true ? 1 : -1]; - bool e = &s; - char f[(_Bool) 0.0 == false ? 1 : -1]; - char g[true]; - char h[sizeof (_Bool)]; - char i[sizeof s.t]; - enum { j = false, k = true, l = false * true, m = true * 256 }; - /* The following fails for - HP aC++/ANSI C B3910B A.05.55 [Dec 04 2003]. */ - _Bool n[m]; - char o[sizeof n == m * sizeof n[0] ? 1 : -1]; - char p[-1 - (_Bool) 0 < 0 && -1 - (bool) 0 < 0 ? 1 : -1]; -# if defined __xlc__ || defined __GNUC__ - /* Catch a bug in IBM AIX xlc compiler version 6.0.0.0 - reported by James Lemley on 2005-10-05; see - http://lists.gnu.org/archive/html/bug-coreutils/2005-10/msg00086.html - This test is not quite right, since xlc is allowed to - reject this program, as the initializer for xlcbug is - not one of the forms that C requires support for. - However, doing the test right would require a runtime - test, and that would make cross-compilation harder. - Let us hope that IBM fixes the xlc bug, and also adds - support for this kind of constant expression. In the - meantime, this test will reject xlc, which is OK, since - our stdbool.h substitute should suffice. We also test - this with GCC, where it should work, to detect more - quickly whether someone messes up the test in the - future. */ - char digs[] = "0123456789"; - int xlcbug = 1 / (&(digs + 5)[-2 + (bool) 1] == &digs[4] ? 1 : -1); -# endif - /* Catch a bug in an HP-UX C compiler. See - http://gcc.gnu.org/ml/gcc-patches/2003-12/msg02303.html - http://lists.gnu.org/archive/html/bug-coreutils/2005-11/msg00161.html - */ - _Bool q = true; - _Bool *pq = &q; + #include + #ifndef bool + "error: bool is not defined" + #endif + #ifndef false + "error: false is not defined" + #endif + #if false + "error: false is not 0" + #endif + #ifndef true + "error: true is not defined" + #endif + #if true != 1 + "error: true is not 1" + #endif + #ifndef __bool_true_false_are_defined + "error: __bool_true_false_are_defined is not defined" + #endif + + struct s { _Bool s: 1; _Bool t; } s; + + char a[true == 1 ? 1 : -1]; + char b[false == 0 ? 1 : -1]; + char c[__bool_true_false_are_defined == 1 ? 1 : -1]; + char d[(bool) 0.5 == true ? 1 : -1]; + /* See body of main program for 'e'. */ + char f[(_Bool) 0.0 == false ? 1 : -1]; + char g[true]; + char h[sizeof (_Bool)]; + char i[sizeof s.t]; + enum { j = false, k = true, l = false * true, m = true * 256 }; + /* The following fails for + HP aC++/ANSI C B3910B A.05.55 [Dec 04 2003]. */ + _Bool n[m]; + char o[sizeof n == m * sizeof n[0] ? 1 : -1]; + char p[-1 - (_Bool) 0 < 0 && -1 - (bool) 0 < 0 ? 1 : -1]; + /* Catch a bug in an HP-UX C compiler. 
See + http://gcc.gnu.org/ml/gcc-patches/2003-12/msg02303.html + http://lists.gnu.org/archive/html/bug-coreutils/2005-11/msg00161.html + */ + _Bool q = true; + _Bool *pq = &q; int main () { - *pq |= q; - *pq |= ! q; - /* Refer to every declared value, to avoid compiler optimizations. */ - return (!a + !b + !c + !d + !e + !f + !g + !h + !i + !!j + !k + !!l - + !m + !n + !o + !p + !q + !pq); + bool e = &s; + *pq |= q; + *pq |= ! q; + /* Refer to every declared value, to avoid compiler optimizations. */ + return (!a + !b + !c + !d + !e + !f + !g + !h + !i + !!j + !k + !!l + + !m + !n + !o + !p + !q + !pq); ; return 0; @@ -5157,8 +5178,8 @@ fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_header_stdbool_h" >&5 $as_echo "$ac_cv_header_stdbool_h" >&6; } -ac_fn_c_check_type "$LINENO" "_Bool" "ac_cv_type__Bool" "$ac_includes_default" -if test "x$ac_cv_type__Bool" = x""yes; then : + ac_fn_c_check_type "$LINENO" "_Bool" "ac_cv_type__Bool" "$ac_includes_default" +if test "x$ac_cv_type__Bool" = xyes; then : cat >>confdefs.h <<_ACEOF #define HAVE__BOOL 1 @@ -5167,6 +5188,7 @@ fi + if test $ac_cv_header_stdbool_h = yes; then $as_echo "#define HAVE_STDBOOL_H 1" >>confdefs.h @@ -5175,7 +5197,7 @@ { $as_echo "$as_me:${as_lineno-$LINENO}: checking for an ANSI C-conforming const" >&5 $as_echo_n "checking for an ANSI C-conforming const... " >&6; } -if test "${ac_cv_c_const+set}" = set; then : +if ${ac_cv_c_const+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext @@ -5184,11 +5206,11 @@ int main () { -/* FIXME: Include the comments suggested by Paul. */ + #ifndef __cplusplus - /* Ultrix mips cc rejects this. */ + /* Ultrix mips cc rejects this sort of thing. */ typedef int charset[2]; - const charset cs; + const charset cs = { 0, 0 }; /* SunOS 4.1.1 cc rejects this. */ char const *const *pcpcc; char **ppc; @@ -5205,8 +5227,9 @@ ++pcpcc; ppc = (char**) pcpcc; pcpcc = (char const *const *) ppc; - { /* SCO 3.2v4 cc rejects this. */ - char *t; + { /* SCO 3.2v4 cc rejects this sort of thing. */ + char tx; + char *t = &tx; char const *s = 0 ? (char *) 0 : (char const *) 0; *t++ = 0; @@ -5222,10 +5245,10 @@ iptr p = 0; ++p; } - { /* AIX XL C 1.02.0.0 rejects this saying + { /* AIX XL C 1.02.0.0 rejects this sort of thing, saying "k.c", line 2.27: 1506-025 (S) Operand must be a modifiable lvalue. */ - struct s { int j; const int *ap[3]; }; - struct s *b; b->j = 5; + struct s { int j; const int *ap[3]; } bx; + struct s *b = &bx; b->j = 5; } { /* ULTRIX-32 V3.1 (Rev 9) vcc rejects this */ const int foo = 10; @@ -5255,7 +5278,7 @@ { $as_echo "$as_me:${as_lineno-$LINENO}: checking for inline" >&5 $as_echo_n "checking for inline... " >&6; } -if test "${ac_cv_c_inline+set}" = set; then : +if ${ac_cv_c_inline+:} false; then : $as_echo_n "(cached) " >&6 else ac_cv_c_inline=no @@ -5318,7 +5341,7 @@ esac ac_fn_c_check_type "$LINENO" "size_t" "ac_cv_type_size_t" "$ac_includes_default" -if test "x$ac_cv_type_size_t" = x""yes; then : +if test "x$ac_cv_type_size_t" = xyes; then : else @@ -5329,7 +5352,7 @@ fi ac_fn_c_check_type "$LINENO" "ssize_t" "ac_cv_type_ssize_t" "$ac_includes_default" -if test "x$ac_cv_type_ssize_t" = x""yes; then : +if test "x$ac_cv_type_ssize_t" = xyes; then : else @@ -5341,7 +5364,7 @@ { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether time.h and sys/time.h may both be included" >&5 $as_echo_n "checking whether time.h and sys/time.h may both be included... 
" >&6; } -if test "${ac_cv_header_time+set}" = set; then : +if ${ac_cv_header_time+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext @@ -5407,7 +5430,7 @@ for ac_header in stdlib.h do : ac_fn_c_check_header_mongrel "$LINENO" "stdlib.h" "ac_cv_header_stdlib_h" "$ac_includes_default" -if test "x$ac_cv_header_stdlib_h" = x""yes; then : +if test "x$ac_cv_header_stdlib_h" = xyes; then : cat >>confdefs.h <<_ACEOF #define HAVE_STDLIB_H 1 _ACEOF @@ -5418,7 +5441,7 @@ { $as_echo "$as_me:${as_lineno-$LINENO}: checking for GNU libc compatible malloc" >&5 $as_echo_n "checking for GNU libc compatible malloc... " >&6; } -if test "${ac_cv_func_malloc_0_nonnull+set}" = set; then : +if ${ac_cv_func_malloc_0_nonnull+:} false; then : $as_echo_n "(cached) " >&6 else if test "$cross_compiling" = yes; then : @@ -5474,7 +5497,7 @@ for ac_header in stdlib.h do : ac_fn_c_check_header_mongrel "$LINENO" "stdlib.h" "ac_cv_header_stdlib_h" "$ac_includes_default" -if test "x$ac_cv_header_stdlib_h" = x""yes; then : +if test "x$ac_cv_header_stdlib_h" = xyes; then : cat >>confdefs.h <<_ACEOF #define HAVE_STDLIB_H 1 _ACEOF @@ -5485,7 +5508,7 @@ { $as_echo "$as_me:${as_lineno-$LINENO}: checking for GNU libc compatible realloc" >&5 $as_echo_n "checking for GNU libc compatible realloc... " >&6; } -if test "${ac_cv_func_realloc_0_nonnull+set}" = set; then : +if ${ac_cv_func_realloc_0_nonnull+:} false; then : $as_echo_n "(cached) " >&6 else if test "$cross_compiling" = yes; then : @@ -5553,7 +5576,7 @@ { $as_echo "$as_me:${as_lineno-$LINENO}: checking types of arguments for select" >&5 $as_echo_n "checking types of arguments for select... " >&6; } -if test "${ac_cv_func_select_args+set}" = set; then : +if ${ac_cv_func_select_args+:} false; then : $as_echo_n "(cached) " >&6 else for ac_arg234 in 'fd_set *' 'int *' 'void *'; do @@ -5587,7 +5610,7 @@ done done # Provide a safe default value. -: ${ac_cv_func_select_args='int,int *,struct timeval *'} +: "${ac_cv_func_select_args=int,int *,struct timeval *}" fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_func_select_args" >&5 @@ -5615,7 +5638,7 @@ { $as_echo "$as_me:${as_lineno-$LINENO}: checking return type of signal handlers" >&5 $as_echo_n "checking return type of signal handlers... " >&6; } -if test "${ac_cv_type_signal+set}" = set; then : +if ${ac_cv_type_signal+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext @@ -5648,7 +5671,7 @@ { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether lstat correctly handles trailing slash" >&5 $as_echo_n "checking whether lstat correctly handles trailing slash... " >&6; } -if test "${ac_cv_func_lstat_dereferences_slashed_symlink+set}" = set; then : +if ${ac_cv_func_lstat_dereferences_slashed_symlink+:} false; then : $as_echo_n "(cached) " >&6 else rm -f conftest.sym conftest.file @@ -5710,7 +5733,7 @@ { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether stat accepts an empty string" >&5 $as_echo_n "checking whether stat accepts an empty string... " >&6; } -if test "${ac_cv_func_stat_empty_string_bug+set}" = set; then : +if ${ac_cv_func_stat_empty_string_bug+:} false; then : $as_echo_n "(cached) " >&6 else if test "$cross_compiling" = yes; then : @@ -5767,6 +5790,19 @@ done + +# Check whether --with-nagios4 was given. 
+if test "${with_nagios4+set}" = set; then : + withval=$with_nagios4; + CPPFLAGS="${CFLAGS} -DNAGIOS4" + nagios_headers=nagios4 + +else + nagios_headers=nagios +fi + + + ac_config_files="$ac_config_files Makefile src/Makefile" cat >confcache <<\_ACEOF @@ -5833,10 +5869,21 @@ :end' >>confcache if diff "$cache_file" confcache >/dev/null 2>&1; then :; else if test -w "$cache_file"; then - test "x$cache_file" != "x/dev/null" && + if test "x$cache_file" != "x/dev/null"; then { $as_echo "$as_me:${as_lineno-$LINENO}: updating cache $cache_file" >&5 $as_echo "$as_me: updating cache $cache_file" >&6;} - cat confcache >$cache_file + if test ! -f "$cache_file" || test -h "$cache_file"; then + cat confcache >"$cache_file" + else + case $cache_file in #( + */* | ?:*) + mv -f confcache "$cache_file"$$ && + mv -f "$cache_file"$$ "$cache_file" ;; #( + *) + mv -f confcache "$cache_file" ;; + esac + fi + fi else { $as_echo "$as_me:${as_lineno-$LINENO}: not updating unwritable cache $cache_file" >&5 $as_echo "$as_me: not updating unwritable cache $cache_file" >&6;} @@ -5892,7 +5939,7 @@ Usually this means the macro was only invoked conditionally." "$LINENO" 5 fi -: ${CONFIG_STATUS=./config.status} +: "${CONFIG_STATUS=./config.status}" ac_write_fail=0 ac_clean_files_save=$ac_clean_files ac_clean_files="$ac_clean_files $CONFIG_STATUS" @@ -5993,6 +6040,7 @@ IFS=" "" $as_nl" # Find who we are. Look in the path if we contain no directory separator. +as_myself= case $0 in #(( *[\\/]* ) as_myself=$0 ;; *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR @@ -6188,16 +6236,16 @@ # ... but there are two gotchas: # 1) On MSYS, both `ln -s file dir' and `ln file dir' fail. # 2) DJGPP < 2.04 has no symlinks; `ln -s' creates a wrapper executable. - # In both cases, we have to default to `cp -p'. + # In both cases, we have to default to `cp -pR'. ln -s conf$$.file conf$$.dir 2>/dev/null && test ! -f conf$$.exe || - as_ln_s='cp -p' + as_ln_s='cp -pR' elif ln conf$$.file conf$$ 2>/dev/null; then as_ln_s=ln else - as_ln_s='cp -p' + as_ln_s='cp -pR' fi else - as_ln_s='cp -p' + as_ln_s='cp -pR' fi rm -f conf$$ conf$$.exe conf$$.dir/conf$$.file conf$$.file rmdir conf$$.dir 2>/dev/null @@ -6257,28 +6305,16 @@ as_mkdir_p=false fi -if test -x / >/dev/null 2>&1; then - as_test_x='test -x' -else - if ls -dL / >/dev/null 2>&1; then - as_ls_L_option=L - else - as_ls_L_option= - fi - as_test_x=' - eval sh -c '\'' - if test -d "$1"; then - test -d "$1/."; - else - case $1 in #( - -*)set "./$1";; - esac; - case `ls -ld'$as_ls_L_option' "$1" 2>/dev/null` in #(( - ???[sx]*):;;*)false;;esac;fi - '\'' sh - ' -fi -as_executable_p=$as_test_x + +# as_fn_executable_p FILE +# ----------------------- +# Test if FILE is an executable regular file. +as_fn_executable_p () +{ + test -f "$1" && test -x "$1" +} # as_fn_executable_p +as_test_x='test -x' +as_executable_p=as_fn_executable_p # Sed expression to map a string onto a valid CPP name. as_tr_cpp="eval sed 'y%*$as_cr_letters%P$as_cr_LETTERS%;s%[^_$as_cr_alnum]%_%g'" @@ -6299,8 +6335,8 @@ # report actual input values of CONFIG_FILES etc. instead of their # values after options handling. ac_log=" -This file was extended by MK Livestatus $as_me 1.2.2p3, which was -generated by GNU Autoconf 2.67. Invocation command line was +This file was extended by MK Livestatus $as_me 1.2.6p12, which was +generated by GNU Autoconf 2.69. 
Invocation command line was CONFIG_FILES = $CONFIG_FILES CONFIG_HEADERS = $CONFIG_HEADERS @@ -6365,11 +6401,11 @@ cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 ac_cs_config="`$as_echo "$ac_configure_args" | sed 's/^ //; s/[\\""\`\$]/\\\\&/g'`" ac_cs_version="\\ -MK Livestatus config.status 1.2.2p3 -configured by $0, generated by GNU Autoconf 2.67, +MK Livestatus config.status 1.2.6p12 +configured by $0, generated by GNU Autoconf 2.69, with options \\"\$ac_cs_config\\" -Copyright (C) 2010 Free Software Foundation, Inc. +Copyright (C) 2012 Free Software Foundation, Inc. This config.status script is free software; the Free Software Foundation gives unlimited permission to copy, distribute and modify it." @@ -6460,7 +6496,7 @@ _ACEOF cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 if \$ac_cs_recheck; then - set X '$SHELL' '$0' $ac_configure_args \$ac_configure_extra_args --no-create --no-recursion + set X $SHELL '$0' $ac_configure_args \$ac_configure_extra_args --no-create --no-recursion shift \$as_echo "running CONFIG_SHELL=$SHELL \$*" >&6 CONFIG_SHELL='$SHELL' @@ -6499,7 +6535,7 @@ "Makefile") CONFIG_FILES="$CONFIG_FILES Makefile" ;; "src/Makefile") CONFIG_FILES="$CONFIG_FILES src/Makefile" ;; - *) as_fn_error $? "invalid argument: \`$ac_config_target'" "$LINENO" 5 ;; + *) as_fn_error $? "invalid argument: \`$ac_config_target'" "$LINENO" 5;; esac done @@ -6522,9 +6558,10 @@ # after its creation but before its name has been assigned to `$tmp'. $debug || { - tmp= + tmp= ac_tmp= trap 'exit_status=$? - { test -z "$tmp" || test ! -d "$tmp" || rm -fr "$tmp"; } && exit $exit_status + : "${ac_tmp:=$tmp}" + { test ! -d "$ac_tmp" || rm -fr "$ac_tmp"; } && exit $exit_status ' 0 trap 'as_fn_exit 1' 1 2 13 15 } @@ -6532,12 +6569,13 @@ { tmp=`(umask 077 && mktemp -d "./confXXXXXX") 2>/dev/null` && - test -n "$tmp" && test -d "$tmp" + test -d "$tmp" } || { tmp=./conf$$-$RANDOM (umask 077 && mkdir "$tmp") } || as_fn_error $? "cannot create a temporary directory in ." "$LINENO" 5 +ac_tmp=$tmp # Set up the scripts for CONFIG_FILES section. # No need to generate them if there are no CONFIG_FILES. @@ -6559,7 +6597,7 @@ ac_cs_awk_cr=$ac_cr fi -echo 'BEGIN {' >"$tmp/subs1.awk" && +echo 'BEGIN {' >"$ac_tmp/subs1.awk" && _ACEOF @@ -6587,7 +6625,7 @@ rm -f conf$$subs.sh cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 -cat >>"\$tmp/subs1.awk" <<\\_ACAWK && +cat >>"\$ac_tmp/subs1.awk" <<\\_ACAWK && _ACEOF sed -n ' h @@ -6635,7 +6673,7 @@ rm -f conf$$subs.awk cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 _ACAWK -cat >>"\$tmp/subs1.awk" <<_ACAWK && +cat >>"\$ac_tmp/subs1.awk" <<_ACAWK && for (key in S) S_is_set[key] = 1 FS = "" @@ -6667,7 +6705,7 @@ sed "s/$ac_cr\$//; s/$ac_cr/$ac_cs_awk_cr/g" else cat -fi < "$tmp/subs1.awk" > "$tmp/subs.awk" \ +fi < "$ac_tmp/subs1.awk" > "$ac_tmp/subs.awk" \ || as_fn_error $? "could not setup config files machinery" "$LINENO" 5 _ACEOF @@ -6701,7 +6739,7 @@ # No need to generate them if there are no CONFIG_HEADERS. # This happens for instance with `./config.status Makefile'. if test -n "$CONFIG_HEADERS"; then -cat >"$tmp/defines.awk" <<\_ACAWK || +cat >"$ac_tmp/defines.awk" <<\_ACAWK || BEGIN { _ACEOF @@ -6713,8 +6751,8 @@ # handling of long lines. ac_delim='%!_!# ' for ac_last_try in false false :; do - ac_t=`sed -n "/$ac_delim/p" confdefs.h` - if test -z "$ac_t"; then + ac_tt=`sed -n "/$ac_delim/p" confdefs.h` + if test -z "$ac_tt"; then break elif $ac_last_try; then as_fn_error $? 
"could not make $CONFIG_HEADERS" "$LINENO" 5 @@ -6815,7 +6853,7 @@ esac case $ac_mode$ac_tag in :[FHL]*:*);; - :L* | :C*:*) as_fn_error $? "invalid tag \`$ac_tag'" "$LINENO" 5 ;; + :L* | :C*:*) as_fn_error $? "invalid tag \`$ac_tag'" "$LINENO" 5;; :[FH]-) ac_tag=-:-;; :[FH]*) ac_tag=$ac_tag:$ac_tag.in;; esac @@ -6834,7 +6872,7 @@ for ac_f do case $ac_f in - -) ac_f="$tmp/stdin";; + -) ac_f="$ac_tmp/stdin";; *) # Look for the file first in the build tree, then in the source tree # (if the path is not absolute). The absolute path cannot be DOS-style, # because $ac_f cannot contain `:'. @@ -6843,7 +6881,7 @@ [\\/$]*) false;; *) test -f "$srcdir/$ac_f" && ac_f="$srcdir/$ac_f";; esac || - as_fn_error 1 "cannot find input file: \`$ac_f'" "$LINENO" 5 ;; + as_fn_error 1 "cannot find input file: \`$ac_f'" "$LINENO" 5;; esac case $ac_f in *\'*) ac_f=`$as_echo "$ac_f" | sed "s/'/'\\\\\\\\''/g"`;; esac as_fn_append ac_file_inputs " '$ac_f'" @@ -6869,8 +6907,8 @@ esac case $ac_tag in - *:-:* | *:-) cat >"$tmp/stdin" \ - || as_fn_error $? "could not create $ac_file" "$LINENO" 5 ;; + *:-:* | *:-) cat >"$ac_tmp/stdin" \ + || as_fn_error $? "could not create $ac_file" "$LINENO" 5 ;; esac ;; esac @@ -7006,21 +7044,22 @@ s&@MKDIR_P@&$ac_MKDIR_P&;t t $ac_datarootdir_hack " -eval sed \"\$ac_sed_extra\" "$ac_file_inputs" | $AWK -f "$tmp/subs.awk" >$tmp/out \ - || as_fn_error $? "could not create $ac_file" "$LINENO" 5 +eval sed \"\$ac_sed_extra\" "$ac_file_inputs" | $AWK -f "$ac_tmp/subs.awk" \ + >$ac_tmp/out || as_fn_error $? "could not create $ac_file" "$LINENO" 5 test -z "$ac_datarootdir_hack$ac_datarootdir_seen" && - { ac_out=`sed -n '/\${datarootdir}/p' "$tmp/out"`; test -n "$ac_out"; } && - { ac_out=`sed -n '/^[ ]*datarootdir[ ]*:*=/p' "$tmp/out"`; test -z "$ac_out"; } && + { ac_out=`sed -n '/\${datarootdir}/p' "$ac_tmp/out"`; test -n "$ac_out"; } && + { ac_out=`sed -n '/^[ ]*datarootdir[ ]*:*=/p' \ + "$ac_tmp/out"`; test -z "$ac_out"; } && { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $ac_file contains a reference to the variable \`datarootdir' which seems to be undefined. Please make sure it is defined" >&5 $as_echo "$as_me: WARNING: $ac_file contains a reference to the variable \`datarootdir' which seems to be undefined. Please make sure it is defined" >&2;} - rm -f "$tmp/stdin" + rm -f "$ac_tmp/stdin" case $ac_file in - -) cat "$tmp/out" && rm -f "$tmp/out";; - *) rm -f "$ac_file" && mv "$tmp/out" "$ac_file";; + -) cat "$ac_tmp/out" && rm -f "$ac_tmp/out";; + *) rm -f "$ac_file" && mv "$ac_tmp/out" "$ac_file";; esac \ || as_fn_error $? "could not create $ac_file" "$LINENO" 5 ;; @@ -7031,20 +7070,20 @@ if test x"$ac_file" != x-; then { $as_echo "/* $configure_input */" \ - && eval '$AWK -f "$tmp/defines.awk"' "$ac_file_inputs" - } >"$tmp/config.h" \ + && eval '$AWK -f "$ac_tmp/defines.awk"' "$ac_file_inputs" + } >"$ac_tmp/config.h" \ || as_fn_error $? "could not create $ac_file" "$LINENO" 5 - if diff "$ac_file" "$tmp/config.h" >/dev/null 2>&1; then + if diff "$ac_file" "$ac_tmp/config.h" >/dev/null 2>&1; then { $as_echo "$as_me:${as_lineno-$LINENO}: $ac_file is unchanged" >&5 $as_echo "$as_me: $ac_file is unchanged" >&6;} else rm -f "$ac_file" - mv "$tmp/config.h" "$ac_file" \ + mv "$ac_tmp/config.h" "$ac_file" \ || as_fn_error $? "could not create $ac_file" "$LINENO" 5 fi else $as_echo "/* $configure_input */" \ - && eval '$AWK -f "$tmp/defines.awk"' "$ac_file_inputs" \ + && eval '$AWK -f "$ac_tmp/defines.awk"' "$ac_file_inputs" \ || as_fn_error $? 
"could not create -" "$LINENO" 5 fi # Compute "$ac_file"'s index in $config_headers. diff -Nru check-mk-1.2.2p3/configure.ac check-mk-1.2.6p12/configure.ac --- check-mk-1.2.2p3/configure.ac 2013-11-05 09:22:41.000000000 +0000 +++ check-mk-1.2.6p12/configure.ac 2015-09-21 10:59:54.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. @@ -28,7 +28,7 @@ # Process this file with autoconf to produce a configure script. AC_PREREQ(2.61) -AC_INIT([MK Livestatus], [1.2.2p3], [mk@mathias-kettner.de]) +AC_INIT([MK Livestatus], [1.2.6p12], [mk@mathias-kettner.de]) AM_INIT_AUTOMAKE([-Wall foreign]) AC_CONFIG_SRCDIR([config.h.in]) AC_CONFIG_HEADER([config.h]) @@ -70,6 +70,13 @@ AC_FUNC_STAT AC_CHECK_FUNCS([bzero gettimeofday memmove regcomp select socket strcasecmp strdup strerror strtoul]) +AC_ARG_WITH(nagios4,AC_HELP_STRING([--with-nagios4],[enabled compilation for nagios 4]), [ + CPPFLAGS="${CFLAGS} -DNAGIOS4" + nagios_headers=nagios4 + ], + nagios_headers=nagios) +AC_SUBST(nagios_headers) + AC_CONFIG_FILES([Makefile src/Makefile]) AC_OUTPUT Binary files /tmp/tmpz1J_py/xquijR6Ryw/check-mk-1.2.2p3/conf.tar.gz and /tmp/tmpz1J_py/GXS8RRKTxu/check-mk-1.2.6p12/conf.tar.gz differ diff -Nru check-mk-1.2.2p3/cpsecure_sessions check-mk-1.2.6p12/cpsecure_sessions --- check-mk-1.2.2p3/cpsecure_sessions 2013-11-05 09:23:07.000000000 +0000 +++ check-mk-1.2.6p12/cpsecure_sessions 2015-06-24 09:48:36.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. 
@@ -45,24 +45,29 @@ for service, enabled, sessions in info: if item == service: if enabled != '1': - return (1, "WARN - service not enabled") + return (1, "service not enabled") num_sessions = int(sessions) warn, crit = params perfdata = [ ("sessions", num_sessions, warn, crit, 0 ) ] if num_sessions >= crit: - return (2, "CRIT - %s sessions (critical at %d)" % (sessions, crit), perfdata) + return (2, "%s sessions (critical at %d)" % (sessions, crit), perfdata) elif num_sessions >= warn: - return (1, "WARN - %s sessions (warning at %d)" % (sessions, warn), perfdata) + return (1, "%s sessions (warning at %d)" % (sessions, warn), perfdata) else: - return (0, "OK - %s sessions" % sessions, perfdata) + return (0, "%s sessions" % sessions, perfdata) - return (3, "UNKNOWN - service not found") + return (3, "service not found") -check_info["cpsecure_sessions"] = \ - (check_cpsecure_sessions, "Number of %s sessions", 1, inventory_cpsecure_sessions) -snmp_info["cpsecure_sessions"] = ( ".1.3.6.1.4.1.26546.3.1.2.1.1.1", [ "1", "2", "3" ] ) -snmp_scan_functions["cpsecure_sessions"] = \ - lambda oid: oid(".1.3.6.1.2.1.1.2.0") == ".1.3.6.1.4.1.26546.1.1.2" + +check_info["cpsecure_sessions"] = { + 'check_function': check_cpsecure_sessions, + 'inventory_function': inventory_cpsecure_sessions, + 'service_description': 'Number of %s sessions', + 'has_perfdata': True, + 'snmp_info': ('.1.3.6.1.4.1.26546.3.1.2.1.1.1', ['1', '2', '3']), + 'snmp_scan_function': \ + lambda oid: oid(".1.3.6.1.2.1.1.2.0") == ".1.3.6.1.4.1.26546.1.1.2", +} diff -Nru check-mk-1.2.2p3/cpu check-mk-1.2.6p12/cpu --- check-mk-1.2.2p3/cpu 2013-11-05 09:23:07.000000000 +0000 +++ check-mk-1.2.6p12/cpu 2015-09-21 10:59:54.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. 
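Note on the registration change above (cpsecure_sessions, and the same pattern throughout this update): the legacy positional check_info tuple of (check function, service description, has_perfdata flag, inventory function), together with the separate snmp_info / snmp_scan_functions / checkgroup_of tables, is folded into a single dictionary with named keys, and the hard-coded "OK - ", "WARN - ", "CRIT - ", "UNKNOWN - " prefixes disappear from the check output because the state name is now added by the framework itself. A minimal sketch of the two styles side by side, using a hypothetical check_dummy/inventory_dummy pair that is not part of this patch (check_info is stubbed here only so the snippet runs standalone; in a real check it is provided by Check_MK):

    check_info = {}   # provided by Check_MK at runtime; stubbed for standalone use

    def inventory_dummy(info):
        return [(None, None)]            # one unparametrized service

    def check_dummy(item, params, info):
        return (0, "agent is reachable")

    # Legacy style: positional tuple plus side tables, e.g.
    #   check_info["dummy"] = (check_dummy, "Dummy %s", 0, inventory_dummy)
    #   checkgroup_of["dummy"] = "dummy_group"

    # New style: one dictionary with named, self-documenting keys
    check_info["dummy"] = {
        'check_function':      check_dummy,
        'inventory_function':  inventory_dummy,
        'service_description': 'Dummy %s',
        'has_perfdata':        False,
        'group':               'dummy_group',
    }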
@@ -28,7 +28,7 @@ # 0.26 0.47 0.52 2/459 19531 4 -cpuload_default_levels = (5, 10) +cpuload_default_levels = (5.0, 10.0) threads_default_levels = (2000, 4000) def inventory_cpu_load(info): @@ -36,25 +36,14 @@ return [(None, "cpuload_default_levels")] def check_cpu_load(item, params, info): - load = [] - for i in [ 0, 1, 2 ]: - load.append(float(info[0][i])) if len(info[0]) >= 6: num_cpus = int(info[0][5]) else: num_cpus = 1 - warn, crit = params # apply on 15min average, relative to number of CPUs - warn = warn * num_cpus - crit = crit * num_cpus - perfdata = [ ('load' + str(z), l, warn, crit, 0, num_cpus ) for (z,l) in [ (1,load[0]), (5,load[1]), (15, load[2]) ] ] - - if load[2] >= crit: - return (2, "CRIT - 15min load %.2f at %s CPUs (critical at %.2f)" % (load[2], num_cpus, crit), perfdata) - elif load[2] >= warn: - return (1, "WARN - 15min load %.2f at %s CPUs (warning at %.2f)" % (load[2], num_cpus, warn), perfdata) - else: - return (0, "OK - 15min load %.2f at %s CPUs" % (load[2], num_cpus), perfdata) + load = map(float, info[0][0:3]) + return check_cpu_load_generic(params, load, num_cpus) + def inventory_cpu_threads(info): if len(info) == 1 and len(info[0]) >= 5: @@ -64,22 +53,23 @@ try: nthreads = int(info[0][3].split('/')[1]) except: - return (3, "UNKNOWN - invalid output from plugin") + return (3, "invalid output from plugin") warn, crit = params perfdata = [('threads', nthreads, warn, crit, 0 )] if nthreads >= crit: - return (2, "CRIT - %d threads (critical at %d)" % (nthreads, crit), perfdata) + return (2, "%d threads (critical at %d)" % (nthreads, crit), perfdata) elif nthreads >= warn: - return (1, "WARN - %d threads (warning at %d)" % (nthreads, warn), perfdata) + return (1, "%d threads (warning at %d)" % (nthreads, warn), perfdata) else: - return (0, "OK - %d threads" % (nthreads,), perfdata) + return (0, "%d threads" % (nthreads,), perfdata) check_info["cpu.loads"] = { - "check_function" : check_cpu_load, - "inventory_function" : inventory_cpu_load, - "service_description" : "CPU load", - "has_perfdata" : True, - "group" : "cpu_load", + "check_function" : check_cpu_load, + "inventory_function" : inventory_cpu_load, + "service_description" : "CPU load", + "has_perfdata" : True, + "group" : "cpu_load", + "includes" : ["cpu_load.include"], } check_info["cpu.threads"] = { diff -Nru check-mk-1.2.2p3/cpu_load.include check-mk-1.2.6p12/cpu_load.include --- check-mk-1.2.2p3/cpu_load.include 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/cpu_load.include 2015-09-21 10:59:54.000000000 +0000 @@ -0,0 +1,50 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. 
See the GNU General Public License for more de- +# tails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. + +# Common code for all CPU load checks. Please do not mix this up +# with CPU utilization. The load is at any time the current number +# of processes in the running state (on some systems, like Linux, +# disk wait is also accounted for in the load). + +def check_cpu_load_generic(params, load, num_cpus=1): + + # Prepare performance data + if type(params) == tuple: + warn, crit = [ p*num_cpus for p in params ] + else: + warn, crit = None, None + + perfdata = [ ('load' + str(z), l, warn, crit, 0, num_cpus ) + for (z,l) in [ (1,load[0]), (5,load[1]), (15, load[2]) ] ] + + state, text, perf = check_levels(load[2], 'load15', params, factor = num_cpus) + perfdata += perf + infotext = "15min load %.2f" % load[2] + if num_cpus > 1: + infotext += " at %d CPUs" % num_cpus + if text: + infotext += ", " + text + return state, infotext, perfdata diff -Nru check-mk-1.2.2p3/cpu.loads check-mk-1.2.6p12/cpu.loads --- check-mk-1.2.2p3/cpu.loads 2013-11-05 09:23:07.000000000 +0000 +++ check-mk-1.2.6p12/cpu.loads 2015-06-24 09:48:36.000000000 +0000 @@ -1,6 +1,6 @@ -title: Check CPU load -agents: linux -author: Mathias Kettner +title: CPU load +agents: linux, aix, freebsd, macosx, netbsd, openbsd, solaris +catalog: os/kernel license: GPL distribution: check_mk description: @@ -8,6 +8,11 @@ for 1, 5 and 15 minute average are sent, although the PNP template shipped with check_mk only displays the 1 and 15 min average load. + Note: The CPU load is the average number of processes that are + currently in the state "running". Do not mix this up with the + CPU "utilization" (which measures the current usage of the CPU + in percent). + examples: # Set default levels to a load of 4 and 8 per CPU cpuload_default_levels = (4.0, 8.0) diff -Nru check-mk-1.2.2p3/cpu.threads check-mk-1.2.6p12/cpu.threads --- check-mk-1.2.2p3/cpu.threads 2013-11-05 09:23:07.000000000 +0000 +++ check-mk-1.2.6p12/cpu.threads 2015-06-24 09:48:36.000000000 +0000 @@ -1,6 +1,6 @@ -title: Measure current number of processes / threads -agents: linux -author: Mathias Kettner +title: Total number of current processes and threads +agents: linux, aix, freebsd, macosx, netbsd, openbsd, solaris +catalog: os/kernel license: GPL distribution: check_mk description: diff -Nru check-mk-1.2.2p3/cpu_util.include check-mk-1.2.6p12/cpu_util.include --- check-mk-1.2.2p3/cpu_util.include 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/cpu_util.include 2015-09-21 10:59:54.000000000 +0000 @@ -0,0 +1,145 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2.
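check_cpu_load_generic above treats tuple levels as per-CPU values and lets check_levels(..., factor=num_cpus) apply them to the 15-minute average. A self-contained sketch of just that scaling, with the Check_MK helper check_levels replaced by a plain threshold comparison (an approximation for illustration only; the real helper also supports dict-based and predictive levels):

    def check_load15(load15, params, num_cpus=1):
        # Tuple levels are per CPU: (5.0, 10.0) on a 4-core host means
        # warn at 20.0 and crit at 40.0 on the 15-minute average.
        if type(params) == tuple:
            warn, crit = [ p * num_cpus for p in params ]
        else:
            warn, crit = None, None   # dict params carry their own levels

        infotext = "15min load %.2f" % load15
        if crit is not None and load15 >= crit:
            return 2, infotext + " (critical at %.2f)" % crit
        elif warn is not None and load15 >= warn:
            return 1, infotext + " (warning at %.2f)" % warn
        return 0, infotext

    print check_load15(21.3, (5.0, 10.0), num_cpus=4)
    # -> (1, '15min load 21.30 (warning at 20.00)')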
check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# tails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. + +# Common file for all (modern) checks that check CPU utilization (not load!) + +# Example for check parameters: +# 1. Variant: Tuple (warn, crit). This is legacy style +# 2. Variant: dictionary: +# +# param = { +# "levels" : .... --> compatible with check_levels(), optional +# "average" : 15 # -> compute average for 15 minutes, optional +# } + +def check_cpu_util(util, params, this_time = None): + # Convert legacy param style to new dict style + if this_time == None: + this_time = time.time() + + if params == None: + params = {} + elif type(params) == tuple: + params = { + "levels" : params, + } + + infotext = "%.1f %% used" % util + + # Averaging + if "average" in params: + util_avg = get_average("cpu_utilization.avg", this_time, util, params["average"]) + check_against = util_avg + counter = "avg" + infotext += ", %dmin average: %.1f %%" % (params["average"], util_avg) + else: + check_against = util + counter = "util" + + + levels = params.get("levels") + if type(levels) == tuple: + warn, crit = levels # only for perfdata + else: + warn, crit = None, None + + state, extrainfo, extraperf = check_levels(check_against, counter, levels) + if extrainfo: + infotext += "," + extrainfo + + perfdata = [ ("util", util, warn, crit, 0, 100) ] + if "average" in params: + perfdata.append( ("avg", util_avg, warn, crit, 0, 100) ) + + perfdata += extraperf # reference curve for predictive levels + return state, infotext, perfdata + + +# This one can handle user, system and wait. values is a list of: +# - 0 - user: normal processes executing in user mode +# - 1 - nice: niced processes executing in user mode +# - 2 - system: processes executing in kernel mode +# - 3 - idle: twiddling thumbs +# - 4 - iowait: waiting for I/O to complete +# - 5 - irq: servicing interrupts +# - 6 - softirq: servicing softirqs +# - 7 - steal: involuntary wait +def check_cpu_util_unix(values, params): + this_time = int(time.time()) + # Compute jiffy differences of all relevant counters + diff_values = [] + n = 0 + global g_counters + for v in values: + n += 1 + countername = "cpu.util.%d" % n + last_time, last_val = g_counters.get(countername, (0, 0)) + diff_values.append(v - last_val) + g_counters[countername] = (this_time, v) + + sum_jiffies = sum(diff_values) # do not account for steal!
+ if sum_jiffies == 0: + raise MKCounterWrapped("Too short time difference since last check") + + user = diff_values[0] + diff_values[1] # add user + nice + system = diff_values[2] + diff_values[5] + diff_values[6] + wait = diff_values[4] + user_perc = 100.0 * float(user) / float(sum_jiffies) + system_perc = 100.0 * float(system) / float(sum_jiffies) + wait_perc = 100.0 * float(wait) / float(sum_jiffies) + perfdata = [ + ( "user", "%.3f" % user_perc ), + ( "system", "%.3f" % system_perc ), + ( "wait", "%.3f" % wait_perc ) ] + + yield 0, "user: %.1f%%, system: %.1f%%" % (user_perc, system_perc), perfdata + + # Handle level on iowait + state = 0 + if "iowait" in params and params["iowait"] != None: + warn, crit = params["iowait"] + if wait_perc >= crit: + state = 2 + elif wait_perc >= warn: + state = 1 + yield state, "wait: %.1f%%" % (wait_perc) + + # Total utilization + util_total_perc = user_perc + system_perc + wait_perc + state = 0 + levelstext = "" + if "util" in params: + warn, crit = params["util"] + if util_total_perc >= crit: + state = 2 + elif util_total_perc >= warn: + state = 1 + else: + state = 0 + if state: + levelstext = " (warn/crit at %.1f%%/%.1f%%)" % (warn, crit) + + yield state, "total: %.1f%%" % util_total_perc + levelstext diff -Nru check-mk-1.2.2p3/cups_queues check-mk-1.2.6p12/cups_queues --- check-mk-1.2.2p3/cups_queues 2013-11-05 09:23:07.000000000 +0000 +++ check-mk-1.2.6p12/cups_queues 2015-06-24 09:48:36.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. 
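Before the next file, a note on check_cpu_util_unix above: it derives percentages from the differences of ever-increasing jiffy counters between two agent runs, keeping the previous sample in the persisted g_counters dict and raising MKCounterWrapped when no jiffies have passed. A stripped-down, self-contained sketch of that counter-delta idea (a plain module-level dict instead of g_counters, and only the user/nice/system/idle/iowait columns):

    _last = {}   # stands in for Check_MK's persisted g_counters

    def util_percentages(values):
        # values: current [user, nice, system, idle, iowait] jiffies
        deltas = []
        for n, v in enumerate(values):
            deltas.append(v - _last.get(n, 0))
            _last[n] = v
        total = sum(deltas)
        if total == 0:
            raise RuntimeError("too short time difference since last check")
        user   = deltas[0] + deltas[1]    # user + nice
        system = deltas[2]
        wait   = deltas[4]
        return [ 100.0 * d / total for d in (user, system, wait) ]

    util_percentages([100, 0, 50, 800, 50])           # first run only primes _last
    print util_percentages([200, 0, 80, 1580, 140])   # -> [10.0, 3.0, 9.0]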
@@ -45,8 +45,6 @@ #lpr2-2 root 1024 Tue Jun 29 09:02:35 2010 #lpr2-3 root 1024 Tue Jun 29 09:05:54 2010 -import time, datetime - # Default thresholds # ("<warnNum>", "<critNum>", "<warnAge>", "<critAge>") cups_queues_default_levels = (5, 10, 360, 720) @@ -57,9 +55,9 @@ def check_cups_queues(item, params, info): warnNum, critNum, warnAge, critAge = params state = 3 - output = "UNKNOWN - Queue not found" + output = "Queue not found" numJobs = 0 - now = datetime.datetime.now() + now = time.time() oldest = now for num, line in enumerate(info): if line[0] == 'printer' and line[1] == item: @@ -67,7 +65,7 @@ status = ' '.join(line[2:4]) # If the next line does not start with "printer", append it as additional output - if not info[num+1][0] in [ 'printer', '---' ]: + if len(info) > num+1 and not info[num+1][0] in [ 'printer', '---' ]: statusoutput += " (%s)" % " ".join(info[num+1]) if status == "disabled since": @@ -82,18 +80,32 @@ elif line[0].startswith(item+'-'): # This is a queue item; count the number of items and check the max age numJobs += 1 - dt = datetime.datetime(*time.strptime(' '.join(line[3:]), '%a %b %d %H:%M:%S %Y')[:6]) + # Try to parse different time formats + timestring = ' '.join(line[3:]) + try: + # Tue Jun 29 09:05:54 2010 + parsed = time.strptime(timestring, '%a %b %d %H:%M:%S %Y') + except: + # Thu 29 Aug 2013 12:41:42 AM CEST + parsed = time.strptime(timestring, '%a %d %b %Y %H:%M:%S %p %Z') + dt = time.mktime(parsed) if dt < oldest: oldest = dt jobOutput = 'Jobs: %d' % numJobs if numJobs > 0: - if oldest < now - datetime.timedelta(seconds=critAge) or numJobs > critNum: + if oldest < now - critAge or numJobs > critNum: state = 2 - elif oldest < now - datetime.timedelta(seconds=warnAge) or numJobs > warnNum: + elif oldest < now - warnAge or numJobs > warnNum: state = 1 - jobOutput += ', Oldest job is from %s' % oldest + jobOutput += ', Oldest job is from %s' % time.strftime("%c", time.localtime(oldest)) + + return (state, output + " - " + jobOutput, [ ("jobs", numJobs, warnNum, critNum, 0) ]) - return (state, nagios_state_names[state] + " - " + output + " - " + jobOutput, [ ("jobs", numJobs, warnNum, critNum, 0) ]) -check_info['cups_queues'] = (check_cups_queues, "CUPS Queue %s", 1, inventory_cups_queues) +check_info["cups_queues"] = { + 'check_function': check_cups_queues, + 'inventory_function': inventory_cups_queues, + 'service_description': 'CUPS Queue %s', + 'has_perfdata': True, +} diff -Nru check-mk-1.2.2p3/db2_mem check-mk-1.2.6p12/db2_mem --- check-mk-1.2.2p3/db2_mem 2013-11-05 09:23:07.000000000 +0000 +++ check-mk-1.2.6p12/db2_mem 2015-09-21 10:59:54.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK.
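The cups_queues rewrite above drops the datetime module in favour of plain epoch seconds and tolerates a second lpstat date format via try/except. A standalone sketch of that fallback parsing (the sample strings are taken from the comments in the hunk; parsing a zone name such as CEST with %Z is locale- and platform-dependent, which is presumably why the check itself uses a bare except instead of catching ValueError only):

    import time

    def parse_job_time(timestring):
        try:
            # e.g. "Tue Jun 29 09:05:54 2010"
            parsed = time.strptime(timestring, '%a %b %d %H:%M:%S %Y')
        except ValueError:
            # e.g. "Thu 29 Aug 2013 12:41:42 AM CEST"
            parsed = time.strptime(timestring, '%a %d %b %Y %H:%M:%S %p %Z')
        return time.mktime(parsed)   # epoch seconds, comparable with time.time()

    age = time.time() - parse_job_time('Tue Jun 29 09:05:54 2010')
    print 'oldest job is about %d seconds old' % age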
@@ -35,11 +35,17 @@ values = [] perf = [] state = 3 - message = " - Instance not found" + message = "Instance not found" for line in info: if hit > 0 and hit <= 2: hit += 1 - values.append(saveint(line[-2])) + if line[-1].lower() == "kb": + value = int(line[-2]) * 1024 + elif line[-1].lower() == "mb": + value = int(line[-2]) * 1024 * 1024 + else: + value = int(line[-2]) + values.append(value) if len(values) == 2: limit, usage = values left = limit - usage @@ -53,7 +59,7 @@ label = "" state = 0 - message = " - Max: %s, Used: %s (%.2d%% Free%s) " % \ + message = "Max: %s, Used: %s (%.2d%% Free%s) " % \ (get_bytes_human_readable(limit), get_bytes_human_readable(usage), perc_level, label) perf = [("mem", usage, 0, 0, 0, limit )] @@ -61,7 +67,7 @@ hit = 1 - return state, nagios_state_names[state] + message, perf + return state, message, perf check_info['db2_mem'] = { diff -Nru check-mk-1.2.2p3/debian/apache.icinga check-mk-1.2.6p12/debian/apache.icinga --- check-mk-1.2.2p3/debian/apache.icinga 2015-12-21 17:47:25.000000000 +0000 +++ check-mk-1.2.6p12/debian/apache.icinga 2015-12-21 17:47:25.000000000 +0000 @@ -1,4 +1,4 @@ -# Created by setup of check_mk version 1.1.6p1 +# Created by setup of check_mk version 1.2.6p12 # This file will *not* be overwritten at the next setup # of check_mk. You may edit it as needed. In order to get # a new version, please delete it and re-run setup.sh. @@ -10,12 +10,12 @@ # inconvenience. - Alias /check_mk /usr/share/check_mk/web/htdocs - + Alias /check_mk /usr/share/check_mk/web/htdocs + AddHandler mod_python .py PythonHandler index - PythonDebug On - DirectoryIndex index.py + PythonDebug On + DirectoryIndex index.py #Handle apache 2.2 and 2.4 = 2.3> @@ -25,23 +25,52 @@ Order deny,allow allow from all - # Need Nagios authentication. Please edit the - # following: Set AuthName and AuthUserFile to the - # same value that you use for your Nagios configuration! - AuthName "Icinga Access" + # Need Nagios authentication. Please edit the + # following: Set AuthName and AuthUserFile to the + # same value that you use for your Nagios configuration! + Order deny,allow + allow from all + AuthName "Icinga Access" AuthType Basic AuthUserFile /etc/icinga/htpasswd.users require valid-user - ErrorDocument 403 "

Authentication Problem

Either you've entered an invalid password or the authentication
configuration of your check_mk web pages is incorrect.

Please make sure that you've edited the file
/etc/apache2/conf.d/check_mk and made it use the same
authentication settings as your Nagios web pages.
Restart Apache afterwards." - ErrorDocument 500 "

Server or Configuration Problem

A server problem occurred. You'll find details in the error log of Apache. One possible reason is that the file /etc/nagios/htpasswd.users is missing. You can create that file with htpasswd or htpasswd2. A better solution might be to use your existing htpasswd file from your Nagios installation. Please edit /etc/apache2/conf.d/check_mk and change the path there. Restart Apache afterwards." -
+ ErrorDocument 403 "

Authentication Problem

Either you've entered an invalid password or the authentication
configuration of your check_mk web pages is incorrect.

Please make sure that you've edited the file
/etc/apache2/conf.d/check_mk and made it use the same
authentication settings as your Nagios web pages.
Restart Apache afterwards." + ErrorDocument 500 "

Server or Configuration Problem

A server problem occurred. You'll find details in the error log of Apache. One possible reason is that the file /etc/icinga/htpasswd.users is missing. You can create that file with htpasswd or htpasswd2. A better solution might be to use your existing htpasswd file from your Nagios installation. Please edit /etc/apache2/conf.d/check_mk and change the path there. Restart Apache afterwards." +
+ ## WARNING: automation is part of multisite, more information at + ## http://mathias-kettner.com/checkmk_multisite_automation.html + ## It uses a shared secret rather than HTTP Auth for authentication + ## and is potentially exposed to public networks so is disabled on Debian + ## by default. If you need this feature, be sure you understand the + ## security implications and take necessary precautions before turning it on. + ## Automation is done without HTTP Auth + # + # Order allow,deny + # Allow from all + # Satisfy any + # + + ## WARNING: like automation above, run_cron is part of multisite. + ## It does not use HTTP Auth, but is only exposed to localhost. Having + ## it enabled has less risk, but since it's part of multisite it is + ## also disabled by default on Debian. + ## Trigger cron jobs. This is done without authentication + # + # Order deny,allow + # Deny from all + # Allow from 127.0.0.1 + # Satisfy any + # +
+ + - Alias /check_mk /usr/share/check_mk/web/htdocs - + Alias /check_mk /usr/share/check_mk/web/htdocs + Deny from all ErrorDocument 403 "

Check_mk: Incomplete Apache2 Installation

You need mod_python in order to run the web interface of check_mk.
Please install mod_python and restart Apache." -
+
diff -Nru check-mk-1.2.2p3/debian/apache.nagios3 check-mk-1.2.6p12/debian/apache.nagios3 --- check-mk-1.2.2p3/debian/apache.nagios3 2015-12-21 17:47:25.000000000 +0000 +++ check-mk-1.2.6p12/debian/apache.nagios3 2015-12-21 17:47:25.000000000 +0000 @@ -1,4 +1,4 @@ -# Created by setup of check_mk version 1.1.6p1 +# Created by setup of check_mk version 1.2.6p12 # This file will *not* be overwritten at the next setup # of check_mk. You may edit it as needed. In order to get # a new version, please delete it and re-run setup.sh. @@ -10,12 +10,12 @@ # inconvenience. - Alias /check_mk /usr/share/check_mk/web/htdocs - + Alias /check_mk /usr/share/check_mk/web/htdocs + AddHandler mod_python .py PythonHandler index - PythonDebug On - DirectoryIndex index.py + PythonDebug On + DirectoryIndex index.py #Handle apache 2.2 and 2.4 = 2.3> @@ -25,23 +25,52 @@ Order deny,allow allow from all - # Need Nagios authentication. Please edit the - # following: Set AuthName and AuthUserFile to the - # same value that you use for your Nagios configuration! - AuthName "Nagios Access" + # Need Nagios authentication. Please edit the + # following: Set AuthName and AuthUserFile to the + # same value that you use for your Nagios configuration! + Order deny,allow + allow from all + AuthName "Nagios Access" AuthType Basic AuthUserFile /etc/nagios3/htpasswd.users require valid-user - ErrorDocument 403 "

Authentication Problem

Either you've entered an invalid password or the authentication
configuration of your check_mk web pages is incorrect.

Please make sure that you've edited the file
/etc/apache2/conf.d/check_mk and made it use the same
authentication settings as your Nagios web pages.
Restart Apache afterwards." - ErrorDocument 500 "

Server or Configuration Problem

A server problem occurred. You'll find details in the error log of Apache. One possible reason is that the file /etc/nagios/htpasswd.users is missing. You can create that file with htpasswd or htpasswd2. A better solution might be to use your existing htpasswd file from your Nagios installation. Please edit /etc/apache2/conf.d/check_mk and change the path there. Restart Apache afterwards." -
+ ErrorDocument 403 "

Authentication Problem

Either you've entered an invalid password or the authentication
configuration of your check_mk web pages is incorrect.

Please make sure that you've edited the file
/etc/apache2/conf.d/check_mk and made it use the same
authentication settings as your Nagios web pages.
Restart Apache afterwards." + ErrorDocument 500 "

Server or Configuration Problem

A server problem occurred. You'll find details in the error log of Apache. One possible reason is that the file /etc/nagios/htpasswd.users is missing. You can create that file with htpasswd or htpasswd2. A better solution might be to use your existing htpasswd file from your Nagios installation. Please edit /etc/apache2/conf.d/check_mk and change the path there. Restart Apache afterwards." +
+ ## WARNING: automation is part of multisite, more information at + ## http://mathias-kettner.com/checkmk_multisite_automation.html + ## It uses a shared secret rather than HTTP Auth for authentication + ## and is potentially exposed to public networks so is disabled on Debian + ## by default. If you need this feature, be sure you understand the + ## security implications and take necessary precautions before turning it on. + ## Automation is done without HTTP Auth + # + # Order allow,deny + # Allow from all + # Satisfy any + # + + ## WARNING: like automation above, run_cron is part of multisite. + ## It does not use HTTP Auth, but is only exposed to localhost. Having + ## it enabled has less risk, but since it's part of multisite it is + ## also disabled by default on Debian. + ## Trigger cron jobs. This is done without authentication + # + # Order deny,allow + # Deny from all + # Allow from 127.0.0.1 + # Satisfy any + # +
+ + - Alias /check_mk /usr/share/check_mk/web/htdocs - + Alias /check_mk /usr/share/check_mk/web/htdocs + Deny from all ErrorDocument 403 "

Check_mk: Incomplete Apache2 Installation

You need mod_python in order to run the web interface of check_mk.
Please install mod_python and restart Apache." -
+
diff -Nru check-mk-1.2.2p3/debian/changelog check-mk-1.2.6p12/debian/changelog --- check-mk-1.2.2p3/debian/changelog 2015-12-21 17:47:25.000000000 +0000 +++ check-mk-1.2.6p12/debian/changelog 2015-12-21 17:47:25.000000000 +0000 @@ -1,3 +1,81 @@ +check-mk (1.2.6p12-1ubuntu1~trusty) trusty; urgency=medium + + * automatic rebuild + + -- gandalf Mon, 21 Dec 2015 18:20:21 +0100 + +check-mk (1.2.6p12-1) unstable; urgency=medium + + [ Matt Taggart ] + * Imported Upstream version 1.2.6p12 + * fix multisite.d dir in postinst (Closes: #798344). + + -- Matt Taggart Thu, 24 Sep 2015 13:08:41 -0700 + +check-mk (1.2.6p7-1) unstable; urgency=low + + [ Matt Taggart ] + * Imported Upstream version 1.2.6p7 + * add myself to uploaders + * fix some lintian errors + * update version in defaults files (Closes: #792395). + * provide cmk and mkp utils. + * provide example multisite.mk config. + * ensure some needed directories are created. + * remove smartmontools depends until we properly fix the smart plugin. + * review difference between upstream and debian install paths and + document in README.source + + [ Bernhard Schmidt ] + * start providing mk-job + * fix -agent-logwatch depends + * -agent-logwatch is arch all + + -- Matt Taggart Fri, 21 Aug 2015 16:44:17 -0700 + +check-mk (1.2.6p5-1) unstable; urgency=medium + + [ Matt Taggart ] + * Imported Upstream version 1.2.6p5 + * migrate multisite conffile, fix logic to better detect different + cases (Closes: #732357) + * check-mk-server: clean up stuff in /var on purge (Closes: #788829). + * stop packaging all upsteam provided binaries (Closes: #790308). + + -- Matt Taggart Fri, 03 Jul 2015 15:15:53 -0700 + +check-mk (1.2.6p4-1) unstable; urgency=medium + + [ Thomas Bechtold ] + * New upstream release (Closes: #738987). + * debian/defaults.*: Use correct check-mk version. + * debian/control: + - Add myself to Uploaders field. + - Remove Sven Velt from Uploaders field (Closes: #739092). + + [ Ilya Rassadin ] + * New upstream release (Closes: #778380). + * debian/defaults.*: Use correct check-mk version. + * debian/control: Add myself to Uploaders field. + * debian/check-mk-server.install: Add path for flexible notifications + * debian/check-mk-server.postinst: Add path for flexible notifications + + [ Matt Taggart ] + * Confirmed that CVE-2014-2329, CVE-2014-2330, CVE-2014-2331, + CVE-2014-2332 are fixed in upstream as of 1.2.3i5 (Closes: #742689). + * New upstream release (Closes: #778380). + * upstream forgot to include waitmax.c in their "source" tarball, + provide it in the source package for now + * logwatch.cfg example changed location upstream, adjust + check_mk_agent_logwatch.{example,install} + * regenerate defaults.* starting with upstream versions generated by + setup.sh and then porting forward the debian specific changes. + * regenerate apache.* starting with upstream versions generated by + setup.sh and then porting forward the debian specific changes. + Disable multisite automation.py and run_cron.py services by default. + + -- Matt Taggart Wed, 10 Jun 2015 11:10:32 -0700 + check-mk (1.2.2p3-1) unstable; urgency=low * New upstream release. 
diff -Nru check-mk-1.2.2p3/debian/check-mk-agent.install check-mk-1.2.6p12/debian/check-mk-agent.install --- check-mk-1.2.2p3/debian/check-mk-agent.install 2015-12-21 17:47:25.000000000 +0000 +++ check-mk-1.2.6p12/debian/check-mk-agent.install 2015-12-21 17:47:25.000000000 +0000 @@ -1 +1,2 @@ usr/share/check_mk/agents/waitmax usr/bin +usr/share/check_mk/agents/mk-job usr/bin diff -Nru check-mk-1.2.2p3/debian/check-mk-agent-logwatch.examples check-mk-1.2.6p12/debian/check-mk-agent-logwatch.examples --- check-mk-1.2.2p3/debian/check-mk-agent-logwatch.examples 2015-12-21 17:47:25.000000000 +0000 +++ check-mk-1.2.6p12/debian/check-mk-agent-logwatch.examples 2015-12-21 17:47:25.000000000 +0000 @@ -1 +1,2 @@ -debian/tmp/usr/share/check_mk/agents/logwatch.cfg +debian/tmp/usr/share/check_mk/agents/cfg_examples/logwatch.cfg + diff -Nru check-mk-1.2.2p3/debian/check-mk-agent-logwatch.install check-mk-1.2.6p12/debian/check-mk-agent-logwatch.install --- check-mk-1.2.2p3/debian/check-mk-agent-logwatch.install 2015-12-21 17:47:25.000000000 +0000 +++ check-mk-1.2.6p12/debian/check-mk-agent-logwatch.install 2015-12-21 17:47:25.000000000 +0000 @@ -1,2 +1,3 @@ usr/share/check_mk/agents/plugins/mk_logwatch usr/lib/check_mk_agent/plugins -usr/share/check_mk/agents/logwatch.cfg etc/check_mk +usr/share/check_mk/agents/cfg_examples/logwatch.cfg /etc/check_mk + diff -Nru check-mk-1.2.2p3/debian/check-mk-config-icinga.postinst check-mk-1.2.6p12/debian/check-mk-config-icinga.postinst --- check-mk-1.2.2p3/debian/check-mk-config-icinga.postinst 2015-12-21 17:47:25.000000000 +0000 +++ check-mk-1.2.6p12/debian/check-mk-config-icinga.postinst 2015-12-21 17:47:25.000000000 +0000 @@ -21,6 +21,16 @@ /etc/icinga/objects/check_mk/check_mk_templates.cfg \ 1.1.10-2 -- "$@" +# older releases (1.1.12p7-1 at least) delivered the multisite conffile to +# /etc/apache2/conf.d. If it exists we need to move it to conf-available; +# once it's moved, the code below will set up the conf.d symlink. +# This code wasn't in place until 1.2.6p4-2, so we need to check everything +# before that. +dpkg-maintscript-helper mv_conffile \ + /etc/apache2/conf.d/check_mk_multisite.cfg \ + /etc/apache2/conf-available/check_mk_multisite.cfg \ + 1.2.6p4-2 -- "$@" + if [ -e '/etc/icinga/objects/check_mk_objects.cfg' ]; then echo 'Migrate /etc/icinga/objects/check_mk_objects.cfg to /etc/icinga/objects/check_mk/check_mk_objects.cfg' @@ -48,9 +58,19 @@ . /usr/share/apache2/apache2-maintscript-helper apache2_invoke enconf $CONF || exit $? elif [ "$COMMON_STATE" = "installed" ] || [ "$COMMON_STATE" = "unpacked" ] ; then - [ -d /etc/apache2/conf.d/ ] && [ !
-L /etc/apache2/conf.d/$CONF.conf ] && ln -s ../conf-available/$CONF.conf /etc/apache2/conf.d/$CONF.conf + if [ -d /etc/apache2/conf.d/ ]; then + if [ -L /etc/apache2/conf.d/$CONF.conf ]; then + # it's a symlink, all is well + true + elif [ -e /etc/apache2/conf.d/$CONF.conf ]; then + # it's not a symlink, but exists, error + echo "ERROR: /etc/apache2/conf.d/$CONF.conf is not a symlink, please investigate" 1>&2 + else + # we need to create the symlink + ln -s ../conf-available/$CONF.conf /etc/apache2/conf.d/$CONF.conf + fi + fi fi - ;; abort-upgrade|abort-remove|abort-deconfigure) ;; diff -Nru check-mk-1.2.2p3/debian/check-mk-config-icinga.postrm check-mk-1.2.6p12/debian/check-mk-config-icinga.postrm --- check-mk-1.2.2p3/debian/check-mk-config-icinga.postrm 2015-12-21 17:47:25.000000000 +0000 +++ check-mk-1.2.6p12/debian/check-mk-config-icinga.postrm 2015-12-21 17:47:25.000000000 +0000 @@ -7,6 +7,16 @@ /etc/icinga/objects/check_mk/check_mk_templates.cfg \ 1.1.10-2 -- "$@" +# older releases (1.1.12p7-1 at least) delivered the multisite conffile to +# /etc/apache2/conf.d. If it exists we need to move it to conf-available; +# once it's moved, the code below will set up the conf.d symlink. +# This code wasn't in place until 1.2.6p4-2, so we need to check everything +# before that. +dpkg-maintscript-helper mv_conffile \ + /etc/apache2/conf.d/check_mk_multisite.cfg \ + /etc/apache2/conf-available/check_mk_multisite.cfg \ + 1.2.6p4-2 -- "$@" + if [ "$1" = "remove" ] || [ "$1" = "purge" ] ; then CONF="check-mk-multisite" COMMON_STATE=$(dpkg-query -f '${Status}' -W 'apache2.2-common' 2>/dev/null | awk '{print $3}' || true) diff -Nru check-mk-1.2.2p3/debian/check-mk-config-nagios3.postinst check-mk-1.2.6p12/debian/check-mk-config-nagios3.postinst --- check-mk-1.2.2p3/debian/check-mk-config-nagios3.postinst 2015-12-21 17:47:25.000000000 +0000 +++ check-mk-1.2.6p12/debian/check-mk-config-nagios3.postinst 2015-12-21 17:47:25.000000000 +0000 @@ -21,6 +21,16 @@ /etc/nagios3/conf.d/check_mk/check_mk_templates.cfg \ 1.1.10-2 -- "$@" +# older releases (1.1.12p7-1 at least) delivered the multisite conffile to +# /etc/apache2/conf.d. If it exists we need to move it to conf-available; +# once it's moved, the code below will set up the conf.d symlink. +# This code wasn't in place until 1.2.6p4-2, so we need to check everything +# before that. +dpkg-maintscript-helper mv_conffile \ + /etc/apache2/conf.d/check_mk_multisite.cfg \ + /etc/apache2/conf-available/check_mk_multisite.cfg \ + 1.2.6p4-2 -- "$@" + if [ -e '/etc/nagios3/conf.d/check_mk_objects.cfg' ]; then echo 'Migrate /etc/nagios3/conf.d/check_mk_objects.cfg to /etc/nagios3/conf.d/objects/check_mk/check_mk_objects.cfg' @@ -48,7 +58,18 @@ . /usr/share/apache2/apache2-maintscript-helper apache2_invoke enconf $CONF || exit $? elif [ "$COMMON_STATE" = "installed" ] || [ "$COMMON_STATE" = "unpacked" ] ; then - [ -d /etc/apache2/conf.d/ ] && [ !
-L /etc/apache2/conf.d/$CONF.conf ] && ln -s ../conf-available/$CONF.conf /etc/apache2/conf.d/$CONF.conf + if [ -d /etc/apache2/conf.d/ ]; then + if [ -L /etc/apache2/conf.d/$CONF.conf ]; then + # it's a symlink, all is well + true + elif [ -e /etc/apache2/conf.d/$CONF.conf ]; then + # it's not a symlink, but exists, error + echo "ERROR: /etc/apache2/conf.d/$CONF.conf is not a symlink, please investigate" 1>&2 + else + # we need to create the symlink + ln -s ../conf-available/$CONF.conf /etc/apache2/conf.d/$CONF.conf + fi + fi fi ;; abort-upgrade|abort-remove|abort-deconfigure) diff -Nru check-mk-1.2.2p3/debian/check-mk-config-nagios3.postrm check-mk-1.2.6p12/debian/check-mk-config-nagios3.postrm --- check-mk-1.2.2p3/debian/check-mk-config-nagios3.postrm 2015-12-21 17:47:25.000000000 +0000 +++ check-mk-1.2.6p12/debian/check-mk-config-nagios3.postrm 2015-12-21 17:47:25.000000000 +0000 @@ -7,6 +7,16 @@ /etc/nagios3/conf.d/check_mk/check_mk_templates.cfg \ 1.1.10-2 -- "$@" +# older releases (1.1.12p7-1 at least) delivered the multisite conffile to +# /etc/apache2/conf.d. If it exists we need to move it to conf-available +# once it's moved then the code below will setup the conf.d symlink. +# This code wasn't in place until 1.2.6p4-2, so we need to check everything +# before that. +dpkg-maintscript-helper mv_conffile \ + /etc/apache2/conf.d/check_mk_multisite.cfg \ + /etc/apache2/conf-available/check_mk_multisite.cfg \ + 1.2.6p4-2 -- "$@" + if [ "$1" = "remove" ] || [ "$1" = "purge" ] ; then CONF="check-mk-multisite" COMMON_STATE=$(dpkg-query -f '${Status}' -W 'apache2.2-common' 2>/dev/null | awk '{print $3}' || true) diff -Nru check-mk-1.2.2p3/debian/check-mk-multisite.dirs check-mk-1.2.6p12/debian/check-mk-multisite.dirs --- check-mk-1.2.2p3/debian/check-mk-multisite.dirs 2015-12-21 17:47:25.000000000 +0000 +++ check-mk-1.2.6p12/debian/check-mk-multisite.dirs 2015-12-21 17:47:25.000000000 +0000 @@ -1,3 +1,3 @@ etc/check_mk -var/lib/check_mk/ -etc/check_mk/conf.d +var/lib/check_mk +etc/check_mk/multisite.d diff -Nru check-mk-1.2.2p3/debian/check-mk-multisite.examples check-mk-1.2.6p12/debian/check-mk-multisite.examples --- check-mk-1.2.2p3/debian/check-mk-multisite.examples 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/debian/check-mk-multisite.examples 2015-12-21 17:47:25.000000000 +0000 @@ -0,0 +1 @@ +debian/tmp/etc/check_mk/multisite.mk diff -Nru check-mk-1.2.2p3/debian/check-mk-multisite.postinst check-mk-1.2.6p12/debian/check-mk-multisite.postinst --- check-mk-1.2.2p3/debian/check-mk-multisite.postinst 2015-12-21 17:47:25.000000000 +0000 +++ check-mk-1.2.6p12/debian/check-mk-multisite.postinst 2015-12-21 17:47:25.000000000 +0000 @@ -22,7 +22,7 @@ # explicitly set permissions on some files setperm www-data nagios 4770 /var/lib/check_mk/web setperm www-data nagios 4770 /var/lib/check_mk/wato - setperm www-data nagios 4770 /etc/check_mk/conf.d + setperm www-data nagios 4770 /etc/check_mk/multisite.d ;; abort-upgrade|abort-remove|abort-deconfigure) ;; diff -Nru check-mk-1.2.2p3/debian/check-mk-server.dirs check-mk-1.2.6p12/debian/check-mk-server.dirs --- check-mk-1.2.2p3/debian/check-mk-server.dirs 2015-12-21 17:47:25.000000000 +0000 +++ check-mk-1.2.6p12/debian/check-mk-server.dirs 2015-12-21 17:47:25.000000000 +0000 @@ -1,2 +1,3 @@ etc/check_mk/conf.d var/lib/check_mk +var/lib/check_mk/log diff -Nru check-mk-1.2.2p3/debian/check-mk-server.install check-mk-1.2.6p12/debian/check-mk-server.install --- check-mk-1.2.2p3/debian/check-mk-server.install 2015-12-21 17:47:25.000000000 
+0000 +++ check-mk-1.2.6p12/debian/check-mk-server.install 2015-12-21 17:47:25.000000000 +0000 @@ -11,3 +11,4 @@ usr/share/check_mk/checks-man usr/share/check_mk/modules usr/share/check_mk/pnp-templates +usr/share/check_mk/notifications diff -Nru check-mk-1.2.2p3/debian/check-mk-server.links check-mk-1.2.6p12/debian/check-mk-server.links --- check-mk-1.2.2p3/debian/check-mk-server.links 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/debian/check-mk-server.links 2015-12-21 17:47:25.000000000 +0000 @@ -0,0 +1 @@ +usr/bin/check_mk usr/bin/cmk diff -Nru check-mk-1.2.2p3/debian/check-mk-server.postinst check-mk-1.2.6p12/debian/check-mk-server.postinst --- check-mk-1.2.2p3/debian/check-mk-server.postinst 2015-12-21 17:47:25.000000000 +0000 +++ check-mk-1.2.6p12/debian/check-mk-server.postinst 2015-12-21 17:47:25.000000000 +0000 @@ -39,6 +39,8 @@ setperm nagios nagios 0750 /var/lib/check_mk/counters/ test -d /var/lib/check_mk/logwatch/ || mkdir -p /var/lib/check_mk/logwatch/ setperm nagios www-data 0770 /var/lib/check_mk/logwatch/ + test -d /var/lib/check_mk/notify/ || mkdir -p /var/lib/check_mk/notify/ + setperm nagios www-data 0770 /var/lib/check_mk/notify/ ;; abort-upgrade|abort-remove|abort-deconfigure) ;; diff -Nru check-mk-1.2.2p3/debian/check-mk-server.postrm check-mk-1.2.6p12/debian/check-mk-server.postrm --- check-mk-1.2.2p3/debian/check-mk-server.postrm 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/debian/check-mk-server.postrm 2015-12-21 17:47:25.000000000 +0000 @@ -0,0 +1,10 @@ +#!/bin/sh + +set -e + +# cleanup var dirs on purge +if [ "$1" = "purge" ] ; then + [ -e /var/lib/check_mk ] && rm -rf /var/lib/check_mk || true +fi + +#DEBHELPER# diff -Nru check-mk-1.2.2p3/debian/check-mk-server.templates check-mk-1.2.6p12/debian/check-mk-server.templates --- check-mk-1.2.2p3/debian/check-mk-server.templates 2015-12-21 17:47:25.000000000 +0000 +++ check-mk-1.2.6p12/debian/check-mk-server.templates 2015-12-21 17:47:25.000000000 +0000 @@ -1,6 +1,6 @@ Template: check-mk-server/v1.2_upgrade_msg Type: note -Description: Convert or delete RRD graphs +_Description: Convert or delete RRD graphs The tcp_conn_stats check now also counts sockets in the state BOUND. It follows that the check now issues one more performance data value. Those who do not use PNP in the "MULTIPLE" mode need either to delete or convert their RRD graphs of those checks. Otherwise they won't be updated anymore. diff -Nru check-mk-1.2.2p3/debian/control check-mk-1.2.6p12/debian/control --- check-mk-1.2.2p3/debian/control 2015-12-21 17:47:25.000000000 +0000 +++ check-mk-1.2.6p12/debian/control 2015-12-21 17:47:25.000000000 +0000 @@ -2,7 +2,11 @@ Section: admin Priority: optional Maintainer: Debian Nagios Maintainer Group -Uploaders: Alexander Wirt , Jan Wagner , Sven Velt +Uploaders: Alexander Wirt , + Jan Wagner , + Thomas Bechtold , + Ilya Rassadin , + Matt Taggart Build-Depends: debhelper (>= 7), dpatch Standards-Version: 3.9.4 Vcs-Git: git://anonscm.debian.org/pkg-nagios/pkg-check-mk.git @@ -12,7 +16,7 @@ Architecture: any Depends: ${shlibs:Depends}, ${misc:Depends} Suggests: xinetd, python -Recommends: ethtool, smartmontools +Recommends: ethtool Description: general purpose nagios-plugin for retrieving data Check_mk adopts a new approach for collecting data from operating systems and network components. It obsoletes NRPE, check_by_ssh, NSClient and @@ -24,9 +28,8 @@ This package contains the agent part of check-mk.
Package: check-mk-agent-logwatch -Architecture: any -Depends: ${shlibs:Depends}, ${misc:Depends}, check-mk-agent -Suggests: xinetd, python +Architecture: all +Depends: ${shlibs:Depends}, ${misc:Depends}, check-mk-agent, python Description: general purpose nagios-plugin for retrieving data Check_mk adopts a new approach for collecting data from operating systems and network components. It obsoletes NRPE, check_by_ssh, NSClient and diff -Nru check-mk-1.2.2p3/debian/defaults.icinga check-mk-1.2.6p12/debian/defaults.icinga --- check-mk-1.2.2p3/debian/defaults.icinga 2015-12-21 17:47:25.000000000 +0000 +++ check-mk-1.2.6p12/debian/defaults.icinga 2015-12-21 17:47:25.000000000 +0000 @@ -1,39 +1,41 @@ -# This file has been created during setup of check_mk at Sun Oct 17 17:18:15 CEST 2010. +# This file has been created during setup of check_mk at Thu Sep 24 12:57:08 PDT 2015. # Do not edit this file. Also do not try to override these settings # in main.mk since some of them are hardcoded into several files -# during setup. +# during setup. # # If you need to change these settings, you have to re-run setup.sh # and enter new values when asked, or edit ~/.check_mk_setup.conf and # run ./setup.sh --yes. -check_mk_version = '1.2.2p2' +check_mk_version = '1.2.6p12' default_config_dir = '/etc/check_mk' check_mk_configdir = '/etc/check_mk/conf.d' -share_dir = '/usr/share/check_mk' +share_dir = '/usr/share/check_mk' checks_dir = '/usr/share/check_mk/checks' -notifications_dir = '/usr/share/check_mk/notifications' +notifications_dir = '/usr/share/check_mk/notifications' +inventory_dir = '/usr/share/check_mk/inventory' check_manpages_dir = '/usr/share/check_mk/checks-man' modules_dir = '/usr/share/check_mk/modules' locale_dir = '/usr/share/check_mk/locale' agents_dir = '/usr/share/check_mk/agents' -var_dir = '/var/lib/check_mk' lib_dir = '/usr/lib/check_mk' +var_dir = '/var/lib/check_mk' +log_dir = '/var/lib/check_mk/log' snmpwalks_dir = '/var/lib/check_mk/snmpwalks' autochecksdir = '/var/lib/check_mk/autochecks' precompiled_hostchecks_dir = '/var/lib/check_mk/precompiled' counters_directory = '/var/lib/check_mk/counters' -tcp_cache_dir = '/var/lib/check_mk/cache' -tmp_dir = '/var/lib/check_mk' +tcp_cache_dir = '/var/lib/check_mk/cache' +tmp_dir = '/var/lib/check_mk/tmp' logwatch_dir = '/var/lib/check_mk/logwatch' nagios_objects_file = '/etc/icinga/objects/check_mk/check_mk_objects.cfg' +rrd_path = '/var/lib/nagios/rrd' +rrddcached_socket = '/tmp/rrdcached.sock' nagios_command_pipe_path = '/var/lib/icinga/rw/icinga.cmd' -check_result_path = '/var/lib/icinga/spool/checkresults' +check_result_path = '/var/lib/icinga/spool/checkresults' nagios_status_file = '/var/lib/icinga/status.dat' nagios_conf_dir = '/etc/icinga/objects/check_mk' nagios_user = 'nagios' -nagios_url = '/icinga' -nagios_cgi_url = '/cgi-bin/icinga' logwatch_notes_url = '/check_mk/logwatch.py?host=%s&file=%s' www_group = 'nagios' nagios_config_file = '/etc/icinga/icinga.cfg' @@ -43,12 +45,10 @@ htpasswd_file = '/etc/icinga/htpasswd.users' nagios_auth_name = 'Nagios Access' web_dir = '/usr/share/check_mk/web' -checkmk_web_uri = '/check_mk' livestatus_unix_socket = '/var/lib/icinga/rw/live' livebackendsdir = '/usr/share/check_mk/livestatus' url_prefix = '/' pnp_url = '/pnp4nagios/' pnp_templates_dir = '/usr/share/check_mk/pnp-templates' -pnp_rraconf_dir = '/usr/share/check_mk/pnp-rraconf' doc_dir = '/usr/share/doc/check-mk-doc' check_mk_automation = 'sudo -u nagios /usr/bin/check_mk --automation' diff -Nru 
check-mk-1.2.2p3/debian/defaults.nagios3 check-mk-1.2.6p12/debian/defaults.nagios3 --- check-mk-1.2.2p3/debian/defaults.nagios3 2015-12-21 17:47:25.000000000 +0000 +++ check-mk-1.2.6p12/debian/defaults.nagios3 2015-12-21 17:47:25.000000000 +0000 @@ -1,39 +1,41 @@ -# This file has been created during setup of check_mk at Sun Oct 17 17:18:15 CEST 2010. +# This file has been created during setup of check_mk at Thu Sep 24 12:57:08 PDT 2015. # Do not edit this file. Also do not try to override these settings # in main.mk since some of them are hardcoded into several files -# during setup. +# during setup. # # If you need to change these settings, you have to re-run setup.sh # and enter new values when asked, or edit ~/.check_mk_setup.conf and # run ./setup.sh --yes. -check_mk_version = '1.2.2p2' +check_mk_version = '1.2.6p12' default_config_dir = '/etc/check_mk' check_mk_configdir = '/etc/check_mk/conf.d' -share_dir = '/usr/share/check_mk' +share_dir = '/usr/share/check_mk' checks_dir = '/usr/share/check_mk/checks' -notifications_dir = '/usr/share/check_mk/notifications' +notifications_dir = '/usr/share/check_mk/notifications' +inventory_dir = '/usr/share/check_mk/inventory' check_manpages_dir = '/usr/share/check_mk/checks-man' modules_dir = '/usr/share/check_mk/modules' locale_dir = '/usr/share/check_mk/locale' agents_dir = '/usr/share/check_mk/agents' -var_dir = '/var/lib/check_mk' lib_dir = '/usr/lib/check_mk' +var_dir = '/var/lib/check_mk' +log_dir = '/var/lib/check_mk/log' snmpwalks_dir = '/var/lib/check_mk/snmpwalks' autochecksdir = '/var/lib/check_mk/autochecks' precompiled_hostchecks_dir = '/var/lib/check_mk/precompiled' counters_directory = '/var/lib/check_mk/counters' -tcp_cache_dir = '/var/lib/check_mk/cache' -tmp_dir = '/var/lib/check_mk' +tcp_cache_dir = '/var/lib/check_mk/cache' +tmp_dir = '/var/lib/check_mk/tmp' logwatch_dir = '/var/lib/check_mk/logwatch' nagios_objects_file = '/etc/nagios3/conf.d/check_mk/check_mk_objects.cfg' +rrd_path = '/var/lib/nagios/rrd' +rrddcached_socket = '/tmp/rrdcached.sock' nagios_command_pipe_path = '/var/lib/nagios3/rw/nagios.cmd' -check_result_path = '/var/lib/nagios3/spool/checkresults' +check_result_path = '/var/lib/nagios3/spool/checkresults' nagios_status_file = '/var/cache/nagios3/status.dat' nagios_conf_dir = '/etc/nagios3/conf.d' nagios_user = 'nagios' -nagios_url = '/nagios3' -nagios_cgi_url = '/cgi-bin/nagios3' logwatch_notes_url = '/check_mk/logwatch.py?host=%s&file=%s' www_group = 'nagios' nagios_config_file = '/etc/nagios3/nagios.cfg' @@ -43,12 +45,10 @@ htpasswd_file = '/etc/nagios3/htpasswd.users' nagios_auth_name = 'Nagios Access' web_dir = '/usr/share/check_mk/web' -checkmk_web_uri = '/check_mk' livestatus_unix_socket = '/var/lib/nagios3/rw/live' livebackendsdir = '/usr/share/check_mk/livestatus' url_prefix = '/' pnp_url = '/pnp4nagios/' pnp_templates_dir = '/usr/share/check_mk/pnp-templates' -pnp_rraconf_dir = '/usr/share/check_mk/pnp-rraconf' doc_dir = '/usr/share/doc/check-mk-doc' check_mk_automation = 'sudo -u nagios /usr/bin/check_mk --automation' diff -Nru check-mk-1.2.2p3/debian/po/POTFILES.in check-mk-1.2.6p12/debian/po/POTFILES.in --- check-mk-1.2.2p3/debian/po/POTFILES.in 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/debian/po/POTFILES.in 2015-12-21 17:47:25.000000000 +0000 @@ -0,0 +1 @@ +[type: gettext/rfc822deb] templates diff -Nru check-mk-1.2.2p3/debian/README.source check-mk-1.2.6p12/debian/README.source --- check-mk-1.2.2p3/debian/README.source 2015-12-21 17:47:25.000000000 +0000 +++ 
check-mk-1.2.6p12/debian/README.source 2015-12-21 17:47:25.000000000 +0000 @@ -1,2 +1,44 @@ -We use dpatch for patch handling inside our nagios related packages. Please see -/usr/share/doc/dpatch/README.source.gz (if you have installed dpatch) for documentation about dpatch. +dpatch +------ +We use dpatch for patch handling inside our nagios-related packages. +Please see /usr/share/doc/dpatch/README.source.gz (if you have +installed dpatch) for documentation about dpatch. + +upstream vs debian install paths +-------------------------------- +This is a list of the differences between an upstream check_mk install +using setup.sh and the files and directories the debian packages +create. +Last reviewed 2015-08-21 by taggart@debian.org. + +upstream -> debian + +/etc/apache2/conf.d/zzz_check_mk.conf -> check-mk-config-* postinst creates + /etc/apache2/conf-available/check-mk-multisite.conf + +(no logwatch.cfg) -> + /etc/check_mk/logwatch.cfg provided by check-mk-agent-logwatch + +/usr/share/check_mk/agents/xinetd.conf -> /etc/xinetd.d/check_mk + +/etc/nagios/auth.serials -> not needed + +/usr/share/check_mk/modules/defaults -> provided by check-mk-config-* +/usr/share/check_mk/web/htdocs/defaults.py -> symlink to above, provided + by check-mk-config-* + +/usr/share/check_mk/check_mk_templates.cfg -> not needed +/etc/nagios/objects/check_mk_templates.cfg -> symlink to above, not needed + +/usr/share/check_mk/agents/ -> we just provide check_mk_agent/cmk/mk-job/mkp +/usr/share/check_mk/agents/plugins/ -> don't provide, see #796453 +/usr/share/check_mk/inventory/ -> don't provide, see #796455 + +/usr/share/doc/check_mk/ -> split up by package name +/usr/share/doc/check_mk/checks/ -> /usr/share/check_mk/checks-man in -doc + +/usr/share/check_mk/locale/ -> unneeded by default +/var/lib/check_mk/notify/ -> automatically created with the correct + ownership and permissions + +/etc/check_mk/multisite.mk -> /usr/share/doc/check-mk-multisite/examples/ diff -Nru check-mk-1.2.2p3/debian/rules check-mk-1.2.6p12/debian/rules --- check-mk-1.2.2p3/debian/rules 2015-12-21 17:47:25.000000000 +0000 +++ check-mk-1.2.6p12/debian/rules 2015-12-21 17:47:25.000000000 +0000 @@ -59,10 +59,27 @@ dh_installdebconf mkdir -p debian/tmp DESTDIR=debian/tmp ./setup.sh --yes - #don't use the statically linked binary.
+ ## clean up binaries shipped by upstream + # upstream agent deb and rpm + rm -rf debian/tmp/usr/share/check_mk/agent/check-mk-agent*.deb + rm -rf debian/tmp/usr/share/check_mk/agent/check-mk-agent*.rpm + # java jar and classes (we don't rebuild these, help if you want them) + rm -rf debian/tmp/usr/share/doc/check_mk/jasperreports + # Windows binaries (we don't rebuild these, help if you want them) + rm -rf debian/tmp/usr/share/check_mk/agents/windows + rm -rf debian/tmp/usr/share/doc/check_mk/treasures/windows_msi + # fsc-celsius 'treasure' tarball that includes binaries + rm -rf debian/tmp/usr/share/doc/check_mk/treasures/fsc-celsius* + # agent_modbus + rm debian/tmp/usr/share/doc/check_mk/treasures/modbus/agent_modbus + # waitmax, build our own rm debian/tmp/usr/share/check_mk/agents/waitmax - gcc -s -o debian/tmp/usr/share/check_mk/agents/waitmax debian/tmp/usr/share/check_mk/agents/waitmax.c + # upstream stopped including waitmax.c, so we carry it in debian/ + gcc -s -o debian/tmp/usr/share/check_mk/agents/waitmax debian/waitmax.c cp -a livestatus.src/debian/tmp/* debian/tmp/ + # z_os waitmax + rm debian/tmp/usr/share/check_mk/agents/z_os/waitmax + ## config files # We need 2 different "defaults" files for Icinga and Nagios3 rm debian/tmp/usr/share/check_mk/modules/defaults rm debian/tmp/usr/share/check_mk/web/htdocs/defaults.py @@ -81,11 +98,19 @@ # Prepare agent files cp debian/tmp/usr/share/check_mk/agents/check_mk_agent.linux debian/check-mk-agent/usr/bin/check_mk_agent chmod +x debian/check-mk-agent/usr/bin/check_mk_agent + # mkp wrapper script + mkdir -p debian/check-mk-server/usr/bin + cp debian/tmp/usr/bin/mkp debian/check-mk-server/usr/bin/ + chmod +x debian/check-mk-server/usr/bin/mkp + # xinetd: provide config, but disabled by default cp debian/tmp/usr/share/check_mk/agents/xinetd.conf debian/check-mk-agent/etc/xinetd.d/check_mk - # xinetd: Disable service sed -i 's#disable\s*=\s*no#disable = yes#' debian/check-mk-agent/etc/xinetd.d/check_mk # move checks manpages (to be installed in -server) mv debian/tmp/usr/share/doc/check_mk/checks debian/tmp/usr/share/check_mk/checks-man + # remove installed ChangeLog to avoid duplicate + rm debian/tmp/usr/share/doc/check_mk/ChangeLog + # remove installed COPYING, redundant + rm debian/tmp/usr/share/doc/check_mk/COPYING # Build architecture-independent files here. binary-indep: build install diff -Nru check-mk-1.2.2p3/debian/waitmax.c check-mk-1.2.6p12/debian/waitmax.c --- check-mk-1.2.2p3/debian/waitmax.c 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/debian/waitmax.c 2015-12-21 17:47:25.000000000 +0000 @@ -0,0 +1,157 @@ +// +------------------------------------------------------------------+ +// | ____ _ _ __ __ _ __ | +// | / ___| |__ ___ ___| | __ | \/ | |/ / | +// | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +// | | |___| | | | __/ (__| < | | | | . \ | +// | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +// | | +// | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +// +------------------------------------------------------------------+ +// +// This file is part of Check_MK. +// The official homepage is at http://mathias-kettner.de/check_mk. +// +// check_mk is free software; you can redistribute it and/or modify it +// under the terms of the GNU General Public License as published by +// the Free Software Foundation in version 2. 
check_mk is distributed +// in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +// out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +// PARTICULAR PURPOSE. See the GNU General Public License for more de- +// tails. You should have received a copy of the GNU General Public +// License along with GNU Make; see the file COPYING. If not, write +// to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +// Boston, MA 02110-1301 USA. + +#include <getopt.h> +#include <stdio.h> +#include <stdlib.h> +#include <string.h> +#include <signal.h> +#include <errno.h> +#include <unistd.h> +#include <sys/types.h> +#include <sys/wait.h> + +/* macros for using write(2) instead of fprintf(stderr, ...) */ +#define out(text) write(2, text, strlen(text)); + +int g_pid; +int g_timeout = 0; +int g_signum = 15; + +struct option long_options[] = { + { "version" , no_argument, 0, 'V' }, + { "help" , no_argument, 0, 'h' }, + { "signal" , required_argument, 0, 's' }, + { 0, 0, 0, 0 } }; + +void version() +{ + out("waitmax version 1.1\n" + "Copyright Mathias Kettner 2008\n" + "This is free software; see the source for copying conditions. There is NO\n" + "warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n"); + exit(0); +} + + +void usage() +{ + out("Usage: waitmax [-s SIGNUM] MAXTIME PROGRAM [ARGS...]\n" + "\n" + "Execute PROGRAM as a subprocess. If PROGRAM does not exit before MAXTIME\n" + "seconds, it will be killed with SIGTERM or an alternative signal.\n" + "\n" + " -s, --signal SIGNUM kill with SIGNUM on timeout\n" + " -h, --help this help\n" + " -V, --version show version and exit\n\n"); + exit(1); +} + + +void signalhandler(int signum) +{ + if (0 == kill(g_pid, g_signum)) + g_timeout = 1; +} + + +int main(int argc, char **argv) +{ + int indexptr=0; + int ret; + setenv("POSIXLY_CORRECT", "true", 0); + while (0 <= (ret = getopt_long(argc, argv, "Vhs:", long_options, &indexptr))) { + switch (ret) + { + case 'V': + version(); + + case 'h': + usage(); + + case 's': + g_signum = strtoul(optarg, 0, 10); + if (g_signum < 1 || g_signum > 32) { + out("Signal number must be between 1 and 32.\n"); + exit(1); + } + break; + + default: + usage(); + exit(1); + break; + } + } + + if (optind + 1 >= argc) usage(); + + int maxtime = atoi(argv[optind]); + if (maxtime <= 0) usage(); + + g_pid = fork(); + if (g_pid == 0) { + signal(SIGALRM, signalhandler); + execvp(argv[optind + 1], argv + optind + 1); + out("Cannot execute "); + out(argv[optind + 1]); + out(": "); + out(strerror(errno)); + out("\n"); + exit(253); + } + + signal(SIGALRM, signalhandler); + alarm(maxtime); + int status; + while (1) { + int pid = waitpid(g_pid, &status, 0); + if (pid <= 0) { + if (errno == EINTR) continue; // interrupted by alarm + else + out("Strange: waitpid() fails: "); + out(strerror(errno)); + out("\n"); + exit(1); + } + else break; + } + + if (WIFEXITED(status)) { + int exitcode = WEXITSTATUS(status); + return exitcode; + } + else if (WIFSIGNALED(status)) { + int signum = WTERMSIG(status); + if (g_timeout) + return 255; + else + return 128 + signum; + } + else { + out("Strange: program did neither exit nor was signalled.\n"); + return 254; + } +} diff -Nru check-mk-1.2.2p3/debug check-mk-1.2.6p12/debug --- check-mk-1.2.2p3/debug 2013-11-05 09:23:08.000000000 +0000 +++ check-mk-1.2.6p12/debug 2015-06-24 09:48:37.000000000 +0000 @@ -14,7 +14,7 @@ # or notification_logging=2 in main.mk (or via WATO in the # global settings: Notification -> Debug notifications), then # you will see this output in the notification log file.
-# Under OMD this file is in ~/var/check_mk/notify/notify.log. +# Under OMD this file is in ~/var/log/notify.log. # Please refer to the official documentation for more details. diff -Nru check-mk-1.2.2p3/decru_cpu check-mk-1.2.6p12/decru_cpu --- check-mk-1.2.2p3/decru_cpu 2013-11-05 09:23:07.000000000 +0000 +++ check-mk-1.2.6p12/decru_cpu 2015-06-24 09:48:36.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. @@ -37,10 +37,16 @@ ( "system", "%.3f" % system), ( "interrupt", "%.3f" % interrupt) ] - return (0, "OK - user %.0f%%, sys %.0f%%, interrupt %.0f%%, idle %.0f%%" + return (0, "user %.0f%%, sys %.0f%%, interrupt %.0f%%, idle %.0f%%" % (user, system, interrupt, idle), perfdata) -check_info['decru_cpu'] = (check_decru_cpu, "CPU utilization", 1, inventory_decru_cpu) -snmp_info['decru_cpu'] = ( ".1.3.6.1.4.1.12962.1.1", [ 8 ] ) -snmp_scan_functions['decru_cpu'] = \ - lambda oid: "datafort" in oid('.1.3.6.1.2.1.1.1.0').lower() + +check_info["decru_cpu"] = { + 'check_function': check_decru_cpu, + 'inventory_function': inventory_decru_cpu, + 'service_description': 'CPU utilization', + 'has_perfdata': True, + 'snmp_info': ('.1.3.6.1.4.1.12962.1.1', [8]), + 'snmp_scan_function': \ + lambda oid: "datafort" in oid('.1.3.6.1.2.1.1.1.0').lower(), +} diff -Nru check-mk-1.2.2p3/decru_fans check-mk-1.2.6p12/decru_fans --- check-mk-1.2.2p3/decru_fans 2013-11-05 09:23:07.000000000 +0000 +++ check-mk-1.2.6p12/decru_fans 2015-06-24 09:48:36.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. 
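The decru_cpu hunk above shows the conversion that runs through this whole release: the positional check_info tuple (check function, service description, has_perfdata flag, inventory function) and the side tables snmp_info, snmp_scan_functions and checkgroup_of are folded into a single dictionary per check, with the 1/0 perfdata flag becoming an explicit 'has_perfdata': True entry. A minimal sketch of the new-style registration, assuming it lives in a Check_MK checks file where check_info is provided by the framework (the check name example_check is hypothetical, not part of the patch):

    def inventory_example_check(info):
        # one service per row of the agent section
        return [ (line[0], None) for line in info ]

    def check_example_check(item, _no_params, info):
        for line in info:
            if line[0] == item:
                # no "OK -" prefix: the core now renders the state word itself
                return (0, "value is %s" % line[1])
        return (3, "item %s not found in agent output" % item)

    check_info["example_check"] = {
        'check_function':      check_example_check,
        'inventory_function':  inventory_example_check,
        'service_description': 'Example %s',
    }

The dropped "OK -"/"CRIT -"/"UNKNOWN -" prefixes in the converted checks follow from the same change: the monitoring core prepends the state itself, so hand-written prefixes would be duplicated.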
@@ -27,26 +27,31 @@ decru_fan_default_levels = (8000, 8400) def inventory_decru_fans(info): - return [ (l[0], l[1] + " RPM", "decru_fan_default_levels") for l in info ] + return [ (l[0], "decru_fan_default_levels") for l in info ] def check_decru_fans(item, params, info): - for fan in info: - if fan[0] == item: - rpm = int(fan[1]) + for fan_name, rpm in info: + if fan_name == item: + rpm = int(rpm) crit, warn = params - perfdata = [("rpm", rpm, 0, "", warn, crit)] - infotxt = " - %d RPM" % rpm - if rpm < crit: - return (2, "CRIT" + infotxt, perfdata) - elif rpm < warn: - return (1, "WARN" + infotxt, perfdata) + perfdata = [("rpm", rpm, 0, None, warn, crit)] + infotxt = "%d RPM" % rpm + if rpm <= crit: + return (2, infotxt, perfdata) + elif rpm <= warn: + return (1, infotxt, perfdata) else: - return (0, "OK" + infotxt, perfdata) + return (0, infotxt, perfdata) - return (3, "UNKNOWN - fan not found") + return (3, "fan not found") -check_info['decru_fans'] = (check_decru_fans, "FAN %s", 1, inventory_decru_fans) -snmp_info['decru_fans'] = ( ".1.3.6.1.4.1.12962.1.2.3.1", [ 2, 3 ] ) -snmp_scan_functions['decru_fans'] = \ - lambda oid: "datafort" in oid('.1.3.6.1.2.1.1.1.0').lower() +check_info["decru_fans"] = { + 'check_function': check_decru_fans, + 'inventory_function': inventory_decru_fans, + 'service_description': 'FAN %s', + 'has_perfdata': True, + 'snmp_info': ('.1.3.6.1.4.1.12962.1.2.3.1', [2, 3]), + 'snmp_scan_function': \ + lambda oid: "datafort" in oid('.1.3.6.1.2.1.1.1.0').lower(), +} diff -Nru check-mk-1.2.2p3/decru_perf check-mk-1.2.6p12/decru_perf --- check-mk-1.2.2p3/decru_perf 2013-11-05 09:23:07.000000000 +0000 +++ check-mk-1.2.6p12/decru_perf 2015-06-24 09:48:36.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. @@ -50,12 +50,18 @@ for perf in info: if perf[0] == index: rate = int(perf[1]) - return (0, "OK - current rate is %d/s" % rate, [("rate", rate)]) + return (0, "current rate is %d/s" % rate, [("rate", rate)]) - return (3, "UNKNOWN - item not found") + return (3, "item not found") -check_info['decru_perf'] = (check_decru_perf, "COUNTER %s", 1, inventory_decru_perf) -snmp_info['decru_perf'] = ( ".1.3.6.1.4.1.12962.1.1.2.1.1", [ 1, 2 ] ) -snmp_scan_functions['decru_perf'] = \ - lambda oid: "datafort" in oid('.1.3.6.1.2.1.1.1.0').lower() + +check_info["decru_perf"] = { + 'check_function': check_decru_perf, + 'inventory_function': inventory_decru_perf, + 'service_description': 'COUNTER %s', + 'has_perfdata': True, + 'snmp_info': ('.1.3.6.1.4.1.12962.1.1.2.1.1', [1, 2]), + 'snmp_scan_function': \ + lambda oid: "datafort" in oid('.1.3.6.1.2.1.1.1.0').lower(), +} diff -Nru check-mk-1.2.2p3/decru_power check-mk-1.2.6p12/decru_power --- check-mk-1.2.2p3/decru_power 2013-11-05 09:23:07.000000000 +0000 +++ check-mk-1.2.6p12/decru_power 2015-06-24 09:48:36.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. 
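Two details of the decru_fans rewrite are easy to miss: inventory no longer bakes the discovered RPM into the item, and the level comparison became inclusive (rpm <= crit instead of rpm < crit). Note also that the parameter tuple is ordered (crit, warn), the reverse of the usual (warn, crit) convention. Under the classic configuration mechanism the defaults could be overridden roughly as below (a sketch; the numbers and the service pattern are placeholders):

    # in main.mk: redefine the module-level default ...
    decru_fan_default_levels = (7000, 7500)   # (crit, warn), mind the order

    # ... or, assuming the classic check_parameters rule syntax,
    # set levels per host/service:
    check_parameters = [
        ( (7000, 7500), ALL_HOSTS, [ "FAN " ] ),
    ]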
@@ -31,14 +31,19 @@ for power in info: if power[0] == item: if power[1] != "1": - return (2, "CRIT - power supply in state %s" % power[1]) + return (2, "power supply in state %s" % power[1]) else: - return (0, "OK - power supply ok") + return (0, "power supply ok") - return (3, "UNKNOWN - power supply not found") + return (3, "power supply not found") -check_info['decru_power'] = (check_decru_power, "POWER %s", 0, inventory_decru_power) -snmp_info['decru_power'] = ( ".1.3.6.1.4.1.12962.1.2.6.1", [ 2, 3 ] ) -snmp_scan_functions['decru_power'] = \ - lambda oid: "datafort" in oid('.1.3.6.1.2.1.1.1.0').lower() + +check_info["decru_power"] = { + 'check_function': check_decru_power, + 'inventory_function': inventory_decru_power, + 'service_description': 'POWER %s', + 'snmp_info': ('.1.3.6.1.4.1.12962.1.2.6.1', [2, 3]), + 'snmp_scan_function': \ + lambda oid: "datafort" in oid('.1.3.6.1.2.1.1.1.0').lower(), +} diff -Nru check-mk-1.2.2p3/decru_temps check-mk-1.2.6p12/decru_temps --- check-mk-1.2.2p3/decru_temps 2013-11-05 09:23:07.000000000 +0000 +++ check-mk-1.2.6p12/decru_temps 2015-06-24 09:48:36.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. @@ -25,28 +25,24 @@ # Boston, MA 02110-1301 USA. def inventory_decru_temps(info): - return [ (l[0], (int(to_celsius(int(l[1]))) + 4.0, - int(to_celsius(int(l[1]))) + 8.0 ) ) for l in info ] + return [ (l[0], (int(fahrenheit_to_celsius(int(l[1]))) + 4.0, + int(fahrenheit_to_celsius(int(l[1]))) + 8.0 ) ) for l in info ] def check_decru_temps(item, params, info): - for temp in info: - if temp[0] == item: - temp = to_celsius(int(temp[1])) - warn, crit = params - perfdata = [("temp", temp, "", "", warn, crit)] - infotxt = " - %d C (levels at %d/%d)" % (temp, warn, crit) - if temp >= crit: - return (2, "CRIT" + infotxt, perfdata) - elif temp >= warn: - return (1, "WARN" + infotxt, perfdata) - else: - return (0, "OK" + infotxt, perfdata) + for name, rawtemp in info: + if name == item: + temp = fahrenheit_to_celsius(int(rawtemp)) + return check_temperature(temp, params) - return (3, "UNKNOWN - sensor not found") -check_info['decru_temps'] = (check_decru_temps, "Temperature %s", 1, inventory_decru_temps) - -snmp_info['decru_temps'] = ( ".1.3.6.1.4.1.12962.1.2.4.1", [ 2, 3 ] ) -snmp_scan_functions['decru_temps'] = \ - lambda oid: "datafort" in oid('.1.3.6.1.2.1.1.1.0').lower() -checkgroup_of["decru_temps"] = "hw_temperature" +check_info["decru_temps"] = { + 'check_function': check_decru_temps, + 'inventory_function': inventory_decru_temps, + 'service_description': 'Temperature %s', + 'has_perfdata': True, + 'snmp_info': ('.1.3.6.1.4.1.12962.1.2.4.1', [2, 3]), + 'snmp_scan_function': \ + lambda oid: "datafort" in oid('.1.3.6.1.2.1.1.1.0').lower(), + 'group': 'hw_temperature', + 'includes': [ 'temperature.include' ], +} diff -Nru check-mk-1.2.2p3/dell_chassis_fans check-mk-1.2.6p12/dell_chassis_fans --- check-mk-1.2.2p3/dell_chassis_fans 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/dell_chassis_fans 2015-06-24 09:48:36.000000000 +0000 @@ -0,0 +1,57 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | 
|/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. + +def inventory_dell_chassis_fans(info): + if info: + return [ ( None, None ) ] + +def check_dell_chassis_fans(_no_item, _no_params, info): + + state_table = { + "1" : ( "other, ", 1 ), + "2" : ( "unknown, ", 1 ), + "3" : ( "normal", 0 ), + "4" : ( "nonCritical, ", 1 ), + "5" : ( "Critical, ", 2 ), + "6" : ( "NonRecoverable, ", 2 ), + } + infotext, state = state_table.get(info[0][0], ("unknown state", 2 )) + + infotext = "Status: " + infotext + + return state, infotext + +check_info["dell_chassis_fans"] = { + "check_function" : check_dell_chassis_fans, + "inventory_function" : inventory_dell_chassis_fans, + "service_description" : "Overall Fan Status", + "has_perfdata" : False, + "snmp_info" : ( ".1.3.6.1.4.1.674.10892.2.3.1", [ + "6", # drsFanCurrStatus + ]), + "snmp_scan_function" : lambda oid: oid('.1.3.6.1.2.1.1.2.0') == ".1.3.6.1.4.1.674.10892.2", +} + diff -Nru check-mk-1.2.2p3/dell_chassis_io check-mk-1.2.6p12/dell_chassis_io --- check-mk-1.2.2p3/dell_chassis_io 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/dell_chassis_io 2015-06-24 09:48:36.000000000 +0000 @@ -0,0 +1,57 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. 
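dell_chassis_fans above establishes the pattern for the whole dell_chassis_* family added below: read one drs*CurrStatus value over SNMP, translate it through a six-entry table, and report "Status: ...". Condensed into a sketch (the helper name is invented; the table values are taken from the patch):

    DELL_DRS_STATES = {
        "1": ("other", 1),
        "2": ("unknown", 1),
        "3": ("normal", 0),
        "4": ("nonCritical", 1),
        "5": ("Critical", 2),
        "6": ("NonRecoverable", 2),
    }

    def dell_drs_status(value):
        # map a drs*CurrStatus value to (nagios state, text)
        text, state = DELL_DRS_STATES.get(value, ("unknown state", 3))
        return state, "Status: " + text

One inconsistency between the siblings is worth knowing: dell_chassis_fans falls back to state 2 for an unrecognized value, while dell_chassis_io and dell_chassis_kvm fall back to 3 (UNKNOWN).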
+ +def inventory_dell_chassis_io(info): + if info: + return [ ( None, None ) ] + +def check_dell_chassis_io(_no_item, _no_params, info): + + state_table = { + "1" : ( "other, ", 1 ), + "2" : ( "unknown, ", 1 ), + "3" : ( "normal", 0 ), + "4" : ( "nonCritical, ", 1 ), + "5" : ( "Critical, ", 2 ), + "6" : ( "NonRecoverable, ", 2 ), + } + infotext, state = state_table.get(info[0][0], ("unknown state", 3 )) + + infotext = "Status: " + infotext + + return state, infotext + +check_info["dell_chassis_io"] = { + "check_function" : check_dell_chassis_io, + "inventory_function" : inventory_dell_chassis_io, + "service_description" : "Overall IO Module Status", + "has_perfdata" : False, + "snmp_info" : ( ".1.3.6.1.4.1.674.10892.2.3.1", [ + "2", # drsIOMCurrStatus + ]), + "snmp_scan_function" : lambda oid: oid('.1.3.6.1.2.1.1.2.0') == ".1.3.6.1.4.1.674.10892.2", +} + diff -Nru check-mk-1.2.2p3/dell_chassis_kvm check-mk-1.2.6p12/dell_chassis_kvm --- check-mk-1.2.2p3/dell_chassis_kvm 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/dell_chassis_kvm 2015-06-24 09:48:36.000000000 +0000 @@ -0,0 +1,61 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. 
+ + +def inventory_dell_chassis_kvm(info): + if info: + return [ ( None, None ) ] + +def check_dell_chassis_kvm(_no_item, _no_params, info): + + state_table = { + "1" : ( "other, ", 1 ), + "2" : ( "unknown, ", 1 ), + "3" : ( "normal", 0 ), + "4" : ( "nonCritical, ", 1 ), + "5" : ( "Critical, ", 2 ), + "6" : ( "NonRecoverable, ", 2 ), + } + infotext, state = state_table.get(info[0][0], ("unknown state", 3 )) + + infotext = "Status: " + infotext + + infotext += ", Firmware: %s" % info[0][1] + + return state, infotext + +check_info["dell_chassis_kvm"] = { + "check_function" : check_dell_chassis_kvm, + "inventory_function" : inventory_dell_chassis_kvm, + "service_description" : "Overall KVM Status", + "has_perfdata" : False, + "snmp_info" : ( ".1.3.6.1.4.1.674.10892.2", [ + "3.1.2", # drsKVMCurrStatus + "1.2.2", # drsiKVMFirmwareVersion + ]), + "snmp_scan_function" : lambda oid: oid('.1.3.6.1.2.1.1.2.0') == ".1.3.6.1.4.1.674.10892.2", +} + diff -Nru check-mk-1.2.2p3/dell_chassis_power check-mk-1.2.6p12/dell_chassis_power --- check-mk-1.2.2p3/dell_chassis_power 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/dell_chassis_power 2015-06-24 09:48:36.000000000 +0000 @@ -0,0 +1,65 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. 
+ + +def inventory_dell_chassis_power(info): + if info: + return [ ( None, None ) ] + +def check_dell_chassis_power(item, _no_params, info): + status, PotentialPower, MaxPowerSpec, power, current = info[0] + state_table = { + "1" : ( "other, ", 1 ), + "2" : ( "unknown, ", 1 ), + "3" : ( "", 0 ), + "4" : ( "nonCritical, ", 1 ), + "5" : ( "Critical, ", 2 ), + "6" : ( "NonRecoverable, ", 2 ), + } + infotext, state = state_table.get(status, ("unknown state, ", 3 )) + + infotext += "Power: %.1f W, PotentialPower: %.1f W, MaxPower: %.1f W, Current: %.1f A" \ + % ( savefloat(power), savefloat(PotentialPower), savefloat(MaxPowerSpec), savefloat(current) ) + + perfdata = [ ( "power" , power+"Watt", 0, PotentialPower , "", MaxPowerSpec )] + + return state, infotext, perfdata + +check_info["dell_chassis_power"] = { + "check_function" : check_dell_chassis_power, + "inventory_function" : inventory_dell_chassis_power, + "service_description" : "Chassis Power", + "has_perfdata" : True, + "snmp_info" : ( ".1.3.6.1.4.1.674.10892.2", [ + "3.1.5.0", # drsPowerCurrStatus 0 + "4.1.1.2.1", # drsPotentialPower 1 + "4.1.1.4.1", # drsMaxPowerSpecification 2 + "4.1.1.13.1", # drsWattsReading 3 + "4.1.1.14.1", # drsAmpsReading 4 + ]), + "snmp_scan_function" : lambda oid: oid('.1.3.6.1.2.1.1.2.0') == ".1.3.6.1.4.1.674.10892.2", +} + diff -Nru check-mk-1.2.2p3/dell_chassis_powersupplies check-mk-1.2.6p12/dell_chassis_powersupplies --- check-mk-1.2.2p3/dell_chassis_powersupplies 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/dell_chassis_powersupplies 2015-06-24 09:48:36.000000000 +0000 @@ -0,0 +1,64 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. 
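dell_chassis_power above passes every SNMP value through savefloat(), a Check_MK helper that never raises. For readers outside the Check_MK tree it behaves roughly like this sketch (an approximation, not the module's actual code):

    def savefloat(value):
        # tolerant conversion used throughout Check_MK checks:
        # unparseable input yields 0.0 instead of an exception
        try:
            return float(value)
        except Exception:
            return 0.0

Note also that the check's perfdata value is built by string concatenation (power + "Watt", with power still a string), so the unit travels inside the value field of the perfdata tuple.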
+ +def inventory_dell_chassis_powersupplies(info): + inventory = [] + for line in info: + item = re.sub("\.", "-", line[0]) + inventory.append( ( item, None ) ) + return inventory + +def check_dell_chassis_powersupplies(item, _no_params, info): + for oid_end, voltage, current, maxpower in info: + if item == re.sub("\.", "-", oid_end): + power = savefloat(voltage) * savefloat(current) + state = 0 + infotext = "" + infotext += "current/max Power: %.2f / %s, Current: %s, Voltage: %s" \ + % ( power, maxpower, current, voltage ) + perfdata = [ ( "power", str(power)+"Watt", 0, maxpower , "", maxpower ) ] + + if savefloat(current) == 0: + infotext = infotext + " - device in standby" + + return state, infotext, perfdata + + return 3, "unknown power supply" + +check_info["dell_chassis_powersupplies"] = { + "check_function" : check_dell_chassis_powersupplies, + "inventory_function" : inventory_dell_chassis_powersupplies, + "service_description" : "Power Supply %s", + "has_perfdata" : True, + "snmp_info" : ( ".1.3.6.1.4.1.674.10892.2.4.2.1", [ + OID_END, + "5", # drsPSUVoltsReading 0 + "6", # drsPSUAmpsReading 1 + "7", # drsPSUWattsReading 2 + ]), + "snmp_scan_function" : lambda oid: oid('.1.3.6.1.2.1.1.2.0') == ".1.3.6.1.4.1.674.10892.2", +} + diff -Nru check-mk-1.2.2p3/dell_chassis_slots check-mk-1.2.6p12/dell_chassis_slots --- check-mk-1.2.2p3/dell_chassis_slots 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/dell_chassis_slots 2015-06-24 09:48:36.000000000 +0000 @@ -0,0 +1,69 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. 
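dell_chassis_powersupplies derives its item from OID_END, the row's trailing OID part, with dots replaced by dashes, so table row 1.1 becomes the service "Power Supply 1-1". The transformation is a one-liner:

    import re
    re.sub(r"\.", "-", "1.1")    # -> '1-1' ("1.1".replace(".", "-") would do the same)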
+ +def inventory_dell_chassis_slots(info): + inventory = [] + for line in info: + number = line[3] + if saveint(number) in ( 1, 2, 3, 4, 5, 6, 7, 8, 9 ): + number = "0"+number + if line[0] != "1" and line[2] != "N/A": + inventory.append( ( number, None ) ) + return inventory + +def check_dell_chassis_slots(item, _no_params, info): + for status, service_tag, name, number in info: + if saveint(number) in ( 1, 2, 3, 4, 5, 6, 7, 8, 9 ): + number = "0"+number + if item == number: + #absent = 1,none = 2,basic = 3,off = 4, + state_table = { + "1" : ( "absent", 0 ), + "2" : ( "none", 1 ), + "3" : ( "basic", 0 ), + "4" : ( "off", 1 ), + } + state_txt, state = state_table.get(status, ("unknown state, ", 3 )) + infotext = "Status: %s, Name: %s, ServiceTag: %s" % ( state_txt, name, service_tag ) + + return state, infotext + + return 3, "unknown slot" + +check_info["dell_chassis_slots"] = { + "check_function" : check_dell_chassis_slots, + "inventory_function" : inventory_dell_chassis_slots, + "service_description" : "Slot %s", + "has_perfdata" : False, + "snmp_info" : ( ".1.3.6.1.4.1.674.10892.2.5.1.1", [ + #"1", # drsServerIndex + "2", # drsServerMonitoringCapable + "3", # drsServerServiceTag + "4", # drsServerSlotName + "5", # drsServerSlotNumber + ]), + "snmp_scan_function" : lambda oid: oid('.1.3.6.1.2.1.1.2.0') == ".1.3.6.1.4.1.674.10892.2", +} diff -Nru check-mk-1.2.2p3/dell_chassis_status check-mk-1.2.6p12/dell_chassis_status --- check-mk-1.2.2p3/dell_chassis_status 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/dell_chassis_status 2015-09-21 10:59:54.000000000 +0000 @@ -0,0 +1,77 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. 
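dell_chassis_slots zero-pads single-digit slot numbers so the services sort naturally (Slot 01 ... Slot 16). The saveint(number) in (1, ..., 9) test plus string prepend is equivalent to a plain zfill, assuming the SNMP value is an unpadded numeric string:

    "7".zfill(2)     # -> '07'
    "12".zfill(2)    # -> '12'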
+ +dell_chassis_status_info = ( ".1.3.6.1.4.1.674.10892.2", [ + "1.1.7", # drsProductURL 0 + "1.1.9", # drsProductChassisLocation 1 + "1.1.10", # drsProductChassisName 2 + "1.1.11", # drsSystemServiceTag 3 + "1.1.15", # drsProductChassisDataCenter 4 + "1.2.1", # drsFirmwareVersion 5 + "2.1", # drsGlobalSystemStatus 6 + ]) + +def inventory_dell_chassis_status(info): + if info: + return [ ( None, None ) ] + +def check_dell_chassis_status(item, _no_params, info): + di = dict() + di['URL'], di['Location'], di['Name'], di['ServiceTag'], di['DataCenter'], \ + di['FirmwareVersion'], status = info[0] + + state_table = { + "1" : ( "other, ", 1 ), + "2" : ( "unknown, ", 1 ), + "3" : ( "", 0 ), + "4" : ( "nonCritical, ", 1 ), + "5" : ( "Critical, ", 2 ), + "6" : ( "NonRecoverable, ", 2 ), + } + infotext, state = state_table.get(status, ("unknown state", 2 )) + for parameter, value in di.items(): + infotext += "%s: %s, " % ( parameter, value ) + infotext = re.sub(", $","", infotext) + + return state, infotext + +check_info["dell_chassis_status"] = { + "check_function" : check_dell_chassis_status, + "inventory_function" : inventory_dell_chassis_status, + "service_description" : "Chassis Health", + "has_perfdata" : False, + "snmp_info" : ( ".1.3.6.1.4.1.674.10892.2", [ + "1.1.7", # drsProductURL 0 + "1.1.9", # drsProductChassisLocation 1 + "1.1.10", # drsProductChassisName 2 + "1.1.11", # drsSystemServiceTag 3 + "1.1.15", # drsProductChassisDataCenter 4 + "1.2.1", # drsFirmwareVersion 5 + "2.1", # drsGlobalSystemStatus 6 + ]), + "snmp_scan_function" : lambda oid: oid('.1.3.6.1.2.1.1.2.0') == ".1.3.6.1.4.1.674.10892.2", +} + diff -Nru check-mk-1.2.2p3/dell_chassis_temp check-mk-1.2.6p12/dell_chassis_temp --- check-mk-1.2.2p3/dell_chassis_temp 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/dell_chassis_temp 2015-06-24 09:48:36.000000000 +0000 @@ -0,0 +1,69 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. 
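Two remarks on dell_chassis_status above: the module-level dell_chassis_status_info tuple merely duplicates the snmp_info entry of the check_info dictionary and is otherwise unused in the patch, and the infotext is assembled by appending "key: value, " pairs and then stripping the trailing comma with re.sub. A join expresses the same thing without the cleanup pass (an equivalent sketch, not the patch code):

    infotext += ", ".join("%s: %s" % (key, value) for key, value in di.items())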
+ +dell_chassis_temp_default_levels = ( 60, 80 ) + +def inventory_dell_chassis_temp(info): + inventory = [] + if info and len(info[0]) == 3: + inventory.append( ( "Front Panel", "dell_chassis_temp_default_levels") ) + inventory.append( ( "CMC Ambient", "dell_chassis_temp_default_levels") ) + inventory.append( ( "CMC Processor", "dell_chassis_temp_default_levels") ) + return inventory + + +def check_dell_chassis_temp(item, params, info): + + items = { + "Front Panel" : 0, + "CMC Ambient" : 1, + "CMC Processor": 2, + } + + if item in items: + item_id = items[item] + + temp = float(info[0][item_id]) + return check_temperature(temp, params) + + return 3, "Sensor not found in SNMP data" + + +check_info["dell_chassis_temp"] = { + "check_function" : check_dell_chassis_temp, + "inventory_function" : inventory_dell_chassis_temp, + "service_description" : "Temperature %s", + "has_perfdata" : True, + "group" : "hw_temperature", + "snmp_info" : ( ".1.3.6.1.4.1.674.10892.2.3.1", [ + "10", # drsChassisFrontPanelAmbientTemperature + "11", # drsCMCAmbientTemperature + "12", # drsCMCProcessorTemperature + ]), + "snmp_scan_function" : lambda oid: oid('.1.3.6.1.2.1.1.2.0') == ".1.3.6.1.4.1.674.10892.2", + "includes" : [ "temperature.include" ], +} + diff -Nru check-mk-1.2.2p3/dell_idrac_disks check-mk-1.2.6p12/dell_idrac_disks --- check-mk-1.2.2p3/dell_idrac_disks 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/dell_idrac_disks 2015-06-24 09:48:36.000000000 +0000 @@ -0,0 +1,123 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. 
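Like decru_temps earlier in the diff, dell_chassis_temp no longer compares temperature levels itself: it hands the measured value and the (warn, crit) tuple to check_temperature() from temperature.include, pulled in through the new 'includes' key. Conceptually the helper does something like the following (a simplified assumption for illustration, not the actual include):

    def check_temperature(temp, params):
        warn, crit = params
        perfdata = [ ("temp", temp, warn, crit) ]
        infotext = "%.1f C (warn/crit at %.1f/%.1f C)" % (temp, warn, crit)
        if temp >= crit:
            return 2, infotext, perfdata
        elif temp >= warn:
            return 1, infotext, perfdata
        return 0, infotext, perfdata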
+ +def inventory_dell_idrac_disks(info): + inventory = [] + for line in info: + inventory.append(( line[0], None )) + return inventory + +def check_dell_idrac_disks(item, _no_params, info): + diskStates = { + 1 : "unkown", + 2 : "ready", + 3 : "online", + 4 : "foreign", + 5 : "offline", + 6 : "blocked", + 7 : "failed", + 8 : "non-raid", + 9 : "removed", + } + + componentStates = { + 1 : "Other", + 2 : "Unknown", + 3 : "OK", + 4 : "Non-critical", + 5 : "Critical", + 6 : "Non-recoverable" + } + + sparestates = { + 1 : "not a Spare", + 2 : "dedicated Hotspare", + 3 : "global Hotspare", + } + for diskname, diskState, capacityMB, spareState, componentState, smartAlert, displayName in info: + if diskname == item: + state = 0 + infotexts = [] + + # Component State + componentState = int(componentState) + infotext = "State: " + componentStates[componentState] + if componentState in [ 5, 6 ]: + state = 2 + infotexts.append(infotext+"(!!)") + elif componentState in [ 1, 2, 4 ]: + state = max(state, 1) + infotexts.append(infotext+"(!)") + + # Smart Alert + if smartAlert != '0': + infotexts.append("Smart Alert on Disk(!!)") + + # Disk State + diskState = int(diskState) + infotext = "Disk State: " + diskStates[diskState] + label = "" + show = False + if diskState in [ 1, 5, 6, 7, 9 ]: + state = 2 + label = "(!!)" + show = True + elif diskState in [ 4, 8 ]: + state = max(state, 1) + label = "(!)" + show = True + if show: + infotexts.append(infotext+label) + + spareState = int(spareState) + if spareState != 1: + infotexts.append('Spare State: ' + sparestates[spareState] ) + + # Capacity + infotexts.append("Size: " + get_bytes_human_readable(int(capacityMB) * 1024 * 1024 )) + + # Display Name + infotexts.append(displayName) + + return state, ", ".join(infotexts) + return 3, "Disk not found in SNMP Output" + +check_info["dell_idrac_disks"] = { + "check_function" : check_dell_idrac_disks, + "inventory_function" : inventory_dell_idrac_disks, + "service_description" : "Disk %s", + "snmp_scan_function" : lambda oid: oid('.1.3.6.1.2.1.1.2.0') == ".1.3.6.1.4.1.674.10892.5", + "snmp_info" : (".1.3.6.1.4.1.674.10892.5.5.1.20.130.4.1", [ + 2, # physicalDiskName + 4, # physicalDiskState + 11, # physicalDiskCapacityInMB + 22, # physicalDiskSpareState + 24, # physicalDiskComponentStatus + 31, # physicalDiskSmartAlertIndication + 55, # physicalDiskDisplayName + ]), +} + diff -Nru check-mk-1.2.2p3/dell_om_disks check-mk-1.2.6p12/dell_om_disks --- check-mk-1.2.2p3/dell_om_disks 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/dell_om_disks 2015-09-21 10:59:54.000000000 +0000 @@ -0,0 +1,136 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. 
+# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with-
+# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE. See the GNU General Public License for more de-
+# tails. You should have received a copy of the GNU General Public
+# License along with GNU Make; see the file COPYING. If not, write
+# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
+# Boston, MA 02110-1301 USA.
+
+def inventory_dell_om_disks(info):
+    return [ ("%s:%s:%s" % (x[3], x[4], x[5]) , None) for x in info ]
+
+def check_dell_om_disks(item, _no_params, info):
+    # State definitions. Found in check_openmanage from Trond H. Amundsen
+    spare_state = {
+        1 : 'VD member',    # disk is a member of a virtual disk
+        2 : 'DG member',    # disk is a member of a disk group
+        3 : 'Global HS',    # disk is a global hot spare
+        4 : 'Dedicated HS', # disk is a dedicated hot spare
+        5 : 'no',           # not a spare
+    }
+
+    media_type = {
+        1 : 'unknown',
+        2 : 'HDD',
+        3 : 'SSD',
+    }
+
+    bus_type = {
+        1 : 'SCSI',
+        2 : 'IDE',
+        3 : 'Fibre Channel',
+        4 : 'SSA',
+        6 : 'USB',
+        7 : 'SATA',
+        8 : 'SAS',
+    }
+
+    pdisk_state = {
+        0  : 'Unknown',
+        1  : 'Ready',
+        2  : 'Failed',
+        3  : 'Online',
+        4  : 'Offline',
+        6  : 'Degraded',
+        7  : 'Recovering',
+        11 : 'Removed',
+        15 : 'Resynching',
+        22 : 'Replacing', # FIXME: this one is not defined in the OMSA MIBs
+        24 : 'Rebuilding',
+        25 : 'No Media',
+        26 : 'Formatting',
+        28 : 'Diagnostics',
+        34 : 'Predictive failure',
+        35 : 'Initializing',
+        41 : 'Unsupported',
+        53 : 'Incompatible',
+        39 : 'Foreign',
+        40 : 'Clear',
+    }
+
+    for name, dstate, pid, eid, cid, tid, sizeMB, btype, sstate, mt in info:
+        ditem = "%s:%s:%s" % ( eid, cid, tid )
+        if ditem == item:
+            state = 0
+            dstate = saveint(dstate)
+            btype = saveint(btype)
+            sstate = saveint(sstate)
+            mt = saveint(mt)
+            size = saveint(sizeMB)*1024*1024
+            msg = ["%s (%s, %s)" % ( name, pid, get_bytes_human_readable(size) ) ]
+            label = ""
+            if dstate in [ 40, 35, 34, 26, 7, 4]:
+                state = 1
+                label = "(!)"
+            elif dstate not in [ 3, 1 ]:
+                state = 2
+                label = "(!!)"
+
+            # handle hot spares as OK
+            if sstate in [ 3, 4 ] and dstate == 1:
+                state = 0
+                label = ""
+
+            msg.append("state %s%s" % ( pdisk_state.get(dstate, 'unknown (%s)' % dstate ), label))
+            msg.append("Bus Type: %s" % bus_type.get(btype, 'unknown (%s)' % btype) )
+
+            if sstate != 5:
+                msg.append("Spare State: %s" % spare_state.get(sstate, 'unknown (%s)' % sstate ))
+            if mt != 0:
+                msg.append("Media Type: %s" % media_type.get(mt, 'unknown (%s)' % mt ))
+
+            return state, ", ".join(msg)
+    return 3, "Device not found in SNMP tree"
+
+check_info["dell_om_disks"] = {
+    "check_function"      : check_dell_om_disks,
+    "inventory_function"  : inventory_dell_om_disks,
+    "service_description" : "Physical Disk %s",
+    "has_perfdata"        : False,
+    # There is no other way to find out that openmanage is present.
+ "snmp_scan_function" : scan_dell_om, + "snmp_info" : ( ".1.3.6.1.4.1.674.10893.1.20.130.4.1", [ + 2, # arrayDiskName + 4, # arrayDiskState + 6, # arrayDiskProductID + 9, # arrayDiskEnclosureID + 10, # arrayDiskChannel + 15, # arrayDiskTargetID + 11, # arrayDiskLengthInMB + 21, # arrayDiskBusType + 22, # arrayDiskSpareState + #24, #arrayDiskComponentStatus + #31, #arrayDiskSmartAlertIndication + 35, # arrayDiskMediaType + ]), + "includes" : [ "dell_om.include" ], +} + diff -Nru check-mk-1.2.2p3/dell_om_esmlog check-mk-1.2.6p12/dell_om_esmlog --- check-mk-1.2.2p3/dell_om_esmlog 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/dell_om_esmlog 2015-06-24 09:48:36.000000000 +0000 @@ -0,0 +1,59 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. + +def inventory_dell_om_esmlog(info): + if len(info) > 0: + return [ ( None, None ) ] + return [] + +def check_dell_om_esmlog(_no_item, _no_params, info): + status = int(info[0][0]) + if status == 5: + state = 2 + message = "ESM Log is full" + elif status == 3: + state = 0 + message = "EMS Log is less than 80% full" + else: + state =1 + message = "EMS log more than 80% full" + + return state, message + + +check_info["dell_om_esmlog"] = { + "check_function" : check_dell_om_esmlog, + "inventory_function" : inventory_dell_om_esmlog, + "service_description" : "ESM Log", + "has_perfdata" : False, + # There is no other way to find out that openmanage is present. + "snmp_scan_function" : scan_dell_om, + "snmp_info" : ( ".1.3.6.1.4.1.674.10892.1.200.10.1.41", [ + 1, # eventlogStatus + ]), + "includes" : [ "dell_om.include" ], +} + diff -Nru check-mk-1.2.2p3/dell_om.include check-mk-1.2.6p12/dell_om.include --- check-mk-1.2.2p3/dell_om.include 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/dell_om.include 2015-09-21 10:59:54.000000000 +0000 @@ -0,0 +1,34 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . 
+# |            \____|_| |_|\___|\___|_|\_\___|_|  |_|_|\_\           |
+# |                                                                  |
+# | Copyright Mathias Kettner 2014             mk@mathias-kettner.de |
+# +------------------------------------------------------------------+
+#
+# This file is part of Check_MK.
+# The official homepage is at http://mathias-kettner.de/check_mk.
+#
+# check_mk is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by
+# the Free Software Foundation in version 2. check_mk is distributed
+# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with-
+# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE. See the GNU General Public License for more de-
+# tails. You should have received a copy of the GNU General Public
+# License along with GNU Make; see the file COPYING. If not, write
+# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
+# Boston, MA 02110-1301 USA.
+
+# The OpenManage module attaches itself to the SNMP agent of the
+# operating system. We trigger on all Windows and Linux systems.
+# This is not optimal but still enough for excluding network
+# devices and similar stuff.
+def scan_dell_om(oid):
+    return "Open Manage" in oid(".1.3.6.1.2.1.1.1.0") or \
+           "Linux" in oid(".1.3.6.1.2.1.1.1.0") or \
+           "Software: Windows" in oid(".1.3.6.1.2.1.1.1.0")
diff -Nru check-mk-1.2.2p3/dell_om_mem check-mk-1.2.6p12/dell_om_mem
--- check-mk-1.2.2p3/dell_om_mem 1970-01-01 00:00:00.000000000 +0000
+++ check-mk-1.2.6p12/dell_om_mem 2015-06-24 09:48:36.000000000 +0000
@@ -0,0 +1,83 @@
+#!/usr/bin/python
+# -*- encoding: utf-8; py-indent-offset: 4 -*-
+# +------------------------------------------------------------------+
+# |             ____ _               _        __  __ _  __           |
+# |            / ___| |__   ___  ___| | __   |  \/  | |/ /           |
+# |           | |   | '_ \ / _ \/ __| |/ /   | |\/| | ' /            |
+# |           | |___| | | |  __/ (__|   <    | |  | | . \            |
+# |            \____|_| |_|\___|\___|_|\_\___|_|  |_|_|\_\           |
+# |                                                                  |
+# | Copyright Mathias Kettner 2014             mk@mathias-kettner.de |
+# +------------------------------------------------------------------+
+#
+# This file is part of Check_MK.
+# The official homepage is at http://mathias-kettner.de/check_mk.
+#
+# check_mk is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by
+# the Free Software Foundation in version 2. check_mk is distributed
+# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with-
+# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE. See the GNU General Public License for more de-
+# tails. You should have received a copy of the GNU General Public
+# License along with GNU Make; see the file COPYING. If not, write
+# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
+# Boston, MA 02110-1301 USA.
+
+def inventory_dell_om_mem(info):
+    return [ (x[0], None) for x in info ]
+
+# DellMemoryDeviceFailureModes ::= INTEGER {
+#     -- Note: These values are bit masks, so combination values are possible.
+#     -- If value is 0 (zero), memory device has no faults.
+#     eccSingleBitCorrectionWarningRate(1),     -- ECC single bit correction warning rate exceeded
+#     eccSingleBitCorrectionFailureRate(2),     -- ECC single bit correction failure rate exceeded
+#     eccMultiBitFault(4),                      -- ECC multibit fault encountered
+#     eccSingleBitCorrectionLoggingDisabled(8), -- ECC single bit correction logging disabled
+#     deviceDisabledBySpareActivation(16)       -- device disabled because of spare activation
+
+
+def check_dell_om_mem(item, _no_params, info):
+    failure_modes = {
+        1  : 'ECC single bit correction warning rate exceeded',
+        2  : 'ECC single bit correction failure rate exceeded',
+        4  : 'ECC multibit fault encountered',
+        8  : 'ECC single bit correction logging disabled',
+        16 : 'device disabled because of spare activation',
+    }
+
+    for location, status, size, failuremode in info:
+        if location == item:
+            status = int(status)
+            failuremode = int(failuremode)
+            if failuremode == 0:
+                yield 0, "No failure"
+            else:
+                bitmask = 1
+                while bitmask <= 16:
+                    if failuremode & bitmask != 0:
+                        if bitmask in [2, 4]:
+                            yield 2, failure_modes[bitmask]
+                        elif bitmask in [1, 8, 16]:
+                            yield 1, failure_modes[bitmask]
+                    bitmask *= 2
+
+            yield 0, "Size: %s" % get_bytes_human_readable(int(size) * 1024)
+
+
+check_info["dell_om_mem"] = {
+    "check_function"      : check_dell_om_mem,
+    "inventory_function"  : inventory_dell_om_mem,
+    "service_description" : "Memory Module %s",
+    "has_perfdata"        : False,
+    # There is no other way to find out that openmanage is present.
+    "snmp_scan_function"  : scan_dell_om,
+    "snmp_info"           : ( ".1.3.6.1.4.1.674.10892.1.1100.50.1", [
+                                  "8.1",  # Location
+                                  "5.1",  # Status
+                                  "14.1", # Size
+                                  "20.1", # FailureMode
+                            ]),
+    "includes"            : [ "dell_om.include" ],
+}
+
diff -Nru check-mk-1.2.2p3/dell_om_processors check-mk-1.2.6p12/dell_om_processors
--- check-mk-1.2.2p3/dell_om_processors 1970-01-01 00:00:00.000000000 +0000
+++ check-mk-1.2.6p12/dell_om_processors 2015-06-24 09:48:36.000000000 +0000
@@ -0,0 +1,85 @@
+#!/usr/bin/python
+# -*- encoding: utf-8; py-indent-offset: 4 -*-
+# +------------------------------------------------------------------+
+# |             ____ _               _        __  __ _  __           |
+# |            / ___| |__   ___  ___| | __   |  \/  | |/ /           |
+# |           | |   | '_ \ / _ \/ __| |/ /   | |\/| | ' /            |
+# |           | |___| | | |  __/ (__|   <    | |  | | . \            |
+# |            \____|_| |_|\___|\___|_|\_\___|_|  |_|_|\_\           |
+# |                                                                  |
+# | Copyright Mathias Kettner 2014             mk@mathias-kettner.de |
+# +------------------------------------------------------------------+
+#
+# This file is part of Check_MK.
+# The official homepage is at http://mathias-kettner.de/check_mk.
+#
+# check_mk is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by
+# the Free Software Foundation in version 2. check_mk is distributed
+# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with-
+# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE. See the GNU General Public License for more de-
+# tails. You should have received a copy of the GNU General Public
+# License along with GNU Make; see the file COPYING. If not, write
+# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
+# Boston, MA 02110-1301 USA.
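The failure-mode handling in check_dell_om_mem above walks the value as a bitmask, since one SNMP integer may carry several of the documented faults at once. A self-contained sketch of the same decoding loop (the sample value is chosen arbitrarily):

failure_modes = {
    1  : 'ECC single bit correction warning rate exceeded',
    2  : 'ECC single bit correction failure rate exceeded',
    4  : 'ECC multibit fault encountered',
    8  : 'ECC single bit correction logging disabled',
    16 : 'device disabled because of spare activation',
}

def decode_failure_mode(failuremode):
    # yields (state, text) pairs the same way the check function does
    bitmask = 1
    while bitmask <= 16:
        if failuremode & bitmask != 0:
            yield (2 if bitmask in (2, 4) else 1), failure_modes[bitmask]
        bitmask *= 2

# 10 = 8 + 2: logging disabled (WARN) plus failure rate exceeded (CRIT)
print list(decode_failure_mode(10))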
+
+def inventory_dell_om_processors(info):
+    return [ (x[0], None) for x in info if x[1] != '4' and x[0] != '' ]
+
+def check_dell_om_processors(item, _no_params, info):
+    # Probe types found in check_openmanage3.pl
+    cpu_states = {
+        1 : 'Other',         # other than following values
+        2 : 'Unknown',       # unknown
+        3 : 'Enabled',       # enabled
+        4 : 'User Disabled', # disabled by user via BIOS setup
+        5 : 'BIOS Disabled', # disabled by BIOS (POST error)
+        6 : 'Idle',          # idle
+    }
+
+    cpu_readings = {
+        0    : 'Unknown',
+        1    : 'Internal Error',      # Internal Error
+        2    : 'Thermal Trip',        # Thermal Trip
+        32   : 'Configuration Error', # Configuration Error
+        128  : 'Present',             # Processor Present
+        256  : 'Disabled',            # Processor Disabled
+        512  : 'Terminator Present',  # Terminator Present
+        1024 : 'Throttled',           # Processor Throttled
+    }
+
+    for index, status, manuf, status2, reading in info:
+        if index == item:
+            state = 0
+            if not status:
+                status = status2
+            status = saveint(status)
+            reading = saveint(reading)
+            msg = "CPU (%s) State: %s, CPU Reading: %s" % \
+                (manuf, cpu_states.get(status, 'unknown (%s)' % status ), cpu_readings[int(reading)])
+            if status != 3:
+                state = 2
+            if reading in [ 1, 32 ]:
+                state = 2
+            return state, msg
+
+    return 2, "Processor not found"
+
+check_info["dell_om_processors"] = {
+    "check_function"      : check_dell_om_processors,
+    "inventory_function"  : inventory_dell_om_processors,
+    "service_description" : "Processor %s",
+    "has_perfdata"        : False,
+    # There is no other way to find out that openmanage is present.
+    "snmp_scan_function"  : scan_dell_om,
+    "snmp_info"           : ( ".1.3.6.1.4.1.674.10892.1.1100", [
+                                  "30.1.2", # Index
+                                  "30.1.5", # Device Status
+                                  "30.1.8", # Manufacturer Name
+                                  "30.1.9", # DeviceStatus State
+                                  "32.1.6", # Device Status reading
+                            ]),
+    "includes"            : [ "dell_om.include" ],
+}
+
diff -Nru check-mk-1.2.2p3/dell_om_sensors check-mk-1.2.6p12/dell_om_sensors
--- check-mk-1.2.2p3/dell_om_sensors 1970-01-01 00:00:00.000000000 +0000
+++ check-mk-1.2.6p12/dell_om_sensors 2015-09-21 10:59:54.000000000 +0000
@@ -0,0 +1,83 @@
+#!/usr/bin/python
+# -*- encoding: utf-8; py-indent-offset: 4 -*-
+# +------------------------------------------------------------------+
+# |             ____ _               _        __  __ _  __           |
+# |            / ___| |__   ___  ___| | __   |  \/  | |/ /           |
+# |           | |   | '_ \ / _ \/ __| |/ /   | |\/| | ' /            |
+# |           | |___| | | |  __/ (__|   <    | |  | | . \            |
+# |            \____|_| |_|\___|\___|_|\_\___|_|  |_|_|\_\           |
+# |                                                                  |
+# | Copyright Mathias Kettner 2014             mk@mathias-kettner.de |
+# +------------------------------------------------------------------+
+#
+# This file is part of Check_MK.
+# The official homepage is at http://mathias-kettner.de/check_mk.
+#
+# check_mk is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by
+# the Free Software Foundation in version 2. check_mk is distributed
+# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with-
+# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE. See the GNU General Public License for more de-
+# tails. You should have received a copy of the GNU General Public
+# License along with GNU Make; see the file COPYING. If not, write
+# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
+# Boston, MA 02110-1301 USA.
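According to the OMSA MIB the processor status reading in check_dell_om_processors above is itself a bit field, e.g. 128 ("Present") can be combined with 1024 ("Throttled"). The check indexes cpu_readings directly, which is fine for the single-bit values OMSA normally reports but would raise KeyError for a combined value. A tolerant lookup could look like this (a sketch, not the shipped behaviour; pass the cpu_readings dict from above):

def describe_reading(reading, cpu_readings):
    # collect the text for every bit that is set; fall back to the plain
    # lookup (covers the special value 0) or a generic marker
    found = [ txt for bit, txt in sorted(cpu_readings.items())
              if bit and reading & bit ]
    return ", ".join(found) or cpu_readings.get(reading, "unknown (%d)" % reading)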
+
+dell_om_sensors_default_levels = (50, 60)
+
+
+def dell_om_sensors_item(name):
+    return name.replace("Temp", "").strip()
+
+
+def inventory_dell_om_sensors(info):
+    return [ (dell_om_sensors_item(line[4]), "dell_om_sensors_default_levels") for line in info ]
+
+
+def check_dell_om_sensors(item, params, info):
+    # Probe types found in check_openmanage3.pl
+    probe_types = {
+        1  : 'Other',      # type is other than following values
+        2  : 'Unknown',    # type is unknown
+        3  : 'AmbientESM', # type is Ambient Embedded Systems Management temperature probe
+        16 : 'Discrete',   # type is temperature probe with discrete reading
+    }
+    for idx, sensor_state, reading, sensor_type, location_name in info:
+        if item == idx or dell_om_sensors_item(location_name) == item:
+            if params == None:
+                params = dell_om_sensors_default_levels # compatibility with old autochecks
+            temp = int(reading) / 10.0
+
+            yield check_temperature(temp, params)
+
+            if item == idx: # old style item: output location name
+                yield 0, "%s, Type: %s" % (location_name, probe_types[int(sensor_type)])
+
+            if int(sensor_state) != 3:
+                yield 2, "in critical state"
+
+
+check_info["dell_om_sensors"] = {
+    "check_function"      : check_dell_om_sensors,
+    "inventory_function"  : inventory_dell_om_sensors,
+    "service_description" : "Temperature %s",
+    "has_perfdata"        : True,
+    "group"               : "room_temperature",
+    # There is no other way to find out that openmanage is present.
+    "snmp_scan_function"  : scan_dell_om,
+    "snmp_info"           : ( ".1.3.6.1.4.1.674.10892.1.700.20.1", [
+                                  '2', # ProbeIndex
+                                  '5', # ProbeStatus
+                                  '6', # ProbeReading
+                                  '7', # ProbeType
+                                  '8', # ProbeLocationName
+                                  #'10.1', # ProbeUpperCriticalThreshold',
+                                  #'11.1', # ProbeUpperNonCriticalThreshold',
+                                  #'12.1', # ProbeLowerNonCriticalThreshold',
+                                  #'13.1', # ProbeLowerCriticalThreshold',
+                                  #'16.1', # ProbeDiscreteReading',
+                            ]),
+    "includes"            : [ "dell_om.include", "temperature.include" ],
+}
+
diff -Nru check-mk-1.2.2p3/dell_powerconnect_cpu check-mk-1.2.6p12/dell_powerconnect_cpu
--- check-mk-1.2.2p3/dell_powerconnect_cpu 2013-11-05 09:23:07.000000000 +0000
+++ check-mk-1.2.6p12/dell_powerconnect_cpu 2015-06-24 09:48:36.000000000 +0000
@@ -7,7 +7,7 @@
 # |           | |___| | | |  __/ (__|   <    | |  | | . \            |
 # |            \____|_| |_|\___|\___|_|\_\___|_|  |_|_|\_\           |
 # |                                                                  |
-# | Copyright Mathias Kettner 2013             mk@mathias-kettner.de |
+# | Copyright Mathias Kettner 2014             mk@mathias-kettner.de |
 # +------------------------------------------------------------------+
 #
 # This file is part of Check_MK.
@@ -24,57 +24,61 @@
 # to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
 # Boston, MA 02110-1301 USA.
 
-# Author: Lars Michelsen
-# Modified for Dell PowerConnect Switches by : Chris Bowlby
-
 # Relevant SNMP OIDs:
-#.1.3.6.1.4.1.89.1.1.0 = INTEGER: 65535
-#.1.3.6.1.4.1.89.1.2.0 = INTEGER: none(26)
-#.1.3.6.1.4.1.89.1.4.0 = Hex-STRING: E0
-#.1.3.6.1.4.1.89.1.5.0 = INTEGER: 1
-#.1.3.6.1.4.1.89.1.6.0 = INTEGER: true(1)
-#.1.3.6.1.4.1.89.1.7.0 = INTEGER: 91
-#.1.3.6.1.4.1.89.1.8.0 = INTEGER: 10
-#.1.3.6.1.4.1.89.1.9.0 = INTEGER: 4
+# .1.3.6.1.4.1.89.1.1.0 = INTEGER: 65535
+# .1.3.6.1.4.1.89.1.2.0 = INTEGER: none(26)
+# .1.3.6.1.4.1.89.1.4.0 = Hex-STRING: E0
+# .1.3.6.1.4.1.89.1.5.0 = INTEGER: 1
+# .1.3.6.1.4.1.89.1.6.0 = INTEGER: true(1)
+# .1.3.6.1.4.1.89.1.7.0 = INTEGER: 91
+# .1.3.6.1.4.1.89.1.8.0 = INTEGER: 10
+# .1.3.6.1.4.1.89.1.9.0 = INTEGER: 4
 
 # Default values for parameters that can be overridden.
 dell_powerconnect_cpu_default_levels = (80, 90)
 
 # Inventory of dell power connect CPU details.
 def inventory_dell_powerconnect_cpu(checkname, info):
-    enabled, onesecondperc, oneminuteperc, fiveminutesperc = info[0]
-    if enabled == '1' and onesecondperc != '' and int(onesecondperc) >= 0:
-        return [ (None, 'dell_powerconnect_cpu_default_levels') ]
+    if info:
+        enabled, onesecondperc, oneminuteperc, fiveminutesperc = info[0]
+        if enabled == '1' and onesecondperc != '' and int(onesecondperc) >= 0:
+            return [ (None, 'dell_powerconnect_cpu_default_levels') ]
 
 # Check of dell power connect CPU details.
 def check_dell_powerconnect_cpu(item, params, info):
     try:
         enabled, onesecondperc, oneminuteperc, fiveminutesperc = map(int, info[0])
     except ValueError:
-        raise MKCounterWrapped("", "Ignoring empty data from SNMP agent")
+        raise MKCounterWrapped("Ignoring empty data from SNMP agent")
 
     if int(enabled) == 1:
-        cpu_load = saveint(onesecondperc)
-        if cpu_load >= 0 <= 100:
+        cpu_util = saveint(onesecondperc)
+        # Accept only plausible utilization percentages.
+        if 0 <= cpu_util <= 100:
             status = 0
             output = ''
 
-            if cpu_load >= params[1]:
+            if cpu_util >= params[1]:
                 status = 2
                 output = ' (Above %d%%)' % params[1]
-            elif cpu_load >= params[0]:
+            elif cpu_util >= params[0]:
                 status = 1
                 output = ' (Above %d%%)' % params[0]
 
-            return (status, '%s - CPU utilization is %d%% %s' %
-                (nagios_state_names[status], cpu_load, output),
-                [('load', '%d%%' % cpu_load, params[0], params[1], 0, 100),
+            # Darn. It again happened. Someone mixed up load and utilization.
+            # We do *not* rename the performance variables here, in order not
+            # to mix up existing RRDs...
+            return (status, 'CPU utilization is %d%% %s' % (cpu_util, output),
+                [('load', '%d%%' % cpu_util, params[0], params[1], 0, 100),
                 ('loadavg 60s', '%d%%' % saveint(oneminuteperc), params[0], params[1], 0, 100),
                 ('loadavg 5m', '%d%%' % saveint(fiveminutesperc), params[0], params[1], 0, 100),
             ])
 
-    return (3, "UNKNOWN - Invalid information in SNMP data")
+    return (3, "Invalid information in SNMP data")
 
-# Auto-detection of CPU elements.
-check_info['dell_powerconnect_cpu'] = (check_dell_powerconnect_cpu, "CPU utilization", 1, inventory_dell_powerconnect_cpu)
-snmp_info['dell_powerconnect_cpu'] = ( ".1.3.6.1.4.1.89.1", [ "6", "7", "8", "9" ] )
-snmp_scan_functions['dell_powerconnect_cpu'] = \
-    lambda oid: ".1.3.6.1.4.1.674.10895" in oid(".1.3.6.1.2.1.1.2.0")
+check_info["dell_powerconnect_cpu"] = {
+    'check_function':      check_dell_powerconnect_cpu,
+    'inventory_function':  inventory_dell_powerconnect_cpu,
+    'service_description': 'CPU utilization',
+    'has_perfdata':        True,
+    'snmp_info':           ('.1.3.6.1.4.1.89.1', ['6', '7', '8', '9']),
+    'snmp_scan_function':  \
+        lambda oid: ".1.3.6.1.4.1.674.10895" in oid(".1.3.6.1.2.1.1.2.0")
+}
diff -Nru check-mk-1.2.2p3/dell_powerconnect_fans check-mk-1.2.6p12/dell_powerconnect_fans
--- check-mk-1.2.2p3/dell_powerconnect_fans 2013-11-05 09:23:07.000000000 +0000
+++ check-mk-1.2.6p12/dell_powerconnect_fans 2015-06-24 09:48:36.000000000 +0000
@@ -7,7 +7,7 @@
 # |           | |___| | | |  __/ (__|   <    | |  | | . \            |
 # |            \____|_| |_|\___|\___|_|\_\___|_|  |_|_|\_\           |
 # |                                                                  |
-# | Copyright Mathias Kettner 2013             mk@mathias-kettner.de |
+# | Copyright Mathias Kettner 2014             mk@mathias-kettner.de |
 # +------------------------------------------------------------------+
 #
 # This file is part of Check_MK.
@@ -67,19 +67,19 @@ dell_powerconnect_status = dell_powerconnect_fans_status_map[state] status = dell_powerconnect_fans_status2nagios_map[dell_powerconnect_status] - return (status, '%s - Condition of FAN "%s" is %s' % - (nagios_state_names[status], name, dell_powerconnect_status)) + return (status, 'Condition of FAN "%s" is %s' % (name, dell_powerconnect_status)) -# Retain code below in case future state flags to enable/disable performance data are added. -# return (status, '%s - Condition of FAN "%s" is %s' % -# (nagios_state_names[status], name, dell_powerconnect_status), [ -# ("state", dell_powerconnect_fans_state_performance_map[state], "", 0, 0, 1), -# ]) - return (3, "UNKNOWN - item not found in snmp data") + return (3, "item not found in snmp data") # Auto-detection of fan related details. -check_info['dell_powerconnect_fans'] = (check_dell_powerconnect_fans, "Sensor %s", 1, inventory_dell_powerconnect_fans) -snmp_info['dell_powerconnect_fans'] = ( ".1.3.6.1.4.1.674.10895.3000.1.2.110.7.1.1", [ "1", "2", "3"] ) -snmp_scan_functions['dell_powerconnect_fans'] = \ - lambda oid: ".1.3.6.1.4.1.674.10895" in oid(".1.3.6.1.2.1.1.2.0") + +check_info["dell_powerconnect_fans"] = { + 'check_function': check_dell_powerconnect_fans, + 'inventory_function': inventory_dell_powerconnect_fans, + 'service_description': 'Sensor %s', + 'has_perfdata': True, + 'snmp_info': ('.1.3.6.1.4.1.674.10895.3000.1.2.110.7.1.1', ['1', '2', '3']), + 'snmp_scan_function': \ + lambda oid: ".1.3.6.1.4.1.674.10895" in oid(".1.3.6.1.2.1.1.2.0"), +} diff -Nru check-mk-1.2.2p3/dell_powerconnect_psu check-mk-1.2.6p12/dell_powerconnect_psu --- check-mk-1.2.2p3/dell_powerconnect_psu 2013-11-05 09:23:07.000000000 +0000 +++ check-mk-1.2.6p12/dell_powerconnect_psu 2015-06-24 09:48:36.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. @@ -24,19 +24,16 @@ # to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, # Boston, MA 02110-1301 USA. -# Author: Lars Michelsen -# Modified for Dell Sensors By: Chris Bowlby - # Tested with Dell PowerConnect 5448 and 5424 models. 
# Relevant SNMP OIDs: -#.1.3.6.1.4.1.674.10895.3000.1.2.110.7.2.1.1.67109185 = INTEGER: 67109185 -#.1.3.6.1.4.1.674.10895.3000.1.2.110.7.2.1.1.67109186 = INTEGER: 67109186 -#.1.3.6.1.4.1.674.10895.3000.1.2.110.7.2.1.2.67109185 = STRING: "ps1_unit1" -#.1.3.6.1.4.1.674.10895.3000.1.2.110.7.2.1.2.67109186 = STRING: "ps2_unit1" -#.1.3.6.1.4.1.674.10895.3000.1.2.110.7.2.1.3.67109185 = INTEGER: 1 -#.1.3.6.1.4.1.674.10895.3000.1.2.110.7.2.1.3.67109186 = INTEGER: 5 -#.1.3.6.1.4.1.674.10895.3000.1.2.110.7.2.1.4.67109185 = INTEGER: 5 -#.1.3.6.1.4.1.674.10895.3000.1.2.110.7.2.1.4.67109186 = INTEGER: 4 +# .1.3.6.1.4.1.674.10895.3000.1.2.110.7.2.1.1.67109185 = INTEGER: 67109185 +# .1.3.6.1.4.1.674.10895.3000.1.2.110.7.2.1.1.67109186 = INTEGER: 67109186 +# .1.3.6.1.4.1.674.10895.3000.1.2.110.7.2.1.2.67109185 = STRING: "ps1_unit1" +# .1.3.6.1.4.1.674.10895.3000.1.2.110.7.2.1.2.67109186 = STRING: "ps2_unit1" +# .1.3.6.1.4.1.674.10895.3000.1.2.110.7.2.1.3.67109185 = INTEGER: 1 +# .1.3.6.1.4.1.674.10895.3000.1.2.110.7.2.1.3.67109186 = INTEGER: 5 +# .1.3.6.1.4.1.674.10895.3000.1.2.110.7.2.1.4.67109185 = INTEGER: 5 +# .1.3.6.1.4.1.674.10895.3000.1.2.110.7.2.1.4.67109186 = INTEGER: 4 # Status codes: # 1 => normal, @@ -54,39 +51,67 @@ # 5 => internalRedundant # GENERAL MAPS: -dell_powerconnect_psu_status_map = { '1': 'normal', '2': 'warning', '3': 'critical', '4': 'shutdown', '5': 'notPresent', '6': 'notFunctioning' } -dell_powerconnect_psu_supply_map = { '1': 'Unknown', '2': 'Alternating Current', '3': 'Direct Current', '4': 'External Power Supply', '5': 'Internal Redundant' } -dell_powerconnect_psu_status2nagios_map = { 'normal': 0, 'warning': 1, 'critical': 2, 'shutdown': 3, 'notPresent': 1, 'notFunctioning': 2 } -dell_powerconnect_psu_state_performance_map = { '1': 1, '2': 0, '3': 0, '4': 0, '5': 0, '6': 0 } +dell_powerconnect_psu_status_map = { + '1': 'normal', + '2': 'warning', + '3': 'critical', + '4': 'shutdown', + '5': 'notPresent', + '6': 'notFunctioning', +} + +dell_powerconnect_psu_supply_map = { + '1': 'Unknown', + '2': 'Alternating Current', + '3': 'Direct Current', + '4': 'External Power Supply', + '5': 'Internal Redundant', +} + +dell_powerconnect_psu_status2nagios_map = { + 'normal' : 0, + 'warning' : 1, + 'critical' : 2, + 'shutdown' : 3, + 'notPresent' : 1, + 'notFunctioning' : 2, +} -# Inventory of all the PS devices in the switch def inventory_dell_powerconnect_psu(checkname, info): inventory = [] - for device_id, name, state, supply in info: - if dell_powerconnect_psu_status_map[state] != 'notPresent': + hw_ident = info[0][0][0] + for device_id, name, state, supply in info[1]: + # M6220 are blade switches which report valid values only for the "Main" + # sensor. The other one is reported as notFunctioning, but this is wrong. + # Simply ignore the "System" sensor for those devices. + if dell_powerconnect_psu_status_map[state] != 'notPresent' \ + and ('M6220' not in hw_ident or name != 'System'): inventory.append((name, None)) return inventory -# Check of the states of each PS unit. 
def check_dell_powerconnect_psu(item, _not_used, info): - for device_id, name, state, supply in info: + hw_ident = info[0][0][0] + for device_id, name, state, supply in info[1]: if name == item: dell_powerconnect_status = dell_powerconnect_psu_status_map[state] status = dell_powerconnect_psu_status2nagios_map[dell_powerconnect_status] - return (status, '%s - Condition of PSU "%s" is %s, with source %s' % - (nagios_state_names[status], name, dell_powerconnect_status, dell_powerconnect_psu_supply_map[supply])) + return status, 'Condition is %s, with source %s' % \ + (dell_powerconnect_status, dell_powerconnect_psu_supply_map[supply]) + + return (3, "item not found in snmp data") -# Retain code below in case future changes permit performance data for states via a global/check specific flag -# return (status, '%s - Condition of PSU "%s" is %s, with source %s' % -# (nagios_state_names[status], name, dell_powerconnect_status, dell_powerconnect_psu_supply_map[supply]), [ -# ("state", dell_powerconnect_psu_state_performance_map[state], "", 0, 0, 1), -# ]) - return (3, "UNKNOWN - item not found in snmp data") - - -# Auto-detection of what PSU's are connected and active. -check_info['dell_powerconnect_psu'] = (check_dell_powerconnect_psu, "Sensor %s", 1, inventory_dell_powerconnect_psu) -snmp_info['dell_powerconnect_psu'] = ( ".1.3.6.1.4.1.674.10895.3000.1.2.110.7.2.1", [ "1", "2", "3", "4"] ) -snmp_scan_functions['dell_powerconnect_psu'] = \ - lambda oid: ".1.3.6.1.4.1.674.10895" in oid(".1.3.6.1.2.1.1.2.0") +check_info["dell_powerconnect_psu"] = { + 'check_function': check_dell_powerconnect_psu, + 'inventory_function': inventory_dell_powerconnect_psu, + 'service_description': 'Sensor %s', + 'has_perfdata': False, + 'snmp_info': [('.1.3.6.1.4.1.674.10895.3000.1.2.100.1', ['0']), #productIdentificationDisplayName + ('.1.3.6.1.4.1.674.10895.3000.1.2.110.7.2.1', [ + '1', # envMonSupplyStatusIndex + '2', # envMonSupplyStatusDescr + '3', # envMonSupplyState + '4', # envMonSupplySource + ])], + 'snmp_scan_function': lambda oid: ".1.3.6.1.4.1.674.10895" in oid(".1.3.6.1.2.1.1.2.0"), +} diff -Nru check-mk-1.2.2p3/dell_powerconnect_temp check-mk-1.2.6p12/dell_powerconnect_temp --- check-mk-1.2.2p3/dell_powerconnect_temp 2013-11-05 09:23:07.000000000 +0000 +++ check-mk-1.2.6p12/dell_powerconnect_temp 2015-06-24 09:48:36.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. @@ -56,7 +56,7 @@ return inventory # Execute a check against each sensor. 
-def check_dell_powerconnect_temp(item, params, info): +def check_dell_powerconnect_temp(_no_item, params, info): warn, crit = params sensor_info = "" temp_info = "" @@ -78,23 +78,23 @@ else: status = dell_powerconnect_temp_status2nagios_map[dell_powerconnect_status] - return (status, '%s - The temperature sensor is currently (%s)%s, and the current temperature is %sC%s' % - (nagios_state_names[status], dell_powerconnect_temp_status_map[state], sensor_info, temp, temp_info), [ + return (status, 'The temperature sensor is currently (%s)%s, and the current temperature is %s °C%s' % + (dell_powerconnect_temp_status_map[state], sensor_info, temp, temp_info), [ ("temperature", int(temp), warn, crit, 0, 200), ]) -# Retain code below in case future changes permit performance data for states via a global/check specific flag -# return (status, '%s - The temperature sensor is currently (%s), and the current temperature is %sC' % -# (nagios_state_names[status], dell_powerconnect_temp_status_map[state], temp), [ -# ("state", dell_powerconnect_temp_sensor_state_performance_map[state], "", 0, 0, 1), -# ("temperature", int(temp), warn, crit, 0, 200), -# ]) - return (3, "UNKNOWN - item not found in snmp data") + return (3, "item not found in snmp data") # Auto-detection of what temperature sensors are active. -check_info['dell_powerconnect_temp'] = (check_dell_powerconnect_temp, "Temperature", 1, inventory_dell_powerconnect_temp) -snmp_info['dell_powerconnect_temp'] = ( ".1.3.6.1.4.1.89.53.15.1", [ "9", "10" ] ) -snmp_scan_functions['dell_powerconnect_temp'] = \ - lambda oid: ".1.3.6.1.4.1.674.10895" in oid(".1.3.6.1.2.1.1.2.0") -checkgroup_of["dell_powerconnect_temp"] = "hw_temperature" + +check_info["dell_powerconnect_temp"] = { + 'check_function': check_dell_powerconnect_temp, + 'inventory_function': inventory_dell_powerconnect_temp, + 'service_description': 'Temperature', + 'has_perfdata': True, + 'snmp_info': ('.1.3.6.1.4.1.89.53.15.1', ['9', '10']), + 'snmp_scan_function': \ + lambda oid: ".1.3.6.1.4.1.674.10895" in oid(".1.3.6.1.2.1.1.2.0"), + 'group': 'hw_temperature_single', +} diff -Nru check-mk-1.2.2p3/dell_poweredge_amperage check-mk-1.2.6p12/dell_poweredge_amperage --- check-mk-1.2.2p3/dell_poweredge_amperage 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/dell_poweredge_amperage 2015-06-24 09:48:36.000000000 +0000 @@ -0,0 +1,115 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. 
If not, write
+# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
+# Boston, MA 02110-1301 USA.
+
+def inventory_dell_poweredge_amperage_power(info):
+    inventory = []
+    for line in info:
+        if line[6] != "" and line[5] in ( "24", "26" ):
+            inventory.append( ( line[6], None ) )
+    return inventory
+
+def inventory_dell_poweredge_amperage_current(info):
+    inventory = []
+    for line in info:
+        if line[6] != "" and line[5] in ( "23", "25" ):
+            inventory.append( ( line[6], None ) )
+    return inventory
+
+def check_dell_poweredge_amperage(item, _no_params, info):
+    for chassisIndex, Index, StateSettings, Status, Reading, ProbeType, LocationName, \
+        UpperCritical, UpperNonCritical in info:
+
+        if item == LocationName:
+            state_table = {
+                "1"  : ( "other", 1 ),
+                "2"  : ( "unknown", 1 ),
+                "3"  : ( "", 0 ),
+                "4"  : ( "nonCriticalUpper", 1 ),
+                "5"  : ( "CriticalUpper", 2 ),
+                "6"  : ( "NonRecoverableUpper", 2 ),
+                "7"  : ( "nonCriticalLower", 1 ),
+                "8"  : ( "CriticalLower", 2 ),
+                "9"  : ( "NonRecoverableLower", 2 ),
+                "10" : ( "failed", 2 ),
+            }
+            # Fall back to a (text, state) tuple so that unexpected
+            # status values unpack cleanly.
+            state_txt, state = state_table.get(Status, ( "unknown state", 2 ))
+
+            if UpperNonCritical and UpperCritical:
+                limittext = " (upper limits %s/%s)" % (UpperNonCritical, UpperCritical)
+                maxi = savefloat(UpperCritical) * 1.1
+            else:
+                limittext = ""
+                maxi = ""
+
+            if ProbeType in ( "23", "25" ): # Amps
+                current = str(int(Reading)/10.0)
+                infotext = "%s Ampere %s" % ( current, state_txt )
+                perfdata = [( "current", current+"A", UpperNonCritical, UpperCritical, "", maxi )]
+            elif ProbeType in ( "24", "26" ): # Watts
+                infotext = "%s Watt %s" % ( Reading, state_txt )
+                perfdata = [( "power", Reading+"W", UpperNonCritical, UpperCritical, "", maxi )]
+            else:
+                infotext = "Unknown Probe Type %s" % ProbeType
+                return 3, infotext
+
+            return state, infotext+limittext, perfdata
+
+    return 3, "Amperage Device not found"
+
+check_info["dell_poweredge_amperage.power"] = {
+    "check_function"      : check_dell_poweredge_amperage,
+    "inventory_function"  : inventory_dell_poweredge_amperage_power,
+    "service_description" : "%s",
+    "has_perfdata"        : True,
+    "snmp_info"           : ( ".1.3.6.1.4.1.674.10892.5.4.600.30.1", [
+                                  "1", # amperageProbechassisIndex
+                                  "2", # amperageProbeIndex
+                                  # "3", # amperageProbeStateCapabilities
+                                  "4", # amperageProbeStateSettings
+                                  "5", # amperageProbeStatus
+                                  "6", # amperageProbeReading
+                                  "7", # amperageProbeType
+                                  "8", # amperageProbeLocationName
+                                  #"9", # amperageProbeUpperNonRecoverableThreshold
+                                  "10", # amperageProbeUpperCriticalThreshold
+                                  "11", # amperageProbeUpperNonCriticalThreshold
+                                  #"12", # amperageProbeLowerNonCriticalThreshold
+                                  #"13", # amperageProbeLowerCriticalThreshold
+                                  #"14", # amperageProbeLowerNonRecoverableThreshold
+                                  #"15", # amperageProbeCapabilities
+                                  #"16", # amperageProbeDiscreteReading
+                            ]),
+    "snmp_scan_function"  : lambda oid: oid('.1.3.6.1.2.1.1.2.0') == ".1.3.6.1.4.1.674.10892.5",
+}
+
+#check_info["dell_poweredge_amperage.current"] = {
+#    "check_function"      : check_dell_poweredge_amperage,
+#    "inventory_function"  : inventory_dell_poweredge_amperage_current,
+#    "service_description" : "%s",
+#    "has_perfdata"        : True,
+#    "snmp_info"           : dell_poweredge_amperage_info,
+#    "snmp_scan_function"  : dell_poweredge_amperage_scan,
+#}
diff -Nru check-mk-1.2.2p3/dell_poweredge_amperage.current check-mk-1.2.6p12/dell_poweredge_amperage.current
--- check-mk-1.2.2p3/dell_poweredge_amperage.current 1970-01-01 00:00:00.000000000 +0000
+++ check-mk-1.2.6p12/dell_poweredge_amperage.current 2015-06-24 09:48:36.000000000 +0000
@@ -0,0 +1,31 @@
+title: Dell PowerEdge Server: Current
+agents: snmp
+catalog: hw/server/dell
+license: GPL
+distribution: check_mk
+description:
+ This check monitors the electrical current going through power units of
+ Dell PowerEdge Servers.
+
+ No limits are set in the check, since limits are configured in the device
+ itself. The state given by the device is taken as the state of the check
+ as follows: If the device returns {OK}, the check is {OK}. If the device
+ returns {other}, {unknown}, {nonCriticalUpper} or {nonCriticalLower}, a
+ state of {WARN} is returned. Otherwise the check is {CRIT}.
+
+ The check displays the upper warning and critical limits as configured in
+ the device. Further limits are {not} displayed, even though they may be
+ configured. These are UpperNonRecoverable, LowerCritical, LowerNonCritical
+ and LowerNonRecoverable. These limits may nevertheless lead to state
+ changes; the corresponding states are honoured as outlined above.
+
+item:
+ The name of the location variable configured for the power unit
+
+perfdata:
+ One variable: the electrical current in Ampere
+
+inventory:
+ All power units
+
+
diff -Nru check-mk-1.2.2p3/dell_poweredge_amperage.power check-mk-1.2.6p12/dell_poweredge_amperage.power
--- check-mk-1.2.2p3/dell_poweredge_amperage.power 1970-01-01 00:00:00.000000000 +0000
+++ check-mk-1.2.6p12/dell_poweredge_amperage.power 2015-06-24 09:48:36.000000000 +0000
@@ -0,0 +1,29 @@
+title: Dell PowerEdge Server: Power Consumption
+agents: snmp
+catalog: hw/server/dell
+license: GPL
+distribution: check_mk
+description:
+ This check monitors the power consumption of power units of Dell PowerEdge
+ Servers.
+
+ No limits are set in the check, since limits are configured in the device
+ itself. The state given by the device is taken as the state of the check
+ as follows: If the device returns {OK}, the check is {OK}. If the device
+ returns {other}, {unknown}, {nonCriticalUpper} or {nonCriticalLower}, a
+ state of {WARN} is returned. Otherwise the check is {CRIT}.
+
+ The check displays the upper warning and critical limits as configured in
+ the device. Further limits are {not} displayed, even though they may be
+ configured. These are UpperNonRecoverable, LowerCritical, LowerNonCritical
+ and LowerNonRecoverable. These limits may nevertheless lead to state
+ changes; the corresponding states are honoured as outlined above.
+
+item:
+ The name of the location variable configured for the power unit
+
+perfdata:
+ One variable: the power consumption in Watts
+
+inventory:
+ All power units
+
diff -Nru check-mk-1.2.2p3/dell_poweredge_cpu check-mk-1.2.6p12/dell_poweredge_cpu
--- check-mk-1.2.2p3/dell_poweredge_cpu 1970-01-01 00:00:00.000000000 +0000
+++ check-mk-1.2.6p12/dell_poweredge_cpu 2015-06-24 09:48:36.000000000 +0000
@@ -0,0 +1,77 @@
+#!/usr/bin/python
+# -*- encoding: utf-8; py-indent-offset: 4 -*-
+# +------------------------------------------------------------------+
+# |             ____ _               _        __  __ _  __           |
+# |            / ___| |__   ___  ___| | __   |  \/  | |/ /           |
+# |           | |   | '_ \ / _ \/ __| |/ /   | |\/| | ' /            |
+# |           | |___| | | |  __/ (__|   <    | |  | | . \            |
+# |            \____|_| |_|\___|\___|_|\_\___|_|  |_|_|\_\           |
+# |                                                                  |
+# | Copyright Mathias Kettner 2014             mk@mathias-kettner.de |
+# +------------------------------------------------------------------+
+#
+# This file is part of Check_MK.
+# The official homepage is at http://mathias-kettner.de/check_mk.
+# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. + + +def inventory_dell_poweredge_cpu(info): + for chassisIndex, Index, StateSettings, Status, LocationName in info[0]: + if LocationName != "" and StateSettings != "1": + yield LocationName, None + + +def check_dell_poweredge_cpu(item, _no_params, info): + for chassisIndex, Index, StateSettings, Status, LocationName in info[0]: + if item == LocationName: + BrandName = None + for line in info[1]: + if line[0] == chassisIndex and line[1] == Index: + BrandName = line[2] + + state_table = { + "1" : ("other", 1), + "2" : ("unknown", 1), + "3" : ("", 0), + "4" : ("non-critical", 1), + "5" : ("critical", 2), + "6" : ("non-recoverable", 2), + } + + infotext, state = state_table.get(Status, ( "unknown state", 2 )) + if BrandName: + infotext += " " + BrandName + + return state, infotext + + +check_info["dell_poweredge_cpu"] = { + "check_function" : check_dell_poweredge_cpu, + "inventory_function" : inventory_dell_poweredge_cpu, + "service_description" : "%s", + "snmp_info" : [( ".1.3.6.1.4.1.674.10892.5.4.1100.32.1", [ + 1, # processorDevicechassisIndex + 2, # processorDeviceIndex + 4, # processorDeviceStateSettings + 5, # processorDeviceStatus + 7, # processorDeviceLocationName + ]), + ( ".1.3.6.1.4.1.674.10892.5.4.1100.30.1", [ + 1, # processorDevicechassisIndex + 2, # processorDeviceIndex + 23, # processorDeviceBrandName + ]) + ], + "snmp_scan_function" : lambda oid: oid('.1.3.6.1.2.1.1.2.0') == ".1.3.6.1.4.1.674.10892.5", +} + diff -Nru check-mk-1.2.2p3/dell_poweredge_mem check-mk-1.2.6p12/dell_poweredge_mem --- check-mk-1.2.2p3/dell_poweredge_mem 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/dell_poweredge_mem 2015-06-24 09:48:36.000000000 +0000 @@ -0,0 +1,83 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. 
If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. + +def inventory_dell_poweredge_mem(info): + inventory = [] + for line in info: + location = line[1] + if location != "": + inventory.append( ( location, None ) ) + return inventory + +def check_dell_poweredge_mem(item, _no_params, info): + di = dict() + for status, location, size, di['Speed'], di['MFR'], di['P/N'], di['S/N'] in info: + + di['Size'] = str(saveint(size)/1024/1024)+"GB" + if item == location: + state_table = { + "1" : ( "other", 1 ), + "2" : ( "unknown", 1 ), + "3" : ( "", 0 ), + "4" : ( "nonCritical", 1 ), + "5" : ( "Critical", 2 ), + "6" : ( "NonRecoverable", 2 ), + } + infotext, state = state_table.get(status, ( "unknown state", 2 )) + for parameter, value in di.items(): + infotext += ", %s: %s" % ( parameter, value ) + + infotext = re.sub("^, ","", infotext) + + return state, infotext + + return 3, "Memory Device not found" + +check_info["dell_poweredge_mem"] = { + "check_function" : check_dell_poweredge_mem, + "inventory_function" : inventory_dell_poweredge_mem, + "service_description" : "%s", + "has_perfdata" : False, + "snmp_info" : ( ".1.3.6.1.4.1.674.10892.5.4.1100.50.1", [ + #"1", # memoryDevicechassisIndex 0 + #"2", # memoryDeviceIndex 1 + #"3", # memoryDeviceStateCapabilities + #"4", # memoryDeviceStateSettings 0 + "5", # memoryDeviceStatus 0 + #"6", # memoryDeviceReading + #"7", # memoryDeviceType + "8", # memoryDeviceLocationName 1 + #"10", # memoryDeviceBankLocationName + "14", # memoryDeviceSize + "15", # memoryDeviceSpeed + "21", # memoryDeviceManufacturerName + "22", # memoryDevicePartNumberName + "23", # memoryDeviceSerialNumberName + #"26", # memoryDeviceFQDD + ]), + "snmp_scan_function" : lambda oid: oid('.1.3.6.1.2.1.1.2.0') == ".1.3.6.1.4.1.674.10892.5", +} + diff -Nru check-mk-1.2.2p3/dell_poweredge_netdev check-mk-1.2.6p12/dell_poweredge_netdev --- check-mk-1.2.2p3/dell_poweredge_netdev 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/dell_poweredge_netdev 2015-06-24 09:48:36.000000000 +0000 @@ -0,0 +1,93 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. 
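check_dell_poweredge_mem above collects the extra columns in a plain dict and later strips the leading ", " with re.sub. On the Python 2 interpreters these checks run under, plain dicts make no ordering promise, so the column order in the service output can vary between runs. A sketch of the same output built from an ordered list of pairs instead (the labels and values here are illustrative only):

def build_infotext(state_txt, details):
    # details as (label, value) pairs, e.g. [ ("Size", "8GB"), ("Speed", "1333") ]
    parts = [ state_txt ] if state_txt else []
    parts += [ "%s: %s" % (label, value) for label, value in details ]
    return ", ".join(parts)

print build_infotext("", [ ("Size", "8GB"), ("MFR", "Hynix") ])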
+ +def inventory_dell_poweredge_netdev(info): + inventory = [] + for line in info: + if line[4] != "": + inventory.append( ( line[4], None ) ) + return inventory + +def check_dell_poweredge_netdev(item, _no_params, info): + di = dict() + for status, connection_status, di['Product'], cur_mac, fqdd in info: + if item == fqdd: + di['MAC'] = '-'.join( [ "%02X" % ord( c ) for c in cur_mac ] ).strip() + state_table = { + "1" : ( "other,", 1 ), + "2" : ( "unknown,", 1 ), + "3" : ( "", 0 ), + "4" : ( "nonCritical,", 1 ), + "5" : ( "Critical,", 2 ), + "6" : ( "NonRecoverable,", 2 ), + } + connection_table = { + "1" : ( "connected, ", 0 ), + "2" : ( "disconnected, ", 2 ), + "3" : ( "driverBad, ", 2 ), + "4" : ( "driverDisabled, ", 2 ), + "10" : ( "hardwareInitializing, ", 2 ), + "11" : ( "hardwareResetting, ", 2 ), + "12" : ( "hardwareClosing, ", 2 ), + "13" : ( "hardwareNotReady, ", 2 ), + } + dev_state_txt, dev_state = state_table.get(status, ( "unknown device status,", 2 )) + conn_state_txt, conn_state = connection_table.get(connection_status, ( "", 0 )) + state = max(dev_state, conn_state) + infotext = "%s %s" % (dev_state_txt, conn_state_txt) + for parameter, value in di.items(): + infotext += "%s: %s, " % ( parameter, value ) + infotext = re.sub(", $","", infotext) + + return state, infotext + + return 3, "network device not found" + +check_info["dell_poweredge_netdev"] = { + "check_function" : check_dell_poweredge_netdev, + "inventory_function" : inventory_dell_poweredge_netdev, + "service_description" : "%s", + "has_perfdata" : False, + "snmp_info" : ( ".1.3.6.1.4.1.674.10892.5.4.1100.90.1", [ + #"1", # networkDeviceChassisIndex + #"2", # networkDeviceIndex + "3", # networkDeviceStatus 0 + "4", # networkDeviceConnectionStatus 1 + "6", # networkDeviceProductName 2 + "15", # networkDeviceCurrentMACAddress 3 + #"16", # networkDevicePermanentMACAddress + #"17", # networkDevicePCIBisNumber + #"18", # networkDevicePCIDeviceNumber + #"19", # networkDevicePCIFunctionNumber + #"23", # networkDeviceTOECapabilityFlag + #"27", # networkDeviceSCSICapabilityFlag + #"28", # networkDeviceSCSIEnabled + #"29", # networkDeviceCapabilities + "30", # networkDeviceFQDD 4 + ]), + "snmp_scan_function" : lambda oid: oid('.1.3.6.1.2.1.1.2.0') == ".1.3.6.1.4.1.674.10892.5", +} + diff -Nru check-mk-1.2.2p3/dell_poweredge_pci check-mk-1.2.6p12/dell_poweredge_pci --- check-mk-1.2.2p3/dell_poweredge_pci 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/dell_poweredge_pci 2015-06-24 09:48:36.000000000 +0000 @@ -0,0 +1,76 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. 
See the GNU General Public License for more de-
+# tails. You should have received a copy of the GNU General Public
+# License along with GNU Make; see the file COPYING. If not, write
+# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
+# Boston, MA 02110-1301 USA.
+
+def inventory_dell_poweredge_pci(info):
+    inventory = []
+    for line in info:
+        fqdd = line[4]
+        if fqdd != "":
+            inventory.append( ( fqdd, None ) )
+    return inventory
+
+def check_dell_poweredge_pci(item, _no_params, info):
+    di = dict()
+    for status, di['BusWidth'], di['MFR'], di['Desc.'], fqdd in info:
+
+        if item == fqdd:
+            state_table = {
+                "1" : ( "other", 1 ),
+                "2" : ( "unknown", 1 ),
+                "3" : ( "", 0 ),
+                "4" : ( "nonCritical", 1 ),
+                "5" : ( "Critical", 2 ),
+                "6" : ( "NonRecoverable", 2 ),
+            }
+            infotext, state = state_table.get(status, ( "unknown state", 2 ))
+            for parameter, value in di.items():
+                infotext += ", %s: %s" % ( parameter, value )
+
+            infotext = re.sub("^, ","", infotext)
+
+            return state, infotext
+
+    return 3, "PCI device not found"
+
+check_info["dell_poweredge_pci"] = {
+    "check_function"      : check_dell_poweredge_pci,
+    "inventory_function"  : inventory_dell_poweredge_pci,
+    "service_description" : "PCI %s",
+    "has_perfdata"        : False,
+    "snmp_info"           : ( ".1.3.6.1.4.1.674.10892.5.4.1100.80.1", [
+                                  #"1", # pCIDevicechassisIndex 0
+                                  #"2", # pCIDeviceIndex 1
+                                  #"3", # pCIDeviceStateCapabilities
+                                  #"4", # pCIDeviceStateSettings 0
+                                  "5", # pCIDeviceStatus 0
+                                  "7", # pCIDeviceDataBusWidth 1
+                                  "8", # pCIDeviceManufacturerName 2
+                                  "9", # pCIDeviceDescriptionName 3
+                                  "12", # pCIDeviceFQDD 4
+                            ]),
+    "snmp_scan_function"  : lambda oid: oid('.1.3.6.1.2.1.1.2.0') == ".1.3.6.1.4.1.674.10892.5",
+}
+
diff -Nru check-mk-1.2.2p3/dell_poweredge_status check-mk-1.2.6p12/dell_poweredge_status
--- check-mk-1.2.2p3/dell_poweredge_status 1970-01-01 00:00:00.000000000 +0000
+++ check-mk-1.2.6p12/dell_poweredge_status 2015-06-24 09:48:36.000000000 +0000
@@ -0,0 +1,66 @@
+#!/usr/bin/python
+# -*- encoding: utf-8; py-indent-offset: 4 -*-
+# +------------------------------------------------------------------+
+# |             ____ _               _        __  __ _  __           |
+# |            / ___| |__   ___  ___| | __   |  \/  | |/ /           |
+# |           | |   | '_ \ / _ \/ __| |/ /   | |\/| | ' /            |
+# |           | |___| | | |  __/ (__|   <    | |  | | . \            |
+# |            \____|_| |_|\___|\___|_|\_\___|_|  |_|_|\_\           |
+# |                                                                  |
+# | Copyright Mathias Kettner 2014             mk@mathias-kettner.de |
+# +------------------------------------------------------------------+
+#
+# This file is part of Check_MK.
+# The official homepage is at http://mathias-kettner.de/check_mk.
+#
+# check_mk is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by
+# the Free Software Foundation in version 2. check_mk is distributed
+# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with-
+# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE. See the GNU General Public License for more de-
+# tails. You should have received a copy of the GNU General Public
+# License along with GNU Make; see the file COPYING. If not, write
+# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
+# Boston, MA 02110-1301 USA.
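All of the dell_poweredge_* checks translate the OMSA status integer through a small table of (text, state) tuples, as in check_dell_poweredge_pci above. The .get() fallback has to be a tuple of the same shape: a bare string such as "2" raises ValueError as soon as an unexpected status value makes the unpacking run against it. A minimal illustration:

state_table = {
    "3" : ( "", 0 ),
    "4" : ( "nonCritical", 1 ),
    "5" : ( "Critical", 2 ),
}
state_txt, state = state_table.get("17", ( "unknown state", 2 ))
print state, state_txt  # -> 2 unknown state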
+
+def inventory_dell_poweredge_status(info):
+    if info:
+        return [ ( None, None ) ]
+
+def check_dell_poweredge_status(item, _no_params, info):
+    di = dict()
+    di['racURL'], di['Chassis'], di['Slot'], di['Model'], status, di['ServiceTag'] = info[0]
+
+    state_table = {
+        "1" : ( "other, ", 1 ),
+        "2" : ( "unknown, ", 1 ),
+        "3" : ( "", 0 ),
+        "4" : ( "nonCritical, ", 1 ),
+        "5" : ( "Critical, ", 2 ),
+        "6" : ( "NonRecoverable, ", 2 ),
+    }
+    # Fall back to a (text, state) tuple so that unexpected status
+    # values unpack cleanly; a single lookup is sufficient.
+    infotext, state = state_table.get(status, ( "unknown, ", 2 ))
+    for parameter, value in di.items():
+        infotext += "%s: %s, " % ( parameter, value )
+    infotext = re.sub(", $","", infotext)
+
+    return state, infotext
+
+check_info["dell_poweredge_status"] = {
+    "check_function"      : check_dell_poweredge_status,
+    "inventory_function"  : inventory_dell_poweredge_status,
+    "service_description" : "PowerEdge Health",
+    "has_perfdata"        : False,
+    "snmp_info"           : ( ".1.3.6.1.4.1.674.10892.5", [
+                                  "1.1.6.0",         # racURL 0
+                                  "1.2.2.0",         # chassisSystemName ? 1
+                                  "1.3.5.0",         # SystemBladeSlotNumber 2
+                                  "1.3.12.0",        # systemModelName 3
+                                  "2.1.0",           # globalSystemStatus 4
+                                  "4.300.10.1.11.1", # chassisServiceTagName 5
+                            ]),
+    "snmp_scan_function"  : lambda oid: oid('.1.3.6.1.2.1.1.2.0') == ".1.3.6.1.4.1.674.10892.5",
+}
+
diff -Nru check-mk-1.2.2p3/dell_poweredge_temp check-mk-1.2.6p12/dell_poweredge_temp
--- check-mk-1.2.2p3/dell_poweredge_temp 1970-01-01 00:00:00.000000000 +0000
+++ check-mk-1.2.6p12/dell_poweredge_temp 2015-06-24 09:48:36.000000000 +0000
@@ -0,0 +1,96 @@
+#!/usr/bin/python
+# -*- encoding: utf-8; py-indent-offset: 4 -*-
+# +------------------------------------------------------------------+
+# |             ____ _               _        __  __ _  __           |
+# |            / ___| |__   ___  ___| | __   |  \/  | |/ /           |
+# |           | |   | '_ \ / _ \/ __| |/ /   | |\/| | ' /            |
+# |           | |___| | | |  __/ (__|   <    | |  | | . \            |
+# |            \____|_| |_|\___|\___|_|\_\___|_|  |_|_|\_\           |
+# |                                                                  |
+# | Copyright Mathias Kettner 2014             mk@mathias-kettner.de |
+# +------------------------------------------------------------------+
+#
+# This file is part of Check_MK.
+# The official homepage is at http://mathias-kettner.de/check_mk.
+#
+# check_mk is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by
+# the Free Software Foundation in version 2. check_mk is distributed
+# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with-
+# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE. See the GNU General Public License for more de-
+# tails. You should have received a copy of the GNU General Public
+# License along with GNU Make; see the file COPYING. If not, write
+# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
+# Boston, MA 02110-1301 USA.
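check_dell_poweredge_status above queries six scalar OIDs, so the parsed info arrives as a single row that is unpacked directly. A hedged illustration with made-up values (the column order follows the snmp_info list above; the URL, names and tag are hypothetical):

info = [ [
    "https://rac.example.com",  # racURL (hypothetical)
    "chassis-1",                # chassisSystemName (hypothetical)
    "3",                        # SystemBladeSlotNumber
    "PowerEdge M620",           # systemModelName
    "3",                        # globalSystemStatus: 3 means OK
    "ABC1234",                  # chassisServiceTagName (hypothetical)
] ]
print check_dell_poweredge_status(None, None, info)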
+ +def dell_poweredge_temp_makeitem(chassisIndex, Index, LocationName): + if LocationName: + item = LocationName + else: + item = chassisIndex + "-" + Index + if item.endswith(" Temp"): + item = item[:-5] + return item + + +def inventory_dell_poweredge_temp(info): + for line in info: + if line[2] != '1': # StateSettings not 'unknown' + item = dell_poweredge_temp_makeitem(line[0], line[1], line[5]) + yield item, None + + +def check_dell_poweredge_temp(item, _no_params, info): + for chassisIndex, Index, StateSettings, Status, Reading, LocationName, \ + UpperCritical, UpperNonCritical in info: + + if item == dell_poweredge_temp_makeitem(chassisIndex, Index, LocationName): + temp = int(Reading) / 10.0 + warn = int(UpperNonCritical) / 10.0 + crit = int(UpperCritical) / 10.0 + state_table = { + "1" : ("other", 1), + "2" : ("unknown", 1), + "3" : ("", 0), + "4" : ("nonCriticalUpper", 1), + "5" : ("CriticalUpper", 2), + "6" : ("NonRecoverableUpper", 2), + "7" : ("nonCriticalLower", 1), + "8" : ("CriticalLower", 2), + "9" : ("NonRecoverableLower", 2), + "10" : ("failed", 2), + } + state_txt, state = state_table.get(Status, ("unknown state", 3)) + if state: + yield state, state_txt + yield check_temperature(temp, (warn, crit)) + + + +check_info["dell_poweredge_temp"] = { + "check_function" : check_dell_poweredge_temp, + "inventory_function" : inventory_dell_poweredge_temp, + "service_description" : "Temperature %s", + "has_perfdata" : True, + "snmp_info" : ( ".1.3.6.1.4.1.674.10892.5.4.700.20.1", [ + "1", # temperatureProbechassisIndex + "2", # temperatureProbeIndex + "4", # temperatureProbeStateSettings + "5", # temperatureProbeStatus + "6", # temperatureProbeReading + #"7", # temperatureProbeType + "8", # temperatureProbeLocationName + #"9", # temperatureProbeUpperNonRecoverableThreshold + "10", # temperatureProbeUpperCriticalThreshold + "11", # temperatureProbeUpperNonCriticalThreshold + #"12", # temperatureProbeLowerNonCriticalThreshold + #"13", # temperatureProbeLowerCriticalThreshold + #"14", # temperatureProbeLowerNonRecoverableThreshold + #"15", # temperatureProbeCapabilities + #"16", # temperatureProbeDiscreteReading + ]), + "snmp_scan_function" : lambda oid: oid('.1.3.6.1.2.1.1.2.0') == ".1.3.6.1.4.1.674.10892.5", + "includes" : [ "temperature.include" ], +} + diff -Nru check-mk-1.2.2p3/df check-mk-1.2.6p12/df --- check-mk-1.2.2p3/df 2013-11-05 09:23:07.000000000 +0000 +++ check-mk-1.2.6p12/df 2015-09-21 10:59:54.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. 
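dell_poweredge_temp above receives OMSA readings in tenths of a degree, so the reading and both upper thresholds are divided by 10 before being handed to check_temperature from temperature.include. A small sketch of that conversion and of the item-naming helper; all probe values below are invented.

def make_item(chassis_index, index, location_name):
    # Same rule as dell_poweredge_temp_makeitem: prefer the location name,
    # fall back to "<chassis>-<index>", and strip a trailing " Temp".
    item = location_name if location_name else chassis_index + "-" + index
    if item.endswith(" Temp"):
        item = item[:-5]
    return item

# Invented probe row: Reading, UpperNonCritical, UpperCritical in 1/10 deg C
reading, upper_noncrit, upper_crit = "305", "420", "470"
temp = int(reading) / 10.0        # 30.5 deg C
warn = int(upper_noncrit) / 10.0  # 42.0 deg C
crit = int(upper_crit) / 10.0     # 47.0 deg C

print(make_item("1", "4", "System Board Inlet Temp"))  # -> "System Board Inlet"
print(temp, (warn, crit))                              # -> 30.5 (42.0, 47.0)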
@@ -31,14 +31,46 @@
 # /dev/sda1 reiserfs 256666 16052 227362 7% /boot
 # /dev/mapper/mirrored-database ext3 20642428 1027112 19405604 6% /mirrored/database
+# Another example from a Windows 7 system:
+# <<<df>>>
+# SYSTEM NTFS 312569172 180648472 131920700 58% C:\
+# Data NTFS 976506816 528665344 447841472 55% D:\
+# PS3 PlayStation(R)3 File System 0 0 0 0% P:\
+
+def df_parse_info(info):
+    df_blocks = []
+    df_inodes = []
+    lines = iter(info)
+    try:
+        is_inode = False
+        while True:
+            line = lines.next()
+            if line[-1] == '[df_inodes_start]':
+                is_inode = True
+                continue
+            elif line[-1] == '[df_inodes_end]':
+                is_inode = False
+                continue
+            if not is_inode:
+                # Handle known cases, where the file system contains spaces
+                if line[2] == "File" and line[3] == "System":
+                    line = [ line[0], " ".join(line[1:4]) ] + line[4:]
+                df_blocks.append(line)
+            else:
+                df_inodes.append(line)
+    except StopIteration:
+        pass
+
+    return df_blocks, df_inodes
+
 def inventory_df(info):
+    df_blocks, df_inodes = df_parse_info(info)
     mplist = []
-    for line in info:
+    for line in df_blocks:
         if line[1] in inventory_df_exclude_fs:
             continue # ignore this filesystem type
-        size_kb = int(line[2])
-        if size_kb == 0 or line[5] == '-':
+        if line[2] == '-' or int(line[2]) == 0 or line[5] == '-':
             continue # exclude filesystems without size
         mountpoint = " ".join(line[6:]).replace('\\', '/') # Windows \ is replaced with /
@@ -51,8 +83,10 @@
 def check_df(item, params, info):
-    fslist = []
-    for line in info:
+    fslist_blocks = []
+    fslist_inodes = []
+    df_blocks, df_inodes = df_parse_info(info)
+    for idx, line in enumerate(df_blocks):
         # df outputs seven columns:
         # DEVICE FS-TYPE SIZE(KB) USED(KB) AVAIL(KB) USED(%) MOUNTPOINT
         # The mount point may contain spaces (seen on VMWare volumes and on ESX)
@@ -65,14 +99,15 @@
         # to compute the used space.
         size_mb = int(line[2]) / 1024.0
         avail_mb = int(line[4]) / 1024.0
-        fslist.append((mountpoint, size_mb, avail_mb))
-
-    return df_check_filesystem_list(item, params, fslist)
+        fslist_blocks.append((mountpoint, size_mb, avail_mb))
+        if df_inodes:
+            fslist_inodes.append((mountpoint, int(df_inodes[idx][2]), int(df_inodes[idx][4])))
+    return df_check_filesystem_list(item, params, fslist_blocks, fslist_inodes)
 check_info['df'] = {
     "check_function"          : check_df,
     "inventory_function"      : inventory_df,
-    "service_description"     : "fs_%s",
+    "service_description"     : "Filesystem %s",
     "has_perfdata"            : True,
     "group"                   : "filesystem",
     "default_levels_variable" : "filesystem_default_levels",
diff -Nru check-mk-1.2.2p3/df.include check-mk-1.2.6p12/df.include
--- check-mk-1.2.2p3/df.include 2013-11-05 09:23:07.000000000 +0000
+++ check-mk-1.2.6p12/df.include 2015-09-21 10:59:54.000000000 +0000
@@ -7,7 +7,7 @@
 # |           | |___| | | |  __/ (__|   <    | |  | | . \            |
 # |            \____|_| |_|\___|\___|_|\_\___|_|  |_|_|\_\           |
 # |                                                                  |
-# | Copyright Mathias Kettner 2013             mk@mathias-kettner.de |
+# | Copyright Mathias Kettner 2014             mk@mathias-kettner.de |
 # +------------------------------------------------------------------+
 #
 # This file is part of Check_MK.
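df_parse_info above splits one agent section into a block table and an inode table using the [df_inodes_start]/[df_inodes_end] markers, and re-joins filesystem types that contain spaces. The following self-contained approximation runs the same splitting on a mocked agent section; the sample lines are invented.

# Mocked agent output, already split into words per line (as Check_MK does).
SAMPLE = [
    ["/dev/sda1", "ext4", "10000", "5000", "5000", "50%", "/"],
    ["PS3", "PlayStation(R)3", "File", "System", "0", "0", "0", "0%", "P:\\"],
    ["[df_inodes_start]"],
    ["/dev/sda1", "ext4", "65536", "1000", "64536", "2%", "/"],
    ["[df_inodes_end]"],
]

def parse(info):
    # Same idea as df_parse_info: everything between the inode markers goes
    # into df_inodes, all remaining lines into df_blocks.
    df_blocks, df_inodes = [], []
    is_inode = False
    for line in info:
        if line[-1] == "[df_inodes_start]":
            is_inode = True
        elif line[-1] == "[df_inodes_end]":
            is_inode = False
        elif is_inode:
            df_inodes.append(line)
        else:
            if len(line) > 3 and line[2] == "File" and line[3] == "System":
                # re-join filesystem types that contain spaces
                line = [line[0], " ".join(line[1:4])] + line[4:]
            df_blocks.append(line)
    return df_blocks, df_inodes

blocks, inodes = parse(SAMPLE)
print(blocks[1][1])  # -> "PlayStation(R)3 File System"
print(inodes[0])     # -> inode line for /dev/sda1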
@@ -31,7 +31,7 @@
 filesystem_default_levels = {} # can also be dropped some day in future
 # Filesystems to ignore (shouldn't be sent by agent anyway)
-inventory_df_exclude_fs = [ 'nfs', 'smbfs', 'cifs', 'iso9660' ]
+inventory_df_exclude_fs = [ 'tmpfs', 'nfs', 'smbfs', 'cifs', 'iso9660' ]
 inventory_df_exclude_mountpoints = [ '/dev' ]
 # Grouping of filesystems into groups that are monitored as one entity
@@ -55,9 +55,9 @@
 # }
 factory_settings["filesystem_default_levels"] = {
-    "levels"          : (80, 90), # warn/crit in percent
+    "levels"          : (80.0, 90.0), # warn/crit in percent
     "magic_normsize"  : 20,       # Standard size is 20 GB
-    "levels_low"      : (50, 60), # Never move warn level below 50% due to magic factor
+    "levels_low"      : (50.0, 60.0), # Never move warn level below 50% due to magic factor
     "trend_range"     : 24,
     "trend_perfdata"  : True,    # do send performance data for trends
 }
@@ -101,16 +101,28 @@
 # old style params a'la (80, 90). As soon as we drop support for that
 # (can happen any decade now), we can get rid of this function.
 def get_filesystem_levels(host, mountpoint, size_gb, params):
-
+    mega = 1024 * 1024
+    giga = mega * 1024
     # Start with factory settings
     levels = factory_settings["filesystem_default_levels"].copy()
+
+    def convert_legacy_levels(value):
+        if type(params) == tuple or not params.get("flex_levels"):
+            return tuple(map(float, value))
+        else:
+            return value
+
     # convert default levels to dictionary. This is in order to support
     # old style levels like (80, 90)
     if type(filesystem_default_levels) == dict:
+        fs_default_levels = filesystem_default_levels.copy()
+        fs_levels = fs_default_levels.get("levels")
+        if fs_levels:
+            fs_default_levels["levels"] = convert_legacy_levels(fs_levels)
         levels.update(filesystem_default_levels)
     else:
         levels = factory_settings["filesystem_default_levels"].copy()
-        levels["levels"] = filesystem_default_levels[:2]
+        levels["levels"] = convert_legacy_levels(filesystem_default_levels[:2])
         if len(filesystem_default_levels) == 2:
             levels["magic"] = None
         else:
@@ -121,16 +133,41 @@
         levels.update(params)
     else: # simple format - explicitly override levels and magic
-        levels["levels"] = params[:2]
+        levels["levels"] = convert_legacy_levels(params[:2])
         if len(params) >= 3:
             levels["magic"] = params[2]
-    warn, crit = levels["levels"]
+    # Determine real warn, crit levels
+    if type(levels["levels"]) == tuple:
+        warn, crit = levels["levels"]
+    else:
+        # A list of levels. Choose the correct one depending on the
+        # size of the current filesystem. We do not make the first
+        # rule match, but that with the largest size_gb. That way
+        # the order of the entries is not important.
+ found = False + found_size = 0 + for to_size, this_levels in levels["levels"]: + if size_gb * giga > to_size and to_size >= found_size: + warn, crit = this_levels + found_size = to_size + found = True + if not found: + warn, crit = 100.0, 100.0 # entry not found in list + # If the magic factor is used, take disk size and magic factor # into account in order to move levels magic = levels.get("magic") - if magic: + # We need a way to disable the magic factor so check + # if magic not 1.0 + if magic and magic != 1.0: + # convert warn/crit to percentage + if type(warn) != float: + warn = savefloat(warn * mega / float(size_gb * giga)) * 100 + if type(crit) != float: + crit = savefloat(crit * mega / float(size_gb * giga)) * 100 + normsize = levels["magic_normsize"] hgb_size = size_gb / float(normsize) felt_size = hgb_size ** magic @@ -145,14 +182,60 @@ if crit_scaled < lowest_critical_level: crit_scaled = lowest_critical_level else: - warn_scaled = warn - crit_scaled = crit + if type(warn) != float: + warn_scaled = savefloat(warn * mega / float(size_gb * giga)) * 100 + else: + warn_scaled = warn + + if type(crit) != float: + crit_scaled = savefloat(crit * mega / float(size_gb * giga)) * 100 + else: + crit_scaled = crit size_mb = size_gb * 1024 warn_mb = savefloat(size_mb * warn_scaled / 100) crit_mb = savefloat(size_mb * crit_scaled / 100) levels["levels_mb"] = (warn_mb, crit_mb) - levels["levels_text"] = "(levels at %.1f/%.1f%%)" % (warn_scaled, crit_scaled) + if type(warn) == float: + if warn_scaled < 0 and crit_scaled < 0: + label = 'levels at free space below' + warn_scaled *= -1 + crit_scaled *= -1 + else: + label = 'levels at' + levels["levels_text"] = "(%s %.2f/%.2f%%)" % (label, warn_scaled, crit_scaled) + else: + if warn * mega < 0 and crit * mega < 0: + label = 'levels at free space below' + warn *= -1 + crit *= -1 + else: + label = 'levels at' + warn_hr = get_bytes_human_readable(warn * mega) + crit_hr = get_bytes_human_readable(crit * mega) + levels["levels_text"] = "(%s %s/%s)" % (label, warn_hr, crit_hr) + + if "inodes_levels" in params: + if type(levels["inodes_levels"]) == tuple: + warn, crit = levels["inodes_levels"] + else: + # A list of inode levels. Choose the correct one depending on the + # size of the current filesystem. We do not make the first + # rule match, but that with the largest size_gb. That way + # the order of the entries is not important. + found = False + found_size = 0 + for to_size, this_levels in levels["inodes_levels"]: + if size_gb * giga > to_size and to_size >= found_size: + warn, crit = this_levels + found_size = to_size + found = True + if not found: + warn, crit = 100.0, 100.0 # entry not found in list + levels["inodes_levels"] = warn, crit + else: + levels["inodes_levels"] = (None, None) + return levels # Legacy function for checks that do not support groups yet @@ -160,39 +243,48 @@ return df_check_filesystem_list(mountpoint, params, [(mountpoint, size_mb, avail_mb)]) # New function for checks that support groups. 
-def df_check_filesystem_list(item, params, fslist): +def df_check_filesystem_list(item, params, fslist_blocks, fslist_inodes = None): if "patterns" in params: import fnmatch patterns = params["patterns"] count = 0 - total_size = 0 - total_avail = 0 - for mp, size_mb, avail_mb in fslist: + total_blocks_size = 0 + total_blocks_avail = 0 + total_inodes = 0 + total_inodes_avail = 0 + for idx, (mp, size_mb, avail_mb) in enumerate(fslist_blocks): for pattern in patterns: if fnmatch.fnmatch(mp, pattern): count += 1 - total_size += size_mb - total_avail += avail_mb + total_blocks_size += size_mb + total_blocks_avail += avail_mb + if fslist_inodes: + total_inodes += fslist_inodes[idx][1] + total_inodes_avail += fslist_inodes[idx][2] break # If no filesystem has been found we cannot do the # actual check since the size is zero. if count == 0: - return (3, "UKNOWN - No filesystem matching the patterns") + return (3, "No filesystem matching the patterns") else: - status, infotext, perfdata = df_check_filesystem_single(g_hostname, item, total_size, total_avail, params) + status, infotext, perfdata = df_check_filesystem_single(g_hostname, item, total_blocks_size, total_blocks_avail, total_inodes, total_inodes_avail, params) infotext += " (%d filesystems)" % count return status, infotext, perfdata else: - for mp, size_mb, avail_mb in fslist: + for idx, (mp, size_mb, avail_mb) in enumerate(fslist_blocks): if mp == item: - return df_check_filesystem_single(g_hostname, mp, size_mb, avail_mb, params) - return (3, "UNKNOWN - filesystem not found") + if fslist_inodes: + inodes_total, inodes_avail = fslist_inodes[idx][1], fslist_inodes[idx][2] + else: + inodes_total, inodes_avail = None, None + return df_check_filesystem_single(g_hostname, mp, size_mb, avail_mb, inodes_total, inodes_avail, params) + return (3, "filesystem not found") -def df_check_filesystem_single(hostname, mountpoint, size_mb, avail_mb, params): +def df_check_filesystem_single(hostname, mountpoint, size_mb, avail_mb, inodes_total, inodes_avail, params): if size_mb == 0: - return (1, "WARN - size of filesystem is 0 MB") + return (1, "size of filesystem is 0 MB", []) used_mb = size_mb - avail_mb used_perc = 100.0 * (float(used_mb) / size_mb) @@ -200,7 +292,8 @@ # Get warning and critical levels already with 'magic factor' applied levels = get_filesystem_levels(g_hostname, mountpoint, size_gb, params) - warn_mb, crit_mb = levels["levels_mb"] + warn_mb, crit_mb = levels["levels_mb"] + warn_inode, crit_inode = levels["inodes_levels"] # Take into account magic scaling factor (third optional argument # in check params). A factor of 1.0 changes nothing. Factor should @@ -211,8 +304,20 @@ # TODO: In some future version use a fixed name as perf variable perf_var = mountpoint.replace(" ", "_") perfdata = [(perf_var, str(used_mb) + 'MB', warn_mb, crit_mb, 0, size_mb)] - infotext = "%.1f%% used (%.2f of %.1f GB), %s" % \ - (used_perc, used_mb / 1024.0, size_gb, levels["levels_text"]) + + used_hr = get_bytes_human_readable(used_mb * 1024 * 1024) + size_hr = get_bytes_human_readable(size_mb * 1024 * 1024) + # If both numbers end with both MB or GB or TB, then drop the first one + if used_hr[-2:] == size_hr[-2:]: + used_hr = used_hr[:-3] + + # Show enough decimal digits so that very small percentages are still + # visible! 
+    if used_perc > 0:
+        perc_precision = max(1, 2 - int(round(math.log(used_perc, 10))))
+    else:
+        perc_precision = 1
+    infotext = "%%.%df%%%% used (%%s of %%s), %%s" % perc_precision % (used_perc, used_hr, size_hr, levels["levels_text"])
     # Trends. The trends are computed in two steps. In the first step we
     # compute the delta to the last check, using a normal check_mk counter.
@@ -239,15 +344,15 @@
         this_time = time.time()
         # first compute current rate in MB/s by computing delta since last check
-        timedif, rate = get_counter("df.%s.delta" % mountpoint, this_time, used_mb, True)
+        rate = get_rate("df.%s.delta" % mountpoint, this_time, used_mb, allow_negative=True, onwrap=ZERO)
         if levels.get("trend_perfdata"):
             # Change in 1.1.13i3: The trend perfdata always outputs
             # the growth in MB/24h, not any longer in MB/trendrange
             perfdata.append(("growth", rate * H24))
         # average trend, initialize with zero, rate_avg is in MB/s
-        timedif, rate_avg = get_average("df.%s.trend" % mountpoint,
-                                        this_time, rate, range_sec / 60.0, True)
+        rate_avg = get_average("df.%s.trend" % mountpoint,
+                               this_time, rate, range_sec / 60.0, True)
         # rate_avg is growth in MB/s, trend is in MB per trend range hours
         trend = rate_avg * range_sec
@@ -264,12 +369,13 @@
                 wa, cr = trend_mb
                 warn_perf, crit_perf = wa, cr
                 if trend >= wa:
-                    problems.append("growing too fast (levels at %s/%s per %.1fh)!" %
+                    problems.append("growing too fast (levels at %s/%s per %.1f h)(!" %
                        ( get_bytes_human_readable(wa * MB), get_bytes_human_readable(cr * MB), range))
                     status = max(1, status)
                     if trend >= cr:
                         status = 2
                         problems[-1] += "!"
+                    problems[-1] += ")"
             else:
                 wa, cr = None, None
@@ -285,38 +391,59 @@
                 else:
                     warn_perf, crit_perf = wa, cr
                 if trend >= wa:
-                    problems.append("growing too fast (levels at %.3f%%/%.3f%% per %.1fh)!" %
+                    problems.append("growing too fast (levels at %.3f%%/%.3f%% per %.1f h)(!" %
                        ( wa_perc, cr_perc, range))
                     status = max(1, status)
                     if trend >= cr:
                         status = 2
                         problems[-1] += "!"
+                    problems[-1] += ")"
-        if levels.get("trend_perfdata"):
-            # New in 1.1.13i3: output trend not as MB / trend_range, but as
-            # MB / 24 hours. The same holds for the warn and crit information.
-            # It is configured in MB / trend range but in the performance data
-            # it's sent as MB / 24h.
-            perfdata.append(("trend", rate_avg * H24,
-                            warn_perf != None and (warn_perf / range_sec * H24) or None,
-                            crit_perf != None and (crit_perf / range_sec * H24) or None,
-                            0, size_mb / range))
         # compute time until filesystem is full (only for positive trend, of course)
+
+        # The start value of hours_left is negative. The pnp graph and the perfometer
+        # will interpret this as infinite -> not growing
+        hours_left = -1
         if trend > 0:
             space_left = size_mb - used_mb
             hours_left = space_left / trend * range
             timeleft = levels.get("trend_timeleft")
+            def format_hours(hours):
+                if hours > 4 * 7 * 24: # 4 weeks
+                    return "%0d weeks" % (hours/ (7 * 24))
+                elif hours > 7 * 24: # 1 week
+                    return "%0.1f weeks" % (hours/ (7 * 24))
+                elif hours > 2 * 24: # 2 days
+                    return "%0.1f days" % (hours/24)
+                else:
+                    return "%d hours" % hours
+
             if timeleft:
                 wa, cr = timeleft
                 if hours_left <= cr:
                     status = 2
-                    problems.append("only %.1fh until disk full!!" % hours_left)
+                    problems.append("only %s until disk full(!!)" % format_hours(hours_left))
                 elif hours_left <= wa:
                     status = max(status, 1)
-                    problems.append("only %.1fh until disk full!"
% hours_left) - elif hours_left <= wa * 2: - problems.append("time left: %.1fh" % hours_left) + problems.append("only %s until disk full(!)" % format_hours(hours_left)) + elif hours_left <= wa * 2 or levels.get("trend_showtimeleft"): + problems.append("time left until disk full: %s" % format_hours(hours_left)) + elif levels.get("trend_showtimeleft"): + problems.append("time left until disk full: %s" % format_hours(hours_left)) + + if levels.get("trend_perfdata"): + # New in 1.1.13i3: output trend not as MB / trend_range, but as + # MB / 24 hours. The same holds for the warn and crit information. + # It is configured in MB / trend range but in the performance data + # it's sent as MB / 24h. + perfdata.append(("trend", rate_avg * H24, + warn_perf != None and (warn_perf / range_sec * H24) or None, + crit_perf != None and (crit_perf / range_sec * H24) or None, + 0, size_mb / range)) + + if levels.get("trend_showtimeleft"): + perfdata.append(("trend_hoursleft", hours_left)) except MKCounterWrapped: @@ -326,12 +453,48 @@ # for the trend information perfdata = [] + if warn_mb <= -1: + #Negativ levels, so calculate mb left + rest_mb = size_mb - used_mb + crit_mb = crit_mb * -1 + warn_mb = warn_mb * -1 + if rest_mb <= crit_mb: + status = 2 + elif rest_mb <= warn_mb: + status = max(1, status) + else: + if used_mb >= crit_mb: + status = 2 + elif used_mb >= warn_mb: + status = max(1, status) + + if problems: + infotext += " - %s" % ", ".join(problems) + problems = [] + + # Check inode levels + if inodes_total: + inodes_avail_perc = 100.0 * inodes_avail / inodes_total + inodes_warn, inodes_crit = levels["inodes_levels"] + if inodes_warn != None: + if type(inodes_warn) == int: # Absolute levels + if inodes_crit > inodes_avail: + status = max(2, status) + problems.append("less than %dk inodes available(!!)" % (crit_inode / 1000)) + elif inodes_warn > inodes_avail: + status = max(1, status) + problems.append("less than %dk inodes available(!)" % (warn_inode / 1000)) + else: # Percentage levels + if inodes_crit > inodes_avail_perc: + status = max(2, status) + problems.append("less than %0.2f%% inodes available(!!)" % inodes_crit) + elif inodes_warn > inodes_avail_perc: + status = max(1, status) + problems.append("less than %.02f%% inodes available(!)" % inodes_warn) + infotext += ", inodes available %dk/%0.2f%%" % (inodes_avail / 1000, inodes_avail_perc) + + if problems: + infotext += " - %s" % ", ".join(problems) + problems = [] - if used_mb >= crit_mb: - status = 2 - elif used_mb >= warn_mb: - status = max(1, status) - - if len(problems) > 0: - infotext += " - " + ", ".join(problems) - return (status, nagios_state_names[status] + " - " + infotext, perfdata) + return (status, infotext, perfdata) diff -Nru check-mk-1.2.2p3/df_netapp check-mk-1.2.6p12/df_netapp --- check-mk-1.2.2p3/df_netapp 2013-11-05 09:23:07.000000000 +0000 +++ check-mk-1.2.6p12/df_netapp 2015-06-24 09:48:36.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. 
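For the size-dependent level lists introduced in df.include above: from a list of (to_size, (warn, crit)) pairs, get_filesystem_levels picks the entry with the largest to_size that is still below the filesystem size, independent of list order, and falls back to 100%/100% when nothing matches. A worked, self-contained sketch of just that selection loop; the thresholds below are invented.

GIGA = 1024 ** 3

def pick_levels(levels_list, size_gb):
    # Mirror of the selection loop in get_filesystem_levels: take the
    # matching entry with the largest to_size, default to 100%/100%.
    warn, crit = 100.0, 100.0
    found_size = 0
    for to_size, this_levels in levels_list:
        if size_gb * GIGA > to_size and to_size >= found_size:
            warn, crit = this_levels
            found_size = to_size
    return warn, crit

levels = [(10 * GIGA, (80.0, 90.0)), (100 * GIGA, (85.0, 95.0))]
print(pick_levels(levels, 50))   # 50 GB fs  -> (80.0, 90.0)
print(pick_levels(levels, 500))  # 500 GB fs -> (85.0, 95.0)
print(pick_levels(levels, 5))    # 5 GB fs   -> (100.0, 100.0), no entry matches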
@@ -26,11 +26,17 @@ check_includes['df_netapp'] = [ "df.include", "df_netapp.include" ] -check_info['df_netapp'] = (check_df_netapp, "fs_%s", 1, inventory_df_netapp) -snmp_info['df_netapp'] = (".1.3.6.1.4.1.789.1.5.4.1", [ 2, 29, 30 ] ) -checkgroup_of["df_netapp"] = "filesystem" -snmp_scan_functions['df_netapp'] = \ - lambda oid: is_netapp_filer(oid) and oid(".1.3.6.1.4.1.789.1.5.4.1.29.1") -check_default_levels['df_netapp'] = "filesystem_default_levels" + +check_info["df_netapp"] = { + 'check_function': check_df_netapp, + 'inventory_function': inventory_df_netapp, + 'service_description': 'Filesystem %s', + 'has_perfdata': True, + 'snmp_info': ('.1.3.6.1.4.1.789.1.5.4.1', [2, 29, 30]), + 'snmp_scan_function': \ + lambda oid: is_netapp_filer(oid) and oid(".1.3.6.1.4.1.789.1.5.4.1.29.*"), + 'group': 'filesystem', + 'default_levels_variable': 'filesystem_default_levels', +} diff -Nru check-mk-1.2.2p3/df_netapp32 check-mk-1.2.6p12/df_netapp32 --- check-mk-1.2.2p3/df_netapp32 2013-11-05 09:23:07.000000000 +0000 +++ check-mk-1.2.6p12/df_netapp32 2015-06-24 09:48:36.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. @@ -26,11 +26,17 @@ check_includes['df_netapp32'] = [ "df.include", "df_netapp.include" ] -check_info['df_netapp32'] = (check_df_netapp, "fs_%s", 1, inventory_df_netapp) -snmp_info['df_netapp32'] = (".1.3.6.1.4.1.789.1.5.4.1", [ 2, 3, 4 ] ) -checkgroup_of["df_netapp32"] = "filesystem" -snmp_scan_functions['df_netapp32'] = \ - lambda oid: is_netapp_filer(oid) and not oid(".1.3.6.1.4.1.789.1.5.4.1.29.1") -check_default_levels['df_netapp32'] = "filesystem_default_levels" + +check_info["df_netapp32"] = { + 'check_function': check_df_netapp, + 'inventory_function': inventory_df_netapp, + 'service_description': 'Filesystem %s', + 'has_perfdata': True, + 'snmp_info': ('.1.3.6.1.4.1.789.1.5.4.1', [2, 3, 4]), + 'snmp_scan_function': \ + lambda oid: is_netapp_filer(oid) and not oid(".1.3.6.1.4.1.789.1.5.4.1.29.*"), + 'group': 'filesystem', + 'default_levels_variable': 'filesystem_default_levels', +} diff -Nru check-mk-1.2.2p3/df_netapp.include check-mk-1.2.6p12/df_netapp.include --- check-mk-1.2.2p3/df_netapp.include 2013-11-05 09:23:07.000000000 +0000 +++ check-mk-1.2.6p12/df_netapp.include 2015-09-21 10:59:54.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. diff -Nru check-mk-1.2.2p3/discovery.py check-mk-1.2.6p12/discovery.py --- check-mk-1.2.2p3/discovery.py 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/discovery.py 2015-09-21 10:59:54.000000000 +0000 @@ -0,0 +1,869 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . 
\            |
+# |            \____|_| |_|\___|\___|_|\_\___|_|  |_|_|\_\           |
+# |                                                                  |
+# | Copyright Mathias Kettner 2014             mk@mathias-kettner.de |
+# +------------------------------------------------------------------+
+#
+# This file is part of Check_MK.
+# The official homepage is at http://mathias-kettner.de/check_mk.
+#
+# check_mk is free software;  you can redistribute it and/or modify it
+# under the  terms of the  GNU General Public License  as published by
+# the Free Software Foundation in version 2.  check_mk is  distributed
+# in the hope that it will be useful, but WITHOUT ANY WARRANTY;  with-
+# out even the implied warranty of  MERCHANTABILITY  or  FITNESS FOR A
+# PARTICULAR PURPOSE. See the  GNU General Public License for more de-
+# ails.  You should have received  a copy of the  GNU  General Public
+# License along with GNU Make; see the file  COPYING.  If  not, write
+# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
+# Boston, MA 02110-1301 USA.
+
+#   .--cmk -I--------------------------------------------------------------.
+#   |                               _           ___                        |
+#   |                 ___ _ __ ___ | | __        |_ _|                     |
+#   |                / __| '_ ` _ \| |/ / _____   | |                      |
+#   |               | (__| | | | | |   < |_____|  | |                      |
+#   |                \___|_| |_| |_|_|\_\        |___|                     |
+#   |                                                                      |
+#   +----------------------------------------------------------------------+
+#   |  Functions for command line options -I and -II                       |
+#   '----------------------------------------------------------------------'
+
+# Function implementing cmk -I and cmk -II. This is directly
+# being called from the main option parsing code. The list
+# hostnames is already prepared by the main code. If it is
+# empty then we use all hosts and switch to using cache files.
+def do_discovery(hostnames, check_types, only_new):
+    use_caches = False
+    if not hostnames:
+        verbose("Discovering services on all hosts:\n")
+        hostnames = all_hosts_untagged
+        use_caches = True
+    else:
+        verbose("Discovering services on %s:\n" % ", ".join(hostnames))
+
+    # For clusters add their nodes to the list. Clusters themselves
+    # cannot be discovered, but the user is allowed to specify
+    # them and we do discovery on the nodes instead.
+    nodes = []
+    for h in hostnames:
+        nodes = nodes_of(h)
+        if nodes:
+            hostnames += nodes
+
+    # Then remove clusters and make list unique
+    hostnames = list(set([ h for h in hostnames if not is_cluster(h) ]))
+    hostnames.sort()
+
+    # Now loop through all hosts
+    for hostname in hostnames:
+        try:
+            verbose(tty_white + tty_bold + hostname + tty_normal + ":\n")
+            do_discovery_for(hostname, check_types, only_new, use_caches)
+            verbose("\n")
+        except Exception, e:
+            if opt_debug:
+                raise
+            verbose(" -> Failed: %s\n" % e)
+
+
+def do_discovery_for(hostname, check_types, only_new, use_caches):
+    # Usually we disable SNMP scan if cmk -I is used without a list of
+    # explicitly given hosts. But for hosts that have never been service-discovered
+    # yet (do not have autochecks), we enable SNMP scan.
+    do_snmp_scan = not use_caches or not has_autochecks(hostname)
+    new_items = discover_services(hostname, check_types, use_caches, do_snmp_scan)
+    if not check_types and not only_new:
+        old_items = [] # do not even read old file
+    else:
+        old_items = parse_autochecks_file(hostname)
+
+    # There are three ways of merging existing and newly discovered checks:
+    # 1. -II without --checks=
+    #        check_types is empty, only_new is False
+    #        --> completely drop old services, only use new ones
+    # 2. -II with --checks=
+    #        --> drop old services of those types
+    #        check_types is not empty, only_new is False
+    # 3.
-I + # --> just add new services + # only_new is True + + # Parse old items into a dict (ct, item) -> paramstring + result = {} + for check_type, item, paramstring in old_items: + # Take over old items if -I is selected or if -II + # is selected with --checks= and the check type is not + # one of the listed ones + if only_new or (check_types and check_type not in check_types): + result[(check_type, item)] = paramstring + + stats = {} + for check_type, item, paramstring in new_items: + if (check_type, item) not in result: + result[(check_type, item)] = paramstring + stats.setdefault(check_type, []).append((item, paramstring)) + + final_items = [] + for (check_type, item), paramstring in result.items(): + final_items.append((check_type, item, paramstring)) + final_items.sort() + save_autochecks_file(hostname, final_items) + + found_check_types = stats.keys() + found_check_types.sort() + if found_check_types: + for check_type in found_check_types: + verbose(" %s%3d%s %s\n" % (tty_green + tty_bold, len(stats[check_type]), tty_normal, check_type)) + if opt_verbose >= 2: + for item, paramstring in stats[check_type]: + verbose(" - %s%-30s%s %s%s\n" % ( + tty_bold, service_description(check_type, item), tty_blue, paramstring, tty_normal)) + else: + verbose(" nothing%s\n" % (only_new and " new" or "")) + +#. +# .--Discovery Check-----------------------------------------------------. +# | ____ _ _ _ | +# | | _ \(_)___ ___ ___| |__ ___ ___| | __ | +# | | | | | / __|/ __| / __| '_ \ / _ \/ __| |/ / | +# | | |_| | \__ \ (__ _ | (__| | | | __/ (__| < | +# | |____/|_|___/\___(_) \___|_| |_|\___|\___|_|\_\ | +# | | +# +----------------------------------------------------------------------+ +# | Active check for checking undiscovered services. | +# '----------------------------------------------------------------------' + +def check_discovery(hostname, ipaddress=None): + new_check_types = {} + lines = [] + + try: + services = get_host_services(hostname, use_caches=opt_use_cachefile, do_snmp_scan=inventory_check_do_scan, ipaddress=ipaddress) + for (check_type, item), (check_source, paramstring) in services.items(): + if check_source == "new": + new_check_types.setdefault(check_type, 0) + new_check_types[check_type] += 1 + lines.append("%s: %s\n" % (check_type, service_description(check_type, item))) + + if lines: + info = ", ".join([ "%s:%d" % e for e in new_check_types.items() ]) + output = "%d unchecked services (%s)\n" % (len(lines), info) + output += "".join(lines) + status = inventory_check_severity + else: + output = "no unchecked services found\n" + status = 0 + except SystemExit, e: + raise e + except Exception, e: + if opt_debug: + raise + # Honor rule settings for "Status of the Check_MK service". In case of + # a problem we assume a connection error here. + spec = exit_code_spec(hostname) + if isinstance(e, MKAgentError) or isinstance(e, MKSNMPError): + what = "connection" + else: + what = "exception" + status = spec.get(what, 3) + output = str(e) + "\n" + + if opt_keepalive: + global total_check_output + total_check_output += output + return status + else: + sys.stdout.write(nagios_state_names[status] + " - " + output) + sys.exit(status) + + +#. +# .--Helpers-------------------------------------------------------------. 
+# |                  _   _      _                                        |
+# |                 | | | | ___| |_ __   ___ _ __ ___                    |
+# |                 | |_| |/ _ \ | '_ \ / _ \ '__/ __|                   |
+# |                 |  _  |  __/ | |_) |  __/ |  \__ \                   |
+# |                 |_| |_|\___|_| .__/ \___|_|  |___/                   |
+# |                              |_|                                     |
+# +----------------------------------------------------------------------+
+# |  Various helper functions                                            |
+# '----------------------------------------------------------------------'
+
+def checktype_ignored_for_host(host, checktype):
+    if checktype in ignored_checktypes:
+        return True
+    ignored = host_extra_conf(host, ignored_checks)
+    for e in ignored:
+        if checktype == e or (type(e) == list and checktype in e):
+            return True
+    return False
+
+
+def service_ignored(hostname, check_type, service_description):
+    if check_type and check_type in ignored_checktypes:
+        return True
+    if service_description != None and in_boolean_serviceconf_list(hostname, service_description, ignored_services):
+        return True
+    if check_type and checktype_ignored_for_host(hostname, check_type):
+        return True
+    return False
+
+
+
+#.
+#   .--Discovery-----------------------------------------------------------.
+#   |              ____  _                                                 |
+#   |             |  _ \(_)___  ___ _____   _____ _ __ _   _               |
+#   |             | | | | / __|/ __/ _ \ \ / / _ \ '__| | | |              |
+#   |             | |_| | \__ \ (_| (_) \ V /  __/ |  | |_| |              |
+#   |             |____/|_|___/\___\___/ \_/ \___|_|   \__, |              |
+#   |                                                  |___/               |
+#   +----------------------------------------------------------------------+
+#   |  Core code of actual service discovery                               |
+#   '----------------------------------------------------------------------'
+
+
+
+# Create a table of autodiscovered services of a host. Do not save
+# this table anywhere. Do not read any previously discovered
+# services. The table has the following columns:
+# 1. Check type
+# 2. Item
+# 3. Parameter string (not evaluated)
+# Arguments:
+#   check_types: None -> try all check types, list -> omit scan in any case
+#   use_caches: True if cached agent data is being used (for -I without hostnames)
+#   do_snmp_scan: True if SNMP scan should be done (WATO: Full scan)
+# Error situation (unclear what to do):
+# - IP address cannot be looked up
+#
+# This function does not handle:
+# - clusters
+# - disabled services
+#
+# This function *does* handle:
+# - disabled check types
+#
+def discover_services(hostname, check_types, use_caches, do_snmp_scan, ipaddress=None):
+    if ipaddress == None:
+        ipaddress = lookup_ipaddress(hostname)
+
+    # Check types not specified (via --checks=)? Determine automatically
+    if not check_types:
+        check_types = []
+        if is_snmp_host(hostname):
+
+            # May we do an SNMP scan?
+            if do_snmp_scan:
+                check_types = snmp_scan(hostname, ipaddress)
+
+            # Otherwise use all check types that we already have discovered
+            # previously
+            else:
+                for check_type, item, params in read_autochecks_of(hostname):
+                    if check_type not in check_types and check_uses_snmp(check_type):
+                        check_types.append(check_type)
+
+        if is_tcp_host(hostname) or has_piggyback_info(hostname):
+            check_types += discoverable_check_types('tcp')
+
+    # Make hostname available as global variable in discovery functions
+    # (used e.g. by ps-discovery)
+    global g_hostname
+    g_hostname = hostname
+
+    discovered_services = []
+    try:
+        for check_type in check_types:
+            for item, paramstring in discover_check_type(hostname, ipaddress, check_type, use_caches):
+                discovered_services.append((check_type, item, paramstring))
+
+        return discovered_services
+    except KeyboardInterrupt:
+        raise MKGeneralException("Interrupted by Ctrl-C.")
+
+
+def snmp_scan(hostname, ipaddress):
+    # Make hostname globally available for scan functions.
+    # This is rarely used, but e.g. the scan for if/if64 needs
+    # this to evaluate if_disabled_if64_checks.
+    global g_hostname
+    g_hostname = hostname
+
+    vverbose("  SNMP scan:")
+    if not in_binary_hostlist(hostname, snmp_without_sys_descr):
+        sys_descr_oid = ".1.3.6.1.2.1.1.1.0"
+        sys_descr = get_single_oid(hostname, ipaddress, sys_descr_oid)
+        if sys_descr == None:
+            raise MKSNMPError("Cannot fetch system description OID %s" % sys_descr_oid)
+
+    found = []
+    for check_type, check in check_info.items():
+        if check_type in ignored_checktypes:
+            continue
+        elif not check_uses_snmp(check_type):
+            continue
+        basename = check_type.split(".")[0]
+        # The scan function should be assigned to the basename, because
+        # subchecks sharing the same SNMP info of course should have
+        # an identical scan function. But some checks do not do this
+        # correctly
+        scan_function = snmp_scan_functions.get(check_type,
+                snmp_scan_functions.get(basename))
+        if scan_function:
+            try:
+                result = scan_function(lambda oid: get_single_oid(hostname, ipaddress, oid))
+                if result is not None and type(result) not in [ str, bool ]:
+                    verbose("[%s] Scan function returns invalid type (%s).\n" %
+                                                        (check_type, type(result)))
+                elif result:
+                    found.append(check_type)
+                    vverbose(" " + check_type)
+            except MKGeneralException:
+                # some error messages which we explicitly want to show to the user
+                # should be raised through this
+                raise
+            except:
+                pass
+        else:
+            found.append(check_type)
+            vverbose(" " + tty_blue + tty_bold + check_type + tty_normal)
+
+    vverbose("\n")
+    found.sort()
+    return found
+
+def discover_check_type(hostname, ipaddress, check_type, use_caches):
+    # Skip this check type if it is ignored for that host
+    if service_ignored(hostname, check_type, None):
+        return []
+
+    # Skip SNMP checks on non-SNMP hosts
+    if check_uses_snmp(check_type) and not is_snmp_host(hostname):
+        return []
+
+    try:
+        discovery_function = check_info[check_type]["inventory_function"]
+        if discovery_function == None:
+            discovery_function = no_discovery_possible
+    except KeyError:
+        raise MKGeneralException("No such check type '%s'" % check_type)
+
+    section_name = check_type.split('.')[0] # make e.g. 'lsi' from 'lsi.arrays'
+
+    try:
+        info = None # default in case of exception
+        info = get_realhost_info(hostname, ipaddress, section_name,
+               use_caches and inventory_max_cachefile_age or 0, ignore_check_interval=True)
+
+    except MKAgentError, e:
+        if str(e) and str(e) != "Cannot get information from agent, processing only piggyback data.":
+            raise
+
+    except MKSNMPError, e:
+        if str(e):
+            raise
+
+    if info == None: # No data for this check type
+        return []
+
+    # Add information about nodes if check wants this. Note:
+    # in the node info we always put None, not the name of a node.
+    # During inventory we behave like a non-cluster. We do not know
+    # yet if the service is going to be clustered!
+    if check_info[check_type]["node_info"]:
+        info = [ [None] + line for line in info ]
+
+    # Now do the actual inventory
+    try:
+        # Convert with parse function if available
+        if section_name in check_info: # parse function must be defined for base check
+            parse_function = check_info[section_name]["parse_function"]
+            if parse_function:
+                info = check_info[section_name]["parse_function"](info)
+
+        # Check number of arguments of discovery function. Note: This
+        # check for the legacy API will be removed after 1.2.6.
+ if len(inspect.getargspec(discovery_function)[0]) == 2: + discovered_items = discovery_function(check_type, info) # discovery is a list of pairs (item, current_value) + else: + # New preferred style since 1.1.11i3: only one argument: info + discovered_items = discovery_function(info) + + # tolerate function not explicitely returning [] + if discovered_items == None: + discovered_items = [] + + # New yield based api style + elif type(discovered_items) != list: + discovered_items = list(discovered_items) + + result = [] + for entry in discovered_items: + if not isinstance(entry, tuple): + sys.stderr.write("%s: Check %s returned invalid discovery data (entry not a tuple): %r\n" % + (hostname, check_type, repr(entry))) + continue + + if len(entry) == 2: # comment is now obsolete + item, paramstring = entry + else: + try: + item, comment, paramstring = entry + except ValueError: + sys.stderr.write("%s: Check %s returned invalid discovery data (not 2 or 3 elements): %r\n" % + (hostname, check_type, repr(entry))) + continue + + description = service_description(check_type, item) + # make sanity check + if len(description) == 0: + sys.stderr.write("%s: Check %s returned empty service description - ignoring it.\n" % + (hostname, check_type)) + continue + + result.append((item, paramstring)) + + except Exception, e: + if opt_debug: + sys.stderr.write("Exception in discovery function of check type %s\n" % check_type) + raise + if opt_verbose: + sys.stderr.write("%s: Invalid output from agent or invalid configuration: %s\n" % (hostname, e)) + return [] + + return result + +def discoverable_check_types(what): # snmp, tcp, all + check_types = [ k for k in check_info.keys() + if check_info[k]["inventory_function"] != None + and (what == "all" + or check_uses_snmp(k) == (what == "snmp")) + ] + check_types.sort() + return check_types + + +# Creates a table of all services that a host has or could have according +# to service discovery. 
The result is a dictionary of the form +# (check_type, item) -> (check_source, paramstring) +# check_source is the reason/state/source of the service: +# "new" : Check is discovered but currently not yet monitored +# "old" : Check is discovered and already monitored (most common) +# "vanished" : Check had been discovered previously, but item has vanished +# "legacy" : Check is defined via legacy_checks +# "active" : Check is defined via active_checks +# "custom" : Check is defined via custom_checks +# "manual" : Check is a manual Check_MK check without service discovery +# "ignored" : discovered or static, but disabled via ignored_services +# "obsolete" : Discovered by vanished check is meanwhile ignored via ignored_services +# "clustered_new" : New service found on a node that belongs to a cluster +# "clustered_old" : Old service found on a node that belongs to a cluster +# This function is cluster-aware +def get_host_services(hostname, use_caches, do_snmp_scan, ipaddress=None): + if is_cluster(hostname): + return get_cluster_services(hostname, use_caches, do_snmp_scan) + else: + return get_node_services(hostname, ipaddress, use_caches, do_snmp_scan) + + +# Part of get_node_services that deals with discovered services +def get_discovered_services(hostname, ipaddress, use_caches, do_snmp_scan): + # Create a dict from check_type/item to check_source/paramstring + services = {} + + # Handle discovered services -> "new" + new_items = discover_services(hostname, None, use_caches, do_snmp_scan, ipaddress) + for check_type, item, paramstring in new_items: + services[(check_type, item)] = ("new", paramstring) + + # Match with existing items -> "old" and "vanished" + old_items = parse_autochecks_file(hostname) + for check_type, item, paramstring in old_items: + if (check_type, item) not in services: + services[(check_type, item)] = ("vanished", paramstring) + else: + services[(check_type, item)] = ("old", paramstring) + + return services + +# Do the actual work for a non-cluster host or node +def get_node_services(hostname, ipaddress, use_caches, do_snmp_scan): + services = get_discovered_services(hostname, ipaddress, use_caches, do_snmp_scan) + + # Identify clustered services + for (check_type, item), (check_source, paramstring) in services.items(): + descr = service_description(check_type, item) + if hostname != host_of_clustered_service(hostname, descr): + if check_source == "vanished": + del services[(check_type, item)] # do not show vanished clustered services here + else: + services[(check_type, item)] = ("clustered_" + check_source, paramstring) + + merge_manual_services(services, hostname) + return services + +# To a list of discovered services add/replace manual and active +# checks and handle ignoration +def merge_manual_services(services, hostname): + # Find manual checks. 
These can override discovered checks -> "manual" + manual_items = get_check_table(hostname, skip_autochecks=True) + for (check_type, item), (params, descr, deps) in manual_items.items(): + services[(check_type, item)] = ('manual', repr(params) ) + + # Add legacy checks -> "legacy" + legchecks = host_extra_conf(hostname, legacy_checks) + for cmd, descr, perf in legchecks: + services[('legacy', descr)] = ('legacy', 'None') + + # Add custom checks -> "custom" + custchecks = host_extra_conf(hostname, custom_checks) + for entry in custchecks: + services[('custom', entry['service_description'])] = ('custom', 'None') + + # Similar for 'active_checks', but here we have parameters + for acttype, rules in active_checks.items(): + act_info = active_check_info[acttype] + entries = host_extra_conf(hostname, rules) + for params in entries: + descr = act_info["service_description"](params) + services[(acttype, descr)] = ('active', repr(params)) + + # Handle disabled services -> "obsolete" and "ignored" + for (check_type, item), (check_source, paramstring) in services.items(): + descr = service_description(check_type, item) + if service_ignored(hostname, check_type, descr): + if check_source == "vanished": + new_source = "obsolete" + else: + new_source = "ignored" + services[(check_type, item)] = (new_source, paramstring) + + return services + +# Do the work for a cluster +def get_cluster_services(hostname, use_caches, with_snmp_scan): + nodes = nodes_of(hostname) + + # Get services of the nodes. We are only interested in "old", "new" and "vanished" + # From the states and parameters of these we construct the final state per service. + cluster_items = {} + for node in nodes: + services = get_discovered_services(node, None, use_caches, with_snmp_scan) + for (check_type, item), (check_source, paramstring) in services.items(): + descr = service_description(check_type, item) + if hostname == host_of_clustered_service(node, descr): + if (check_type, item) not in cluster_items: + cluster_items[(check_type, item)] = (check_source, paramstring) + else: + first_check_source, first_paramstring = cluster_items[(check_type, item)] + if first_check_source == "old": + pass + elif check_source == "old": + cluster_items[(check_type, item)] = (check_source, paramstring) + elif first_check_source == "vanished" and check_source == "new": + cluster_items[(check_type, item)] = ("old", first_paramstring) + elif check_source == "vanished" and first_check_source == "new": + cluster_items[(check_type, item)] = ("old", paramstring) + # In all other cases either both must be "new" or "vanished" -> let it be + + # Now add manual and active serivce and handle ignored services + merge_manual_services(cluster_items, hostname) + return cluster_items + + +# Get the list of service of a host or cluster and guess the current state of +# all services if possible +def get_check_preview(hostname, use_caches, do_snmp_scan): + services = get_host_services(hostname, use_caches, do_snmp_scan) + if is_cluster(hostname): + ipaddress = None + else: + ipaddress = lookup_ipaddress(hostname) + + table = [] + for (check_type, item), (check_source, paramstring) in services.items(): + params = None + if check_source not in [ 'legacy', 'active', 'custom' ]: + # apply check_parameters + try: + if type(paramstring) == str: + params = eval(paramstring) + else: + params = paramstring + except: + raise MKGeneralException("Invalid check parameter string '%s'" % paramstring) + + descr = service_description(check_type, item) + global g_service_description + 
g_service_description = descr + infotype = check_type.split('.')[0] + + # Sorry. The whole caching stuff is the most horrible hack in + # whole Check_MK. Nobody dares to clean it up, YET. But that + # day is getting nearer... + global opt_use_cachefile + old_opt_use_cachefile = opt_use_cachefile + opt_use_cachefile = True + opt_dont_submit = True # hack for get_realhost_info, avoid skipping because of check interval + + if check_type not in check_info: + continue # Skip not existing check silently + + try: + exitcode = None + perfdata = [] + info = get_host_info(hostname, ipaddress, infotype) + # Handle cases where agent does not output data + except MKAgentError, e: + exitcode = 3 + output = "Error getting data from agent" + if str(e): + output += ": %s" % e + tcp_error = output + + except MKSNMPError, e: + exitcode = 3 + output = "Error getting data from agent for %s via SNMP" % infotype + if str(e): + output += ": %s" % e + snmp_error = output + + except Exception, e: + exitcode = 3 + output = "Error getting data for %s: %s" % (infotype, e) + if check_uses_snmp(check_type): + snmp_error = output + else: + tcp_error = output + + opt_use_cachefile = old_opt_use_cachefile + + if exitcode == None: + check_function = check_info[check_type]["check_function"] + if check_source != 'manual': + params = compute_check_parameters(hostname, check_type, item, params) + + try: + reset_wrapped_counters() + result = convert_check_result(check_function(item, params, info), check_uses_snmp(check_type)) + if last_counter_wrap(): + raise last_counter_wrap() + except MKCounterWrapped, e: + result = (None, "WAITING - Counter based check, cannot be done offline") + except Exception, e: + if opt_debug: + raise + result = (3, "UNKNOWN - invalid output from agent or error in check implementation") + if len(result) == 2: + result = (result[0], result[1], []) + exitcode, output, perfdata = result + else: + descr = item + exitcode = None + output = "WAITING - %s check, cannot be done offline" % check_source.title() + perfdata = [] + + if check_source == "active": + params = eval(paramstring) + + if check_source in [ "legacy", "active", "custom" ]: + checkgroup = None + if service_ignored(hostname, None, descr): + check_source = "ignored" + else: + checkgroup = check_info[check_type]["group"] + + table.append((check_source, check_type, checkgroup, item, paramstring, params, descr, exitcode, output, perfdata)) + + return table + + + +#. +# .--Autochecks----------------------------------------------------------. +# | _ _ _ _ | +# | / \ _ _| |_ ___ ___| |__ ___ ___| | _____ | +# | / _ \| | | | __/ _ \ / __| '_ \ / _ \/ __| |/ / __| | +# | / ___ \ |_| | || (_) | (__| | | | __/ (__| <\__ \ | +# | /_/ \_\__,_|\__\___/ \___|_| |_|\___|\___|_|\_\___/ | +# | | +# +----------------------------------------------------------------------+ +# | Reading, parsing, writing, modifying autochecks files | +# '----------------------------------------------------------------------' + +# Read automatically discovered checks of one host. +# world: "config" -> File in var/check_mk/autochecks +# "active" -> Copy in var/check_mk/core/autochecks +# Returns a table with three columns: +# 1. check_type +# 2. item +# 3. parameters evaluated! 
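The autochecks files read and written by the functions below are plain Python list literals, one (check_type, item, paramstring) tuple per service. A small sketch of the writer side, mirroring the serialization used by save_autochecks_file; the path and service entries are invented.

def save_autochecks(path, items):
    # Same layout as save_autochecks_file: check type and item are written
    # with %r, the parameter string is written through verbatim, so names
    # like "diskstat_default_levels" stay unevaluated until read back.
    out = open(path, "w")
    out.write("[\n")
    for entry in items:
        out.write("  (%r, %r, %s),\n" % entry)
    out.write("]\n")
    out.close()

save_autochecks("/tmp/example_host.mk", [
    ("df", "/", "{}"),
    ("dell_poweredge_temp", "System Board Inlet", "None"),
])
print(open("/tmp/example_host.mk").read())
# [
#   ('df', '/', {}),
#   ('dell_poweredge_temp', 'System Board Inlet', None),
# ]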
+def read_autochecks_of(hostname, world="config"):
+    if world == "config":
+        basedir = autochecksdir
+    else:
+        basedir = var_dir + "/core/autochecks"
+    filepath = basedir + '/' + hostname + '.mk'
+
+    if not os.path.exists(filepath):
+        return []
+    try:
+        autochecks_raw = eval(file(filepath).read())
+    except SyntaxError,e:
+        if opt_verbose or opt_debug:
+            sys.stderr.write("Syntax error in file %s: %s\n" % (filepath, e))
+        if opt_debug:
+            raise
+        return []
+    except Exception, e:
+        if opt_verbose or opt_debug:
+            sys.stderr.write("Error in file %s:\n%s\n" % (filepath, e))
+        if opt_debug:
+            raise
+        return []
+
+    # Exchange inventorized check parameters with those configured by
+    # the user. Also merge with default levels for modern dictionary based checks.
+    autochecks = []
+    for entry in autochecks_raw:
+        if len(entry) == 4: # old format where hostname is at the first place
+            entry = entry[1:]
+        ct, it, par = entry
+        autochecks.append( (ct, it, compute_check_parameters(hostname, ct, it, par)) )
+    return autochecks
+
+
+# Read autochecks, but do not compute final check parameters,
+# also return a third column with the raw string of the parameters.
+# Returns a table with three columns:
+# 1. check_type
+# 2. item
+# 3. parameter string, not yet evaluated!
+def parse_autochecks_file(hostname):
+    def split_python_tuple(line):
+        quote = None
+        bracklev = 0
+        backslash = False
+        for i, c in enumerate(line):
+            if backslash:
+                backslash = False
+                continue
+            elif c == '\\':
+                backslash = True
+            elif c == quote:
+                quote = None # end of quoted string
+            elif c in [ '"', "'" ]:
+                quote = c # begin of quoted string
+            elif quote:
+                continue
+            elif c in [ '(', '{', '[' ]:
+                bracklev += 1
+            elif c in [ ')', '}', ']' ]:
+                bracklev -= 1
+            elif bracklev > 0:
+                continue
+            elif c == ',':
+                value = line[0:i]
+                rest = line[i+1:]
+                return value.strip(), rest
+        return line.strip(), None
+
+    path = "%s/%s.mk" % (autochecksdir, hostname)
+    if not os.path.exists(path):
+        return []
+    lineno = 0
+
+    table = []
+    for line in file(path):
+        lineno += 1
+        try:
+            line = line.strip()
+            if not line.startswith("("):
+                continue
+
+            # drop everything after potential '#' (from older versions)
+            i = line.rfind('#')
+            if i > 0: # make sure # is not contained in string
+                rest = line[i:]
+                if '"' not in rest and "'" not in rest:
+                    line = line[:i].strip()
+
+            if line.endswith(","):
+                line = line[:-1]
+            line = line[1:-1] # drop brackets
+
+            # First try old format - with hostname
+            parts = []
+            while True:
+                try:
+                    part, line = split_python_tuple(line)
+                    parts.append(part)
+                except:
+                    break
+            if len(parts) == 4:
+                parts = parts[1:] # drop hostname, legacy format with host in first column
+            elif len(parts) != 3:
+                raise Exception("Invalid number of parts: %d" % len(parts))
+
+            checktypestring, itemstring, paramstring = parts
+            table.append((eval(checktypestring), eval(itemstring), paramstring))
+        except:
+            if opt_debug:
+                raise
+            raise Exception("Invalid line %d in autochecks file %s" % (lineno, path))
+    return table
+
+
+def has_autochecks(hostname):
+    return os.path.exists(autochecksdir + "/" + hostname + ".mk")
+
+
+def save_autochecks_file(hostname, items):
+    if not os.path.exists(autochecksdir):
+        os.makedirs(autochecksdir)
+    filepath = autochecksdir + "/" + hostname + ".mk"
+    if os.path.exists(filepath):
+        os.remove(filepath)
+    out = file(filepath, "w")
+    out.write("[\n")
+    for entry in items:
+        out.write("  (%r, %r, %s),\n" % entry)
+    out.write("]\n")
+
+
+# Remove all autochecks of a host while being cluster-aware!
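split_python_tuple above cuts an autochecks line only at top-level commas, respecting quotes and nested brackets. A standalone copy run on an invented line illustrates this; remove_autochecks_of below then uses the parsed triples to filter out services belonging to cluster nodes.

def split_python_tuple(line):
    # Copy of the splitter above: cut at the first comma that is neither
    # inside quotes nor inside (), {} or [].
    quote = None
    bracklev = 0
    backslash = False
    for i, c in enumerate(line):
        if backslash:
            backslash = False
            continue
        elif c == '\\':
            backslash = True
        elif c == quote:
            quote = None
        elif c in ['"', "'"]:
            quote = c
        elif quote:
            continue
        elif c in ['(', '{', '[']:
            bracklev += 1
        elif c in [')', '}', ']']:
            bracklev -= 1
        elif bracklev > 0:
            continue
        elif c == ',':
            return line[0:i].strip(), line[i+1:]
    return line.strip(), None

part, rest = split_python_tuple("'df', '/', {'levels': (80.0, 90.0)}")
print(part)  # -> 'df'
print(rest)  # -> " '/', {'levels': (80.0, 90.0)}" (comma in the dict is protected)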
+def remove_autochecks_of(hostname): + removed = 0 + nodes = nodes_of(hostname) + if nodes: + for node in nodes: + old_items = parse_autochecks_file(node) + new_items = [] + for check_type, item, paramstring in old_items: + descr = service_description(check_type, item) + if hostname != host_of_clustered_service(node, descr): + new_items.append((check_type, item, paramstring)) + else: + removed += 1 + save_autochecks_file(node, new_items) + else: + old_items = parse_autochecks_file(hostname) + new_items = [] + for check_type, item, paramstring in old_items: + descr = service_description(check_type, item) + if hostname != host_of_clustered_service(hostname, descr): + new_items.append((check_type, item, paramstring)) + else: + removed += 1 + save_autochecks_file(hostname, new_items) + + return removed + diff -Nru check-mk-1.2.2p3/diskstat check-mk-1.2.6p12/diskstat --- check-mk-1.2.2p3/diskstat 2013-11-05 09:23:07.000000000 +0000 +++ check-mk-1.2.6p12/diskstat 2015-09-21 10:59:54.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. @@ -70,8 +70,6 @@ # Index 12 -- # of milliseconds spent doing I/Os # Index 13 -- weighted # of milliseconds spent doing I/Os -check_includes['diskstat'] = [ "diskstat.include" ] - # Convert information to generic format also generated # by winperf_phydisk # [ now, [( disk, readctr, writectr ), ... ]] @@ -81,37 +79,45 @@ info_plain = [] nameinfo = {} phase = 'info' + node = None for line in info: - if line[0] == '[dmsetup_info]': + if node == None: + node = line[0] + if line[1] == '[dmsetup_info]': phase = 'dmsetup_info' - elif line[0] == '[vx_dsk]': + elif line[1] == '[vx_dsk]': phase = 'vx_dsk' + # new node in case of a cluster, restart with info phase + elif line[0] != node: + phase = 'info' + node = line[0] else: if phase == 'info': info_plain.append(line) elif phase == 'dmsetup_info': try: - majmin = tuple(map(int, line[1].split(':'))) - if len(line) == 4: - name = "LVM %s" % line[0] + maj, min = map(int, line[2].split(':')) + if len(line) == 5: + name = "LVM %s" % line[1] else: - name = "DM %s" % line[0] - nameinfo[majmin] = name + name = "DM %s" % line[1] + nameinfo[node, maj, min] = name except: pass # ignore such crap as "No Devices Found" elif phase == 'vx_dsk': - maj = int(line[0], 16) - min = int(line[1], 16) - group, disk = line[2].split('/')[-2:] + maj = int(line[1], 16) + min = int(line[2], 16) + group, disk = line[3].split('/')[-2:] name = "VxVM %s-%s" % (group, disk) - nameinfo[(maj, min)] = name + nameinfo[(node, maj, min)] = name return info_plain, nameinfo def diskstat_rewrite_device(nameinfo, linestart): - major, minor = map(int, linestart[:2]) - device = linestart[2] - return nameinfo.get((major, minor), device) + node = linestart[0] + major, minor = map(int, linestart[1:3]) + device = linestart[3] + return nameinfo.get((node, major, minor), device) def linux_diskstat_convert(info): info, nameinfo = diskstat_parse_info(info) @@ -126,24 +132,35 @@ # 6: read queue length *counters* # 7: write queue length *counters* rewritten = [ - ( diskstat_rewrite_device(nameinfo, l[0:3]), - int(l[5]), - int(l[9]), - int(l[3]), - int(l[7]), - int(l[12]) - ) for l in info[1:] if len(l) >= 13 + ( l[0], # node name or None + diskstat_rewrite_device(nameinfo, 
l[0:4]), + int(l[6]), + int(l[10]), + int(l[4]), + int(l[8]), + # int(l[13]) + ) for l in info[1:] if len(l) >= 14 ] # Remove device mapper devices without a translated name - return [ line for line in rewritten if not line[0].startswith("dm-") ] + return [ line for line in rewritten if not line[1].startswith("dm-") ] def inventory_diskstat(info): return inventory_diskstat_generic(linux_diskstat_convert(info)) def check_diskstat(item, params, info): - return check_diskstat_generic(item, params, int(info[0][0]), linux_diskstat_convert(info)) + this_time = int(info[0][1]) + return check_diskstat_generic(item, params, this_time, linux_diskstat_convert(info)) + + -check_info['diskstat'] = (check_diskstat, "Disk IO %s", 1, inventory_diskstat) -checkgroup_of["diskstat"] = "disk_io" +check_info["diskstat"] = { + 'check_function' : check_diskstat, + 'inventory_function' : inventory_diskstat, + 'service_description' : 'Disk IO %s', + 'has_perfdata' : True, + 'group' : 'disk_io', + "node_info" : True, # add first column with actual host name + 'includes' : [ "diskstat.include" ], +} diff -Nru check-mk-1.2.2p3/diskstat.include check-mk-1.2.6p12/diskstat.include --- check-mk-1.2.2p3/diskstat.include 2013-11-05 09:23:07.000000000 +0000 +++ check-mk-1.2.6p12/diskstat.include 2015-09-21 10:59:54.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. @@ -30,7 +30,7 @@ # "read" : (10, 20), # MB/sec # "write" : (20, 40), # MB/sec # "average" : 15, # min -# "latency" : (10, 20), # ms +# "latency" : (10, 20), # ms # "latency_perfdata" : True, } @@ -71,60 +71,71 @@ inventory += [ ( "read", None ), ( "write", None ) ] if "physical" in modes: - inventory += [ (line[0], "diskstat_default_levels") + inventory += [ (line[1], "diskstat_default_levels") for line in info - if not ' ' in line[0] ] + if not ' ' in line[1] ] if "lvm" in modes: - inventory += [ (line[0], "diskstat_default_levels") + inventory += [ (line[1], "diskstat_default_levels") for line in info - if line[0].startswith("LVM ") ] + if line[1].startswith("LVM ") ] if "vxvm" in modes: - inventory += [ (line[0], "diskstat_default_levels") + inventory += [ (line[1], "diskstat_default_levels") for line in info - if line[0].startswith("VxVM ") ] + if line[1].startswith("VxVM ") ] return inventory -def check_diskstat_line(this_time, item, params, line): +def check_diskstat_line(this_time, item, params, line, mode='sectors'): average_range = params.get("average") perfdata = [] infos = [] status = 0 - for what, ctr in [ ("read", line[1]), ("write", line[2]) ]: - countername = "diskstat.%s.%s" % (item, what) + node = line[0] + if node != None and node != "": + infos.append("Node %s" % node) + prediction_perf = [] + for what, ctr in [ ("read", line[2]), ("write", line[3]) ]: + if node: + countername = "diskstat.%s.%s.%s" % (node, item, what) + else: + countername = "diskstat.%s.%s" % (item, what) # unpack levels now, need also for perfdata levels = params.get(what) - if levels: + if type(levels) == tuple: warn, crit = levels else: warn, crit = None, None - # compute IO rate in bytes/sec - timedif, sectors_per_sec = get_counter(countername, this_time, int(ctr)) - bytes_per_sec = sectors_per_sec * 512 + per_sec = get_rate(countername, this_time, int(ctr)) + if mode == 
'sectors': + # compute IO rate in bytes/sec + bytes_per_sec = per_sec * 512 + elif mode == 'bytes': + bytes_per_sec = per_sec + infos.append("%s/sec %s" % (get_bytes_human_readable(bytes_per_sec), what)) perfdata.append( (what, bytes_per_sec, warn, crit) ) + dsname = what # compute average of the rate over ___ minutes if average_range != None: - timedif, avg = get_average(countername + ".avg", this_time, bytes_per_sec, average_range) - perfdata.append( (what + ".avg", avg) ) + avg = get_average(countername + ".avg", this_time, bytes_per_sec, average_range) + dsname = what + ".avg" + perfdata.append( (dsname, avg) ) bytes_per_sec = avg # check levels - if levels != None: - mb_per_sec = bytes_per_sec / 1048576 - if mb_per_sec >= crit: - status = 2 - infos[-1] += "(!!)" - elif mb_per_sec >= warn: - status = max(status, 1) - infos[-1] += "(!)" + state, text, extraperf = check_levels(bytes_per_sec, dsname, levels, + unit = "MB/s", scale = 1048576, statemarkers=True) + if text: + infos.append(text) + status = max(state, status) + prediction_perf += extraperf # Add performance data for averaged IO if average_range != None: @@ -132,19 +143,19 @@ # Process IOs when available ios_per_sec = None - if len(line) >= 5 and line[3] >= 0 and line[4] > 0: - reads, writes = map(int, line[3:5]) + if len(line) >= 6 and line[4] >= 0 and line[5] > 0: + reads, writes = map(int, line[4:6]) ios = reads + writes - timedif, ios_per_sec = get_counter(countername + ".ios", this_time, ios) + ios_per_sec = get_rate(countername + ".ios", this_time, ios) infos.append("IOs: %.2f/sec" % ios_per_sec) if params.get("latency_perfdata"): perfdata.append(("ios", ios_per_sec)) # Do Latency computation if this information is available: - if len(line) >= 6 and line[5] >= 0: - timems = int(line[5]) - timedif, timems_per_sec = get_counter(countername + ".time", this_time, timems) + if len(line) >= 7 and line[6] >= 0: + timems = int(line[6]) + timems_per_sec = get_rate(countername + ".time", this_time, timems) if not ios_per_sec: latency = 0.0 else: @@ -166,8 +177,8 @@ # Queue Lengths (currently only Windows). Windows uses counters here. # I have not understood, why.... - if len(line) >= 8: - for what, ctr in [ ("read", line[6]), ("write", line[7]) ]: + if len(line) >= 9: + for what, ctr in [ ("read", line[7]), ("write", line[8]) ]: countername = "diskstat.%s.ql.%s" % (item, what) levels = params.get(what + "_ql") if levels: @@ -175,7 +186,7 @@ else: warn, crit = None, None - timedif, qlx = get_counter(countername, this_time, int(ctr)) + qlx = get_rate(countername, this_time, int(ctr)) ql = qlx / 10000000.0 infos.append(what.title() + " Queue: %.2f" % ql) @@ -191,31 +202,39 @@ if params.get("ql_perfdata"): perfdata.append((what + "_ql", ql)) + perfdata += prediction_perf - return (status, nagios_state_names[status] + " - " + ", ".join(infos) , perfdata) + return (status, ", ".join(infos) , perfdata) -def check_diskstat_generic(item, params, this_time, info): +def check_diskstat_generic(item, params, this_time, info, mode='sectors'): # legacy version if item is "read" or "write" if item in [ 'read', 'write' ]: return check_diskstat_old(item, params, this_time, info) - # summary mode - if item == 'SUMMARY': # summary mode (only summarize physical disks!) 
- summary_line = [0] * 13 - for line in info: - devname = line[0] - if ' ' in devname: - continue - summary_line = map(lambda e: e[0] + int(e[1]), zip(summary_line, line[1:])) - return check_diskstat_line(this_time, "SUMMARY", params, [''] + summary_line) + # Sum up either all physical disks (if item is "SUMMARY") or + # all entries matching the item in question. It is not a bug if + # a disk appears more than once. This can for example happen in + # Windows clusters - even if they are no Check_MK clusters. + + summed_up = [0] * 13 + matching = 0 - # single mode for line in info: - if line[0] == item: - return check_diskstat_line(this_time, item, params, line) + if item == 'SUMMARY' and line[0] != None: + return 3, "summary mode not supported in a cluster" + + elif item == 'SUMMARY' and ' ' in line[1]: + continue # skip non-physical disks - return (3, "UNKNOWN - device missing") + elif item == 'SUMMARY' or line[1] == item: + matching += 1 + summed_up = map(lambda e: e[0] + int(e[1]), zip(summed_up, line[2:])) + + if matching == 0: + return 3, "No matching disk found" + else: + return check_diskstat_line(this_time, item, params, [None, ''] + summed_up, mode) # This is the legacy version of diskstat as used in <= 1.1.10. @@ -224,15 +243,21 @@ def check_diskstat_old(item, params, this_time, info): # sum up over all devices if item == 'read': - index = 1 # sectors read + index = 2 # sectors read elif item == 'write': - index = 2 # sectors written + index = 3 # sectors written else: - return (3, "UNKNOWN - invalid item %s" % (item,)) + return (3, "invalid item %s" % (item,)) - this_val = sum([int(x[index]) for x in info if ' ' not in x[0]]) + this_val = 0 + for line in info: + if line[0] != None: + return 3, "read/write mode not supported in a cluster" + if ' ' not in line[1]: + this_val += int(line[index]) - timedif, per_sec = get_counter("diskstat." + item, this_time, this_val) + per_sec = get_rate("diskstat." + item, this_time, this_val) mb_per_s = per_sec / 2048.0 # Diskstat output is in sectors a 512 Byte - perfdata = [ (item, "%dc" % this_val ) ] - return (0, "OK - %.1fMB/s (in last %d secs)" % (mb_per_s, timedif), perfdata) + kb_per_s = per_sec / 2.0 + perfdata = [ (item, "%f" % kb_per_s ) ] + return (0, "%.1f MB/s" % mb_per_s, perfdata) diff -Nru check-mk-1.2.2p3/dmidecode check-mk-1.2.6p12/dmidecode --- check-mk-1.2.2p3/dmidecode 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/dmidecode 2015-06-24 09:48:37.000000000 +0000 @@ -0,0 +1,312 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. 
You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. + +# Example output: +# BIOS Information +# Vendor: LENOVO +# Version: 6FET49WW (1.19 ) +# Release Date: 10/17/2008 +# Address: 0xE0000 +# Runtime Size: 128 kB +# ROM Size: 8192 kB +# Characteristics: +# PCI is supported +# PC Card (PCMCIA) is supported +# PNP is supported +# BIOS is upgradeable +# BIOS shadowing is allowed +# ESCD support is available +# Boot from CD is supported +# Selectable boot is supported +# BIOS ROM is socketed +# EDD is supported +# ACPI is supported +# USB legacy is supported +# BIOS boot specification is supported +# Targeted content distribution is supported +# BIOS Revision: 1.25 +# Firmware Revision: 1.1 +# +# System Information +# Manufacturer: LENOVO +# Product Name: 4061AR7 +# Version: ThinkPad W500 +# Serial Number: L3AFB3L +# UUID: AD137E01-4A86-11CB-A580-BE0E287D2679 +# Wake-up Type: Power Switch +# SKU Number: Not Specified +# Family: ThinkPad W500 +# +# ... any many other sections... + +# Note: on Linux \t is replaced by : and then the split +# is done by :. On Windows the \t comes 1:1 and no splitting +# is being done. So we need to split manually here +def inv_dmidecode(info): + section_name = None + section_lines = [] + for line in info: + # Windows plugin keeps tabs and has no separator + if len(line) == 1: + parts = line[0].replace("\t", ":").split(":") + line = [ x.strip() for x in parts ] + if len(line) == 1: + if section_name: + inv_dmidecode_parse_section(section_name, section_lines) + section_name = line[0] + section_lines = [] + else: + section_lines.append(line[1:]) + if section_name: + inv_dmidecode_parse_section(section_name, section_lines) + + node = inv_tree("hardware.") + + +def inv_dmidecode_parse_section(name, lines): + lines = [ [ w.strip() for w in words ] for words in lines ] + if name == "BIOS Information": + inv_dmidecode_parse_bios(lines) + elif name == "System Information": + inv_dmidecode_parse_system(lines) + elif name == "Chassis Information": + inv_dmidecode_parse_chassis(lines) + elif name == "Processor Information": + inv_dmidecode_parse_processor(lines) +# elif name == "Memory Controller Information": +# inv_dmidecode_parse_mem_controller(lines) +# elif name == "Memory Module Information": +# inv_dmidecode_parse_mem_module(lines) + elif name == "Physical Memory Array": + inv_dmidecode_parse_physical_mem_array(lines) + elif name == "Memory Device": + inv_dmidecode_parse_mem_device(lines) + + # TODO: Summe über alle Arrays ausrechnen + +def inv_dmidecode_parse_date(value): + try: + # 10/17/2008 + return time.mktime(time.strptime(value, "%m/%d/%Y")) + except Exception, e: + return + +def inv_dmidecode_parse_bios(lines): + inv_dmidecode_parse_generic("hardware.bios.", lines, { + "Vendor" : "vendor", + "Version" : "version", + "Release Date" : ("date", inv_dmidecode_parse_date), + "BIOS Revision" : "revision", + "Firmware Revision" : "firmware", + }) + +def inv_dmidecode_parse_system(lines): + inv_dmidecode_parse_generic("hardware.system.", lines, { + "Manufacturer" : "manufacturer", + "Product Name" : "product", + "Version" : "version", + "Serial Number" : "serial", + "UUID" : "uuid", + "Family" : "family", + }) + +def inv_dmidecode_parse_chassis(lines): + inv_dmidecode_parse_generic("hardware.chassis.", lines, { + "Manufacturer" : "manufacturer", + "Type" : "type", + }) + +# Note: This node is also 
being filled by lnx_cpuinfo +def inv_dmidecode_parse_processor(lines): + cpu_info = {} + for line in lines: + if line[0] == "Manufacturer": + cpu_info["vendor"] = { + "GenuineIntel" : "intel", + "AuthenticAMD" : "amd", + }.get(line[1], line[1]) + elif line[0] == "Max Speed": # 2530 MHz + cpu_info["max_speed"] = dmidecode_parse_speed(line[1]) + elif line[0] == "Voltage": + cpu_info["voltage"] = dmidecode_parse_voltage(line[1]) + elif line[0] == "Status": + if line[1] == "Unpopulated": + return + + # Only update our CPU information if the socket is populated + inv_tree("hardware.cpu.").update(cpu_info) + +# def inv_dmidecode_parse_mem_controller(lines): +# # TODO: Can we have multiple memory controllers +# node = inv_tree("hardware.memory.") +# for line in lines: +# if line[0] == "Maximum Memory Module Size": +# node["max_module_size"] = dmidecode_parse_size(line[1]) +# elif line[0] == "Maximum Total Memory Size": +# node["max_memory_size"] = dmidecode_parse_size(line[1]) +# elif line[0] == "Memory Module Voltage": +# node["module_voltage"] = dmidecode_parse_voltage(line[1]) +# +# def inv_dmidecode_parse_mem_module(lines): +# node = inv_tree("hardware.memory.modules:") +# module = {} +# node.append(module) +# for line in lines: +# if line[0] == "Socket Designation": +# module["disignation"] = line[1] +# elif line[0] == "Type": +# module["type"] = line[1] +# elif line[0] == "Installed Size": +# module["size"] = dmidecode_parse_size(line[1]) +# elif line[0] == "Enabled Size": +# module["enabled_size"] = dmidecode_parse_size(line[1]) +# elif line[0] == "Current Speed": +# time_sec = dmidecode_parse_time(line[1]) +# speed = 1.0 / time_sec +# module["current_speed"] = speed + +def inv_dmidecode_parse_physical_mem_array(lines): + # We expect several possible arrays + node = inv_tree("hardware.memory.arrays:") + + # If we have a dummy entry from previous Memory Devices (see below) + # then we fill that entry rather than creating a new one + if len(node) == 1 and node[0].keys() == [ "devices" ]: + array = node[0] + else: + array = { + "devices" : [] + } + node.append(array) + for line in lines: + if line[0] == "Location": + array["location"] = line[1] + elif line[0] == "Use": + array["use"] = line[1] + elif line[0] == "Error Correction Type": + array["error_correction"] = line[1] + elif line[0] == "Maximum Capacity": + array["maximum_capacity"] = dmidecode_parse_size(line[1]) + +def inv_dmidecode_parse_mem_device(lines): + # Do we already have an entry for a memory array? Then + # we assume that this device belongs to the most recently + # read array. 
Otherwise we create a dummy entry and replace
+    # that later with actual information
+    node = inv_tree("hardware.memory.arrays:")
+    if node:
+        array = node[-1]
+    else:
+        array = { "devices": []}
+        node.append(array)
+
+    device = {}
+    inv_dmidecode_parse_generic(device, lines, {
+        "Total Width"   : "total_width",    # 64 bits
+        "Data Width"    : "data_width",     # 64 bits
+        "Form Factor"   : "form_factor",    # SODIMM
+        "Set"           : "set",            # None
+        "Locator"       : "locator",        # DIMM 2
+        "Bank Locator"  : "bank_locator",   # Bank 2/3
+        "Type"          : "type",           # DDR2
+        "Type Detail"   : "type_detail",    # Synchronous
+        "Manufacturer"  : "manufacturer",   # Not Specified
+        "Serial Number" : "serial",         # Not Specified
+        "Asset Tag"     : "asset_tag",      # Not Specified
+        "Part Number"   : "part_number",    # Not Specified
+        "Speed"         : "speed",          # 667 MHz
+        "Size"          : "size",           # 2048 MB
+    })
+
+    if device["size"] != "No Module Installed":
+        # Convert speed and size into numbers
+        device["speed"] = dmidecode_parse_speed(device.get("speed", "Unknown"))
+        device["size"] = dmidecode_parse_size(device.get("size", "Unknown"))
+        array["devices"].append(device)
+
+
+def inv_dmidecode_parse_generic(node, lines, keyinfo):
+    if type(node) == str:
+        node = inv_tree(node)
+    for line in lines:
+        if line[0] in keyinfo:
+            key = keyinfo[line[0]]
+            if line[1] != "Not Specified":
+                value = line[1]
+                if type(key) == tuple:
+                    key, transform = key
+                    value = transform(value)
+                    if value == None:
+                        continue
+                node[key] = value
+
+
+def dmidecode_parse_size(v): # into Bytes (int)
+    if v == "Unknown":
+        return None
+
+    parts = v.split()
+    if parts[1].lower() == "tb":
+        return int(parts[0]) * 1024 * 1024 * 1024 * 1024
+    elif parts[1].lower() == "gb":
+        return int(parts[0]) * 1024 * 1024 * 1024
+    elif parts[1].lower() == "mb":
+        return int(parts[0]) * 1024 * 1024
+    elif parts[1].lower() == "kb":
+        return int(parts[0]) * 1024
+    else:
+        return int(parts[0])
+
+def dmidecode_parse_speed(v): # into Hz (float)
+    if v == "Unknown":
+        return None
+
+    parts = v.split()
+    if parts[1] == "GHz":
+        return float(parts[0]) * 1000000000.0
+    elif parts[1] == "MHz":
+        return float(parts[0]) * 1000000.0
+    elif parts[1] == "kHz":
+        return float(parts[0]) * 1000.0
+    elif parts[1] == "Hz":
+        return float(parts[0])
+
+def dmidecode_parse_voltage(v):
+    if v == "Unknown":
+        return None
+    return float(v.split()[0])
+
+def dmidecode_parse_time(v): # 155 ns
+    parts = v.split()
+    if parts[1] == "ns":
+        return float(parts[0]) / 1000000000.0
+    else:
+        return float(parts[0]) # assume seconds
+
+
+inv_info['dmidecode'] = {
+    "inv_function" : inv_dmidecode,
+}
diff -Nru check-mk-1.2.2p3/dmi_sysinfo check-mk-1.2.6p12/dmi_sysinfo
--- check-mk-1.2.2p3/dmi_sysinfo	2013-11-05 09:23:07.000000000 +0000
+++ check-mk-1.2.6p12/dmi_sysinfo	2015-06-24 09:48:36.000000000 +0000
@@ -7,7 +7,7 @@
 # | | |___| | | | __/ (__| < | | | | . \ |
 # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ |
 # | |
-# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de |
+# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de |
 # +------------------------------------------------------------------+
 #
 # This file is part of Check_MK.
@@ -24,6 +24,8 @@
 # to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
 # Boston, MA 02110-1301 USA.
 
+# Note: this check is deprecated. It is superseded by the new
+# Check_MK HW/SW-Inventory.
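Before the dmi_sysinfo code continues below, a short editor's sketch of the unit normalization done by the dmidecode helpers above; the input strings are assumptions based on typical dmidecode output:

def parse_size_bytes(value):
    # "2048 MB" -> 2147483648; unknown units fall back to the bare number
    if value == "Unknown":
        return None
    number, unit = value.split()[:2]
    factors = {"kb": 1024, "mb": 1024**2, "gb": 1024**3, "tb": 1024**4}
    return int(number) * factors.get(unit.lower(), 1)

assert parse_size_bytes("2048 MB") == 2048 * 1024 * 1024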
def inventory_dmi_sysinfo(checkname, info): if len(info) > 0 and info[0] == ['System', 'Information']: @@ -31,7 +33,7 @@ def check_dmi_sysinfo(item, param, info): if len(info) == 0 or info[0] != ['System', 'Information']: - return (3, "UNKNOWN - Invalid information") + return (3, "Invalid information") data = {} for line in info: line = " ".join(line) @@ -47,4 +49,9 @@ )) -check_info['dmi_sysinfo'] = (check_dmi_sysinfo, "DMI Sysinfo", 0, inventory_dmi_sysinfo) + +check_info["dmi_sysinfo"] = { + 'check_function': check_dmi_sysinfo, + 'inventory_function': inventory_dmi_sysinfo, + 'service_description': 'DMI Sysinfo', +} diff -Nru check-mk-1.2.2p3/dmraid check-mk-1.2.6p12/dmraid --- check-mk-1.2.2p3/dmraid 2013-11-05 09:23:07.000000000 +0000 +++ check-mk-1.2.6p12/dmraid 2015-06-24 09:48:36.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. @@ -27,14 +27,13 @@ # Author: Markus Lengler - # Example outputs from agent: # -#<<>> -#name : isw_ebdabbedfh_system -#status : ok -#/dev/sda: isw, "isw_ebdabbedfh", GROUP, ok, 976773166 sectors, data@ 0 Model: WDC WD5002ABYS-5 -#/dev/sdb: isw, "isw_ebdabbedfh", GROUP, ok, 976773166 sectors, data@ 0 Model: WDC WD5002ABYS-5 +# <<>> +# name : isw_ebdabbedfh_system +# status : ok +# /dev/sda: isw, "isw_ebdabbedfh", GROUP, ok, 976773166 sectors, data@ 0 Model: WDC WD5002ABYS-5 +# /dev/sdb: isw, "isw_ebdabbedfh", GROUP, ok, 976773166 sectors, data@ 0 Model: WDC WD5002ABYS-5 def inventory_dmraid(checkname, info): @@ -57,10 +56,10 @@ if status == "ok": pos=line.index("Model:") model=" ".join(line[pos+1:]) - return (0, "OK - Online (%s)" % model) + return (0, "Online (%s)" % model) else: - return (2, "CRIT - Error on disk!!") - return (2, "CRIT - Missing disk!!") + return (2, "Error on disk!!") + return (2, "Missing disk!!") def check_dmraid_ldisks(item, _no_params, info): @@ -70,18 +69,26 @@ if line[0] == "status": status = line[2] if status == "ok": - return(0, "OK - state is %s" % status) + return(0, "state is %s" % status) else: - return(2, "CRIT - %s" % status) + return(2, "%s" % status) if line[0] == "name" and line[2] == item: LDISK_FOUND=True - return (3, "UNKNOWN - incomplete data from agent") + return (3, "incomplete data from agent") + + -check_info['dmraid.pdisks'] = \ - (check_dmraid_pdisks, "RAID PDisk %s", 0, inventory_dmraid) +check_info["dmraid.ldisks"] = { + 'check_function': check_dmraid_ldisks, + 'inventory_function': inventory_dmraid, + 'service_description': 'RAID LDisk %s', +} -check_info['dmraid.ldisks'] = \ - (check_dmraid_ldisks, "RAID LDisk %s", 0, inventory_dmraid) +check_info["dmraid.pdisks"] = { + 'check_function': check_dmraid_pdisks, + 'inventory_function': inventory_dmraid, + 'service_description': 'RAID PDisk %s', +} diff -Nru check-mk-1.2.2p3/dmraid.ldisks check-mk-1.2.6p12/dmraid.ldisks --- check-mk-1.2.2p3/dmraid.ldisks 2013-11-05 09:23:07.000000000 +0000 +++ check-mk-1.2.6p12/dmraid.ldisks 2015-06-24 09:48:36.000000000 +0000 @@ -1,6 +1,6 @@ -title: Check state of logical disks of DMRaid +title: Logical disks of DMRaid agents: linux -author: Markus Lengler +catalog: os/storage license: GPL distribution: check_mk description: diff -Nru check-mk-1.2.2p3/dmraid.pdisks check-mk-1.2.6p12/dmraid.pdisks --- 
check-mk-1.2.2p3/dmraid.pdisks	2013-11-05 09:23:07.000000000 +0000
+++ check-mk-1.2.6p12/dmraid.pdisks	2015-06-24 09:48:36.000000000 +0000
@@ -1,6 +1,6 @@
-title: Check state of phyiscal disks of DMRaid
+title: Physical disks of DMRaid
 agents: linux
-author: Markus Lengler
+catalog: os/storage
 license: GPL
 distribution: check_mk
 description:
diff -Nru check-mk-1.2.2p3/docsis_channels_downstream check-mk-1.2.6p12/docsis_channels_downstream
--- check-mk-1.2.2p3/docsis_channels_downstream	1970-01-01 00:00:00.000000000 +0000
+++ check-mk-1.2.6p12/docsis_channels_downstream	2015-09-21 10:59:54.000000000 +0000
@@ -0,0 +1,97 @@
+#!/usr/bin/python
+# -*- encoding: utf-8; py-indent-offset: 4 -*-
+# +------------------------------------------------------------------+
+# | ____ _ _ __ __ _ __ |
+# | / ___| |__ ___ ___| | __ | \/ | |/ / |
+# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / |
+# | | |___| | | | __/ (__| < | | | | . \ |
+# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ |
+# | |
+# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de |
+# +------------------------------------------------------------------+
+#
+# This file is part of Check_MK.
+# The official homepage is at http://mathias-kettner.de/check_mk.
+#
+# check_mk is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by
+# the Free Software Foundation in version 2. check_mk is distributed
+# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with-
+# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE. See the GNU General Public License for more de-
+# tails. You should have received a copy of the GNU General Public
+# License along with GNU Make; see the file COPYING. If not, write
+# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
+# Boston, MA 02110-1301 USA.
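The DOCSIS checks that follow (downstream power directly below, upstream signal/noise later in this diff) grade their readings with *lower* bounds: the value is good when it is high, and warn/crit trigger when it drops to or below the thresholds. The recurring pattern, extracted into a hedged standalone helper (the names are illustrative, not from the source):

def check_lower_levels(value, warn, crit):
    # Nagios-style state: at or below crit -> 2, at or below warn -> 1
    if value <= crit:
        return 2
    elif value <= warn:
        return 1
    return 0

# With the downstream power defaults (5.0, 1.0) dBmV:
# check_lower_levels(4.2, warn=5.0, crit=1.0) -> 1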
+ +factory_settings["docsis_channels_downstream"] = { + "power" : ( 5.0, 1.0 ), +} + +def inventory_docsis_channels_downstream(info): + for line in info: + if line[1] != '0': + yield line[0], {} + +def check_docsis_channels_downstream(item, params, info): + for channel_id, frequency, power in info: + if channel_id == item: + # Power + warn, crit = params["power"] + power_dbmv = abs(float(int(power))) / 10 + infotext = "Power is %.1f dBmV" % power_dbmv + levels = " (Levels Warn/Crit at %d dBmV/ %d dBmV)" % ( warn, crit ) + state = 0 + if power_dbmv <= crit: + state = 2 + infotext += levels + elif power_dbmv <= warn: + state = 1 + infotext += levels + yield state, infotext, [ ('power', power_dbmv, warn, crit ) ] + + # Check Frequency + frequency_mhz = float(frequency) / 1000000 + infotext = "Frequency is %.1f MHz" % frequency_mhz + perfdata = [("frequency" , frequency_mhz, warn, crit )] + state = 0 + if "frequency" in params: + warn, crit = params["frequency"] + levels = " (warn/crit at %d MHz/ %d MHz)" % ( warn, crit ) + if frequency_mhz >= crit: + state = 2 + infotext += levels + elif frequency_mhz >= warn: + state = 1 + infotext += levels + # Change this to yield in case of future extension of the check + yield state, infotext, perfdata + return + + yield 3, "Channel information not found in SNMP data" + + +# This Check is a subcheck because there is also a upstream version possible +check_info["docsis_channels_downstream"] = { + "check_function" : check_docsis_channels_downstream, + "inventory_function" : inventory_docsis_channels_downstream, + "service_description" : "Downstream Channel %s", + "has_perfdata" : True, + "snmp_scan_function" : docsis_scan_function, + "snmp_info" : ( ".1.3.6.1.2.1.10.127.1.1.1.1", [ 1, # docsIfDownChannelId + 2, # docsIfDownChannelFrequency + 6, # docsIfDownChannelPower (1/10 dBmV) + ]), + "group" : "docsis_channels_downstream", + "default_levels_variable" : "docsis_channels_downstream", + "includes" : [ "docsis.include" ], +} + +# Information for future extensions of the check: +# docsIfDownChannelId 1.3.6.1.2.1.10.127.1.1.1.1.1 +# docsIfDownChannelFrequency 1.3.6.1.2.1.10.127.1.1.1.1.2 +# docsIfDownChannelWidth 1.3.6.1.2.1.10.127.1.1.1.1.3 +# docsIfDownChannelModulation 1.3.6.1.2.1.10.127.1.1.1.1.4 +# docsIfDownChannelInterleave 1.3.6.1.2.1.10.127.1.1.1.1.5 +# docsIfDownChannelPower 1.3.6.1.2.1.10.127.1.1.1.1.6 +# docsIfDownChannelAnnex 1.3.6.1.2.1.10.127.1.1.1.1.7 diff -Nru check-mk-1.2.2p3/docsis_channels_upstream check-mk-1.2.6p12/docsis_channels_upstream --- check-mk-1.2.2p3/docsis_channels_upstream 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/docsis_channels_upstream 2015-09-21 10:59:54.000000000 +0000 @@ -0,0 +1,168 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. 
check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# tails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. + +factory_settings["docsis_channels_upstream_default_levels"] = { + "signal_noise" : ( 10.0, 5.0 ), # dB +} + +def docsis_channels_upstream_convert(info): + parsed = {} + freq_info, sig_info, cm_info = info + if len(freq_info) == 1 and len(sig_info) == 1 and \ + freq_info[0][0] != sig_info[0][0]: # Probably ARRIS modem + cid = freq_info[0][0] + parsed[cid] = freq_info[0][1:] + sig_info[0][1:] + if len(cm_info) == 1: # Never seen + parsed[cid] += cm_info[0] + + else: + sig_info_dict = dict([ (x[0], x[1:]) for x in sig_info]) + cm_info_dict = dict([ (x[0], x[1:]) for x in cm_info]) + # If the channel id is not unique we also use the OID_END in the item name + is_unique = len(freq_info) == len(set(map(lambda x: x[1], freq_info))) + for line in freq_info: + endoid = line[0] + cid = is_unique and line[1] or "%s.%s" % (line[0], line[1]) + if line[2] != '0': + parsed[cid] = line[1:] + sig_info_dict[endoid] + cm_info_dict.get(endoid, []) + + return parsed + +def inventory_docsis_channels_upstream(info): + parsed = docsis_channels_upstream_convert(info) + for cid, entry in parsed.items(): + if entry[2] != '0' and entry[5] != '0': + yield cid, {} + +def check_docsis_channels_upstream(item, params, info): + parsed = docsis_channels_upstream_convert(info) + if item in parsed: + entry = parsed[item] + + channel_id, mhz, unerroreds, correcteds, uncorrectables, signal_noise = entry[:6] + + # Signal Noise + noise_db = float(signal_noise) / 10 + infotext = "Signal/Noise ratio: %.2f dB" % noise_db + warn, crit = params['signal_noise'] + levels = " (warn/crit at %.1f/%.1f dB)" % ( warn, crit ) + state = 0 + if noise_db <= crit: + state = 2 + infotext += levels + elif noise_db <= warn: + state = 1 + infotext += levels + yield state, infotext, [ ('signal_noise', noise_db, warn, crit ) ] + + fields = [ + ( "frequency", float(mhz) / 1000000, "Frequency", "%.2f", " MHz"), + ( "unerroreds", int(unerroreds), "codewords without errors", "%d", "" ), + ( "correcteds", int(correcteds), "corrected errors", "%d", "" ), + ( "uncorrectables", int(uncorrectables), "uncorrectable errors", "%d", "" ), + ] + if len(entry) >= 10: + total, active, registered, avg_util = entry[6:10] + fields += [ + ( "total", int(total), "Modems total", "%d", "" ), + ( "active", int(active), "active", "%d", "" ), + ( "registered", int(registered), "registered", "%d", "" ), + ( "util", int(avg_util), "average utilization", "%d", "%" ), + ] + + for varname, value, title, form, unit in fields: + yield 0, title + ": " + (form + "%s") % (value, unit), [ (varname, value) ] + + + return + + yield 3, "Channel information not found in SNMP data" + + +# This Check is a subcheck because there is also a upstream version possible +check_info["docsis_channels_upstream"] = { + "check_function" : check_docsis_channels_upstream, + "inventory_function" : inventory_docsis_channels_upstream, + "service_description" : "Upstream Channel %s", + "has_perfdata" : True, + "snmp_scan_function" : docsis_scan_function, + "snmp_info" : [ + ( ".1.3.6.1.2.1.10.127.1.1.2.1", [ + OID_END, + "1", # docsIfUpChannelId 
+ "2", # docsIfUpChannelFrequency + ]), + ( ".1.3.6.1.2.1.10.127.1.1.4.1", [ + OID_END, + "2", # docsIfSigQUnerroreds: + # "Codewords received on this channel without error. + # This includes all codewords, whether or not they + # were part of frames destined for this device." + + "3", # docsIfSigQCorrecteds: + # "Codewords received on this channel with correctable + # errors. This includes all codewords, whether or not + # they were part of frames destined for this device." + + "4", # docsIfSigQUncorrectables: + # "Codewords received on this channel with uncorrectable + # errors. This includes all codewords, whether or not + # they were part of frames destined for this device." + + "5", # docsIfSigQSignalNoise + ]), + ( ".1.3.6.1.4.1.9.9.116.1.4.1.1" , [ + OID_END, + "3", # cdxIfUpChannelCmTotal + "4", # cdxIfUpChannelCmActive + "5", # cdxIfUpChannelCmRegistered + "7", # cdxIfUpChannelAvgUtil + ]), + ], + "default_levels_variable" : "docsis_channels_upstream_default_levels", + "group" : "docsis_channels_upstream", + "includes" : [ "docsis.include" ], +} + +# Strange: Channel IDs seem to be not unique. But the second +# usage has '0' in the docsIfUpChannelFrequency... + +# All branches of this MIB section: +# cdxIfUpChannelWidth (1) +# cdxIfUpChannelModulationProfile (2) +# cdxIfUpChannelCmTotal (3) +# cdxIfUpChannelCmActive (4) +# cdxIfUpChannelCmRegistered (5) +# cdxIfUpChannelInputPowerLevel (6) +# cdxIfUpChannelAvgContSlots (8) +# cdxIfUpChannelRangeSlots (9) +# cdxIfUpChannelNumActiveUGS (10) +# cdxIfUpChannelMaxUGSLastOneHour (11) +# cdxIfUpChannelMinUGSLastOneHour (12) +# cdxIfUpChannelAvgUGSLastOneHour (13) +# cdxIfUpChannelMaxUGSLastFiveMins (14) +# cdxIfUpChannelMinUGSLastFiveMins (15) +# cdxIfUpChannelAvgUGSLastFiveMins (16) diff -Nru check-mk-1.2.2p3/docsis_cm_status check-mk-1.2.6p12/docsis_cm_status --- check-mk-1.2.2p3/docsis_cm_status 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/docsis_cm_status 2015-06-24 09:48:36.000000000 +0000 @@ -0,0 +1,111 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. 
+
+# docsIfCmStatusValue                        1.3.6.1.2.1.10.127.1.2.2.1.1
+# docsIfCmStatusT1Timeouts                   1.3.6.1.2.1.10.127.1.2.2.1.10
+# docsIfCmStatusT2Timeouts                   1.3.6.1.2.1.10.127.1.2.2.1.11
+# docsIfCmStatusT3Timeouts                   1.3.6.1.2.1.10.127.1.2.2.1.12
+# docsIfCmStatusT4Timeouts                   1.3.6.1.2.1.10.127.1.2.2.1.13
+# docsIfCmStatusRangingAborteds              1.3.6.1.2.1.10.127.1.2.2.1.14
+# docsIfCmStatusDocsisOperMode               1.3.6.1.2.1.10.127.1.2.2.1.15
+# docsIfCmStatusModulationType               1.3.6.1.2.1.10.127.1.2.2.1.16
+# docsIfCmStatusCode                         1.3.6.1.2.1.10.127.1.2.2.1.2
+# docsIfCmStatusTxPower                      1.3.6.1.2.1.10.127.1.2.2.1.3
+# docsIfCmStatusResets                       1.3.6.1.2.1.10.127.1.2.2.1.4
+# docsIfCmStatusLostSyncs                    1.3.6.1.2.1.10.127.1.2.2.1.5
+# docsIfCmStatusInvalidMaps                  1.3.6.1.2.1.10.127.1.2.2.1.6
+# docsIfCmStatusInvalidUcds                  1.3.6.1.2.1.10.127.1.2.2.1.7
+# docsIfCmStatusInvalidRangingResponses      1.3.6.1.2.1.10.127.1.2.2.1.8
+# docsIfCmStatusInvalidRegistrationResponses 1.3.6.1.2.1.10.127.1.2.2.1.9
+
+factory_settings["docsis_cm_status_default_levels"] = {
+    "tx_power"     : ( 20.0, 10.0 ),
+    "error_states" : [ 13, 2, 1 ],
+}
+
+def inventory_docsis_cm_status(info):
+    for line in info:
+        yield line[0], {}
+
+def check_docsis_cm_status( item, params, info):
+    status_table = {
+        1  : "other",
+        2  : "notReady",
+        3  : "notSynchronized",
+        4  : "phySynchronized",
+        5  : "usParametersAcquired",
+        6  : "rangingComplete",
+        7  : "ipComplete",
+        8  : "todEstablished",
+        9  : "securityEstablished",
+        10 : "paramTransferComplete",
+        11 : "registrationComplete",
+        12 : "operational",
+        13 : "accessDenied",
+    }
+
+    for sid, status, tx_power in info:
+        if sid == item:
+            # Modem status
+            status = int(status)
+            infotext = "Status: %s" % status_table[status]
+            state = 0
+            if status in params['error_states']:
+                state = 2
+            yield state, infotext
+
+            # TX Power
+            tx_power_dbmv = float(tx_power) / 10
+            warn, crit = params['tx_power']
+            levels = " (warn/crit at %.1f/%.1f dBmV)" % ( warn, crit )
+            state = 0
+            infotext = "TX Power is %.1f dBmV" % tx_power_dbmv
+            if tx_power_dbmv <= crit:
+                state = 2
+                infotext += levels
+            elif tx_power_dbmv <= warn:
+                state = 1
+                infotext += levels
+            yield state, infotext, [ ('tx_power', tx_power_dbmv, warn, crit ) ]
+            return
+
+    yield 3, "Status Entry not found"
+
+check_info["docsis_cm_status"] = {
+    "check_function"          : check_docsis_cm_status,
+    "inventory_function"      : inventory_docsis_cm_status,
+    "service_description"     : "Cable Modem %s Status",
+    "snmp_scan_function"      : docsis_scan_function_cable_modem,
+    "snmp_info"               : ( ".1.3.6.1.2.1.10.127.1.2.2.1", [
+                                      OID_END,
+                                      1, # docsIfCmStatusValue
+                                      3, # docsIfCmStatusTxPower
+                                ]),
+    "default_levels_variable" : "docsis_cm_status_default_levels",
+    "group"                   : "docsis_cm_status",
+    "includes"                : [ "docsis.include" ],
+}
+
diff -Nru check-mk-1.2.2p3/docsis.include check-mk-1.2.6p12/docsis.include
--- check-mk-1.2.2p3/docsis.include	1970-01-01 00:00:00.000000000 +0000
+++ check-mk-1.2.6p12/docsis.include	2015-06-24 09:48:36.000000000 +0000
@@ -0,0 +1,44 @@
+#!/usr/bin/python
+# -*- encoding: utf-8; py-indent-offset: 4 -*-
+# +------------------------------------------------------------------+
+# | ____ _ _ __ __ _ __ |
+# | / ___| |__ ___ ___| | __ | \/ | |/ / |
+# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / |
+# | | |___| | | | __/ (__| < | | | | . \ |
+# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ |
+# | |
+# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de |
+# +------------------------------------------------------------------+
+#
+# This file is part of Check_MK.
+# The official homepage is at http://mathias-kettner.de/check_mk.
+#
+# check_mk is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by
+# the Free Software Foundation in version 2. check_mk is distributed
+# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with-
+# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE. See the GNU General Public License for more de-
+# tails. You should have received a copy of the GNU General Public
+# License along with GNU Make; see the file COPYING. If not, write
+# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
+# Boston, MA 02110-1301 USA.
+
+
+# Scan function for DOCSIS devices that support the general information
+def docsis_scan_function(oid):
+    if oid(".1.3.6.1.2.1.1.2.0") in [ ".1.3.6.1.4.1.4115.820.1.0.0.0.0.0", # ARRIS Touchstone WideBand Cable Modem
+                                      ".1.3.6.1.4.1.4115.900.2.0.0.0.0.0", # ARRIS Touchstone Cable Modem HW REV:2
+                                      ".1.3.6.1.4.1.9.1.827",              # Cisco CMTS UBR 7200
+                                      ".1.3.6.1.4.1.4998.2.1",             # ARRIS CMTS C4
+                                      ".1.3.6.1.4.1.20858.2.600",          # CASA C100G
+                                    ]:
+        return True
+
+# Scan function for cable modems with the DOCSIS MIB
+# docsIfCmStatusTable 1.3.6.1.2.1.10.127.1.2.2
+def docsis_scan_function_cable_modem(oid):
+    if oid(".1.3.6.1.2.1.1.2.0") in [ ".1.3.6.1.4.1.4115.820.1.0.0.0.0.0", # ARRIS Touchstone WideBand Cable Modem
+                                      ".1.3.6.1.4.1.4115.900.2.0.0.0.0.0", # ARRIS Touchstone Cable Modem HW REV:2
+                                    ]:
+        return True
Binary files /tmp/tmpz1J_py/xquijR6Ryw/check-mk-1.2.2p3/doc.tar.gz and /tmp/tmpz1J_py/GXS8RRKTxu/check-mk-1.2.6p12/doc.tar.gz differ
diff -Nru check-mk-1.2.2p3/domino_info check-mk-1.2.6p12/domino_info
--- check-mk-1.2.2p3/domino_info	1970-01-01 00:00:00.000000000 +0000
+++ check-mk-1.2.6p12/domino_info	2015-06-24 09:48:36.000000000 +0000
@@ -0,0 +1,80 @@
+#!/usr/bin/python
+# -*- encoding: utf-8; py-indent-offset: 4 -*-
+# +------------------------------------------------------------------+
+# | ____ _ _ __ __ _ __ |
+# | / ___| |__ ___ ___| | __ | \/ | |/ / |
+# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / |
+# | | |___| | | | __/ (__| < | | | | . \ |
+# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ |
+# | |
+# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de |
+# +------------------------------------------------------------------+
+#
+# This file is part of Check_MK.
+# The official homepage is at http://mathias-kettner.de/check_mk.
+#
+# check_mk is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by
+# the Free Software Foundation in version 2. check_mk is distributed
+# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with-
+# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE. See the GNU General Public License for more de-
+# tails. You should have received a copy of the GNU General Public
+# License along with GNU Make; see the file COPYING. If not, write
+# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
+# Boston, MA 02110-1301 USA.
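The scan functions in docsis.include above receive an OID getter and decide from the device's sysObjectID whether the DOCSIS checks apply; inventory only queries the check's SNMP tables when they return true. A fake getter is enough to exercise them offline (a sketch; the sysObjectID value is the Cisco CMTS UBR 7200 entry from the list above):

def fake_oid(oid, values={".1.3.6.1.2.1.1.2.0": ".1.3.6.1.4.1.9.1.827"}):
    # stand-in for Check_MK's real OID getter
    return values.get(oid, "")

# docsis_scan_function(fake_oid) -> True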
+ + +# Example SNMP walk: +# +# .1.3.6.1.4.1.334.72.2.2.0 1 +# .1.3.6.1.4.1.334.72.1.1.4.8.0 MEDEMA +# .1.3.6.1.4.1.334.72.1.1.6.2.1.0 CN=HH-BK4/OU=SRV/O=MEDEMA/C=DE +# .1.3.6.1.4.1.334.72.1.1.6.2.4.0 Release 8.5.3FP5 HF89 + + +domino_info_states = { + '1' : 'up', + '2' : 'down', + '3' : 'not-responding', + '4' : 'crashed', + '5' : 'unknown' +} + +def inventory_domino_info(info): + if info and len(info[0]) != 0: + yield None, None + +def check_domino_info(_no_item, _no_params, info): + serverstate, domain, name, release = info + if len(domain) > 0: + maildomain = domain[0][0] + else: + maildomain = "-" + if int(serverstate[0][0]) > 4: + state = 1 + infotext = "Server is %s" % ( domino_info_states[serverstate[0][0]] ) + elif int(serverstate[0][0]) > 1: + state = 2 + infotext = "Server is %s" % ( domino_info_states[serverstate[0][0]] ) + else: + state = 0 + infotext = "Server is %s. Domain: %s, Name: %s, %s" \ + % ( domino_info_states[serverstate[0][0]], maildomain, name[0][0], release[0][0] ) + yield state, infotext + +check_info['domino_info'] = { + "check_function" : check_domino_info, + "inventory_function" : inventory_domino_info, + "has_perfdata" : False, + "service_description" : "Domino Info", + "snmp_scan_function" : lambda oid: oid(".1.3.6.1.2.1.1.2.0").startswith(".1.3.6.1.4.1.311.1.1.3") or \ + oid(".1.3.6.1.2.1.1.2.0").startswith(".1.3.6.1.4.1.8072.3.1.10"), + "snmp_info" : [ + [ ".1.3.6.1.4.1.334.72.2", [ 2 ] ], # lnNotesServerState + [ ".1.3.6.1.4.1.334.72.1.1.4", [ 8 ] ], # lnMailDomain + [ ".1.3.6.1.4.1.334.72.1.1.6.2", [ 1 ] ], # lnServerName + [ ".1.3.6.1.4.1.334.72.1.1.6.2", [ 4 ] ], # lnServerNotesVersion + ] +} + diff -Nru check-mk-1.2.2p3/domino_mailqueues check-mk-1.2.6p12/domino_mailqueues --- check-mk-1.2.2p3/domino_mailqueues 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/domino_mailqueues 2015-06-24 09:48:36.000000000 +0000 @@ -0,0 +1,74 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. 
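The domino_mailqueues check below identifies each queue via component 12 of the dotted OID string that OID_STRING delivers. With the table base .1.3.6.1.4.1.334.72.1.1.4 that component is exactly the column number, i.e. the queue key; a quick illustration (the trailing .0 instance suffix is an assumption about the walk):

oid_string = ".1.3.6.1.4.1.334.72.1.1.4.1.0"
components = oid_string.split(".")   # ['', '1', '3', '6', ..., '4', '1', '0']
print(components[12])                # -> '1', the lnDeadMail queue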
+ +factory_settings["domino_mailqueues_defaults"] = { + "queue_length": ( 300, 350 ) +} + +domino_mailqueues = { + 1: ( "lnDeadMail", "Dead Mails" ), # lnDeadMail + 6: ( "lnWaitingMail", "Waiting Mails" ), # lnWaitingMail + 21: ( "lnMailHold", "Mails on Hold" ), # lnMailHold + 31: ( "lnMailTotalPending", "Total Pending Mails" ), # lnMailTotalPending + 34: ( "InMailWaitingforDNS", "Mails waiting for DNS" ), # InMailWaitingforDNS +} + +def inventory_domino_mailqueues(info): + for line in info: + if line: + val = int(line[0][0].split(".")[12]) + if val in domino_mailqueues.keys(): + yield domino_mailqueues[val][0], {} + +def check_domino_mailqueues(item, params, info): + for line in info: + val = int(line[0][0].split(".")[12]) + if item == domino_mailqueues[val][0]: + reading = int(line[0][1]) + warn, crit = params.get("queue_length") + infotext = "%d %s" % (reading, domino_mailqueues[val][1]) + levels = " (Warn/Crit at %s/%s)" % ( warn, crit ) + perfdata = [ ( "mails", reading, warn, crit ) ] + state = 0 + if reading >= crit: + state = 2 + infotext += levels + elif reading >= warn: + state = 1 + infotext += levels + yield state, infotext, perfdata + +check_info["domino_mailqueues"] = { + "check_function" : check_domino_mailqueues, + "inventory_function" : inventory_domino_mailqueues, + "service_description" : "Domino Queue %s", + "has_perfdata" : True, + "default_levels_variable" : "domino_mailqueues_defaults", + "snmp_scan_function" : lambda oid: oid(".1.3.6.1.2.1.1.2.0") == ".1.3.6.1.4.1.311.1.1.3.1.2", + "snmp_info" : map(lambda x: (".1.3.6.1.4.1.334.72.1.1.4", [OID_STRING, x]), + domino_mailqueues.keys()), + "group" : "domino_mailqueues" +} diff -Nru check-mk-1.2.2p3/domino_tasks check-mk-1.2.6p12/domino_tasks --- check-mk-1.2.2p3/domino_tasks 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/domino_tasks 2015-06-24 09:48:36.000000000 +0000 @@ -0,0 +1,66 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. + + +# Example SNMP walk: +# +# InTaskName: The actual name of the task as it appears in the SERVER.TASK statistic on the server. 
+# .1.3.6.1.4.1.334.72.1.1.6.1.2.1.4.0 Router +# .1.3.6.1.4.1.334.72.1.1.6.1.2.1.4.1 tm_grab Subsystems +# .1.3.6.1.4.1.334.72.1.1.6.1.2.1.4.2 tm_grab M01 +# .1.3.6.1.4.1.334.72.1.1.6.1.2.1.4.3 tm_grab M02 +# .1.3.6.1.4.1.334.72.1.1.6.1.2.1.4.4 tm_grab M03 +# .1.3.6.1.4.1.334.72.1.1.6.1.2.1.4.5 tm_grab M04 +# .1.3.6.1.4.1.334.72.1.1.6.1.2.1.4.6 tm_grab M05 +# .1.3.6.1.4.1.334.72.1.1.6.1.2.1.4.7 tm_grab +# .1.3.6.1.4.1.334.72.1.1.6.1.2.1.4.8 Router + + +inv_domino_tasks_rules = [] +inv_domino_tasks = [] + +def inventory_domino_tasks(info): + return inventory_ps_common(inv_domino_tasks, inv_domino_tasks_rules, info) + +def check_domino_tasks(item, params, info): + return check_ps_common( item, params, info, info_name = "Tasks" ) + +check_info['domino_tasks'] = { + "check_function" : check_domino_tasks, + "inventory_function" : inventory_domino_tasks, + "has_perfdata" : True, + "group" : "domino_tasks", + "service_description" : "Domino Task %s", + "includes" : [ "ps.include" ], + "node_info" : True, # add first column with actual host name + "snmp_scan_function" : lambda oid: oid(".1.3.6.1.2.1.1.2.0") == ".1.3.6.1.4.1.311.1.1.3.1.2", + "snmp_info" : (".1.3.6.1.4.1.334.72.1.1.6.1.2.1", + [ + 4, # InTaskName + ] + ), +} + diff -Nru check-mk-1.2.2p3/domino_transactions check-mk-1.2.6p12/domino_transactions --- check-mk-1.2.2p3/domino_transactions 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/domino_transactions 2015-06-24 09:48:36.000000000 +0000 @@ -0,0 +1,58 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. 
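domino_transactions below, like the other Domino checks in this diff, uses the generator-style check API: the inventory function yields (item, default_params) pairs and the check function yields one or more (state, infotext, perfdata) tuples, which Check_MK merges into a single service result. A toy check in the same style, with made-up names and an upper-levels params tuple:

def check_sessions_sketch(_no_item, params, info):
    # info is the agent/SNMP table; here a single value is assumed
    value = int(info[0][0])
    warn, crit = params
    state = 0
    if value >= crit:
        state = 2
    elif value >= warn:
        state = 1
    yield state, "%d sessions" % value, [("sessions", value, warn, crit)]

# list(check_sessions_sketch(None, (1000, 1500), [["1200"]]))
# -> [(1, '1200 sessions', [('sessions', 1200, 1000, 1500)])]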
+ +domino_transactions_default_levels = ( 30000, 35000 ) + +def inventory_domino_transactions(info): + if info: + yield None, 'domino_transactions_default_levels' + +def check_domino_transactions(_no_item, params, info): + if info: + reading = int(info[0][0]) + warn, crit = params + infotext = "Transactions per minute (avg): %s" % reading + levels = " (Warn/Crit at %s/%s)" % ( warn, crit ) + perfdata = [ ( "transactions", reading, warn, crit ) ] + state = 0 + if reading >= crit: + state = 2 + infotext += levels + elif reading >= warn: + state = 1 + infotext += levels + yield state, infotext, perfdata + + +check_info["domino_transactions"] = { + "check_function" : check_domino_transactions, + "inventory_function" : inventory_domino_transactions, + "service_description" : "Domino Server Transactions", + "has_perfdata" : True, + "snmp_scan_function" : lambda oid: oid(".1.3.6.1.2.1.1.2.0") == ".1.3.6.1.4.1.311.1.1.3.1.2", + "snmp_info" : (".1.3.6.1.4.1.334.72.1.1.6.3", [2]), + "group" : "domino_transactions" +} diff -Nru check-mk-1.2.2p3/domino_users check-mk-1.2.6p12/domino_users --- check-mk-1.2.2p3/domino_users 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/domino_users 2015-06-24 09:48:36.000000000 +0000 @@ -0,0 +1,58 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. 
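domino_users below again yields the *name* of its defaults tuple rather than its value. Because parse_autochecks_file() (earlier in this diff) keeps the parameter column as an unevaluated string, such a reference stays symbolic in the autochecks file, and later edits to the variable take effect without rediscovery. An illustrative contrast (editor's sketch, not from the source):

def inventory_literal(info):
    if info:
        yield None, (1000, 1500)                    # frozen at discovery time

def inventory_symbolic(info):
    if info:
        yield None, 'domino_users_default_levels'   # re-evaluated at check time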
+
+domino_users_default_levels = ( 1000, 1500 )
+
+def inventory_domino_users(info):
+    if info:
+        yield None, 'domino_users_default_levels'
+
+def check_domino_users(_no_item, params, info):
+    if info:
+        users = int(info[0][0])
+        warn, crit = params
+        infotext = "%d Domino Users on Server" % users
+        levels = " (Warn/Crit at %s/%s)" % ( warn, crit )
+        perfdata = [ ( "users", users, warn, crit ) ]
+        state = 0
+        if users >= crit:
+            state = 2
+            infotext += levels
+        elif users >= warn:
+            state = 1
+            infotext += levels
+        yield state, infotext, perfdata
+
+
+check_info["domino_users"] = {
+    "check_function"      : check_domino_users,
+    "inventory_function"  : inventory_domino_users,
+    "service_description" : "Domino Users",
+    "has_perfdata"        : True,
+    "snmp_scan_function"  : lambda oid: oid(".1.3.6.1.2.1.1.2.0") == ".1.3.6.1.4.1.311.1.1.3.1.2",
+    "snmp_info"           : (".1.3.6.1.4.1.334.72.1.1.6.3", [6]),
+    "group"               : "domino_users"
+}
diff -Nru check-mk-1.2.2p3/drafts/LIESMICH.checkgenerator check-mk-1.2.6p12/drafts/LIESMICH.checkgenerator
--- check-mk-1.2.2p3/drafts/LIESMICH.checkgenerator	1970-01-01 00:00:00.000000000 +0000
+++ check-mk-1.2.6p12/drafts/LIESMICH.checkgenerator	2015-09-21 10:59:54.000000000 +0000
@@ -0,0 +1,36 @@
+Idea: a WATO-driven generator for checks that produces check
+templates.
+
+A kind of wizard or ValueSpec in which you fill in:
+
+Name of the check (internal): [______________]
+Service description:          [____________]
+Check has items:              [X]
+Features of the check:
+    Check has parameters:        [X]
+    Check processes counters:    [X]
+    Thresholds with prediction:  [X]
+    Averaging:                   [X]
+    Parse function:              [X]
+
+Title for the man page: [__________________________________________]
+Type: [v SNMP]
+OID trees:
+    Base OID: 1.3.6.1._________________
+    Subtrees: 1.3  1.5  OIDEND
+Perfdata: [X]
+Check group for WATO: [_________]
+Include files: [_______________]
+
+Example output from the agent: TEXTAREA
+
+--> The result is:
+    - a check
+    - a check man page
+    - a PNP template
+    - a Perf-O-Meter template
+    - possibly a WATO template
+
+The settings from above are persisted, so that further checks
+can be generated easily.
+
diff -Nru check-mk-1.2.2p3/drafts/LIESMICH.Check_MK_2.0 check-mk-1.2.6p12/drafts/LIESMICH.Check_MK_2.0
--- check-mk-1.2.2p3/drafts/LIESMICH.Check_MK_2.0	1970-01-01 00:00:00.000000000 +0000
+++ check-mk-1.2.6p12/drafts/LIESMICH.Check_MK_2.0	2015-09-21 10:59:54.000000000 +0000
@@ -0,0 +1,62 @@
+Ideas for future cleanups and simplifications of Check_MK
+
+[1] Manual checks
+
+The current rule sets for manual checks go away. In their place
+there is a new WATO module that visually (and code-wise) matches
+the module "Host & Service Parameters". In this module you can
+work your way through the tree-like check catalog and pick a
+check type there (not a group). From that you can then statically
+create *items*, but without parameters! The current rules for the
+"inventorized checks" then immediately apply to the manually
+created checks as well and are simply called "Check Parameters".
+The agentless "Active Checks" are sorted into the new manual
+checks, too - even though, as an exception, they carry their
+parameters directly.
+
+
+[2] Process inventory
+
+The process inventory rule is simplified so that it is only a
+mapping from an item name (e.g. "NTP") to a process selection
+(e.g. ~.*sbin/ntp, any user). Inventory rules then no longer
+define any parameters at all.
+ +[3] Move the grouping rules + +The rules File Info Groups, Filesystem Groups and Process Names(?) +are moved into a box of their own. Possibly Interface groups +as well. + + +[4] Consolidate some WATO modules + +We could merge Host & Service Groups. And likewise Contact Groups and Users -> "Users". + + +[5] Terminology + +We clean up some terms and phrase things more from the +user's point of view than from that of the implementation or its history. + +Contact -> User (no distinction anymore) +Contact Group -> User group, or simply just Group +Inventory -> Service discovery + + +[6] Roles & Permissions + +The WATO module is then simply called "Permissions". The +entry table gets the heading "Userroles". + + +[7] Context help + + diff -Nru check-mk-1.2.2p3/drafts/LIESMICH.cookieauth check-mk-1.2.6p12/drafts/LIESMICH.cookieauth --- check-mk-1.2.2p3/drafts/LIESMICH.cookieauth 2013-03-06 07:57:34.000000000 +0000 +++ check-mk-1.2.6p12/drafts/LIESMICH.cookieauth 1970-01-01 00:00:00.000000000 +0000 @@ -1,75 +0,0 @@ -Authentication with cookies ----------------------------- - -Goals: - -* Login not via HTTP auth, but via a pretty login mask. - Why? Just because it is pretty. I currently see no other - advantages. Logging out meanwhile also works with HTTPBasicAuth. - -* Login should work in the addons Multisite, NagVis and PNP4Nagios. - - -Implementation: - -* The passwords stay in the htpasswd file as before (encrypted). - That way we stay compatible in any case. - -* When you reach index.py (the frameset) and do not have a login cookie yet, - a login mask appears instead of the frameset. There you enter name - and password and - if we are nice - can possibly also set your - language. - -* The typed-in password is checked against the htpasswd file. At least - MD5 (used by WATO) and crypt (default of htpasswd) are supported. - -* If that succeeds, a login cookie is generated. It contains - the following data in plain text: - - login name - - expiry time as a timestamp. If that is 0 it means "infinite". - The expiry time is set to the future - configurable in multisite.mk - - e.g. to 60 minutes. All of this is configurable per user account. - After the time has run out the cookie expires. - - These two values are then combined with the encrypted - password and a secret that is stored in the file etc/auth.secret. - All four values are joined with pipe symbols, e.g. - "harri|123909495|$1$090775$8M/9Eq3MlnP4yhafnc1ef/|L8jJLlk3ekFL" - This string is then hashed with MD5 and a cookie is built from it, - e.g. "harri|123909495|89a3292c8a1496e864a4ba3d4080cad9". That is then - the content of the cookie (see the sketch after this list). - -* The etc/auth.secret is rolled automatically if it does not - exist yet. - -* The WATO replication must also replicate the secret, so that a - login is always valid remotely as well. - -* The path to etc/auth.secret is derived from the path to the htpasswd - file. That one is known to Check_MK and is kept in the defaults in - the variable "htpasswd_file". You take its dirname. - -* For verification you take login name and expiry time from the cookie. If - the time has run out, the cookie is discarded. Then you fetch - the auth.secret and the user's encrypted password, build the - raw string again and hash it. If the MD5 sum matches the one in the - cookie you may continue. If the cookie has an expiry time - and *no* Ajax ID is set, the cookie is renewed automatically. - -* If you call any page other than index.py and have *no correct* cookie, - you are somehow redirected to the login mask. The original URL is - stored in it as a hidden field, so that after logging in you can continue - where you actually wanted to go. - -* PNP and NagVis must also be able to verify and renew a cookie. - For that the code for verifying the cookie must also be - written in PHP. - -* The setting whether to use htpasswd or cookies is simple: if a user - is already known at index.py (from Apache), use that one. Otherwise - use the cookies. - -* omd config gets a variable that controls this. It then switches - the Apache configuration for Check_MK, PNP and NagVis. If others - feel like it (Thruk, etc.) they can hook in as well. -
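A minimal Python sketch of the cookie scheme described in the deleted draft above; the function names are made up, and hashlib stands in for whatever MD5 helper the real implementation would have used:

    import hashlib

    def build_auth_cookie(username, expiry, pwhash, secret):
        # Raw string as described above: all four parts joined by pipes
        raw = "%s|%d|%s|%s" % (username, expiry, pwhash, secret)
        # Only name, expiry and the MD5 sum end up in the cookie
        return "%s|%d|%s" % (username, expiry, hashlib.md5(raw).hexdigest())

    def check_auth_cookie(cookie, pwhash, secret, now):
        username, expiry, _digest = cookie.split("|")
        if int(expiry) != 0 and int(expiry) < now:
            return None  # cookie has expired
        if build_auth_cookie(username, int(expiry), pwhash, secret) != cookie:
            return None  # MD5 sum does not match -> reject
        return username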
diff -Nru check-mk-1.2.2p3/drafts/LIESMICH.globalsettings check-mk-1.2.6p12/drafts/LIESMICH.globalsettings --- check-mk-1.2.2p3/drafts/LIESMICH.globalsettings 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/drafts/LIESMICH.globalsettings 2015-09-21 10:59:54.000000000 +0000 @@ -0,0 +1,41 @@ +It would be nice if some of the global settings could be configured per +host. This includes e.g. if_inventory_uses_description. But things like +agent_min_version, agent_simulator, simulation_mode, +inventory_max_cachefile_age, inventory_check_interval, inventory_check_severity +and others would also be useful per host. + +The following idea could solve this elegantly: we introduce a new +generic rule chain global_setting_per_host[]. It is +actually a dict of chains. Example: + +global_setting_per_host["inventory_check_interval"] = [ + ( 120, [ "linux", "prod" ], ALL_HOSTS ), + ( 1440, [ "linux", "test" ], ALL_HOSTS ), +] + +The implementation is as simple as can be: after loading the config +the variable is evaluated once and the corresponding global variables +are set (a sketch of this follows after this draft). Nothing at all needs +to change for the precompile, because those variables are already set +by then and get encoded correctly. + +In cases that involve several hosts (e.g. cmk -I), the function that +sets the global variables must be called whenever processing switches +to a new host. Some caching could be introduced here if +that takes too long. + +In WATO we can then migrate quite a few variables out of the global +settings. The question is whether we also do an automatic +migration. It could work like this: for every global variable that +deviates from the default and for which a rule chain in the new form +exists, a rule with ALL_HOSTS is inserted - if that chain is empty - +which sets exactly the changed value. At the same time we reset the +value back to the default. + +Still awkward: the user then no longer sees the default value. +But we have that problem with all rule chains. +Here we should in any case display the default value that +applies when no rule matches. +
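A rough sketch of the evaluation step described in the draft above. set_global_settings_for_host is an invented name, and tags_of_host, hosttags_match_taglist and in_extraconf_hostlist stand in for Check_MK's internal host-matching helpers of that era:

    def set_global_settings_for_host(hostname):
        # Called once after loading the config and again whenever
        # processing switches to a new host (e.g. during cmk -I)
        for varname, chain in global_setting_per_host.items():
            for value, tags, hostlist in chain:
                if hosttags_match_taglist(tags_of_host(hostname), tags) and \
                   in_extraconf_hostlist(hostlist, hostname):
                    # First matching rule wins, as with other rule chains
                    globals()[varname] = value
                    break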
diff -Nru check-mk-1.2.2p3/drafts/LIESMICH.interval check-mk-1.2.6p12/drafts/LIESMICH.interval --- check-mk-1.2.2p3/drafts/LIESMICH.interval 2013-11-05 09:22:37.000000000 +0000 +++ check-mk-1.2.6p12/drafts/LIESMICH.interval 1970-01-01 00:00:00.000000000 +0000 @@ -1,119 +0,0 @@ -Idea for a new approach to check intervals ---------------------------------------------- - -Problem: some of the plugins/commands in the agent take too long to be -executed every single time. Currently there is a trick of working with -a cache file - as e.g. the ORACLE agent does. -That is somewhat cumbersome and also does not work on Windows. - -I now have a new idea that simplifies the whole thing, above all for -the agent, and moves the intelligence into the central Check_MK. It -works like this: - -A new section option is introduced (the second one after sep(...)). -It tells Check_MK that a section is valid up to a certain -point in time and will (at most) not be sent anew by the agent -for that long. - -<<<foo:valid(...)>>> -foo1 bar test -foo2 bar test2 -... - -Within the next 300 seconds this section may be missing. In that -case Check_MK should simply take the value from last time - from the -old file in tmp/check_mk/cache. Only after the time has run out should -the usual warning be issued that data from the agent is missing. - -Check_MK now has to proceed as follows: when it notices that a section is -missing (and only then!), it loads the cache file. If that does not exist, -the section counts as definitively missing. If it is present, the section -is fetched from the cache file. If the timestamp has not been reached yet, -the section is taken and added to the agent's output, and is also appended -again to the newly created cache file (a sketch of this merge logic follows -further below). - -At the same time though - and here it comes(!) - Check_MK will then -not simply execute the check again with the cached data, but simply -skip it. That way the output in the GUI is correct, where you can see -how old check results are. The only thing that is still awkward is -the new staleness function, which then does not know how often the data -is actually supposed to arrive. - -To simplify the implementation with the plugins (see below), -the possibility is furthermore introduced to set section options -anonymously for future sections: - -<<<:valid(1353854778)>>> --> applies to all future sections -<<<:valid(0)>>> --> removes the option again - -Implementation in the agent (Linux): - -Here the agent somehow has to remember when it last executed a -thing. Here is a possible solution for the plugins directory: -below it we introduce subdirectories that correspond to a number of -minutes (or seconds?): - -/usr/lib/check_mk_agent/plugins/10/mk_oracle - -That means the data should only be computed every 10 minutes. -In the agent this is then implemented like this (the modification time -of the plugin itself is used as the indicator of when it was last called): - -# Execute timed plugins -cd $PLUGINS_DIR -for dir in $(find -type d) ; do - cd $dir - date '+<<<:valid(%s)>>>' -d "now + $dir min" - for plugin in $(find -cmin +$dir) ; do - touch $plugin - ./plugin - done -done - -The open question is how to do this efficiently for built-in plugins. -It would be nice if that worked.
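A sketch of the merge logic on the Check_MK side that the draft describes, assuming the cache keeps one (valid_until, lines) pair per section name; all names here are invented for illustration:

    def merge_cached_sections(agent_sections, cached_sections, now):
        # agent_sections / cached_sections:
        #   dict of section name -> (valid_until, lines)
        merged = dict(agent_sections)
        for name, (valid_until, lines) in cached_sections.items():
            if name not in merged and valid_until >= now:
                # Section is missing but still valid: reuse the old data,
                # and remember that the check itself must be skipped
                # rather than executed again with stale data
                merged[name] = (valid_until, lines)
        return merged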
- -Implementation in the agent (Windows): - -In the Windows agent the execution time is simply kept in memory. -In addition, the validities for the built-in sections can also be -configured in [global]. That then looks like this: - -[global] - valid logwatch = 10 - valid winperf_phydisk = 5 - - -SNMP: - -Here the interval can simply be controlled by a rule: - -snmp_check_interval["filesystem"] = [ - ( 3, ALL_HOSTS, ), -] - -Here we simply go by the check group. The item naturally cannot be -influenced, since a check always runs completely or not at all. -In addition a check could - analogous to what the Linux agent then -does - itself provide a default for its frequency. That is -then a new key in the check_info: - -check_info["hr_fs"] = { - .... - "interval" : 5, -} - -To make that work, one could work with timestamps on the Check_MK -cache files. Those are separate per check type after all. So that -could work. - -There is one more problem: if a manual reschedule was executed, -it would of course be nice if the interval were not taken into account -then. For that one would have to ignore the interval for SNMP checks, and -for agent checks at least access the data from the cache file and send it -to Nagios anyway, even though it is not current. At least new check -parameters then become active, even if the agent delivers the same -data again. To make that work, one would somehow have to find out -whether a check was triggered manually or not. Is that possible? -Does Nagios send anything here? - diff -Nru check-mk-1.2.2p3/drafts/LIESMICH.inventur check-mk-1.2.6p12/drafts/LIESMICH.inventur --- check-mk-1.2.2p3/drafts/LIESMICH.inventur 2013-03-04 11:48:41.000000000 +0000 +++ check-mk-1.2.6p12/drafts/LIESMICH.inventur 1970-01-01 00:00:00.000000000 +0000 @@ -1,58 +0,0 @@ -SERVER INVENTORY ------------------------ - -(from the reply to an email) - -We have indeed discussed the topic, and I have also given it some -thought. The ideas in LIESMICH.interval came about not least because -of it. - -The concept I have in mind, however, is not to simply use Check_MK -as a transport mechanism for an existing inventory script, but to -make full use of the existing means. The example you mailed me shows -the reason: the current agent already sends a large part of the data -today. If, e.g. on Linux, you additionally add: - -/proc/cpuinfo -rpm -qa -lspci -dmidecode - -then you have almost everything you need. The idea, as with monitoring -with Check_MK, is that the agent does not pre-evaluate the data - i.e. does -not itself search the output of lspci for a sound card, for example - but -that this is done in the central Check_MK. That is more efficient, more -flexible, easier to change, and above all makes for a much simpler agent. - -So what would need to be done is: - -* Extend the agents by a handful of plugins, cushioning potential long -runners with a larger interval. In the example above that may not even -be necessary. - -* Write inventory-based parsers for the existing and new relevant sections. -They then extract data and file it into a structured tree. The tree is -written to a file per host (a parser sketch follows after this draft). - -* Extend the Check_MK command line with commands for the inventory. - -* Also make the inventory operable from WATO - e.g. -triggering it directly. Possibly, though, the inventory is simply realized -as an active check. That way the Nagios functions could be used directly -(e.g. reschedule). - -* Pages in the Multisite GUI with which this can be displayed -nicely. Possibly the table functions that already exist today are -used. That way everything could be reused, like filter/search functions, -sorting, grouping, free column selection, export to JSON, etc., and the -data could also immediately be linked with monitoring data. Possibly also -a web service for an XML export. - -* Implement all of this for distributed monitoring as well, i.e. access -via Multisite to inventory data that lives on another host - -or synchronization of the data. - -* And - hardest of all - find a good term for the whole thing. Because -Check_MK already uses the term "inventory" for the automatic -setup of services.... -
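As a sketch of the parser idea from the list above: one small function per agent section that files its findings into the structured tree. The section name and tree layout are invented for illustration:

    def inv_parse_dmidecode(info, tree):
        # info: the raw lines of the agent's dmidecode section
        # tree: nested dict holding the inventory tree of one host
        node = tree.setdefault("hardware", {}).setdefault("system", {})
        for line in info:
            if line.startswith("Manufacturer:"):
                node["manufacturer"] = line.split(":", 1)[1].strip()
            elif line.startswith("Product Name:"):
                node["product"] = line.split(":", 1)[1].strip()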
diff -Nru check-mk-1.2.2p3/drafts/LIESMICH.windows_persisted check-mk-1.2.6p12/drafts/LIESMICH.windows_persisted --- check-mk-1.2.2p3/drafts/LIESMICH.windows_persisted 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/drafts/LIESMICH.windows_persisted 2015-09-21 10:59:54.000000000 +0000 @@ -0,0 +1,20 @@ + +So we want to build a persisted feature into the agent on Windows.. + +- extend the script container struct by a list of hosts and their last + persisted point in time + +- when sending the data to a host, check whether + a) persisted data should be reported again + b) if so, kick off the collection of the data again, but do not report anything yet + c) on the next call the data is surely current; report it + and update the persisted timestamp of the host + +Each script can be configured individually in check_mk.ini. + +# send data out into the world every 60 minutes (host-dependent) +persisted mk_inventory.ps1 = 60 + +A further thought: maybe do not necessarily call it persisted. +The feature could possibly also be used for other purposes. + diff -Nru check-mk-1.2.2p3/drafts/README.cmk-register-host check-mk-1.2.6p12/drafts/README.cmk-register-host --- check-mk-1.2.2p3/drafts/README.cmk-register-host 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/drafts/README.cmk-register-host 2015-09-21 10:59:54.000000000 +0000 @@ -0,0 +1,32 @@ +Draft for a command line tool (Linux and Windows) for adding a host to the +monitoring configuration by calling the tool right on the target machine. + +The idea is: + +1. Set up a new Linux/Windows box, install the Check_MK Agent on it +2. Right on that box call a command line tool: + +Example 1: Add this host to the Check_MK server cmkserver.my.net with the OMD +site mysite. + +> cmk-register-host cmkserver.my.net/mysite + +Example 2: Specify host tags and a target folder, also activate the changes immediately + +> cmk-register-host --tags windows,prod,muc --folder foo/bar --activate cmkserver.my.net/mysite + + +Commandline options: + --tags A,B,C Comma separated list of host tags to add to the host (default: inherit from folder) + --folder F Put the host into that WATO folder (default: main folder). You have to + specify the physical folder path, not the WATO display name + --no-inventory do *not* automatically inventorize the host after adding it + --activate automatically activate the changes after adding it + --user U WATO username to log in with (otherwise: prompted) + --password P Password for logging into WATO (otherwise: prompted) + --secret S Secret for logging into WATO (for automation users) + + --hostname H Force hostname H (otherwise: read from system hostname) + --ipaddress I Force IP address I (otherwise: let Check_MK use DNS lookup) + --site S Make Check_MK monitor this host by site S (otherwise: inherit from folder) +
diff -Nru check-mk-1.2.2p3/drafts/README.predictive check-mk-1.2.6p12/drafts/README.predictive --- check-mk-1.2.2p3/drafts/README.predictive 2013-03-04 11:48:41.000000000 +0000 +++ check-mk-1.2.6p12/drafts/README.predictive 1970-01-01 00:00:00.000000000 +0000 @@ -1,239 +0,0 @@ -PREDICTIVE MONITORING ---------------------- - -1) Introduction - -Some people call the following concept "predictive monitoring": Let's assume -that you have problems assigning levels for your CPU load on a specific server, -because at certain times in a week an important CPU-intensive job is running. -These jobs do produce a high load - which is completely OK. At other times - -however - a high load could indicate a problem. If you now set the warn/crit -levels such that the check does not trigger during the runs of the job, -you make the monitoring blind to CPU load problems in the other periods. - -What we need are levels that change dynamically with time, so that e.g. a load -of 10 on Monday at 10:00 is OK while the same load at 11:00 should raise an alert. -If those levels are then *automatically computed* from the values that have -been measured in the past, then we get an intelligent monitoring that "predicts" -what is OK and what is not. Other people also call this "anomaly detection". - -2) An idea for a solution - -Our idea is that we use the data that are kept in RRDs in order to compute -sensible dynamic levels for certain check parameters. Let's stay with the CPU -load as an example. In any default Check_MK or OMD installation, PNP4Nagios -and thus RRDs are used for storing historic performance data such as the CPU -load for up to four years. This will be our basis. We will analyse this data -from time to time and compute a forecast for the future. - -Before we can do this we need to understand that the whole prediction idea -is based on *periodic intervals* that repeat again and again. For example, if -we had a high CPU load on each of the last 50 Mondays from 10:00 to 10:05, -then we assume that on the next Monday we will have a similar development. -But the day of the week might not always be the way to go. Here are some -possible periods: - -* The day of the week -* The day of the month (1st, 2nd, ...) -* The day of the month reverse (last, 2nd last, ...) -* The hour -* Just whether it's a work day or a holiday - -In general we need to make two decisions: -* The slicing (for example "one day") -* The grouping (for example "group by the day of the week") - -Example: if we slice into days and then group by the day of the week, then we -get seven different groups. For each of these groups we separately compute -a prediction by fetching the relevant data from the past - limited to a -certain time horizon, for example to the data of the last 20 Mondays.
Then -we overlay these 20 graphs and compute for each time of day the - -- Maximum value -- Minimum value -- Average value -- Standard deviation - -When doing this we could impose a larger weight on more recent Mondays and -a lesser weight on the older ones. The result is condensed information -about the past that we use as a prediction for the future. - -Based on that prediction we can now construct dynamic levels by creating -a "corridor". An example could be: "Raise a warning if the CPU load is -more than 10% higher than the predicted value". A percentage is not the -only way to go. Useful seem at least: - -- +/- a percentage of the predicted value (average, min or max) -- an absolute difference to the predicted value (e.g. +/- 5) -- a difference in relation to the standard deviation - -Working with the standard deviation takes into account the difference -between situations where the historic values show a greater or smaller -variety. In other words: the smaller the standard deviation, the more precise -the prediction is. - -It is not only possible to set upper levels - lower levels are also -possible. In other words: "Warn me if the CPU load is too *low*!" This could -be a hint at an important job that is *not* running. - - -3) Implementation within Check_MK - -When trying to find a good architecture for an implementation, several aspects -have to be taken into account: - -- performance (used CPU/IO/disk resources on the monitoring host) -- code complexity - and thus cost of implementation and code maintenance -- flexibility for the user -- transparency to the user -- openness to later improvements - -The implementation that we suggest tries to maximise all those aspects - -while we are sure that even better ideas might exist... - -The implementation touches various areas of Check_MK and consists of the -following tasks: - -a) A script/program that analyses RRD data and creates predicted dynamic levels -b) A helper function for checks that makes use of that data when they need - to determine levels -c) Implementing dynamic levels in several checks by using that function -d) Enhancing the WATO rules of those checks such that the user can configure - the dynamic levels -e) Adapting the PNP templates of those checks such that the dynamic levels - are being displayed in the graphs. - - -Implementation details: - -a) Analyser script - -This script needs the following input parameters: - -* Hostname and RRD and variable therein to analyse - (e.g. srvabc012 / CPU load / load1) - -* Slicing - (e.g. 24 hours, aligned to UTC) - -* Slice to compute [1] - (e.g. "monday") - -* Grouping [2] - (e.g. group by day of week) - -* Time horizon - (e.g. 20 weeks into the past) - -* Weight function - (e.g. the weight of each week is just 90% of the weight - of the succeeding week, or: weight all weeks identically) - -Notes: - - [1] If we just compute one slice at a time (e.g. only Monday) then we can - cut down the running time of the script and can do this right within - the check on an on-demand basis. - - [2] The grouping can be implemented as a Python function that maps a time - stamp (the beginning of a slice) to a string that represents the - group. E.g. 1763747600 -> "monday".
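A sketch of such a grouping function for the weekday grouping; whether local time or UTC is the right base would still have to be decided:

    import time

    def group_by_weekday(timestamp):
        # Map the beginning of a slice to the name of its group
        weekdays = [ "monday", "tuesday", "wednesday", "thursday",
                     "friday", "saturday", "sunday" ]
        return weekdays[time.localtime(timestamp).tm_wday]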
- -The result is a binary encoded file that is stored below var, e.g.: - -var/check_mk/prediction/srvabc012/CPU load/load1/monday - -A second file (Python repr() syntax) contains the input parameters including -the time of the youngest contained slice: - -var/check_mk/prediction/srvabc012/CPU load/load1/monday.info - -This info file allows re-running the prediction only when the check parameters have changed -or when a new slice is needed. - -The implementation of this program is in Python, if that is fast enough (which I -assume), or in C++ otherwise. If it is in Python then we do not need an external -program but can put this into a module (just like currently snmp.py or automation.py) -and call it directly from the check. - - -b) Helper function for checks - -When a check (e.g. cpu.loads) wants to apply dynamic levels, it should -call a helper function that encapsulates all of the intelligent stuff of the -prediction. An example call could be the following (the current hostname and -checktype are implicitly known, the service description is being computed -from the checktype and the item): - -# These values come from the checks' parameters and are configured -# on a per-check base: -analyser_params = { - "slicing" : (24 * 3600, 0), # duration in sec, offset from UTC - "slice" : "monday", - "grouping" : "weekday", - "horizon" : 24 * 3600 * 150, # 150 days back - "weight" : 0.95, -} -warn, crit = predict_levels("load1", "avg", "relative", (0.05, 0.10), analyser_params) - -The function prototype looks like this: - -def predict_levels(ds_name, levels_rel, levels_op, levels, prediction_parameters, item=None): - - Get current slice - - Check if prediction file is up-to-date - - if not, (re-)create prediction file - - get min, max, avg, stddev from prediction file for current time - - compute levels from that by applying levels_rel, levels_op and levels - - return levels - - -c) Implementing dynamic levels in several checks - -From the existing variety of different checks it should be clear that there -can be no generic way of enabling dynamic levels for *all* checks. So we -need to concentrate on those checks where dynamic levels make sense. Such -are for example: - -CPU Load -CPU Utilization -Memory usage (?) -Used bandwidth on network ports -Disk IO -Kernel counters like context switches and process creations - -Those checks should get *additional* parameters for dynamic levels. That way the -current configuration of those checks stays compatible and you can impose an ultimate -upper limit - regardless of dynamic computations. - -Some of those checks need to be converted from a simple tuple to a dictionary based -configuration in order to do this. Here we must make sure that old tuple-based -configurations are still supported. - - -d) WATO rules for dynamic levels - -When the user is using dynamic levels for a check, he needs to (or better: -can) specify many parameters, as we have seen. All those parameters are -mostly the same for all the different checks that support dynamic levels. We -can create a helper function that makes it easier to declare such parameters -in WATO rules. - -Checks that support upper *and* lower levels will get two sets of parameters, because -the logic for upper and lower levels might differ. But they need to share the same -parameters for the prediction generation (slicing, etc.) so that one prediction file -per check is sufficient. - - -e) PNP Templates - -Making the actually predicted levels transparent to the user is a crucial point -in the implementation.
An easy way to do this is to simply add the predicted -levels as additional performance values. The PNP templates of the affected -checks need to detect the availability of those values and add nice lines to -the graph. - -This is easy - while having one drawback: the user cannot see predicted levels -before they are actually applied. The advantage on the other hand is that -if parameters for the prediction are changed, the graph still correctly -shows the levels that had been valid at each point of time in the past. - diff -Nru check-mk-1.2.2p3/drbd check-mk-1.2.6p12/drbd --- check-mk-1.2.2p3/drbd 2013-11-05 09:23:07.000000000 +0000 +++ check-mk-1.2.6p12/drbd 2015-09-21 10:59:54.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. @@ -147,7 +147,7 @@ drbd_disk_default_levels = ( None, None ) drbd_stats_default_levels = ( None, None, None, None, None, None, None, None, None ) -drbd_block_start_match = re.compile('^[0-9]+:') +_drbd_block_start_match = re.compile('^[0-9]+:') drbd_general_map = [ 'cs', 'ro', 'ds' ] drbd_net_map = [ 'cs', 'ns', 'nr' ] @@ -166,21 +166,37 @@ 'SyncSource': 1, 'SyncTarget': 1, 'PausedSyncS': 1, 'PausedSyncT': 1, 'VerifyS': 0, 'VerifyT': 0, + 'Ahead': 1, 'Behind': 1, +} + +drbd_ds_map = { + "primary_Diskless": 2, "secondary_Diskless": 2, + "primary_Attaching": 2, "secondary_Attaching": 2, + "primary_Failed": 2, "secondary_Failed": 2, + "primary_Negotiating": 2, "secondary_Negotiating": 2, + "primary_Inconsistent": 1, "secondary_Inconsistent": 1, + "primary_Outdated": 2, "secondary_Outdated": 2, + "primary_DUnknown": 2, "secondary_DUnknown": 2, + "primary_Consistent": 2, "secondary_Consistent": 2, + "primary_UpToDate": 0, "secondary_UpToDate": 0, } def inventory_drbd(info, checktype): inventory = [] for line in info[2:]: - if drbd_block_start_match.search(line[0]) > 0: + if _drbd_block_start_match.search(line[0]) > 0: parsed = drbd_parse_block(drbd_extract_block('drbd%s' % line[0][:-1], info), checktype) # Skip unconfigured drbd devices if parsed['cs'] == 'Unconfigured': continue if checktype == 'drbd': - levels = '( [ "%s", "%s" ], [ "%s", "%s" ] )' % \ - (parsed['ro'][0], parsed['ro'][1], - parsed['ds'][0], parsed['ds'][1]) + if 'ro' not in parsed or 'ds' not in parsed: + continue + levels = { + "roles_inventory": parsed['ro'], + "diskstates_inventory": parsed['ds'], + } elif checktype == 'drbd.net': levels = "drbd_net_default_levels" elif checktype == 'drbd.disk': @@ -219,7 +235,7 @@ for line in info[2:]: if "drbd" + line[0][:-1] == item: inBlock = True - elif inBlock and drbd_block_start_match.search(line[0]) > 0 \ + elif inBlock and _drbd_block_start_match.search(line[0]) > 0 \ and "drbd" + line[0][:-1] != item: # Another block starts.
So the requested block is finished break @@ -233,6 +249,7 @@ return block + def drbd_get_block(item, info, checktype): block = drbd_extract_block(item, info) if len(block) > 0: @@ -240,76 +257,147 @@ else: return None + def check_drbd_general(item, params, info): parsed = drbd_get_block(item, info, 'drbd') + + if type(params) == tuple: + params_conv = {} + params_conv.update({ "roles_inventory": params[0] and params[0] or None }) + params_conv.update({ "diskstates_inventory": (params[0] and params[1]) and params[1] or None}) + params = params_conv + if not parsed is None: if parsed['cs'] == 'Unconfigured': - return (2, 'CRIT - The device is "Unconfigured"') + return (2, 'The device is "Unconfigured"') elif not parsed['cs'] in drbd_cs_map: - return (3, 'UNKNOWN - Undefined "connection state" in drbd output') + return (3, 'Undefined "connection state" in drbd output') # Weight of connection state is calculated by the drbd_cs_map. # The roles and disk states are calculated using the expected values state = drbd_cs_map[parsed['cs']] output = 'Connection State: %s' % parsed['cs'] - output += ', Roles: %s/%s' % (parsed['ro'][0], parsed['ro'][1]) - if params[0] is not None and parsed['ro'] != params[0]: - state = state < 2 and 2 or state - output += ' (Expected: %s/%s)' % (params[0][0], params[0][1]) - output += ', Disk States: %s/%s' % (parsed['ds'][0], parsed['ds'][1]) - if params[1] is not None and parsed['ds'] != params[1]: - state = state < 2 and 2 or state - output += ' (Expected: %s/%s)' % (params[1][0], params[1][1]) - - return (state, "%s - %s" % (nagios_state_names[state], output)) + # Roles + output += ', Roles: %s/%s' % tuple(parsed['ro']) + current_roles = "_".join(map(str.lower, parsed["ro"])) + + found_role_match = False + if "roles" in params: + roles = params.get("roles") + if roles: + for roles_entry, roles_state in roles: + if roles_entry == current_roles: + found_role_match = True + state = max(state, roles_state) + output += ' %s' % state_markers[roles_state] + break + else: # Ignore roles if set to None + found_role_match = True + + if not found_role_match: + if "roles_inventory" in params: + roles_inventory = params.get("roles_inventory") + if roles_inventory and parsed["ro"] != roles_inventory: + state = max(2, state) + output += ' (Expected: %s/%s)' % tuple(params.get("roles_inventory")) + else: + state = max(3, state) + output += ' (Check requires a new service discovery)' - return (3, "UNKNOWN - Undefined state") + output += ', Diskstates: %s/%s' % tuple(parsed['ds']) + # Do not evaluate diskstates. 
Either set by rule or through the + # legacy configuration option None in the check parameters tuple + if "diskstates" in params and params["diskstates"] == None or \ + "diskstates_inventory" in params and params["diskstates_inventory"] == None: + return (state, output) + + + params_diskstates_dict = dict(params.get("diskstates", [])) + diskstates_info = set() + for ro, ds in [ (parsed["ro"][0], parsed["ds"][0]), (parsed["ro"][1], parsed["ds"][1]) ]: + diskstate = "%s_%s" % (ro.lower(), ds) + params_diskstate = params_diskstates_dict.get(diskstate) + + if params_diskstate != None: + state = max(state, params_diskstate) + diskstates_info.add('%s/%s is %s' % (ro, ds, state_markers[params_diskstate])) + else: + default_state = drbd_ds_map.get(diskstate, 3) + if default_state > 0: + diskstates_info.add('%s/%s is %s' % (ro, ds, state_markers[default_state])) + state = max(state, drbd_ds_map.get(diskstate, 3)) + if diskstates_info: + output += " (%s)" % ", ".join(diskstates_info) + + return (state, output) + + return (3, "Undefined state") + +check_info["drbd"] = { + 'inventory_function' : lambda info: inventory_drbd(info, "drbd"), + 'check_function' : check_drbd_general, + 'group' : 'drbd', + 'has_perfdata' : True, + 'service_description' : 'DRBD %s status', +} -def drbd_get_counters(list): +def drbd_get_rates(list): now = time.time() output = '' perfdata = [] for type, name, item, value, uom in list: - try: - timedif, rate = get_counter("%s.%s.%s" % (type, name, item), now, value) - perfdata.append((name, rate)) - output += ' %s/sec: %s%s' % (name, rate, uom) - except MKCounterWrapped: - perfdata = [] - break + rate = get_rate("%s.%s.%s" % (type, name, item), now, value) + perfdata.append((name, rate)) + output += ' %s/sec: %s%s' % (name, rate, uom) return (output, perfdata) def check_drbd_net(item, params, info): parsed = drbd_get_block(item, info, 'drbd.net') if not parsed is None: if parsed['cs'] == 'Unconfigured': - return (2, 'CRIT - The device is "Unconfigured"') - output, perfdata = drbd_get_counters([ ('drbd.net', 'in', item, int(parsed['nr']), 'kb'), + return (2, 'The device is "Unconfigured"') + output, perfdata = drbd_get_rates([ ('drbd.net', 'in', item, int(parsed['nr']), 'kb'), ('drbd.net', 'out', item, int(parsed['ns']), 'kb') ]) # FIXME: Maybe handle thresholds in the future - return (0, "OK -%s" % output, perfdata) + return (0, output, perfdata) + + return (3, "Undefined state") - return (3, "UNKNOWN - Undefined state") +check_info["drbd.net"] = { + 'inventory_function' : lambda info: inventory_drbd(info, "drbd.net"), + 'check_function' : check_drbd_net, + 'group' : 'drbd.net', + 'has_perfdata' : True, + 'service_description' : 'DRBD %s net', +} def check_drbd_disk(item, params, info): parsed = drbd_get_block(item, info, 'drbd.disk') if not parsed is None: if parsed['cs'] == 'Unconfigured': - return (2, 'CRIT - The device is "Unconfigured"') - output, perfdata = drbd_get_counters([ ('drbd.disk', 'write', item, int(parsed['dw']), 'kb'), + return (2, 'The device is "Unconfigured"') + output, perfdata = drbd_get_rates([ ('drbd.disk', 'write', item, int(parsed['dw']), 'kb'), ('drbd.disk', 'read', item, int(parsed['dr']), 'kb') ]) # FIXME: Maybe handle thresholds in the future - return (0, "OK -%s" % output, perfdata) + return (0, output, perfdata) + + return (3, "Undefined state") - return (3, "UNKNOWN - Undefined state") +check_info["drbd.disk"] = { + 'inventory_function' : lambda info: inventory_drbd(info, "drbd.disk"), + 'check_function' : check_drbd_disk, + 'group' : 
'drbd.disk', + 'has_perfdata' : True, + 'service_description' : 'DRBD %s disk', +} def check_drbd_stats(item, params, info): parsed = drbd_get_block(item, info, 'drbd.stats') if not parsed is None: if parsed['cs'] == 'Unconfigured': - return (2, 'CRIT - The device is "Unconfigured"') + return (2, 'The device is "Unconfigured"') output = '' perfdata = [] for key, label in [ ('al', 'activity log updates'), ('bm', 'bit map updates'), @@ -320,13 +408,17 @@ if key in parsed: output += '%s: %s, ' % (label, parsed[key]) else: - parsed[key] = 0 # perfdata must always have same number of entries - perfdata.append(('%s' % label.replace(" ", "_"), parsed[key])) - return (0, 'OK - ' + output.rstrip(', '), perfdata) - - return (3, "UNKNOWN - Undefined state") - -check_info['drbd'] = (check_drbd_general, "DRBD %s status", 1, lambda info: inventory_drbd(info, "drbd")) -check_info['drbd.net'] = (check_drbd_net, "DRBD %s net", 1, lambda info: inventory_drbd(info, "drbd.net")) -check_info['drbd.disk'] = (check_drbd_disk, "DRBD %s disk", 1, lambda info: inventory_drbd(info, "drbd.disk")) -check_info['drbd.stats'] = (check_drbd_stats, "DRBD %s stats", 1, lambda info: inventory_drbd(info, "drbd.stats")) + parsed[key] = '0' # perfdata must always have same number of entries + if parsed[key].isdigit(): + perfdata.append(('%s' % label.replace(" ", "_"), parsed[key])) + return (0, output.rstrip(', '), perfdata) + + return (3, "Undefined state") + +check_info["drbd.stats"] = { + 'inventory_function' : lambda info: inventory_drbd(info, "drbd.stats"), + 'check_function' : check_drbd_stats, + 'group' : 'drbd.stats', + 'has_perfdata' : True, + 'service_description' : 'DRBD %s stats', +} diff -Nru check-mk-1.2.2p3/drbd.disk check-mk-1.2.6p12/drbd.disk --- check-mk-1.2.2p3/drbd.disk 2013-11-05 09:23:07.000000000 +0000 +++ check-mk-1.2.6p12/drbd.disk 2015-06-24 09:48:36.000000000 +0000 @@ -1,6 +1,6 @@ -title: Check disk read and writes of DRBD devices +title: Disk read and writes of DRBD devices agents: linux -author: Lars Michelsen +catalog: os/storage license: GPL distribution: check_mk description: diff -Nru check-mk-1.2.2p3/drbd.net check-mk-1.2.6p12/drbd.net --- check-mk-1.2.2p3/drbd.net 2013-11-05 09:23:07.000000000 +0000 +++ check-mk-1.2.6p12/drbd.net 2015-06-24 09:48:36.000000000 +0000 @@ -1,6 +1,6 @@ -title: Check network load of DRBD devices +title: Network load of DRBD devices agents: linux -author: Lars Michelsen +catalog: os/storage license: GPL distribution: check_mk description: diff -Nru check-mk-1.2.2p3/drbd.stats check-mk-1.2.6p12/drbd.stats --- check-mk-1.2.2p3/drbd.stats 2013-11-05 09:23:07.000000000 +0000 +++ check-mk-1.2.6p12/drbd.stats 2015-06-24 09:48:36.000000000 +0000 @@ -1,11 +1,11 @@ -title: Check counter statistics of DRBD devices +title: Counter statistics of DRBD devices agents: linux -author: Lars Michelsen +catalog: os/storage license: GPL distribution: check_mk description: This check uses the contents of {/proc/drbd} as sent by the Check_MK linux - agent. The check records the statistic counter of each DRBD device. + agent. The check records the statistic counter of each DRBD device.
These counters are handled by this check: al (activity log updates), bm (bit map updates), lo (local count requests), pe (pending requests), diff -Nru check-mk-1.2.2p3/dummy check-mk-1.2.6p12/dummy --- check-mk-1.2.2p3/dummy 2013-11-05 09:23:07.000000000 +0000 +++ check-mk-1.2.6p12/dummy 1970-01-01 00:00:00.000000000 +0000 @@ -1,45 +0,0 @@ -title: Dummy check man page - used as template for new check manuals -agents: linux, windows, aix, solaris, hpux, vms, freebsd, snmp -author: Mathias Kettner -license: GPL -distribution: check_mk -description: - Describe here: (1) what the check actually does, (2) under which - circumstances it goes warning/critical, (3) which devices are supported - by the check, (4) if the check requires a separated plugin or - tool or separate configuration on the target host. - -item: - Describe the syntax and meaning of the check's item here. Provide all - information one needs if coding a manual check with {checks +=} in {main.mk}. - Give an example. If the check uses {None} as sole item, - then leave out this section. - -examples: - # Give examples for configuration in {main.mk} here. If the check has - # configuration variable, then give example for them here. - - # set default levels to 40 and 60 percent: - foo_default_values = (40, 60) - - # another configuration variable here: - inventory_foo_filter = [ "superfoo", "superfoo2" ] - -perfdata: - Describe precisely the number and meaning of performance variables - the check sends. If it outputs no performance data, then leave out this - section. - -inventory: - Describe how the inventory for the check works. Which items - will it find? Describe the influence of check specific - configuration parameters to the inventory. - -[parameters] -foofirst(int): describe the first parameter here (if parameters are grouped - as tuple) -fooother(string): describe another parameter here. - -[configuration] -foo_default_levels(int, int): Describe global configuration variable of - foo here. Important: also tell the user how they are preset. diff -Nru check-mk-1.2.2p3/elphase.include check-mk-1.2.6p12/elphase.include --- check-mk-1.2.2p3/elphase.include 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/elphase.include 2015-09-21 10:59:54.000000000 +0000 @@ -0,0 +1,169 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# tails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA.
+ +def inventory_elphase(parsed): + for item in parsed.keys(): + yield item, {} + + +def check_elphase(item, params, parsed): + + def tostring(value): + if type(value) == int: + return "%d" % value + else: + return "%.1f" % value + + if "voltage" in parsed[item]: + volt, state_info = parsed[item]["voltage"] + infotext = "Voltage: %s V" % tostring(volt) + if "voltage" in params: + warn, crit = params["voltage"] + levelstext = " (warn/crit below %s/%s V)" % (tostring(warn), tostring(crit)) + perfdata = [ ("voltage", volt, warn, crit) ] + if volt < crit: + status = 2 + infotext += levelstext + elif volt < warn: + status = 1 + infotext += levelstext + else: + status = 0 + else: + status = 0 + perfdata = [ ("voltage", volt) ] + + yield status, infotext, perfdata + if state_info: + yield state_info + + if "current" in parsed[item]: + current, state_info = parsed[item]["current"] + perfdata = [ ("current", current) ] + infotext = "Current: %s A" % tostring(current) + + if "current" in params: + warn, crit = params["current"] + levelstext = " (warn/crit at %s/%s A)" % (tostring(warn), tostring(crit)) + perfdata = [ ("current", current, warn, crit) ] + if current >= crit: + status = 2 + infotext += levelstext + elif current >= warn: + status = 1 + infotext += levelstext + else: + status = 0 + + else: + status = 0 + perfdata = [ ("current", current) ] + + yield status, infotext, perfdata + if state_info: + yield state_info + + if "load" in parsed[item]: + load, state_info = parsed[item]["load"] + infotext = "Load: %s%%" % tostring(load) + if "load" in params: + warn, crit = params["load"] + levelstext = " (warn/crit at %s/%s%%)" % (tostring(warn), tostring(crit)) + perfdata = [ ("load", load, warn, crit) ] + if load >= crit: + status = 2 + infotext += levelstext + elif load >= warn: + status = 1 + infotext += levelstext + else: + status = 0 + + else: + status = 0 + perfdata = [ ("load", load) ] + + yield status, infotext, perfdata + + if state_info: + yield state_info + + if "power" in parsed[item]: + power, state_info = parsed[item]["power"] + infotext = "Power: %s W" % tostring(power) + if "power" in params: + warn, crit = params["power"] + levelstext = " (warn/crit at %s/%s W)" % (tostring(warn), tostring(crit)) + perfdata = [ ("power", power, warn, crit) ] + if power >= crit: + status = 2 + infotext += levelstext + elif power >= warn: + status = 1 + infotext += levelstext + else: + status = 0 + + else: + status = 0 + perfdata = [ ("power", power) ] + + yield status, infotext, perfdata + if state_info: + yield state_info + + if "appower" in parsed[item]: + appower, state_info = parsed[item]["appower"] + infotext = "Apparent Power: %s VA" % tostring(appower) + if "appower" in params: + warn, crit = params["appower"] + levelstext = " (warn/crit at %s/%s VA)" % (tostring(warn), tostring(crit)) + perfdata = [ ("appower", appower, warn, crit) ] + if appower >= crit: + status = 2 + infotext += levelstext + elif appower >= warn: + status = 1 + infotext += levelstext + else: + status = 0 + + else: + status = 0 + perfdata = [ ("appower", appower) ] + + yield status, infotext, perfdata + if state_info: + yield state_info + + if "energy" in parsed[item]: + current, state_info = parsed[item]["energy"] + perfdata = [ ("energy", current) ] + infotext = "Energy: %s Wh" % tostring(current) + + yield 0, infotext, perfdata + if state_info: + yield state_info
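The include above expects a parse function that returns a dict of item -> measurements, each measurement being a (value, state_info) pair, where state_info is either None or an extra (state, text) result to yield. A hypothetical minimal check built on it could look like this; the check name, agent section layout and parse logic are invented for illustration:

    def parse_myphase(info):
        # Agent section assumed as lines of: name voltage current
        parsed = {}
        for name, volt, amp in info:
            parsed[name] = {
                "voltage": (float(volt), None),
                "current": (float(amp), None),
            }
        return parsed

    check_info["myphase"] = {
        "parse_function"      : parse_myphase,
        "inventory_function"  : inventory_elphase,
        "check_function"      : check_elphase,
        "service_description" : "Phase %s",
        "has_perfdata"        : True,
        "includes"            : [ "elphase.include" ],
    }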
diff -Nru check-mk-1.2.2p3/emc_datadomain_disks check-mk-1.2.6p12/emc_datadomain_disks --- check-mk-1.2.2p3/emc_datadomain_disks 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/emc_datadomain_disks 2015-06-24 09:48:36.000000000 +0000 @@ -0,0 +1,82 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# tails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. + +def inventory_emc_datadomain_disks(info): + inventory = [] + for line in info[0]: + item = line[0]+"-"+line[1] + inventory.append((item, None)) + return inventory + +def check_emc_datadomain_disks(item, _no_params, info): + state_table = { "1": ("Operational", 0), + "2": ("Unknown", 3), + "3": ("Absent", 1), + "4": ("Failed", 2), + "5": ("Spare", 0), + "6": ("Available", 0), + } + for line in info[0]: + if item == line[0]+"-"+line[1]: + model = line[2] + firmware = line[3] + serial = line[4] + capacity = line[5] + dev_state = line[6] + index = int(line[7].split('.')[1]) - 1 + busy = info[1][index][0] + dev_state_str = state_table.get(dev_state, ("Unknown",3))[0] + dev_state_rc = state_table.get(dev_state, ("Unknown",3))[1] + infotext = "%s, busy %s%% - %s, Firmware %s, Serial %s, Capacity %s" \ + % ( dev_state_str, busy, model, firmware, serial, capacity ) + perfdata = [('busy', busy+"%" )] + return dev_state_rc, infotext,perfdata + +check_info["emc_datadomain_disks"] = { + "check_function" : check_emc_datadomain_disks, + "inventory_function" : inventory_emc_datadomain_disks, + "service_description" : "Hard Disk %s", + "has_perfdata" : True, + "snmp_info" : [ + (".1.3.6.1.4.1.19746.1.6.1.1.1", + [ + 1, # diskPropEnclosureID + 2, # diskPropIndex + 4, # diskModel + 5, # diskFirmwareVersion + 6, # diskSerialNumber + 7, # diskPropCapacity + 8, # diskPropState + OID_END + ]), + (".1.3.6.1.4.1.19746.1.6.2.1.1", + [ + 6, # diskPerfBusy + ] + )], + "snmp_scan_function" : lambda oid: oid(".1.3.6.1.2.1.1.1.0").startswith("Data Domain OS") +}
diff -Nru check-mk-1.2.2p3/emc_datadomain_fans check-mk-1.2.6p12/emc_datadomain_fans --- check-mk-1.2.2p3/emc_datadomain_fans 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/emc_datadomain_fans 2015-07-01 12:18:10.000000000 +0000 @@ -0,0 +1,72 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# tails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. + +def inventory_emc_datadomain_fans(info): + inventory = [] + for line in info: + item = line[0]+"-"+line[1] + inventory.append((item, None)) + return inventory + +def check_emc_datadomain_fans(item, _no_params, info): + state_table = { "0": ("notfound", 1), + "1": ("OK", 0), + "2": ("Fail", 2), + } + fan_level = { "0": "Unknown", + "1": "Low", + "2": "Medium", + "3": "High" + } + for line in info: + if item == "%s-%s" % (line[0], line[1]): + dev_descr = line[2] + dev_level = line[3] + dev_state = line[4] + dev_state_str = state_table.get(dev_state, ("Unknown",3))[0] + dev_state_rc = state_table.get(dev_state, ("Unknown",3))[1] + dev_level_str = fan_level.get(dev_level, "Unknown") + infotext = "%s %s RPM %s" % ( dev_descr, dev_state_str, dev_level_str ) + return dev_state_rc, infotext + + +check_info["emc_datadomain_fans"] = { + "check_function" : check_emc_datadomain_fans, + "inventory_function" : inventory_emc_datadomain_fans, + "service_description" : "FAN %s", + "has_perfdata" : False, + "snmp_info" : (".1.3.6.1.4.1.19746.1.1.3.1.1.1", + [ + 1, # fanEnclosureID + 2, # fanIndex + 4, # fanDescription + 5, # fanLevel + 6, # fanStatus + ] + ), + "snmp_scan_function" : lambda oid: oid(".1.3.6.1.2.1.1.1.0").startswith("Data Domain OS") +}
You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. + +def inventory_emc_datadomain_fs(info): + mplist = [] + for line in info: + if line[1] in inventory_df_exclude_mountpoints: + continue + mplist.append((line[1], None)) + return mplist + +def check_emc_datadomain_fs(item, params, info): + fslist = [] + for line in info: + if item == line[1] or "patterns" in params: + size_mb = float(line[2])*1024.0 + avail_mb = float(line[4])*1024.0 + fslist.append((item, size_mb, avail_mb)) + return df_check_filesystem_list(item, params, fslist) + +check_info["emc_datadomain_fs"] = { + "check_function" : check_emc_datadomain_fs, + "inventory_function" : inventory_emc_datadomain_fs, + "service_description" : "DD-Filesystem %s", + "includes" : [ "df.include" ], + "group" : "filesystem", + "default_levels_variable" : "filesystem_default_levels", + "has_perfdata" : True, + "snmp_scan_function" : lambda oid: oid(".1.3.6.1.2.1.1.1.0").startswith("Data Domain OS"), + "snmp_info" : (".1.3.6.1.4.1.19746.1.3.2.1.1", + [ + 1, # fileSystemResourceIndex + 3, # fileSystemResourceName + 4, # fileSystemSpaceSize + 5, # fileSystemSpaceUsed + 6, # fileSystemSpaceAvail + 7, # fileSystemPercentUsed + 8, # fileSystemSpaceCleanable + ], + ), +} + diff -Nru check-mk-1.2.2p3/emc_datadomain_nvbat check-mk-1.2.6p12/emc_datadomain_nvbat --- check-mk-1.2.2p3/emc_datadomain_nvbat 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/emc_datadomain_nvbat 2015-06-24 09:48:36.000000000 +0000 @@ -0,0 +1,66 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. 
+ +def inventory_emc_datadomain_nvbat(info): + inventory = [] + for line in info: + item = line[0]+"-"+line[1] + inventory.append((item, None)) + return inventory + +def check_emc_datadomain_nvbat(item, _no_params, info): + state_table = { "0": ("OK", 0), + "1": ("Disabled", 1), + "2": ("Discharged", 2), + "3": ("Softdisabled", 1), + } + for line in info: + if item == line[0]+"-"+line[1]: + dev_charge = line[3] + dev_state = line[2] + dev_state_str = state_table.get(dev_state, ("Unknown",3))[0] + dev_state_rc = state_table.get(dev_state, ("Unknown",3))[1] + infotext = "Status %s Charge Level %s%%" % ( dev_state_str, dev_charge ) + perfdata = [('charge', dev_charge+"%" )] + return dev_state_rc, infotext, perfdata + + +check_info["emc_datadomain_nvbat"] = { + "check_function" : check_emc_datadomain_nvbat, + "inventory_function" : inventory_emc_datadomain_nvbat, + "service_description" : "NVRAM Battery %s", + "has_perfdata" : True, + "snmp_info" : (".1.3.6.1.4.1.19746.1.2.3.1.1", + [ + 1, # BatteriesIndex + 2, # BatteryIndex + 3, # BatteryStatus + 4, # BatteryCharge + ] + ), + "snmp_scan_function" : lambda oid: oid(".1.3.6.1.2.1.1.1.0").startswith("Data Domain OS") +} + diff -Nru check-mk-1.2.2p3/emc_datadomain_power check-mk-1.2.6p12/emc_datadomain_power --- check-mk-1.2.2p3/emc_datadomain_power 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/emc_datadomain_power 2015-06-24 09:48:36.000000000 +0000 @@ -0,0 +1,66 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# tails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA.
+ +def inventory_emc_datadomain_power(info): + inventory = [] + for line in info: + item = line[0]+"-"+line[1] + inventory.append((item, None)) + return inventory + +def check_emc_datadomain_power(item, _no_params, info): + state_table = { "0": ("Absent", 0), + "1": ("OK", 0), + "2": ("Failed", 2), + "3": ("Faulty", 2), + "4": ("Acnone", 1), + "99": ("Unknown", 3), + } + for line in info: + if item == line[0]+"-"+line[1]: + dev_descr = line[2] + dev_state = line[3] + dev_state_str = state_table.get(dev_state,("Unknown",3))[0] + dev_state_rc = state_table.get(dev_state,("Unknown",3))[1] + infotext = "%s Status %s" % ( dev_descr, dev_state_str ) + return dev_state_rc, infotext + +check_info["emc_datadomain_power"] = { + "check_function" : check_emc_datadomain_power, + "inventory_function" : inventory_emc_datadomain_power, + "service_description" : "Power Module %s", + "has_perfdata" : False, + "snmp_info" : (".1.3.6.1.4.1.19746.1.1.1.1.1.1", + [ + 1, # powerEnclosureID + 2, # powerModuleIndex + 3, # powerModuleDescription + 4, # powerModuleStatus + ] + ), + "snmp_scan_function" : lambda oid: oid(".1.3.6.1.2.1.1.1.0").startswith("Data Domain OS") +} + diff -Nru check-mk-1.2.2p3/emc_datadomain_temps check-mk-1.2.6p12/emc_datadomain_temps --- check-mk-1.2.2p3/emc_datadomain_temps 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/emc_datadomain_temps 2015-09-21 10:59:54.000000000 +0000 @@ -0,0 +1,69 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. 
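
Unlike the two checks above, the temperature check below is written as a generator: it always yields the temperature reading (with perfdata) and yields a second, non-OK result only when the sensor status is bad; Check_MK then aggregates the yielded subresults, with the worst state winning. A reduced stand-in with invented sample values:

# Simplified model of how generator-style checks are aggregated.
def check(sensor_ok):
    yield 0, "Temperature: 24 C (CPU)", [("temp", "24")]
    if not sensor_ok:
        yield 2, "OverheatCritical"

assert max(r[0] for r in check(True)) == 0    # only the OK subresult
assert max(r[0] for r in check(False)) == 2   # worst yielded state wins
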
+ +def inventory_emc_datadomain_temps(info): + inventory = [] + for line in info: + item = line[0]+"-"+line[1] + inventory.append((item, None)) + return inventory + +def check_emc_datadomain_temps(item, _no_params, info): + state_table = { "0": ("Failed", 1), + "1": ("OK", 0), + "2": ("Notfound", 1), + "3": ("OverheatWarning", 1), + "4": ("OverheatCritical", 2), + } + for line in info: + if item == line[0]+"-"+line[1]: + dev_temp = line[3] + dev_descr = line[2] + dev_state = line[4] + dev_state_str = state_table.get(dev_state, ("Unknown",3))[0] + dev_state_rc = state_table.get(dev_state, ("Unknown",3))[1] + yield 0, "Temperature: %s °C (%s)" % (dev_temp, dev_descr), [('temp', dev_temp)] + if dev_state_rc != 0: + yield dev_state_rc, dev_state_str + + +check_info["emc_datadomain_temps"] = { + "check_function" : check_emc_datadomain_temps, + "inventory_function" : inventory_emc_datadomain_temps, + "service_description" : "Temperature %s", + "has_perfdata" : True, + "snmp_info" : (".1.3.6.1.4.1.19746.1.1.2.1.1.1", + [ + 1, # tempEnclosureID + 2, # tempSensorIndex + 4, # tempSensorDescription + 5, # tempSensorCurrentValue + 6, # tempSensorStatus + ] + ), + "snmp_scan_function" : lambda oid: oid(".1.3.6.1.2.1.1.1.0").startswith("Data Domain OS") +} + diff -Nru check-mk-1.2.2p3/emc_isilon check-mk-1.2.6p12/emc_isilon --- check-mk-1.2.2p3/emc_isilon 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/emc_isilon 2015-06-24 09:48:36.000000000 +0000 @@ -0,0 +1,133 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. + +emc_isilon_info = [(".1.3.6.1.4.1.12124.1.1", [1, # clusterName + 2, # clusterHealth + 5, # configuredNodes + 6]), # onlineNodes + (".1.3.6.1.4.1.12124.2.1", [1, # nodeName + 2]), # nodeHealth + ] + +def emc_isilon_scan(oid): + return oid(".1.3.6.1.2.1.1.2.0").startswith(".1.3.6.1.4.1.12124.1") + +# .--ClusterHealth------------------------------------------------------. 
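
Because emc_isilon_info above declares a list of two SNMP tables, the check functions in this file receive info as a pair of tables: info[0] holds the cluster rows and info[1] the node rows. A small sketch of that indexing, with invented sample data:

# Sketch: indexing a two-table snmp_info result as the Isilon checks below do.
info = [
    [["my-cluster", "1", "3", "3"]],   # clusterName, clusterHealth, configured, online
    [["node-1", "0"]],                 # nodeName, nodeHealth
]
cluster_name, cluster_health = info[0][0][0], int(info[0][0][1])
node_name, node_health = info[1][0][0], int(info[1][0][1])
assert (cluster_name, node_name) == ("my-cluster", "node-1")
assert (cluster_health, node_health) == (1, 0)   # 1 = "attn", 0 = "ok"
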
+ +def inventory_emc_isilon_clusterhealth(info): + return [(None, None)] + +def check_emc_isilon_clusterhealth(item, _no_params, info): + status=int(info[0][0][1]) + statusmap = ("ok", "attn", "down", "invalid") + if status >= len(statusmap): + return 3, "ClusterHealth reports unidentified status %s" % status + else: + if status == 0: + rc = 0 + else: + rc = 2 + return rc, "ClusterHealth reports status %s" % statusmap[status] + +check_info["emc_isilon.clusterhealth"] = { + "check_function" : check_emc_isilon_clusterhealth, + "inventory_function" : inventory_emc_isilon_clusterhealth, + "service_description" : "Cluster Health", + "has_perfdata" : False, + "snmp_info" : emc_isilon_info, + "snmp_scan_function" : emc_isilon_scan +} + +#. +# .--NodeHealth------------------------------------------------------. + +def inventory_emc_isilon_nodehealth(info): + return [(None, None)] + +def check_emc_isilon_nodehealth(item, _no_params, info): + status=int(info[1][0][1]) + statusmap = ("ok", "attn", "down", "invalid") + nodename=info[1][0][0] + if status >= len(statusmap): + return 3, "nodeHealth reports unidentified status %s" % status + else: + if status == 0: + rc = 0 + else: + rc = 2 + return rc, "nodeHealth for %s reports status %s" % (nodename, statusmap[status]) + +check_info["emc_isilon.nodehealth"] = { + "check_function" : check_emc_isilon_nodehealth, + "inventory_function" : inventory_emc_isilon_nodehealth, + "service_description" : "Node Health", + "has_perfdata" : False, + "snmp_info" : emc_isilon_info, + "snmp_scan_function" : emc_isilon_scan +} + +#. +# .--Nodes------------------------------------------------------. + +def inventory_emc_isilon_nodes(info): + return [(None, None)] + +def check_emc_isilon_nodes(item, _no_params, info): + cluster_name, cluster_health, configured_nodes, online_nodes = info[0][0] + if configured_nodes == online_nodes: + rc = 0 + else: + rc = 2 + return rc, "Configured Nodes: %s / Online Nodes: %s" % (configured_nodes, online_nodes) + +check_info["emc_isilon.nodes"] = { + "check_function" : check_emc_isilon_nodes, + "inventory_function" : inventory_emc_isilon_nodes, + "service_description" : "Nodes", + "has_perfdata" : False, + "snmp_info" : emc_isilon_info, + "snmp_scan_function" : emc_isilon_scan +} + +#. +# .--Cluster- and Node Name-------------------------------------------. + +def inventory_emc_isilon_names(info): + return [(None, None)] + +def check_emc_isilon_names(item, _no_params, info): + return 0, "Cluster Name is %s, Node Name is %s" % (info[0][0][0], info[1][0][0]) + +check_info["emc_isilon.names"] = { + "check_function" : check_emc_isilon_names, + "inventory_function" : inventory_emc_isilon_names, + "service_description" : "Isilon Info", + "has_perfdata" : False, + "snmp_info" : emc_isilon_info, + "snmp_scan_function" : emc_isilon_scan +} + +#. diff -Nru check-mk-1.2.2p3/emc_isilon.clusterhealth check-mk-1.2.6p12/emc_isilon.clusterhealth --- check-mk-1.2.2p3/emc_isilon.clusterhealth 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/emc_isilon.clusterhealth 2015-06-24 09:48:36.000000000 +0000 @@ -0,0 +1,16 @@ +title: EMC Isilon: ClusterHealth +agents: snmp +catalog: hw/storagehw/emc +license: GPL +distribution: check_mk +description: + Checks the ClusterHealth (Overall Status) of an EMC Isilon Storage + System as reported by SNMP. + + Returns {OK} on status ok, {CRIT} on status attn, down or invalid + and {UNKN} on every other status. 
+ +inventory: + Finds exactly one service on each EMC Isilon System, named + Cluster Health + diff -Nru check-mk-1.2.2p3/emc_isilon_diskstatus check-mk-1.2.6p12/emc_isilon_diskstatus --- check-mk-1.2.2p3/emc_isilon_diskstatus 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/emc_isilon_diskstatus 2015-09-21 10:59:54.000000000 +0000 @@ -0,0 +1,56 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. + +def inventory_emc_isilon_diskstatus(info): + inventory = [] + for disk_id, name, disk_status, serial in info: + inventory.append( (disk_id, None) ) + return inventory + +def check_emc_isilon_diskstatus(item, _no_params, info): + for disk_id, name, disk_status, serial in info: + if disk_id == item: + message = "Disk %s, serial number %s status is %s" % (name, serial, disk_status) + if disk_status == "HEALTHY": + status = 0 + else: + status = 2 + return status, message + return 3, "Disk %s not found" % item + +check_info["emc_isilon_diskstatus"] = { + "check_function" : check_emc_isilon_diskstatus, + "inventory_function" : inventory_emc_isilon_diskstatus, + "service_description" : "Disk bay %s Status", + "snmp_info" : (".1.3.6.1.4.1.12124.2.52.1", + [ 1, # diskBay + 4, # diskDeviceName + 5, # diskStatus + 7, # diskSerialNumber + ]), + "snmp_scan_function" : lambda oid: oid(".1.3.6.1.2.1.1.2.0").startswith(".1.3.6.1.4.1.12124.1") +} + diff -Nru check-mk-1.2.2p3/emc_isilon_iops check-mk-1.2.6p12/emc_isilon_iops --- check-mk-1.2.2p3/emc_isilon_iops 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/emc_isilon_iops 2015-06-24 09:48:36.000000000 +0000 @@ -0,0 +1,52 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. 
+# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. + +def inventory_emc_isilon_iops(info): + inventory = [] + for name, iops in info: + inventory.append( (name, None) ) + return inventory + +def check_emc_isilon_iops(item, _no_params, info): + for name, iops in info: + if name == item: + perfdata = [ ("iops", iops) ] + message = "%s disk operations per second" % int(iops) + return 0, message, perfdata + return 3, "Disk not found" + +check_info["emc_isilon_iops"] = { + "check_function" : check_emc_isilon_iops, + "inventory_function" : inventory_emc_isilon_iops, + "service_description" : "Disk %s IO", + "has_perfdata" : True, + "snmp_info" : (".1.3.6.1.4.1.12124.2.2.52.1", + [ 2, # diskPerfDeviceName + 3, # diskPerfOpsPerSecond + ]), + "snmp_scan_function" : lambda oid: oid(".1.3.6.1.2.1.1.2.0").startswith(".1.3.6.1.4.1.12124.1") +} + diff -Nru check-mk-1.2.2p3/emc_isilon.names check-mk-1.2.6p12/emc_isilon.names --- check-mk-1.2.2p3/emc_isilon.names 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/emc_isilon.names 2015-06-24 09:48:36.000000000 +0000 @@ -0,0 +1,11 @@ +title: EMC Isilon: Report Cluster and Node Name +agents: snmp +catalog: hw/storagehw/emc +license: GPL +distribution: check_mk +description: + Reports the Cluster and Node Name of EMC Isilon Storage System. This + Check always returns {OK}. + +inventory: + Finds exactly one service on each EMC Isilon System, named Isilon Info. diff -Nru check-mk-1.2.2p3/emc_isilon.nodehealth check-mk-1.2.6p12/emc_isilon.nodehealth --- check-mk-1.2.2p3/emc_isilon.nodehealth 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/emc_isilon.nodehealth 2015-06-24 09:48:36.000000000 +0000 @@ -0,0 +1,16 @@ +title: EMC Isilon: NodeHealth +agents: snmp +catalog: hw/storagehw/emc +license: GPL +distribution: check_mk +description: + Checks the NodeHealth (Overall Status) of an EMC Isilon Storage + Node as reported by SNMP. + + Returns {OK} on status ok, {CRIT} on status attn, down or invalid + and {UNKN} on every other status. + +inventory: + Finds exactly one service on each EMC Isilon Node, named + Node Health + diff -Nru check-mk-1.2.2p3/emc_isilon.nodes check-mk-1.2.6p12/emc_isilon.nodes --- check-mk-1.2.2p3/emc_isilon.nodes 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/emc_isilon.nodes 2015-06-24 09:48:36.000000000 +0000 @@ -0,0 +1,14 @@ +title: EMC Isilon: Online Status of Nodes +agents: snmp +catalog: hw/storagehw/emc +license: GPL +distribution: check_mk +description: + Checks the Online Status of Nodes in an EMC Isilon Storage System. + + Returns {OK} if all configured nodes are listed as online nodes + and {CRIT} otherwise. + +inventory: + Finds exactly one service on each EMC Isilon System, named Nodes. 
+ diff -Nru check-mk-1.2.2p3/emcvnx_disks check-mk-1.2.6p12/emcvnx_disks --- check-mk-1.2.2p3/emcvnx_disks 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/emcvnx_disks 2015-09-21 10:59:54.000000000 +0000 @@ -0,0 +1,195 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. + +# Example output from agent: +# <<>> +# +# All Disks Information +# --------------------- +# + +# Bus 0 Enclosure 0 Disk 0 +# Vendor Id: SEAGATE +# Product Id: STE60005 CLAR600 +# Product Revision: ES0F +# Lun: Unbound +# Type: N/A +# State: Unbound +# Hot Spare: NO +# Prct Rebuilt: Unbound +# Prct Bound: Unbound +# Serial Number: 6SL342E6 +# Sectors: 0 (0) +# Capacity: 549691 +# Private: Unbound +# Bind Signature: 0x0, 0, 0 +# Hard Read Errors: 0 +# Hard Write Errors: 0 +# Soft Read Errors: 0 +# Soft Write Errors: 0 +# Read Retries: N/A +# Write Retries: N/A +# Remapped Sectors: N/A +# Number of Reads: 15922079 +# Number of Writes: 14841793 +# Number of Luns: 0 +# Raid Group ID: This disk does not belong to a RAIDGroup +# Clariion Part Number: DG118032656 +# Request Service Time: N/A +# Read Requests: 15922079 +# Write Requests: 14841793 +# Kbytes Read: 998099223 +# Kbytes Written: 1661571498 +# Stripe Boundary Crossing: None +# Drive Type: SAS +# Clariion TLA Part Number:005049274 +# User Capacity: 0 +# Idle Ticks: 162808947 +# Busy Ticks: 1220056 +# Current Speed: 6Gbps +# Maximum Speed: 6Gbps +# Queue Max: N/A +# Queue Avg: N/A +# Prct Idle: 0 +# Prct Busy: 0 +# Hardware Power Savings Qualified: NO +# Hardware Power Savings Eligible: NO +# Power Savings State: Full Power +# Current Power Savings Log Timestamp: N/A +# Spinning Ticks: N/A +# Standby Ticks: N/A +# Number of Spin Ups: N/A +# Arrivals with Nonzero Queue: 8982980 +# High Sum of Seeks: 315504963402436 +# Idle Ticks SPA: 81201290 +# Idle Ticks SPB: 81607657 +# Busy Ticks SPA: 812651 +# Busy Ticks SPB: 407405 +# Queue Length: 83023848 +# +# Bus 1 Enclosure 0 Disk 7 +# State: Removed +# +# Bus 1 Enclosure 0 Disk 8 +# Vendor Id: SEAGATE +# Product Id: STE60005 CLAR600 +# Product Revision: ES0F +# [...] 
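
For orientation before the parser that follows: Check_MK hands this agent section to the check as a list of word lists, one per line of the naviseccli output above. A minimal illustration:

# Sketch: how the raw text above arrives at parse_emcvnx_disks().
raw = [
    "Bus 0 Enclosure 0 Disk 0",
    "State: Unbound",
    "Hard Read Errors: 0",
]
info = [line.split() for line in raw]
assert info[0] == ["Bus", "0", "Enclosure", "0", "Disk", "0"]
assert info[1][-1] == "Unbound"
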
+ + +# Parse agent output into a dict of the form: +# parsed = { +# '0/0 Disk 0': {'Hard Read Errors': '0', +# 'Hard Write Errors': '0', +# 'state': 'Unbound'}, +# '1/0 Disk 7': {'state': 'Removed'}, +# '1/0 Disk 8': {'Hard Read Errors': '0', +# 'Hard Write Errors': '0', +# 'state': 'Enabled'}, +# } + +def parse_emcvnx_disks(info): + parsed = {} + for line in info: + if len(line) > 4 and line[0] == "Bus" and line[4] == "Disk": + encid = line[1] + "/" + line[3] + " " + line[4] + " " + line[5] + enc = {} + parsed[encid] = enc + elif len(line) > 1 and line[0] == "State:": + state = line[-1] + enc["state"] = state + elif len(line) > 2 and line[0] == "Hard" and line[2] == "Errors:": + error_count = saveint(line[-1]) + enc[line[0] + " " + line[1] + " Errors"] = error_count + elif len(line) > 1 and line[0] == "Kbytes" and line[1] in ["Read:", "Written:"]: + io_kbytes = saveint(line[-1]) + enc[line[0] + " " + line[1].replace(':', '')] = io_kbytes + return parsed + + +def inventory_emcvnx_disks(info): + parsed = parse_emcvnx_disks(info) + inventory = [] + for disk in parsed: + if parsed[disk]["state"] != "Empty": + inventory.append((disk, None)) + return inventory + + +def check_emcvnx_disks(item, _no_params, info): + now = time.time() + perfdata = [] + parsed = parse_emcvnx_disks(info) + if item not in parsed: + return 3, "Enclosure %s not found in agent output" % item + + diskstate = parsed[item]["state"] + message = "Enclosure %s is %s" % (item, diskstate) + if diskstate in ["Unbound", "Hot Spare Ready", "Enabled", "Ready"]: + nagstate = 0 + elif diskstate == "Rebuilding": + nagstate = 1 + message += " (!)" + else: + nagstate = 2 + message += " (!!)" + # on error state all other fields besides "State:" are missing, omitting... + return nagstate, message + + read_errors = parsed[item]["Hard Read Errors"] + message += ", Hard Read Errors: %s" % read_errors + if read_errors > 0: + nagstate = 2 + message += " (!!)" + + write_errors = parsed[item]["Hard Write Errors"] + message += ", Hard Write Errors: %s" % write_errors + if write_errors > 0: + nagstate = 2 + message += " (!!)" + + read_bytes = parsed[item]["Kbytes Read"] * 1024 + write_bytes = parsed[item]["Kbytes Written"] * 1024 + countername_r = "emcvnx_disks.read_bytes.%s" % item.replace(" ", "_") + countername_w = "emcvnx_disks.write_bytes.%s" % item.replace(" ", "_") + + read_bytes_per_sec = get_rate(countername_r, now, read_bytes) + message += ", Read: %s/s" % get_bytes_human_readable(read_bytes_per_sec) + perfdata.append(("read", read_bytes_per_sec)) + + write_bytes_per_sec = get_rate(countername_w, now, write_bytes) + message += ", Write: %s/s" % get_bytes_human_readable(write_bytes_per_sec) + perfdata.append(("write", write_bytes_per_sec)) + + return nagstate, message, perfdata + + +check_info['emcvnx_disks'] = { + "inventory_function" : inventory_emcvnx_disks, + "check_function" : check_emcvnx_disks, + "service_description" : "Enclosure %s", + "has_perfdata" : True, +} diff -Nru check-mk-1.2.2p3/emcvnx_hba check-mk-1.2.6p12/emcvnx_hba --- check-mk-1.2.2p3/emcvnx_hba 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/emcvnx_hba 2015-06-24 09:48:36.000000000 +0000 @@ -0,0 +1,128 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . 
\ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. + +# Example output from agent: +# <<>> +# Information about each SPPORT: +# +# SP Name: SP A +# SP Port ID: 0 +# SP UID: 50:06:01:60:BE:A0:5D:E5:50:06:01:60:3E:A0:5D:E5 +# Link Status: Up +# Port Status: Online +# Switch Present: YES +# Switch UID: 10:00:00:27:F8:28:52:5B:20:02:00:27:F8:28:52:5B +# SP Source ID: 66048 +# ALPA Value: 0 +# Speed Value : 8Gbps +# Auto Negotiable : NO +# Available Speeds: +# 2Gbps +# 4Gbps +# 8Gbps +# Auto +# Requested Value: Auto +# MAC Address: Not Applicable +# SFP State: Online +# Reads: 426729 +# Writes: 8683578 +# Blocks Read: 4917783 +# Blocks Written: 12008476 +# Queue Full/Busy: 0 +# I/O Module Slot: Onboard +# Physical Port ID: 2 +# Usage: Mirrorview +# SFP/Connector EMC Part Number: 019-078-042 +# SFP/Connector EMC Serial Number: 00000000000 +# SFP/Connector Vendor Part Number: AFBR-57D7APZ-E2 +# SFP/Connector Vendor Serial Number: AGL1213A3188822 +# SFP/Connector Supported Speeds: +# 2Gbps +# 4Gbps +# 8Gbps +# +# SP Name: SP A +# SP Port ID: 1 +# SP UID: 50:06:01:60:BE:A0:5D:E5:50:06:01:61:3E:A0:5D:E5 +# Link Status: Up +# Port Status: Online +# Switch Present: YES +# [...] 
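
The HBA check below turns the monotonically growing block counters into per-second rates via get_rate(), which remembers the previous sample in Check_MK's counter store. A simplified stand-in for that calculation; the function and sample numbers here are illustrative, not the real get_rate() API:

# Simplified rate logic: counter delta divided by elapsed time.
def rate(old_time, old_value, new_time, new_value):
    return (new_value - old_value) / float(new_time - old_time)

# invented sample: 12008476 -> 12010476 blocks written over 60 seconds
assert rate(0, 12008476, 60, 12010476) == 2000 / 60.0
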
+ +# Parse agent output into a dict of the form: +# parsed = { +# {'SP A Port 0': {'Blocks Read': 4917783, 'Blocks Written': 12008476}, +# 'SP A Port 1': {'Blocks Read': 363283639, 'Blocks Written': 218463965}, +# 'SP A Port 2': {'Blocks Read': 2, 'Blocks Written': 0}, +# 'SP B Port 0': {'Blocks Read': 0, 'Blocks Written': 4348086}, +# } + +def parse_emcvnx_hba(info): + parsed = {} + for line in info: + if len(line) > 2 and line[0] == "SP" and line[1] == "Name:": + hba_id = " ".join(line[2:]) + elif len(line) > 2 and line[0] == "SP" and line[1] == "Port" and line[2] == "ID:": + hba_id += " Port " + line[-1] + hba = {} + parsed[hba_id] = hba + elif len(line) > 2 and line[0] == "Blocks" and line[1] in ("Read:", "Written:"): + hba["Blocks " + line[1].replace(":", "")] = saveint(line[-1]) + return parsed + + +def inventory_emcvnx_hba(info): + return [ (hba, None) for hba in parse_emcvnx_hba(info).keys() ] + + +def check_emcvnx_hba(item, _no_params, info): + now = time.time() + perfdata = [] + parsed = parse_emcvnx_hba(info) + if item not in parsed: + return 3, "HBA %s not found in agent output" % item + + read_blocks = parsed[item]["Blocks Read"] + write_blocks = parsed[item]["Blocks Written"] + countername_r = "emcvnx_hba.read_blocks.%s" % item.replace(" ", "_") + countername_w = "emcvnx_hba.write_blocks.%s" % item.replace(" ", "_") + + read_blocks_per_sec = get_rate(countername_r, now, read_blocks) + write_blocks_per_sec = get_rate(countername_w, now, write_blocks) + read_blocks_per_sec = saveint(read_blocks_per_sec) + write_blocks_per_sec = saveint(write_blocks_per_sec) + perfdata.append(("read_blocks", read_blocks_per_sec)) + perfdata.append(("write_blocks", write_blocks_per_sec)) + + return 0, "Read: %s Blocks/s, Write: %s Blocks/s" % (read_blocks_per_sec, write_blocks_per_sec), perfdata + + +check_info['emcvnx_hba'] = { + "inventory_function" : inventory_emcvnx_hba, + "check_function" : check_emcvnx_hba, + "service_description" : "HBA %s", + 'has_perfdata' : True, +} diff -Nru check-mk-1.2.2p3/emcvnx_hwstatus check-mk-1.2.6p12/emcvnx_hwstatus --- check-mk-1.2.2p3/emcvnx_hwstatus 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/emcvnx_hwstatus 2015-06-24 09:48:36.000000000 +0000 @@ -0,0 +1,147 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. 
+ +# Example output from agent: +# <<>> +# DPE7 Bus 0 Enclosure 0 +# Enclosure Drive Type: SAS +# Current Speed: 6Gbps +# Maximum Speed: 6Gbps +# SP A State: Present +# SP B State: Present +# Bus 0 Enclosure 0 Power A State: Present +# Bus 0 Enclosure 0 Power B State: Present +# Bus 0 Enclosure 0 SPS A State: Present +# Bus 0 Enclosure 0 SPS B State: Present +# Bus 0 Enclosure 0 SPS A Cabling State: Valid +# Bus 0 Enclosure 0 SPS B Cabling State: Valid +# Bus 0 Enclosure 0 CPU Module A State: Present +# Bus 0 Enclosure 0 CPU Module B State: Present +# Bus 0 Enclosure 0 SP A I/O Module 0 State: Present +# Bus 0 Enclosure 0 SP A I/O Module 1 State: Empty +# Bus 0 Enclosure 0 SP B I/O Module 0 State: Present +# Bus 0 Enclosure 0 SP B I/O Module 1 State: Empty +# Bus 0 Enclosure 0 DIMM Module A State: Present +# Bus 0 Enclosure 0 DIMM Module B State: Present +# +# DAE6S Bus 0 Enclosure 1 +# Enclosure Drive Type: SAS, NL SAS +# Current Speed: 6Gbps +# Maximum Speed: 6Gbps +# Bus 0 Enclosure 1 Power A State: Present +# Bus 0 Enclosure 1 Power B State: Present +# Bus 0 Enclosure 1 LCC A State: Present +# Bus 0 Enclosure 1 LCC B State: Present +# Bus 0 Enclosure 1 LCC A Revision: 1.33 +# Bus 0 Enclosure 1 LCC B Revision: 1.33 +# Bus 0 Enclosure 1 LCC A Serial #: US1V2120601546 +# Bus 0 Enclosure 1 LCC B Serial #: US1V2120602428 +# Bus 0 Enclosure 1 LCC A Current Speed: 6Gbps +# Bus 0 Enclosure 1 LCC B Current Speed: 6Gbps +# Bus 0 Enclosure 1 LCC A Maximum Speed: 6Gbps +# Bus 0 Enclosure 1 LCC B Maximum Speed: 6Gbps +# +# DAE6S Bus 1 Enclosure 0 +# Enclosure Drive Type: SAS, NL SAS +# Current Speed: 6Gbps +# Maximum Speed: 6Gbps +# Bus 1 Enclosure 0 Power A State: Present + +# Parse agent output into a dict of the form: +# parsed = { +# "0/1" : { +# "Power A" : "Present", +# "Power B" : "Present", +# # ... 
+#     }
+# }
+
+def parse_emcvnx_hwstatus(info):
+    parsed = {}
+    for line in info:
+        # recognize Enclosures by a line like
+        # DAE6S Bus 0 Enclosure 1
+        # with maybe an additional error message if Overall Status is not ok
+        if len(line) > 3 and line[1] == "Bus" and line[3] == "Enclosure":
+            encid = line[2] + "/" + line[4]
+            enc = {}
+            parsed[encid] = enc
+            if len(line) > 5:
+                enc["Overall Status"] = line[5].replace("*", "")
+            else:
+                enc["Overall Status"] = "No Errors Reported"
+        # recognize Enclosures by a line like
+        # SPE5 Enclosure SPE
+        # with maybe an additional error message if Overall Status is not ok
+        elif len(line) > 2 and line[1] == "Enclosure":
+            encid = line[2]
+            enc = {}
+            parsed[encid] = enc
+            if len(line) > 3:
+                enc["Overall Status"] = line[3].replace("*", "")
+            else:
+                enc["Overall Status"] = "No Errors Reported"
+        # gather additional information about an Enclosure found in one
+        # of the cases above
+        elif len(line) > 2 and line[-2] == "State:":
+            if line[0] == "SP":
+                device = line[0] + " " + line[1]
+            else:
+                device = " ".join(line[4:-2])
+            state = line[-1]
+            enc[device] = state
+    return parsed
+
+
+def inventory_emcvnx_hwstatus(info):
+    parsed = parse_emcvnx_hwstatus(info)
+    inventory = []
+    for enclosure in parsed:
+        for device in parsed[enclosure]:
+            if parsed[enclosure][device] != "Empty":
+                inventory.append( (enclosure + " " + device, None) )
+    return inventory
+
+
+def check_emcvnx_hwstatus(item, _no_params, info):
+    enc, device = item.split(" ", 1)
+    try:
+        devstate = parse_emcvnx_hwstatus(info)[enc][device]
+        if devstate in ("Present", "Valid", "No Errors Reported"):
+            nagstate = 0
+        else:
+            nagstate = 2
+        return nagstate, "Enclosure %s is %s" % (item, devstate)
+
+    except KeyError:
+        return 3, "Enclosure %s not found in agent output" % item
+
+
+check_info['emcvnx_hwstatus'] = {
+    "inventory_function"  : inventory_emcvnx_hwstatus,
+    "check_function"      : check_emcvnx_hwstatus,
+    "service_description" : "Enclosure %s", # Example for Item: "0/1 Power A"
+}
diff -Nru check-mk-1.2.2p3/emcvnx_info check-mk-1.2.6p12/emcvnx_info
--- check-mk-1.2.2p3/emcvnx_info	1970-01-01 00:00:00.000000000 +0000
+++ check-mk-1.2.6p12/emcvnx_info	2015-06-24 09:48:36.000000000 +0000
@@ -0,0 +1,86 @@
+#!/usr/bin/python
+# -*- encoding: utf-8; py-indent-offset: 4 -*-
+# +------------------------------------------------------------------+
+# |             ____ _               _        __  __ _  __           |
+# |            / ___| |__   ___  ___| | __   |  \/  | |/ /           |
+# |           | |   | '_ \ / _ \/ __| |/ /   | |\/| | ' /            |
+# |           | |___| | | |  __/ (__|   <    | |  | | . \            |
+# |            \____|_| |_|\___|\___|_|\_\___|_|  |_|_|\_\           |
+# |                                                                  |
+# | Copyright Mathias Kettner 2014             mk@mathias-kettner.de |
+# +------------------------------------------------------------------+
+#
+# This file is part of Check_MK.
+# The official homepage is at http://mathias-kettner.de/check_mk.
+#
+# check_mk is free software;  you can redistribute it and/or modify it
+# under the  terms of the  GNU General Public License  as published by
+# the Free Software Foundation in version 2.  check_mk is  distributed
+# in the hope that it will be useful, but WITHOUT ANY WARRANTY;  with-
+# out even the implied warranty of  MERCHANTABILITY  or  FITNESS FOR A
+# PARTICULAR PURPOSE. See the  GNU General Public License for more de-
+# tails. You should have  received  a copy of the  GNU  General Public
+# License along with GNU Make; see the file  COPYING.  If  not,  write
+# to the Free Software Foundation, Inc., 51 Franklin St,  Fifth Floor,
+# Boston, MA 02110-1301 USA.
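
The emcvnx_info check below simply keeps the header lines starting with Revision:, Model: or Serial and joins them into one summary string. A standalone sketch with invented, pre-split sample rows:

# Sketch of the line filtering done by check_emcvnx_info below.
info = [
    ["Revision:", "05.32.000.5.201"],
    ["Model:", "VNX5300"],
    ["Serial", "No:", "CKM00114701225"],
    ["Prom", "Rev:", "7.00.00"],          # not in the whitelist, dropped
]
summary = ", ".join(" ".join(line) for line in info
                    if line[0] in ("Revision:", "Model:", "Serial"))
assert summary == "Revision: 05.32.000.5.201, Model: VNX5300, Serial No: CKM00114701225"
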
+ +# Example output from agent: +# <<>> +# +# +# Server IP Address: 10.1.36.13 +# Agent Rev: 7.32.25 (1.56) +# +# +# Agent/Host Information +# ----------------------- +# +# +# +# Agent Rev: 7.32.25 (1.56) +# Name: K10 +# Desc: +# Node: A-CKM00114701225 +# Physical Node: K10 +# Signature: 3195192 +# Peer Signature: 3187006 +# Revision: 05.32.000.5.201 +# SCSI Id: 0 +# Model: VNX5300 +# Model Type: Rackmount +# Prom Rev: 7.00.00 +# SP Memory: 8192 +# Serial No: CKM00114701225 +# SP Identifier: A +# Cabinet: DPE7 +# +# Name of the software package: -Compression +# Revision of the software package: - +# Commit Required: NO +# Revert Possible: NO +# Active State: YES +# Is installation completed: YES +# Is this System Software: NO +# +# [... more software packages follow ...] + + +def inventory_emcvnx_info(info): + return [ (None, None) ] + + +def check_emcvnx_info(item, _no_params, info): + message = "" + for line in info: + if len(line) > 1 and line[0] in ("Revision:", "Model:", "Serial"): + if message != "": + message += ", " + message += " ".join(line) + return 0, message + + +check_info['emcvnx_info'] = { + "inventory_function" : inventory_emcvnx_info, + "check_function" : check_emcvnx_info, + "service_description" : "EMC VNX Info" +} diff -Nru check-mk-1.2.2p3/emcvnx_raidgroups check-mk-1.2.6p12/emcvnx_raidgroups --- check-mk-1.2.2p3/emcvnx_raidgroups 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/emcvnx_raidgroups 2015-09-21 10:59:54.000000000 +0000 @@ -0,0 +1,269 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. 
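
The parser further below collects the multi-line "List of disks:" block with a simple continuation flag. A reduced sketch of that state machine, using invented, already word-split rows:

# Sketch: continuation-line handling as in parse_emcvnx_raidgroups() below.
info = [
    ["List", "of", "disks:", "Bus", "0", "Enclosure", "0", "Disk", "0"],
    ["Bus", "0", "Enclosure", "0", "Disk", "1"],   # continuation line
    ["List", "of", "luns:", "4"],                  # ends the disk list
]
disks, append = [], False
for line in info:
    if line[:3] == ["List", "of", "disks:"]:
        disks.append(line[4] + "/" + line[6] + " Disk " + line[8])
        append = True
    elif append and line[0] == "Bus":
        disks.append(line[1] + "/" + line[3] + " Disk " + line[5])
    else:
        append = False
assert disks == ["0/0 Disk 0", "0/0 Disk 1"]
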
+ +# Example output from agent: +# <<>> +# +# +# Server IP Address: 172.16.8.82 +# Agent Rev: 7.32.27 (0.14) +# +# +# All RAID Groups Information +# ---------------------------- +# +# +# RaidGroup ID: 0 +# RaidGroup Type: r5 +# RaidGroup State: Explicit_Remove +# Valid_luns +# List of disks: Bus 0 Enclosure 0 Disk 0 +# Bus 0 Enclosure 0 Disk 1 +# Bus 0 Enclosure 0 Disk 2 +# Bus 0 Enclosure 0 Disk 3 +# List of luns: 4 +# Max Number of disks: 16 +# Max Number of luns: 256 +# Raw Capacity (Blocks): 702504960 +# Logical Capacity (Blocks): 526878720 +# Free Capacity (Blocks,non-contiguous): 0 +# Free contiguous group of unbound segments: 0 +# Defrag/Expand priority: Medium +# Percent defragmented: 100 +# Percent expanded: N/A +# Disk expanding onto: N/A +# Lun Expansion enabled: NO +# Legal RAID types: r5 +# +# RaidGroup ID: 124 +# RaidGroup Type: hot_spare +# [...] + +# Parse agent output into a dict of the form: +# (where the RAID Group ID is used as key) +# parsed = {'0': {'luns': '4'}, +# '1': {'luns': '0,1'}, +# '124': {'luns': '4089'}, +# '2': {'luns': '2,3'}} + + +def parse_emcvnx_raidgroups(info): + parsed = {} + append = False + for line in info: + if len(line) > 2 and line[0] == "RaidGroup" and line[1] == "ID:": + rg = {} + parsed[line[2]] = rg + elif len(line) > 3 and line[0] == "List" and line[1] == "of" and line[2] == "luns:": + rg["luns"] = ",".join(line[3:]) + elif len(line) > 8 and line[0] == "List" and line[1] == "of" and line[2] == "disks:": + disks = [] + disk = line[4] + "/" + line[6] + " Disk " + line[8] + disks.append(disk) + rg["disks"] = disks + append = True + elif append == True and len(line) > 5 and line[0] == "Bus" and line[2] == "Enclosure" and line[4] == "Disk": + disk = line[1] + "/" + line[3] + " Disk " + line[5] + disks.append(disk) + elif append == True: + append = False + elif len(line) > 3 and line[0] == "Raw" and line[1] == "Capacity" and line[2] == "(Blocks):": + rg["capacity_raw_blocks"] = line[3] + elif len(line) > 3 and line[0] == "Logical" and line[1] == "Capacity" and line[2] == "(Blocks):": + rg["capacity_logical_blocks"] = line[3] + elif len(line) > 3 and line[0] == "Free" and line[1] == "Capacity" and line[2] == "(Blocks,non-contiguous):": + rg["capacity_free_total_blocks"] = line[3] + elif len(line) > 6 and line[0] == "Free" and line[1] == "contiguous" and line[4] == "unbound" and line[5] == "segments:": + rg["capacity_free_contiguous_blocks"] = line[6] + return parsed + + +def inventory_emcvnx_raidgroups(info): + parsed = parse_emcvnx_raidgroups(info) + inventory = [] + for rg in parsed: + inventory.append((rg, None)) + return inventory + + +# .--list of LUNs--------------------------------------------------------. +# | _ _ _ __ _ _ _ _ _ | +# | | (_)___| |_ ___ / _| | | | | | | \ | |___ | +# | | | / __| __| / _ \| |_ | | | | | | \| / __| | +# | | | \__ \ |_ | (_) | _| | |__| |_| | |\ \__ \ | +# | |_|_|___/\__| \___/|_| |_____\___/|_| \_|___/ | +# | | +# '----------------------------------------------------------------------' + +def check_emcvnx_raidgroups_list_luns(item, _no_params, info): + parsed = parse_emcvnx_raidgroups(info) + if item not in parsed: + return 3, "RAID Group %s not found in agent output" % item + return 0, "List of LUNs: " + parsed[item]["luns"] + + +check_info['emcvnx_raidgroups.list_luns'] = { + "inventory_function" : inventory_emcvnx_raidgroups, + "check_function" : check_emcvnx_raidgroups_list_luns, + "service_description" : "RAID Group %s LUNs" +} + +#. 
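
The two capacity checks further below convert block counts to MB assuming a fixed block size of 512 bytes, which naviseccli does not report. A worked example with the Logical Capacity figure from the sample output above:

# Worked example of the blocks-to-MB conversion used by the capacity checks.
blocksize = 512
logical_blocks = 526878720                         # from the sample output above
size_mb = logical_blocks * blocksize / 1048576.0   # blocks * 512 / 2**20
assert size_mb == 257265.0
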
+# .--list of disks-------------------------------------------------------. +# | _ _ _ __ _ _ _ | +# | | (_)___| |_ ___ / _| __| (_)___| | _____ | +# | | | / __| __| / _ \| |_ / _` | / __| |/ / __| | +# | | | \__ \ |_ | (_) | _| | (_| | \__ \ <\__ \ | +# | |_|_|___/\__| \___/|_| \__,_|_|___/_|\_\___/ | +# | | +# '----------------------------------------------------------------------' + +def check_emcvnx_raidgroups_list_disks(item, _no_params, info): + parsed = parse_emcvnx_raidgroups(info) + if item not in parsed: + return 3, "RAID Group %s not found in agent output" % item + + message = "" + enc = "" + for disk in sorted(parsed[item]["disks"]): + if message != "": + message += ", " + enc_id, disk_id = disk.split(' ', 1) + if enc_id == enc: + message += disk_id + else: + message += "Enclosure " + enc_id + " " + disk_id + enc = enc_id + + return 0, "List of Disks: " + message + + +check_info['emcvnx_raidgroups.list_disks'] = { + "inventory_function" : inventory_emcvnx_raidgroups, + "check_function" : check_emcvnx_raidgroups_list_disks, + "service_description" : "RAID Group %s Disks" +} + +#. +# .--capacity------------------------------------------------------------. +# | _ _ | +# | ___ __ _ _ __ __ _ ___(_) |_ _ _ | +# | / __/ _` | '_ \ / _` |/ __| | __| | | | | +# | | (_| (_| | |_) | (_| | (__| | |_| |_| | | +# | \___\__,_| .__/ \__,_|\___|_|\__|\__, | | +# | |_| |___/ | +# '----------------------------------------------------------------------' + +def inventory_emcvnx_raidgroups_capacity(info): + parsed = parse_emcvnx_raidgroups(info) + inventory = [] + for rg in parsed: + inventory.append((rg, {})) + return inventory + + +def check_emcvnx_raidgroups_capacity(item, params, info): + parsed = parse_emcvnx_raidgroups(info) + if item not in parsed: + return 3, "RAID Group %s not found in agent output" % item + + fslist = [] + # Blocksize in Bytes, seems to be fix + # (is not listed in the naviseccli output anywhere) + blocksize = 512 + size_mb = int(parsed[item]["capacity_logical_blocks"]) * blocksize / 1048576.0 + avail_mb = int(parsed[item]["capacity_free_total_blocks"]) * blocksize / 1048576.0 + fslist.append((item, size_mb, avail_mb)) + + # variable name in perfdata is not allowed to be just a number + # especially 0 does not work, so prefix it generally with "rg" + rc, message, perfdata = df_check_filesystem_list(item, params, fslist) + # note: on very first run perfdata is empty + if len(perfdata) > 0: + perfdata[0] = ("rg" + perfdata[0][0], perfdata[0][1], perfdata[0][2], perfdata[0][3], perfdata[0][4], perfdata[0][5]) + return rc, message, perfdata + + +check_info['emcvnx_raidgroups.capacity'] = { + "inventory_function" : inventory_emcvnx_raidgroups_capacity, + "check_function" : check_emcvnx_raidgroups_capacity, + "service_description" : "RAID Group %s Capacity", + "has_perfdata" : True, + "group" : "filesystem", + "includes" : [ "df.include" ], + "default_levels_variable" : "filesystem_default_levels", +} + +#. +# .--capacity contiguous-------------------------------------------------. 
+# |                   _   _                                              |
+# |    ___ ___  _ __ | |_(_) __ _ _   _  ___  _   _ ___                  |
+# |   / __/ _ \| '_ \| __| |/ _` | | | |/ _ \| | | / __|                 |
+# |  | (_| (_) | | | | |_| | (_| | |_| | (_) | |_| \__ \                 |
+# |   \___\___/|_| |_|\__|_|\__, |\__,_|\___/ \__,_|___/                 |
+# |                         |___/                                        |
+# '----------------------------------------------------------------------'
+
+def inventory_emcvnx_raidgroups_capacity_contiguous(info):
+    parsed = parse_emcvnx_raidgroups(info)
+    inventory = []
+    for rg in parsed:
+        inventory.append((rg, {}))
+    return inventory
+
+
+def check_emcvnx_raidgroups_capacity_contiguous(item, params, info):
+    parsed = parse_emcvnx_raidgroups(info)
+    if item not in parsed:
+        return 3, "RAID Group %s not found in agent output" % item
+
+    fslist = []
+    # Blocksize in Bytes, seems to be fixed
+    # (is not listed in the naviseccli output anywhere)
+    blocksize = 512
+    size_mb = int(parsed[item]["capacity_logical_blocks"]) * blocksize / 1048576.0
+    avail_mb = int(parsed[item]["capacity_free_contiguous_blocks"]) * blocksize / 1048576.0
+    fslist.append((item, size_mb, avail_mb))
+
+    # variable name in perfdata is not allowed to be just a number,
+    # especially 0 does not work, so prefix it generally with "rg"
+    rc, message, perfdata = df_check_filesystem_list(item, params, fslist)
+    # note: on the very first run perfdata is empty
+    if len(perfdata) > 0:
+        perfdata[0] = ("rg" + perfdata[0][0], perfdata[0][1], perfdata[0][2], perfdata[0][3], perfdata[0][4], perfdata[0][5])
+    return rc, message, perfdata
+
+
+check_info['emcvnx_raidgroups.capacity_contiguous'] = {
+    "inventory_function"      : inventory_emcvnx_raidgroups_capacity_contiguous,
+    "check_function"          : check_emcvnx_raidgroups_capacity_contiguous,
+    "service_description"     : "RAID Group %s Capacity Contiguous",
+    "has_perfdata"            : True,
+    "group"                   : "filesystem",
+    "includes"                : [ "df.include" ],
+    "default_levels_variable" : "filesystem_default_levels",
+}
+
+
+#.
diff -Nru check-mk-1.2.2p3/emcvnx_raidgroups.capacity check-mk-1.2.6p12/emcvnx_raidgroups.capacity
--- check-mk-1.2.2p3/emcvnx_raidgroups.capacity	1970-01-01 00:00:00.000000000 +0000
+++ check-mk-1.2.6p12/emcvnx_raidgroups.capacity	2015-06-24 09:48:36.000000000 +0000
@@ -0,0 +1,43 @@
+title: EMC VNX Storage: Free Capacity in RAID Groups
+agents: emc
+catalog: hw/storagehw/emc
+license: GPL
+distribution: check_mk
+description:
+ Checks the usage of RAID Groups in an EMC VNX storage system.
+
+ It uses the check logic of the {df} check, so for configuration parameters
+ and examples please refer to the man page of {df}.
+
+ The information is retrieved by the special agent agent_emcvnx, which uses
+ EMC's command line tool naviseccli. Capacity is retrieved in blocks. For
+ the calculation of the size in MB the check assumes a block size of 512 bytes.
+
+item:
+ The RAID Group ID.
+
+inventory:
+ Automatically configures one check for every RAID Group listed in the agent output.
+
+examples:
+ # set levels for RAID group 0 to 90% for WARN and 95% for CRIT
+ # for all hosts with the host tag emcvnx
+ checkgroup_parameters['filesystem'] = [
+   ( {'levels': (90.0, 95.0)}, ['emcvnx'], ALL_HOSTS, ['0$'] ),
+ ] + checkgroup_parameters['filesystem']
+
+perfdata:
+ Three values: The first value is the used space in the RAID Group
+ in MB. Also the minimum (0 MB), maximum (logical capacity of the RAID Group)
+ and the warning and critical levels in MB are provided.
+ The second is the change of the usage in MB per range since the last check
+ (e.g. in MB per 24 hours) and the third is the averaged change (the so-called
+ trend), also in MB per range. Please note that performance data for
+ trends is enabled by default. You can globally disable that in {main.mk}
+ with {filesystem_default_levels["trend_perfdata"] = False}.
+
+[parameters]
+parameters (dict): See man page of {df}.
+
+[configuration]
+filesystem_default_levels: And others, see man page of {df}.
diff -Nru check-mk-1.2.2p3/emcvnx_raidgroups.capacity_contiguous check-mk-1.2.6p12/emcvnx_raidgroups.capacity_contiguous
--- check-mk-1.2.2p3/emcvnx_raidgroups.capacity_contiguous	1970-01-01 00:00:00.000000000 +0000
+++ check-mk-1.2.6p12/emcvnx_raidgroups.capacity_contiguous	2015-06-24 09:48:36.000000000 +0000
@@ -0,0 +1,39 @@
+title: EMC VNX Storage: Contiguous Free Capacity in RAID Groups
+agents: emc
+catalog: hw/storagehw/emc
+license: GPL
+distribution: check_mk
+description:
+ Checks the usage of RAID Groups in an EMC VNX storage system. In contrast to
+ the check {emcvnx_raidgroups.capacity} this check rates only the biggest
+ free contiguous group of unbound segments as "free". Fragmented space is
+ rated as used.
+
+ The check uses the check logic of the {df} check, so for configuration parameters
+ and examples please refer to the man page of {df}.
+
+ The information is retrieved by the special agent agent_emcvnx, which uses
+ EMC's command line tool naviseccli. Capacity is retrieved in blocks. For
+ the calculation of the size in MB the check assumes a block size of 512 bytes.
+
+item:
+ The RAID Group ID.
+
+inventory:
+ Automatically configures one check for every RAID Group listed in the agent output.
+
+perfdata:
+ Three values: The first value is the used space in the RAID Group
+ in MB. Also the minimum (0 MB), maximum (logical capacity of the RAID Group)
+ and the warning and critical levels in MB are provided.
+ The second is the change of the usage in MB per range since the last check
+ (e.g. in MB per 24 hours) and the third is the averaged change (the so-called
+ trend), also in MB per range. Please note that performance data for
+ trends is enabled by default. You can globally disable that in {main.mk}
+ with {filesystem_default_levels["trend_perfdata"] = False}.
+
+[parameters]
+parameters (dict): See man page of {df}.
+
+[configuration]
+filesystem_default_levels: And others, see man page of {df}.
diff -Nru check-mk-1.2.2p3/emcvnx_raidgroups.list_disks check-mk-1.2.6p12/emcvnx_raidgroups.list_disks
--- check-mk-1.2.2p3/emcvnx_raidgroups.list_disks	1970-01-01 00:00:00.000000000 +0000
+++ check-mk-1.2.6p12/emcvnx_raidgroups.list_disks	2015-06-24 09:48:36.000000000 +0000
@@ -0,0 +1,18 @@
+title: EMC VNX Storage: RAID Group: List of Disks
+agents: emc
+catalog: hw/storagehw/emc
+license: GPL
+distribution: check_mk
+description:
+ Reports the list of disks used in a RAID Group in an EMC VNX storage system.
+
+ This check is just informational and always returns {OK}.
+
+ The information is retrieved by the special agent agent_emcvnx, which uses
+ EMC's command line tool naviseccli.
+
+item:
+ The RAID Group ID.
+
+inventory:
+ Automatically configures one check for every RAID Group listed in the agent output.
diff -Nru check-mk-1.2.2p3/emcvnx_raidgroups.list_luns check-mk-1.2.6p12/emcvnx_raidgroups.list_luns
--- check-mk-1.2.2p3/emcvnx_raidgroups.list_luns	1970-01-01 00:00:00.000000000 +0000
+++ check-mk-1.2.6p12/emcvnx_raidgroups.list_luns	2015-06-24 09:48:36.000000000 +0000
@@ -0,0 +1,18 @@
+title: EMC VNX Storage: RAID Group: List of LUNs
+agents: emc
+catalog: hw/storagehw/emc
+license: GPL
+distribution: check_mk
+description:
+ Reports the list of LUNs used in a RAID Group in an EMC VNX storage system.
+
+ This check is just informational and always returns {OK}.
+
+ The information is retrieved by the special agent agent_emcvnx, which uses
+ EMC's command line tool naviseccli.
+
+item:
+ The RAID Group ID.
+
+inventory:
+ Automatically configures one check for every RAID Group listed in the agent output.
diff -Nru check-mk-1.2.2p3/emerson_stat check-mk-1.2.6p12/emerson_stat
--- check-mk-1.2.2p3/emerson_stat	1970-01-01 00:00:00.000000000 +0000
+++ check-mk-1.2.6p12/emerson_stat	2015-09-21 10:59:54.000000000 +0000
@@ -0,0 +1,89 @@
+#!/usr/bin/python
+# -*- encoding: utf-8; py-indent-offset: 4 -*-
+# +------------------------------------------------------------------+
+# |             ____ _               _        __  __ _  __           |
+# |            / ___| |__   ___  ___| | __   |  \/  | |/ /           |
+# |           | |   | '_ \ / _ \/ __| |/ /   | |\/| | ' /            |
+# |           | |___| | | |  __/ (__|   <    | |  | | . \            |
+# |            \____|_| |_|\___|\___|_|\_\___|_|  |_|_|\_\           |
+# |                                                                  |
+# | Copyright Mathias Kettner 2014             mk@mathias-kettner.de |
+# +------------------------------------------------------------------+
+#
+# This file is part of Check_MK.
+# The official homepage is at http://mathias-kettner.de/check_mk.
+#
+# check_mk is free software;  you can redistribute it and/or modify it
+# under the  terms of the  GNU General Public License  as published by
+# the Free Software Foundation in version 2.  check_mk is  distributed
+# in the hope that it will be useful, but WITHOUT ANY WARRANTY;  with-
+# out even the implied warranty of  MERCHANTABILITY  or  FITNESS FOR A
+# PARTICULAR PURPOSE. See the  GNU General Public License for more de-
+# tails. You should have  received  a copy of the  GNU  General Public
+# License along with GNU Make; see the file  COPYING.  If  not,  write
+# to the Free Software Foundation, Inc., 51 Franklin St,  Fifth Floor,
+# Boston, MA 02110-1301 USA.
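
check_emerson_stat below buckets the EES status codes (documented in the MIB excerpt that follows) into three severities: 5, 6 and 10 become CRIT, the remaining known non-normal codes become WARN, and 2 stays OK. A standalone sketch of that bucketing:

# Sketch of the severity bucketing used by check_emerson_stat below.
def bucket(status):
    # 5 = MINOR, 6 = MAJOR, 10 = DISABLED -> CRIT
    if status in (5, 6, 10):
        return 2
    # all other known abnormal/administrative codes -> WARN
    elif status in (1, 3, 4, 7, 8, 9):
        return 1
    # 2 = NORMAL (and, like the check below, anything unmapped) -> OK
    return 0

assert bucket(2) == 0
assert bucket(4) == 1   # WARNING - A3
assert bucket(6) == 2   # MAJOR alarm -> CRIT
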
+ + +# EES-POWER-MIB::SYSTEMSTATUS.0 .1.3.6.1.4.1.6302.2.1.2.1.0 +# OPERATIONAL VALUES: +# (1) UNKNOWN - STATUS HAS NOT YET BEEN DEFINED +# (2) NORMAL - THERE ARE NO ACTIVATED ALARMS +# (3) OBSERVATION - OA, LOWEST LEVEL OF 'ABNORMAL' STATUS +# (4) WARNING - A3 +# (5) MINOR - MA +# (6) MAJOR - CA, HIGHEST LEVEL OF 'ABNORMAL' STATUS +# ADMINISTRATIVE VALUES: +# (7) UNMANAGED +# (8) RESTRICTED +# (9) TESTING +# (10) DISABLED" +# SYNTAX INTEGER { +# UNKNOWN(1), +# NORMAL(2), +# OBSERVATION(3), +# WARNING(4), +# MINOR(5), +# MAJOR(6), +# UNMANAGED(7), +# RESTRICTED(8), +# TESTING(9), +# DISABLED(10) } + +# the mib is the NetSure_ESNA.mib, which we have received directly +# from a customer, it is named "Emerson Energy Systems (EES) Power MIB" + + +emerson_stat_default = ( 0,0 ) # warning / critical, unused + +def inventory_emerson_stat(info): + if info: + return [ ( None , "emerson_stat_default" ) ] + +def check_emerson_stat(item, params, info): + if info: + warn, crit = params # unused + status_text = { 1:"unknown", 2:"normal", 3:"observation", 4: "warning - A3", + 5: "minor - MA", 6: "major - CA", 7: "unmanaged", 8: "restricted", + 9: "testing", 10: "disabled" } + status = saveint(info[0][0]) + infotext = "Status: " + status_text.get(status) + + state = 0 + if status in [ 5, 6, 10 ]: + state = 2 + elif status in [ 1, 3, 4, 7, 8, 9 ]: + state = 1 + + return (state, infotext) + + return (3, "Status not found in SNMP output") + +check_info['emerson_stat'] = { + "inventory_function" : inventory_emerson_stat, + "check_function" : check_emerson_stat, + "service_description" : "Status", + "has_perfdata" : False, + "snmp_info" : ( ".1.3.6.1.4.1.6302.2.1.2.1", ["0"] ), + "snmp_scan_function" : lambda oid: oid('.1.3.6.1.4.1.6302.2.1.1.1.0').startswith('Emerson Network Power'), +} diff -Nru check-mk-1.2.2p3/emerson_temp check-mk-1.2.6p12/emerson_temp --- check-mk-1.2.2p3/emerson_temp 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/emerson_temp 2015-09-21 10:59:54.000000000 +0000 @@ -0,0 +1,67 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. 
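
The temperature check below receives readings in thousandths of a degree Celsius and treats anything below -273000, i.e. below absolute zero (the device reports -999999), as an absent sensor. A small sketch of that conversion:

# Sketch of the raw-value handling in check_emerson_temp below.
def to_celsius(raw):
    value = int(raw)
    if value < -273000:
        return None   # sensor offline / not fitted
    return value / 1000.0

assert to_celsius("23500") == 23.5
assert to_celsius("-999999") is None
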
+
+#
+# during inventory we are looking for all temperatures available,
+# in this example there are two (index 1 & 2):
+#
+# EES-POWER-MIB::psTemperature1.0  .1.3.6.1.4.1.6302.2.1.2.7.1
+# EES-POWER-MIB::psTemperature2.0  .1.3.6.1.4.1.6302.2.1.2.7.2
+#
+# the MIB is the NetSure_ESNA.mib, which we received directly
+# from a customer; it is named "Emerson Energy Systems (EES) Power MIB"
+
+
+emerson_temp_default = (40, 50) # warning / critical
+
+def inventory_emerson_temp(info):
+    for nr, line in enumerate(info):
+        # Device appears to mark missing sensors by temperature value -999999
+        if int(line[0]) >= -273000:
+            yield nr, "emerson_temp_default"
+
+
+def check_emerson_temp(item, params, info):
+    if len(info) > item:
+        if int(info[item][0]) < -273000:
+            return 3, "Sensor offline"
+
+        temp = float(info[item][0]) / 1000.0
+        return check_temperature(temp, params)
+
+    return 3, "Sensor not found in SNMP data"
+
+
+check_info['emerson_temp'] = {
+    "inventory_function"  : inventory_emerson_temp,
+    "check_function"      : check_emerson_temp,
+    "service_description" : "Temperature %s",
+    "has_perfdata"        : True,
+    "group"               : "hw_temperature",
+    "snmp_info"           : ( ".1.3.6.1.4.1.6302.2.1.2", ["7"] ),
+    "snmp_scan_function"  : lambda oid: oid('.1.3.6.1.4.1.6302.2.1.1.1.0').startswith('Emerson Network Power'),
+    "includes"            : [ "temperature.include" ],
+}
diff -Nru check-mk-1.2.2p3/enterasys_cpu_util check-mk-1.2.6p12/enterasys_cpu_util
--- check-mk-1.2.2p3/enterasys_cpu_util	1970-01-01 00:00:00.000000000 +0000
+++ check-mk-1.2.6p12/enterasys_cpu_util	2015-09-21 10:59:54.000000000 +0000
@@ -0,0 +1,53 @@
+#!/usr/bin/python
+# -*- encoding: utf-8; py-indent-offset: 4 -*-
+# +------------------------------------------------------------------+
+# |             ____ _               _        __  __ _  __           |
+# |            / ___| |__   ___  ___| | __   |  \/  | |/ /           |
+# |           | |   | '_ \ / _ \/ __| |/ /   | |\/| | ' /            |
+# |           | |___| | | |  __/ (__|   <    | |  | | . \            |
+# |            \____|_| |_|\___|\___|_|\_\___|_|  |_|_|\_\           |
+# |                                                                  |
+# | Copyright Mathias Kettner 2014             mk@mathias-kettner.de |
+# +------------------------------------------------------------------+
+#
+# This file is part of Check_MK.
+# The official homepage is at http://mathias-kettner.de/check_mk.
+#
+# check_mk is free software;  you can redistribute it and/or modify it
+# under the  terms of the  GNU General Public License  as published by
+# the Free Software Foundation in version 2.  check_mk is  distributed
+# in the hope that it will be useful, but WITHOUT ANY WARRANTY;  with-
+# out even the implied warranty of  MERCHANTABILITY  or  FITNESS FOR A
+# PARTICULAR PURPOSE. See the  GNU General Public License for more de-
+# tails. You should have  received  a copy of the  GNU  General Public
+# License along with GNU Make; see the file  COPYING.  If  not,  write
+# to the Free Software Foundation, Inc., 51 Franklin St,  Fifth Floor,
+# Boston, MA 02110-1301 USA.
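
Two conversions happen in the check below: the OID_END column is trimmed by [:-2] to form the item, and the utilization value arrives in tenths of a percent. A sketch under the assumption that the OID end looks like "1.0" (the exact suffix is not shown in this diff):

# Sketch of the item and value conversions in check_enterasys_cpu_util below.
line = ["1.0", "835"]          # OID end, utilization of the last minute (invented)
item = line[0][:-2]            # strip the trailing ".0"
usage = int(line[1]) / 10.0    # tenths of a percent -> percent
assert (item, usage) == ("1", 83.5)
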
+
+enterasys_cpu_default_levels = (90, 95)
+
+def inventory_enterasys_cpu_util(info):
+    # [:-2] to remove the oid end
+    return [ ( x[0][:-2], "enterasys_cpu_default_levels" ) for x in info ]
+
+def check_enterasys_cpu_util(item, params, info):
+    for core, util in info:
+        if item == core[:-2]:
+            usage = saveint(util) / 10.0
+            return check_cpu_util(usage, params)
+    return 3, "Core not found in agent output"
+
+
+check_info["enterasys_cpu_util"] = {
+    "check_function" : check_enterasys_cpu_util,
+    "inventory_function" : inventory_enterasys_cpu_util,
+    "service_description" : "CPU util %s",
+    "snmp_info" : ( ".1.3.6.1.4.1.5624.1.2.49.1.1.1.1", [ OID_END, 3 ]), # utilization during the last 1 min
+    "snmp_scan_function" : lambda oid: oid(".1.3.6.1.2.1.1.2.0").startswith(".1.3.6.1.4.1.5624.2.1") \
+                                    or oid(".1.3.6.1.2.1.1.2.0").startswith(".1.3.6.1.4.1.5624.2.2"),
+    "has_perfdata" : True,
+    "group" : "cpu_utilization_multiitem",
+    "includes" : [ "cpu_util.include" ],
+
+}
+
diff -Nru check-mk-1.2.2p3/enterasys_fans check-mk-1.2.6p12/enterasys_fans
--- check-mk-1.2.2p3/enterasys_fans 1970-01-01 00:00:00.000000000 +0000
+++ check-mk-1.2.6p12/enterasys_fans 2015-06-24 09:48:36.000000000 +0000
@@ -0,0 +1,55 @@
+#!/usr/bin/python
+# -*- encoding: utf-8; py-indent-offset: 4 -*-
+# +------------------------------------------------------------------+
+# | ____ _ _ __ __ _ __ |
+# | / ___| |__ ___ ___| | __ | \/ | |/ / |
+# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / |
+# | | |___| | | | __/ (__| < | | | | . \ |
+# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ |
+# | |
+# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de |
+# +------------------------------------------------------------------+
+#
+# This file is part of Check_MK.
+# The official homepage is at http://mathias-kettner.de/check_mk.
+#
+# check_mk is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by
+# the Free Software Foundation in version 2. check_mk is distributed
+# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with-
+# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE. See the GNU General Public License for more de-
+# tails. You should have received a copy of the GNU General Public
+# License along with GNU Make; see the file COPYING. If not, write
+# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
+# Boston, MA 02110-1301 USA.
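+
+# Sketch of the expected SNMP rows (hypothetical values): one row per fan,
+# holding its index and state; fans in state 2 (not installed) are ignored
+# during inventory:
+#
+#   info = [ ['1', '3'], ['2', '2'] ]
+#   -> one service "FAN 1", reported OK ("installed and operating")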
+ +def inventory_enterasys_fans(info): + return [ (x[0], None) for x in info if x[1] != '2' ] + +def check_enterasys_fans(item, _no_params, info): + fan_states = { + "1" : "info not available", + "2" : "not installed", + "3" : "installed and operating", + "4" : "installed and not operating", + } + for num, state in info: + if num == item: + message = "FAN State: %s" % ( fan_states[state] ) + if state in [ "1", "2" ]: + return 3, message + if state == "4": + return 2, message + return 0, message + + +check_info["enterasys_fans"] = { + "check_function" : check_enterasys_fans, + "inventory_function" : inventory_enterasys_fans, + "service_description" : "FAN %s", + "snmp_info" : ( ".1.3.6.1.4.1.52.4.3.1.3.1.1", [ OID_END, 2 ]), + "snmp_scan_function" : lambda oid: oid(".1.3.6.1.2.1.1.2.0").startswith(".1.3.6.1.4.1.5624.2.1") \ + or oid(".1.3.6.1.2.1.1.2.0").startswith(".1.3.6.1.4.1.5624.2.2"), +} + diff -Nru check-mk-1.2.2p3/enterasys_lsnat check-mk-1.2.6p12/enterasys_lsnat --- check-mk-1.2.2p3/enterasys_lsnat 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/enterasys_lsnat 2015-06-24 09:48:36.000000000 +0000 @@ -0,0 +1,62 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. 
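+
+# The check reads a single value, the current number of LSNAT bindings.
+# Hypothetical example of a parameter set for the "lsnat" rule group
+# (by default no levels are set and the check is always OK):
+#
+#   params = { "current_bindings" : (500000, 800000) } # warn / crit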
+ +def inventory_enterasys_lsnat(info): + return [ (None, {}) ] + +def check_enterasys_lsnat(no_item, params, info): + if not info: + return 3, "LSNAT bindings info is missing" + + lsnat_bindings = saveint(info[0][0]) + warn, crit = params.get("current_bindings", (None, None)) + + state = 0 + state_info = "" + if warn: + if lsnat_bindings > crit: + state = 2 + state_info = state_markers[state] + elif lsnat_bindings > warn: + state = 1 + state_info = state_markers[state] + + perfdata = [ ("current_bindings", lsnat_bindings, warn, crit) ] + + return state, "Current bindings %d%s" % (lsnat_bindings, state_info), perfdata + + +check_info["enterasys_lsnat"] = { + "check_function" : check_enterasys_lsnat, + "inventory_function" : inventory_enterasys_lsnat, + "service_description" : "LSNAT Bindings", + "group" : "lsnat", + "has_perfdata" : True, + "snmp_info" : ( ".1.3.6.1.4.1.5624.1.2.74.1.1.5", [ "0" ] ), + "snmp_scan_function" : lambda oid: oid(".1.3.6.1.2.1.1.2.0").startswith(".1.3.6.1.4.1.5624.2.1") and + oid(".1.3.6.1.4.1.5624.1.2.74.1.1.5.0"), +} + diff -Nru check-mk-1.2.2p3/enterasys_powersupply check-mk-1.2.6p12/enterasys_powersupply --- check-mk-1.2.2p3/enterasys_powersupply 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/enterasys_powersupply 2015-09-21 10:59:54.000000000 +0000 @@ -0,0 +1,70 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. 
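+
+# Sketch of the expected SNMP rows (hypothetical values), following the
+# MIB structure documented below:
+#
+#   info = [ ['1', '3', '1', '1'], ['2', '3', '1', '2'] ]
+#   -> PSU 1: OK, working and redundant (ac-dc)
+#   -> PSU 2: WARN, notRedundant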
+ +# MIB structure: +# 1.3.6.1.4.1.52.4.3.1.2.1.1.1 ctChasPowerSupplyNum +# 1.3.6.1.4.1.52.4.3.1.2.1.1.2 ctChasPowerSupplyState +# 1.3.6.1.4.1.52.4.3.1.2.1.1.3 ctChasPowerSupplyType +# 1.3.6.1.4.1.52.4.3.1.2.1.1.4 ctChasPowerSupplyRedundancy + +def inventory_enterasys_powersupply(info): + inventory = [] + for num, state, typ, redun in info: + if state == '3': + inventory.append(( num, None )) + return inventory + +def check_enterasys_powersupply(item, _no_params, info): + supply_types = { + '1' : 'ac-dc', + '2' : 'dc-dc', + '3' : 'notSupported', + '4' : 'highOutput', + } + redundancy_types = { + '1' : 'redundant', + '2' : 'notRedundant', + '3' : 'notSupported', + } + + for num, state, typ, redun in info: + if num == item: + if state == '4': + return 2, 'PSU installed and not operating' + if redun != '1': + return 1, 'PSU %s' % redundancy_types[redun] + return 0, 'PSU working and redundant (%s)' % supply_types[typ] + + +check_info["enterasys_powersupply"] = { + "check_function" : check_enterasys_powersupply, + "inventory_function" : inventory_enterasys_powersupply, + "service_description" : "PSU %s", + "snmp_info" : ( ".1.3.6.1.4.1.52.4.3.1.2.1.1", [ OID_END, 2, 3, 4 ]), + "snmp_scan_function" : lambda oid: oid(".1.3.6.1.2.1.1.2.0").startswith(".1.3.6.1.4.1.5624.2.1") \ + or oid(".1.3.6.1.2.1.1.2.0").startswith(".1.3.6.1.4.1.5624.2.2"), +} + diff -Nru check-mk-1.2.2p3/enterasys_temp check-mk-1.2.6p12/enterasys_temp --- check-mk-1.2.2p3/enterasys_temp 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/enterasys_temp 2015-06-24 09:48:36.000000000 +0000 @@ -0,0 +1,56 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. + +enterasys_temp_default_levels = (30, 35) + +def inventory_enterasys_temp(info): + if info and info[0][0] != "0": + return [ (None, "enterasys_temp_default_levels") ] + +def check_enterasys_temp(item, params, info): + # info for MIB: The ambient temperature of the room in which the chassis + # is located. If this sensor is broken or not supported, then + # this object will be set to zero. The value of this object + # is the actual temperature in degrees Fahrenheit * 10. 
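+    # Worked example (hypothetical reading): a raw value of "752" stands for
+    # 75.2 degrees Fahrenheit, i.e. (75.2 - 32) * 5 / 9 = 24.0 degrees Celsius.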
+ if info[0][0] == "0": + return 3, "Sensor broken or not supported" + + temp = fahrenheit_to_celsius(int(info[0][0]) / 10.0) + return check_temperature(temp, params) + + +check_info["enterasys_temp"] = { + "check_function" : check_enterasys_temp, + "inventory_function" : inventory_enterasys_temp, + "service_description" : "Temperature", + "has_perfdata" : True, + "snmp_info" : ( ".1.3.6.1.4.1.52.4.1.1.8.1", [ 1 ]), # chEnvAmbientTemp + "snmp_scan_function" : lambda oid: oid(".1.3.6.1.2.1.1.2.0").startswith(".1.3.6.1.4.1.5624.2.1") \ + or oid(".1.3.6.1.2.1.1.2.0").startswith(".1.3.6.1.4.1.5624.2.2"), + "group" : "hw_single_temperature", + "includes" : [ "temperature.include" ], +} + diff -Nru check-mk-1.2.2p3/esx_vpshere_hostsystem check-mk-1.2.6p12/esx_vpshere_hostsystem --- check-mk-1.2.2p3/esx_vpshere_hostsystem 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/esx_vpshere_hostsystem 2015-09-21 10:59:54.000000000 +0000 @@ -0,0 +1,102 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. 
+ +# Example output: +# hardware.pciDevice.deviceName.00:00.0 5520 I/O Hub to ESI Port +# hardware.pciDevice.deviceName.00:01.0 5520/5500/X58 I/O Hub PCI Express Root Port 1 +# hardware.pciDevice.deviceName.00:02.0 5520/5500/X58 I/O Hub PCI Express Root Port 2 +# hardware.pciDevice.deviceName.00:03.0 5520/5500/X58 I/O Hub PCI Express Root Port 3 +# hardware.cpuPkg.busHz.0 133338028 +# hardware.cpuPkg.busHz.1 133338066 +# hardware.cpuPkg.description.0 Intel(R) Xeon(R) CPU X5670 @ 2.93GHz +# hardware.cpuPkg.description.1 Intel(R) Xeon(R) CPU X5670 @ 2.93GHz +# hardware.cpuPkg.hz.0 2933437438 +# hardware.cpuPkg.hz.1 2933437797 +# hardware.cpuPkg.index.0 0 +# hardware.cpuPkg.index.1 1 +# hardware.cpuPkg.vendor.0 intel +# hardware.cpuPkg.vendor.1 intel + +def inv_esx_vsphere_hostsystem_parse(info, inv_items = {}): + result = {} + for line in info: + key = line[0] + for pattern, settings in inv_items.items(): + if key.startswith(pattern): + tokens = key.split(".") + if settings.get("index"): + name, subtype, index = ".".join(tokens[:2]), tokens[2], ".".join(tokens[3:]) + result.setdefault(name, {}) + result[name].setdefault(index, {})[subtype] = " ".join(line[1:]) + else: + result.setdefault(".".join(tokens[:-1]), {})[tokens[-1]] = " ".join(line[1:]) + break + return result + +def inv_esx_vsphere_hostsystem(info): + inv_items = { + "hardware.cpuPkg": { "index": True}, + "hardware.cpuInfo": { "index": False}, + "hardware.biosInfo": { "index": False}, + "hardware.systemInfo": { "index": False} + } + + data = inv_esx_vsphere_hostsystem_parse(info, inv_items) + # data example: {'hardware.cpuPkg': {'0': {'busHz': '133338028', + # 'description': 'Intel(R) Xeon(R) CPU X5670 @ 2.93GHz', + # 'hz': '2933437438', + # 'index': '0', + # 'vendor': 'intel'}}} + + node = inv_tree("hardware.cpu.") + node["max_speed"] = float(data["hardware.cpuInfo"]["hz"]) + node["cpus"] = int(data["hardware.cpuInfo"]["numCpuPackages"]) + node["cores"] = int(data["hardware.cpuInfo"]["numCpuCores"]) + node["cores_per_cpu"] = node["cores"] / node["cpus"] + node["threads"] = int(data["hardware.cpuInfo"]["numCpuThreads"]) + node["threads_per_cpu"] = node["threads"] / node["cpus"] + if "hardware.cpuPkg" in data: + node["model"] = data["hardware.cpuPkg"]["0"]["description"] + node["vendor"] = data["hardware.cpuPkg"]["0"]["vendor"] + node["bus_speed"] = float(data["hardware.cpuPkg"]["0"]["busHz"]) + + node = inv_tree("hardware.bios.") + node["version"] = data["hardware.biosInfo"]["biosVersion"] + import time + try: + node["date"] = float(time.strftime("%s", \ + time.strptime(data["hardware.biosInfo"]["releaseDate"],"%Y-%m-%dT%H:%M:%SZ"))) + except Exception, e: + pass + + node = inv_tree("hardware.system.") + node["product"] = data["hardware.systemInfo"]["model"] + node["vendor"] = data["hardware.systemInfo"]["vendor"] + +inv_info['esx_vsphere_hostsystem'] = { + "inv_function" : inv_esx_vsphere_hostsystem, +} + diff -Nru check-mk-1.2.2p3/esx_vsphere_counters check-mk-1.2.6p12/esx_vsphere_counters --- check-mk-1.2.2p3/esx_vsphere_counters 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/esx_vsphere_counters 2015-09-21 10:59:54.000000000 +0000 @@ -0,0 +1,352 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . 
\ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. + +# Example output: +# <<>> +# net.broadcastRx|vmnic0|11|number +# net.broadcastRx||11|number +# net.broadcastTx|vmnic0|0|number +# net.broadcastTx||0|number +# net.bytesRx|vmnic0|3820|kiloBytesPerSecond +# net.bytesRx|vmnic1|0|kiloBytesPerSecond +# net.bytesRx|vmnic2|0|kiloBytesPerSecond +# net.bytesRx|vmnic3|0|kiloBytesPerSecond +# net.bytesRx||3820|kiloBytesPerSecond +# net.bytesTx|vmnic0|97|kiloBytesPerSecond +# net.bytesTx|vmnic1|0|kiloBytesPerSecond +# net.bytesTx|vmnic2|0|kiloBytesPerSecond +# net.bytesTx|vmnic3|0|kiloBytesPerSecond +# net.bytesTx||97|kiloBytesPerSecond +# net.droppedRx|vmnic0|0|number +# net.droppedRx|vmnic1|0|number +# net.droppedRx|vmnic2|0|number +# net.droppedRx|vmnic3|0|number +# net.droppedRx||0|number +# net.droppedTx|vmnic0|0|number +# net.droppedTx|vmnic1|0|number +# ... +# sys.uptime||630664|second + + +# .--Disk IO-------------------------------------------------------------. 
+# | ____ _ _ ___ ___ | +# | | _ \(_)___| | __ |_ _/ _ \ | +# | | | | | / __| |/ / | | | | | | +# | | |_| | \__ \ < | | |_| | | +# | |____/|_|___/_|\_\ |___\___/ | +# | | +# '----------------------------------------------------------------------' +# Example output: +# disk.deviceLatency|naa.600605b002db9f7018d0a40c2a1444b0|0|millisecond +# disk.numberRead|naa.600605b002db9f7018d0a40c2a1444b0|8|number +# disk.numberWrite|naa.600605b002db9f7018d0a40c2a1444b0|47|number +# disk.read|naa.600605b002db9f7018d0a40c2a1444b0|12|kiloBytesPerSecond +# disk.read||12|kiloBytesPerSecond +# disk.write|naa.600605b002db9f7018d0a40c2a1444b0|51|kiloBytesPerSecond +# disk.write||51|kiloBytesPerSecond + + +def inventory_esx_vsphere_counters_diskio(info): + for counter, item, value, unit in info: + if counter == 'disk.read' and item == '': + return [(None, None)] + +def check_esx_vsphere_counters_diskio(_no_item, _no_params, info): + if not info: + raise MKCounterWrapped("Counter data is missing") + + read_bytes = 0 + write_bytes = 0 + reads = 0 + writes = 0 + latency = 0 + for counter, item, value, unit in info: + if item == '': + if counter == 'disk.read': + read_bytes = int(value) * 1024 + elif counter == 'disk.write': + write_bytes = int(value) * 1024 + elif counter == 'disk.numberRead': + reads += int(value) + elif counter == 'disk.numberWrite': + writes += int(value) + elif counter == 'disk.deviceLatency': + latency = max(latency, int(value)) + + return 0, "%s/sec read, %s/sec write, IOs: %.2f/sec, latency: %d ms" % ( + get_bytes_human_readable(read_bytes), get_bytes_human_readable(write_bytes), reads + writes, latency), \ + [ ("read", read_bytes), ("write", write_bytes), ("ios", reads + writes), ("latency" , latency) ] + + +check_info['esx_vsphere_counters.diskio'] = { + 'inventory_function' : inventory_esx_vsphere_counters_diskio, + 'check_function' : check_esx_vsphere_counters_diskio, + 'service_description': 'Disk IO SUMMARY', + 'has_perfdata': True, +} + + + +#. +# .--Interfaces----------------------------------------------------------. +# | ___ _ __ | +# | |_ _|_ __ | |_ ___ _ __ / _| __ _ ___ ___ ___ | +# | | || '_ \| __/ _ \ '__| |_ / _` |/ __/ _ \/ __| | +# | | || | | | || __/ | | _| (_| | (_| __/\__ \ | +# | |___|_| |_|\__\___|_| |_| \__,_|\___\___||___/ | +# | | +# '----------------------------------------------------------------------' + +# The bad thing here: ESX does not send *counters* but *rates*. This might +# seem user friendly on the first look, but is really bad at the second. The +# sampling rate defaults to 20s and is not aligned with our check rate. Also +# a reschedule of the check does not create new data. And: our if.include really +# requires counters. In order to use if.include we therefore simulate counters. + +def convert_esx_counters_if(info): + this_time = time.time() + by_item = {} + for counter, item, value, unit in info: + if item and counter.startswith("net."): + name = counter[4:] + by_item.setdefault(item, {}) + by_item[item][name] = int(value) + + # Example of by_item: + # { + # 'vmnic0': { + # 'broadcastRx': 31, + # 'broadcastTx': 0, + # 'bytesRx': 3905, # is in Kilobytes! 
+ # 'bytesTx': 134, + # 'droppedRx': 0, + # 'droppedTx': 0, + # 'errorsRx': 0, + # 'errorsTx': 0, + # 'multicastRx': 5, + # 'multicastTx': 1, + # 'packetsRx': 53040, + # 'packetsTx': 30822, + # 'received': 3905, + # 'transmitted': 134, + # 'unknownProtos': 0, + # 'usage': 4040, + # }, + # } + nics = by_item.keys() + nics.sort() + + converted = [ + [], # 0 ifIndex 0 + [], # 1 ifDescr 1 + [], # 2 ifType 2 + [], # 3 ifHighSpeed .. 1000 means 1Gbit + [], # 4 ifOperStatus 4 + [], # 5 ifHCInOctets 5 + [], # 6 ifHCInUcastPkts 6 + [], # 7 ifHCInMulticastPkts 7 + [], # 8 ifHCInBroadcastPkts 8 + [], # 9 ifInDiscards 9 + [], # 10 ifInErrors 10 + [], # 11 ifHCOutOctets 11 + [], # 12 ifHCOutUcastPkts 12 + [], # 13 ifHCOutMulticastPkts 13 + [], # 14 ifHCOutBroadcastPkts 14 + [], # 15 ifOutDiscards 15 + [], # 16 ifOutErrors 16 + [], # 17 ifOutQLen 17 + [], # 18 ifAlias 18 + [], # 19 ifPhysAddress 19 + ] + + tableindex = { + 'bytesRx': 5, # is in Kilobytes! + 'packetsRx': 6, + 'multicastRx': 7, + 'broadcastRx': 8, + 'droppedRx': 9, + 'errorsRx': 10, + 'bytesTx': 11, + 'packetsTx': 12, + 'multicastTx': 13, + 'broadcastTx': 14, + 'droppedTx': 15, + 'errorsTx': 16, + # 'received': 3905, + # 'transmitted': 134, + # 'unknownProtos': 0, + # 'usage': 4040, + } + + converted = [] + for index, name in enumerate(nics): + entry = ['0'] * 20 + converted.append(entry) + if name: # Skip summary entry without interface name + entry[0] = (str(index)) + entry[1] = (name) + entry[2] = ('6') # Ethernet + entry[3] = ('') # Speed not known + entry[4] = ('1') # Assume up + entry[18] = (name) # ifAlias + entry[19] = ('') # MAC address not known here + for ctr_name, ti in tableindex.items(): + ctr_value = by_item[name].get(ctr_name, 0) + if ctr_name.startswith("bytes"): + ctr_value *= 1024 + countername = "vmnic." + name + "." + ctr_name + if countername in g_counters: + last_time, last_value = g_counters[countername] + new_value = last_value + ((this_time - last_time) * ctr_value) + else: + last_time = this_time - 60 + last_value = 0 + new_value = ctr_value * 60 + g_counters[countername] = (this_time, new_value) + entry[ti] = str(int(new_value)) + + return converted + + +def inventory_esx_vsphere_counters_if(info): + converted = convert_esx_counters_if(info) + return inventory_if_common(converted) + +def check_esx_vsphere_counters_if(item, params, info): + if not info: + raise MKCounterWrapped("Counter data is missing") + + converted = convert_esx_counters_if(info) + return check_if_common(item, params, converted) + +check_info['esx_vsphere_counters.if'] = { + 'inventory_function' : inventory_esx_vsphere_counters_if, + 'check_function' : check_esx_vsphere_counters_if, + 'service_description' : 'Interface %s', + 'has_perfdata' : True, + 'group' : 'if', + 'default_levels_variable' : 'if_default_levels', + 'includes' : [ 'if.include' ], +} + +#. +# .--Uptime--------------------------------------------------------------. 
+# | _ _ _ _ | +# | | | | |_ __ | |_(_)_ __ ___ ___ | +# | | | | | '_ \| __| | '_ ` _ \ / _ \ | +# | | |_| | |_) | |_| | | | | | | __/ | +# | \___/| .__/ \__|_|_| |_| |_|\___| | +# | |_| | +# '----------------------------------------------------------------------' + +def inventory_esx_vsphere_counters_uptime(info): + for name, instance, counter, unit in info: + if name == "sys.uptime": + return [ (None, {}) ] + +def check_esx_vsphere_counters_uptime(_no_item, params, info): + if not info: + raise MKCounterWrapped("Counter data is missing") + + for name, instance, counter, unit in info: + if name == "sys.uptime": + return check_uptime_seconds(params, int(counter)) + return 3, "No uptime information found in agent output" + + +check_info['esx_vsphere_counters.uptime'] = { + 'inventory_function' : inventory_esx_vsphere_counters_uptime, + 'check_function' : check_esx_vsphere_counters_uptime, + 'service_description': 'Uptime', + 'has_perfdata': True, + 'includes': ['uptime.include'], + 'group': 'uptime', +} + +#. +# .--Ramdisk-------------------------------------------------------------. +# | ____ _ _ _ | +# | | _ \ __ _ _ __ ___ __| (_)___| | __ | +# | | |_) / _` | '_ ` _ \ / _` | / __| |/ / | +# | | _ < (_| | | | | | | (_| | \__ \ < | +# | |_| \_\__,_|_| |_| |_|\__,_|_|___/_|\_\ | +# | | +# +----------------------------------------------------------------------+ + +# We assume that all ramdisks have the same size (in mb) on all hosts +# -> To get size infos about unknown ramdisks, connect to the ESX host via +# SSH and check the size of the disk via "du" command +esx_vsphere_counters_ramdisk_sizes = { + 'root': 32, + 'etc': 28, + 'tmp': 192, + 'hostdstats': 319, + 'snmptraps': 1, + 'upgradescratch': 300, + 'ibmscratch': 300, + 'sfcbtickets': 1, +} + +def inventory_esx_vsphere_counters_ramdisk(info): + ramdisks = [] + for name, instance, counter, unit in info: + if name == 'sys.resourceMemConsumed' \ + and instance.startswith('host/system/kernel/kmanaged/visorfs/'): + ramdisks.append(instance.split('/')[-1]) + + return df_inventory(ramdisks) + +def check_esx_vsphere_counters_ramdisk(item, params, info): + if not info: + raise MKCounterWrapped("Counter data is missing") + + ramdisks = [] + for name, instance, counter, unit in info: + if name == 'sys.resourceMemConsumed' \ + and instance.startswith('host/system/kernel/kmanaged/visorfs/'): + name = instance.split('/')[-1] + try: + size_mb = esx_vsphere_counters_ramdisk_sizes[name] + except KeyError: + if item == name: + return 3, 'Unhandled ramdisk found (%s)' % name + else: + continue + used_mb = float(counter) / 1000 + avail_mb = size_mb - used_mb + ramdisks.append((name, size_mb, avail_mb)) + + return df_check_filesystem_list(item, params, ramdisks) + +check_info['esx_vsphere_counters.ramdisk'] = { + 'inventory_function': inventory_esx_vsphere_counters_ramdisk, + 'check_function': check_esx_vsphere_counters_ramdisk, + 'service_description': 'Ramdisk %s', + 'has_perfdata': True, + 'includes': [ 'df.include' ], + 'group': 'filesystem', + 'default_levels_variable': 'filesystem_default_levels', +} diff -Nru check-mk-1.2.2p3/esx_vsphere_counters.diskio check-mk-1.2.6p12/esx_vsphere_counters.diskio --- check-mk-1.2.2p3/esx_vsphere_counters.diskio 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/esx_vsphere_counters.diskio 2015-06-24 09:48:36.000000000 +0000 @@ -0,0 +1,17 @@ +title: VMWare ESX host systems: Disk throughput +agents: vsphere +catalog: os/storage +license: GPL +distribution: check_mk +description: + This check measures the 
number of read and written bytes
+ of an ESX host system via the vsphere special agent. Currently
+ the check is always OK and does one summary check over
+ all disks and LUNs of the system.
+
+inventory:
+ One service per ESX host is being created.
+
+perfdata:
+ Four values: read and write throughput (bytes per second), ios
+ (operations per second) and latency (milliseconds).
+
diff -Nru check-mk-1.2.2p3/esx_vsphere_counters.if check-mk-1.2.6p12/esx_vsphere_counters.if
--- check-mk-1.2.2p3/esx_vsphere_counters.if 1970-01-01 00:00:00.000000000 +0000
+++ check-mk-1.2.6p12/esx_vsphere_counters.if 2015-06-24 09:48:36.000000000 +0000
@@ -0,0 +1,33 @@
+title: VMWare ESX host systems: Physical network interfaces
+agents: vsphere
+catalog: os/networking
+license: GPL
+distribution: check_mk
+description:
+ This check retrieves performance counters of network interfaces
+ via the vSphere API and simulates the data of an SNMP network
+ interface check.
+
+ This check is fully compatible with {if64} with the following restrictions:
+
+ 1) The interface speed (1GBit/s, etc.) is not detected
+
+ 2) The link status is not detected and assumed to be up
+
+ 3) The queue length is not reported
+
+item:
+ The index or name of the network interface. See {if} for details.
+
+perfdata:
+ Compatible with {if64}. See that man page for details.
+
+inventory:
+ One service per interface is being created. See {if64} for details.
+
+[parameters]
+parameters(dict): Compatible with {if64}
+
+[configuration]
+various: compatible with {if64}.
+
diff -Nru check-mk-1.2.2p3/esx_vsphere_counters.ramdisk check-mk-1.2.6p12/esx_vsphere_counters.ramdisk
--- check-mk-1.2.2p3/esx_vsphere_counters.ramdisk 1970-01-01 00:00:00.000000000 +0000
+++ check-mk-1.2.6p12/esx_vsphere_counters.ramdisk 2015-06-24 09:48:36.000000000 +0000
@@ -0,0 +1,19 @@
+title: VMWare ESX host systems: Ramdisks
+agents: vsphere
+catalog: os/kernel
+license: GPL
+distribution: check_mk
+description:
+ This check monitors the usage of ramdisks used by the operating system
+ of ESX host systems.
+
+ The check needs a host system with version 5.1 or newer. Older versions
+ do not provide the needed information.
+
+ The check makes use of the generic {df} check code. Therefore parameters,
+ perfdata, configuration and results are similar to the {df} check;
+ please refer to that check's man page for details.
+
+inventory:
+ One check per ramdisk is created from the counters reported by
+ the vsphere special agent.
diff -Nru check-mk-1.2.2p3/esx_vsphere_counters.uptime check-mk-1.2.6p12/esx_vsphere_counters.uptime
--- check-mk-1.2.2p3/esx_vsphere_counters.uptime 1970-01-01 00:00:00.000000000 +0000
+++ check-mk-1.2.6p12/esx_vsphere_counters.uptime 2015-06-24 09:48:36.000000000 +0000
@@ -0,0 +1,23 @@
+title: VMWare ESX host systems: Uptime
+agents: vsphere
+catalog: os/misc
+license: GPL
+distribution: check_mk
+description:
+ This check is always OK and simply outputs the uptime of an ESX host
+ system.
+
+perfdata:
+ The uptime in seconds.
+
+inventory:
+ One check per system is created if the agent has a section {<<>>}.
+
+[parameters]
+parameters (dict): A dictionary with the following optional keys:
+
+ {"min"}: Pair of integers of warn and crit: the minimum required uptime
+ in seconds.
+
+ {"max"}: Pair of integers of warn and crit: the maximum allowed uptime
+ in seconds.
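+
+examples:
+ # Hypothetical rule in main.mk: WARN/CRIT when uptime falls below 30/10 minutes
+ checkgroup_parameters['uptime'] = [
+   ( { "min" : (1800, 600) }, [], ALL_HOSTS ),
+ ] + checkgroup_parameters['uptime']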
diff -Nru check-mk-1.2.2p3/esx_vsphere_datastores check-mk-1.2.6p12/esx_vsphere_datastores --- check-mk-1.2.2p3/esx_vsphere_datastores 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/esx_vsphere_datastores 2015-09-21 10:59:54.000000000 +0000 @@ -0,0 +1,117 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. + +# Example output from agent: +# [zmucvm99-lds] +# accessible True +# capacity 578478407680 +# freeSpace 388398841856 +# type VMFS +# uncommitted 51973812224 +# url /vmfs/volumes/513df1e9-12fd7366-ac5a-e41f13e69eaa + + +def esx_vsphere_datastores_parse(info): + stores = {} + for line in info: + if line[0].startswith('['): + name = line[0][1:-1] + store = {} + stores[name] = store + else: + key, value = line + if key == "accessible": + value = value.lower() == "true" + elif key in [ "capacity", "freeSpace", "uncommitted" ]: + value = int(value) + store[key] = value + return stores + + +def inventory_esx_vsphere_datastores(info): + stores = esx_vsphere_datastores_parse(info) + return [ (name, {}) for name in stores ] + + +def check_esx_vsphere_datastores(item, params, info): + stores = esx_vsphere_datastores_parse(info) + if item not in stores: + return 3, "Datastore not existing" + store = stores[item] + size_mb = store["capacity"] / 1024.0 / 1024.0 + avail_mb = store["freeSpace"] / 1024.0 / 1024.0 + + state, infotext, perfdata = df_check_filesystem_single( + g_hostname, item, size_mb, avail_mb, None, None, params) + + if 'uncommitted' in store: + uncommitted_mb = store["uncommitted"] / 1024.0 / 1024.0 + used_mb = size_mb - avail_mb + overprov_mb = used_mb + uncommitted_mb + if size_mb == 0: + overprov_percent = 0 + else: + overprov_percent = (overprov_mb / size_mb) * 100 + + overprov_txt = '' + overprov_warn_mb = None + overprov_crit_mb = None + if 'provisioning_levels' in params: + warn, crit = params['provisioning_levels'] + overprov_warn_mb = avail_mb / 100 * warn + overprov_crit_mb = avail_mb / 100 * crit + if overprov_percent >= crit: + state = max(state, 2) + overprov_txt = ' (!!)' + elif overprov_percent >= warn: + state = max(state, 1) + overprov_txt = ' (!)' + + infotext += ", uncommitted: %.2f GB, provisioning: %.1f%%%s" % ( + uncommitted_mb / 1024, overprov_percent, overprov_txt) + if perfdata: + perfdata += [ + ('uncommitted', str(uncommitted_mb) + 'MB'), + ('overprovisioned', 
str(overprov_mb) + 'MB', overprov_warn_mb, overprov_crit_mb), + ] + + if not store["accessible"]: + state = 2 + infotext = "unaccessible(!!), " + infotext + + return state, infotext, perfdata + +check_info['esx_vsphere_datastores'] = { + "inventory_function" : inventory_esx_vsphere_datastores, + "check_function" : check_esx_vsphere_datastores, + "service_description" : "Filesystem %s", + "includes" : [ 'df.include' ], + "has_perfdata" : True, + "group" : "esx_vsphere_datastores", + "default_levels_variable" : "filesystem_default_levels", +} + +#. diff -Nru check-mk-1.2.2p3/esx_vsphere_hostsystem check-mk-1.2.6p12/esx_vsphere_hostsystem --- check-mk-1.2.2p3/esx_vsphere_hostsystem 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/esx_vsphere_hostsystem 2015-09-21 10:59:54.000000000 +0000 @@ -0,0 +1,388 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. + +def esx_vsphere_hostsystem_convert(info): + data = {} + for line in info: + data[line[0]] = line[1:] + return data + + +#. +# .--CPU-----------------------------------------------------------------. 
+# | ____ ____ _ _ | +# | / ___| _ \| | | | | +# | | | | |_) | | | | | +# | | |___| __/| |_| | | +# | \____|_| \___/ | +# | | +# +----------------------------------------------------------------------+ + +esx_host_cpu_default_levels = {} + +# hardware.cpuInfo.numCpuCores 12 +# hardware.cpuInfo.numCpuPackages 2 +# hardware.cpuInfo.numCpuThreads 24 +# hardware.cpuInfo.hz 2933436846 --> In Hz per CPU Core +# summary.quickStats.overallCpuUsage 7539 --> In MHz + + +def inventory_esx_vsphere_hostsystem_cpu(info): + data = esx_vsphere_hostsystem_convert(info).keys() + if 'summary.quickStats.overallCpuUsage' in data \ + and 'hardware.cpuInfo.hz' in data\ + and 'hardware.cpuInfo.numCpuCores' in data: + return [(None, {})] + +def check_esx_vsphere_hostsystem_cpu(item, params, info): + data = esx_vsphere_hostsystem_convert(info) + num_sockets = int(data['hardware.cpuInfo.numCpuPackages'][0]) + num_cores = int(data['hardware.cpuInfo.numCpuCores'][0]) + num_threads = int(data['hardware.cpuInfo.numCpuThreads'][0]) + used_mhz = float(data['summary.quickStats.overallCpuUsage'][0]) + mhz_per_core = float(data['hardware.cpuInfo.hz'][0]) / 1024.0 / 1024.0 + total_mhz = mhz_per_core * num_cores + + usage = used_mhz / total_mhz * 100 + per_core = num_cores / 100.0 + + infotext = "%.1f%%" % usage + + # Convert legacy parameters + this_time = time.time() + state, infotext, perfdata = check_cpu_util(usage, params) + + infotext += ", %.2fGHz/%.2fGHz" % (used_mhz / 1024.0, total_mhz / 1024.0) + infotext += ", %d sockets, %d cores/socket, %d threads" % ( + num_sockets, num_cores / num_sockets, num_threads) + + # put number of threads as MAX value for first perf-data. This + # is needed by the PNP template. + perfdata_cpu = list(perfdata[0]) + perfdata_cpu[-1] = num_threads + perfdata = [ tuple(perfdata_cpu) ] + perfdata[1:] + return (state, infotext, perfdata) + + +check_info['esx_vsphere_hostsystem.cpu_usage'] = { + "inventory_function" : inventory_esx_vsphere_hostsystem_cpu, + "check_function" : check_esx_vsphere_hostsystem_cpu, + "service_description" : "CPU utilization", + "group" : "cpu_utilization_os", + "has_perfdata" : True, + "default_levels_variable" : "esx_host_cpu_default_levels", + "includes" : [ "cpu_util.include" ], +} + + +#. +# .--Mem-Cluster---------------------------------------------------------. 
+# | __ __ ____ _ _ | +# | | \/ | ___ _ __ ___ / ___| |_ _ ___| |_ ___ _ __ | +# | | |\/| |/ _ \ '_ ` _ \ _____| | | | | | / __| __/ _ \ '__| | +# | | | | | __/ | | | | |_____| |___| | |_| \__ \ || __/ | | +# | |_| |_|\___|_| |_| |_| \____|_|\__,_|___/\__\___|_| | +# | | +# +----------------------------------------------------------------------+ + +def check_esx_vsphere_hostsystem_mem_cluster(item, params, info): + data = {} + for line in info: + if line[0] in ["summary.quickStats.overallMemoryUsage", "hardware.memorySize", "name"]: + data.setdefault(line[0], []).append(line[1]) + sorted_params = sorted(params, reverse = True) + + nodes_count = len(data['name']) + total_memory_usage = sum(map(lambda x: savefloat(x) * 1024 * 1024, data['summary.quickStats.overallMemoryUsage'])) + total_memory_size = sum(map(lambda x: savefloat(x), data['hardware.memorySize'])) + + level = total_memory_usage / total_memory_size * 100 + label = "" + state = 0 + for count, levels in sorted_params: + if nodes_count >= count: + warn, crit = levels + if level > crit: + state = 2 + label = " (Levels at %d%%/%d%%)" % (warn, crit) + elif level > warn: + state = 1 + label = " (Levels at %d%%/%d%%)" % (warn, crit) + break + + + perf = [("usage", total_memory_usage, warn * total_memory_size / 100, + crit * total_memory_size / 100, 0, total_memory_size)] + yield state, "%d%%%s used - %s/%s" % \ + (level, label, get_bytes_human_readable(total_memory_usage), get_bytes_human_readable(total_memory_size)), perf + + +check_info['esx_vsphere_hostsystem.mem_usage_cluster'] = { + "check_function" : check_esx_vsphere_hostsystem_mem_cluster, + "service_description" : "Memory used", + "group" : "mem_cluster", + "has_perfdata" : True, +} + + + +# .--Memory--------------------------------------------------------------. +# | __ __ | +# | | \/ | ___ _ __ ___ ___ _ __ _ _ | +# | | |\/| |/ _ \ '_ ` _ \ / _ \| '__| | | | | +# | | | | | __/ | | | | | (_) | | | |_| | | +# | |_| |_|\___|_| |_| |_|\___/|_| \__, | | +# | |___/ | +# +----------------------------------------------------------------------+ + + +esx_host_mem_default_levels = ( 80.0, 90.0 ) + +def inventory_esx_vsphere_hostsystem_mem(info): + data = esx_vsphere_hostsystem_convert(info).keys() + if 'summary.quickStats.overallMemoryUsage' in data and 'hardware.memorySize' in data: + return [(None, 'esx_host_mem_default_levels')] + +def check_esx_vsphere_hostsystem_mem(item, params, info): + data = esx_vsphere_hostsystem_convert(info) + memory_usage = savefloat(data['summary.quickStats.overallMemoryUsage'][0]) * 1024 * 1024 + memory_size = savefloat(data['hardware.memorySize'][0]) + level = memory_usage / memory_size * 100 + + warn, crit = params + state = 0 + label = '' + if level > crit: + state = 2 + label = " (Levels at %d%%/%d%%)" % (warn, crit) + elif level > warn: + state = 1 + label = " (Levels at %d%%/%d%%)" % (warn, crit) + + message = "%d%%%s used - %s/%s" % \ + (level, label, get_bytes_human_readable(memory_usage), get_bytes_human_readable(memory_size)) + perf = [("usage", memory_usage, warn * memory_size / 100, crit * memory_size / 100, 0, memory_size)] + return(state, message, perf) + + +check_info['esx_vsphere_hostsystem.mem_usage'] = { + "inventory_function" : inventory_esx_vsphere_hostsystem_mem, + "check_function" : check_esx_vsphere_hostsystem_mem, + "service_description" : "Memory used", + "group" : "esx_host_memory", + "has_perfdata" : True +} + +#. +# .--State---------------------------------------------------------------. 
+# | ____ _ _ | +# | / ___|| |_ __ _| |_ ___ | +# | \___ \| __/ _` | __/ _ \ | +# | ___) | || (_| | || __/ | +# | |____/ \__\__,_|\__\___| | +# | | +# +----------------------------------------------------------------------+ + + +def inventory_esx_vsphere_hostsystem_state(info): + data = esx_vsphere_hostsystem_convert(info).keys() + if 'runtime.inMaintenanceMode' in data: + return [(None, None)] + +def check_esx_vsphere_hostsystem_state(_no_item, _no_params, info): + data = esx_vsphere_hostsystem_convert(info) + state = 0 + label = {} + messages = [] + label['Status'] = "" + overallStatus = str(data['overallStatus'][0]) + if overallStatus == "yellow": + state = 1 + label['Status'] = "(!)" + elif overallStatus in [ "red", "gray"]: + state = 2 + label['Status'] = "(!!)" + messages.append("Entity state: %s%s" % (overallStatus, label['Status'])) + + label['powerState'] = '' + powerState = str(data['runtime.powerState'][0]) + if powerState in ['poweredOff', 'unknown']: + state = 2 + label['powerState'] = "(!!)" + elif powerState == 'standBy': + state = max(state, 1) + label['powerState'] = "(!)" + messages.append("Power state: %s%s" % (powerState, label['powerState'])) + + + return(state, ", ".join(messages)) + +check_info['esx_vsphere_hostsystem.state'] = { + "inventory_function" : inventory_esx_vsphere_hostsystem_state, + "check_function" : check_esx_vsphere_hostsystem_state, + "service_description" : "Overall state", +} +#. +# .--Maintenance---------------------------------------------------------. +# | __ __ _ _ | +# | | \/ | __ _(_)_ __ | |_ ___ _ __ __ _ _ __ ___ ___ | +# | | |\/| |/ _` | | '_ \| __/ _ \ '_ \ / _` | '_ \ / __/ _ \ | +# | | | | | (_| | | | | | || __/ | | | (_| | | | | (_| __/ | +# | |_| |_|\__,_|_|_| |_|\__\___|_| |_|\__,_|_| |_|\___\___| | +# | | +# +----------------------------------------------------------------------+ +# | | +# '----------------------------------------------------------------------' + +def inventory_esx_vsphere_hostsystem_maintenance(info): + data = esx_vsphere_hostsystem_convert(info) + if 'runtime.inMaintenanceMode' in data: + current_state = str(data['runtime.inMaintenanceMode'][0]).lower() + return [(None, { 'target_state' : current_state })] + +def check_esx_vsphere_hostsystem_maintenance(_no_item, params, info): + data = esx_vsphere_hostsystem_convert(info) + target_state = params['target_state'] + current_state = str(data['runtime.inMaintenanceMode'][0]).lower() + state = 0 + if target_state != current_state: + state = 2 + if current_state == "true": + return state, "System running is in Maintenance mode" + else: + return state, "System not in Maintenance mode" + +check_info['esx_vsphere_hostsystem.maintenance'] = { + "inventory_function" : inventory_esx_vsphere_hostsystem_maintenance, + "check_function" : check_esx_vsphere_hostsystem_maintenance, + "service_description" : "Maintenance Mode", + "group" : "esx_hostystem_maintenance", +} + +#. +# .--Multipath-----------------------------------------------------------. 
+# | __ __ _ _ _ _ _ | +# | | \/ |_ _| | |_(_)_ __ __ _| |_| |__ | +# | | |\/| | | | | | __| | '_ \ / _` | __| '_ \ | +# | | | | | |_| | | |_| | |_) | (_| | |_| | | | | +# | |_| |_|\__,_|_|\__|_| .__/ \__,_|\__|_| |_| | +# | |_| | +# +----------------------------------------------------------------------+ + + +def esx_vsphere_multipath_convert(info): + data = esx_vsphere_hostsystem_convert(info) + data = data['config.multipathState.path'] + data = zip(data[::2], data[1::2]) + paths = {} + for path, state in data: + path_tokens = path.split('-') + if "." not in path_tokens[-1]: + continue # invalid format / unknown type + path_type, path_id = path_tokens[-1].split('.') + + if path_type in ['naa', 'eui']: + hw_type = path.split('.')[0] + if hw_type != 'unknown': + paths.setdefault(path_id, []) + paths[path_id].append((state, "%s/%s" % (path_type, hw_type))) + return paths + +def inventory_esx_vsphere_hostsystem_multipath(info): + data = esx_vsphere_multipath_convert(info).items() + return [ (x, None) for x, y in data] + +def check_esx_vsphere_hostsystem_multipath(item, params, info): + state_infos = { + # alert_state, count, info + "active" : [0, 0, ""], + "dead" : [2, 0, ""], + "disabled" : [1, 0, ""], + "standby" : [0, 0, ""], + "unknown" : [2, 0, ""] + } + + state = 0 + message = "" + + for path, states in esx_vsphere_multipath_convert(info).items(): + if path == item: + # Collect states + for path_state, path_type in states: + state_item = state_infos.get(path_state) + if state_item: + state_item[1] += 1 + state = max(state_item[0], state) + + # Check warn, critical + if not params or type(params) == list: + if state_infos["standby"][1] > 0 and \ + state_infos["standby"][1] != state_infos["active"][1]: + standby_label = "(!)" + state = max(state_infos["standby"][0], state) + else: + state = 0 + for state_name, state_values in state_infos.items(): + if params.get(state_name): + limits = params.get(state_name) + if len(limits) == 2: + warn_max, crit_max = limits + crit_min, warn_min = 0, 0 + else: + crit_min, warn_min, warn_max, crit_max = limits + + extra_info = "" + count = state_values[1] + if count < crit_min: + state = max(state, 2) + state_values[2] = "(!!)(less than %d)" % crit_min + elif count > crit_max: + state = max(state, 2) + state_values[2] = "(!!)(more than %d)" % crit_max + elif count < warn_min: + state = max(state, 1) + state_values[2] = "(!)(less than %d)" % warn_min + elif count > warn_max: + state = max(state, 1) + state_values[2] = "(!)(more than %d)" % warn_max + + # Output message + message = "Type %s" % path_type + for element in "active", "dead", "disabled", "standby", "unknown": + message += ", %d %s%s" % ( state_infos[element][1], element, state_infos[element][2] ) + break + else: + return 3, "Path not found in agent output" + + return (state, message) + +check_info['esx_vsphere_hostsystem.multipath'] = { + "inventory_function" : inventory_esx_vsphere_hostsystem_multipath, + "check_function" : check_esx_vsphere_hostsystem_multipath, + "service_description" : "Multipath %s", + "group" : "multipath_count" +} + diff -Nru check-mk-1.2.2p3/esx_vsphere_hostsystem.cpu_usage check-mk-1.2.6p12/esx_vsphere_hostsystem.cpu_usage --- check-mk-1.2.2p3/esx_vsphere_hostsystem.cpu_usage 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/esx_vsphere_hostsystem.cpu_usage 2015-06-24 09:48:36.000000000 +0000 @@ -0,0 +1,31 @@ +title: VMWare ESX host systems: CPU utilization +agents: vsphere +catalog: os/kernel +license: GPL +distribution: check_mk +description: + This check 
measures the CPU utilization of a VMWare ESX host system.
+ It also shows you the number of sockets, cores and threads.
+
+perfdata:
+ One or two values: the first value is the current usage in percent, ranging from
+ 0 to 100. The "maximum" value is not 100, but the number of CPU threads. This
+ can be used for scaling the graph in terms of the number of used CPU threads.
+
+ If averaging is enabled then a second value is sent: the averaged CPU utilization
+ ranging from 0 to 100.
+
+inventory:
+ One check per ESX host system will be created.
+
+[parameters]
+parameters(dict): A dictionary with the following keys:
+
+ {"levels"}: Either {None} for no levels, a tuple of warn and crit (in percent) or
+ a dictionary with predictive levels settings.
+
+ {"average"}: A number of minutes for enabling averaging.
+
+[configuration]
+esx_host_cpu_default_levels(dict): Default levels, preset to an empty dictionary, which means that no levels
+ will be applied.
diff -Nru check-mk-1.2.2p3/esx_vsphere_hostsystem.maintenance check-mk-1.2.6p12/esx_vsphere_hostsystem.maintenance
--- check-mk-1.2.2p3/esx_vsphere_hostsystem.maintenance 1970-01-01 00:00:00.000000000 +0000
+++ check-mk-1.2.6p12/esx_vsphere_hostsystem.maintenance 2015-06-24 09:48:36.000000000 +0000
@@ -0,0 +1,21 @@
+title: VMWare ESX Host System: Maintenance State
+agents: vsphere
+catalog: os/misc
+license: GPL
+distribution: check_mk
+description:
+ This check queries the maintenance state of an ESX host
+ system via the vsphere agent and returns:
+
+ {OK} if the ESX host has kept the same state since the last service discovery
+
+ {CRIT} if the state has changed.
+
+ The expected state can be changed by a WATO rule that forces the
+ maintenance mode to be either on or off.
+
+ See "Monitoring VMWare ESX with Check_MK" in the online documentation
+ as well.
+
+inventory:
+ On each ESX host one check is generated.
+
diff -Nru check-mk-1.2.2p3/esx_vsphere_hostsystem.mem_usage check-mk-1.2.6p12/esx_vsphere_hostsystem.mem_usage
--- check-mk-1.2.2p3/esx_vsphere_hostsystem.mem_usage 1970-01-01 00:00:00.000000000 +0000
+++ check-mk-1.2.6p12/esx_vsphere_hostsystem.mem_usage 2015-06-24 09:48:36.000000000 +0000
@@ -0,0 +1,26 @@
+title: VMWare ESX host systems: Usage of physical RAM
+agents: vsphere
+catalog: os/kernel
+license: GPL
+distribution: check_mk
+description:
+ This check measures the current usage of physical RAM by
+ an ESX host system.
+
+inventory:
+ One check per ESX host system is being created.
+
+perfdata:
+ One value: the current usage in bytes.
+
+[parameters]
+warning (float): the percentage of used RAM
+ at which a WARNING state is triggered.
+critical (float): the percentage of used RAM
+ at which a CRITICAL state is triggered.
+
+[configuration]
+esx_host_mem_default_levels(float, float): Levels used by
+ all checks that are created by inventory. This defaults to {(80.0, 90.0)}.
+
+
diff -Nru check-mk-1.2.2p3/esx_vsphere_hostsystem.mem_usage_cluster check-mk-1.2.6p12/esx_vsphere_hostsystem.mem_usage_cluster
--- check-mk-1.2.2p3/esx_vsphere_hostsystem.mem_usage_cluster 1970-01-01 00:00:00.000000000 +0000
+++ check-mk-1.2.6p12/esx_vsphere_hostsystem.mem_usage_cluster 2015-09-21 10:59:54.000000000 +0000
@@ -0,0 +1,34 @@
+title: VMware ESX vCenter: Total physical RAM usage of clustered ESX hosts
+agents: vsphere
+catalog: os/kernel
+license: GPL
+distribution: check_mk
+description:
+ This check measures the total physical RAM usage for a cluster of hosts.
+ This check is only applicable for clusters and cannot be inventorized.
+ You need to create a manual check for it in WATO and assign it to a cluster host.
+
+examples:
+ static_checks['mem_cluster'] = [
+   ( ('esx_vsphere_hostsystem.mem_usage_cluster', None,
+      [(2, (60.0, 70.0)),
+       (5, (70.0, 80.0)),
+       (7, (85.0, 90.0))
+      ]), [], ['esx_cluster'] ),
+ ] + static_checks['mem_cluster']
+
+
+
+inventory:
+ Not applicable
+
+perfdata:
+ One value: the current total usage in bytes.
+
+[parameters]
+parameters (list): A list of tuples defining the minimum number of
+ required nodes together with the levels that apply from that node count on.
+
+ Each element is a tuple: (number_of_nodes (int), (warn (float), crit (float)))
+
+ Please refer to the example above.
diff -Nru check-mk-1.2.2p3/esx_vsphere_hostsystem.multipath check-mk-1.2.6p12/esx_vsphere_hostsystem.multipath
--- check-mk-1.2.2p3/esx_vsphere_hostsystem.multipath 1970-01-01 00:00:00.000000000 +0000
+++ check-mk-1.2.6p12/esx_vsphere_hostsystem.multipath 2015-06-24 09:48:36.000000000 +0000
@@ -0,0 +1,26 @@
+title: VMWare ESX host systems: Multipath state
+agents: vsphere
+catalog: os/storage
+license: GPL
+distribution: check_mk
+description:
+ This check monitors the state of all multipath devices
+ on an ESX host system.
+ Without further configuration it returns {CRIT} if at least one
+ path is {dead} or in {unknown} state. It returns {WARN} if at least
+ one path is {disabled}. It also returns {WARN} if at least one path
+ is in {standby} mode and the number of paths in {standby} mode is not
+ equal to the number of {active} paths.
+ By providing parameters you can configure the warn and crit levels for
+ each state type.
+
+inventory:
+ One service per multipath device will be created. Paths
+ to local disks are ignored.
+
+item:
+ The unique id of the path.
+
+[parameters]
+parameters (dict): Each key represents a path state. The value is an int tuple
+ with (crit_min, warn_min, warn_max, crit_max). Available keys are {active}, {dead}, {disabled}, {standby}, {unknown}
diff -Nru check-mk-1.2.2p3/esx_vsphere_hostsystem.state check-mk-1.2.6p12/esx_vsphere_hostsystem.state
--- check-mk-1.2.2p3/esx_vsphere_hostsystem.state 1970-01-01 00:00:00.000000000 +0000
+++ check-mk-1.2.6p12/esx_vsphere_hostsystem.state 2015-06-24 09:48:36.000000000 +0000
@@ -0,0 +1,23 @@
+title: VMWare ESX Host System: Overall state and power state
+agents: vsphere
+catalog: os/misc
+license: GPL
+distribution: check_mk
+description:
+ This check queries the overall state and power state of an ESX host
+ system via the vsphere agent and returns:
+
+ {OK} if the ESX host is running normally (powered on)
+
+ {WARN} if vsphere reports an overall state of yellow or if the ESX host is
+ in power state standBy
+
+ {CRIT} if vsphere reports an overall state of red or grey or if the ESX host is
+ in power state poweredOff
+
+ See "Monitoring VMWare ESX with Check_MK" in the online documentation
+ as well.
+
+inventory:
+ On each ESX host one check is generated.
+
diff -Nru check-mk-1.2.2p3/esx_vsphere_licenses check-mk-1.2.6p12/esx_vsphere_licenses
--- check-mk-1.2.2p3/esx_vsphere_licenses 1970-01-01 00:00:00.000000000 +0000
+++ check-mk-1.2.6p12/esx_vsphere_licenses 2015-06-24 09:48:36.000000000 +0000
@@ -0,0 +1,56 @@
+#!/usr/bin/python
+# -*- encoding: utf-8; py-indent-offset: 4 -*-
+# +------------------------------------------------------------------+
+# | ____ _ _ __ __ _ __ |
+# | / ___| |__ ___ ___| | __ | \/ | |/ / |
+# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / |
+# | | |___| | | | __/ (__| < | | | | . 
\ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. + +# Example output from agent: +#esx_vsphere_licenses:sep(9)>>> +#VMware vSphere 5 Standard 100 130 +#VMware vSphere 5 Enterprise 86 114 +#vCenter Server 5 Standard 1 1 + +def inventory_esx_vsphere_licenses(info): + return [ (line[0], None) for line in info ] + +def check_esx_vsphere_licenses(item, params, info): + # Transform: VMware vSphere 5 Standard 100 13 + # Into dict: 'VMware vSphere 5 Standard': (100, 130) + licenses = dict(map( + lambda x: (x[0], tuple(map(lambda y: int(y), x[1].split()))), + info)) + if item not in licenses: + return 3, "License not found in agent output" + + used, have = licenses[item] + return license_check_levels(have, used, params) + +check_info['esx_vsphere_licenses'] = { + "inventory_function" : inventory_esx_vsphere_licenses, + "check_function" : check_esx_vsphere_licenses, + "service_description" : "License %s", + "has_perfdata" : True, + "group" : "esx_licenses", + "includes" : [ "license.include" ] +} + diff -Nru check-mk-1.2.2p3/esx_vsphere_objects check-mk-1.2.6p12/esx_vsphere_objects --- check-mk-1.2.2p3/esx_vsphere_objects 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/esx_vsphere_objects 2015-09-21 10:59:54.000000000 +0000 @@ -0,0 +1,154 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. 
+
+# Example output from agent:
+# <<<esx_vsphere_objects>>>
+# hostsystem esx.wacker.corp
+# virtualmachine LinuxI
+# virtualmachine OpenSUSE_II
+# virtualmachine OpenSUSE_III
+# virtualmachine OpenSUSE_IV
+# virtualmachine OpenSUSE_V
+# virtualmachine WindowsXP I
+# virtualmachine LinuxII
+# virtualmachine LinuxIII
+# virtualmachine LinuxIV
+# virtualmachine LinuxV
+# virtualmachine OpenSUSE_I
+
+vsphere_object_names = {
+    "hostsystem"     : "HostSystem",
+    "virtualmachine" : "VM",
+}
+
+
+# .--Single--------------------------------------------------------------.
+# | ____ _ _ |
+# | / ___|(_)_ __ __ _| | ___ |
+# | \___ \| | '_ \ / _` | |/ _ \ |
+# | ___) | | | | | (_| | | __/ |
+# | |____/|_|_| |_|\__, |_|\___| |
+# | |___/ |
+# '----------------------------------------------------------------------'
+
+def inventory_esx_vsphere_objects(info):
+    return [ (vsphere_object_names[line[0]] + " " + line[1], {}) for line in info ]
+
+# params is a dict with the allowed target states for the systems
+# Example:
+# params = {
+#     "states" : {
+#         "poweredOn"  : 0,
+#         "poweredOff" : 0,
+#         "suspended"  : 3,
+#     }
+# }
+
+
+def check_esx_vsphere_objects(item, params, info):
+    if params == None:
+        params = {}
+
+    for line in info:
+        if vsphere_object_names[line[0]] + " " + line[1] == item and len(line) > 3:
+            running_on  = line[2]
+            power_state = line[3]
+            state = params.get("states", {}).get(power_state)
+            if state == None:
+                if power_state == "poweredOn":
+                    state = 0
+                elif power_state == "poweredOff":
+                    state = 1
+                elif power_state == "suspended":
+                    state = 1
+                else:
+                    state = 3
+            infotext = "power state: %s" % power_state
+            if running_on:
+                if state == 0:
+                    infotext += ', running on [%s]' % running_on
+                else:
+                    infotext += ', defined on [%s]' % running_on
+            return state, infotext
+
+    what = item.split()[0]
+    name = item.split()[1]
+
+    if what == "VM":
+        return 3, "Virtual machine %s is missing" % name
+    else:
+        return 3, "No data about host system %s" % name
+
+
+check_info['esx_vsphere_objects'] = {
+    "inventory_function"  : inventory_esx_vsphere_objects,
+    "check_function"      : check_esx_vsphere_objects,
+    "service_description" : "%s",
+    "group"               : "esx_vsphere_objects",
+}
+
+def inventory_esx_vsphere_objects_count(info):
+    return [(None, None)]
+
+def check_esx_vsphere_objects_count(_no_item, _no_params, info):
+    virtualmachines = 0
+    hostsystems = 0
+    for line in info:
+        if line[0] == 'hostsystem':
+            hostsystems += 1
+        elif line[0] == 'virtualmachine':
+            virtualmachines += 1
+    messages = "Virtualmachines: %d" % virtualmachines
+    perfdata = [('vms', virtualmachines)]
+    if hostsystems > 1:
+        messages += ", Hostsystems: %d" % hostsystems
+        perfdata.append(('hosts', hostsystems))
+    return 0, messages, perfdata
+
+
+check_info['esx_vsphere_objects.count'] = {
+    "inventory_function"  : inventory_esx_vsphere_objects_count,
+    "check_function"      : check_esx_vsphere_objects_count,
+    "service_description" : "Object count",
+    "has_perfdata"        : True,
+}
+# .--Cluster-------------------------------------------------------------.
+# | ____ _ _ |
+# | / ___| |_ _ ___| |_ ___ _ __ |
+# | | | | | | | / __| __/ _ \ '__| |
+# | | |___| | |_| \__ \ || __/ | | |
+# | \____|_|\__,_|___/\__\___|_| |
+# | |
+# '----------------------------------------------------------------------'
+
+# def check_esx_vsphere_objects_cluster(_no_item, params, info):
+#     print info
+#     return 3, "MIST"
+#
+# check_info['esx_vsphere_objects.cluster'] = {
+#     "check_function"      : check_esx_vsphere_objects_cluster,
+#     "service_description" : "HostSystem SUMMARY",
+# }
diff -Nru check-mk-1.2.2p3/esx_vsphere_objects.count check-mk-1.2.6p12/esx_vsphere_objects.count
--- check-mk-1.2.2p3/esx_vsphere_objects.count 1970-01-01 00:00:00.000000000 +0000
+++ check-mk-1.2.6p12/esx_vsphere_objects.count 2015-06-24 09:48:36.000000000 +0000
@@ -0,0 +1,16 @@
+title: VMWare ESX: Number of Virtualmachines and Hostsystems
+agents: vsphere
+catalog: os/misc
+license: GPL
+distribution: check_mk
+description:
+ This check only counts the number of Virtualmachines on a Hostsystem, or the number of
+ Virtualmachines and Hostsystems on a vCenter.
+ It's always {OK}
+
+inventory:
+ One service will be created
+
+perfdata:
+ One graph for vms and one for hosts
+
diff -Nru check-mk-1.2.2p3/esx_vsphere_sensors check-mk-1.2.6p12/esx_vsphere_sensors
--- check-mk-1.2.2p3/esx_vsphere_sensors 1970-01-01 00:00:00.000000000 +0000
+++ check-mk-1.2.6p12/esx_vsphere_sensors 2015-09-21 10:59:54.000000000 +0000
@@ -0,0 +1,78 @@
+#!/usr/bin/python
+# -*- encoding: utf-8; py-indent-offset: 4 -*-
+# +------------------------------------------------------------------+
+# | ____ _ _ __ __ _ __ |
+# | / ___| |__ ___ ___| | __ | \/ | |/ / |
+# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / |
+# | | |___| | | | __/ (__| < | | | | . \ |
+# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ |
+# | |
+# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de |
+# +------------------------------------------------------------------+
+#
+# This file is part of Check_MK.
+# The official homepage is at http://mathias-kettner.de/check_mk.
+#
+# check_mk is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by
+# the Free Software Foundation in version 2. check_mk is distributed
+# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with-
+# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE. See the GNU General Public License for more de-
+# tails. You should have received a copy of the GNU General Public
+# License along with GNU Make; see the file COPYING. If not, write
+# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
+# Boston, MA 02110-1301 USA.
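One note on check_esx_vsphere_objects above: the interaction between the optional params["states"] mapping and the built-in defaults is easy to misread. A minimal sketch of just that lookup (the helper object_state and the rule values are invented for illustration):

    # Explicit entries in params["states"] win; otherwise the built-in
    # defaults apply (poweredOn -> OK, poweredOff/suspended -> WARN,
    # anything else -> UNKNOWN).
    defaults = {"poweredOn": 0, "poweredOff": 1, "suspended": 1}

    def object_state(power_state, params):
        state = (params or {}).get("states", {}).get(power_state)
        if state is None:
            state = defaults.get(power_state, 3)
        return state

    assert object_state("poweredOff", {"states": {"poweredOff": 0}}) == 0  # planned downtime
    assert object_state("poweredOn", {}) == 0
    assert object_state("rebooting", None) == 3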
+
+# <<<esx_vsphere_sensors:sep(59)>>>
+# VMware Rollup Health State;;0;system;0;;red;Red;Sensor is operating under critical conditions
+# Power Domain 1 Power Unit 0 - Redundancy lost;;0;power;0;;yellow;Yellow;Sensor is operating under conditions that are non-critical
+# Power Supply 2 Power Supply 2 0: Power Supply AC lost - Assert;;0;power;0;;red;Red;Sensor is operating under critical conditions
+
+def inventory_esx_vsphere_sensors(info):
+    return [(None, None)]
+
+def check_esx_vsphere_sensors(_no_item, params, info):
+    state = 0
+
+    infos = []
+    sensor_state_modified = False
+
+    for name, base_units, current_reading, sensor_type, unit_modifier, rate_units, health_key, health_label, health_summary in info:
+        if health_key == "green":
+            continue # usually not output by agent anyway
+        infos.append("%s: %s (%s)" % (name, health_label, health_summary))
+
+        sensor_state = 0
+        if health_key == "yellow":
+            sensor_state = 1
+        elif health_key == "unknown":
+            sensor_state = 1
+        else:
+            sensor_state = 2
+
+        extra_info = ""
+        if params:
+            for entry in params:
+                if name.startswith(entry.get("name")):
+                    new_state = entry.get("states").get(str(sensor_state))
+                    if new_state != None:
+                        sensor_state = new_state
+                        extra_info = "(state modified by rule)"
+                        sensor_state_modified = True
+
+        state = max(state, sensor_state)
+        infos[-1] += ["", "(!)", "(!!)", "(!)"][state]
+        infos[-1] += extra_info
+
+    if state > 0 or sensor_state_modified:
+        return state, ", ".join(infos)
+    else:
+        return 0, "All sensors are in normal state"
+
+check_info['esx_vsphere_sensors'] = {
+    "inventory_function"  : inventory_esx_vsphere_sensors,
+    "check_function"      : check_esx_vsphere_sensors,
+    "service_description" : "Hardware Sensors",
+    "group"               : "hostsystem_sensors"
+}
diff -Nru check-mk-1.2.2p3/esx_vsphere_vm check-mk-1.2.6p12/esx_vsphere_vm
--- check-mk-1.2.2p3/esx_vsphere_vm 1970-01-01 00:00:00.000000000 +0000
+++ check-mk-1.2.6p12/esx_vsphere_vm 2015-09-21 10:59:54.000000000 +0000
@@ -0,0 +1,417 @@
+#!/usr/bin/python
+# -*- encoding: utf-8; py-indent-offset: 4 -*-
+# +------------------------------------------------------------------+
+# | ____ _ _ __ __ _ __ |
+# | / ___| |__ ___ ___| | __ | \/ | |/ / |
+# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / |
+# | | |___| | | | __/ (__| < | | | | . \ |
+# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ |
+# | |
+# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de |
+# +------------------------------------------------------------------+
+#
+# This file is part of Check_MK.
+# The official homepage is at http://mathias-kettner.de/check_mk.
+#
+# check_mk is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by
+# the Free Software Foundation in version 2. check_mk is distributed
+# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with-
+# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE. See the GNU General Public License for more de-
+# tails. You should have received a copy of the GNU General Public
+# License along with GNU Make; see the file COPYING. If not, write
+# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
+# Boston, MA 02110-1301 USA.
+
+def esx_vsphere_vm_convert(info):
+    data = {}
+    for line in info:
+        data[line[0]] = line[1:]
+    return data
+
+# .--Memory--------------------------------------------------------------.
+# | __ __ |
+# | | \/ | ___ _ __ ___ ___ _ __ _ _ |
+# | | |\/| |/ _ \ '_ ` _ \ / _ \| '__| | | | |
+# | | | | | __/ | | | | | (_) | | | |_| | |
+# | |_| |_|\___|_| |_| |_|\___/|_| \__, | |
+# | |___/ |
+# '----------------------------------------------------------------------'
+
+
+def inventory_esx_vsphere_vm_mem(info):
+    data = esx_vsphere_vm_convert(info).keys()
+    if 'summary.quickStats.guestMemoryUsage' in data:
+        return [(None, {})]
+
+def check_esx_vsphere_vm_mem(_no_item, _no_params, info):
+    data = esx_vsphere_vm_convert(info)
+
+    # If the machine is powered off, we do not get data
+    powerstate = data["runtime.powerState"][0]
+    if powerstate != "poweredOn":
+        raise MKCounterWrapped("VM is %s, skipping this check" % powerstate)
+
+
+    try:
+        # consumed host memory
+        host_memory_usage = savefloat(data["summary.quickStats.hostMemoryUsage"][0]) * 1024 * 1024
+        # active guest memory
+        guest_memory_usage = savefloat(data["summary.quickStats.guestMemoryUsage"][0]) * 1024 * 1024
+        # size of the balloon driver in the VM
+        ballooned_memory = savefloat(data["summary.quickStats.balloonedMemory"][0]) * 1024 * 1024
+        # the portion of memory, in MB, that is granted to this VM from host memory shared between VMs
+        shared_memory = savefloat(data["summary.quickStats.sharedMemory"][0]) * 1024 * 1024
+        # the portion of memory, in MB, that is granted to this VM from non-shared host memory (may not be set)
+        private_memory = savefloat(data.get("summary.quickStats.privateMemory", [0])[0]) * 1024 * 1024
+    except:
+        raise MKCounterWrapped("Hostsystem did not provide memory information (reason may be high load)")
+
+    perf = [
+        ("host", host_memory_usage),
+        ("guest", guest_memory_usage),
+        ("ballooned", ballooned_memory),
+        ("shared", shared_memory),
+        ("private", private_memory),
+    ]
+
+    message = "Host: %s, Guest: %s, " \
+              "Ballooned: %s, Private: %s, Shared: %s" % \
+        (get_bytes_human_readable(host_memory_usage), \
+         get_bytes_human_readable(guest_memory_usage), get_bytes_human_readable(ballooned_memory), \
+         get_bytes_human_readable(private_memory), get_bytes_human_readable(shared_memory) )
+    return (0, message, perf)
+
+
+check_info['esx_vsphere_vm.mem_usage'] = {
+    "inventory_function"  : inventory_esx_vsphere_vm_mem,
+    "check_function"      : check_esx_vsphere_vm_mem,
+    "service_description" : "ESX Memory",
+    "has_perfdata"        : True
+}
+
+#.
+# .--Name----------------------------------------------------------------.
+# | _ _ |
+# | | \ | | __ _ _ __ ___ ___ |
+# | | \| |/ _` | '_ ` _ \ / _ \ |
+# | | |\ | (_| | | | | | | __/ |
+# | |_| \_|\__,_|_| |_| |_|\___| |
+# | |
+# '----------------------------------------------------------------------'
+
+def inventory_esx_vsphere_vm_name(info):
+    data = esx_vsphere_vm_convert(info).keys()
+    if 'name' in data:
+        return [(None, None)]
+
+def check_esx_vsphere_vm_name(_no_item, _no_params, info):
+    data = esx_vsphere_vm_convert(info)
+    return (0, " ".join(data['name']))
+
+
+check_info['esx_vsphere_vm.name'] = {
+    "inventory_function"  : inventory_esx_vsphere_vm_name,
+    "check_function"      : check_esx_vsphere_vm_name,
+    "service_description" : "ESX Name",
+}
+
+
+
+#.
+# .--Runtime Host--------------------------------------------------------.
+# | ____ _ _ _ _ _ | +# | | _ \ _ _ _ __ | |_(_)_ __ ___ ___ | | | | ___ ___| |_ | +# | | |_) | | | | '_ \| __| | '_ ` _ \ / _ \ | |_| |/ _ \/ __| __| | +# | | _ <| |_| | | | | |_| | | | | | | __/ | _ | (_) \__ \ |_ | +# | |_| \_\\__,_|_| |_|\__|_|_| |_| |_|\___| |_| |_|\___/|___/\__| | +# | | +# +----------------------------------------------------------------------+ +# | | +# '----------------------------------------------------------------------' + +def inventory_esx_vsphere_vm_running_on(info): + data = esx_vsphere_vm_convert(info) + if 'runtime.host' in data: + return [(None, None)] + return [] + +def check_esx_vsphere_vm_running_on(no_item, no_params, info): + data = esx_vsphere_vm_convert(info) + + running_on = data.get("runtime.host") + if not running_on: + return 3, "Runtime host information is missing" + + return 0, "Running on %s" % running_on[0] + +check_info['esx_vsphere_vm.running_on'] = { + "inventory_function" : inventory_esx_vsphere_vm_running_on, + "check_function" : check_esx_vsphere_vm_running_on, + "service_description" : "ESX Hostsystem", +} + + +#. +# .--VM Datastores--------------------------------------------------------. +# | __ ____ __ ____ _ _ | +# | \ \ / / \/ | | _ \ __ _| |_ __ _ ___| |_ ___ _ __ ___ | +# | \ \ / /| |\/| | | | | |/ _` | __/ _` / __| __/ _ \| '__/ _ \ | +# | \ V / | | | | | |_| | (_| | || (_| \__ \ || (_) | | | __/ | +# | \_/ |_| |_| |____/ \__,_|\__\__,_|___/\__\___/|_| \___| | +# | | +# +----------------------------------------------------------------------+ +# | | +# '----------------------------------------------------------------------' + +def inventory_esx_vsphere_vm_datastores(info): + data = esx_vsphere_vm_convert(info) + # Right now we only handle one datastore per vm + if 'config.datastoreUrl' in data: + return [(None, None)] + return [] + +def check_esx_vsphere_vm_datastores(no_item, no_params, info): + data = esx_vsphere_vm_convert(info) + + datastore_urls = data.get("config.datastoreUrl") + if not datastore_urls: + return 3, "Datastore information is missing" + + output = [] + for datastore_url in " ".join(datastore_urls).split("\t"): + datastore_url = datastore_url.split("|") + output_store = [] + + # datastore_url looks like + #['url /vmfs/volumes/513df1e9-12fd7366-ac5a-e41f13e69eaa', + # 'uncommitted 51973812224', + # 'name zmucvm99-lds', + # 'type VMFS', + # 'accessible true', + # 'capacity 578478407680', + # 'freeSpace 68779245568'] + + # Convert datastore_url to dict + datastore_dict = dict(map(lambda x: x.split(" ", 1), datastore_url)) + + capacity = saveint(datastore_dict.get("capacity", 0)) * 1.0 + if capacity: + free_perc = int(datastore_dict.get("freeSpace", 0)) / capacity * 100 + else: + free_perc = 0.0 + + output_store = "Stored on %s (%s/%0.1f%% free)" %\ + (datastore_dict.get("name"), + get_bytes_human_readable(capacity), + free_perc) + output.append(output_store) + return 0, ", ".join(output) + +check_info['esx_vsphere_vm.datastores'] = { + "inventory_function" : inventory_esx_vsphere_vm_datastores, + "check_function" : check_esx_vsphere_vm_datastores, + "service_description" : "ESX Datastores", +} + +#. +# .--GuestTools----------------------------------------------------------. 
+# | ____ _ _____ _ | +# | / ___|_ _ ___ ___| ||_ _|__ ___ | |___ | +# | | | _| | | |/ _ \/ __| __|| |/ _ \ / _ \| / __| | +# | | |_| | |_| | __/\__ \ |_ | | (_) | (_) | \__ \ | +# | \____|\__,_|\___||___/\__||_|\___/ \___/|_|___/ | +# | | +# +----------------------------------------------------------------------+ +# | | +# '----------------------------------------------------------------------' + +def inventory_esx_vsphere_vm_guest_tools(info): + data = esx_vsphere_vm_convert(info) + if 'guest.toolsVersionStatus' in data: + return [(None, None)] + +def check_esx_vsphere_vm_guest_tools(_no_item, params, info): + data = esx_vsphere_vm_convert(info) + + vm_status = data['guest.toolsVersionStatus'][0] + + guest_tools_map = { + "guestToolsCurrent": (0, "VMware Tools is installed, and the version is current"), + "guestToolsNeedUpgrade": (1, "VMware Tools is installed, but the version is not current"), + "guestToolsNotInstalled": (2, "VMware Tools has never been installed"), + "guestToolsUnmanaged": (1, "VMware Tools is installed, but it is not managed by VMWare") + } + state, info = guest_tools_map.get(vm_status, (3, "Unknown status for VMware Tools")) + + if params: + state = params.get(vm_status, state) + + return state, info + +check_info['esx_vsphere_vm.guest_tools'] = { + "inventory_function" : inventory_esx_vsphere_vm_guest_tools, + "check_function" : check_esx_vsphere_vm_guest_tools, + "service_description" : "ESX Guest Tools", + "group" : "vm_guest_tools" +} + + +#. +# .--Heartbeat-----------------------------------------------------------. +# | _ _ _ _ _ | +# | | | | | ___ __ _ _ __| |_| |__ ___ __ _| |_ | +# | | |_| |/ _ \/ _` | '__| __| '_ \ / _ \/ _` | __| | +# | | _ | __/ (_| | | | |_| |_) | __/ (_| | |_ | +# | |_| |_|\___|\__,_|_| \__|_.__/ \___|\__,_|\__| | +# | | +# '----------------------------------------------------------------------' + +# Possible values (this list is taken from the official documentation) +# gray - VMware Tools are not installed or not running. +# red - No heartbeat. Guest operating system may have stopped responding. +# yellow - Intermittent heartbeat. May be due to guest load. +# green - Guest operating system is responding normally. +# +def inventory_esx_vsphere_vm_hb_status(info): + data = esx_vsphere_vm_convert(info) + if 'guestHeartbeatStatus' in data: + return [(None, None)] + +def check_esx_vsphere_vm_hb_status(_no_item, params, info): + data = esx_vsphere_vm_convert(info) + + vm_status = data['guestHeartbeatStatus'][0] + state = 3 + + vm_heartbeat_map = { "gray" : (1, "heartbeat_no_tools"), + "green" : (0, "heartbeat_ok"), + "red" : (2, "heartbeat_missing"), + "yellow" : (1, "heartbeat_intermittend") } + if vm_status in vm_heartbeat_map: + if params: + state = params.get(vm_heartbeat_map.get(vm_status)[1], 3) + else: + state = vm_heartbeat_map.get(vm_status)[0] + if vm_status == 'gray': + return state, "No VMWare Tools installed, outdated or not running" + else: + return state, "Heartbeat status is %s" % vm_status + else: + return 3, "Unknown heartbeat status %s" % vm_status + + + +check_info['esx_vsphere_vm.heartbeat'] = { + "inventory_function" : inventory_esx_vsphere_vm_hb_status, + "check_function" : check_esx_vsphere_vm_hb_status, + "service_description" : "ESX Heartbeat", + "group" : "vm_heartbeat" +} + +#. +# .--CPU-----------------------------------------------------------------. 
+# | ____ ____ _ _ |
+# | / ___| _ \| | | | |
+# | | | | |_) | | | | |
+# | | |___| __/| |_| | |
+# | \____|_| \___/ |
+# | |
+# +----------------------------------------------------------------------+
+# | |
+# '----------------------------------------------------------------------'
+
+# <<<esx_vsphere_vm>>>
+# config.hardware.numCPU 8
+# config.hardware.numCoresPerSocket 2
+# summary.quickStats.overallCpuUsage 8
+
+def inventory_esx_vsphere_vm_cpu(info):
+    data = esx_vsphere_vm_convert(info)
+    if 'summary.quickStats.overallCpuUsage' in data:
+        return [(None, None)]
+
+def check_esx_vsphere_vm_cpu(_no_item, _no_params, info):
+    data = esx_vsphere_vm_convert(info)
+    # VMs that are currently down do not have this entry
+    if 'summary.quickStats.overallCpuUsage' not in data:
+        raise MKCounterWrapped("No information about CPU usage. VM is probably powered off.")
+
+    usage_mhz = int(data['summary.quickStats.overallCpuUsage'][0])
+    cpus = int(data['config.hardware.numCPU'][0])
+    return 0, "demand is %.3f GHz, %d virtual CPUs" % (usage_mhz / 1000.0, cpus), [ ("demand", usage_mhz) ]
+
+
+
+check_info['esx_vsphere_vm.cpu'] = {
+    "inventory_function"  : inventory_esx_vsphere_vm_cpu,
+    "check_function"      : check_esx_vsphere_vm_cpu,
+    "service_description" : "ESX CPU",
+    "has_perfdata"        : True,
+}
+
+
+# .--Snapshots-----------------------------------------------------------.
+# | ____ _ _ |
+# | / ___| _ __ __ _ _ __ ___| |__ ___ | |_ ___ |
+# | \___ \| '_ \ / _` | '_ \/ __| '_ \ / _ \| __/ __| |
+# | ___) | | | | (_| | |_) \__ \ | | | (_) | |_\__ \ |
+# | |____/|_| |_|\__,_| .__/|___/_| |_|\___/ \__|___/ |
+# | |_| |
+# +----------------------------------------------------------------------+
+
+# <<<esx_vsphere_vm>>>
+# snapshot.rootSnapshotList 1 1363596734 poweredOff 20130318_105600_snapshot_LinuxI|2 1413977827 poweredOn LinuxI Testsnapshot
+
+
+def inventory_esx_vsphere_vm_snapshots(info):
+    return [(None, {})]
+
+def check_esx_vsphere_vm_snapshots(_no_item, params, info):
+    data = esx_vsphere_vm_convert(info)
+
+    if 'snapshot.rootSnapshotList' not in data:
+        yield 0, "No snapshots found"
+    else:
+        last_snapshot = None
+        powered_on_snapshot = None
+        snapshots = map(lambda x: x.split(" ", 3), " ".join(data["snapshot.rootSnapshotList"]).split("|"))
+        snapshots = map(lambda x: (int(x[0]), int(x[1]), x[2], x[3]), snapshots)
+        yield 0, "Number of Snapshots %d" % len(snapshots)
+
+        if len(snapshots) > 0:
+            last_snapshot = snapshots[0]
+            for snapshot in snapshots:
+                if snapshot[1] > last_snapshot[1]:
+                    last_snapshot = snapshot
+                if snapshot[2] == "poweredOn":
+                    powered_on_snapshot = snapshot
+
+            yield 0, "Powered On: %s" % (powered_on_snapshot and powered_on_snapshot[3] or "None")
+
+            perfdata = []
+            snapshot_age = time.time() - last_snapshot[1]
+            if params.get("age"):
+                warn, crit = params["age"]
+                if snapshot_age > crit:
+                    yield 2, "Snapshot is older than %s" % get_age_human_readable(snapshot_age)
+                elif snapshot_age > warn:
+                    yield 1, "Snapshot is older than %s" % get_age_human_readable(snapshot_age)
+                perfdata = [("age", snapshot_age, warn, crit)]
+            else:
+                perfdata = [("age", snapshot_age)]
+
+            yield 0, "Last Snapshot: %s %s" % (last_snapshot[3],
+                     time.strftime("%D %H:%M", time.localtime(last_snapshot[1]))),\
+                     perfdata
+
+
+
+check_info['esx_vsphere_vm.snapshots'] = {
+    "inventory_function"  : inventory_esx_vsphere_vm_snapshots,
+    "check_function"      : check_esx_vsphere_vm_snapshots,
+    "service_description" : "ESX Snapshots",
+    "group"               : "vm_snapshots",
+    "has_perfdata"        : True,
+}
diff -Nru check-mk-1.2.2p3/esx_vsphere_vm.cpu check-mk-1.2.6p12/esx_vsphere_vm.cpu
--- check-mk-1.2.2p3/esx_vsphere_vm.cpu 1970-01-01 00:00:00.000000000 +0000
+++ check-mk-1.2.6p12/esx_vsphere_vm.cpu 2015-06-24 09:48:36.000000000 +0000
@@ -0,0 +1,19 @@
+title: Virtual Machine under ESX: number of virtual CPUs and demanded CPU GHz
+agents: vsphere
+catalog: os/kernel
+license: GPL
+distribution: check_mk
+description:
+ For a virtual machine running on ESX this check reports the number of
+ virtual CPUs assigned to it and the currently demanded CPU GHz.
+
+ The check always returns {OK} state.
+
+ See "Monitoring VMWare ESX with Check_MK" in the online documentation
+ as well.
+
+perfdata:
+ One value is returned: The currently demanded CPU MHz.
+
+inventory:
+ On each VM one check is generated.
diff -Nru check-mk-1.2.2p3/esx_vsphere_vm.datastores check-mk-1.2.6p12/esx_vsphere_vm.datastores
--- check-mk-1.2.2p3/esx_vsphere_vm.datastores 1970-01-01 00:00:00.000000000 +0000
+++ check-mk-1.2.6p12/esx_vsphere_vm.datastores 2015-06-24 09:48:36.000000000 +0000
@@ -0,0 +1,17 @@
+title: Virtual Machine under ESX: Name and disk free of datastores
+agents: vsphere
+catalog: os/misc
+license: GPL
+distribution: check_mk
+description:
+ This check provides information about any datastores the virtualmachine is running on.
+ Reported fields are the name, the total capacity and the percentage of free space.
+
+ The check returns {OK}, unless the datastore information is missing.
+ In this case it returns {UNKNOWN}.
+
+ See "Monitoring VMWare ESX with Check_MK" in the online documentation
+ as well.
+
+inventory:
+ On each VM one check is generated.
diff -Nru check-mk-1.2.2p3/esx_vsphere_vm.guest_tools check-mk-1.2.6p12/esx_vsphere_vm.guest_tools
--- check-mk-1.2.2p3/esx_vsphere_vm.guest_tools 1970-01-01 00:00:00.000000000 +0000
+++ check-mk-1.2.6p12/esx_vsphere_vm.guest_tools 2015-06-24 09:48:36.000000000 +0000
@@ -0,0 +1,28 @@
+title: Virtual Machine under ESX: Guest Tools version status
+agents: vsphere
+catalog: os/ps
+license: GPL
+distribution: check_mk
+description:
+ For a virtual machine running on ESX this check reports the guest tools
+ version status (queried from vsphere agent).
+
+ This check returns:
+
+ {OK} if the guest tools version is up-to-date
+
+ {WARN} if the guest tools need an upgrade or are not managed by VMware
+
+ {CRIT} if the guest tools have never been installed
+
+ The actual state of this check can be modified by parameters
+
+inventory:
+ On each VM one check is generated.
+
+[parameters]
+parameters (dict): A dictionary with the following keys, that define the state of the check in
+ the various possible error conditions. Use {0} for OK, {1} for WARN, {2} for CRIT and {3} for
+ UNKNOWN. The default settings are: {"guestToolsUnmanaged"}: 1, {"guestToolsNeedUpgrade"}: 1,
+ {"guestToolsCurrent"}: 0, {"guestToolsNotInstalled"}: 2
+
diff -Nru check-mk-1.2.2p3/esx_vsphere_vm.heartbeat check-mk-1.2.6p12/esx_vsphere_vm.heartbeat
--- check-mk-1.2.2p3/esx_vsphere_vm.heartbeat 1970-01-01 00:00:00.000000000 +0000
+++ check-mk-1.2.6p12/esx_vsphere_vm.heartbeat 2015-06-24 09:48:36.000000000 +0000
@@ -0,0 +1,36 @@
+title: Virtual Machine under ESX: Heartbeat
+agents: vsphere
+catalog: os/ps
+license: GPL
+distribution: check_mk
+description:
+ For a virtual machine running on ESX this check reports the heartbeat
+ status (queried from vsphere agent).
+
+ This check returns:
+
+ {OK} if the guest operating system is responding normally
+ (vsphere state: green)
+
+ {WARN} on intermittent heartbeat. May be due to guest load.
+ (vsphere state: yellow)
+ or if VMware Tools are not installed or not running.
+ (vsphere state: gray)
+
+ {CRIT} if there is no heartbeat. Guest operating system may have stopped responding.
+ (vsphere state: red)
+
+ See "Monitoring VMWare ESX with Check_MK" in the online documentation
+ as well.
+
+inventory:
+ On each VM one check is generated.
+
+[parameters]
+parameters (dict): A dictionary with the following keys
+{"heartbeat_missing"}: vsphere state: red (see above)
+{"heartbeat_intermittend"}: vsphere state: yellow (see above)
+{"heartbeat_no_tools"}: vsphere state: gray (see above)
+{"heartbeat_ok"}: vsphere state: green (see above)
+
+ The value of each key represents the alert level: 0(OK), 1(WARN), 2(CRIT), 3(UNKNOWN)
diff -Nru check-mk-1.2.2p3/esx_vsphere_vm.mem_usage check-mk-1.2.6p12/esx_vsphere_vm.mem_usage
--- check-mk-1.2.2p3/esx_vsphere_vm.mem_usage 1970-01-01 00:00:00.000000000 +0000
+++ check-mk-1.2.6p12/esx_vsphere_vm.mem_usage 2015-06-24 09:48:36.000000000 +0000
@@ -0,0 +1,20 @@
+title: Virtual Machine under ESX: used Memory (RAM)
+agents: vsphere
+catalog: os/kernel
+license: GPL
+distribution: check_mk
+description:
+ The check reports used memory (RAM) for {host}, {guest}, {ballooned}, {private}
+ and {shared} memory.
+
+ The check always returns {OK} state.
+
+ See "Monitoring VMWare ESX with Check_MK" in the online documentation
+ as well.
+
+perfdata:
+ One value for each of {host}, {guest}, {ballooned}, {private}
+ and {shared} memory.
+
+inventory:
+ On each VM one check is generated.
diff -Nru check-mk-1.2.2p3/esx_vsphere_vm.name check-mk-1.2.6p12/esx_vsphere_vm.name
--- check-mk-1.2.2p3/esx_vsphere_vm.name 1970-01-01 00:00:00.000000000 +0000
+++ check-mk-1.2.6p12/esx_vsphere_vm.name 2015-06-24 09:48:36.000000000 +0000
@@ -0,0 +1,15 @@
+title: Virtual Machine under ESX: Name of the VM
+agents: vsphere
+catalog: os/misc
+license: GPL
+distribution: check_mk
+description:
+ This check reports the name the virtual machine has within ESX.
+
+ The check always returns {OK} state.
+
+ See "Monitoring VMWare ESX with Check_MK" in the online documentation
+ as well.
+
+inventory:
+ On each VM one check is generated.
diff -Nru check-mk-1.2.2p3/esx_vsphere_vm.running_on check-mk-1.2.6p12/esx_vsphere_vm.running_on
--- check-mk-1.2.2p3/esx_vsphere_vm.running_on 1970-01-01 00:00:00.000000000 +0000
+++ check-mk-1.2.6p12/esx_vsphere_vm.running_on 2015-06-24 09:48:36.000000000 +0000
@@ -0,0 +1,15 @@
+title: Virtual Machine under ESX: Hostsystem the VM is running on
+agents: vsphere
+catalog: os/misc
+license: GPL
+distribution: check_mk
+description:
+ This check provides information about the hostsystem the VM is running on.
+
+ The check returns {OK}, unless the hostsystem is missing.
+ In this case it returns {UNKNOWN}.
+
+ See "Monitoring VMWare ESX with Check_MK" in the online documentation as well.
+
+inventory:
+ On each VM one check is generated.
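A small sketch of the heartbeat parameter lookup documented above (the helper heartbeat_state is invented for illustration; vm_heartbeat_map is copied from the check):

    vm_heartbeat_map = {"gray":   (1, "heartbeat_no_tools"),
                        "green":  (0, "heartbeat_ok"),
                        "red":    (2, "heartbeat_missing"),
                        "yellow": (1, "heartbeat_intermittend")}

    def heartbeat_state(color, params):
        default_state, param_key = vm_heartbeat_map[color]
        if params:
            return params.get(param_key, 3)  # as in the check: unset keys -> UNKNOWN
        return default_state

    # Treat missing VMware Tools (gray) as CRIT instead of the default WARN:
    params = {"heartbeat_no_tools": 2, "heartbeat_ok": 0,
              "heartbeat_intermittend": 1, "heartbeat_missing": 2}
    assert heartbeat_state("gray", params) == 2
    assert heartbeat_state("green", None) == 0

Note that as soon as a rule supplies any parameters, colors whose key the rule leaves unset evaluate to UNKNOWN rather than to the built-in default, so a rule should define all four keys.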
diff -Nru check-mk-1.2.2p3/esx_vsphere_vm.snapshots check-mk-1.2.6p12/esx_vsphere_vm.snapshots --- check-mk-1.2.2p3/esx_vsphere_vm.snapshots 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/esx_vsphere_vm.snapshots 2015-06-24 09:48:36.000000000 +0000 @@ -0,0 +1,20 @@ +title: Virtual Machine under ESX: Snapshot settings +agents: vsphere +catalog: os/kernel +license: GPL +distribution: check_mk +description: + For a virtual machine running on ESX this check reports the number of + snapshots, the currently running snapshot and the age of the latest snapshot + +perfdata: + One value is returned: The age of the latest snapshot + +inventory: + On each VM one check is generated if there is snapshot information available + +[parameters] +parameters (dict): The dictionary supports the following key + + {"age"}: Warning and critical levels for the age of the latest snapshot in seconds + diff -Nru check-mk-1.2.2p3/etherbox check-mk-1.2.6p12/etherbox --- check-mk-1.2.2p3/etherbox 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/etherbox 2015-09-21 10:59:54.000000000 +0000 @@ -0,0 +1,208 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. 
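To make the {"age"} parameter of the snapshots check above concrete, a hedged sketch of the level evaluation (the helper snapshot_age_state is invented; the shipped check yields subresults instead of returning a tuple):

    import time

    def snapshot_age_state(creation_time, params):
        # age of the newest snapshot in seconds, compared against (warn, crit)
        age = time.time() - creation_time
        warn, crit = params.get("age", (None, None))
        if crit is not None and age > crit:
            return 2, age
        if warn is not None and age > warn:
            return 1, age
        return 0, age

    # warn after one day, crit after two; a three day old snapshot is CRIT:
    state, age = snapshot_age_state(time.time() - 3 * 86400, {"age": (86400, 172800)})
    assert state == 2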
+ +# The etherbox supports the following sensor types on each port +# sensor types +# 0 = no sensor +# 1 = temperature - implemented +# 2 = brightness +# 3 = humidity - implemented +# 4 = switch contact - implemented +# 5 = voltage detector +# 6 = smoke sensor - implemented + +# Note: The short contact config option in the etherbox is of type switch contact +# The short contact status is set for 15 seconds after a button press + +# TODO: insert missing snmp output + +def etherbox_convert(info): + sensor_data = [] + for i in range(0, len(info[1])): + sensor_data.append((info[1][i][1], info[2][i][1], info[3][i][1], info[4][i][1] )) + return sensor_data + +def inventory_etherbox(info, req_sensor_type): + inventory = [] + sensor_data = etherbox_convert(info) + for index, name, sensor_type, value in sensor_data: + # Ignore not connected Temperature Sensors + if sensor_type == '1' and value == '0': + continue + if sensor_type == req_sensor_type: + inventory.append( ("%s.%s" % (index, sensor_type), None) ) + return inventory + +def etherbox_get_sensor(item, item_type, info): + sensor_data = etherbox_convert(info) + item_index, item_type = item.split(".") + for index, name, sensor_type, value in sensor_data: + if index == item_index: + if sensor_type != item_type: + raise Exception("Sensor type changed %s" % item) + return name, value + raise Exception("Sensor not found") + +def etherbox_scan(oid): + return oid(".1.3.6.1.4.1.14848.2.1.1.1.0").startswith("Version") + +etherbox_info = [ + ( ".1.3.6.1.4.1.14848.2.1.1.3", [ '' ]), # temperature unit + ( ".1.3.6.1.4.1.14848.2.1.2.1.1", [ OID_END, '' ]), # index + ( ".1.3.6.1.4.1.14848.2.1.2.1.2", [ OID_END, '' ]), # name + ( ".1.3.6.1.4.1.14848.2.1.2.1.3", [ OID_END, '' ]), # type + ( ".1.3.6.1.4.1.14848.2.1.2.1.5", [ OID_END, '' ]), # value * 10 + ] + +def check_etherbox_temp(item, params, info): + try: + name, value = etherbox_get_sensor(item, "1", info) + except Exception, error: + return 3, error.message + + uom = { "0": "°C", "1": "°F", "2": "K" }[info[0][0][0]] + state = 0 + + temp = int(value) / 10.0 + levels_text = "" + if params: + warn, crit = params + levels_text = " (warn/crit at %.1f/%.1f %s)" % (warn, crit, uom) + if temp > crit: + state = 2 + elif temp > warn: + state = 1 + else: + warn, crit = None, None + + perfdata = [ ("temp", temp, warn, crit) ] + infotext = "%s: Temperature %.1f %s %s" % (name, temp, uom, levels_text) + return (state, infotext, perfdata) + +check_info["etherbox.temp"] = { + "check_function" : check_etherbox_temp, + "inventory_function" : lambda x: inventory_etherbox(x, "1"), + "service_description" : "Sensor %s", + "has_perfdata" : True, + "group" : "room_temperature", + "snmp_scan_function" : etherbox_scan, + "snmp_info" : etherbox_info, +} + +def check_etherbox_humidity(item, params, info): + try: + name, value = etherbox_get_sensor(item, "3", info) + except Exception, error: + return 3, error.message + + state = 0 + temp = int(value) / 10.0 + levels_info = "" + if params: + crit_low, warn_low, warn_high, crit_high = params + levels_info = " (low levels at %.1f/%.1f %%RH / high levels at %.1f/%.1f %%RH )" %\ + (crit_low, warn_low, warn_high, crit_high) + if temp <= crit_low: + state = 2 + elif temp >= crit_high: + state = 2 + elif temp <= warn_low: + state = 1 + elif temp >= warn_high: + state = 1 + else: + crit_low, warn_low, warn_high, crit_high = None, None, None, None + + perfdata = [ ("humidity", temp, crit_low, warn_low, warn_high, crit_high) ] + infotext = "%s: Humidity %.1f %%RH %s" % (name, temp, 
levels_info) + return (state, infotext, perfdata) + +check_info["etherbox.humidity"] = { + "check_function" : check_etherbox_humidity, + "inventory_function" : lambda x: inventory_etherbox(x, "3"), + "service_description" : "Sensor %s", + "has_perfdata" : True, + "group" : "humidity", + "snmp_scan_function" : etherbox_scan, + "snmp_info" : etherbox_info, +} + + +def check_etherbox_switch_contact(item, params, info): + try: + name, value = etherbox_get_sensor(item, "4", info) + except Exception, error: + return 3, error.message + + state = 0 + perfdata = [ ("switch_contact", value) ] + switch_state = value == "1000" and "open" or "closed" + + state = 0 + extra_info = "" + if params and params != "ignore": + if switch_state != params: + state = 2 + extra_info = ", should be %s" % params + + infotext = "%s: Switch contact %s%s" % (name, switch_state, extra_info) + return (state, infotext, perfdata) + +check_info["etherbox.switch"] = { + "check_function" : check_etherbox_switch_contact, + "inventory_function" : lambda x: inventory_etherbox(x, "4"), + "service_description" : "Sensor %s", + "group" : "switch_contact", + "has_perfdata" : True, + "snmp_scan_function" : etherbox_scan, + "snmp_info" : etherbox_info, +} + + +def check_etherbox_smoke(item, no_params, info): + try: + name, value = etherbox_get_sensor(item, "6", info) + except Exception, error: + return 3, error.message + + state = 0 + perfdata = [ ("smoke", value) ] + extra_info = "" + if value != "0": + extra_info = " - Smoke alarm!" + state = 2 + + return (state, "%s: Smoke sensor%s" % (name, extra_info), perfdata) + +check_info["etherbox.smoke"] = { + "check_function" : check_etherbox_smoke, + "inventory_function" : lambda x: inventory_etherbox(x, "6"), + "service_description" : "Sensor %s", + "has_perfdata" : True, + "snmp_scan_function" : etherbox_scan, + "snmp_info" : etherbox_info, +} + diff -Nru check-mk-1.2.2p3/etherbox.humidity check-mk-1.2.6p12/etherbox.humidity --- check-mk-1.2.2p3/etherbox.humidity 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/etherbox.humidity 2015-06-24 09:48:36.000000000 +0000 @@ -0,0 +1,25 @@ +title: Etherbox / MessPC: Sensor Humidity +catalog: hw/environment/betternet +agents: snmp +license: GPL +distribution: check_mk +description: + This check monitors the value of an etherbox humidity sensor + +item: + The port number of the sensor followed by the sensor type + + Example: "2.3" A humidity sensor(type 3) on port 2 + +perfdata: + The current humidity in percent, lower crit, lower warn, upper warn, upper crit + +inventory: + The inventory creates a service for each humidity sensor found + +[parameters] +critical low (int): lower border for triggering critical level +warning low (int): lower border for warning level +warning high (int): upper border for triggering warning level +critical high (int): upper border for critical level + diff -Nru check-mk-1.2.2p3/etherbox.smoke check-mk-1.2.6p12/etherbox.smoke --- check-mk-1.2.2p3/etherbox.smoke 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/etherbox.smoke 2015-06-24 09:48:36.000000000 +0000 @@ -0,0 +1,20 @@ +title: Etherbox / MessPC: Sensor Smoke +catalog: hw/environment/betternet +agents: snmp +license: GPL +distribution: check_mk +description: + This check monitors the state of the etherbox smoke sensor + +item: + The port number of the sensor followed by the sensor type + + Example: "2.6" A smoke sensor(type 6) on port 2 + +perfdata: + The current state of the smoke sensor + +inventory: + The inventory creates a service for each 
smoke sensor found
+
+
diff -Nru check-mk-1.2.2p3/etherbox.switch check-mk-1.2.6p12/etherbox.switch
--- check-mk-1.2.2p3/etherbox.switch 1970-01-01 00:00:00.000000000 +0000
+++ check-mk-1.2.6p12/etherbox.switch 2015-06-24 09:48:36.000000000 +0000
@@ -0,0 +1,24 @@
+title: Etherbox / MessPC: Sensor Switch Contact
+catalog: hw/environment/betternet
+agents: snmp
+license: GPL
+distribution: check_mk
+description:
+ This check monitors the state of the etherbox switch contact sensor.
+ The check reports a CRIT only when the current state differs from the
+ expected_state ("open", "closed") specified by the parameters.
+ Without parameters the check is always OK.
+
+item:
+ The port number of the sensor followed by the sensor type
+
+ Example: "2.4" A switch contact sensor(type 4) on port 2
+
+perfdata:
+ The current state of the switch contact
+
+inventory:
+ The inventory creates a service for each switch contact sensor found
+
+[parameters]
+expected_state (string): the expected state of this switch: "open", "closed" or "ignore"
diff -Nru check-mk-1.2.2p3/etherbox.temp check-mk-1.2.6p12/etherbox.temp
--- check-mk-1.2.2p3/etherbox.temp 1970-01-01 00:00:00.000000000 +0000
+++ check-mk-1.2.6p12/etherbox.temp 2015-09-21 10:59:54.000000000 +0000
@@ -0,0 +1,26 @@
+title: Etherbox / MessPC: Sensor Temperature
+catalog: hw/environment/betternet
+agents: snmp
+license: GPL
+distribution: check_mk
+description:
+ This check monitors the value of an etherbox temperature sensor
+
+item:
+ The port number of the sensor followed by the sensor type
+
+ Example: "2.1" A temperature sensor(type 1) on port 2
+
+perfdata:
+ The current temperature value and its warn/crit limits
+
+inventory:
+ The inventory creates a service for each temperature sensor found
+
+[parameters]
+warning (int): temperature in degrees at which the check goes warning
+critical (int): level for critical temperature
+
+ Please note that the sensor can be configured in degrees Celsius, Fahrenheit
+ and Kelvin.
+
diff -Nru check-mk-1.2.2p3/export_software_csv check-mk-1.2.6p12/export_software_csv
--- check-mk-1.2.2p3/export_software_csv 1970-01-01 00:00:00.000000000 +0000
+++ check-mk-1.2.6p12/export_software_csv 2015-06-24 09:48:37.000000000 +0000
@@ -0,0 +1,72 @@
+#!/usr/bin/python
+# -*- encoding: utf-8; py-indent-offset: 4 -*-
+# +------------------------------------------------------------------+
+# | ____ _ _ __ __ _ __ |
+# | / ___| |__ ___ ___| | __ | \/ | |/ / |
+# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / |
+# | | |___| | | | __/ (__| < | | | | . \ |
+# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ |
+# | |
+# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de |
+# +------------------------------------------------------------------+
+#
+# This file is part of Check_MK.
+# The official homepage is at http://mathias-kettner.de/check_mk.
+#
+# check_mk is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by
+# the Free Software Foundation in version 2. check_mk is distributed
+# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with-
+# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE. See the GNU General Public License for more de-
+# tails. You should have received a copy of the GNU General Public
+# License along with GNU Make; see the file COPYING. If not, write
+# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
+# Boston, MA 02110-1301 USA.
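To illustrate the temperature parameters documented above: the sensor delivers its reading via SNMP as ten times the value in the configured unit, and check_etherbox_temp compares the scaled value against the (warn, crit) tuple. A standalone sketch of that logic (the helper temp_state is invented):

    def temp_state(raw_value, params):
        temp = int(raw_value) / 10.0  # SNMP value is unit * 10
        if params:
            warn, crit = params
            if temp > crit:
                return 2, temp
            elif temp > warn:
                return 1, temp
        return 0, temp

    assert temp_state("421", (35, 40)) == (2, 42.1)
    assert temp_state("321", (35, 40)) == (0, 32.1)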
+
+def inv_export_software_csv(hostname, params, tree):
+    separator = params.get("separator", ";")
+    quotes = params.get("quotes", None) # Also: "single", "double"
+    headers = params.get("headers", False)
+
+    if quotes == "single":
+        quote = lambda s: "'" + s.replace("'", "\\'") + "'"
+    elif quotes == "double":
+        quote = lambda s: '"' + s.replace('"', '\\"') + '"'
+    else:
+        quote = lambda s: s
+
+    try:
+        packages = tree["software"]["packages"]
+    except KeyError:
+        return # No software information available
+
+    filename = params["filename"].replace("<HOST>", hostname)
+    if not filename.startswith("/"):
+        filename = var_dir + "/" + filename
+    dirname = filename.rsplit("/", 1)[0]
+    if not os.path.exists(dirname):
+        try:
+            os.makedirs(dirname)
+        except Exception, e:
+            if opt_debug:
+                raise
+            raise MKGeneralException("Cannot create missing directory %s: %s" % (
+                dirname, e))
+
+    out = file(filename, "w")
+    keys = [ "name", "version", "arch" ]
+
+    if headers:
+        out.write(separator.join(map(quote, keys)) + "\n")
+
+    for package in packages:
+        line = []
+        for key in keys:
+            line.append(quote(package.get(key, "")))
+        out.write("%s\n" % separator.join(line))
+
+
+inv_export['software_csv'] = {
+    "export_function" : inv_export_software_csv,
+}
diff -Nru check-mk-1.2.2p3/f5_bigip_chassis_temp check-mk-1.2.6p12/f5_bigip_chassis_temp
--- check-mk-1.2.2p3/f5_bigip_chassis_temp 1970-01-01 00:00:00.000000000 +0000
+++ check-mk-1.2.6p12/f5_bigip_chassis_temp 2015-06-24 09:48:36.000000000 +0000
@@ -0,0 +1,51 @@
+#!/usr/bin/python
+# -*- encoding: utf-8; py-indent-offset: 4 -*-
+# +------------------------------------------------------------------+
+# | ____ _ _ __ __ _ __ |
+# | / ___| |__ ___ ___| | __ | \/ | |/ / |
+# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / |
+# | | |___| | | | __/ (__| < | | | | . \ |
+# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ |
+# | |
+# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de |
+# +------------------------------------------------------------------+
+#
+# This file is part of Check_MK.
+# The official homepage is at http://mathias-kettner.de/check_mk.
+#
+# check_mk is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by
+# the Free Software Foundation in version 2. check_mk is distributed
+# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with-
+# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE. See the GNU General Public License for more de-
+# tails. You should have received a copy of the GNU General Public
+# License along with GNU Make; see the file COPYING. If not, write
+# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
+# Boston, MA 02110-1301 USA.
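To make the exporter above concrete: with params {"separator": ";", "quotes": "double", "headers": True} it writes a header row plus one quoted line per package. A standalone sketch with invented sample data:

    packages = [{"name": "bash", "version": "4.2", "arch": "amd64"},
                {"name": "sed", "version": "4.1"}]  # missing keys become ""

    quote = lambda s: '"' + s.replace('"', '\\"') + '"'
    keys = ["name", "version", "arch"]

    lines = [";".join(map(quote, keys))]
    for package in packages:
        lines.append(";".join([quote(package.get(key, "")) for key in keys]))

    print "\n".join(lines)
    # "name";"version";"arch"
    # "bash";"4.2";"amd64"
    # "sed";"4.1";""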
+ +f5_bigip_chassis_temp_default_params = (35, 40) + + +def inventory_f5_bigip_chassis_temp(info): + for line in info: + yield line[0], "f5_bigip_chassis_temp_default_params" + + +def check_f5_bigip_chassis_temp(item, params, info): + for name, temp in info: + if name == item: + return check_temperature(int(temp), params) + + +check_info["f5_bigip_chassis_temp"] = { + 'check_function': check_f5_bigip_chassis_temp, + 'inventory_function': inventory_f5_bigip_chassis_temp, + 'service_description': 'Temperature Chassis %s', + 'has_perfdata': True, + 'group': 'room_temperature', + 'snmp_info': ( '.1.3.6.1.4.1.3375.2.1.3.2.3.2.1', [1, 2] ), + 'snmp_scan_function': \ + lambda oid: '.1.3.6.1.4.1.3375.2' in oid(".1.3.6.1.2.1.1.2.0") and "big-ip" in oid(".1.3.6.1.4.1.3375.2.1.4.1.0").lower(), + 'includes': [ 'temperature.include' ], +} diff -Nru check-mk-1.2.2p3/f5_bigip_cluster check-mk-1.2.6p12/f5_bigip_cluster --- check-mk-1.2.2p3/f5_bigip_cluster 2013-11-05 09:23:07.000000000 +0000 +++ check-mk-1.2.6p12/f5_bigip_cluster 2015-06-24 09:48:36.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. @@ -35,8 +35,8 @@ def inventory_f5_bigip_cluster(info): # run inventory unless we found a device in unconfigured state - # don't need to loop over the input as there's only one status. - if len(info) == 1 and not info[0][0].startswith("-1"): + # don't need to loop over the input as there's only one status + if len(info) == 1 and not ( info[0][0].startswith("-1") or info[0][0] == '' ): return [(None, None)] return [] @@ -55,25 +55,25 @@ # 3 both systems outdated, crit (config split brain) if statusid == "0": - return (0, "OK - " + statustxt) + return (0, statustxt) elif statusid == "-1": - return (2, "CRIT - " + statustxt) + return (2, statustxt) elif statusid == "1" or statusid == "2": - return (1, "WARN - " + statustxt) + return (1, statustxt) elif statusid == "3": - return (2, "CRIT - " + statustxt) + return (2, statustxt) else: - return (3, "UNKNOWN - unexpected Output from SNMP Agent") + return (3, "unexpected output from SNMP Agent") - -snmp_info["f5_bigip_cluster"] = \ - ( ".1.3.6.1.4.1.3375.2.1.1.1.1", [ - 6, # sysAttrConfigsyncState - ]) - - -check_info["f5_bigip_cluster"] = (check_f5_bigip_cluster, "Config Sync status", 0, inventory_f5_bigip_cluster ) - - -snmp_scan_functions["f5_bigip_cluster"] = \ - lambda oid: '.1.3.6.1.4.1.3375.2' in oid(".1.3.6.1.2.1.1.2.0") and "big-ip" in oid(".1.3.6.1.4.1.3375.2.1.4.1.0").lower() +check_info["f5_bigip_cluster"] = { + 'check_function': check_f5_bigip_cluster, + 'inventory_function': inventory_f5_bigip_cluster, + 'service_description': 'Config Sync status', + 'snmp_info': ('.1.3.6.1.4.1.3375.2.1.1.1.1', [ + 6 # sysAttrConfigsyncState + ]), + 'snmp_scan_function': \ + lambda oid: '.1.3.6.1.4.1.3375.2' in oid(".1.3.6.1.2.1.1.2.0") \ + and "big-ip" in oid(".1.3.6.1.4.1.3375.2.1.4.1.0").lower() \ + and int(oid(".1.3.6.1.4.1.3375.2.1.4.2.0").split('.')[0]) < 11, +} diff -Nru check-mk-1.2.2p3/f5_bigip_cluster_v11 check-mk-1.2.6p12/f5_bigip_cluster_v11 --- check-mk-1.2.2p3/f5_bigip_cluster_v11 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/f5_bigip_cluster_v11 2015-06-24 09:48:36.000000000 +0000 @@ -0,0 +1,78 @@ +#!/usr/bin/python +# -*- encoding: utf-8; 
py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. + + +# Agent / MIB output +# see: .1.3.6.1.4.1.3375.2.1.14.1.1.0 +# .1.3.6.1.4.1.3375.2.1.14.1.2.0 +# F5-BIGIP-SYSTEM-MIB::sysCmSyncStatusId +# F5-BIGIP-SYSTEM-MIB::sysCmSyncStatusStatus + +# F5 nodes need to be ntp synced otherwise status reports might be wrong. + + +f5_bigip_cluster_v11_states = { + '0': ( "Unknown", 3), + '1': ( "Syncing", 0), + '2': ( "Need Manual Sync", 1), + '3': ( "In Sync", 0), + '4': ( "Sync Failed", 2), + '5': ( "Sync Disconnected", 2), + '6': ( "Standalone", 2), + '7': ( "Awaiting Initial Sync", 1), + '8': ( "Incompatible Version", 2), + '9': ( "Partial Sync", 2), +} + + +def inventory_f5_bigip_cluster_v11(info): + if len(info) == 1: + return [(None, None)] + + +def check_f5_bigip_cluster_v11(_no_item, _no_params, info): + statusid, statustxt = info[0] + statename, state = f5_bigip_cluster_v11_states[statusid] + infotext = statename + if statename != statustxt: + infotext += ' - ' + statustxt + return state, infotext + + +check_info["f5_bigip_cluster_v11"] = { + 'check_function': check_f5_bigip_cluster_v11, + 'inventory_function': inventory_f5_bigip_cluster_v11, + 'service_description': 'Config Sync Status', + 'snmp_info': ('.1.3.6.1.4.1.3375.2.1.14.1', [ + "1.0", # sysCmSyncStatusId + "2.0" # sysCmSyncStatusStatus + ]), + + 'snmp_scan_function': \ + lambda oid: '.1.3.6.1.4.1.3375.2' in oid(".1.3.6.1.2.1.1.2.0") \ + and "big-ip" in oid(".1.3.6.1.4.1.3375.2.1.4.1.0").lower() \ + and int(oid(".1.3.6.1.4.1.3375.2.1.4.2.0").split('.')[0]) >= 11, +} diff -Nru check-mk-1.2.2p3/f5_bigip_conns check-mk-1.2.6p12/f5_bigip_conns --- check-mk-1.2.2p3/f5_bigip_conns 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/f5_bigip_conns 2015-06-24 09:48:36.000000000 +0000 @@ -0,0 +1,105 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. 
+#
+# check_mk is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by
+# the Free Software Foundation in version 2. check_mk is distributed
+# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with-
+# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE. See the GNU General Public License for more de-
+# tails. You should have received a copy of the GNU General Public
+# License along with GNU Make; see the file COPYING. If not, write
+# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
+# Boston, MA 02110-1301 USA.
+
+
+# {iso(1) identified-organization(3) dod(6) internet(1) private(4) enterprise(1) 3375 bigipTrafficMgmt(2) bigipSystem(1) sysGlobals(1) sysGlobalStats(2) sysGlobalStat(1) sysStatClientCurConns(8)}
+
+# {iso(1) identified-organization(3) dod(6) internet(1) private(4) enterprise(1) 3375 bigipTrafficMgmt(2) bigipSystem(1) sysGlobals(1) sysGlobalStats(2) sysGlobalClientSslStat(9) sysClientsslStatCurConns(2)}
+
+factory_settings["f5_bigip_conns_default_levels"] = {
+    "conns"     : (25000, 30000),
+    "ssl_conns" : (25000, 30000),
+}
+
+def inventory_f5_bigip_conns(info):
+    if info:
+        return [ ( None, {} ) ]
+
+def check_f5_bigip_conns(item, params, info):
+    type_list = { 'conns':     ('Connections', 0),
+                  'ssl_conns': ('SSL Connections', 1),
+                }
+    perfdata = []
+    infotext = ""
+    state = 0
+
+    separator = ""
+    for typ, values in type_list.iteritems():
+        param = params.get(typ)
+        desc  = values[0]
+        index = values[1]
+        conns = int(info[0][index])
+        infotext += separator
+        separator = " - "
+        if type(param) == tuple:
+            warn, crit = param
+            perfdata.append( (typ, conns, warn, crit) )
+            if conns >= crit:
+                sstate = 2
+                sym = "(!!)"
+            elif conns >= warn:
+                sstate = 1
+                sym = "(!)"
+            else:
+                sstate = 0
+                sym = ""
+            infotext += "%d %s%s (%d/%d)" % (conns, desc, sym, warn, crit)
+        else:
+            warn, crit = None, None
+            perf = ( (typ, conns, warn, crit) )
+            sstate, text, extraperf = check_levels(conns, typ, param)
+            if sstate == 2:
+                sym = "(!!)"
+            elif sstate == 1:
+                sym = "(!)"
+            else:
+                sym = ""
+            perfdata.append(perf)
+            if len(extraperf) > 0:
+                perfdata.append(extraperf[0])
+            infotext += "%d %s%s" % (conns, desc, sym)
+            if text:
+                infotext += ", " + text
+        if state < sstate:
+            state = sstate
+
+    return (state, infotext, perfdata)
+
+check_info["f5_bigip_conns"] = {
+    'check_function'          : check_f5_bigip_conns,
+    'inventory_function'      : inventory_f5_bigip_conns,
+    'service_description'     : 'Open Connections',
+    'has_perfdata'            : True,
+    'group'                   : 'f5_connections',
+    'default_levels_variable' : 'f5_bigip_conns_default_levels',
+    'snmp_info'               : ( '.1.3.6.1.4.1.3375.2.1.1.2', [
+                                      '1.8', # sysStatClientCurConns
+                                      '9.2', # sysClientsslStatCurConns
+                                ] ),
+    'snmp_scan_function'      : lambda oid: '.1.3.6.1.4.1.3375.2' in \
+        oid(".1.3.6.1.2.1.1.2.0") and "big-ip" in \
+        oid(".1.3.6.1.4.1.3375.2.1.4.1.0").lower(),
+}
diff -Nru check-mk-1.2.2p3/f5_bigip_cpu_temp check-mk-1.2.6p12/f5_bigip_cpu_temp
--- check-mk-1.2.2p3/f5_bigip_cpu_temp 1970-01-01 00:00:00.000000000 +0000
+++ check-mk-1.2.6p12/f5_bigip_cpu_temp 2015-06-24 09:48:36.000000000 +0000
@@ -0,0 +1,52 @@
+#!/usr/bin/python
+# -*- encoding: utf-8; py-indent-offset: 4 -*-
+# +------------------------------------------------------------------+
+# | ____ _ _ __ __ _ __ |
+# | / ___| |__ ___ ___| | __ | \/ | |/ / |
+# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / |
+# | | |___| | | | __/ (__| < | | | | . 
\ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. + + +f5_bigip_cpu_temp_default_params = (60, 80) + + +def inventory_f5_bigip_cpu_temp(info): + for line in info: + yield line[0], "f5_bigip_cpu_temp_default_params" + + + +def check_f5_bigip_cpu_temp(item, params, info): + for name, temp in info: + if name == item: + return check_temperature(int(temp), params) + + +check_info["f5_bigip_cpu_temp"] = { + 'check_function': check_f5_bigip_cpu_temp, + 'inventory_function': inventory_f5_bigip_cpu_temp, + 'service_description': 'Temperature CPU %s', + 'has_perfdata': True, + 'includes': [ 'temperature.include' ], + 'snmp_info': ( '.1.3.6.1.4.1.3375.2.1.3.1.2.1', [1, 2] ), + 'snmp_scan_function': \ + lambda oid: '.1.3.6.1.4.1.3375.2' in oid(".1.3.6.1.2.1.1.2.0") and "big-ip" in oid(".1.3.6.1.4.1.3375.2.1.4.1.0").lower(), +} diff -Nru check-mk-1.2.2p3/f5_bigip_fans check-mk-1.2.6p12/f5_bigip_fans --- check-mk-1.2.2p3/f5_bigip_fans 2013-11-05 09:23:07.000000000 +0000 +++ check-mk-1.2.6p12/f5_bigip_fans 2015-06-24 09:48:36.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. 
@@ -61,27 +61,26 @@ if f5_bigip_fans_genitem(fanentry[0]) == item: speed = int(fanentry[1]) warn, crit = f5_bigip_fans_default_levels - msgtxt = " - speed is %d rpm" % speed + msgtxt = "speed is %d rpm" % speed if speed > warn: - return (0, "OK" + msgtxt) + return (0, msgtxt) elif speed < crit: - return (2, "CRITCAL" + msgtxt) + return (2, msgtxt) elif speed < warn: - return (1, "WARNING" + msgtxt) + return (1, msgtxt) else: - return (3, "UNKNOWN - could not detect speed") + return (3, "could not detect speed") - return (3, "UNKNOWN - item not found in SNMP output") - - -snmp_info["f5_bigip_fans"] = \ - [ (".1.3.6.1.4.1.3375.2.1.3.2.1.2.1", [ 1, 3 ]), - ( ".1.3.6.1.4.1.3375.2.1.3.1.2.1", [ 1, 3 ]), ] - - -check_info["f5_bigip_fans"] = (check_f5_bigip_fans, "FAN %s", 0, inventory_f5_bigip_fans ) + return (3, "item not found in SNMP output") # Get ID and Speed from the CPU and chassis fan tables -snmp_scan_functions["f5_bigip_fans"] = \ - lambda oid: '.1.3.6.1.4.1.3375.2' in oid(".1.3.6.1.2.1.1.2.0") and "big-ip" in oid(".1.3.6.1.4.1.3375.2.1.4.1.0").lower() + +check_info["f5_bigip_fans"] = { + 'check_function': check_f5_bigip_fans, + 'inventory_function': inventory_f5_bigip_fans, + 'service_description': 'FAN %s', + 'snmp_info': [('.1.3.6.1.4.1.3375.2.1.3.2.1.2.1', [1, 3]), ('.1.3.6.1.4.1.3375.2.1.3.1.2.1', [1, 3])], + 'snmp_scan_function': \ + lambda oid: '.1.3.6.1.4.1.3375.2' in oid(".1.3.6.1.2.1.1.2.0") and "big-ip" in oid(".1.3.6.1.4.1.3375.2.1.4.1.0").lower(), +} diff -Nru check-mk-1.2.2p3/f5_bigip_interfaces check-mk-1.2.6p12/f5_bigip_interfaces --- check-mk-1.2.2p3/f5_bigip_interfaces 2013-11-05 09:23:07.000000000 +0000 +++ check-mk-1.2.6p12/f5_bigip_interfaces 2015-06-24 09:48:36.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. @@ -24,27 +24,32 @@ # to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, # Boston, MA 02110-1301 USA. -# .1.3.6.1.4.1.3375.2.1.2.4.4.3.1.1. index for ifname -# .1.3.6.1.4.1.3375.2.1.2.4.4.3.1.13. index for ifstate -# .1.3.6.1.4.1.3375.2.1.2.4.4.3.1.3. index for OUT bytes -# .1.3.6.1.4.1.3375.2.1.2.4.4.3.1.5. index for IN bytes +# .1.3.6.1.4.1.3375.2.1.2.4.4.3.1.1. index for ifname +# .1.3.6.1.4.1.3375.2.1.2.4.1.2.1.17. index for ifstate +# .1.3.6.1.4.1.3375.2.1.2.4.4.3.1.3. index for IN bytes +# .1.3.6.1.4.1.3375.2.1.2.4.4.3.1.5. 
index for OUT bytes
+
+f5_bigip_interface_states = {
+    1 : "down (has no link and is initialized)",
+    2 : "disabled (has been forced down)",
+    3 : "uninitialized (has not been initialized)",
+    4 : "loopback (in loopback mode)",
+    5 : "unpopulated (interface not physically populated)",
+}
+
 
 def check_f5_bigip_interfaces(item, params, info):
     for port, ifstate, inbytes, outbytes in info:
-        if item == port:
-            if int(ifstate) != 0:
-                states = {
-                    2 : "down (has no link and is initialized)",
-                    3 : "disabled has been forced down)",
-                    4 : "uninitialized (has not been initialized)",
-                    5 : "loopback (in loopback mode)",
-                    6 : "unpopulated (interface not physically populated)",
-                }
-                return(2, "CRIT - State of %s is %s" % (states[ifstate], port))
-            this_time = int(time.time())
-            in_timedif, in_per_sec = get_counter("f5.interface.in.%s" % item, this_time, int(inbytes))
-            out_timedif, out_per_sec = get_counter("f5_interface.out.%s" % item, this_time, int(outbytes))
+        if item != port:
+            continue
+        if int(ifstate) != 0:
+            return (2, "State of %s is %s" %
+                (port, f5_bigip_interface_states.get(int(ifstate), "unhandled (%s)" % ifstate)))
+
+        this_time = int(time.time())
+        in_per_sec  = get_rate("f5_interface.in.%s" % item, this_time, saveint(inbytes))
+        out_per_sec = get_rate("f5_interface.out.%s" % item, this_time, saveint(outbytes))
 
         inbytes_h = get_bytes_human_readable(in_per_sec)
         outbytes_h = get_bytes_human_readable(out_per_sec)
@@ -52,7 +57,8 @@
             ("bytes_in", in_per_sec),
             ("bytes_out", out_per_sec),
         ]
-        return (0, "OK - in bytes: %s/s, out bytes: %s/s" % (inbytes_h, outbytes_h), perf)
+        return (0, "in bytes: %s/s, out bytes: %s/s" % (inbytes_h, outbytes_h), perf)
+    return 3, "Interface not found in SNMP data"
 
 
 check_info["f5_bigip_interfaces"] = {
@@ -60,6 +66,7 @@
     "inventory_function"  : lambda info: [ (x[0], {'state': 0 } ) for x in info if int(x[1]) == 0],
     "service_description" : "f5 Interface %s",
     "has_perfdata"        : True,
-    "snmp_scan_function"  : lambda oid: ".1.3.6.1.4.1.3375.2.1.3.4.20" in oid(".1.3.6.1.2.1.1.2.0"),
-    "snmp_info"           : ( ".1.3.6.1.4.1.3375.2.1.2.4.4.3.1", [1, 13, 3, 5]),
+    "snmp_scan_function"  : lambda oid: oid(".1.3.6.1.2.1.1.2.0") in \
+                [ ".1.3.6.1.4.1.3375.2.1.3.4.10", ".1.3.6.1.4.1.3375.2.1.3.4.20" ],
+    "snmp_info"           : ( ".1.3.6.1.4.1.3375.2.1.2.4", ["4.3.1.1", "1.2.1.17", "4.3.1.3", "4.3.1.5"]),
 }
diff -Nru check-mk-1.2.2p3/f5_bigip_pool check-mk-1.2.6p12/f5_bigip_pool
--- check-mk-1.2.2p3/f5_bigip_pool 2013-11-05 09:23:07.000000000 +0000
+++ check-mk-1.2.6p12/f5_bigip_pool 2015-06-24 09:48:36.000000000 +0000
@@ -7,7 +7,7 @@
 # |           | |___| | | |  __/ (__|   <    | |  | | . \            |
 # |            \____|_| |_|\___|\___|_|\_\___|_|  |_|_|\_\           |
 # |                                                                  |
-# | Copyright Mathias Kettner 2013             mk@mathias-kettner.de |
+# | Copyright Mathias Kettner 2014             mk@mathias-kettner.de |
 # +------------------------------------------------------------------+
 #
 # This file is part of Check_MK.
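The f5_bigip_interfaces hunk above also swaps the old get_counter() call, which returned a (time difference, rate) pair, for get_rate(), which returns only the per-second rate. The following is a sketch of the semantics the new call sites rely on, not Check_MK's actual implementation, which persists counter state on disk between check runs and raises a "no data yet" exception on the first sample:

    _last_sample = {}

    def get_rate_sketch(countername, this_time, value):
        last = _last_sample.get(countername)
        _last_sample[countername] = (this_time, value)  # remember for next run
        if last is None or this_time <= last[0]:
            return 0.0  # first sample or clock step backwards: no usable rate
        last_time, last_value = last
        return (value - last_value) / float(this_time - last_time)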
@@ -39,42 +39,69 @@
 
 def inventory_f5_bigip_pool(checkname,info):
     # inventorize all pools and their member count
     inventory = []
-    for line in info:
+    for line in info[0]:
         if line[0] != "":
             inventory.append((line[0],"f5_bigip_pool_default_levels"))
     return inventory
 
+def f5_bigip_pool_get_down_members(info, item):
+    downs = []
+    if len(info) < 2: # no data for pool members
+        return downs
+    for line in info[1]:
+        if line[0] == item and (line[2] != '4' or line[3] != '4' or line[4] in ('2', '3', '4', '5' )):
+            if re.match("\/\S*\/\S*", line[5]):
+                host = line[5].split("/")[2]
+            else:
+                host = line[5]
+            downs.append(host+":"+line[1])
+    return downs
 
 def check_f5_bigip_pool(item, params, info):
-    for line in info:
+    for line in info[0]:
         if line[0] == item:
-            warn_num_members, crit_num_members = params
+            warn, crit = params
             pool_act_members = int(line[1])
             pool_def_members = int(line[2])
-
-            if pool_act_members == pool_def_members or pool_act_members > warn_num_members:
-                return (0, "OK - all members online")
-            elif pool_act_members <= crit_num_members:
-                return (2, "CRIT - only %s pool member(s) left" % pool_act_members)
-            elif pool_act_members == 0:
-                return (2, "CRIT - no pool members left")
-            elif pool_act_members <= warn_num_members:
-                return (1, "WARN - only %i of %i pool members are up" % (pool_act_members, pool_def_members))
-            else:
-                return (3, "UNKNOWN output for pool %s" % item)
-    return (3, "UNKNOWN - unexpected Output from SNMP Agent")
-
-
-snmp_info["f5_bigip_pool"] = \
-    ( ".1.3.6.1.4.1.3375.2.2.5.1.2.1", [
-        1,  # ltmPoolEntry
-        8,  # ltmPoolActiveMemberCnt
-        23, # ltmPoolMemberCnt
-    ])
-
-
-check_info["f5_bigip_pool"] = (check_f5_bigip_pool, "Load Balancing Pool %s", 0, inventory_f5_bigip_pool )
-
-
-snmp_scan_functions["f5_bigip_pool"] = \
-    lambda oid: '.1.3.6.1.4.1.3375.2' in oid(".1.3.6.1.2.1.1.2.0") and "big-ip" in oid(".1.3.6.1.4.1.3375.2.1.4.1.0").lower()
+            message = "%d of %d members are up" % ( pool_act_members, pool_def_members )
+            state = 0
+            if pool_act_members == pool_def_members or pool_act_members >= warn:
+                state = 0
+            elif pool_act_members < crit:
+                state = 2
+                message += " (warn/crit: %s/%s)" % ( warn, crit )
+            elif pool_act_members < warn:
+                state = 1
+                message += " (warn/crit: %s/%s)" % ( warn, crit )
+
+            if pool_act_members < pool_def_members:
+                downs = f5_bigip_pool_get_down_members(info, item)
+                message += ", down/disabled nodes: %s" % ", ".join(downs)
+
+            return state, message
+    return 3, "unexpected Output from SNMP Agent"
+
+check_info["f5_bigip_pool"] = {
+    'check_function'      : check_f5_bigip_pool,
+    'group'               : 'f5_pools',
+    'inventory_function'  : inventory_f5_bigip_pool,
+    'service_description' : 'Load Balancing Pool %s',
+    'snmp_info'           : [
+        ('.1.3.6.1.4.1.3375.2.2.5.1.2.1', [
+            1,  # ltmPoolEntry
+            8,  # ltmPoolActiveMemberCnt
+            23, # ltmPoolMemberCnt
+        ]),
+        ('.1.3.6.1.4.1.3375.2.2.5.3.2.1', [
+            1,  # ltmPoolMemberPoolName
+            4,  # ltmPoolMemberPort
+            10, # ltmPoolMemberMonitorState
+            11, # ltmPoolMemberMonitorStatus
+            13, # ltmPoolMemberSessionStatus
+            19, # ltmPoolMemberNodeName
+        ]),
+    ],
+    'snmp_scan_function': \
+        lambda oid: '.1.3.6.1.4.1.3375.2' in oid(".1.3.6.1.2.1.1.2.0") \
+            and "big-ip" in oid(".1.3.6.1.4.1.3375.2.1.4.1.0").lower(),
+}
diff -Nru check-mk-1.2.2p3/f5_bigip_psu check-mk-1.2.6p12/f5_bigip_psu
--- check-mk-1.2.2p3/f5_bigip_psu 2013-11-05 09:23:07.000000000 +0000
+++ check-mk-1.2.6p12/f5_bigip_psu 2015-06-24 09:48:36.000000000 +0000
@@ -7,7 +7,7 @@
 # |           | |___| | | |  __/ (__|   <    | |  | | . \            |
 # |            \____|_| |_|\___|\___|_|\_\___|_|  |_|_|\_\           |
 # |                                                                  |
-# | Copyright Mathias Kettner 2013             mk@mathias-kettner.de |
+# | Copyright Mathias Kettner 2014             mk@mathias-kettner.de |
 # +------------------------------------------------------------------+
 #
 # This file is part of Check_MK.
@@ -37,9 +37,6 @@
 #     good(1),
 #     notpresent(2)
 
-
-
 def inventory_f5_bigip_psu(info):
     inventory = []
     for line in info:
@@ -50,33 +47,31 @@
             inventory.append((psu, None))
     return inventory
 
-
 def check_f5_bigip_psu(item, _no_params, info):
     for line in info:
         psu = line[0]
         state = int(line[1])
         if psu == item:
             if state == 1:
-                return (0, "OK - PSU state: good")
+                return (0, "PSU state: good")
             elif state == 0:
-                return (2, "CRIT - PSU state: bad!!")
+                return (2, "PSU state: bad!!")
             elif state == 2:
-                return (1, "WARN - PSU state: notpresent!")
+                return (1, "PSU state: notpresent!")
             else:
-                return (3, "UNKNOWN - PSU state is unknown")
-
-
-    return (3, "UNKNOWN - item not found in SNMP output")
-
-
-# Get ID and status from the SysChassisPowerSupplyTable
-snmp_info["f5_bigip_psu"] = \
-    ( ".1.3.6.1.4.1.3375.2.1.3.2.2.2.1", [ 1, 2, ])
-
+                return (3, "PSU state is unknown")
 
-check_info["f5_bigip_psu"] = (check_f5_bigip_psu, "PSU %s", 0, inventory_f5_bigip_psu )
+    return (3, "item not found in SNMP output")
 
-# Check using the vendor mib Id and verify it's a loadbalancer
-snmp_scan_functions["f5_bigip_psu"] = \
-    lambda oid: '.1.3.6.1.4.1.3375.2' in oid(".1.3.6.1.2.1.1.2.0") and "big-ip" in oid(".1.3.6.1.4.1.3375.2.1.4.1.0").lower()
+check_info["f5_bigip_psu"] = {
+    'check_function': check_f5_bigip_psu,
+    'inventory_function': inventory_f5_bigip_psu,
+    'service_description': 'PSU %s',
+    # Get ID and status from the SysChassisPowerSupplyTable
+    'snmp_info': ('.1.3.6.1.4.1.3375.2.1.3.2.2.2.1', [1, 2]),
+    # Check using the vendor mib Id and verify it's a loadbalancer
+    'snmp_scan_function': \
+        lambda oid: '.1.3.6.1.4.1.3375.2' in oid(".1.3.6.1.2.1.1.2.0") \
+            and "big-ip" in oid(".1.3.6.1.4.1.3375.2.1.4.1.0").lower(),
+}
diff -Nru check-mk-1.2.2p3/f5_bigip_temp check-mk-1.2.6p12/f5_bigip_temp
--- check-mk-1.2.2p3/f5_bigip_temp 2013-11-05 09:23:07.000000000 +0000
+++ check-mk-1.2.6p12/f5_bigip_temp 1970-01-01 00:00:00.000000000 +0000
@@ -1,97 +0,0 @@
-#!/usr/bin/python
-# -*- encoding: utf-8; py-indent-offset: 4 -*-
-# +------------------------------------------------------------------+
-# |             ____ _               _        __  __ _  __           |
-# |            / ___| |__   ___  ___| | __   |  \/  | |/ /           |
-# |           | |   | '_ \ / _ \/ __| |/ /   | |\/| | ' /            |
-# |           | |___| | | |  __/ (__|   <    | |  | | . \            |
-# |            \____|_| |_|\___|\___|_|\_\___|_|  |_|_|\_\           |
-# |                                                                  |
-# | Copyright Mathias Kettner 2013             mk@mathias-kettner.de |
-# +------------------------------------------------------------------+
-#
-# This file is part of Check_MK.
-# The official homepage is at http://mathias-kettner.de/check_mk.
-#
-# check_mk is free software; you can redistribute it and/or modify it
-# under the terms of the GNU General Public License as published by
-# the Free Software Foundation in version 2. check_mk is distributed
-# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with-
-# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A
-# PARTICULAR PURPOSE. See the GNU General Public License for more de-
-# ails. You should have received a copy of the GNU General Public
-# License along with GNU Make; see the file COPYING. If not, write
-# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
-# Boston, MA 02110-1301 USA.
- - -# Agent / MIB output -# F5-BIGIP-SYSTEM-MIB::sysChassisTempNumber -# 1.3.6.1.4.1.3375.2.1.3.2.2.1.0 1 -# 1.3.6.1.4.1.3375.2.1.3.2.2.2.1.1.101 101 -# 1.3.6.1.4.1.3375.2.1.3.2.2.2.1.2.101 1 -# 1.3.6.1.4.1.3375.2.1.3.2.3.1.0 2 -# F5-BIGIP-SYSTEM-MIB::sysCpuTemperature -# 1.3.6.1.4.1.3375.2.1.3.1.2.1.1.1 1 -# 1.3.6.1.4.1.3375.2.1.3.1.2.1.2.1 54 - - -def f5_bigip_temp_genitem(info): - sensors = [] - # parse all detected sensors and name them according to the mib section they came from. - # we will assign a base "nominal" temperature here based on sample MIB data - for chassis_sensor in info[0]: - id = chassis_sensor[0] - type = "Chassis" - cur_temp = int(chassis_sensor[1]) - nom_temp = 49 - sensors.append([ ("%s %s" % (id, type)), cur_temp, nom_temp ]) - for cpu_sensor in info[1]: - id = cpu_sensor[0] - type = "CPU" - cur_temp = int(cpu_sensor[1]) - nom_temp = 55 - sensors.append([ ("%s %s" % (id, type)), cur_temp, nom_temp ]) - - return sensors - - -def inventory_f5_bigip_temp(info): - inventory = [] - for sensor in f5_bigip_temp_genitem(info): - inventory.append((sensor[0], sensor[2])) - return inventory - - -def check_f5_bigip_temp(item, params, info): - - for sensor in f5_bigip_temp_genitem(info): - if item == sensor[0]: - tmpdiff = sensor[1] - sensor[2] - msgtxt = " - Temperature is %dC" % (sensor[1]) - perfdata = [ ("temp", sensor[1]) ] - if 5 >= tmpdiff: - return (0, "OK" + msgtxt, perfdata) - elif tmpdiff > 15: - return (2, "CRIT" + msgtxt + "(!!)", perfdata) - elif tmpdiff > 10: - return (1, "WARN" + msgtxt + "(!)", perfdata) - else: - return (3, "UNKNOWN - unable to read temperature") - - - - return (3, "UNKNOWN - item not found in SNMP output") - - -snmp_info["f5_bigip_temp"] = \ - [ (".1.3.6.1.4.1.3375.2.1.3.2.3.2.1", [ 1, 2 ]), - (".1.3.6.1.4.1.3375.2.1.3.1.2.1", [ 1, 2 ]), ] - - -check_info["f5_bigip_temp"] = (check_f5_bigip_temp, "Temperature %s", 1, inventory_f5_bigip_temp ) - - -# Get ID and Speed from the CPU and chassis fan tables -snmp_scan_functions["f5_bigip_temp"] = \ - lambda oid: '.1.3.6.1.4.1.3375.2' in oid(".1.3.6.1.2.1.1.2.0") and "big-ip" in oid(".1.3.6.1.4.1.3375.2.1.4.1.0").lower() diff -Nru check-mk-1.2.2p3/f5_bigip_vserver check-mk-1.2.6p12/f5_bigip_vserver --- check-mk-1.2.2p3/f5_bigip_vserver 2013-11-05 09:23:07.000000000 +0000 +++ check-mk-1.2.6p12/f5_bigip_vserver 2015-06-24 09:48:36.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. @@ -25,58 +25,63 @@ # Boston, MA 02110-1301 USA. -# Example output -# Name of a Virt. Server: -# 1.3.6.1.4.1.3375.2.2.10.2.3.1.1.ascii_encoded_string. "VS_XX" -# Textual Status (only supported in F5 OS 9.3+ -# 1.3.6.1.4.1.3375.2.2.10.1.2.1.25.5.12.34.56.78.90 "The virtual server is available" -# Table with name and status -# 1.3.6.1.4.1.3375.2.2.10.13.2.1. 1 .5.12.34.56.78.90 "VS_XX" -# 1.3.6.1.4.1.3375.2.2.10.13.2.1. 2 .5.12.34.56.78.90 1 - - -def inventory_f5_bigip_vserver(checkname,info): - inventory = [] - for line in info: -# snmp info will return some empty lines here, filter them and -# only inventorize OK status. 
- if line[0] != "": - if int(line[1]) == 1: - inventory.append((line[0], None)) - return inventory - - -def check_f5_bigip_vserver(item, _no_params, info): - for line in info: - if line[0] != "": - if line[0] == item: - status = int(line[1]) - if status == 0: - return (1, "WARN - Virtual Server is disabled") - elif status == 1: - return (0, "OK - Virtual Server is up and available") - elif status == 2: - return (2, "CRIT - Virtual Server is currently not available") - elif status == 3: - return (2, "CRIT - Virtual Server is not available") - elif status == 4: - return (1, "WARN - Virtual Server status is unknown") - else: - return (3, "UNKNOWN") - return (3, "UNKNOWN - Virtual Server %s not found in SNMP output" % item) - - - -snmp_info["f5_bigip_vserver"] = \ - ( ".1.3.6.1.4.1.3375.2.2.10.13.2.1", [ - 1, # Name - 2, # Status - ]) - - - -check_info["f5_bigip_vserver"] = (check_f5_bigip_vserver, "Virtual Server %s", 0, inventory_f5_bigip_vserver ) - - -snmp_scan_functions["f5_bigip_vserver"] = \ - lambda oid: '.1.3.6.1.4.1.3375.2' in oid(".1.3.6.1.2.1.1.2.0") and "big-ip" in oid(".1.3.6.1.4.1.3375.2.1.4.1.0").lower() +def parse_f5_bigip_vserver(info): + vservers = {} + for name, status, tot_conns, cur_conns in info: + vservers[name] = { + "status" : int(status), + "tot_conns" : int(tot_conns), + "cur_conns" : int(cur_conns), + } + return vservers + + +def inventory_f5_bigip_vserver(parsed): + for name, vserver in parsed.items(): + if vserver["status"] == 1: + yield name, None + + +def check_f5_bigip_vserver(item, _no_params, parsed): + if item in parsed: + vserver = parsed[item] + + # Current number of connections + yield 0, "Client connections: %d" % vserver["cur_conns"], [("connections", vserver["cur_conns"])] + + # New connections per time + counter_name = "f5_bigip_vserver.%s.connections" % item + rate = get_rate(counter_name, time.time(), vserver["tot_conns"]) + yield 0, "Rate: %.2f/sec" % rate, [("conn_rate", rate)] + + # Current server status + status = vserver["status"] + if status == 0: + yield 1, "Virtual Server is disabled" + elif status == 1: + yield 0, "Virtual Server is up and available" + elif status == 2: + yield 2, "Virtual Server is currently not available" + elif status == 3: + yield 2, "Virtual Server is not available" + elif status == 4: + yield 1, "Virtual Server status is unknown" + else: + yield 3, "Unhandled status (%d)" % status + + +check_info["f5_bigip_vserver"] = { + 'parse_function' : parse_f5_bigip_vserver, + 'check_function' : check_f5_bigip_vserver, + 'inventory_function' : inventory_f5_bigip_vserver, + 'service_description' : 'Virtual Server %s', + "has_perfdata" : True, + 'snmp_info' : ('.1.3.6.1.4.1.3375.2.2.10', [ + "13.2.1.1", # Name + "13.2.1.2", # Status + "2.3.1.11", # ltmVirtualServStatClientTotConns + "2.3.1.12", # ltmVirtualServStatClientCurConns + ]), + 'snmp_scan_function' : lambda oid: '.1.3.6.1.4.1.3375.2' in oid(".1.3.6.1.2.1.1.2.0") \ + and "big-ip" in oid(".1.3.6.1.4.1.3375.2.1.4.1.0").lower(), +} diff -Nru check-mk-1.2.2p3/fast_lta_headunit check-mk-1.2.6p12/fast_lta_headunit --- check-mk-1.2.2p3/fast_lta_headunit 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/fast_lta_headunit 2015-06-24 09:48:36.000000000 +0000 @@ -0,0 +1,140 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . 
\ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. + +fast_lta_headunit_info = [(".1.3.6.1.4.1.27417.2", [1, # headUnitStatus + 2, # replicationMode + 5] # replicationRunning + )] + +def fast_lta_headunit_scan(oid): + return oid(".1.3.6.1.2.1.1.2.0").startswith(".1.3.6.1.4.1.8072.3.2.10") + +# .--status--------------------------------------------------------------. +# | _ _ | +# | ___| |_ __ _| |_ _ _ ___ | +# | / __| __/ _` | __| | | / __| | +# | \__ \ || (_| | |_| |_| \__ \ | +# | |___/\__\__,_|\__|\__,_|___/ | +# | | +# '----------------------------------------------------------------------' + +def inventory_fast_lta_headunit_status(info): + if len(info[0]) > 0: + return [ (None, None) ] + else: + return [] + +def check_fast_lta_headunit_status(item, _no_params, info): + head_unit_status_map = { + "-1": "workerDefect", + "-2": "workerNotStarted", + "2" : "workerBooting", + "3" : "workerRfRRunning", + "10": "appBooting", + "20": "appNoCubes", + "30": "appVirginCubes", + "40": "appRfrPossible", + "45": "appRfrMixedCubes", + "50": "appRfrActive", + "60": "appReady", + "65": "appMixedCubes", + "70": "appReadOnly", + "75": "appEnterpriseCubes", + "80": "appEnterpriseMixedCubes", + } + + if info[0][0][0] == "60": + status = 0 + elif info[0][0][0] == "70" and info[0][0][1] == "0": + # on Slave node appReadOnly is also an ok state + status = 0 + else: + status = 2 + + if info[0][0][0] in head_unit_status_map.keys(): + message = "Head Unit status is %s." % head_unit_status_map[info[0][0][0]] + else: + message = "Head Unit status is %s." % info[0][0][0] + + return status, message + +check_info["fast_lta_headunit.status"] = { + "check_function" : check_fast_lta_headunit_status, + "inventory_function" : inventory_fast_lta_headunit_status, + "service_description" : "Fast LTA Headunit Status", + "has_perfdata" : False, + "snmp_info" : fast_lta_headunit_info, + "snmp_scan_function" : fast_lta_headunit_scan +} + +#. +# .--replication---------------------------------------------------------. 
+# | _ _ _ _ | +# | _ __ ___ _ __ | (_) ___ __ _| |_(_) ___ _ __ | +# | | '__/ _ \ '_ \| | |/ __/ _` | __| |/ _ \| '_ \ | +# | | | | __/ |_) | | | (_| (_| | |_| | (_) | | | | | +# | |_| \___| .__/|_|_|\___\__,_|\__|_|\___/|_| |_| | +# | |_| | +# '----------------------------------------------------------------------' + +def inventory_fast_lta_headunit_replication(info): + if len(info[0]) > 0: + return [ (None, None) ] + else: + return [] + +def check_fast_lta_headunit_replication(item, _no_params, info): + head_unit_replication_map = { + "0" : "Slave", + "1" : "Master", + "255" : "standalone", + } + + if info[0][0][2] == "1": + message = "Replication is running." + status = 0 + else: + message = "Replication is not running (!!)." + status = 2 + + if info[0][0][1] in head_unit_replication_map.keys(): + message += " This node is %s." \ + % head_unit_replication_map[info[0][0][1]] + else: + message += " Replication mode of this node is %s." % info[0][0][1] + + return status, message + +check_info["fast_lta_headunit.replication"] = { + "check_function" : check_fast_lta_headunit_replication, + "inventory_function" : inventory_fast_lta_headunit_replication, + "service_description" : "Fast LTA Replication", + "has_perfdata" : False, + "snmp_info" : fast_lta_headunit_info, + "snmp_scan_function" : fast_lta_headunit_scan +} + +#. diff -Nru check-mk-1.2.2p3/fast_lta_headunit.replication check-mk-1.2.6p12/fast_lta_headunit.replication --- check-mk-1.2.2p3/fast_lta_headunit.replication 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/fast_lta_headunit.replication 2015-06-24 09:48:36.000000000 +0000 @@ -0,0 +1,14 @@ +title: FAST LTA Storage Systems: Replication Status and Mode +agents: snmp +catalog: hw/storagehw/fastlta +license: GPL +distribution: check_mk +description: + Checks if the Replication between Head Units is running on FAST LTA Storage + System. Returns {OK} if running and {CRIT} if not. + + The replication mode (master, slave or standalone) is displayed only. + It does not trigger a status change of the check. + +inventory: + Finds exactly one check on every system (head unit). diff -Nru check-mk-1.2.2p3/fast_lta_headunit.status check-mk-1.2.6p12/fast_lta_headunit.status --- check-mk-1.2.2p3/fast_lta_headunit.status 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/fast_lta_headunit.status 2015-06-24 09:48:36.000000000 +0000 @@ -0,0 +1,12 @@ +title: FAST LTA Storage Systems: Head Unit Status +agents: snmp +catalog: hw/storagehw/fastlta +license: GPL +distribution: check_mk +description: + Checks the Head Unit Status of a FAST LTA Storage System. + Returns {OK} on status {appReady} and {CRIT} on every other. + On the slave node status {appReadOnly} is {OK} too. + +inventory: + Finds exactly one check on every system (head unit). diff -Nru check-mk-1.2.2p3/fast_lta_silent_cubes check-mk-1.2.6p12/fast_lta_silent_cubes --- check-mk-1.2.2p3/fast_lta_silent_cubes 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/fast_lta_silent_cubes 2015-09-21 10:59:54.000000000 +0000 @@ -0,0 +1,55 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . 
\ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. + + +def inventory_fast_lta_silent_cubes_status(info): + if len(info) > 0 and len(info[0]) > 1: + return [ ("Total", {}) ] + else: + return [] + +def check_fast_lta_silent_cubes_status(item, params, info): + fslist = [] + for total, used in info: + size_mb = int(total) / 1048576.0 + avail_mb = ( int(total) - int(used) ) / 1048576.0 + fslist.append((item, size_mb, avail_mb)) + + return df_check_filesystem_list(item, params, fslist) + +check_info["fast_lta_silent_cubes.capacity"] = { + "check_function" : check_fast_lta_silent_cubes_status, + "inventory_function" : inventory_fast_lta_silent_cubes_status, + "service_description" : "Fast LTA SC Capacity %s", + "has_perfdata" : True, + "group" : "filesystem", + "includes" : [ "df.include" ], + "snmp_info" : (".1.3.6.1.4.1.27417.3", [2, # scTotalCapacity + 3] # scTotalUsedCapacity + ), + "snmp_scan_function" : lambda oid: oid(".1.3.6.1.2.1.1.2.0").startswith(".1.3.6.1.4.1.8072.3.2.10"), +} + diff -Nru check-mk-1.2.2p3/fast_lta_silent_cubes.capacity check-mk-1.2.6p12/fast_lta_silent_cubes.capacity --- check-mk-1.2.2p3/fast_lta_silent_cubes.capacity 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/fast_lta_silent_cubes.capacity 2015-06-24 09:48:36.000000000 +0000 @@ -0,0 +1,35 @@ +title: FAST LTA Storage Systems: Total Capacity over all Silent Cubes +agents: snmp +catalog: hw/storagehw/fastlta +license: GPL +distribution: check_mk +description: + Checks the total / used capacity over all silent cubes connected to a FAST + LTA Storage System. + + Returns {WARN} or {CRIT} if usage is above given levels. + + It uses the check logic of the {df} check, so for configuration + parameters and examples please refer to the man page of {df}. + +item: + "Total" for now is the only item. + +perfdata: + Three values: The first value is the used space in total over all connected + silent cubes in MB. Also the minimum (0 MB), maximum (total size over all + silent cubes) and the warning and critical levels in MB are provided. + The second is the change of the usage in MB per range since the last check + (e.g. in MB per 24 hours) and the 3rd is the averaged change (so called + trend), also in MB per range. Please note, that performance data for + trends is enabled per default. You can globally disable that in {main.mk} + with {filesystem_default_levels["trend_perfdata"] = False}. + +inventory: + Finds exactly one check on every system (head unit). + +[parameters] +parameters (dict): See man page of {df}. + +[configuration] +filesystem_default_levels: And other, see man page of {df}. 
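Both FAST LTA capacity checks (the Silent Cubes total above and the per-volume check that follows) use the same recipe: the SNMP values are byte counters, while df_check_filesystem_list() from df.include expects sizes in MB, so total and free space are divided by 1048576.0 before being handed over. A standalone sketch of that conversion (the helper name is made up for illustration):

    def bytes_to_df_entry(item, total_bytes, used_bytes):
        # df.include works on (item, size_mb, avail_mb) tuples
        size_mb  = int(total_bytes) / 1048576.0
        avail_mb = (int(total_bytes) - int(used_bytes)) / 1048576.0
        return (item, size_mb, avail_mb)

    # e.g. 4 TiB total with 1 TiB used:
    # bytes_to_df_entry("Total", 4398046511104, 1099511627776)
    # -> ("Total", 4194304.0, 3145728.0)

Reusing the df logic is what gives these checks warn/crit levels, trend computation and perfdata for free, as the man pages note.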
diff -Nru check-mk-1.2.2p3/fast_lta_volumes check-mk-1.2.6p12/fast_lta_volumes --- check-mk-1.2.2p3/fast_lta_volumes 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/fast_lta_volumes 2015-09-21 10:59:54.000000000 +0000 @@ -0,0 +1,60 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. + +def inventory_fast_lta_volumes(info): + inventory = [] + for volname, volquota, volused in info: + inventory.append((volname, {})) + return inventory + + +def check_fast_lta_volumes(item, params, info): + fslist = [] + for volname, volquota, volused in info: + if volname == item: + fslist = [] + size_mb = int(volquota) / 1048576.0 + avail_mb = ( int(volquota) - int(volused) ) / 1048576.0 + fslist.append((item, size_mb, avail_mb)) + return df_check_filesystem_list(item, params, fslist) + + return 3, "Volume %s not found" % item + + +check_info["fast_lta_volumes"] = { + "check_function" : check_fast_lta_volumes, + "inventory_function" : inventory_fast_lta_volumes, + "service_description" : "Fast LTA Volume %s", + "has_perfdata" : True, + "group" : "filesystem", + "includes" : [ "df.include" ], + "snmp_info" : (".1.3.6.1.4.1.27417.5.1.1", [2, # Volume name + 9, # Volume Quota + 11] # Volume used space + ), + "snmp_scan_function" : lambda oid: oid(".1.3.6.1.2.1.1.2.0").startswith(".1.3.6.1.4.1.8072.3.2.10"), +} + diff -Nru check-mk-1.2.2p3/fc_brocade_port check-mk-1.2.6p12/fc_brocade_port --- check-mk-1.2.2p3/fc_brocade_port 2013-11-05 09:23:07.000000000 +0000 +++ check-mk-1.2.6p12/fc_brocade_port 1970-01-01 00:00:00.000000000 +0000 @@ -1,32 +0,0 @@ -#!/usr/bin/python -# -*- encoding: utf-8; py-indent-offset: 4 -*- -# +------------------------------------------------------------------+ -# | ____ _ _ __ __ _ __ | -# | / ___| |__ ___ ___| | __ | \/ | |/ / | -# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | -# | | |___| | | | __/ (__| < | | | | . \ | -# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | -# | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | -# +------------------------------------------------------------------+ -# -# This file is part of Check_MK. -# The official homepage is at http://mathias-kettner.de/check_mk. -# -# check_mk is free software; you can redistribute it and/or modify it -# under the terms of the GNU General Public License as published by -# the Free Software Foundation in version 2. 
check_mk is distributed -# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- -# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A -# PARTICULAR PURPOSE. See the GNU General Public License for more de- -# ails. You should have received a copy of the GNU General Public -# License along with GNU Make; see the file COPYING. If not, write -# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, -# Boston, MA 02110-1301 USA. - -def check_brocade_port(portno, params, info): - return (3, "UKNOWN - This check has been removed. Please use brocade_fcport instead.") - -check_info['fc_brocade_port'] = (check_brocade_port, "PORT %s", 1, no_inventory_possible) -snmp_info['fc_brocade_port'] = [] -snmp_scan_functions['fc_brocade_port'] = lambda oid: False diff -Nru check-mk-1.2.2p3/fc_brocade_port_detailed check-mk-1.2.6p12/fc_brocade_port_detailed --- check-mk-1.2.2p3/fc_brocade_port_detailed 2013-11-05 09:23:07.000000000 +0000 +++ check-mk-1.2.6p12/fc_brocade_port_detailed 1970-01-01 00:00:00.000000000 +0000 @@ -1,32 +0,0 @@ -#!/usr/bin/python -# -*- encoding: utf-8; py-indent-offset: 4 -*- -# +------------------------------------------------------------------+ -# | ____ _ _ __ __ _ __ | -# | / ___| |__ ___ ___| | __ | \/ | |/ / | -# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | -# | | |___| | | | __/ (__| < | | | | . \ | -# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | -# | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | -# +------------------------------------------------------------------+ -# -# This file is part of Check_MK. -# The official homepage is at http://mathias-kettner.de/check_mk. -# -# check_mk is free software; you can redistribute it and/or modify it -# under the terms of the GNU General Public License as published by -# the Free Software Foundation in version 2. check_mk is distributed -# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- -# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A -# PARTICULAR PURPOSE. See the GNU General Public License for more de- -# ails. You should have received a copy of the GNU General Public -# License along with GNU Make; see the file COPYING. If not, write -# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, -# Boston, MA 02110-1301 USA. - -def check_brocade_port(portno, params, info): - return (3, "UKNOWN - This check has been removed. Please use brocade_fcport instead.") - -check_info['fc_brocade_port_detailed'] = (check_brocade_port, "PORT %s", 1, no_inventory_possible) -snmp_info['fc_brocade_port_detailed'] = [] -snmp_scan_functions['fc_brocade_port_detailed'] = lambda oid: False diff -Nru check-mk-1.2.2p3/fc_port check-mk-1.2.6p12/fc_port --- check-mk-1.2.2p3/fc_port 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/fc_port 2015-06-24 09:48:36.000000000 +0000 @@ -0,0 +1,352 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. 
+# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. + +# Taken from connUnitPortState +# user selected state of the port hardware +fc_port_admstates = { + 1: ('unknown', 1), + 2: ('online', 0), + 3: ('offline', 0), + 4: ('bypassed', 1), + 5: ('diagnostics', 1), +} +# Taken from connUnitPortStatus +# operational status for the port +fc_port_opstates = { + 1: ('unknown', 1), + 2: ('unused', 1), + 3: ('ready', 0), + 4: ('warning', 1), + 5: ('failure', 2), + 6: ('not participating', 1), + 7: ('initializing', 1), + 8: ('bypass',1), + 9: ('ols', 0), +} +# Taken from connUnitPortHWState +# hardware detected state of the port +fc_port_phystates = { + 1: ('unknown', 1), + 2: ('failed', 2), + 3: ('bypassed', 1), + 4: ('active', 0), + 5: ('loopback', 1), + 6: ('txfault', 1), + 7: ('noMedia', 1), + 8: ('linkDown',2), +} + +# taken from connUnitPortType +porttype_list = ( 'unknown', 'unknown', 'other', 'not-present', 'hub-port', 'n-port', + 'l-port', 'fl-port', 'f-port', 'e-port', 'g-port', 'domain-ctl', + 'hub-controller', 'scsi', 'escon', 'lan', 'wan', 'ac', 'dc', 'ssa') + +# settings for inventory: which ports should not be inventorized +fc_port_no_inventory_types = [ 3 ] +fc_port_no_inventory_admstates = [ 1, 3 ] +fc_port_no_inventory_opstates = [ ] +fc_port_no_inventory_phystates = [ ] +fc_port_inventory_use_portname = False # use connUnitPortName as service description + +factory_settings["fc_port_default_levels"] = { + "rxcrcs": (3.0, 20.0), # allowed percentage of CRC errors + "rxencoutframes": (3.0, 20.0), # allowed percentage of Enc-OUT Frames + "notxcredits": (3.0, 20.0), # allowed percentage of No Tx Credits + "c3discards": (3.0, 20.0), # allowed percentage of C3 discards +} + +# Helper function for computing item from port number +def fc_port_getitem(num_ports, index, portname): + int_len = str(len(str(num_ports))) # number of digits for index + itemname = ("%0" + int_len + "d") % (index - 1) # leading zeros + if portname.strip() and fc_port_inventory_use_portname: + itemname += portname.strip() + return itemname + +def fc_parse_counter(value): + # The counters are sent via SNMP as OCTETSTR, which is converted to + # a byte string by Check_MKs SNMP code. The counters seem to be + # 64 bit big endian values, which are converted to integers here + if len(value) == len("00 00 00 0C 8F 70 DD 74"): + value = "".join(map(chr, [ eval("0x" + v) for v in value.split() ])) + import struct + return int(struct.unpack('>Q', value)[0]) + +def inventory_fc_port(info): + if not info: + return + + inventory = [] + for line in info: + if len(line) == 15: + try: + index = int(line[0]) + porttype = int(line[1]) + admstate = int(line[2]) + opstate = int(line[3]) + phystate = int(line[6]) + except: # missing vital data. 
Skipping this port
+                continue
+            portname = line[5]
+
+            if porttype not in fc_port_no_inventory_types and \
+                admstate not in fc_port_no_inventory_admstates and \
+                opstate not in fc_port_no_inventory_opstates and \
+                phystate not in fc_port_no_inventory_phystates:
+
+                inventory.append( (fc_port_getitem(len(info), index, portname), \
+                    'fc_port_default_levels') )
    return inventory
+
+
+def check_fc_port(item, params, info):
+    # Accept item, even if port name has changed
+    item_index = int(item.split()[0])
+    portinfo = [ line for line in info if int(line[0]) == item_index + 1 ]
+    index = int(portinfo[0][0])
+    porttype = int(portinfo[0][1])
+    admstate = int(portinfo[0][2])
+    opstate = int(portinfo[0][3])
+    phystate = int(portinfo[0][6])
+    counter_list = map(fc_parse_counter, portinfo[0][7:])
+    txobjects, rxobjects, txelements, rxelements, notxcredits, c3discards, \
+        rxcrcs, rxencoutframes = counter_list
+
+    summarystate = 0
+    output = []
+    perfdata = []
+    perfaverages = []
+
+    wirespeed = savefloat(portinfo[0][4]) * 1000.0 # speed in Bytes/sec, 0 if unknown
+    if wirespeed == 0:
+        # let user specify assumed speed via check parameter, default is 16.0 Gbit/sec
+        gbit = params.get("assumed_speed", 16.0)
+        wirespeed = gbit * 1000.0 * 1000.0 * 1000.0 / 8.0 # in Bytes/sec
+        speedmsg = "assuming %g Gbit/s" % gbit
+    else:
+        gbit = wirespeed * 8.0 / ( 1000.0 * 1000.0 * 1000.0 ) # in Gbit/sec
+        speedmsg = "%.1f Gbit/s" % gbit
+    output.append(speedmsg)
+
+    # Now check rates of various counters
+    this_time = time.time()
+
+    in_bytes = get_rate("fc_port.rxelements.%s" \
+        % index, this_time, rxelements)
+    out_bytes = get_rate("fc_port.txelements.%s" \
+        % index, this_time, txelements)
+
+    average = params.get("average") # range in minutes
+
+    # B A N D W I D T H
+    # convert thresholds in percentage into MB/s
+    bw_thresh = params.get("bw")
+    if bw_thresh == None: # no levels
+        warn_bytes, crit_bytes = None, None
+    else:
+        warn, crit = bw_thresh
+        if type(warn) == float:
+            warn_bytes = wirespeed * warn / 100.0
+        else: # in MB
+            warn_bytes = warn * 1048576.0
+        if type(crit) == float:
+            crit_bytes = wirespeed * crit / 100.0
+        else: # in MB
+            crit_bytes = crit * 1048576.0
+
+    for what, value in [("In", in_bytes), ("Out", out_bytes)]:
+        output.append("%s: %s/s" % (what, get_bytes_human_readable(value)))
+        perfdata.append((what.lower(), value, warn_bytes, crit_bytes, 0, wirespeed))
+
+        # average turned on: use averaged traffic values instead of current ones
+        if average:
+            value = get_average("fc_port.%s.%s.avg" % (what, item), this_time, value, average)
+            output.append("Avg(%dmin): %s/s" % (average, get_bytes_human_readable(value)))
+            perfaverages.append( ("%s_avg" % what.lower(), value, warn_bytes, crit_bytes, 0, wirespeed))
+
+        # handle levels for in/out
+        if crit_bytes != None and value >= crit_bytes:
+            summarystate = 2
+            output.append(" >= %s/s(!!)" % (get_bytes_human_readable(crit_bytes)))
+        elif warn_bytes != None and value >= warn_bytes:
+            summarystate = max(1, summarystate)
+            output.append(" >= %s/s(!)" % (get_bytes_human_readable(warn_bytes)))
+
+    # put perfdata of averages after perfdata for in and out in order not to confuse the perfometer
+    perfdata.extend(perfaverages)
+
+    # R X O B J E C T S & T X O B J E C T S
+    # Put number of objects into performance data (honor averaging)
+    rxobjects_rate = get_rate("fc_port.rxobjects.%s" \
+        % index, this_time, rxobjects)
+    txobjects_rate = get_rate("fc_port.txobjects.%s" \
+        % index, this_time, txobjects)
+    for what, value in [ ("rxobjects", 
rxobjects_rate), ("txobjects", txobjects_rate) ]: + perfdata.append((what, value)) + if average: + value = get_average("fc_port.%s.%s.avg" % (what, item), this_time, value, average) + perfdata.append( ("%s_avg" % what, value) ) + + # E R R O R C O U N T E R S + # handle levels on error counters + + for descr, counter, value, ref in [ + ("CRC errors", "rxcrcs", rxcrcs, rxobjects_rate, ), + ("ENC-Out", "rxencoutframes", rxencoutframes, rxobjects_rate, ), + ("C3 discards", "c3discards", c3discards, txobjects_rate, ), + ("no TX buffer credits", "notxcredits", notxcredits, txobjects_rate, ),]: + per_sec = get_rate("fc_port.%s.%s" % (counter, index), this_time, value) + + perfdata.append((counter, per_sec)) + + # if averaging is on, compute average and apply levels to average + if average: + per_sec_avg = get_average("fc_port.%s.%s.avg" % \ + (counter, item), this_time, per_sec, average) + perfdata.append( ("%s_avg" % counter, per_sec_avg ) ) + + # compute error rate (errors in relation to number of frames) (from 0.0 to 1.0) + if ref > 0 or per_sec > 0: + rate = per_sec / (ref + per_sec) + else: + rate = 0 + text = "%s: %.2f%%" % (descr, rate * 100.0) + + # Honor averaging of error rate + if average: + rate = get_average("fc_port.%s.%s.avgrate" % + (counter, item), this_time, rate, average) + text += ", Avg: %.2f%%" % (rate * 100.0) + + error_percentage = rate * 100.0 + warn, crit = params[counter] + if crit != None and error_percentage >= crit: + summarystate = 2 + text += "(!!)" + output.append(text) + elif warn != None and error_percentage >= warn: + summarystate = max(1, summarystate) + text += "(!)" + output.append(text) + + + + def get_sym(state): + if state == 0: + return "" + else: + sym = state_markers[state] + return sym + + statetxt, state = fc_port_admstates.get(int(admstate), ( "unknown", 3 )) + sym = get_sym(state) + output.append(statetxt+sym) + summarystate = max(state, summarystate) + + statetxt, state = fc_port_opstates.get(int(opstate), ("unknown", 3)) + sym = get_sym(state) + output.append(statetxt+sym) + summarystate = max(state, summarystate) + + statetxt, state = fc_port_phystates.get(int(phystate), ("unknown", 3)) + sym = get_sym(state) + output.append(statetxt+sym) + summarystate = max(state, summarystate) + + output.append(porttype_list[int(porttype)]) + + + return (summarystate, ', '.join(output), perfdata) + +check_config_variables.append("fc_port_admstates") +check_config_variables.append("fc_port_opstates") +check_config_variables.append("fc_port_phystates") +check_config_variables.append("fc_port_no_inventory_types") +check_config_variables.append("fc_port_no_inventory_admstates") +check_config_variables.append("fc_port_no_inventory_opstates") +check_config_variables.append("fc_port_no_inventory_phystates") +check_config_variables.append("fc_port_inventory_use_portname") + +check_info["fc_port"] = { + 'check_function' : check_fc_port, + 'inventory_function' : inventory_fc_port, + 'service_description' : 'FC Interface %s', + 'has_perfdata' : True, + 'group' : 'fc_port', + 'default_levels_variable': 'fc_port_default_levels', + 'snmp_scan_function' : lambda oid: + oid(".1.3.6.1.2.1.1.2.0").startswith(".1.3.6.1.4.1.1588.2.1.1") \ + and oid(".1.3.6.1.4.1.1588.2.1.1.1.6.2.1.*") == None, + 'snmp_info' : ( ".1.3.6.1.3.94", [ + "1.10.1.2", # connUnitPortIndex # 0 + "1.10.1.3", # connUnitPortType # 1 + "1.10.1.6", # connUnitPortState: # 2 + # user selected state of the port hardware + "1.10.1.7", # connUnitPortStatus: # 3 + # operational status for the port + "1.10.1.15", # 
connUnitPortSpeed: # 4
+                      # The speed of the port in kilobytes per second.
+            "1.10.1.17", # connUnitPortName # 5
+            "1.10.1.23", # connUnitPortHWState: # 6
+                      # hardware detected state of the port
+            "4.5.1.4", # connUnitPortStatCountTxObjects: # 7
+                      # The number of frames/packets/IOs/etc that have been transmitted
+                      # by this port. Note: A Fibre Channel frame starts with SOF and
+                      # ends with EOF. FC loop devices should not count frames passed
+                      # through. This value represents the sum total for all other Tx
+            "4.5.1.5", # connUnitPortStatCountRxObjects: # 8
+                      # The number of frames/packets/IOs/etc that have been received
+                      # by this port. Note: A Fibre Channel frame starts with SOF and
+                      # ends with EOF. FC loop devices should not count frames passed
+                      # through. This value represents the sum total for all other Rx
+            "4.5.1.6", # connUnitPortStatCountTxElements: # 9
+                      # The number of octets or bytes that have been transmitted
+                      # by this port. One second periodic polling of the port. This
+                      # value is saved and compared with the next polled value to
+                      # compute net throughput. Note, for Fibre Channel, ordered
+                      # sets are not included in the count.
+            "4.5.1.7", # connUnitPortStatCountRxElements: # 10
+                      # The number of octets or bytes that have been received
+                      # by this port. One second periodic polling of the port. This
+                      # value is saved and compared with the next polled value to
+                      # compute net throughput. Note, for Fibre Channel, ordered
+                      # sets are not included in the count.
+            "4.5.1.8", # connUnitPortStatCountBBCreditZero: # 11
+                      # Count of transitions in/out of BBcredit zero state.
+                      # The other side is not providing any credit.
+            "4.5.1.28", # connUnitPortStatCountClass3Discards: # 12
+                      # Count of Class 3 Frames that were discarded upon reception
+                      # at this port. There is no FBSY or FRJT generated for Class 3
+                      # Frames. They are simply discarded if they cannot be delivered.
+            "4.5.1.40", # connUnitPortStatCountInvalidCRC: # 13
+                      # Count of frames received with invalid CRC. This count is
+                      # part of the Link Error Status Block (LESB). (FC-PH 29.8). Loop
+                      # ports should not count CRC errors passing through when
+                      # monitoring.
+            "4.5.1.50", # connUnitPortStatCountEncodingDisparityErrors: # 14
+                      # Count of disparity errors received at this port.
+    ]),
+}
diff -Nru check-mk-1.2.2p3/fileinfo check-mk-1.2.6p12/fileinfo
--- check-mk-1.2.2p3/fileinfo 2013-11-05 09:23:07.000000000 +0000
+++ check-mk-1.2.6p12/fileinfo 2015-09-21 10:59:54.000000000 +0000
@@ -7,7 +7,7 @@
 # |           | |___| | | |  __/ (__|   <    | |  | | . \            |
 # |            \____|_| |_|\___|\___|_|\_\___|_|  |_|_|\_\           |
 # |                                                                  |
-# | Copyright Mathias Kettner 2013             mk@mathias-kettner.de |
+# | Copyright Mathias Kettner 2014             mk@mathias-kettner.de |
 # +------------------------------------------------------------------+
 #
 # This file is part of Check_MK.
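The fileinfo hunk below turns each grouping pattern into an (inclusion, exclusion) pair and lets the inclusion carry a {$DATE:...$} token that is expanded with the agent's reference time before fnmatch is applied. A standalone sketch of that matching logic (function names are illustrative; the shipped helpers are fileinfo_process_date and fileinfo_groups_of_file):

    import fnmatch
    import re
    import time

    def expand_date_pattern(pattern, reftime):
        # replace an embedded $DATE:<strftime-format>$ token, e.g.
        # "/var/tmp/backups/$DATE:%Y%m%d$.txt" -> "/var/tmp/backups/20140127.txt"
        disect = re.match('(/.*/)\$DATE:((?:%\w.?){1,})\$(.*)', pattern)
        if not disect:
            return pattern
        prefix, fmt, postfix = disect.groups()
        return prefix + time.strftime(fmt, time.localtime(reftime)) + postfix

    def file_matches_group(filename, inclusion, exclusion, reftime):
        inclusion = expand_date_pattern(inclusion, reftime)
        return fnmatch.fnmatch(filename, inclusion) \
            and not fnmatch.fnmatch(filename, exclusion)

Note that the expansion depends on the monitored host's reference time and local timezone, which is why the hunk threads reftime from the first agent line into both inventory and check functions.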
@@ -45,48 +45,54 @@
 
 def inventory_fileinfo(info, case):
     inventory = []
     added_groups = []
+    if info:
+        reftime = int(info[0][0])
     for line in info:
         if len(line) >= 3:
-            if line[1] == "missing":
-                continue
-            groups = fileinfo_groups_of_file(line[0])
-            if case == 'single' and len(groups) == 0:
+            groups = fileinfo_groups_of_file(line[0], reftime)
+            if case == 'single' and not groups and line[1] != 'missing':
                 inventory.append((line[0], {}));
-            if case == 'group' and len(groups) > 0:
+
+            if case == 'group' and groups:
                 for group in groups:
                     if group not in added_groups:
                         added_groups.append(group)
-                        inventory.append((group, \
-                            { "patterns" : fileinfo_patterns_of_group(group) }))
+                        inventory.append((group, {}))
     return inventory
 
-def fileinfo_groups_of_file(check_filename):
+def fileinfo_process_date(pattern,reftime):
+    disect = re.match('(/.*/)\$DATE:((?:%\w.?){1,})\$(.*)',pattern)
+    if disect:
+        prefix = disect.group(1)
+        datepattern = time.strftime(disect.group(2),time.localtime(reftime))
+        postfix = disect.group(3)
+        pattern = prefix+datepattern+postfix
+    return pattern
+
+def fileinfo_groups_of_file(check_filename,reftime):
     import fnmatch
     groups = []
     for line in host_extra_conf(g_hostname, fileinfo_groups):
         for group_name, pattern in line:
-            if fnmatch.fnmatch(check_filename, pattern):
+            if type(pattern) == str: # support old format
+                pattern = ( pattern, '' )
+            inclusion, exclusion = pattern
+            inclusion = fileinfo_process_date(inclusion,reftime)
+            if fnmatch.fnmatch(check_filename, inclusion) \
+                and not fnmatch.fnmatch(check_filename, exclusion):
                 groups.append(group_name)
     return groups
 
-def fileinfo_patterns_of_group(group):
-    patterns = []
-    for line in host_extra_conf(g_hostname, fileinfo_groups):
-        for group_name, pattern in line:
-            if group_name == group:
-                patterns.append(pattern)
-    return patterns
-
 def check_fileinfo(item, params, info):
     if len(info) == 0:
-        return (3, "UNKNOWN - no information sent by agent")
+        return (3, "no information sent by agent")
     reftime = int(info[0][0])
     check_definition = False
     for line in info[1:]:
         if item == line[0]:
             if line[1] == "missing":
-                return (3, "UNKNOWN - File not found")
+                return (3, "File not found")
             state = 0
             size = int(line[1])
             age = reftime - int(line[2])
@@ -95,24 +101,63 @@
                 ("size", size, get_filesize_human_readable),
                 ("age", age, get_age_human_readable) ]
     if check_definition == False:
-        return (3, "UNKNOWN - File not found")
+        return (3, "File not found")
     return fileinfo_check_function(check_definition, params)
 
-def check_fileinfogroups(item, params, info):
-    if len(info) == 0:
-        return (3, "UNKNOWN - no information sent by agent")
+# Extracts patterns that are relevant for the current host and item.
+# Constructs simple list of patterns and makes them available for the check
+def fileinfo_groups_precompile(hostname, item, params):
+    patterns = []
+    for line in host_extra_conf(hostname, fileinfo_groups):
+        for group_name, pattern in line:
+            if group_name == item:
+                if type(pattern) == str: # support old format
+                    pattern = (pattern, '')
+                patterns.append(pattern)
+    params['precompiled_patterns'] = patterns
+    return params
+
+def check_fileinfo_groups(item, params, info):
+    if not info:
+        return 3, "No information sent by agent"
     import fnmatch
     reftime = int(info[0][0])
 
+    # Get the grouping patterns (either compile or reuse the precompiled ones)
+    # Check_MK creates an empty string if the precompile function has
+    # not been executed yet. The precompile function creates an empty
+    # list when no rules/patterns are defined.
+ if 'precompiled_patterns' not in params: + params = fileinfo_groups_precompile(g_hostname, item, params) + count_all = 0 age_oldest = None age_newest = 0 size_all = 0 - #Start counting all values + size_smallest = None + size_largest = 0 + date_inclusion = "" + # Start counting values on all files for line in info[1:]: - for pattern in params.get('patterns',[]): - if fnmatch.fnmatch(line[0], pattern) and str(line[1]) != 'missing': - size_all += int(line[1]) + for pattern in params['precompiled_patterns']: + inclusion, exclusion = pattern + inclusion_tmp = fileinfo_process_date(inclusion,reftime) + if inclusion != inclusion_tmp: + inclusion = inclusion_tmp + date_inclusion = inclusion_tmp + # endswith("No such file...") is needed to + # support the solaris perl based version of fileinfo + if not line[0].endswith("No such file or directory") \ + and fnmatch.fnmatch(line[0], inclusion) and str(line[1]) not in ['missing',''] \ + and not fnmatch.fnmatch(line[0], exclusion): + size = int(line[1]) + size_all += size + if size_smallest == None: + size_smallest = size + else: + size_smallest = min(size_smallest, size) + size_largest = max(size_largest, size) + age = reftime - int(line[2]) if age_oldest == None: # very first match age_oldest = age @@ -121,37 +166,48 @@ age_oldest = max(age_oldest, age) age_newest = min(age_newest, age) count_all += 1 + if age_oldest == None: - age_oldest =0 - #Start Checking + age_oldest = 0 + + # Start Checking check_definition = [ - ("age_oldest", age_oldest, get_age_human_readable), - ("age_newest", age_newest, get_age_human_readable), - ("count", count_all, saveint), - ("size", size_all, get_filesize_human_readable)] + ("age_oldest", age_oldest, get_age_human_readable), + ("age_newest", age_newest, get_age_human_readable), + ("count", count_all, saveint), + ("size", size_all, get_filesize_human_readable), + ] + + if size_smallest is not None: + check_definition.append(("size_smallest", size_smallest, get_filesize_human_readable)) + if size_largest != 0: + check_definition.append(("size_largest", size_largest, get_filesize_human_readable)) + if date_inclusion: + check_definition.append(("date pattern", date_inclusion, str )) return fileinfo_check_function(check_definition, params) def fileinfo_check_function(check_definition, params): + import operator state = 0 infos = [] perfdata = [] for what, val, verbfunc in check_definition: infos.append("%s is %s" % (what, verbfunc(val))) - warn, crit = "", "" - for how, comp in [ ("min", "<" ), ("max", ">") ]: - p = params.get(how + what) - if p: - warn, crit = p - cfunc = eval("lambda a,b: a %s b" % comp) - if cfunc(val, crit): - state = 2 - infos[-1] += " (%s %s)(!!)" % (comp, verbfunc(crit)) - elif cfunc(val, warn): - state = max(state, 1) - infos[-1] += " (%s %s)(!)" % (comp, verbfunc(warn)) - perfdata.append((what, val, warn, crit)) - infotext = nagios_state_names[state] + " - " + ", ".join(infos) + if type(val) in [long, int]: # because strings go into infos but not into perfdata + warn, crit = "", "" + for how, comp, cfunc in [ ("min", "<", operator.lt), ("max", ">", operator.gt) ]: + p = params.get(how + what) + if p: + warn, crit = p + if cfunc(val, crit): + state = 2 + infos[-1] += " (%s %s)(!!)" % (comp, verbfunc(crit)) + elif cfunc(val, warn): + state = max(state, 1) + infos[-1] += " (%s %s)(!)" % (comp, verbfunc(warn)) + perfdata.append((what, val, warn, crit)) + infotext = ", ".join(infos) return (state, infotext, perfdata) @@ -163,10 +219,13 @@ "group" : "fileinfo", } + check_info['fileinfo.groups'] = { - 
"check_function" : check_fileinfogroups, + "check_function" : check_fileinfo_groups, "inventory_function" : lambda info: inventory_fileinfo(info, 'group'), "service_description" : "File group %s", "has_perfdata" : True, "group" : "fileinfo-groups", } + +precompile_params['fileinfo.groups'] = fileinfo_groups_precompile diff -Nru check-mk-1.2.2p3/fileinfo.groups check-mk-1.2.6p12/fileinfo.groups --- check-mk-1.2.2p3/fileinfo.groups 2013-11-05 09:23:07.000000000 +0000 +++ check-mk-1.2.6p12/fileinfo.groups 2015-06-24 09:48:36.000000000 +0000 @@ -1,44 +1,65 @@ -title: Check age, size and count of a group of files -agents: windows, linux -author: Bastian Kuhn +title: Age, size and count of a group of files +agents: windows, linux, freebsd, macosx, solaris +catalog: os/files license: GPL distribution: check_mk description: - This check monitors the size, age and the count of a group of files - in the target host's filesystem. - You can set lower and upper - limits for each: size of files, age of oldest, the newest file - and for the total count of files. - - This check needs some configuration in the agent, - as the agent needs to know the information about - which files need to be sent. - - Please refer to fileinfos manpage for information - about setting up the agent + This check monitors the size, age and the count of a group of files + in the target host's filesystem. You can set lower and upper + limits for each: size of files, age of oldest, the newest file + and for the total count of files. + + This check needs some configuration in the agent, as the agent needs to + know the information about which files need to be sent. Please refer to + fileinfo's manpage for information about setting up the agent. + + The check also needs a configured file grouping pattern. The name of the + file group is referenced in the check configuration as the item. + + File grouping patterns combine a simple pattern based on {*} and {?} to + join a subset of the file names send by the agent into a file group. In + addition an {exclude} pattern may be defined to exclude certain filenames. + + Furthermore, the current time/date in a configurable format may be + included in the pattern. The syntax is as follows: {$DATE:format-spec$}, + where {format-spec} is a list of time format directives of the unix date + command. Example: {$DATE:%Y%m%d$} is todays date, e.g. 20140127. A pattern + of {/var/tmp/backups/$DATE:%Y%m%d$.txt} would search for .txt files with + todays date as name in the directory /var/tmp/backups. 
item:
- Configured name of the filesize group
+ Configured name of the file group

examples:
-
- fileinfo_groups = [
+ # old syntax of file group patterns, still valid
+ fileinfo_groups = [
 ([("Auth_group",r"/var/log/auth*")], ALL_HOSTS),
 ([("Message_group", r"/var/log/messages*")], ['test2'], ALL_HOSTS),
- ]
+ ]

- check_parameters = [
- ({
- "minsize": (200.0, 300.0),
- "maxsize": (400.0, 500.0),
- "mincount": (2, 3),
- "maxcount": (3, 4),
- "minage_oldest": (50, 60),
- "maxage_oldest": (100, 120),
- "minage_newest": (10, 20),
- "maxage_newest": (30, 40),
- }, ALL_HOSTS, ["File group"]),
- ]
+ # new syntax of file group patterns, including exclude pattern
+ fileinfo_groups = [
+ ([("Auth_group", ("/var/log/auth*","/var/log/*.debug"))], ALL_HOSTS),
+ ([("Message_group", ("/var/log/messages*",""))], ['test2'], ALL_HOSTS),
+ ]
+
+ # syntax of check parameters
+ check_parameters = [
+ ({
+ "minsize": (200.0, 300.0),
+ "maxsize": (400.0, 500.0),
+ "mincount": (2, 3),
+ "maxcount": (3, 4),
+ "minage_oldest": (50, 60),
+ "maxage_oldest": (100, 120),
+ "minage_newest": (10, 20),
+ "maxage_newest": (30, 40),
+ "minsize_smallest": (2048, 1024),
+ "maxsize_smallest": (2048, 1024),
+ "minsize_largest": (10240, 20480),
+ "maxsize_largest": (10240, 20480),
+ }, ALL_HOSTS, ["File group"]),
+ ]

perfdata:
 Four values: the aggregated size of all files in group, the age
@@ -51,34 +72,48 @@
 [parameters]
 parameters (dict): This check uses dictionary based
- parameters. Per default no parameters are set and
- the check is always OK.
+ parameters. Per default no parameters are set and
+ the check is always OK.
+
+ {"minage_oldest"}: A pair of integers for the warning and
+ critical level of the minimum file age in seconds
+ for the oldest file found with the file group pattern.
+ A missing key or {None} disables the minimum age
+ checking. The borders themselves always belong to the
+ better state, so {(60, 120)} will make the check
+ OK as long as the age of the file is not exceeding
+ 60 seconds and warning if the age is 120 seconds
+ or less.
+
+ {"maxage_oldest"}: Pair of integers for the maximum file
+ age in seconds for the oldest file found with file group pattern.
+
+ {"minage_newest"}: Pair of integers for the minimum file
+ age in seconds for the newest file found with file group pattern.
+ {"maxage_newest"}: Pair of integers for the maximum file
+ age in seconds for the newest file found with file group pattern.

- {"minage_oldest"}: A pair of integers for the warning and
- critical level of the minimum file age in seconds
- for the oldest file found with the file group pattern.
- A missing key or {None} disables the minimum age
- checking. The borders itself always belong to the
- better state, so {(60, 120)} will make the check
- OK as long as the age of the file is not exceeding
- 60 seconds and warning if the age is 120 seconds
- or less.
+ {"minsize_smallest"}: Pair of integers for the minimum file
+ size in bytes for the smallest file found with the file group pattern.

- {"maxage_oldest"}: Pair of integers for the maximum file
- age in seconds for the oldest file found with file group pattern.
+ {"maxsize_smallest"}: Pair of integers for the maximum file
+ size in bytes for the smallest file found with the file group pattern.

- {"minage_newest"}: Pair of integers for the minimum file
- age in seconds for the newest file found with file group pattern.
+ {"minsize_largest"}: Pair of integers for the minimum file
+ size in bytes for the largest file found with the file group pattern.
- {"maxage_newest"}: Pair of integers for the maximum file - age in seconds for the newest file found with file group pattern. + {"maxsize_largest"}: Pair of integers for the maximum file + size in bytes for the largest file found with the file group pattern. - {"minsize"}: The minimum size for the aggregation of all files in bytes (pair of - integers). + {"minsize"}: Pair of integers for the minimum size of the aggregation + of all files in bytes in group. - {"maxsize"}: The maximum size. + {"maxsize"}: Pair of integers for the maximum size of the aggregation + of all files in bytes in group. - {"mincount"}: The minimum count of all files aggregatet in the group + {"mincount"}: Pair of integers for the minimum count of all files + aggregatet in group. - {"maxcount"}: The maximum count + {"maxcount"}: Pair of integers for the maximum count of all files + aggregatet in group. diff -Nru check-mk-1.2.2p3/fjdarye100_cadaps check-mk-1.2.6p12/fjdarye100_cadaps --- check-mk-1.2.2p3/fjdarye100_cadaps 2013-11-05 09:23:07.000000000 +0000 +++ check-mk-1.2.6p12/fjdarye100_cadaps 2015-06-24 09:48:36.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. @@ -38,10 +38,16 @@ # FJDARY-E100::fjdaryCaStatus.1 = INTEGER: normal(1) # FJDARY-E100::fjdaryCaStatus.2 = INTEGER: invalid(4) -check_info['fjdarye100_cadaps'] = (check_fjdarye_item, "Channel Adapter %s", 0, inventory_fjdarye_item) check_includes['fjdarye100_cadaps'] = ["fjdarye.include"] # 1: fjdaryCaIndex, 3: fjdaryCaStatus -snmp_info['fjdarye100_cadaps'] = (".1.3.6.1.4.1.211.1.21.1.100.2.3.2.1", [ 1, 3 ]) -snmp_scan_functions['fjdarye100_cadaps'] \ - = lambda oid: oid(".1.3.6.1.2.1.1.2.0") == ".1.3.6.1.4.1.211.1.21.1.100" + +check_info["fjdarye100_cadaps"] = { + 'check_function': check_fjdarye_item, + 'inventory_function': inventory_fjdarye_item, + 'service_description': 'Channel Adapter %s', + # 1: fjdaryCaIndex, 3: fjdaryCaStatus + 'snmp_info': ('.1.3.6.1.4.1.211.1.21.1.100.2.3.2.1', [1, 3]), + 'snmp_scan_function': \ + lambda oid: oid(".1.3.6.1.2.1.1.2.0") == ".1.3.6.1.4.1.211.1.21.1.100", +} diff -Nru check-mk-1.2.2p3/fjdarye100_cmods check-mk-1.2.6p12/fjdarye100_cmods --- check-mk-1.2.2p3/fjdarye100_cmods 2013-11-05 09:23:07.000000000 +0000 +++ check-mk-1.2.6p12/fjdarye100_cmods 2015-06-24 09:48:36.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. 
@@ -36,10 +36,16 @@ # FJDARY-E100::fjdaryCmStatus.0 = INTEGER: normal(1) # FJDARY-E100::fjdaryCmStatus.1 = INTEGER: normal(1) -check_info['fjdarye100_cmods'] = (check_fjdarye_item, "Channel Module %s", 0, inventory_fjdarye_item) check_includes['fjdarye100_cmods'] = ["fjdarye.include"] # 1: fjdaryCmIndex, 3: fjdaryCmStatus -snmp_info['fjdarye100_cmods'] = (".1.3.6.1.4.1.211.1.21.1.100.2.1.2.1", [ 1, 3 ]) -snmp_scan_functions['fjdarye100_cmods'] \ - = lambda oid: oid(".1.3.6.1.2.1.1.2.0") == ".1.3.6.1.4.1.211.1.21.1.100" + +check_info["fjdarye100_cmods"] = { + 'check_function': check_fjdarye_item, + 'inventory_function': inventory_fjdarye_item, + 'service_description': 'Channel Module %s', + # 1: fjdaryCmIndex, 3: fjdaryCmStatus + 'snmp_info': ('.1.3.6.1.4.1.211.1.21.1.100.2.1.2.1', [1, 3]), + 'snmp_scan_function': \ + lambda oid: oid(".1.3.6.1.2.1.1.2.0") == ".1.3.6.1.4.1.211.1.21.1.100", +} diff -Nru check-mk-1.2.2p3/fjdarye100_cmods_mem check-mk-1.2.6p12/fjdarye100_cmods_mem --- check-mk-1.2.2p3/fjdarye100_cmods_mem 2013-11-05 09:23:07.000000000 +0000 +++ check-mk-1.2.6p12/fjdarye100_cmods_mem 2015-06-24 09:48:36.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. @@ -38,10 +38,16 @@ # FJDARY-E100::fjdaryCmmemoryStatus.1 = INTEGER: normal(1) # FJDARY-E100::fjdaryCmmemoryStatus.2 = INTEGER: invalid(4) -check_info['fjdarye100_cmods_mem'] = (check_fjdarye_item, "Channel Module Memory %s", 0, inventory_fjdarye_item) check_includes['fjdarye100_cmods_mem'] = ["fjdarye.include"] # 1: fjdaryCmmemoryIndex, 3: fjdaryCmmemoryStatus -snmp_info['fjdarye100_cmods_mem'] = (".1.3.6.1.4.1.211.1.21.1.100.2.4.2.1", [ 1, 3 ]) -snmp_scan_functions['fjdarye100_cmods_mem'] \ - = lambda oid: oid(".1.3.6.1.2.1.1.2.0") == ".1.3.6.1.4.1.211.1.21.1.100" + +check_info["fjdarye100_cmods_mem"] = { + 'check_function': check_fjdarye_item, + 'inventory_function': inventory_fjdarye_item, + 'service_description': 'Channel Module Memory %s', + # 1: fjdaryCmmemoryIndex, 3: fjdaryCmmemoryStatus + 'snmp_info': ('.1.3.6.1.4.1.211.1.21.1.100.2.4.2.1', [1, 3]), + 'snmp_scan_function': \ + lambda oid: oid(".1.3.6.1.2.1.1.2.0") == ".1.3.6.1.4.1.211.1.21.1.100", +} diff -Nru check-mk-1.2.2p3/fjdarye100_conencs check-mk-1.2.6p12/fjdarye100_conencs --- check-mk-1.2.2p3/fjdarye100_conencs 2013-11-05 09:23:07.000000000 +0000 +++ check-mk-1.2.6p12/fjdarye100_conencs 2015-06-24 09:48:36.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. 
@@ -34,10 +34,16 @@ # snmpwalk -c public -v 1 dx80 .1.3.6.1.4.1.211.1.21.1.100.2.10.2.1.3 # FJDARY-E100::fjdaryCeStatus.0 = INTEGER: normal(1) -check_info['fjdarye100_conencs'] = (check_fjdarye_item, "Controller Enclosure %s", 0, inventory_fjdarye_item) check_includes['fjdarye100_conencs'] = ["fjdarye.include"] # 1: fjdaryCeIndex, 3: fjdaryCeStatus -snmp_info['fjdarye100_conencs'] = (".1.3.6.1.4.1.211.1.21.1.100.2.10.2.1", [ 1, 3 ]) -snmp_scan_functions['fjdarye100_conencs'] \ - = lambda oid: oid(".1.3.6.1.2.1.1.2.0") == ".1.3.6.1.4.1.211.1.21.1.100" + +check_info["fjdarye100_conencs"] = { + 'check_function': check_fjdarye_item, + 'inventory_function': inventory_fjdarye_item, + 'service_description': 'Controller Enclosure %s', + # 1: fjdaryCeIndex, 3: fjdaryCeStatus + 'snmp_info': ('.1.3.6.1.4.1.211.1.21.1.100.2.10.2.1', [1, 3]), + 'snmp_scan_function': \ + lambda oid: oid(".1.3.6.1.2.1.1.2.0") == ".1.3.6.1.4.1.211.1.21.1.100", +} diff -Nru check-mk-1.2.2p3/fjdarye100_cpsus check-mk-1.2.6p12/fjdarye100_cpsus --- check-mk-1.2.2p3/fjdarye100_cpsus 2013-11-05 09:23:07.000000000 +0000 +++ check-mk-1.2.6p12/fjdarye100_cpsus 2015-06-24 09:48:36.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. @@ -36,10 +36,16 @@ # FJDARY-E100::fjdaryCpsuStatus.0 = INTEGER: normal(1) # FJDARY-E100::fjdaryCpsuStatus.1 = INTEGER: normal(1) -check_info['fjdarye100_cpsus'] = (check_fjdarye_item, "CPSU %s", 0, inventory_fjdarye_item) check_includes['fjdarye100_cpsus'] = ["fjdarye.include"] # 1: fjdaryPsuIndex, 3: fjdaryPsuStatus -snmp_info['fjdarye100_cpsus'] = (".1.3.6.1.4.1.211.1.21.1.100.2.13.2.1", [ 1, 3 ]) -snmp_scan_functions['fjdarye100_cpsus'] \ - = lambda oid: oid(".1.3.6.1.2.1.1.2.0") == ".1.3.6.1.4.1.211.1.21.1.100" + +check_info["fjdarye100_cpsus"] = { + 'check_function': check_fjdarye_item, + 'inventory_function': inventory_fjdarye_item, + 'service_description': 'CPSU %s', + # 1: fjdaryPsuIndex, 3: fjdaryPsuStatus + 'snmp_info': ('.1.3.6.1.4.1.211.1.21.1.100.2.13.2.1', [1, 3]), + 'snmp_scan_function': \ + lambda oid: oid(".1.3.6.1.2.1.1.2.0") == ".1.3.6.1.4.1.211.1.21.1.100", +} diff -Nru check-mk-1.2.2p3/fjdarye100_devencs check-mk-1.2.6p12/fjdarye100_devencs --- check-mk-1.2.2p3/fjdarye100_devencs 2013-11-05 09:23:07.000000000 +0000 +++ check-mk-1.2.6p12/fjdarye100_devencs 2015-06-24 09:48:36.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. 
@@ -36,10 +36,16 @@ # FJDARY-E100::fjdaryDeStatus.0 = INTEGER: invalid(4) # FJDARY-E100::fjdaryDeStatus.1 = INTEGER: invalid(4) -check_info['fjdarye100_devencs'] = (check_fjdarye_item, "Device Enclosure %s", 0, inventory_fjdarye_item) check_includes['fjdarye100_devencs'] = ["fjdarye.include"] # 1: fjdaryDeIndex, 3: fjdaryDeStatus -snmp_info['fjdarye100_devencs'] = (".1.3.6.1.4.1.211.1.21.1.100.2.14.2.1", [ 1, 3 ]) -snmp_scan_functions['fjdarye100_devencs'] \ - = lambda oid: oid(".1.3.6.1.2.1.1.2.0") == ".1.3.6.1.4.1.211.1.21.1.100" + +check_info["fjdarye100_devencs"] = { + 'check_function': check_fjdarye_item, + 'inventory_function': inventory_fjdarye_item, + 'service_description': 'Device Enclosure %s', + # 1: fjdaryDeIndex, 3: fjdaryDeStatus + 'snmp_info': ('.1.3.6.1.4.1.211.1.21.1.100.2.14.2.1', [1, 3]), + 'snmp_scan_function': \ + lambda oid: oid(".1.3.6.1.2.1.1.2.0") == ".1.3.6.1.4.1.211.1.21.1.100", +} diff -Nru check-mk-1.2.2p3/fjdarye100_disks check-mk-1.2.6p12/fjdarye100_disks --- check-mk-1.2.2p3/fjdarye100_disks 2013-11-05 09:23:07.000000000 +0000 +++ check-mk-1.2.6p12/fjdarye100_disks 2015-06-24 09:48:37.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. @@ -61,23 +61,35 @@ # -------------------------------------------------------------------------------- # 1. Summary version of check -> all disks into one service -check_info['fjdarye100_disks.summary'] = (check_fjdarye_disks_summary, "Disk summary", 0, inventory_fjdarye_disks_summary) check_includes['fjdarye100_disks.summary'] = ["fjdarye.include"] # 1: fjdaryDiskIndex, 3: fjdaryDiskStatus -snmp_info['fjdarye100_disks.summary'] = (".1.3.6.1.4.1.211.1.21.1.100.2.19.2.1", [ 1, 3 ]) -snmp_scan_functions['fjdarye100_disks.summary'] \ - = lambda oid: oid(".1.3.6.1.2.1.1.2.0") == ".1.3.6.1.4.1.211.1.21.1.100" # -------------------------------------------------------------------------------- # 2. 
Single disk version of check -> one check for each disk -check_info['fjdarye100_disks'] = (check_fjdarye_disks, "Disk %s", 0, inventory_fjdarye_disks) check_includes['fjdarye100_disks'] = ["fjdarye.include"] -checkgroup_of['fjdarye100_disks'] = "raid_disk" # 1: fjdaryDiskIndex, 3: fjdaryDiskStatus -snmp_info['fjdarye100_disks'] = (".1.3.6.1.4.1.211.1.21.1.100.2.19.2.1", [ 1, 3 ]) -snmp_scan_functions['fjdarye100_disks'] \ - = lambda oid: oid(".1.3.6.1.2.1.1.2.0") == ".1.3.6.1.4.1.211.1.21.1.100" + +check_info["fjdarye100_disks.summary"] = { + 'check_function': check_fjdarye_disks_summary, + 'inventory_function': inventory_fjdarye_disks_summary, + 'service_description': 'Disk summary', + # 1: fjdaryDiskIndex, 3: fjdaryDiskStatus + 'snmp_info': ('.1.3.6.1.4.1.211.1.21.1.100.2.19.2.1', [1, 3]), + 'snmp_scan_function': \ + lambda oid: oid(".1.3.6.1.2.1.1.2.0") == ".1.3.6.1.4.1.211.1.21.1.100", +} + +check_info["fjdarye100_disks"] = { + 'check_function': check_fjdarye_disks, + 'inventory_function': inventory_fjdarye_disks, + 'service_description': 'Disk %s', + # 1: fjdaryDiskIndex, 3: fjdaryDiskStatus + 'snmp_info': ('.1.3.6.1.4.1.211.1.21.1.100.2.19.2.1', [1, 3]), + 'snmp_scan_function': \ + lambda oid: oid(".1.3.6.1.2.1.1.2.0") == ".1.3.6.1.4.1.211.1.21.1.100", + 'group': 'raid_disk', +} diff -Nru check-mk-1.2.2p3/fjdarye100_disks.summary check-mk-1.2.6p12/fjdarye100_disks.summary --- check-mk-1.2.2p3/fjdarye100_disks.summary 2013-11-05 09:23:07.000000000 +0000 +++ check-mk-1.2.6p12/fjdarye100_disks.summary 2015-06-24 09:48:36.000000000 +0000 @@ -1,6 +1,6 @@ -title: Monitor summarized status of all disks in Fujitsu ETERNUS DX80 S2 storage systems -agents: SNMP -author: Philipp Hoefflin +title: Fujitsu ETERNUS DX80 S2 storage systems: Summarized status of all disks +agents: snmp +catalog: hw/storagehw/fujitsu license: GPLv2 distribution: none description: diff -Nru check-mk-1.2.2p3/fjdarye100_rluns check-mk-1.2.6p12/fjdarye100_rluns --- check-mk-1.2.2p3/fjdarye100_rluns 2013-11-05 09:23:07.000000000 +0000 +++ check-mk-1.2.6p12/fjdarye100_rluns 2015-06-24 09:48:37.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. @@ -24,64 +24,12 @@ # to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, # Boston, MA 02110-1301 USA. -# check_mk plugin to monitor storage systems like Fujitsu ETERNUS DX80 S2 supporting FJDARY-E100.MIB -# Copyright (c) 2012 FuH Entwicklungsgesellschaft mbH, Umkirch, Germany. All rights reserved. 
-# Author: Philipp Hoefflin, 2012, hoefflin+cmk@fuh-e.de - -# example snmpwalk output: -# snmpwalk -c public -v 1 dx80 .1.3.6.1.4.1.211.1.21.1.100.3.4.2.1.2 -# FJDARY-E100::fjdaryRluInfoTable0.0 = -# Hex-STRING: 00 00 00 A0 10 10 00 00 04 00 00 00 00 FF FF FF -# 00 00 D0 E3 00 00 00 00 80 00 00 00 80 00 00 00 -# 00 00 00 00 01 20 40 40 0F 01 01 04 32 00 00 00 -# 01 00 00 01 -# FJDARY-E100::fjdaryRluInfoTable0.1 = -# Hex-STRING: 01 00 00 A0 11 11 00 00 01 00 00 00 00 FF FF FF -# 00 00 00 11 01 00 00 00 00 02 00 00 00 02 00 00 -# 01 00 01 00 01 20 40 40 0F 01 01 02 32 00 00 00 -# 01 00 00 01 -# FJDARY-E100::fjdaryRluInfoTable0.2 = -# Hex-STRING: 02 00 03 00 00 00 00 00 00 00 00 00 00 FF FF FF -# 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 -# 02 00 02 00 00 00 00 00 00 00 00 00 00 00 00 00 -# 00 00 00 00 -# FJDARY-E100::fjdaryRluInfoTable0.3 = -# Hex-STRING: 03 00 03 00 00 00 00 00 00 00 00 00 00 FF FF FF -# 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 -# 03 00 03 00 00 00 00 00 00 00 00 00 00 00 00 00 -# 00 00 00 00 -# - -def inventoryfunc(info): - inventory = [] - for line in info: - rawdata = line[1] - if rawdata[3] == '\xa0': # RLUN is present - inventory.append( (line[0], "", None) ) - return inventory - -def checkfunc(item, _no_params, info): - for line in info: - if item == line[0]: - rawdata = line[1] - if rawdata[3] != '\xa0': - return (2, "CRIT - RLUN is not present" ) - elif rawdata[2] == '\x08': - return (1, "WARN - RLUN is rebuilding") - elif rawdata[2] == '\x07': - return (1, "WARN - RLUN copyback in progress") - elif rawdata[2] == '\x41': - return (1, "WARN - RLUN spare is in use") - elif rawdata[2] == '\x00': - return (0, "OK - RLUN is in normal state") # assumption - else: - return (2, "CRIT - RLUN in unknown state %02x" % ord(rawdata[2]) ) - - return (3, "UNKNOWN - No RLUN %s in SNMP output" % item) - -check_info['fjdarye100_rluns'] = (checkfunc, "RLUN %s", 0, inventoryfunc) -# 2: fjdaryRluInfoTable0 -snmp_info['fjdarye100_rluns'] = (".1.3.6.1.4.1.211.1.21.1.100.3.4.2.1", [ 0, "2" ]) - -snmp_scan_functions['fjdarye100_rluns'] \ - = lambda oid: oid(".1.3.6.1.2.1.1.2.0") == ".1.3.6.1.4.1.211.1.21.1.100" +check_info["fjdarye100_rluns"] = { + 'includes' : ["fjdarye.include"], + 'check_function' : check_fjdarye_rluns, + 'inventory_function' : inventory_fjdarye_rluns, + 'service_description' : 'RLUN %s', + 'snmp_info' : ('.1.3.6.1.4.1.211.1.21.1.100.3.4.2.1', [0, '2']), + 'snmp_scan_function' : \ + lambda oid: oid(".1.3.6.1.2.1.1.2.0") == ".1.3.6.1.4.1.211.1.21.1.100", +} diff -Nru check-mk-1.2.2p3/fjdarye100_sum check-mk-1.2.6p12/fjdarye100_sum --- check-mk-1.2.2p3/fjdarye100_sum 2013-11-05 09:23:07.000000000 +0000 +++ check-mk-1.2.6p12/fjdarye100_sum 2015-06-24 09:48:37.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. 
@@ -32,31 +32,16 @@ # snmpwalk -c public -v 1 dx80 .1.3.6.1.4.1.211.1.21.1.100.6.0 # FJDARY-E100::fjdaryUnitStatus.0 = INTEGER: ok(3) -fjdarye100_sum_status = { 1: 'unknown', 2: 'unused', 3: 'ok', - 4: 'warning', 5: 'failed' } -def inventory_fjdarye100_sum(info): - if len(info[0]) == 1: - return [ (0, None) ] - -def check_fjdarye100_sum(index, _no_param, info): - for line in info: - if len(info[0]) == 1: - status = int(line[0]) - text = "Status is %s" % fjdarye100_sum_status[status] - - if status == 3: - return (0, "OK - %s" % text) - elif status == 4: - return (1, "WARN - %s" % text) - else: - return (2, "CRIT - %s" % text) - - return (3, "UNKNOWN - No status summary present" ) - -check_info['fjdarye100_sum'] = (check_fjdarye100_sum, "Summary Status %s", 0, inventory_fjdarye100_sum) -# 1: fjdaryUnitStatus -snmp_info['fjdarye100_sum'] = (".1.3.6.1.4.1.211.1.21.1.100.6", [ "0" ]) -snmp_scan_functions['fjdarye100_sum'] \ - = lambda oid: oid(".1.3.6.1.2.1.1.2.0") == ".1.3.6.1.4.1.211.1.21.1.100" + +check_info["fjdarye100_sum"] = { + 'check_function' : check_fjdarye_sum, + 'include' : "fjdarye.include", + 'inventory_function' : inventory_fjdarye_sum, + 'service_description' : 'Summary Status %s', + # 1: fjdaryUnitStatus + 'snmp_info': ('.1.3.6.1.4.1.211.1.21.1.100.6', ['0']), + 'snmp_scan_function': \ + lambda oid: oid(".1.3.6.1.2.1.1.2.0") == ".1.3.6.1.4.1.211.1.21.1.100", +} diff -Nru check-mk-1.2.2p3/fjdarye100_syscaps check-mk-1.2.6p12/fjdarye100_syscaps --- check-mk-1.2.2p3/fjdarye100_syscaps 2013-11-05 09:23:07.000000000 +0000 +++ check-mk-1.2.6p12/fjdarye100_syscaps 2015-06-24 09:48:37.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. @@ -36,10 +36,16 @@ # FJDARY-E100::fjdaryScuStatus.0 = INTEGER: normal(1) # FJDARY-E100::fjdaryScuStatus.1 = INTEGER: normal(1) -check_info['fjdarye100_syscaps'] = (check_fjdarye_item, "System Capacitor Unit %s", 0, inventory_fjdarye_item) check_includes['fjdarye100_syscaps'] = ["fjdarye.include"] # 1: fjdaryScuIndex, 3: fjdaryScuStatus -snmp_info['fjdarye100_syscaps'] = (".1.3.6.1.4.1.211.1.21.1.100.2.9.2.1", [ 1, 3 ]) -snmp_scan_functions['fjdarye100_syscaps'] \ - = lambda oid: oid(".1.3.6.1.2.1.1.2.0") == ".1.3.6.1.4.1.211.1.21.1.100" + +check_info["fjdarye100_syscaps"] = { + 'check_function': check_fjdarye_item, + 'inventory_function': inventory_fjdarye_item, + 'service_description': 'System Capacitor Unit %s', + # 1: fjdaryScuIndex, 3: fjdaryScuStatus + 'snmp_info': ('.1.3.6.1.4.1.211.1.21.1.100.2.9.2.1', [1, 3]), + 'snmp_scan_function': \ + lambda oid: oid(".1.3.6.1.2.1.1.2.0") == ".1.3.6.1.4.1.211.1.21.1.100", +} diff -Nru check-mk-1.2.2p3/fjdarye101_cadaps check-mk-1.2.6p12/fjdarye101_cadaps --- check-mk-1.2.2p3/fjdarye101_cadaps 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/fjdarye101_cadaps 2015-06-24 09:48:37.000000000 +0000 @@ -0,0 +1,36 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . 
\ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. + +check_info["fjdarye101_cadaps"] = { + 'check_function' : check_fjdarye_item, + 'include' : "fjdarye.include", + 'inventory_function' : inventory_fjdarye_item, + 'service_description' : 'Channel Adapter %s', + # 1: fjdaryCaIndex, 3: fjdaryCaStatus + 'snmp_info': ('.1.3.6.1.4.1.211.1.21.1.101.2.3.2.1', [1, 3]), + 'snmp_scan_function' : \ + lambda oid: oid(".1.3.6.1.2.1.1.2.0") == ".1.3.6.1.4.1.211.1.21.1.101", +} diff -Nru check-mk-1.2.2p3/fjdarye101_cmods check-mk-1.2.6p12/fjdarye101_cmods --- check-mk-1.2.2p3/fjdarye101_cmods 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/fjdarye101_cmods 2015-06-24 09:48:37.000000000 +0000 @@ -0,0 +1,36 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. 
+ +check_info["fjdarye101_cmods"] = { + 'check_function' : check_fjdarye_item, + 'inventory_function' : inventory_fjdarye_item, + 'include' : "fjdarye.include", + 'service_description' : 'Channel Module %s', + # 1: fjdaryCmIndex, 3: fjdaryCmStatus + 'snmp_info': ('.1.3.6.1.4.1.211.1.21.1.101.2.1.2.1', [1, 3]), + 'snmp_scan_function' : \ + lambda oid: oid(".1.3.6.1.2.1.1.2.0") == ".1.3.6.1.4.1.211.1.21.1.101", +} diff -Nru check-mk-1.2.2p3/fjdarye101_cmods_mem check-mk-1.2.6p12/fjdarye101_cmods_mem --- check-mk-1.2.2p3/fjdarye101_cmods_mem 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/fjdarye101_cmods_mem 2015-06-24 09:48:37.000000000 +0000 @@ -0,0 +1,36 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. + +check_info["fjdarye101_cmods_mem"] = { + 'check_function' : check_fjdarye_item, + 'inventory_function' : inventory_fjdarye_item, + 'include' : "fjdarye.include", + 'service_description' : 'Channel Module Memory %s', + # 1: fjdaryCmmemoryIndex, 3: fjdaryCmmemoryStatus + 'snmp_info': ('.1.3.6.1.4.1.211.1.21.1.101.2.4.2.1', [1, 3]), + 'snmp_scan_function': \ + lambda oid: oid(".1.3.6.1.2.1.1.2.0") == ".1.3.6.1.4.1.211.1.21.1.101", +} diff -Nru check-mk-1.2.2p3/fjdarye101_conencs check-mk-1.2.6p12/fjdarye101_conencs --- check-mk-1.2.2p3/fjdarye101_conencs 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/fjdarye101_conencs 2015-06-24 09:48:37.000000000 +0000 @@ -0,0 +1,36 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. 
check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. + +check_info["fjdarye101_conencs"] = { + 'check_function' : check_fjdarye_item, + 'inventory_function' : inventory_fjdarye_item, + 'include' : "fjdarye.include", + 'service_description' : 'Controller Enclosure %s', + # 1: fjdaryCeIndex, 3: fjdaryCeStatus + 'snmp_info': ('.1.3.6.1.4.1.211.1.21.1.101.2.10.2.1', [1, 3]), + 'snmp_scan_function': \ + lambda oid: oid(".1.3.6.1.2.1.1.2.0") == ".1.3.6.1.4.1.211.1.21.1.101", +} diff -Nru check-mk-1.2.2p3/fjdarye101_disks check-mk-1.2.6p12/fjdarye101_disks --- check-mk-1.2.2p3/fjdarye101_disks 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/fjdarye101_disks 2015-09-21 10:59:54.000000000 +0000 @@ -0,0 +1,47 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. 
+ +check_info["fjdarye101_disks"] = { + 'includes' : [ "fjdarye.include"], + 'check_function' : check_fjdarye_disks, + 'inventory_function' : inventory_fjdarye_disks, + 'service_description' : 'Disk %s', + 'snmp_info' : ('.1.3.6.1.4.1.211.1.21.1.101.2.12.2.1', [1, 3]), + 'snmp_scan_function' : \ + lambda oid: oid(".1.3.6.1.2.1.1.2.0") == ".1.3.6.1.4.1.211.1.21.1.101", + 'group' : 'raid_disk', +} + +check_info["fjdarye101_disks.summary"] = { + 'includes' : [ "fjdarye.include"], + 'check_function' : check_fjdarye_disks_summary, + 'inventory_function' : inventory_fjdarye_disks_summary, + 'service_description' : 'Disk summary', + 'snmp_info' : ('.1.3.6.1.4.1.211.1.21.1.101.2.12.2.1', [1, 3]), + 'snmp_scan_function' : \ + lambda oid: oid(".1.3.6.1.2.1.1.2.0") == ".1.3.6.1.4.1.211.1.21.1.101", + 'group' : 'raid_disk', +} diff -Nru check-mk-1.2.2p3/fjdarye101_disks.summary check-mk-1.2.6p12/fjdarye101_disks.summary --- check-mk-1.2.2p3/fjdarye101_disks.summary 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/fjdarye101_disks.summary 2015-06-24 09:48:36.000000000 +0000 @@ -0,0 +1,34 @@ +title: Fujitsu storage systems FW 2013: Summarized status of all disks +agents: snmp +catalog: hw/storagehw/fujitsu +license: GPLv2 +distribution: none +description: + This check monitors the reported summary status of all installed + physical disks + in storage systems from Fujitsu which support the + FJDARY-E101.MIB. + This is an adaption of the corresponding fjdarye60 check. + +item: + {None} + +inventory: + If at least one disk is found, exactly one check will be created for + the host. + +examples: + # Expect 5 available(=online) and one spare disk + checks += [ + ( 'arc', 'fjdarye100_disks.summary', None, { "available": 5, "spare": 1 } ) + ] + +[parameters] +expected_state (dictionary): A dictionary from the statenames to the number + of disks expected in that state. Possible state names are: + {"available"}, {"broken"}, {"notavailable"}, {"notsupported"},{"present"}, + {"readying"}, {"recovering"}, {"partbroken"}, {"spare"}, {"formating"}, + {"unformated"}, {"notexist"}, {"copying"}. The check will report {CRITICAL} + if there are not enough disks in at least one state or otherwise {WARNING} if at + least one state has an exceeding number of disks. + diff -Nru check-mk-1.2.2p3/fjdarye101_rluns check-mk-1.2.6p12/fjdarye101_rluns --- check-mk-1.2.2p3/fjdarye101_rluns 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/fjdarye101_rluns 2015-06-24 09:48:37.000000000 +0000 @@ -0,0 +1,35 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. 
You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. + +check_info["fjdarye101_rluns"] = { + 'includes' : ["fjdarye.include"], + 'check_function' : check_fjdarye_rluns, + 'inventory_function' : inventory_fjdarye_rluns, + 'service_description' : 'RLUN %s', + 'snmp_info' : ('.1.3.6.1.4.1.211.1.21.1.101.3.4.2.1', [ 0, 2] ), + 'snmp_scan_function' : \ + lambda oid: oid(".1.3.6.1.2.1.1.2.0") == ".1.3.6.1.4.1.211.1.21.1.101", +} diff -Nru check-mk-1.2.2p3/fjdarye101_sum check-mk-1.2.6p12/fjdarye101_sum --- check-mk-1.2.2p3/fjdarye101_sum 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/fjdarye101_sum 2015-06-24 09:48:37.000000000 +0000 @@ -0,0 +1,44 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. + +# check_mk plugin to monitor storage systems like Fujitsu ETERNUS DX80 S2 supporting FJDARY-E100.MIB +# Copyright (c) 2012 FuH Entwicklungsgesellschaft mbH, Umkirch, Germany. All rights reserved. +# Author: Philipp Hoefflin, 2012, hoefflin+cmk@fuh-e.de + +# example snmpwalk output: +# snmpwalk -c public -v 1 dx80 .1.3.6.1.4.1.211.1.21.1.100.6.0 +# FJDARY-E100::fjdaryUnitStatus.0 = INTEGER: ok(3) + +check_info["fjdarye101_sum"] = { + 'check_function' : check_fjdarye_sum, + 'include' : "fjdarye.include", + 'inventory_function' : inventory_fjdarye_sum, + 'service_description' : 'Summary Status %s', + # 1: fjdaryUnitStatus + 'snmp_info': ('.1.3.6.1.4.1.211.1.21.1.101.6', ['0']), + 'snmp_scan_function': \ + lambda oid: oid(".1.3.6.1.2.1.1.2.0") == ".1.3.6.1.4.1.211.1.21.1.101", +} diff -Nru check-mk-1.2.2p3/fjdarye101_syscaps check-mk-1.2.6p12/fjdarye101_syscaps --- check-mk-1.2.2p3/fjdarye101_syscaps 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/fjdarye101_syscaps 2015-06-24 09:48:37.000000000 +0000 @@ -0,0 +1,37 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . 
\ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. + + +check_info["fjdarye101_syscaps"] = { + 'check_function' : check_fjdarye_item, + 'include' : "fjdarye.include", + 'inventory_function' : inventory_fjdarye_item, + 'service_description' : 'System Capacitor Unit %s', + # 1: fjdaryScuIndex, 3: fjdaryScuStatus + 'snmp_info': ('.1.3.6.1.4.1.211.1.21.1.101.2.9.2.1', [1, 3]), + 'snmp_scan_function': \ + lambda oid: oid(".1.3.6.1.2.1.1.2.0") == ".1.3.6.1.4.1.211.1.21.1.101", +} diff -Nru check-mk-1.2.2p3/fjdarye60_cadaps check-mk-1.2.6p12/fjdarye60_cadaps --- check-mk-1.2.2p3/fjdarye60_cadaps 2013-11-05 09:23:07.000000000 +0000 +++ check-mk-1.2.6p12/fjdarye60_cadaps 2015-06-24 09:48:37.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. @@ -24,10 +24,16 @@ # to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, # Boston, MA 02110-1301 USA. -check_info['fjdarye60_cadaps'] = (check_fjdarye_item, "Channel Adapter %s", 0, inventory_fjdarye_item) check_includes['fjdarye60_cadaps'] = ["fjdarye.include"] # 1: fjdaryCaIndex, 3: fjdaryCaStatus -snmp_info['fjdarye60_cadaps'] = (".1.3.6.1.4.1.211.1.21.1.60.2.2.2.1", [ 1, 3 ]) -snmp_scan_functions['fjdarye60_cadaps'] \ - = lambda oid: oid(".1.3.6.1.2.1.1.2.0") == ".1.3.6.1.4.1.211.1.21.1.60" + +check_info["fjdarye60_cadaps"] = { + 'check_function': check_fjdarye_item, + 'inventory_function': inventory_fjdarye_item, + 'service_description': 'Channel Adapter %s', + # 1: fjdaryCaIndex, 3: fjdaryCaStatus + 'snmp_info': ('.1.3.6.1.4.1.211.1.21.1.60.2.2.2.1', [1, 3]), + 'snmp_scan_function': \ + lambda oid: oid(".1.3.6.1.2.1.1.2.0") == ".1.3.6.1.4.1.211.1.21.1.60", +} diff -Nru check-mk-1.2.2p3/fjdarye60_cmods check-mk-1.2.6p12/fjdarye60_cmods --- check-mk-1.2.2p3/fjdarye60_cmods 2013-11-05 09:23:07.000000000 +0000 +++ check-mk-1.2.6p12/fjdarye60_cmods 2015-06-24 09:48:37.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. @@ -24,10 +24,16 @@ # to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, # Boston, MA 02110-1301 USA. 
-check_info['fjdarye60_cmods'] = (check_fjdarye_item, "Channel Module %s", 0, inventory_fjdarye_item) check_includes['fjdarye60_cmods'] = ["fjdarye.include"] # 1: fjdaryCmIndex, 3: fjdaryCmStatus -snmp_info['fjdarye60_cmods'] = (".1.3.6.1.4.1.211.1.21.1.60.2.1.2.1", [ 1, 3 ]) -snmp_scan_functions['fjdarye60_cmods'] \ - = lambda oid: oid(".1.3.6.1.2.1.1.2.0") == ".1.3.6.1.4.1.211.1.21.1.60" + +check_info["fjdarye60_cmods"] = { + 'check_function': check_fjdarye_item, + 'inventory_function': inventory_fjdarye_item, + 'service_description': 'Channel Module %s', + # 1: fjdaryCmIndex, 3: fjdaryCmStatus + 'snmp_info': ('.1.3.6.1.4.1.211.1.21.1.60.2.1.2.1', [1, 3]), + 'snmp_scan_function': \ + lambda oid: oid(".1.3.6.1.2.1.1.2.0") == ".1.3.6.1.4.1.211.1.21.1.60", +} diff -Nru check-mk-1.2.2p3/fjdarye60_cmods_flash check-mk-1.2.6p12/fjdarye60_cmods_flash --- check-mk-1.2.2p3/fjdarye60_cmods_flash 2013-11-05 09:23:07.000000000 +0000 +++ check-mk-1.2.6p12/fjdarye60_cmods_flash 2015-06-24 09:48:37.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. @@ -24,10 +24,16 @@ # to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, # Boston, MA 02110-1301 USA. -check_info['fjdarye60_cmods_flash'] = (check_fjdarye_item, "Channel Module Flash %s", 0, inventory_fjdarye_item) check_includes['fjdarye60_cmods_flash'] = ["fjdarye.include"] # 1: fjdaryCmflashIndex, 3: fjdaryCmflashStatus -snmp_info['fjdarye60_cmods_flash'] = (".1.3.6.1.4.1.211.1.21.1.60.2.4.2.1", [ 1, 3 ]) -snmp_scan_functions['fjdarye60_cmods_flash'] \ - = lambda oid: oid(".1.3.6.1.2.1.1.2.0") == ".1.3.6.1.4.1.211.1.21.1.60" + +check_info["fjdarye60_cmods_flash"] = { + 'check_function': check_fjdarye_item, + 'inventory_function': inventory_fjdarye_item, + 'service_description': 'Channel Module Flash %s', + # 1: fjdaryCmflashIndex, 3: fjdaryCmflashStatus + 'snmp_info': ('.1.3.6.1.4.1.211.1.21.1.60.2.4.2.1', [1, 3]), + 'snmp_scan_function': \ + lambda oid: oid(".1.3.6.1.2.1.1.2.0") == ".1.3.6.1.4.1.211.1.21.1.60", +} diff -Nru check-mk-1.2.2p3/fjdarye60_cmods_mem check-mk-1.2.6p12/fjdarye60_cmods_mem --- check-mk-1.2.2p3/fjdarye60_cmods_mem 2013-11-05 09:23:07.000000000 +0000 +++ check-mk-1.2.6p12/fjdarye60_cmods_mem 2015-06-24 09:48:37.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. @@ -24,10 +24,16 @@ # to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, # Boston, MA 02110-1301 USA. 
-check_info['fjdarye60_cmods_mem'] = (check_fjdarye_item, "Channel Module Memory %s", 0, inventory_fjdarye_item) check_includes['fjdarye60_cmods_mem'] = ["fjdarye.include"] # 1: fjdaryCmmemoryIndex, 3: fjdaryCmmemoryStatus -snmp_info['fjdarye60_cmods_mem'] = (".1.3.6.1.4.1.211.1.21.1.60.2.3.2.1", [ 1, 3 ]) -snmp_scan_functions['fjdarye60_cmods_mem'] \ - = lambda oid: oid(".1.3.6.1.2.1.1.2.0") == ".1.3.6.1.4.1.211.1.21.1.60" + +check_info["fjdarye60_cmods_mem"] = { + 'check_function': check_fjdarye_item, + 'inventory_function': inventory_fjdarye_item, + 'service_description': 'Channel Module Memory %s', + # 1: fjdaryCmmemoryIndex, 3: fjdaryCmmemoryStatus + 'snmp_info': ('.1.3.6.1.4.1.211.1.21.1.60.2.3.2.1', [1, 3]), + 'snmp_scan_function': \ + lambda oid: oid(".1.3.6.1.2.1.1.2.0") == ".1.3.6.1.4.1.211.1.21.1.60", +} diff -Nru check-mk-1.2.2p3/fjdarye60_conencs check-mk-1.2.6p12/fjdarye60_conencs --- check-mk-1.2.2p3/fjdarye60_conencs 2013-11-05 09:23:07.000000000 +0000 +++ check-mk-1.2.6p12/fjdarye60_conencs 2015-06-24 09:48:37.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. @@ -24,10 +24,16 @@ # to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, # Boston, MA 02110-1301 USA. -check_info['fjdarye60_conencs'] = (check_fjdarye_item, "Controller Enclosure %s", 0, inventory_fjdarye_item) check_includes['fjdarye60_conencs'] = ["fjdarye.include"] # 1: fjdaryCeIndex, 3: fjdaryCeStatus -snmp_info['fjdarye60_conencs'] = (".1.3.6.1.4.1.211.1.21.1.60.2.6.2.1", [ 1, 3 ]) -snmp_scan_functions['fjdarye60_conencs'] \ - = lambda oid: oid(".1.3.6.1.2.1.1.2.0") == ".1.3.6.1.4.1.211.1.21.1.60" + +check_info["fjdarye60_conencs"] = { + 'check_function': check_fjdarye_item, + 'inventory_function': inventory_fjdarye_item, + 'service_description': 'Controller Enclosure %s', + # 1: fjdaryCeIndex, 3: fjdaryCeStatus + 'snmp_info': ('.1.3.6.1.4.1.211.1.21.1.60.2.6.2.1', [1, 3]), + 'snmp_scan_function': \ + lambda oid: oid(".1.3.6.1.2.1.1.2.0") == ".1.3.6.1.4.1.211.1.21.1.60", +} diff -Nru check-mk-1.2.2p3/fjdarye60_devencs check-mk-1.2.6p12/fjdarye60_devencs --- check-mk-1.2.2p3/fjdarye60_devencs 2013-11-05 09:23:07.000000000 +0000 +++ check-mk-1.2.6p12/fjdarye60_devencs 2015-06-24 09:48:37.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. @@ -24,10 +24,16 @@ # to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, # Boston, MA 02110-1301 USA. 
-check_info['fjdarye60_devencs'] = (check_fjdarye_item, "Device Enclosure %s", 0, inventory_fjdarye_item) check_includes['fjdarye60_devencs'] = ["fjdarye.include"] # 1: fjdaryDeIndex, 3: fjdaryDeStatus -snmp_info['fjdarye60_devencs'] = (".1.3.6.1.4.1.211.1.21.1.60.2.7.2.1", [ 1, 3 ]) -snmp_scan_functions['fjdarye60_devencs'] \ - = lambda oid: oid(".1.3.6.1.2.1.1.2.0") == ".1.3.6.1.4.1.211.1.21.1.60" + +check_info["fjdarye60_devencs"] = { + 'check_function': check_fjdarye_item, + 'inventory_function': inventory_fjdarye_item, + 'service_description': 'Device Enclosure %s', + # 1: fjdaryDeIndex, 3: fjdaryDeStatus + 'snmp_info': ('.1.3.6.1.4.1.211.1.21.1.60.2.7.2.1', [1, 3]), + 'snmp_scan_function': \ + lambda oid: oid(".1.3.6.1.2.1.1.2.0") == ".1.3.6.1.4.1.211.1.21.1.60", +} diff -Nru check-mk-1.2.2p3/fjdarye60_disks check-mk-1.2.6p12/fjdarye60_disks --- check-mk-1.2.2p3/fjdarye60_disks 2013-11-05 09:23:07.000000000 +0000 +++ check-mk-1.2.6p12/fjdarye60_disks 2015-09-21 10:59:54.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. @@ -25,20 +25,30 @@ # Boston, MA 02110-1301 USA. # 1. Summary version of check -> all disks into one service -check_info['fjdarye60_disks.summary'] = (check_fjdarye_disks_summary, "Disk summary", 0, inventory_fjdarye_disks_summary) check_includes['fjdarye60_disks.summary'] = ["fjdarye.include"] -snmp_info['fjdarye60_disks.summary'] = (".1.3.6.1.4.1.211.1.21.1.60.2.12.2.1", [ 1, 3 ]) -snmp_scan_functions['fjdarye60_disks.summary'] \ - = lambda oid: oid(".1.3.6.1.2.1.1.2.0") == ".1.3.6.1.4.1.211.1.21.1.60" # -------------------------------------------------------------------------------- # 2. 
Single disk version of check -> one check for each disk -check_info['fjdarye60_disks'] = (check_fjdarye_disks, "Disk %s", 0, inventory_fjdarye_disks) check_includes['fjdarye60_disks'] = ["fjdarye.include"] -snmp_info['fjdarye60_disks'] = (".1.3.6.1.4.1.211.1.21.1.60.2.12.2.1", [ 1, 3 ]) -checkgroup_of['fjdarye60_disks'] = "raid_disk" -snmp_scan_functions['fjdarye60_disks'] \ - = lambda oid: oid(".1.3.6.1.2.1.1.2.0") == ".1.3.6.1.4.1.211.1.21.1.60" + +check_info["fjdarye60_disks"] = { + 'check_function': check_fjdarye_disks, + 'inventory_function': inventory_fjdarye_disks, + 'service_description': 'Disk %s', + 'snmp_info': ('.1.3.6.1.4.1.211.1.21.1.60.2.12.2.1', [1, 3]), + 'snmp_scan_function': \ + lambda oid: oid(".1.3.6.1.2.1.1.2.0") == ".1.3.6.1.4.1.211.1.21.1.60", + 'group': 'raid_disk', +} + +check_info["fjdarye60_disks.summary"] = { + 'check_function': check_fjdarye_disks_summary, + 'inventory_function': inventory_fjdarye_disks_summary, + 'service_description': 'Disk summary', + 'snmp_info': ('.1.3.6.1.4.1.211.1.21.1.60.2.12.2.1', [1, 3]), + 'snmp_scan_function': \ + lambda oid: oid(".1.3.6.1.2.1.1.2.0") == ".1.3.6.1.4.1.211.1.21.1.60", +} diff -Nru check-mk-1.2.2p3/fjdarye60_disks.summary check-mk-1.2.6p12/fjdarye60_disks.summary --- check-mk-1.2.2p3/fjdarye60_disks.summary 2013-11-05 09:23:07.000000000 +0000 +++ check-mk-1.2.6p12/fjdarye60_disks.summary 2015-06-24 09:48:36.000000000 +0000 @@ -1,6 +1,6 @@ -title: Monitor summarized status of all disks in Fujitsu ETERNUS DX storage systems -agents: SNMP -author: Lars Michelsen +title: Fujitsu ETERNUS DX storage systems: Summarized status +agents: snmp +catalog: hw/storagehw/fujitsu license: GPL distribution: check_mk description: diff -Nru check-mk-1.2.2p3/fjdarye60_expanders check-mk-1.2.6p12/fjdarye60_expanders --- check-mk-1.2.2p3/fjdarye60_expanders 2013-11-05 09:23:07.000000000 +0000 +++ check-mk-1.2.6p12/fjdarye60_expanders 2015-06-24 09:48:37.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. @@ -24,10 +24,16 @@ # to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, # Boston, MA 02110-1301 USA. -check_info['fjdarye60_expanders'] = (check_fjdarye_item, "Expander %s", 0, inventory_fjdarye_item) check_includes['fjdarye60_expanders'] = ["fjdarye.include"] # 1: fjdaryExpanderIndex, 3: fjdaryExpanderStatus -snmp_info['fjdarye60_expanders'] = (".1.3.6.1.4.1.211.1.21.1.60.2.8.2.1", [ 1, 3 ]) -snmp_scan_functions['fjdarye60_expanders'] \ - = lambda oid: oid(".1.3.6.1.2.1.1.2.0") == ".1.3.6.1.4.1.211.1.21.1.60" + +check_info["fjdarye60_expanders"] = { + 'check_function': check_fjdarye_item, + 'inventory_function': inventory_fjdarye_item, + 'service_description': 'Expander %s', + # 1: fjdaryExpanderIndex, 3: fjdaryExpanderStatus + 'snmp_info': ('.1.3.6.1.4.1.211.1.21.1.60.2.8.2.1', [1, 3]), + 'snmp_scan_function': \ + lambda oid: oid(".1.3.6.1.2.1.1.2.0") == ".1.3.6.1.4.1.211.1.21.1.60", +} diff -Nru check-mk-1.2.2p3/fjdarye60_inletthmls check-mk-1.2.6p12/fjdarye60_inletthmls --- check-mk-1.2.2p3/fjdarye60_inletthmls 2013-11-05 09:23:07.000000000 +0000 +++ check-mk-1.2.6p12/fjdarye60_inletthmls 2015-06-24 09:48:37.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . 
\ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. @@ -24,10 +24,16 @@ # to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, # Boston, MA 02110-1301 USA. -check_info['fjdarye60_inletthmls'] = (check_fjdarye_item, "Inlet Thermal %s", 0, inventory_fjdarye_item) check_includes['fjdarye60_inletthmls'] = ["fjdarye.include"] # 1: fjdaryInletthmlIndex, 3: fjdaryInletthmlStatus -snmp_info['fjdarye60_inletthmls'] = (".1.3.6.1.4.1.211.1.21.1.60.2.10.2.1", [ 1, 3 ]) -snmp_scan_functions['fjdarye60_inletthmls'] \ - = lambda oid: oid(".1.3.6.1.2.1.1.2.0") == ".1.3.6.1.4.1.211.1.21.1.60" + +check_info["fjdarye60_inletthmls"] = { + 'check_function': check_fjdarye_item, + 'inventory_function': inventory_fjdarye_item, + 'service_description': 'Inlet Thermal %s', + # 1: fjdaryInletthmlIndex, 3: fjdaryInletthmlStatus + 'snmp_info': ('.1.3.6.1.4.1.211.1.21.1.60.2.10.2.1', [1, 3]), + 'snmp_scan_function': \ + lambda oid: oid(".1.3.6.1.2.1.1.2.0") == ".1.3.6.1.4.1.211.1.21.1.60", +} diff -Nru check-mk-1.2.2p3/fjdarye60_psus check-mk-1.2.6p12/fjdarye60_psus --- check-mk-1.2.2p3/fjdarye60_psus 2013-11-05 09:23:07.000000000 +0000 +++ check-mk-1.2.6p12/fjdarye60_psus 2015-06-24 09:48:37.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. @@ -24,10 +24,16 @@ # to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, # Boston, MA 02110-1301 USA. -check_info['fjdarye60_psus'] = (check_fjdarye_item, "PSU %s", 0, inventory_fjdarye_item) check_includes['fjdarye60_psus'] = ["fjdarye.include"] # 1: fjdaryPsuIndex, 3: fjdaryPsuStatus -snmp_info['fjdarye60_psus'] = (".1.3.6.1.4.1.211.1.21.1.60.2.9.2.1", [ 1, 3 ]) -snmp_scan_functions['fjdarye60_psus'] \ - = lambda oid: oid(".1.3.6.1.2.1.1.2.0") == ".1.3.6.1.4.1.211.1.21.1.60" + +check_info["fjdarye60_psus"] = { + 'check_function': check_fjdarye_item, + 'inventory_function': inventory_fjdarye_item, + 'service_description': 'PSU %s', + # 1: fjdaryPsuIndex, 3: fjdaryPsuStatus + 'snmp_info': ('.1.3.6.1.4.1.211.1.21.1.60.2.9.2.1', [1, 3]), + 'snmp_scan_function': \ + lambda oid: oid(".1.3.6.1.2.1.1.2.0") == ".1.3.6.1.4.1.211.1.21.1.60", +} diff -Nru check-mk-1.2.2p3/fjdarye60_rluns check-mk-1.2.6p12/fjdarye60_rluns --- check-mk-1.2.2p3/fjdarye60_rluns 2013-11-05 09:23:07.000000000 +0000 +++ check-mk-1.2.6p12/fjdarye60_rluns 2015-06-24 09:48:37.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. @@ -24,31 +24,13 @@ # to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, # Boston, MA 02110-1301 USA. 
-def inventoryfunc(info): - inventory = [] - for line in info: - rawdata = line[1] - if rawdata[2] == '\xa0': # RLUN is present - inventory.append( (line[0], "", None) ) - return inventory -def checkfunc(item, _no_params, info): - for line in info: - if item == line[0]: - rawdata = line[1] - if rawdata[2] != '\xa0': - return (2, "CRIT - RLUN is not present") - elif rawdata[3] == '\x08': - return (1, "WARN - RLUN is rebuilding") # we assume that ;-) - elif rawdata[3] == '\x00': - return (0, "OK - RLUN is in normal state") # assumption - else: - return (2, "CRIT - RLUN in unknown state %02x" % ord(rawdata[3])) - - return (3, "UNKNOWN - No RLUN %s in SNMP output" % item) - -check_info['fjdarye60_rluns'] = (checkfunc, "RLUN %s", 0, inventoryfunc) -snmp_info['fjdarye60_rluns'] = (".1.3.6.1.4.1.211.1.21.1.60.3.4.2.1", [ 0, "2" ]) - -snmp_scan_functions['fjdarye60_rluns'] \ - = lambda oid: oid(".1.3.6.1.2.1.1.2.0") == ".1.3.6.1.4.1.211.1.21.1.60" +check_info["fjdarye60_rluns"] = { + 'includes' : ["fjdarye.include"], + 'check_function' : check_fjdarye_rluns, + 'inventory_function' : inventory_fjdarye_rluns, + 'service_description' : 'RLUN %s', + 'snmp_info' : ('.1.3.6.1.4.1.211.1.21.1.60.3.4.2.1', [0, '2']), + 'snmp_scan_function' : \ + lambda oid: oid(".1.3.6.1.2.1.1.2.0") == ".1.3.6.1.4.1.211.1.21.1.60", +} diff -Nru check-mk-1.2.2p3/fjdarye60_sum check-mk-1.2.6p12/fjdarye60_sum --- check-mk-1.2.2p3/fjdarye60_sum 2013-11-05 09:23:07.000000000 +0000 +++ check-mk-1.2.6p12/fjdarye60_sum 2015-06-24 09:48:37.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. @@ -38,17 +38,23 @@ text = "Status is %s" % fjdarye60_sum_status[status] if status == 3: - return (0, "OK - %s" % text) + return (0, "%s" % text) elif status == 4: - return (1, "WARN - %s" % text) + return (1, "%s" % text) else: - return (2, "CRIT - %s" % text) + return (2, "%s" % text) - return (3, "UNKNOWN - No status summary %d present" % index) + return (3, "No status summary %d present" % index) -check_info['fjdarye60_sum'] = (check_fjdarye60_sum, "Summary Status %s", 0, inventory_fjdarye60_sum) # 1: fjdaryUnitStatus -snmp_info['fjdarye60_sum'] = (".1.3.6.1.4.1.211.1.21.1.60.6", [ 0, "0" ]) -snmp_scan_functions['fjdarye60_sum'] \ - = lambda oid: oid(".1.3.6.1.2.1.1.2.0") == ".1.3.6.1.4.1.211.1.21.1.60" + +check_info["fjdarye60_sum"] = { + 'check_function': check_fjdarye60_sum, + 'inventory_function': inventory_fjdarye60_sum, + 'service_description': 'Summary Status %s', + # 1: fjdaryUnitStatus + 'snmp_info': ('.1.3.6.1.4.1.211.1.21.1.60.6', [0, '0']), + 'snmp_scan_function': \ + lambda oid: oid(".1.3.6.1.2.1.1.2.0") == ".1.3.6.1.4.1.211.1.21.1.60", +} diff -Nru check-mk-1.2.2p3/fjdarye60_syscaps check-mk-1.2.6p12/fjdarye60_syscaps --- check-mk-1.2.2p3/fjdarye60_syscaps 2013-11-05 09:23:07.000000000 +0000 +++ check-mk-1.2.6p12/fjdarye60_syscaps 2015-06-24 09:48:37.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. 
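The rluns rewrite above replaces the inline inventoryfunc/checkfunc pair with check_fjdarye_rluns from fjdarye.include (shown in a later hunk), which decodes status directly from the raw SNMP octet string: byte 3 is the presence flag, byte 2 the state code. The decoding restated as a self-contained function, with the byte values taken from the patch:

def decode_rlun_status(rawdata):
    # rawdata: raw OCTET STRING as delivered by the SNMP layer
    # (a plain str in this Python 2 era codebase)
    if rawdata[3] != '\xa0':                       # presence flag
        return (2, 'RLUN is not present')
    states = {
        '\x08': (1, 'RLUN is rebuilding'),
        '\x07': (1, 'RLUN copyback in progress'),
        '\x41': (1, 'RLUN spare is in use'),
        '\x00': (0, 'RLUN is in normal state'),
    }
    return states.get(rawdata[2],
                      (2, 'RLUN in unknown state %02x' % ord(rawdata[2])))

print(decode_rlun_status('\x00\x00\x08\xa0'))      # (1, 'RLUN is rebuilding')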
@@ -24,10 +24,16 @@ # to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, # Boston, MA 02110-1301 USA. -check_info['fjdarye60_syscaps'] = (check_fjdarye_item, "System Capacitor Unit %s", 0, inventory_fjdarye_item) check_includes['fjdarye60_syscaps'] = ["fjdarye.include"] # 1: fjdaryScuIndex, 3: fjdaryScuStatus -snmp_info['fjdarye60_syscaps'] = (".1.3.6.1.4.1.211.1.21.1.60.2.5.2.1", [ 1, 3 ]) -snmp_scan_functions['fjdarye60_syscaps'] \ - = lambda oid: oid(".1.3.6.1.2.1.1.2.0") == ".1.3.6.1.4.1.211.1.21.1.60" + +check_info["fjdarye60_syscaps"] = { + 'check_function': check_fjdarye_item, + 'inventory_function': inventory_fjdarye_item, + 'service_description': 'System Capacitor Unit %s', + # 1: fjdaryScuIndex, 3: fjdaryScuStatus + 'snmp_info': ('.1.3.6.1.4.1.211.1.21.1.60.2.5.2.1', [1, 3]), + 'snmp_scan_function': \ + lambda oid: oid(".1.3.6.1.2.1.1.2.0") == ".1.3.6.1.4.1.211.1.21.1.60", +} diff -Nru check-mk-1.2.2p3/fjdarye60_thmls check-mk-1.2.6p12/fjdarye60_thmls --- check-mk-1.2.2p3/fjdarye60_thmls 2013-11-05 09:23:07.000000000 +0000 +++ check-mk-1.2.6p12/fjdarye60_thmls 2015-06-24 09:48:37.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. @@ -24,10 +24,16 @@ # to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, # Boston, MA 02110-1301 USA. -check_info['fjdarye60_thmls'] = (check_fjdarye_item, "Thermal %s", 0, inventory_fjdarye_item) check_includes['fjdarye60_thmls'] = ["fjdarye.include"] # 1: fjdaryInletthmlIndex, 3: fjdaryInletthmlStatus -snmp_info['fjdarye60_thmls'] = (".1.3.6.1.4.1.211.1.21.1.60.2.11.2.1", [ 1, 3 ]) -snmp_scan_functions['fjdarye60_thmls'] \ - = lambda oid: oid(".1.3.6.1.2.1.1.2.0") == ".1.3.6.1.4.1.211.1.21.1.60" + +check_info["fjdarye60_thmls"] = { + 'check_function': check_fjdarye_item, + 'inventory_function': inventory_fjdarye_item, + 'service_description': 'Thermal %s', + # 1: fjdaryInletthmlIndex, 3: fjdaryInletthmlStatus + 'snmp_info': ('.1.3.6.1.4.1.211.1.21.1.60.2.11.2.1', [1, 3]), + 'snmp_scan_function': \ + lambda oid: oid(".1.3.6.1.2.1.1.2.0") == ".1.3.6.1.4.1.211.1.21.1.60", +} diff -Nru check-mk-1.2.2p3/fjdarye.include check-mk-1.2.6p12/fjdarye.include --- check-mk-1.2.2p3/fjdarye.include 2013-11-05 09:23:07.000000000 +0000 +++ check-mk-1.2.6p12/fjdarye.include 2015-09-21 10:59:54.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. 
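All fjdarye60 entries share one snmp_scan_function. During the SNMP scan Check_MK calls it with a helper that fetches single OIDs from the device, and the checks register only when sysObjectID equals the Fujitsu ETERNUS subtree. Tried stand-alone with a stubbed helper (the stub and the second device are made up; the OIDs are the ones used above):

def fjdarye60_scan(oid):
    return oid(".1.3.6.1.2.1.1.2.0") == ".1.3.6.1.4.1.211.1.21.1.60"

def oid_stub(answers):
    # emulates one device: maps a requested OID to its returned value
    return lambda requested: answers.get(requested)

eternus = oid_stub({".1.3.6.1.2.1.1.2.0": ".1.3.6.1.4.1.211.1.21.1.60"})
other   = oid_stub({".1.3.6.1.2.1.1.2.0": ".1.3.6.1.4.1.9.1.1"})
print(fjdarye60_scan(eternus))   # True
print(fjdarye60_scan(other))     # False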
@@ -51,8 +51,8 @@ code = 1 else: code = 2 - return(code, "%s - Status is %s" % (nagios_state_names[code], fjdarye_item_status[status])) - return(3, 'UNKNOWN - No status for item %d present' % index ) + return(code, "Status is %s" % (fjdarye_item_status[status])) + return(3, 'No status for item %d present' % index ) # -------------------------------------------------------------------------------- # disk checks: @@ -85,9 +85,9 @@ def check_fjdarye_disks_summary(index, expected_status, info): current_state = fjdarye_disks_summary(info) - infotext = " - " + fjdarye_disks_printstates(current_state) + infotext = fjdarye_disks_printstates(current_state) if current_state == expected_status: - return (0, "OK" + infotext) + return (0, infotext) result = 1 for ename, ecount in expected_status.items(): @@ -95,7 +95,7 @@ result = 2 break - return (result, nagios_state_names[result] + infotext + \ + return (result, infotext + \ " (expected was: %s)" % fjdarye_disks_printstates(expected_status)) # -------------------------------------------------------------------------------- @@ -109,9 +109,58 @@ status = fjdarye_disks_status[int(line[1])] if status == expected_status: - return (0, "OK - Status is %s" % status) + return (0, "Status is %s" % status) else: - return (2, "CRIT - Status is %s (expected status is %s)" % (status, expected_status)) + return (2, "Status is %s (expected status is %s)" % (status, expected_status)) - return (3, "UNKNOWN - No status for disk number %d present" % index) + return (3, "No status for disk number %d present" % index) + +def inventory_fjdarye_rluns(info): + inventory = [] + for line in info: + rawdata = line[1] + if rawdata[3] == '\xa0': # RLUN is present + inventory.append( (line[0], "", None) ) + return inventory + +def check_fjdarye_rluns(item, _no_params, info): + for line in info: + if item == line[0]: + rawdata = line[1] + if rawdata[3] != '\xa0': + return (2, "RLUN is not present" ) + elif rawdata[2] == '\x08': + return (1, "RLUN is rebuilding") + elif rawdata[2] == '\x07': + return (1, "RLUN copyback in progress") + elif rawdata[2] == '\x41': + return (1, "RLUN spare is in use") + elif rawdata[2] == '\x00': + return (0, "RLUN is in normal state") # assumption + else: + return (2, "RLUN in unknown state %02x" % ord(rawdata[2]) ) + + return (3, "No RLUN %s in SNMP output" % item) + +fjdarye_sum_status = { 1: 'unknown', 2: 'unused', 3: 'ok', + 4: 'warning', 5: 'failed' } + +def inventory_fjdarye_sum(info): + if len(info[0]) == 1: + return [ (0, None) ] + +def check_fjdarye_sum(index, _no_param, info): + for line in info: + if len(info[0]) == 1: + status = int(line[0]) + text = "Status is %s" % fjdarye_sum_status[status] + + if status == 3: + return (0, "%s" % text) + elif status == 4: + return (1, "%s" % text) + else: + return (2, "%s" % text) + + return (3, "No status summary present" ) diff -Nru check-mk-1.2.2p3/fortigate_cpu check-mk-1.2.6p12/fortigate_cpu --- check-mk-1.2.2p3/fortigate_cpu 2013-11-05 09:23:07.000000000 +0000 +++ check-mk-1.2.6p12/fortigate_cpu 2015-09-21 10:59:54.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. 
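The disk summary logic that fjdarye.include gains in the hunk below compares a dict of per-state disk counts against the counts remembered at inventory time: identical counts are OK, any deviation is WARN, and it becomes CRIT as soon as an expected state holds fewer disks than before. Condensed into a sketch (the state names in the demo are illustrative):

def compare_disk_summary(current, expected):
    # current/expected: {disk state name: number of disks in that state}
    if current == expected:
        return 0                        # exactly as inventorized
    for state, count in expected.items():
        if current.get(state, 0) < count:
            return 2                    # a disk left an expected state
    return 1                            # some deviation, nothing lost

print(compare_disk_summary({'available': 12}, {'available': 12}))   # 0
print(compare_disk_summary({'available': 11, 'broken': 1},
                           {'available': 12}))                      # 2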
@@ -33,17 +33,17 @@ util += int(line[0]) num_cpus += 1 if num_cpus == 0: - return (3, "UNKNOWN - no data found in SNMP output") + return (3, "no data found in SNMP output") util = float(util) / num_cpus - infotext = " - %2.1f%% utilization at %d CPUs" % (util, num_cpus) + infotext = "%2.1f%% utilization at %d CPUs" % (util, num_cpus) warn, crit = params perfdata = [("util", util, warn, crit, 0, 100)] if util >= crit: - return (2, "CRIT" + infotext + " (critical at %d%%)" % crit, perfdata) + return (2, infotext + " (critical at %d%%)" % crit, perfdata) elif util >= warn: - return (1, "WARN" + infotext + " (warning at %d%%)" % warn, perfdata) + return (1, infotext + " (warning at %d%%)" % warn, perfdata) else: - return (0, "OK" + infotext, perfdata) + return (0, infotext, perfdata) check_info["fortigate_cpu"] = { "check_function" : check_fortigate_cpu, @@ -51,7 +51,7 @@ "service_description" : "CPU utilization", "has_perfdata" : True, "group" : "cpu_utilization", - "snmp_scan_function" : lambda oid: oid(".1.3.6.1.4.1.12356.1.8.0"), + "snmp_scan_function" : lambda oid: "fortigate" in oid('.1.3.6.1.2.1.1.1.0').lower(), "snmp_info" : ( ".1.3.6.1.4.1.12356.1", [ 8 ]), } diff -Nru check-mk-1.2.2p3/fortigate_memory check-mk-1.2.6p12/fortigate_memory --- check-mk-1.2.2p3/fortigate_memory 2013-11-05 09:23:07.000000000 +0000 +++ check-mk-1.2.6p12/fortigate_memory 2015-09-21 10:59:54.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. @@ -26,8 +26,24 @@ fortigate_memory_default_levels = (70, 80) +def inventory_fortigate_memory(info): + if info: + return [(None, "fortigate_memory_default_levels")] + def check_fortigate_memory(item, params, info): + # This check does not yet support averaging. We need to + # convert it to mem.include + if type(params) == dict: + params = params["levels"] + warn, crit = params + + # This check is only able to check the used space + # The checkgroup "memory" might set negative values which act as levels for free space + # These levels are converted to used space, too.. + warn = abs(warn) + crit = abs(crit) + current = saveint(info[0][0]) state = 0 icon = '' @@ -39,15 +55,15 @@ icon = "(!!)" perf = [("mem_usage", current, warn, crit)] - return(state, nagios_state_names[state] + " - %d%%%s (levels at %d/%d percent)" % (current, icon, warn, crit), perf) + return(state, "%d%%%s (levels at %d/%d percent)" % (current, icon, warn, crit), perf) check_info["fortigate_memory"] = { "check_function" : check_fortigate_memory, - "inventory_function" : lambda info: [(None, "fortigate_memory_default_levels")], + "inventory_function" : inventory_fortigate_memory, "service_description" : "Memory usage", "has_perfdata" : True, "group" : "memory", - "snmp_scan_function" : lambda oid: oid(".1.3.6.1.4.1.12356.1.9.0"), + "snmp_scan_function" : lambda oid: "fortigate" in oid('.1.3.6.1.2.1.1.1.0').lower(), "snmp_info" : ( ".1.3.6.1.4.1.12356.1", [ 9 ]), } diff -Nru check-mk-1.2.2p3/fortigate_sessions check-mk-1.2.6p12/fortigate_sessions --- check-mk-1.2.2p3/fortigate_sessions 2013-11-05 09:23:07.000000000 +0000 +++ check-mk-1.2.6p12/fortigate_sessions 2015-09-21 10:59:54.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . 
\ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. @@ -26,27 +26,34 @@ fortigate_sessions_default_levels = (100000, 150000) + +def inventory_fortigate_sessions(info): + return [(None, "fortigate_sessions_default_levels")] + + def check_fortigate_sessions(item, params, info): warn, crit = params - current = saveint(info[0][0]) + current = int(info[0][0]) state = 0 - icon = '' - if current >= warn: - state = 1 - icon = "(!)" + infotext = "%d sessions" % current + levelstext = " (levels at %d/%d)" % (warn, crit) if current >= crit: state = 2 - icon = "(!!)" + infotext += levelstext + elif current >= warn: + state = 1 + infotext += levelstext + + return state, infotext, [("sessions", current, warn, crit)] - perf = [("session", current, warn, crit)] - return(state, nagios_state_names[state] + " - %d Session%s (levels at %d/%d)" % (current, icon, warn, crit), perf) check_info["fortigate_sessions"] = { "check_function" : check_fortigate_sessions, - "inventory_function" : lambda info: [(None, "fortigate_sessions_default_levels")], + "inventory_function" : inventory_fortigate_sessions, "service_description" : "Sessions", + "group" : "fortigate_sessions", "has_perfdata" : True, - "snmp_scan_function" : lambda oid: oid(".1.3.6.1.4.1.12356.1.10.0"), + "snmp_scan_function" : lambda oid: "fortigate" in oid('.1.3.6.1.2.1.1.1.0').lower(), "snmp_info" : ( ".1.3.6.1.4.1.12356.1", [ 10 ]), } diff -Nru check-mk-1.2.2p3/fritz check-mk-1.2.6p12/fritz --- check-mk-1.2.2p3/fritz 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/fritz 2015-06-24 09:48:37.000000000 +0000 @@ -0,0 +1,201 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. 
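The reworked fortigate_memory check has to accept two parameter shapes: the legacy bare tuple and the dict written by the generic "memory" ruleset, whose levels can be negative to mean levels on free rather than used space. The hunk folds both onto positive used-space percentages; the same normalization in isolation:

def normalize_mem_levels(params):
    if type(params) == dict:             # "memory" ruleset style
        params = params["levels"]
    warn, crit = params
    return abs(warn), abs(crit)          # negative = levels on free space

print(normalize_mem_levels((70, 80)))                 # (70, 80)
print(normalize_mem_levels({"levels": (-30, -20)}))   # (30, 20)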
+ +def fritz_parse_info(info): + data = {} + for l in info: + data[l[0]] = ' '.join(l[1:]) + return data + +# +# Internet connection +# + +def inventory_fritz_conn(info): + data = fritz_parse_info(info) + if 'NewConnectionStatus' in data \ + and 'NewExternalIPAddress' in data \ + and data.get('NewConnectionStatus') != 'Unconfigured': + return [(None, {})] + +fritz_conn_states = { + 'Connected': 0, + 'Connecting': 1, + 'Disconnected': 1, + 'Unconfigured': 1, +} + +def check_fritz_conn(_unused, _no_params, info): + if not info: + return 3, 'Connection info is missing' + data = fritz_parse_info(info) + state_txt = data['NewConnectionStatus'] + nag_state = fritz_conn_states.get(state_txt) + if nag_state is None: + return 3, 'Got unhandled state output "%s"' % state_txt + + output = 'State: %s' % state_txt + if state_txt == 'Connected': + output += ', WAN IP Address: %s' % data['NewExternalIPAddress'] + + last_err = data.get('NewLastConnectionError') + if last_err and last_err != 'ERROR_NONE': + output += ', Last Error: %s' % last_err + + perfdata = [] + if data.get('NewUptime'): + conn_time = check_uptime_seconds({}, float(data['NewUptime'])) + output += ', %s' % conn_time[1] + perfdata = conn_time[2] + + return nag_state, output, perfdata + +check_info['fritz.conn'] = { + "inventory_function" : inventory_fritz_conn, + "check_function" : check_fritz_conn, + "service_description" : "Connection", + "includes" : [ "uptime.include" ], + "has_perfdata" : True, +} + + +# +# Config +# + +def inventory_fritz_config(info): + data = fritz_parse_info(info) + if 'NewDNSServer1' in data: + return [(None, {})] + +def check_fritz_config(_unused, _no_params, info): + data = fritz_parse_info(info) + + output = [] + for label, key in [ + ('DNS-Server1', 'NewDNSServer1'), + ('DNS-Server2', 'NewDNSServer2'), + ('VoIP-DNS-Server1', 'NewVoipDNSServer1'), + ('VoIP-DNS-Server2', 'NewVoipDNSServer2'), + ('uPnP Config Enabled', 'NewUpnpControlEnabled'), + ]: + if key in data and data[key] != '0.0.0.0': + output.append('%s: %s' % (label, data[key])) + + if not output: + return 3, 'Configuration info is missing' + + return 0, ', '.join(output) + +check_info['fritz.config'] = { + "inventory_function" : inventory_fritz_config, + "check_function" : check_fritz_config, + "service_description" : "Configuration", + "has_perfdata" : False, +} + +# +# WAN Interface Check +# + +def fritz_wan_if_to_if64(data): + if 'NewLinkStatus' not in data: + oper_status = None + elif data['NewLinkStatus'] == 'Up': + oper_status = '1' + else: + oper_status = '2' + + return [ + # ifIndex, ifDescr, ifType, ifSpeed, ifOperStatus, ifInOctets, inucast, + # inmcast, inbcast, ifInDiscards, ifInErrors, ifOutOctets, outucast, + # outmcast, outbcast, ifOutDiscards, ifOutErrors, ifOutQLen, ifAlias, ifPhysAddress + ('0', 'WAN', '6', data.get('NewLayer1DownstreamMaxBitRate'), oper_status, + data.get('NewTotalBytesReceived'), '0', '0', '0', '0', '0', + data.get('NewTotalBytesSent'), '0', '0', '0', '0', '0', '0', + 'WAN', '') + ] + +def inventory_fritz_wan_if(info): + data = fritz_parse_info(info) + return inventory_if_common(fritz_wan_if_to_if64(data)) + +def check_fritz_wan_if(item, params, info): + # TODO: This check modifies params!! 
This is strictly forbidden + if not info: + return 3, 'Interface info is missing' + data = fritz_parse_info(info) + if 'assumed_speed_in' not in params: + params['assumed_speed_in'] = int(data['NewLayer1DownstreamMaxBitRate']) + if 'assumed_speed_out' not in params: + params['assumed_speed_out'] = int(data['NewLayer1UpstreamMaxBitRate']) + if 'unit' not in params: + params['unit'] = 'Bit' + return check_if_common(item, params, fritz_wan_if_to_if64(data)) + +check_info["fritz.wan_if"] = { + 'check_function': check_fritz_wan_if, + 'inventory_function': inventory_fritz_wan_if, + 'service_description': 'Interface %s', + 'has_perfdata': True, + 'group': 'if', + 'default_levels_variable': 'if_default_levels', + 'includes': [ 'if.include' ], +} + +# +# Link +# + +def inventory_fritz_link(info): + data = fritz_parse_info(info) + if 'NewLinkStatus' in data and 'NewPhysicalLinkStatus' in data: + return [ (None, {}) ] + +def check_fritz_link(_no_item, _no_params, info): + data = fritz_parse_info(info) + + output = [] + for label, key in [ + ('Link Status', 'NewLinkStatus'), + ('Physical Link Status', 'NewPhysicalLinkStatus'), + ('Link Type', 'NewLinkType'), + ('WAN Access Type', 'NewWANAccessType'), + ]: + if key in data: + output.append('%s: %s' % (label, data[key])) + + if not output: + return 3, 'Link info is missing' + + return 0, ', '.join(output) + +check_info["fritz.link"] = { + 'check_function': check_fritz_link, + 'inventory_function': inventory_fritz_link, + 'service_description': 'Link Info', + 'has_perfdata': False, +} diff -Nru check-mk-1.2.2p3/fritz.config check-mk-1.2.6p12/fritz.config --- check-mk-1.2.2p3/fritz.config 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/fritz.config 2015-06-24 09:48:36.000000000 +0000 @@ -0,0 +1,16 @@ +title: Configuration Info of Fritz!Box +agents: fritzbox +catalog: hw/network/avm +license: GPL +distribution: check_mk +description: + The check is always OK; it outputs information about the current DNS-Servers + and whether or not configuration via uPnP is enabled. + + The check uses data provided by the fritzbox special agent. You need to + configure this agent to be used for the Fritz!Box you would like to monitor. The + special agent uses uPnP to get the needed information from the device. + +inventory: + One check per system is created if the agent has a section {<<>>} and + at least the key {NewDNSServer1} is provided in this section. diff -Nru check-mk-1.2.2p3/fritz.conn check-mk-1.2.6p12/fritz.conn --- check-mk-1.2.2p3/fritz.conn 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/fritz.conn 2015-06-24 09:48:36.000000000 +0000 @@ -0,0 +1,17 @@ +title: Connection State of Fritz!Box +agents: fritzbox +catalog: hw/network/avm +license: GPL +distribution: check_mk +description: + This check outputs the connection state of the Fritz!Box WAN connection. + If available, it also outputs the current WAN IP Address and the last + reported connection error. + + The check uses data provided by the fritzbox special agent. You need to + configure this agent to be used for the Fritz!Box you would like to monitor. The + special agent uses uPnP to get the needed information from the device. + +inventory: + One check per system is created if the agent has a section {<<>>} and + the keys {NewConnectionStatus} and {NewExternalIPAddress} are in this section. 
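All fritz subchecks funnel the agent section through fritz_parse_info, which turns the word-split agent lines into a key/value dict, rejoining values that span several words. A short usage example on made-up agent data:

def fritz_parse_info(info):
    data = {}
    for l in info:
        data[l[0]] = ' '.join(l[1:])     # the value may contain spaces
    return data

section = [['NewConnectionStatus', 'Connected'],       # illustrative data
           ['NewExternalIPAddress', '203.0.113.17'],
           ['NewLastConnectionError', 'ERROR_NONE']]
data = fritz_parse_info(section)
print(data['NewConnectionStatus'])                     # Connected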
diff -Nru check-mk-1.2.2p3/fritz.link check-mk-1.2.6p12/fritz.link --- check-mk-1.2.2p3/fritz.link 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/fritz.link 2015-06-24 09:48:36.000000000 +0000 @@ -0,0 +1,17 @@ +title: Network Link of Fritz!Box +agents: fritzbox +catalog: hw/network/avm +license: GPL +distribution: check_mk +description: + The check is always OK; it outputs information about the network link + connected to the Fritz!Box. It outputs the {Link Status}, {Physical Link Status}, + {Link Type} and {WAN Access Type} if this information is provided. + + The check uses data provided by the fritzbox special agent. You need to + configure this agent to be used for the Fritz!Box you would like to monitor. The + special agent uses uPnP to get the needed information from the device. + +inventory: + One check per system is created if the agent has a section {<<>>} and + at least the keys {NewLinkStatus} and {NewPhysicalLinkStatus} are provided in this section. diff -Nru check-mk-1.2.2p3/fritz.wan_if check-mk-1.2.6p12/fritz.wan_if --- check-mk-1.2.2p3/fritz.wan_if 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/fritz.wan_if 2015-06-24 09:48:36.000000000 +0000 @@ -0,0 +1,133 @@ +title: State of WAN network interface of Fritz!Box +agents: fritzbox +catalog: hw/network/avm +license: GPL +distribution: check_mk +description: + This check monitors the operational status, link speed and traffic of a + Fritz!Box WAN interface. + + The check uses data provided by the fritzbox special agent. You need to + configure this agent to be used for the Fritz!Box you would like to monitor. The + special agent uses uPnP to get the needed information from the device. + + Depending on the check parameters this check can go WARN or CRIT when the + port status changes (i.e. is down), when the link speed changes (e.g. a + port expected to be set to 1GBit/s operates only at 100MBit/s), when the + absolute or percentage traffic of a port exceeds certain levels or if the + rate of errors or discards exceeds configurable limits. + + This check supports averaging the in- and + outgoing traffic over a configurable range of time by using an exponentially + weighted moving average - just as Linux does for the CPU load averages. + The averaging can be configured on a per-host and per-port basis. This is + done by adding a key {"average"} to the parameter dictionary with the number + of minutes that the average should cover as its value. Ports with averaging + turned on output two additional performance values: the averaged traffic + in bytes. If you have configured traffic levels, then those levels are + applied to the averaged values. + +item: + There are three allowed ways to specify a port: {1}: the last component of + the SNMP OID number (as string), the {ifDescr} of the port or the {ifAlias} of + the port. If you are using the alias, you have to make sure that it is unique + by configuring useful aliases in the switch. Check_MK does not check for uniqueness. + +inventory: + The inventory creates one service per WAN interface, usually one per system. + +perfdata: + {in}: The number of bytes received per second. + {out}: The number of bytes sent per second. + {in_avg_?}: optional: The averaged number of received bytes over a longer range of time + {out_avg_?}: optional: The averaged number of sent bytes + +[parameters] +parameters (dict): Due to its complexity, this check now uses a dictionary as + parameter. The tuple representation used up to version 1.1.8 is + currently still supported but discouraged. 
The dictionary can have + the following keys: + + {"errors"} - a pair of two float values to be used as WARN/CRIT + percentages for errors + discards. The default is {(0.01, 0.1)} - which + means that the service gets WARN if the error rate is at least + 0.01 percent. That means that one error per 10,000 packets is seen. + + {"traffic"} - a pair of two float values to be used as WARN/CRIT + levels for the bandwidth usage of the port. Integer numbers are interpreted + as bytes per second. So a value of {(500,1000)} will trigger WARN/CRIT + if either the ingoing or outgoing traffic exceeds 500/1000 bytes per second. + If you use floating point numbers, those are interpreted as percentages + of the available bandwidth as signalled by the port. A value of {(50.0, 80.0)} + will trigger WARN/CRIT, if at least 50%/80% of the nominal bandwidth is + used. The default is to impose no levels on the traffic. + + {"average"} - if you set this key to a number, then it is interpreted as + a time in minutes. The check will then compute the averaged used traffic + over approximately that range of time. Note: The algorithm used here is + the same as Linux uses for the CPU load. This means that more recent values + are weighted higher than older values and that even values out of the configured + time range are - to a low degree - represented in the average. If you configure + an average, then the traffic levels are applied to the averaged values. + + {"state"} - the expected operational status of the interface (as string or list of strings). If this + is {None} then the state is not checked. Possible values for the + state are {"1"}(up), {"2"}(down), {"3"}(testing), {"4"}(unknown), + {"5"}(dormant), {"6"}(notPresent) and {"7"}(lowerLayerDown). The default is to + remember the state found during inventory and enforce this. It is allowed to + set {"state"} either to a string (one single allowed state) or a list of + strings. For example, set this to '{["1", "5"]}' if you want to allow {up} + and {dormant}. + + {"speed"} - the expected port speed as an integer of the bits per second (not + the bytes!). If you set this to {None}, then the port speed is not checked. + The default is to remember and enforce the port speed found during inventory. + + {"assumed_speed_in"} - the assumed port input speed as an integer of the bits per second (not + the bytes!). If this value is not set, the check takes the value configured in {speed} + before using the automatically detected port speed + + {"assumed_speed_out"} - the assumed port output speed as an integer of the bits per second (not + the bytes!). If this value is not set, the check takes the value configured in {speed} + before using the automatically detected port speed + +[configuration] +if_inventory_porttypes (list of strings): List of interface types the inventory should create checks for. + Default is {[ '6', '32', '117' ]}, which means that all Ethernet, Frame Relay and Gigabit Ethernet ports + will be monitored. Virtual and loopback ports will be ignored. Please look into the check implementation + or in the SNMP MIB description of {ifType} for a complete list of port types. Another good source for possible + port types and their meanings might be http://www.iana.org/assignments/ianaiftype-mib/ianaiftype-mib. + +if_inventory_portstates (list of strings): Per default this variable is set to {['1']}, which means that + only ports found in the state {up} are being added to the monitoring. 
If you set this to {['1', '2', '4']} then + also ports in state {down} and {unknown} will be monitored. + +if_inventory_pad_portnumbers (boolean): If this is set to {True} (the default), then port numbers used as + items are padded with zeroes so that all items have the same length and ports will sort correctly in + the GUI. You can set this to {False} if you want to keep the same service descriptions as in versions + prior to 1.1.13i3. + +if_inventory_uses_description (boolean): Whether inventory should use the interface name as item (instead + of the interface index). Default is {False}. + +if_inventory_uses_alias (boolean): Whether inventory should use the interface alias as item. Please note + that in Linux, network cards have no aliases. The description is used instead of the alias. + +if_inventory_monitor_speed (boolean): Whether inventory should code the current speed setting of the port + into the check parameters and thus enforces a static speed setting on this port. Default is {False}, + which will code {None} as target speed. + +if_inventory_monitor_state (boolean): Whether inventory should code the current port status into the + check parameters and thus enforces the status of the port not to be changed in the future. Default is {True} - + so the port states will be monitored. Setting this to {False} will disable status checking of all + newly inventorized ports. + +if_default_error_levels (float, float): Default levels for errors. The default is {(0.01, 0.1)}, setting + the levels to 0.01 and 0.1 percent of total packets. + +if_default_traffic_levels (float/int, float/int): Default levels for checking traffic (used bandwidth). The + default is {(None, None)}, which means that the traffic is not being checked - just monitored. + +if_default_average (int): Default time range for averaging in minutes. This is preset to {None}, which disables + averaging of port traffic. + diff -Nru check-mk-1.2.2p3/fsc_fans check-mk-1.2.6p12/fsc_fans --- check-mk-1.2.2p3/fsc_fans 2013-11-05 09:23:07.000000000 +0000 +++ check-mk-1.2.6p12/fsc_fans 2015-09-21 10:59:54.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. 
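On the averaging that the fritz.wan_if page above describes: the if checks smooth traffic with an exponentially weighted moving average, the same shape of computation as the Linux load average. A sketch of the idea under simplifying assumptions (fixed sample interval; not the exact code in if.include): each update decays the old average toward the new sample, with the weight derived from the configured horizon.

import math

def ewma_update(old_avg, sample, elapsed_secs, horizon_minutes):
    # the old average's weight decays exponentially with elapsed time, so
    # recent samples dominate but older ones never vanish entirely
    w = math.exp(-float(elapsed_secs) / (horizon_minutes * 60))
    return w * old_avg + (1 - w) * sample

avg = 0.0
for rate in [1000.0, 1200.0, 900.0, 50000.0]:   # bytes/s, one sample per minute
    avg = ewma_update(avg, rate, 60, 15)
print('%.0f bytes/s averaged' % avg)            # stays far below the 50000 spike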
@@ -29,24 +29,40 @@ return [ (line[0], line[1], (int(line[1]) * 0.9, int(line[1]) * 0.8)) for line in info if int(line[1]) > 0 ] def check_fsc_fans(item, params, info): - warn, crit = params + if type(params) == tuple: + low_warn, low_crit = params + up_warn, up_crit = None, None + else: + low_warn, low_crit = params['lower'] + up_warn, up_crit = params.get('upper', ( None, None)) + for line in info: # , value1, value2 in info: name = line[0] if name != item: continue rpm = int(line[1]) - perfdata = [ ( 'rpm', rpm, warn, crit, 0 ) ] + perfdata = [ ( 'rpm', rpm, low_warn, low_crit, 0 ) ] infotext = "%d RPM" % rpm - if rpm < crit: - return (2, "CRIT - %s" % infotext, perfdata) - elif rpm < warn: - return (1, "WARN - %s" % infotext, perfdata) + levels = "Warn/Crit Lower: %d/%d " % (low_warn, low_crit ) + if up_crit: + levels += "Upper: %d/%d" % ( up_warn, up_crit ) + if rpm < low_crit or (up_crit and rpm > up_crit): + return 2, infotext + levels, perfdata + elif rpm < low_warn or (up_warn and rpm > up_warn): + return 1, infotext + levels, perfdata else: - return (0, "OK - %s" % infotext, perfdata) + return 0, infotext, perfdata + + return (3, "FAN %s not found in SNMP data" % item) - return (3, "UNKNOWN - FAN %s not found in SNMP data" % item) -check_info['fsc_fans'] = (check_fsc_fans, "FSC %s", 1, inventory_fsc_fans) -snmp_info['fsc_fans'] = ( ".1.3.6.1.4.1.231.2.10.2.2.5.2.2.1", [ 16, 8 ]) # , 9, 10 ] ) -snmp_scan_functions['fsc_fans'] = lambda oid: \ - oid(".1.3.6.1.4.1.231.2.10.2.1.1.0") +check_info["fsc_fans"] = { + 'check_function': check_fsc_fans, + 'inventory_function': inventory_fsc_fans, + 'group': "hw_fans", + 'service_description': 'FSC %s', + 'has_perfdata': True, + 'snmp_info': ('.1.3.6.1.4.1.231.2.10.2.2.5.2.2.1', [16, 8]), + 'snmp_scan_function': lambda oid: \ + oid(".1.3.6.1.4.1.231.2.10.2.1.1.0"), +} diff -Nru check-mk-1.2.2p3/fsc_ipmi_mem_status check-mk-1.2.6p12/fsc_ipmi_mem_status --- check-mk-1.2.2p3/fsc_ipmi_mem_status 2013-11-05 09:23:07.000000000 +0000 +++ check-mk-1.2.6p12/fsc_ipmi_mem_status 2015-06-24 09:48:37.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. 
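fsc_fans now takes either the legacy (warn, crit) tuple of lower RPM levels or a dict with a "lower" pair and an optional "upper" pair. The level logic from the hunk, extracted so it can be tried directly:

def fan_rpm_state(rpm, params):
    if type(params) == tuple:
        low_warn, low_crit = params
        up_warn, up_crit = None, None
    else:
        low_warn, low_crit = params['lower']
        up_warn, up_crit = params.get('upper', (None, None))
    if rpm < low_crit or (up_crit and rpm > up_crit):
        return 2
    elif rpm < low_warn or (up_warn and rpm > up_warn):
        return 1
    return 0

print(fan_rpm_state(1500, (2000, 1000)))                                    # 1
print(fan_rpm_state(9000, {'lower': (2000, 1000), 'upper': (8000, 8400)}))  # 2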
@@ -58,13 +58,16 @@ def check_fsc_ipmi_mem_status(name, _no_params, info): for line in info: if line[0] == 'E': - return (3, "UNKNOWN: Error in agent plugin output (%s)" % " ".join(line[1:])) + return (3, "Error in agent plugin output (%s)" % " ".join(line[1:])) elif line[1] == name: - level = int(line[2]) - return (fsc_ipmi_mem_status_levels[level][0], "%s: %s" % ( - nagios_state_names[fsc_ipmi_mem_status_levels[level][0]], - fsc_ipmi_mem_status_levels[level][1])) + return fsc_ipmi_mem_status_levels[int(line[2])] - return (3, "UNKNOWN - item %s not found" % name) + return (3, "item %s not found" % name) -check_info['fsc_ipmi_mem_status'] = (check_fsc_ipmi_mem_status, "IPMI Memory status %s", 1, inventory_fsc_ipmi_mem_status) + +check_info["fsc_ipmi_mem_status"] = { + 'check_function': check_fsc_ipmi_mem_status, + 'inventory_function': inventory_fsc_ipmi_mem_status, + 'service_description': 'IPMI Memory status %s', + 'has_perfdata': True, +} diff -Nru check-mk-1.2.2p3/fsc_subsystems check-mk-1.2.6p12/fsc_subsystems --- check-mk-1.2.2p3/fsc_subsystems 2013-11-05 09:23:07.000000000 +0000 +++ check-mk-1.2.6p12/fsc_subsystems 2015-06-24 09:48:37.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. @@ -36,15 +36,20 @@ status = int(line[1]) statusname = { 1:'ok', 2:'degraded', 3:'error', 4:'failed', 5:'unknown-init'}.get(status, 'invalid') if status == 1 or status == 5: - return (0, "OK - %s - no problems" % statusname) + return (0, "%s - no problems" % statusname) elif status >= 2 and status <= 4: - return (2, "CRIT - %s" % statusname) + return (2, "%s" % statusname) else: - return (3, "UNKNOWN - unknown status %d" % status) + return (3, "unknown status %d" % status) - return (3, "UNKNOWN - Subsystem %s not found in SNMP data" % item) + return (3, "Subsystem %s not found in SNMP data" % item) -check_info['fsc_subsystems'] = (check_fsc_subsystems, "FSC %s", 0, inventory_fsc_subsystems) -snmp_info['fsc_subsystems'] = ( ".1.3.6.1.4.1.231.2.10.2.11.3.1.1", [ 2, 3 ]) -snmp_scan_functions['fsc_subsystems'] = lambda oid: \ - oid(".1.3.6.1.4.1.231.2.10.2.1.1.0") + +check_info["fsc_subsystems"] = { + 'check_function': check_fsc_subsystems, + 'inventory_function': inventory_fsc_subsystems, + 'service_description': 'FSC %s', + 'snmp_info': ('.1.3.6.1.4.1.231.2.10.2.11.3.1.1', [2, 3]), + 'snmp_scan_function': lambda oid: \ + oid(".1.3.6.1.4.1.231.2.10.2.1.1.0"), +} diff -Nru check-mk-1.2.2p3/fsc_temp check-mk-1.2.6p12/fsc_temp --- check-mk-1.2.2p3/fsc_temp 2013-11-05 09:23:07.000000000 +0000 +++ check-mk-1.2.6p12/fsc_temp 2015-06-24 09:48:37.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. 
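fsc_subsystems keeps its status translation: codes 1 (ok) and 5 (unknown-init) count as OK, 2 through 4 as CRIT, anything else as UNKNOWN. The same branch as a stand-alone function:

def subsystem_state(status):
    statusname = {1: 'ok', 2: 'degraded', 3: 'error',
                  4: 'failed', 5: 'unknown-init'}.get(status, 'invalid')
    if status == 1 or status == 5:
        return (0, "%s - no problems" % statusname)
    elif 2 <= status <= 4:
        return (2, statusname)
    return (3, "unknown status %d" % status)

print(subsystem_state(5))   # (0, 'unknown-init - no problems')
print(subsystem_state(3))   # (2, 'error')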
@@ -32,29 +32,21 @@ # 8: critical level def inventory_fsc_temp(info): - # Ignore non-connected sensors - return [ (line[0], None) for line in info if int(line[1]) < 500 ] + for line in info: + # Ignore non-connected sensors + if int(line[1]) < 500: + yield (line[0], None) + def check_fsc_temp(item, _no_params, info): - for name, current, warn, crit in info: - if name != item: continue - current = int(current) - warn = int(warn) - crit = int(crit) - - infotext = "%d C (levels at %d C / %d C)" % (current, warn, crit) - perfdata = [ ("temp", current, warn, crit, 0 ) ] - - if current == -1 or current == 4294967295: - return(3, "UNKNOWN - Problem with sensor") - elif current >= crit and crit > 0: - return (2, "CRIT - %s" % infotext, perfdata) - elif current >= warn and warn > 0: - return (1, "WARN - %s" % infotext, perfdata) - else: - return (0, "OK - %s" % infotext, perfdata) + for name, rawtemp, warn, crit in info: + if name == item: + temp = int(rawtemp) + if temp == -1 or temp == 4294967295: + return 3, "Sensor or component missing" + else: + return check_temperature(temp, (int(warn), int(crit))) - return (3, "UNKNOWN - Sensor %s not found in SNMP data" % item) check_info['fsc_temp'] = { "inventory_function" : inventory_fsc_temp, @@ -64,4 +56,5 @@ "snmp_info" : ( ".1.3.6.1.4.1.231.2.10.2.2.5.2.1.1", [ 13, 11, 6, 8 ] ), "snmp_scan_function" : lambda oid: oid(".1.3.6.1.4.1.231.2.10.2.1.1.0"), "group" : "temperature_auto", + "includes" : [ "temperature.include" ], } diff -Nru check-mk-1.2.2p3/genua_carp check-mk-1.2.6p12/genua_carp --- check-mk-1.2.2p3/genua_carp 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/genua_carp 2015-06-24 09:48:37.000000000 +0000 @@ -0,0 +1,172 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. 
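inventory_fsc_temp above is a small stylistic conversion: the list comprehension becomes a generator, which yields the same pairs but leaves room for more per-line logic. Both forms side by side on made-up sensor data:

info = [['CPU0', '45'], ['CPU1', '500'], ['SYS', '38']]   # illustrative

def inventory_listcomp(info):
    return [(line[0], None) for line in info if int(line[1]) < 500]

def inventory_generator(info):
    for line in info:
        if int(line[1]) < 500:          # skip non-connected sensors
            yield (line[0], None)

print(inventory_listcomp(info) == list(inventory_generator(info)))   # True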
+ +# Example Agent Output: +# GENUA-MIB: + +#.1.3.6.1.4.1.3137.2.1.2.1.1.9 = INTEGER: 9 +#.1.3.6.1.4.1.3137.2.1.2.1.1.10 = INTEGER: 10 +#.1.3.6.1.4.1.3137.2.1.2.1.2.9 = STRING: "carp0" +#.1.3.6.1.4.1.3137.2.1.2.1.2.10 = STRING: "carp1" +#.1.3.6.1.4.1.3137.2.1.2.1.3.9 = INTEGER: 5 +#.1.3.6.1.4.1.3137.2.1.2.1.3.10 = INTEGER: 5 +#.1.3.6.1.4.1.3137.2.1.2.1.4.9 = INTEGER: 2 +#.1.3.6.1.4.1.3137.2.1.2.1.4.10 = INTEGER: 2 +#.1.3.6.1.4.1.3137.2.1.2.1.7.9 = INTEGER: 2 +#.1.3.6.1.4.1.3137.2.1.2.1.7.10 = INTEGER: 2 + + +def inventory_genua_carp(info): + inventory = [] + + # remove empty elements due to two alternative enterprise ids in snmp_info + info = filter(None, info) + + if info[0]: + for ifIndex, ifName, ifType, ifLinkState, ifCarpState in info[0]: + if ifCarpState in [ "0", "1", "2" ]: + inventory.append( (ifName, None) ) + return inventory + + +def genua_iftype(st): + names = { + '0' : 'unknown', + '1' : 'physical', + '2' : 'gif', + '3' : 'pppoe', + '4' : 'vlan', + '5' : 'lo', + '6' : 'carp', + '7' : 'unknown', + } + return names.get(st, st) + + +def genua_linkstate(st): + names = { + '0' : 'unknown', + '1' : 'down', + '2' : 'up', + '3' : 'hd', + '4' : 'fd', + } + return names.get(st, st) + + +def genua_carpstate(st): + names = { + '0' : 'init', + '1' : 'backup', + '2' : 'master', + } + return names.get(st, st) + +def check_genua_carp(item, _no_params, info): + + # remove empty elements due to two alternative enterprise ids in snmp_info + info = filter(None, info) + + if not info[0]: + return(3, "Invalid Output from Agent") + state = 0 + nodes = len(info) + masters = 0 + output = "" + if nodes > 1: + prefix = "Cluster test: " + else: + prefix = "Node test: " + + # Loop over all nodes, just one line if not a cluster + for line in info: + # Loop over interfaces on node + for ifIndex, ifName, ifType, ifLinkState, ifCarpState in line: + ifTypeStr = genua_iftype(str(ifType)) + ifLinkStateStr = genua_linkstate(str(ifLinkState)) + ifCarpStateStr = genua_carpstate(str(ifCarpState)) + # is inventorized interface in state carp master ? 
+ if ifName == item and ifCarpState == "2": + # is master + masters += 1 + if masters == 1: + if nodes > 1: + output = "one " + output += "node in carp state %s with IfLinkState %s" \ + % (ifCarpStateStr,ifLinkStateStr) + # first master + if ifLinkState == "2": + state = 0 + elif ifLinkState == "1": + state = 2 + elif ifLinkState in [ "0", "3" ]: + state = 1 + else: + state = 3 + else: + state = 2 + output = "%d nodes in carp state %s on cluster with %d nodes" \ + % (masters,ifCarpStateStr,nodes) + # look for non-masters, only interesting if no cluster + elif ifName == item and nodes == 1: + output = "node in carp state %s with IfLinkState %s" \ + % (ifCarpStateStr,ifLinkStateStr) + # carp backup + if ifCarpState == "1" and ifLinkState == "1": + state = 0 + else: + state = 1 + + # no masters found in cluster + if nodes > 1 and masters == 0: + state = 2 + output = "No master found on cluster with %d nodes" % nodes + + output = prefix + output + return(state, output) + +check_info['genua_carp'] = { + "inventory_function" : inventory_genua_carp, + "check_function" : check_genua_carp, + "service_description": "Carp Interface %s", + "has_perfdata" : False, + "snmp_info" : [( ".1.3.6.1.4.1.3137.2.1.2",[ + "1.1", # "ifIndex" + "1.2", # "ifName" + "1.3", # "ifType" + "1.4", # "ifLinkState" + "1.7", # "ifCarpState" + ]), + ( ".1.3.6.1.4.1.3717.2.1.2",[ + "1.1", # "ifIndex" + "1.2", # "ifName" + "1.3", # "ifType" + "1.4", # "ifLinkState" + "1.7", # "ifCarpState" + ]), + ], + "snmp_scan_function" : lambda oid: "genuscreen" in oid(".1.3.6.1.2.1.1.1.0").lower() +} diff -Nru check-mk-1.2.2p3/genua_fan check-mk-1.2.6p12/genua_fan --- check-mk-1.2.2p3/genua_fan 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/genua_fan 2015-06-24 09:48:37.000000000 +0000 @@ -0,0 +1,101 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. 
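A pattern shared by all the new genua checks: snmp_info lists two enterprise subtrees (.1.3.6.1.4.1.3137 and .1.3.6.1.4.1.3717) because devices answer under either one, so info arrives as a list of two tables and filter(None, info) drops the empty one. In isolation, with illustrative table contents:

info = [
    [],                                  # subtree the device did not answer
    [['9', 'carp0', '6', '2', '2'],      # ifIndex, ifName, ifType,
     ['10', 'carp1', '6', '2', '1']],    # ifLinkState, ifCarpState
]

tables = list(filter(None, info))        # keep only the populated table(s)
for ifIndex, ifName, ifType, ifLinkState, ifCarpState in tables[0]:
    print('%s: carp state %s' % (ifName, ifCarpState))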
+ +genua_fan_default_levels = { "lower": ( 2000, 1000), "upper": (8000, 8400) } + +def inventory_genua_fan(info): + # remove empty elements due to alternative enterprise id in snmp_info + info = filter(None, info) + + inventory = [] + for fanNo, fanName, fanRPM, fanState in info[0]: + inventory.append( (fanName, "genua_fan_default_levels") ) + return inventory + +def check_genua_fan(item, params, info): + + # remove empty elements due to alternative enterprise id in snmp_info + info = filter(None, info) + + fanStateStr = { "1":"OK", "2":"Warning", "3":"Critical", + "4":"Unkown", "5":"Unkown", "6":"Unkown"} + + lowerwarn, lowercrit = params["lower"] + upperwarn, uppercrit = params["upper"] + for line in info[0]: + fanNo, fanName, fanRPM, fanState = line + if fanName != item: continue + + rpm = saveint(fanRPM) + + if rpm > uppercrit or rpm < lowercrit: + rpmstate = 2 + rpmsym = "(!!)" + elif rpm > upperwarn or rpm < lowerwarn: + rpmstate = 1 + rpmsym = "(!)" + else: + rpmstate = 0 + rpmsym = "" + + if fanState in ( "3", "4", "5", "6" ): + statestate = 2 + statesym = "(!!)" + elif fanState == "2": + statestate = 1 + statesym = "(!)" + else: + statestate = 0 + statesym = "" + + perfdata = [ ( 'rpm', fanRPM, upperwarn, uppercrit, 0, uppercrit ) ] + infotext = "State: %s %s, RPM: %s %s" % \ + (fanStateStr[fanState], statesym, fanRPM, rpmsym) + state = max(statestate,rpmstate) + return (state, infotext, perfdata) + + return (3, "UNKNOWN - FAN %s not found in SNMP data" % item) + +check_info['genua_fan'] = { + "inventory_function" : inventory_genua_fan, + "check_function" : check_genua_fan, + "group" : "hw_fans", + "service_description": "FAN %s", + "has_perfdata" : True, + "snmp_info" : [( ".1.3.6.1.4.1.3717.2.1.1.1.1",[ + "1", # "fanNo" + "2", # "fanName" + "3", # "fanRPM" + "4" # "fanState" + ]), + ( ".1.3.6.1.4.1.3137.2.1.1.1.1",[ + "1", # "fanNo" + "2", # "fanName" + "3", # "fanRPM" + "4" # "fanState" + ])], + "snmp_scan_function" : lambda oid: "genuscreen" in oid(".1.3.6.1.2.1.1.1.0").lower() +} diff -Nru check-mk-1.2.2p3/genua_pfstate check-mk-1.2.6p12/genua_pfstate --- check-mk-1.2.2p3/genua_pfstate 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/genua_pfstate 2015-06-24 09:48:37.000000000 +0000 @@ -0,0 +1,108 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. 
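check_genua_fan grades two independent aspects, the RPM against lower/upper levels and the state code reported by the device, then merges them with max(), the usual worst-state combination for Nagios codes (0 OK, 1 WARN, 2 CRIT). Reduced to its core:

def rpm_state(rpm, lower, upper):
    if rpm < lower[1] or rpm > upper[1]:     # below low crit / above high crit
        return 2
    if rpm < lower[0] or rpm > upper[0]:
        return 1
    return 0

def reported_state(code):
    # "1" is OK, "2" warning, "3"-"6" (including the unknown codes) critical
    return {'1': 0, '2': 1}.get(code, 2)

# fan spins fine but the device flags a warning: the service goes WARN
print(max(rpm_state(4500, (2000, 1000), (8000, 8400)),
          reported_state('2')))              # 1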
+ +# Example Agent Output: +# GENUA-MIB: +# .1.3.6.1.4.1.3717.2.1.1.6.1 = INTEGER: 300000 +# .1.3.6.1.4.1.3717.2.1.1.6.2 = INTEGER: 1268 +# .1.3.6.1.4.1.3717.2.1.1.6.3 = INTEGER: 1 + +genua_pfstate_default_levels = { "used": ( None , None ) } + +def inventory_genua_pfstate(info): + # remove empty elements due to alternative enterprise id in snmp_info + info = filter(None, info) + + if info[0]: + if len(info[0][0]) == 3: + return [ (None, genua_pfstate_default_levels) ] + else: + return [] + + +def pfstate(st): + names = { + '0' : 'notOK', + '1' : 'OK', + '2' : 'unknown', + } + return names.get(st, st) + + +def check_genua_pfstate(item, params, info): + # remove empty elements due to alternative enterprise id in snmp_info + info = filter(None, info) + + if info[0]: + if len(info[0][0]) == 3: + pfstateMax = saveint(info[0][0][0]) + pfstateUsed = saveint(info[0][0][1]) + pfstateStatus = info[0][0][2] + else: + return(3, "Invalid Output from Agent") + + warn,crit = params.get("used") + if crit == None: + crit = pfstateMax + + state = 0 + usedsym = "" + statussym = "" + if pfstateStatus != "1": + state = 1 + statussym = "(!)" + + if crit and pfstateUsed > crit: + state = 2 + usedsym = "(!!)" + elif warn and pfstateUsed > warn: + state = 1 + usedsym = "(!)" + + pfstatus = pfstate(str(pfstateStatus)) + infotext = "PF State: %s%s States used: %d%s States max: %d" \ + % (pfstatus, statussym, pfstateUsed, usedsym, pfstateMax ) + perfdata = [ ( "statesused", pfstateUsed, None, pfstateMax ) ] + return (state, infotext, perfdata) + +check_info['genua_pfstate'] = { + "inventory_function" : inventory_genua_pfstate, + "check_function" : check_genua_pfstate, + "service_description": "Paketfilter Status", + "has_perfdata" : True, + "group" : "pf_used_states", + "snmp_info" : [( ".1.3.6.1.4.1.3717.2.1.1.6",[ + 1, # "pfstateMax" + 2, # "pfstateUsed" + 3, # "pfstateStatus" + ]), + ( ".1.3.6.1.4.1.3137.2.1.1.6",[ + 1, # "pfstateMax" + 2, # "pfstateUsed" + 3, # "pfstateStatus" + ])], + "snmp_scan_function" : lambda oid: "genuscreen" in oid(".1.3.6.1.2.1.1.1.0").lower() +} diff -Nru check-mk-1.2.2p3/genua_state_correlation check-mk-1.2.6p12/genua_state_correlation --- check-mk-1.2.2p3/genua_state_correlation 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/genua_state_correlation 2015-06-24 09:48:37.000000000 +0000 @@ -0,0 +1,112 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. 
If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. + +# Example Agent Output: +# GENUA-MIB: + +# .1.3.6.1.4.1.3137.2.1.2.1.1.9 = INTEGER: 9 +# .1.3.6.1.4.1.3137.2.1.2.1.1.10 = INTEGER: 10 +# .1.3.6.1.4.1.3137.2.1.2.1.2.9 = STRING: "carp0" +# .1.3.6.1.4.1.3137.2.1.2.1.2.10 = STRING: "carp1" +# .1.3.6.1.4.1.3137.2.1.2.1.3.9 = INTEGER: 5 +# .1.3.6.1.4.1.3137.2.1.2.1.3.10 = INTEGER: 5 +# .1.3.6.1.4.1.3137.2.1.2.1.4.9 = INTEGER: 2 +# .1.3.6.1.4.1.3137.2.1.2.1.4.10 = INTEGER: 2 +# .1.3.6.1.4.1.3137.2.1.2.1.7.9 = INTEGER: 2 +# .1.3.6.1.4.1.3137.2.1.2.1.7.10 = INTEGER: 2 + + +def inventory_genua_state(info): + # remove empty elements due to two alternative enterprise ids in snmp_info + info = filter(None, info) + if info[0]: + numifs = 0 + for ifIndex, ifName, ifType, ifLinkState, ifCarpState in info[0]: + if ifCarpState in [ "0", "1", "2" ]: + numifs += 1 + # inventorize only if we find at least two carp interfaces + if numifs > 1: + return [(None, None)] + return None + +def genua_state_str(st): + names = { + '0' : 'init', + '1' : 'backup', + '2' : 'master', + } + return names.get(st, st) + +def check_genua_state(item, _no_params, info): + + # remove empty elements due to two alternative enterprise ids in snmp_info + info = filter(None, info) + if not info[0]: + return(3, "Invalid Output from Agent") + + state = 0 + carp_info = [] + + for ifIndex, ifName, ifType, ifLinkState, ifCarpState in info[0]: + if ifType == "6": + carp_info.append((ifIndex, ifName, ifType, ifLinkState, ifCarpState)) + + # critical if the carp interfaces dont have the same state + carp_states = [ 0, 0, 0 ] + for i in range (0, len(carp_info)): + carp_states[int(carp_info[i][4])] += 1 + if carp_info[0][4] != carp_info[i][4]: + state = 2 + + output = "Number of carp IFs in states " + for i in ('0', '1', '2'): + output += genua_state_str(i) + output += ":%d " % carp_states[int(i)] + + return(state, output) + +check_info['genua_state_correlation'] = { + "inventory_function" : inventory_genua_state, + "check_function" : check_genua_state, + "service_description": "Carp Correlation", + "has_perfdata" : False, + "snmp_info" : [( ".1.3.6.1.4.1.3717.2.1.2",[ + "1.1", # "ifIndex" + "1.2", # "ifName" + "1.3", # "ifType" + "1.4", # "ifLinkState" + "1.7", # "ifCarpState" + ]), + ( ".1.3.6.1.4.1.3137.2.1.2",[ + "1.1", # "ifIndex" + "1.2", # "ifName" + "1.3", # "ifType" + "1.4", # "ifLinkState" + "1.7", # "ifCarpState" + ])], + "snmp_scan_function" : lambda oid: "genuscreen" in oid(".1.3.6.1.2.1.1.1.0").lower() + #"snmp_scan_function" : lambda oid: oid(".1.3.6.1.4.1.3717.2.1.2.1.7") != None +} diff -Nru check-mk-1.2.2p3/h3c_lanswitch_cpu check-mk-1.2.6p12/h3c_lanswitch_cpu --- check-mk-1.2.2p3/h3c_lanswitch_cpu 2013-11-05 09:23:07.000000000 +0000 +++ check-mk-1.2.6p12/h3c_lanswitch_cpu 2015-06-24 09:48:37.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. @@ -72,17 +72,17 @@ for line in info: if h3c_lanswitch_cpu_genitem(line[0]) == item: util = int(line[1]) - infotext = (" - average usage was %d%% over last 5 minutes." % util) + infotext = ("average usage was %d%% over last 5 minutes." 
% util) perfdata = [ ( "usage", util, warn, crit, 0, 100) ] if util > crit: - return (2, "CRIT" + infotext, perfdata) + return (2, infotext, perfdata) elif util > warn: - return (1, "WARN" + infotext, perfdata) + return (1, infotext, perfdata) else: - return (0, "OK" + infotext, perfdata) + return (0, infotext, perfdata) - return (3, "UNKNOWN - %s not found" % item) + return (3, "%s not found" % item) check_info["h3c_lanswitch_cpu"] = (check_h3c_lanswitch_cpu, "CPU Utilization %s", 1, inventory_h3c_lanswitch_cpu ) diff -Nru check-mk-1.2.2p3/h3c_lanswitch_sensors check-mk-1.2.6p12/h3c_lanswitch_sensors --- check-mk-1.2.2p3/h3c_lanswitch_sensors 2013-11-05 09:23:07.000000000 +0000 +++ check-mk-1.2.6p12/h3c_lanswitch_sensors 2015-06-24 09:48:37.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. @@ -34,19 +34,18 @@ for (endoid, status) in info: if h3c_lanswitch_genitem(endoid) == item: if status == "2": - return (2, "CRIT - Sensor %s status is %s" % (item, status)) + return (2, "Sensor %s status is %s" % (item, status)) elif status == "1": - return (0, "OK - Sensor %s status is %s" % (item, status)) + return (0, "Sensor %s status is %s" % (item, status)) else: - return (1, "WARN - Sensor % status is %s" % (item, status)) - return (3, "UNKNOWN - Sensor %s not found" % item ) + return (1, "Sensor % status is %s" % (item, status)) + return (3, "Sensor %s not found" % item ) check_info["h3c_lanswitch_sensors"] = (check_h3c_lanswitch_sensors, "%s", 0, inventory_h3c_lanswitch_sensors ) def h3c_lanswitch_genitem(endoid): deviceclass, one, id = endoid.split(".") -# print ("%s %s %s" % (deviceclass, one, id)) if deviceclass == "1": hwLswdev = "Fan" else: diff -Nru check-mk-1.2.2p3/heartbeat_crm check-mk-1.2.6p12/heartbeat_crm --- check-mk-1.2.2p3/heartbeat_crm 2013-11-05 09:23:07.000000000 +0000 +++ check-mk-1.2.6p12/heartbeat_crm 2015-09-21 10:59:54.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. @@ -24,8 +24,6 @@ # to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, # Boston, MA 02110-1301 USA. -# Author: Lars Michelsen - # Example outputs from agent: # <<>> # ============ @@ -49,26 +47,42 @@ # Nails down the DC to the node which is the DC during inventory. The check # will report CRITICAL when another node becomes the DC during later checks. # If set to "False" the check will be passed. 
+# +# Leave this option to be compatible with inventorized pre 1.2.5i6 heartbeat_crm_naildown = True # Max age of "last updated" +# +# Leave this option to be compatible with inventorized pre 1.2.5i6 heartbeat_crm_default_max_age = 60 # Naildown the resources to the nodes which care about the resources during # the inventory run +# +# Leave this option to be compatible with inventorized pre 1.2.5i6 heartbeat_crm_resources_naildown = True -def heartbeat_crm_parse_general(info, dc = '', numNodes = -1, numResources = -1): +# Holds a dict of settings which tell the inventory functions whether or not +# some options like the resources and DC role shal be nailed down to the +# node which holds these resources during inventory. +inventory_heartbeat_crm_rules = [] + +factory_settings["heartbeat_crm_default_levels"] = { + "max_age" : heartbeat_crm_default_max_age, +} + +def heartbeat_crm_parse_general(info): + dc, num_nodes, num_resources = None, None, None for line in info: if ' '.join(line[0:2]) == 'Last updated:': - lastUpdated = ' '.join(line[2:]) - elif dc == '' and ' '.join(line[0:2]) == 'Current DC:' and heartbeat_crm_naildown: + last_updated = ' '.join(line[2:]) + elif ' '.join(line[0:2]) == 'Current DC:': dc = line[2] - elif numNodes == -1 and ' '.join(line[1:3])[:-1] == 'Nodes configured': - numNodes = int(line[0]) - elif numResources == -1 and ' '.join(line[1:3]) == 'Resources configured.': - numResources = int(line[0]) - return (lastUpdated, dc, numNodes, numResources) + elif ' '.join(line[1:3]).rstrip('.,') == 'Nodes configured': + num_nodes = int(line[0]) + elif ' '.join(line[1:3]).rstrip('.,') == 'Resources configured': + num_resources = int(line[0]) + return (last_updated, dc, num_nodes, num_resources) def inventory_heartbeat_crm(info): # Use these lines to gather the inventory and perform this check: @@ -82,55 +96,78 @@ # - Naildown the DC or not. 
#   - Check the number of nodes/resources
 #   - Check the age of "last updated"
-    if len(info) > 0:
-        lastUpdated, dc, numNodes, numResources = heartbeat_crm_parse_general(info)
-        if not heartbeat_crm_naildown:
-            dc = None # Ignore DC on check
-        return [(None, '(heartbeat_crm_default_max_age, %r, %d, %d)' % (dc, numNodes, numResources))]
+    settings = host_extra_conf_merged(g_hostname, inventory_heartbeat_crm_rules)
+    try:
+        last_updated, dc, num_nodes, num_resources = heartbeat_crm_parse_general(info)
+    except:
+        # In the case that CRM is not working, add it as a service and show the error later
+        last_updated, dc, num_nodes, num_resources = 0,0,0,0
+    params = {
+        'num_nodes'     : num_nodes,
+        'num_resources' : num_resources,
+    }
+    if settings.get('naildown_dc', False):
+        params['dc'] = dc
+    return [(None, params)]

 def check_heartbeat_crm(item, params, info):
     if len(info) > 0:
-        lastUpdated, dc, numNodes, numResources = heartbeat_crm_parse_general(info)
-        param_max_age, param_dc, param_nodes, param_ressources = params
+        if info[0][0].lower().startswith("critical"):
+            return 2, " ".join(info[0])
+        last_updated, dc, numNodes, numResources = heartbeat_crm_parse_general(info)
+
+        # Convert old tuple params (pre 1.2.5i6)
+        if type(params) == tuple:
+            params = {
+                'max_age'       : params[0],
+                'dc'            : params[1] != "" and params[1] or None,
+                'num_nodes'     : params[2] != -1 and params[2] or None,
+                'num_resources' : params[3] != -1 and params[3] or None,
+            }

         # Check the freshness of the crm_mon output and terminate with CRITICAL
         # when too old information are found
-        dt = int(datetime.datetime(*time.strptime(lastUpdated, '%a %b %d %H:%M:%S %Y')[:6]).strftime("%s"))
+        dt = int(datetime.datetime(*time.strptime(last_updated, '%a %b %d %H:%M:%S %Y')[:6]).strftime("%s"))
         now = time.time()
         delta = now - dt
-        if delta > param_max_age:
-            return (2, 'CRIT - Status output too old: %d secs' % delta)
+        if delta > params['max_age']:
+            return 3, 'Ignoring reported data (Status output too old: %d secs)' % delta

         output = ''
         status = 0

         # Check for correct DC when enabled
-        if param_dc:
-            if dc == param_dc:
-                output += 'DC: %s, ' % dc
-            else:
-                output += 'DC: %s (Expected %s), ' % (dc, param_dc)
-                status = 2
+        if params.get('dc') == None or dc == params['dc']:
+            output += 'DC: %s, ' % dc
+        else:
+            output += 'DC: %s (Expected %s (!!)), ' % (dc, params['dc'])
+            status = 2

         # Check for number of nodes when enabled
-        if param_nodes != -1:
-            if numNodes == param_nodes:
+        if params['num_nodes'] != None:
+            if numNodes == params['num_nodes']:
                 output += 'Nodes: %d, ' % numNodes
             else:
-                output += 'Nodes: %d (Expected %d), ' % (numNodes, param_nodes)
+                output += 'Nodes: %d (Expected %d (!!)), ' % (numNodes, params['num_nodes'])
                 status = 2

         # Check for number of resources when enabled
-        if param_ressources != -1:
-            if numResources == param_ressources:
+        if params['num_resources'] != None:
+            if numResources == params['num_resources']:
                 output += 'Resources: %d, ' % numResources
             else:
-                output += 'Resources: %d (Expected %d), ' % (numResources, param_ressources)
+                output += 'Resources: %d (Expected %d (!!)), ' % (numResources, params['num_resources'])
                 status = 2

-        return (status, '%s - %s' % (nagios_state_names[status], output.rstrip(', ')))
+        return (status, output.rstrip(', '))

-    return (3, "UNKNOWN - Empty output from agent")
+check_info["heartbeat_crm"] = {
+    'check_function'      : check_heartbeat_crm,
+    'inventory_function'  : inventory_heartbeat_crm,
+    'service_description' : 'Heartbeat CRM General',
+    'group'               : 'heartbeat_crm',
+
'default_levels_variable' : 'heartbeat_crm_default_levels', +} def heartbeat_crm_parse_resources(info): blockStart = False @@ -138,7 +175,9 @@ resource = '' mode = 'single' for line in info: - if not blockStart and ' '.join(line) == 'Full list of resources:': + if ' '.join(line) == 'Failed actions:': + blockStart = False + elif not blockStart and ' '.join(line) == 'Full list of resources:': blockStart = True elif blockStart: if ' '.join(line[0:2]) == 'Resource Group:': @@ -183,15 +222,14 @@ # resource_slapmaster (ocf::heartbeat:OpenLDAP): Started mwp # resource_slapslave (ocf::heartbeat:OpenLDAP): Started smwp inventory = [] - if len(info) > 0: - for name, resources in heartbeat_crm_parse_resources(info).iteritems(): - # In naildown mode only resources which are started somewhere can be - # inventorized - if heartbeat_crm_resources_naildown and resources[0][2] != 'Stopped': - - inventory.append((name, '"%s"' % resources[0][3])) - else: - inventory.append((name, None)) + settings = host_extra_conf_merged(g_hostname, inventory_heartbeat_crm_rules) + for name, resources in heartbeat_crm_parse_resources(info).iteritems(): + # In naildown mode only resources which are started somewhere can be + # inventorized + if settings.get('naildown_resources', False) and resources[0][2] != 'Stopped': + inventory.append((name, '"%s"' % resources[0][3])) + else: + inventory.append((name, None)) return inventory def check_heartbeat_crm_resources(item, target_node, info): @@ -201,16 +239,17 @@ output += ' '.join(resource) if len(resource) == 3 or resource[2] != 'Started': status = 2 - output += ' (CRITICAL: Resource is in state "%s")' % resource[2] + output += ' (Resource is in state "%s" (!!))' % resource[2] elif target_node and target_node != resource[3] and resource[1] != 'Slave' and resource[1] != 'Clone': status = 2 - output += ' (CRITICAL: Expected node: %s)' % target_node + output += ' (Expected node: %s (!!))' % target_node output += ', ' - return (status, '%s - %s' % (nagios_state_names[status], output.rstrip(', '))) - -check_info['heartbeat_crm'] = (check_heartbeat_crm, "Heartbeat CRM General", 0, inventory_heartbeat_crm) -check_info['heartbeat_crm.resources'] = (check_heartbeat_crm_resources, "Heartbeat CRM %s", 0, inventory_heartbeat_crm_resources) + return (status, output.rstrip(', ')) -check_config_variables.append("heartbeat_crm_naildown") -check_config_variables.append("heartbeat_crm_resources_naildown") +check_info["heartbeat_crm.resources"] = { + 'check_function' : check_heartbeat_crm_resources, + 'inventory_function' : inventory_heartbeat_crm_resources, + 'service_description' : 'Heartbeat CRM %s', + 'group' : 'heartbeat_crm_resources', +} diff -Nru check-mk-1.2.2p3/heartbeat_crm.resources check-mk-1.2.6p12/heartbeat_crm.resources --- check-mk-1.2.2p3/heartbeat_crm.resources 2013-11-05 09:23:07.000000000 +0000 +++ check-mk-1.2.6p12/heartbeat_crm.resources 2015-06-24 09:48:36.000000000 +0000 @@ -1,6 +1,6 @@ -title: Check status of the resources in heartbeat clusters using crm=yes +title: Status of the resources in heartbeat clusters using crm=yes agents: linux -author: Lars Michelsen +catalog: os/services license: GPL distribution: check_mk description: @@ -12,17 +12,19 @@ In addition the check can report a problem if a ressource is not handled by a specified node. + Till version 1.2.5i6 the check nailed down the DC to the node which was DC during + inventory. This has been changed to not nailing down the node. You can change this back + to the old default by changing your configuration. 
+ inventory: On each node one service for each resource group or resource will be created. If {heartbeat_crm_resources_naildown} is set to {True}, then for each resource the current node of that resource is hardcoded as target parameter and will be checked in future. + One service will be created for reach resource group or resource. If {naildown_resources} is + set to {True} in {inventory_heartbeat_crm_rules}, then the node which is currently + holding the resource will be required to also hold it in future. [parameters] node (string): The expected node to handle this resource. When set to {None}, then the node will not be checked, only the resource state. - -[configuration] -heartbeat_crm_resources_naildown (bool): Naildown the resources to the nodes - which care about the resources during the inventory run. - diff -Nru check-mk-1.2.2p3/heartbeat_nodes check-mk-1.2.6p12/heartbeat_nodes --- check-mk-1.2.2p3/heartbeat_nodes 2013-11-05 09:23:07.000000000 +0000 +++ check-mk-1.2.6p12/heartbeat_nodes 2015-06-24 09:48:37.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. @@ -48,7 +48,7 @@ state_txt = '' if state != 'up': status = 2 - state_txt = ' (CRIT)' + state_txt = ' (!!)' linkOutput += '%s: %s%s, ' % (link, state, state_txt) linkOutput = linkOutput.rstrip(', ') @@ -58,10 +58,15 @@ status = 2 if not nodeStatus in [ 'active', 'up', 'ping', 'dead' ]: - return (3, "UNKNOWN - Node %s has an unhandled state: %s" % (line[0], nodeStatus)) + return (3, "Node %s has an unhandled state: %s" % (line[0], nodeStatus)) - return (status, '%s - Node %s is in state "%s". Links: %s' % (nagios_state_names[status], line[0], nodeStatus, linkOutput)) + return (status, 'Node %s is in state "%s". Links: %s' % (line[0], nodeStatus, linkOutput)) - return (3, "UNKNOWN - Node is not present anymore") + return (3, "Node is not present anymore") -check_info['heartbeat_nodes'] = (check_heartbeat_nodes, "Heartbeat Node %s", 0, inventory_heartbeat_nodes) + +check_info["heartbeat_nodes"] = { + 'check_function': check_heartbeat_nodes, + 'inventory_function': inventory_heartbeat_nodes, + 'service_description': 'Heartbeat Node %s', +} diff -Nru check-mk-1.2.2p3/heartbeat_rscstatus check-mk-1.2.6p12/heartbeat_rscstatus --- check-mk-1.2.2p3/heartbeat_rscstatus 2013-11-05 09:23:07.000000000 +0000 +++ check-mk-1.2.6p12/heartbeat_rscstatus 2015-06-24 09:48:37.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. 
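
As an illustrative aside to the naildown options described in the man page above: a minimal sketch of how a site might populate inventory_heartbeat_crm_rules in main.mk. The (value, tags, hosts) rule format is assumed from the usual host_extra_conf convention; the 'ha' tag is made up for the example.

# main.mk -- hypothetical example, assuming the standard
# host_extra_conf rule layout of (value, tag list, host list)
inventory_heartbeat_crm_rules = [
    # nail down both the DC role and the resource locations
    # on all hosts tagged 'ha'
    ( { 'naildown_dc' : True, 'naildown_resources' : True }, [ 'ha' ], ALL_HOSTS ),
]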
@@ -38,10 +38,15 @@ def check_heartbeat_rscstatus(item, expected_state, info): if len(info) > 0 and len(info[0]) > 0: if (type(expected_state) == list and info[0][0] in expected_state) or info[0][0] == expected_state: - return (0, "OK - Current state: %s" % (info[0][0])) + return (0, "Current state: %s" % (info[0][0])) else: - return (2, "CRIT - Current state: %s (Expected: %s)" % (info[0][0], expected_state)) + return (2, "Current state: %s (Expected: %s)" % (info[0][0], expected_state)) else: - return (3, "UNKNOWN - Got no information from agent") + return (3, "Got no information from agent") -check_info['heartbeat_rscstatus'] = (check_heartbeat_rscstatus, "Heartbeat Ressource Status", 0, inventory_heartbeat_rscstatus) + +check_info["heartbeat_rscstatus"] = { + 'check_function': check_heartbeat_rscstatus, + 'inventory_function': inventory_heartbeat_rscstatus, + 'service_description': 'Heartbeat Ressource Status', +} diff -Nru check-mk-1.2.2p3/helpers/df_magic_number.py check-mk-1.2.6p12/helpers/df_magic_number.py --- check-mk-1.2.2p3/helpers/df_magic_number.py 2013-11-05 09:23:09.000000000 +0000 +++ check-mk-1.2.6p12/helpers/df_magic_number.py 2015-06-24 09:48:39.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. diff -Nru check-mk-1.2.2p3/helpers/headrify check-mk-1.2.6p12/helpers/headrify --- check-mk-1.2.2p3/helpers/headrify 2013-11-05 09:23:09.000000000 +0000 +++ check-mk-1.2.6p12/helpers/headrify 2015-09-21 10:59:54.000000000 +0000 @@ -12,7 +12,7 @@ printf "|%-66s|\n" "${line//@/ }" done echo '| |' - echo '| Copyright Mathias Kettner 2013 mk@mathias-kettner.de |' + echo '| Copyright Mathias Kettner 2014 mk@mathias-kettner.de |' echo '+------------------------------------------------------------------+' cat <> $TMP fi - mv $TMP $file + mv $TMP $file || rm -f $TMP chmod $PERMISSIONS "$file" } @@ -101,7 +117,13 @@ headrify "$file" done else - { find -not -type l -and -not -name "jquery*" -and \( \ + { find -not -type l -and \ + -not -name "jquery*" \ + -not -name "*.rc" \ + -not -name "*.rc.in" \ + -not -name "ltmain.sh" \ + -not -name "*.exe" \ + -and \( \ -name "Makefile" -or \ -name "*.cfg" -or \ -name "Makefile.am" -or \ @@ -115,8 +137,8 @@ -name "*.php" -or \ -name "*.sh" -or \ -name "*.spec" \) ; \ - rm -f checks/*~ ; ls checks/* ; echo agents/plugins/* agents/* | tr ' ' \\n | grep -vx agents/waitmax | fgrep -v agents/hpux | fgrep -v windows | fgrep -v .exe | fgrep -v /plugins | fgrep -v .hpux ; ls agents/windows/*.cc ; echo agents/windows/Makefile ; } \ - | grep -v /Privat/ | grep -v livestatus/nagios/ | grep -vx '' | \ + rm -f checks/*~ ; ls checks/* ; echo agents/plugins/* agents/special/* agents/* | tr ' ' \\n | grep -vx agents/waitmax | fgrep -v windows | fgrep -v .exe | fgrep -v windows/plugins ; ls agents/windows/*.cc ; echo agents/windows/Makefile ; } \ + | egrep -v 'livestatus/nagios4?/' | grep -vx '' | \ while read file do headrify "$file" & diff -Nru check-mk-1.2.2p3/helpers/reindent.py check-mk-1.2.6p12/helpers/reindent.py --- check-mk-1.2.2p3/helpers/reindent.py 2013-11-05 09:23:09.000000000 +0000 +++ check-mk-1.2.6p12/helpers/reindent.py 2015-06-24 09:48:39.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . 
\ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. diff -Nru check-mk-1.2.2p3/helpers/wato-migrate-1.2.0.sh check-mk-1.2.6p12/helpers/wato-migrate-1.2.0.sh --- check-mk-1.2.2p3/helpers/wato-migrate-1.2.0.sh 2013-11-05 09:23:09.000000000 +0000 +++ check-mk-1.2.6p12/helpers/wato-migrate-1.2.0.sh 2015-06-24 09:48:39.000000000 +0000 @@ -6,7 +6,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. diff -Nru check-mk-1.2.2p3/hitachi_hnas_cifs check-mk-1.2.6p12/hitachi_hnas_cifs --- check-mk-1.2.2p3/hitachi_hnas_cifs 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/hitachi_hnas_cifs 2015-09-21 10:59:54.000000000 +0000 @@ -0,0 +1,49 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. 
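
Stepping back to the genua_state_correlation check earlier in this section: its core idea is to tally the CARP interfaces per state and go CRITICAL as soon as the states diverge. A standalone sketch of that correlation logic, not part of any shipped check:

# Sketch of the CARP correlation idea: count interfaces per CARP
# state ('0' init, '1' backup, '2' master) and flag any mixture
# of states on one node as CRITICAL.
def correlate_carp(states):
    counts = {'0': 0, '1': 0, '2': 0}
    for st in states:
        counts[st] += 1
    # all interfaces must agree; a mixed set means the pairs diverged
    status = 0 if len(set(states)) <= 1 else 2
    return status, counts

# e.g. correlate_carp(['2', '1']) -> (2, ...): one master and one
# backup interface on the same node is a split state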
+ +def inventory_hitachi_hnas_cifs(info): + # import pprint; pprint.pprint(info) + inventory = [] + for evs_id, share_name, users in info: + inventory.append( (evs_id + " " + share_name, None) ) + return inventory + +def check_hitachi_hnas_cifs(item, _no_params, info): + for evs_id, share_name, users in info: + if evs_id + " " + share_name == item: + perfdata = [ ('users', users, '', '', 0) ] + return 0, "%s users" % users, perfdata + return 3, "Share not found" + +check_info["hitachi_hnas_cifs"] = { + "check_function" : check_hitachi_hnas_cifs, + "inventory_function" : inventory_hitachi_hnas_cifs, + "service_description" : "CIFS Share EVS %s", + "has_perfdata" : True, + "snmp_info" : (".1.3.6.1.4.1.11096.6.1.1.3.2.1.3.1", [1, 2, 5]), + # cifsShareEvsId, cifsShareName, cifsShareUsers + "snmp_scan_function" : lambda oid: oid(".1.3.6.1.2.1.1.2.0").startswith(".1.3.6.1.4.1.11096.6"), +} diff -Nru check-mk-1.2.2p3/hitachi_hnas_cpu check-mk-1.2.6p12/hitachi_hnas_cpu --- check-mk-1.2.2p3/hitachi_hnas_cpu 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/hitachi_hnas_cpu 2015-06-24 09:48:37.000000000 +0000 @@ -0,0 +1,59 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. 
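
The cifs check above, like all the Hitachi HNAS checks that follow, gates SNMP detection on the sysObjectID prefix .1.3.6.1.4.1.11096.6. A rough sketch of how such a scan function is evaluated, assuming only the convention visible here: Check_MK hands the lambda a lookup callable that maps an OID string to its value on the target device. The fake_get helper is hypothetical test scaffolding.

def run_scan(scan_function, snmp_get):
    # snmp_get: callable OID -> value (or None), e.g. backed by a cached walk
    return bool(scan_function(snmp_get))

# hypothetical device answering sysObjectID with the HNAS prefix
fake_get = lambda oid: ".1.3.6.1.4.1.11096.6.1.1" if oid == ".1.3.6.1.2.1.1.2.0" else None
hnas_scan = lambda oid: oid(".1.3.6.1.2.1.1.2.0").startswith(".1.3.6.1.4.1.11096.6")
assert run_scan(hnas_scan, fake_get)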
+ +hitachi_hnas_cpu_default_levels = { "levels" : (80.0, 90.0) } + +def inventory_hitachi_hnas_cpu(info): + inventory = [] + for id, util in info: + inventory.append( (id, hitachi_hnas_cpu_default_levels) ) + return inventory + +def check_hitachi_hnas_cpu(item, params, info): + warn, crit = params["levels"] + rc = 0 + + for id, util in info: + if id == item: + util=float(util) + if util > warn: + rc = 1 + if util > crit: + rc = 2 + perfdata = [ ('cpu_util', str(util) + '%', warn, crit, 0, 100) ] + return rc, "CPU utilization is %s%%" % util, perfdata + + return 3, "No CPU utilization found" + +check_info["hitachi_hnas_cpu"] = { + "check_function" : check_hitachi_hnas_cpu, + "inventory_function" : inventory_hitachi_hnas_cpu, + "service_description" : "CPU utilization PNode %s", + "has_perfdata" : True, + "snmp_info" : (".1.3.6.1.4.1.11096.6.1.1.6.1.2.1", [1, 3]), # ID, Utilization + "snmp_scan_function" : lambda oid: oid(".1.3.6.1.2.1.1.2.0").startswith(".1.3.6.1.4.1.11096.6"), + "group" : "cpu_utilization_multiitem", +} diff -Nru check-mk-1.2.2p3/hitachi_hnas_fan check-mk-1.2.6p12/hitachi_hnas_fan --- check-mk-1.2.2p3/hitachi_hnas_fan 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/hitachi_hnas_fan 2015-06-24 09:48:37.000000000 +0000 @@ -0,0 +1,92 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. + +def inventory_hitachi_hnas_fan(info): + inventory = [] + for clusternode, fan_id, fitted_status, speed_status, speed in info: + inventory.append( (clusternode + "." + fan_id, None) ) + return inventory + + +def check_hitachi_hnas_fan(item, _no_params, info): + + fitted_status_map = ( + ("ok", 0), # 1 + ("okIdWrong(!)", 1), # 2 + ("notFitted(!!)", 2), # 3 + ("unknown(!)", 1) # 4 + ) + + speed_status_map = ( + ("ok", 0), # 1 + ("warning(!)", 1), # 2 + ("severe(!!)", 2), # 3 + ("unknown(!)", 1), # 4 + ) + + for clusternode, fan_id, fitted_status, speed_status, speed in info: + if clusternode + "." 
+ fan_id == item: + fitted_status = int(fitted_status) + speed_status = int(speed_status) + speed = int(speed) + infotext = "PNode %s fan %s" % (clusternode, fan_id) + + worststate = 0 + + # check fitted status + name, state = fitted_status_map[fitted_status - 1] + infotext += ", fitted status is %s" % name + worststate = max(worststate, state) + + # check speed status + name, state = speed_status_map[speed_status - 1] + infotext += ", speed status is %s" % name + worststate = max(worststate, state) + + # report speed + infotext += ", speed is %s rpm" % speed + perfdata = [ ('fanspeed', str(speed) + 'rpm', '', '', 0, '') ] + + return worststate, infotext, perfdata + + return 3, "No fan %s found" % item + + + +check_info["hitachi_hnas_fan"] = { + "check_function" : check_hitachi_hnas_fan, + "inventory_function" : inventory_hitachi_hnas_fan, + "service_description" : "Fan %s", + "has_perfdata" : True, + "snmp_info" : (".1.3.6.1.4.1.11096.6.1.1.1.2.1.11.1", [ + 1, # fanClusterNode + 2, # fanIndex + 3, # fanFittedStatus + 4, # fanSpeedStatus + 5, # fanSpeed + ]), + "snmp_scan_function" : lambda oid: oid(".1.3.6.1.2.1.1.2.0").startswith(".1.3.6.1.4.1.11096.6"), +} diff -Nru check-mk-1.2.2p3/hitachi_hnas_fc_if check-mk-1.2.6p12/hitachi_hnas_fc_if --- check-mk-1.2.2p3/hitachi_hnas_fc_if 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/hitachi_hnas_fc_if 2015-06-24 09:48:37.000000000 +0000 @@ -0,0 +1,90 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. + + +def hitachi_hnas_fc_if_convert_info(info): + converted = [] + for line in info: + converted.append(map(str, [ + "%d%03d" % (int(line[0]), int(line[1])), # ifIndex + line[0] + "." + line[1], # ifDescr (use ClusterNode.InterfaceIndex) + "", # ifType: do not set port type + int(line[3]) * 1000000000, # ifHighSpeed + line[2] == "1" and 1 or 2, # ifOperStatus (map other states to down) + line[4], # ifHCInOctets + 0, # ifHCInUcastPkts + 0, # ifHCInMulticastPkts + 0, # ifHCInBroadcastPkts + line[13], # ifInDiscards + sum(map(int, line[6:13])), # ifInErrors + line[5], # ifHCOutOctets + 0, # ifHCOutUcastPkts + 0, # ifHCOutMulticastPkts + 0, # ifHCOutBroadcastPkts + 0, # ifOutDiscards + 0, # ifOutErrors + 0, # ifOutQLen + line[0] + "." 
+ line[1], # ifAlias, same as description + "", # ifPhysAddress + ])) + return converted + +def inventory_hitachi_hnas_fc_if(info): + converted = hitachi_hnas_fc_if_convert_info(info) + return inventory_if_common(converted) + +def check_hitachi_hnas_fc_if(item, params, info): + converted = hitachi_hnas_fc_if_convert_info(info) + return check_if_common(item, params, converted) + + +check_info["hitachi_hnas_fc_if"] = { + "check_function" : check_hitachi_hnas_fc_if, + "inventory_function" : inventory_hitachi_hnas_fc_if, + "service_description" : "Interface FC %s", + "has_perfdata" : True, + "snmp_info" : (".1.3.6.1.4.1.11096.6.1.1.1.3.6.25.1", [ + 1, # fcStatsClusterNode 0 + 2, # fcStatsInterfaceIndex 1 + 4, # fcStatsInterfaceStatus 2 + 5, # fcStatsInterfaceLinkSpeed 3 + 7, # fcStatsInstantaneousInRate 4 + 8, # fcStatsInstantaneousOutRate 5 + 13, # fcStatsSignalLossErrors 6 + 14, # fcStatsBadRXCharErrors 7 + 15, # fcStatsLossSyncErrors 8 + 16, # fcStatsLinkFailErrors 9 + 17, # fcStatsRXEOFErrors 10 + 19, # fcStatsBadCRCErrors 11 + 20, # fcStatsProtocolErrors 12 + 18, # fcStatsDiscardedFrameErrors 13 + ]), + "snmp_scan_function" : lambda oid: oid(".1.3.6.1.2.1.1.2.0").startswith(".1.3.6.1.4.1.11096.6"), + "group" : "if", + "default_levels_variable": "if_default_levels", + "includes" : [ "if.include" ], +} + diff -Nru check-mk-1.2.2p3/hitachi_hnas_fpga check-mk-1.2.6p12/hitachi_hnas_fpga --- check-mk-1.2.2p3/hitachi_hnas_fpga 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/hitachi_hnas_fpga 2015-06-24 09:48:37.000000000 +0000 @@ -0,0 +1,61 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. + +hitachi_hnas_fpga_default_levels = { "levels" : (80.0, 90.0) } + +def inventory_hitachi_hnas_fpga(info): + inventory = [] + for clusternode, id, name, util in info: + inventory.append( (clusternode + "." + id + " " + name, hitachi_hnas_fpga_default_levels) ) + return inventory + +def check_hitachi_hnas_fpga(item, params, info): + warn, crit = params["levels"] + rc = 0 + + for clusternode, id, name, util in info: + if clusternode + "." 
+ id + " " + name == item: + util=float(util) + if util > warn: + rc = 1 + if util > crit: + rc = 2 + perfdata = [ ('fpga_util', str(util) + '%', warn, crit, 0, 100) ] + return rc, "PNode %s FPGA %s %s utilization is %s%%" % (clusternode, id, name, util), perfdata + + return 3, "No utilization found for FPGA %s" % item + +check_info["hitachi_hnas_fpga"] = { + "check_function" : check_hitachi_hnas_fpga, + "inventory_function" : inventory_hitachi_hnas_fpga, + "service_description" : "FPGA %s", + "has_perfdata" : True, + "snmp_info" : (".1.3.6.1.4.1.11096.6.1.1.6.1.4.1", [1, 2, 3, 4]), + # fpgaUtilizationCnIndex (=PNode), fpgaUtilizationFpgaIndex (=ID), + # fpgaUtilizationFpgaName, fpgaUtilization + "snmp_scan_function" : lambda oid: oid(".1.3.6.1.2.1.1.2.0").startswith(".1.3.6.1.4.1.11096.6"), + "group" : "fpga_utilization", +} diff -Nru check-mk-1.2.2p3/hitachi_hnas_pnode check-mk-1.2.6p12/hitachi_hnas_pnode --- check-mk-1.2.2p3/hitachi_hnas_pnode 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/hitachi_hnas_pnode 2015-06-24 09:48:37.000000000 +0000 @@ -0,0 +1,67 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. 
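
The CPU and FPGA utilization checks above share the same threshold pattern: compare a percentage against a (warn, crit) pair and escalate the state. A generic sketch of that pattern, equivalent in effect to the sequential "if util > warn / if util > crit" updates used there:

def level_state(util, levels):
    warn, crit = levels          # e.g. (80.0, 90.0)
    if util > crit:
        return 2                 # CRITICAL
    elif util > warn:
        return 1                 # WARNING
    return 0                     # OK

assert level_state(85.0, (80.0, 90.0)) == 1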
+ +def hitachi_hnas_pnode_combine_item(id, name): + combined = str(id) + if name != "": + combined += " " + name + return combined + +def inventory_hitachi_hnas_pnode(info): + inventory = [] + for id, name, status in info: + inventory.append( (hitachi_hnas_pnode_combine_item(id, name), None) ) + return inventory + +def check_hitachi_hnas_pnode(item, _no_params, info): + statusmap = (("", 3), + ("unknown", 3), + ("up", 0), + ("notUp", 1), + ("onLine", 0), + ("dead", 2), + ("dormant", 2), + ) + + for id, name, status in info: + if hitachi_hnas_pnode_combine_item(id, name) == item: + status = int(status) + if status == 0 or status >= len(statusmap): + return 3, "PNode reports unidentified status %s" % status + else: + return statusmap[status][1], "PNode reports status %s" % statusmap[status][0] + + return 3, "SNMP did not report a status of this PNode" + +check_info["hitachi_hnas_pnode"] = { + "check_function" : check_hitachi_hnas_pnode, + "inventory_function" : inventory_hitachi_hnas_pnode, + "service_description" : "PNode %s", + "has_perfdata" : False, + "snmp_info" : (".1.3.6.1.4.1.11096.6.1.1.1.2.5.9.1", [1, 2, 4]), + # ID, Name, Status + "snmp_scan_function" : lambda oid: oid(".1.3.6.1.2.1.1.2.0").startswith(".1.3.6.1.4.1.11096.6"), +} diff -Nru check-mk-1.2.2p3/hitachi_hnas_psu check-mk-1.2.6p12/hitachi_hnas_psu --- check-mk-1.2.2p3/hitachi_hnas_psu 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/hitachi_hnas_psu 2015-06-24 09:48:37.000000000 +0000 @@ -0,0 +1,59 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. + +def inventory_hitachi_hnas_psu(info): + inventory = [] + for clusternode, id, status in info: + inventory.append( (clusternode + "." + id, None) ) + return inventory + +def check_hitachi_hnas_psu(item, _no_params, info): + statusmap = (("", 3), # 0 + ("ok", 0), # 1 + ("failed", 2), # 2 + ("notFitted", 1), # 3 + ("unknown", 3), # 4 + ) + + for clusternode, id, status in info: + if clusternode + "." 
+ id == item: + status = int(status) + if status == 0 or status >= len(statusmap): + return 3, "PNode %s PSU %s reports unidentified status %s" % (clusternode, id, status) + else: + return statusmap[status][1], "PNode %s PSU %s reports status %s" % (clusternode, id, statusmap[status][0]) + + return 3, "SNMP did not report a status of this PSU" + +check_info["hitachi_hnas_psu"] = { + "check_function" : check_hitachi_hnas_psu, + "inventory_function" : inventory_hitachi_hnas_psu, + "service_description" : "PSU %s", + "has_perfdata" : False, + "snmp_info" : (".1.3.6.1.4.1.11096.6.1.1.1.2.1.13.1", [1, 2, 3]), + # ClusterNode, ID, Status + "snmp_scan_function" : lambda oid: oid(".1.3.6.1.2.1.1.2.0").startswith(".1.3.6.1.4.1.11096.6"), +} diff -Nru check-mk-1.2.2p3/hitachi_hnas_quorumdevice check-mk-1.2.6p12/hitachi_hnas_quorumdevice --- check-mk-1.2.2p3/hitachi_hnas_quorumdevice 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/hitachi_hnas_quorumdevice 2015-06-24 09:48:37.000000000 +0000 @@ -0,0 +1,51 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. 
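
The PSU check above uses a recurring idiom in this family: a tuple keyed by the 1-based SNMP enum, with a dummy entry at index 0 so the raw value can index the tuple directly, plus a bounds guard that maps anything unexpected to UNKNOWN. A condensed sketch:

statusmap = (("", 3), ("ok", 0), ("failed", 2), ("notFitted", 1), ("unknown", 3))

def decode(status):
    # out-of-range enum values become UNKNOWN instead of an IndexError
    if status <= 0 or status >= len(statusmap):
        return 3, "unidentified status %d" % status
    name, state = statusmap[status]
    return state, name

assert decode(2) == (2, "failed")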
+ +def inventory_hitachi_hnas_quorumdevice(info): + return [(None, None)] + +def check_hitachi_hnas_quorumdevice(item, _no_params, info): + status=int(info[0][0]) + statusmap = ("unknown", "unconfigured", "offLine", "owned", "configured", + "granted", "clusterNodeNotUp", "misconfigured") + if status >= len(statusmap): + return 3, "Quorum Device reports unidentified status %s" % status + else: + if status == 4: + rc = 0 + else: + rc = 1 + return rc, "Quorum Device reports status %s" % statusmap[status] + +check_info["hitachi_hnas_quorumdevice"] = { + "check_function" : check_hitachi_hnas_quorumdevice, + "inventory_function" : inventory_hitachi_hnas_quorumdevice, + "service_description" : "Quorum Device", + "has_perfdata" : False, + "snmp_info" : (".1.3.6.1.4.1.11096.6.1.1.1.2.5", [7]), # clusterQuorumDeviceStatus + "snmp_scan_function" : lambda oid: oid(".1.3.6.1.2.1.1.2.0").startswith(".1.3.6.1.4.1.11096.6"), +} + diff -Nru check-mk-1.2.2p3/hitachi_hnas_span check-mk-1.2.6p12/hitachi_hnas_span --- check-mk-1.2.2p3/hitachi_hnas_span 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/hitachi_hnas_span 2015-09-21 10:59:54.000000000 +0000 @@ -0,0 +1,59 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. 
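
A note on the quorum device inventory above: checks that can exist at most once per host inventorize a single service with item None, so the service description carries no "%s" placeholder and the check function ignores its item argument. A minimal sketch of that pattern, here hedged to only create the service when the device answered at all:

def inventory_single(info):
    if info:
        return [(None, None)]
    return []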
+ +check_includes["hitachi_hnas_span"] = [ "df.include" ] + +def inventory_hitachi_hnas_span(info): + mplist = [] + for id, label, total_upper, total_lower, used_upper, used_lower in info: + mplist.append(id + " " + label) + return df_inventory(mplist) + +def check_hitachi_hnas_span(item, params, info): + fslist = [] + for id, label, total_upper, total_lower, used_upper, used_lower in info: + if id + " " + label == item: + size_mb = (int(total_upper) * 2 ** 32 + int(total_lower)) / 1048576.0 + used_mb = (int(used_upper) * 2 ** 32 + int(used_lower)) / 1048576.0 + avail_mb = size_mb - used_mb + fslist.append((item, size_mb, avail_mb)) + + return df_check_filesystem_list(item, params, fslist) + + return 3, "Span %s not found" % item + +check_info["hitachi_hnas_span"] = { + "check_function" : check_hitachi_hnas_span, + "inventory_function" : inventory_hitachi_hnas_span, + "service_description" : "Span %s", + "has_perfdata" : True, + "snmp_info" : (".1.3.6.1.4.1.11096.6.1.1.6.4.2.1", [1, 2, 3, 4, 5, 6]), + # spanStatsSpanId, spanLabel, spanCapacityTotalUpper, spanCapacityTotalLower, + # spanCapacityUsedUpper, spanCapacityUsedLower + "snmp_scan_function" : lambda oid: oid(".1.3.6.1.2.1.1.2.0").startswith(".1.3.6.1.4.1.11096.6"), + "group" : "filesystem", + "default_levels_variable": "filesystem_default_levels", +} diff -Nru check-mk-1.2.2p3/hitachi_hnas_temp check-mk-1.2.6p12/hitachi_hnas_temp --- check-mk-1.2.2p3/hitachi_hnas_temp 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/hitachi_hnas_temp 2015-06-24 09:48:37.000000000 +0000 @@ -0,0 +1,63 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. + +def inventory_hitachi_hnas_temp(info): + inventory = [] + for clusternode, id, status, temp in info: + inventory.append( (clusternode + "." + id, None) ) + return inventory + +def check_hitachi_hnas_temp(item, _no_params, info): + temp_status_map = ( ("", 3), # 0 + ("ok", 0), # 1 + ("tempWarning", 1), # 2 + ("tempSevere", 2), # 3 + ("tempSensorFailed", 2), # 4 + ("tempSensorWarning", 1), # 5 + ("unknown", 3), # 6 + ) + + for clusternode, id, status, temp in info: + if clusternode + "." 
+ id == item:
+            status=int(status)
+            temp=int(temp)
+
+            if status == 0 or status >= len(temp_status_map):
+                return 3, "PNode %s Sensor %s reported unidentified status %s" % (clusternode, id, status)
+            else:
+                perfdata = [ ('temp', str(temp) + 'C') ]
+                return temp_status_map[status][1], "PNode %s Sensor %s reported status %s, temperature is %s °C" % (clusternode, id, temp_status_map[status][0], temp), perfdata
+    return 3, "No sensor %s found" % item
+
+check_info["hitachi_hnas_temp"] = {
+    "check_function"      : check_hitachi_hnas_temp,
+    "inventory_function"  : inventory_hitachi_hnas_temp,
+    "service_description" : "Temperature %s",
+    "has_perfdata"        : True,
+    "snmp_info"           : (".1.3.6.1.4.1.11096.6.1.1.1.2.1.9.1", [1, 2, 3, 4]),
+    # temperatureSensorClusterNode, temperatureSensorIndex, temperatureSensorStatus, temperatureSensorCReading
+    "snmp_scan_function"  : lambda oid: oid(".1.3.6.1.2.1.1.2.0").startswith(".1.3.6.1.4.1.11096.6"),
+}
diff -Nru check-mk-1.2.2p3/hitachi_hnas_vnode check-mk-1.2.6p12/hitachi_hnas_vnode
--- check-mk-1.2.2p3/hitachi_hnas_vnode 1970-01-01 00:00:00.000000000 +0000
+++ check-mk-1.2.6p12/hitachi_hnas_vnode 2015-06-24 09:48:37.000000000 +0000
@@ -0,0 +1,69 @@
+#!/usr/bin/python
+# -*- encoding: utf-8; py-indent-offset: 4 -*-
+# +------------------------------------------------------------------+
+# | ____ _ _ __ __ _ __ |
+# | / ___| |__ ___ ___| | __ | \/ | |/ / |
+# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / |
+# | | |___| | | | __/ (__| < | | | | . \ |
+# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ |
+# | |
+# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de |
+# +------------------------------------------------------------------+
+#
+# This file is part of Check_MK.
+# The official homepage is at http://mathias-kettner.de/check_mk.
+#
+# check_mk is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by
+# the Free Software Foundation in version 2. check_mk is distributed
+# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with-
+# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE. See the GNU General Public License for more de-
+# tails. You should have received a copy of the GNU General Public
+# License along with GNU Make; see the file COPYING. If not, write
+# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
+# Boston, MA 02110-1301 USA.
+
+def hitachi_hnas_vnode_combine_item(id, name):
+    combined = str(id)
+    if name != "":
+        combined += " " + name
+    return combined
+
+def inventory_hitachi_hnas_vnode(info):
+    inventory = []
+    for id, name, status, is_admin, hosted_by in info:
+        inventory.append( (hitachi_hnas_vnode_combine_item(id, name), None) )
+    return inventory
+
+def check_hitachi_hnas_vnode(item, _no_params, info):
+    statusmap = (("", 3),
+                 ("unknown", 3),
+                 ("onLine", 0),
+                 ("offLine", 2),
+                )
+
+    for id, name, status, is_admin, hosted_by in info:
+        if hitachi_hnas_vnode_combine_item(id, name) == item:
+            status = int(status)
+            nodetype = ""
+            if status == 0 or status >= len(statusmap):
+                return 3, "EVS reports unidentified status %s" % status
+            else:
+                if is_admin == "0":
+                    nodetype = "This is a service node."
+                if is_admin == "1":
+                    nodetype = "This is an administrative node."
+                return statusmap[status][1], "EVS is hosted by PNode %s and reports status %s. 
%s" % (hosted_by, statusmap[status][0], nodetype) + + return 3, "SNMP did not report a status of this EVS" + +check_info["hitachi_hnas_vnode"] = { + "check_function" : check_hitachi_hnas_vnode, + "inventory_function" : inventory_hitachi_hnas_vnode, + "service_description" : "EVS %s", + "has_perfdata" : False, + "snmp_info" : (".1.3.6.1.4.1.11096.6.1.1.1.2.5.11.1", [1, 2, 4, 5, 6]), + # ID, Name, Status, isAdmin, hostedBy + "snmp_scan_function" : lambda oid: oid(".1.3.6.1.2.1.1.2.0").startswith(".1.3.6.1.4.1.11096.6"), +} diff -Nru check-mk-1.2.2p3/hitachi_hnas_volume check-mk-1.2.6p12/hitachi_hnas_volume --- check-mk-1.2.2p3/hitachi_hnas_volume 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/hitachi_hnas_volume 2015-09-21 10:59:54.000000000 +0000 @@ -0,0 +1,88 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. 
+ +check_includes["hitachi_hnas_volume"] = [ "df.include" ] + +def inventory_hitachi_hnas_volume(info): + mplist = [] + for id, label, volume_status, total, free, evs in info: + mplist.append(id + " " + label) + return df_inventory(mplist) + +def check_hitachi_hnas_volume(item, params, info): + # use checks from df.include + fslist = [] + for volid, label, volume_status, total, free, evs in info: + if volid == '': + continue + if volid + " " + label == item: + if total and free: + size_mb = int(total) / 1048576.0 + avail_mb = int(free) / 1048576.0 + fslist.append((item, size_mb, avail_mb)) + status, message, perfdata = df_check_filesystem_list(item, params, fslist) + else: + status = 1 + message = "no filesystem size information (!)" + perfdata = [] + + # evaluate volumeStatus + statusmap = (("", 3), + ("unformatted", 1), + ("mounted", 0), + ("formatted", 1), + ("needsChecking", 2), + ) + + error_string = ( "", " (!)", " (!!)", "" ) + volume_status = int(volume_status) + + if volume_status == 0 or volume_status >= len(statusmap): + message += ", Volume reports unidentified status %s" % volume_status + status = max(status, 3) + else: + message += ", Status %s%s" % (statusmap[volume_status][0], error_string[statusmap[volume_status][1]]) + status = max(status, statusmap[volume_status][1]) + + # report evs + message += ", assigned to EVS %s" % evs + return status, message, perfdata + + return 3, "Volume %s not found" % item + + +check_info["hitachi_hnas_volume"] = { + "check_function" : check_hitachi_hnas_volume, + "inventory_function" : inventory_hitachi_hnas_volume, + "service_description" : "Volume %s", + "has_perfdata" : True, + "snmp_info" : (".1.3.6.1.4.1.11096.6.1.1.1.3.5.2.1", [1, 3, 4, 5, 6, 7]), + # volumeSysDriveIndex, volumeLabel, volumeStatus, volumeCapacity, + # volumeFreeCapacity, volumeEnterpriseVirtualServer + "snmp_scan_function" : lambda oid: oid(".1.3.6.1.2.1.1.2.0").startswith(".1.3.6.1.4.1.11096.6"), + "group" : "filesystem", + "default_levels_variable": "filesystem_default_levels", +} diff -Nru check-mk-1.2.2p3/hivemanager_devices check-mk-1.2.6p12/hivemanager_devices --- check-mk-1.2.2p3/hivemanager_devices 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/hivemanager_devices 2015-06-24 09:48:37.000000000 +0000 @@ -0,0 +1,117 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. 
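
The volume check above, like the span check, delegates all threshold logic to df.include: it builds a one-element (item, size_mb, avail_mb) list and lets the shared filesystem code produce state, text and perfdata. A sketch of that hand-off; df_check_filesystem_list and its exact signature are assumed from the usage visible in this diff, and the values are made up.

# 500 GB total, 125 GB free, for a hypothetical volume "1024 data"
fslist = [ ("1024 data", 512000.0, 128000.0) ]
# status, message, perfdata = df_check_filesystem_list("1024 data", params, fslist)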
+ +#<<>> +#BBSA-WIFI-LSN-Rhod-F4-1|8|Cleared|True|21 Days, 17 Hrs 43 Mins 43 Secs +#BBSA-WIFI-LSN-Rhod-F4-2|8|Cleared|True|21 Days, 17 Hrs 43 Mins 43 Secs +#BBSA-WIFI-LSN-Hald-F4-1|4|Cleared|True|2 Days, 0 Hrs 30 Mins 41 Secs +#BBSA-WIFI-LSN-Hald-F2-1|24|Cleared|True|57 Days, 3 Hrs 24 Mins 22 Secs + + + +factory_settings['hivemanger_devices'] = { + 'alert_on_loss' : True, + 'max_clients' : (25, 50), + 'crit_states' : ['Critical'], + 'warn_states' : ['Maybe', 'Major', 'Minor'], +} + +def inventory_hivemanager_devices(info): + for line in info: + infos = dict([ x.split('::') for x in line ]) + yield infos['hostName'], {} + +def check_hivemanager_devices(item, params, info): + for line in info: + infos = dict([ x.split('::') for x in line ]) + if infos['hostName'] == item: + + # Check for Alarm State + alarmstate = "Alarm state: " + infos['alarm'] + if infos['alarm'] in params['warn_states']: + yield 1, alarmstate + elif infos['alarm'] in params['crit_states']: + yield 2, alarmstate + + # If activated, Check for lost connection of client + if params['alert_on_loss']: + if infos['connection'] == 'False': + yield 2, "Connection lost" + + # The number of clients + number_of_clients = int(infos['clients']) + warn, crit = params['max_clients'] + + perfdata = [('client_count', number_of_clients, warn, crit)] + infotext = "Clients: %s" % number_of_clients + levels = ' Warn/Crit at %s/%s' % ( warn, crit ) + + if number_of_clients >= crit: + yield 2, infotext+levels, perfdata + elif number_of_clients >= warn: + yield 1, infotext+levels, perfdata + else: + yield 0, infotext, perfdata + + + # Uptime + state = 0 + warn, crit = 0, 0 + infotext = None + uptime_secs = 0 + if infos['upTime'] != 'down': + uptime_tokens = map(int, infos['upTime'].split()[-2::-2]) + token_multiplier = [1, 60, 3600, 86400] + for idx, entry in enumerate(uptime_tokens): + uptime_secs += token_multiplier[idx] * entry + infotext = "Uptime: %s" % get_age_human_readable(uptime_secs) + if 'max_uptime' in params: + warn, crit = params['max_uptime'] + if uptime_secs >= crit: + state = 2 + elif uptime_secs >= warn: + state = 1 + yield state, infotext, [('uptime', uptime_secs, warn, crit)] + + # Additional Information + additional_informations = [ + 'eth0LLDPPort', 'eth0LLDPSysName', 'hive', 'hiveOS', 'hwmodel', + 'serialNumber', 'nodeId', 'location', 'networkPolicy'] + yield 0, ", ".join(["%s: %s" % (x,y ) for x, y in infos.items() \ + if x in additional_informations ]) + + + +check_info["hivemanager_devices"] = { + "check_function" : check_hivemanager_devices, + "inventory_function" : inventory_hivemanager_devices, + "service_description" : "Client %s", + "default_levels_variable" : "hivemanger_devices", + "group" : "hivemanager_devices", + "has_perfdata" : True, + +} + diff -Nru check-mk-1.2.2p3/hp_blade check-mk-1.2.6p12/hp_blade --- check-mk-1.2.2p3/hp_blade 2013-11-05 09:23:07.000000000 +0000 +++ check-mk-1.2.6p12/hp_blade 2015-06-24 09:48:37.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. 
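
A worked example of the uptime parsing in hivemanager_devices above: "21 Days, 17 Hrs 43 Mins 43 Secs".split() yields ['21', 'Days,', '17', 'Hrs', '43', 'Mins', '43', 'Secs'], and the slice [-2::-2] picks every second token from the right, i.e. the numbers in ascending unit order.

tokens = map(int, "21 Days, 17 Hrs 43 Mins 43 Secs".split()[-2::-2])
# tokens == [43, 43, 17, 21]: secs, mins, hrs, days
multiplier = [1, 60, 3600, 86400]
uptime_secs = sum(m * t for m, t in zip(multiplier, tokens))
# 43 + 43*60 + 17*3600 + 21*86400 = 1878223 seconds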
@@ -45,11 +45,14 @@ def check_hp_blade_general(item, params, info): snmp_state = hp_blade_status_map[int(info[0][1])] status = hp_blade_status2nagios_map[snmp_state] - return (status, "%s - General Status is %s (Firmware: %s, S/N: %s)" % - (nagios_state_names[status], snmp_state, info[0][0], info[0][2])) + return (status, "General Status is %s (Firmware: %s, S/N: %s)" % + (snmp_state, info[0][0], info[0][2])) - -check_info['hp_blade'] = (check_hp_blade_general, "General Status", 0, inventory_hp_blade_general) -snmp_info['hp_blade'] = ( ".1.3.6.1.4.1.232.22.2.3.1.1.1", [ "8", "16", "7" ] ) -snmp_scan_functions['hp_blade'] = \ - lambda oid: ".11.5.7.1.2" in oid(".1.3.6.1.2.1.1.2.0") +check_info["hp_blade"] = { + 'check_function': check_hp_blade_general, + 'inventory_function': inventory_hp_blade_general, + 'service_description': 'General Status', + 'snmp_info': ('.1.3.6.1.4.1.232.22.2.3.1.1.1', ['8', '16', '7']), + 'snmp_scan_function': \ + lambda oid: ".11.5.7.1.2" in oid(".1.3.6.1.2.1.1.2.0"), +} diff -Nru check-mk-1.2.2p3/hp_blade_blades check-mk-1.2.6p12/hp_blade_blades --- check-mk-1.2.2p3/hp_blade_blades 2013-11-05 09:23:07.000000000 +0000 +++ check-mk-1.2.6p12/hp_blade_blades 2015-06-24 09:48:37.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. @@ -50,15 +50,15 @@ hp_blade_status2nagios_map = { 'Other': 2, 'Ok': 0, 'Degraded': 1, 'Failed': 2, } def inventory_hp_blade_blades(info): - return [ (line[0], None) for line in info if hp_blade_present_map[int(line[1])] == 'present' ] + return [ (line[0], None) for line in info if hp_blade_present_map.get(int(line[1]), "") == 'present' ] def check_hp_blade_blades(item, params, info): for line in info: if line[0] == item: present_state = hp_blade_present_map[int(line[1])] if present_state != 'present': - return (2, 'CRIT - Blade was present but is not available anymore.' - ' (Present state: %s' % present_state) + return (2, 'Blade was present but is not available anymore' + ' (Present state: %s)' % present_state) # Status field can be an empty string. # Seems not to be implemented. 
The MIB file tells me that this value @@ -69,12 +69,15 @@ snmp_state = hp_blade_status_map[state] status = hp_blade_status2nagios_map[snmp_state] - return (status, '%s - Blade status is %s (Product: %s Name: %s S/N: %s)' % - (nagios_state_names[status], snmp_state, line[3], line[4], line[5])) - return (3, "UNKNOWN - item not found in snmp data") - - -check_info['hp_blade_blades'] = (check_hp_blade_blades, "Blade %s", 0, inventory_hp_blade_blades) -snmp_info['hp_blade_blades'] = ( ".1.3.6.1.4.1.232.22.2.4.1.1.1", [ "3", "12", "21", "17", "4", "16" ] ) -snmp_scan_functions['hp_blade_blades'] = \ - lambda oid: ".11.5.7.1.2" in oid(".1.3.6.1.2.1.1.2.0") + return (status, 'Blade status is %s (Product: %s Name: %s S/N: %s)' % + (snmp_state, line[3], line[4], line[5])) + return (3, "item not found in snmp data") + +check_info["hp_blade_blades"] = { + 'check_function': check_hp_blade_blades, + 'inventory_function': inventory_hp_blade_blades, + 'service_description': 'Blade %s', + 'snmp_info': ('.1.3.6.1.4.1.232.22.2.4.1.1.1', ['3', '12', '21', '17', '4', '16']), + 'snmp_scan_function': \ + lambda oid: ".11.5.7.1.2" in oid(".1.3.6.1.2.1.1.2.0"), +} diff -Nru check-mk-1.2.2p3/hp_blade_fan check-mk-1.2.6p12/hp_blade_fan --- check-mk-1.2.2p3/hp_blade_fan 2013-11-05 09:23:07.000000000 +0000 +++ check-mk-1.2.6p12/hp_blade_fan 2015-06-24 09:48:37.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. @@ -48,16 +48,19 @@ if line[0] == item: present_state = hp_blade_present_map[int(line[1])] if present_state != 'present': - return (2, 'CRIT - FAN was present but is not available anymore.' - ' (Present state: %s' % present_state) + return (2, 'FAN was present but is not available anymore' + ' (Present state: %s)' % present_state) snmp_state = hp_blade_status_map[int(line[2])] status = hp_blade_status2nagios_map[snmp_state] - return (status, '%s - FAN condition is %s' % (nagios_state_names[status], snmp_state)) - return (3, "UNKNOWN - item not found in snmp data") + return (status, 'FAN condition is %s' % (snmp_state)) + return (3, "item not found in snmp data") - -check_info['hp_blade_fan'] = (check_hp_blade_fan, "FAN %s", 0, inventory_hp_blade_fan) -snmp_info['hp_blade_fan'] = ( ".1.3.6.1.4.1.232.22.2.3.1.3.1", [ "3", "8", "11" ] ) -snmp_scan_functions['hp_blade_fan'] = \ - lambda oid: ".11.5.7.1.2" in oid(".1.3.6.1.2.1.1.2.0") +check_info["hp_blade_fan"] = { + 'check_function': check_hp_blade_fan, + 'inventory_function': inventory_hp_blade_fan, + 'service_description': 'FAN %s', + 'snmp_info': ('.1.3.6.1.4.1.232.22.2.3.1.3.1', ['3', '8', '11']), + 'snmp_scan_function': \ + lambda oid: ".11.5.7.1.2" in oid(".1.3.6.1.2.1.1.2.0"), +} diff -Nru check-mk-1.2.2p3/hp_blade_manager check-mk-1.2.6p12/hp_blade_manager --- check-mk-1.2.2p3/hp_blade_manager 2013-11-05 09:23:07.000000000 +0000 +++ check-mk-1.2.6p12/hp_blade_manager 2015-06-24 09:48:37.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. 
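The recurring pattern in these hunks is the move from the legacy positional registration, a 4-tuple of (check_function, service_description, has_perfdata flag, inventory_function) spread over check_info, snmp_info, snmp_scan_functions and checkgroup_of, to a single check_info dictionary per check. A hedged sketch of that equivalence; the converter below is ours, for illustration only, and is not part of Check_MK:

# Illustrative converter from the legacy 4-tuple registration to the
# dictionary form used throughout 1.2.6.
def convert_legacy_check(legacy_tuple, group=None, snmp_info=None,
                         snmp_scan_function=None):
    check_function, service_description, has_perfdata, inventory_function = \
        legacy_tuple
    entry = {
        'check_function':      check_function,
        'inventory_function':  inventory_function,
        'service_description': service_description,
    }
    if has_perfdata:                     # old third tuple field: 0 or 1
        entry['has_perfdata'] = True
    if group:                            # old checkgroup_of[...]
        entry['group'] = group
    if snmp_info:                        # old snmp_info[...]
        entry['snmp_info'] = snmp_info
    if snmp_scan_function:               # old snmp_scan_functions[...]
        entry['snmp_scan_function'] = snmp_scan_function
    return entry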
@@ -54,7 +54,7 @@ if line[0] == item: expected_role = params[0] if line[3] != expected_role: - return (2, 'CRIT - Unexpected role: %s (Expected: %s)' % + return (2, 'Unexpected role: %s (Expected: %s)' % (hp_blade_role_map[int(line[3])], hp_blade_role_map[int(expected_role)])) # The SNMP answer is not fully compatible to the MIB file. The value of 0 will @@ -63,12 +63,15 @@ snmp_state = hp_blade_status_map[state] status = hp_blade_status2nagios_map[snmp_state] - return (status, '%s - Enclosure Manager condition is %s (Role: %s, S/N: %s)' % - (nagios_state_names[status], snmp_state, hp_blade_role_map[int(line[3])], line[4])) - return (3, "UNKNOWN - item not found in snmp data") - - -check_info['hp_blade_manager'] = (check_hp_blade_manager, "Manager %s", 0, inventory_hp_blade_manager) -snmp_info['hp_blade_manager'] = ( ".1.3.6.1.4.1.232.22.2.3.1.6.1", [ "3", "10", "12", "9", "8" ] ) -snmp_scan_functions['hp_blade_manager'] = \ - lambda oid: ".11.5.7.1.2" in oid(".1.3.6.1.2.1.1.2.0") + return (status, 'Enclosure Manager condition is %s (Role: %s, S/N: %s)' % + (snmp_state, hp_blade_role_map[int(line[3])], line[4])) + return (3, "item not found in snmp data") + +check_info["hp_blade_manager"] = { + 'check_function': check_hp_blade_manager, + 'inventory_function': inventory_hp_blade_manager, + 'service_description': 'Manager %s', + 'snmp_info': ('.1.3.6.1.4.1.232.22.2.3.1.6.1', ['3', '10', '12', '9', '8']), + 'snmp_scan_function': \ + lambda oid: ".11.5.7.1.2" in oid(".1.3.6.1.2.1.1.2.0"), +} diff -Nru check-mk-1.2.2p3/hp_blade_psu check-mk-1.2.6p12/hp_blade_psu --- check-mk-1.2.2p3/hp_blade_psu 2013-11-05 09:23:07.000000000 +0000 +++ check-mk-1.2.6p12/hp_blade_psu 2015-06-24 09:48:37.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. @@ -71,7 +71,7 @@ if line[0] == item: present_state = hp_blade_present_map[int(line[1])] if present_state != 'present': - return (2, 'CRIT - PSU was present but is not available anymore.' + return (2, 'PSU was present but is not available anymore.' 
' (Present state: %s' % present_state) snmp_state = hp_blade_status_map[int(line[2])] @@ -86,12 +86,18 @@ perfdata = [ ('output', line[3]) ] - return (status, '%s - PSU is %s%s (S/N: %s)' % - (nagios_state_names[status], snmp_state, detail_output, line[6]), perfdata) - return (3, "UNKNOWN - item not found in snmp data") + return (status, 'PSU is %s%s (S/N: %s)' % + (snmp_state, detail_output, line[6]), perfdata) + return (3, "item not found in snmp data") -check_info['hp_blade_psu'] = (check_hp_blade_psu, "PSU %s", 1, inventory_hp_blade_psu) -snmp_info['hp_blade_psu'] = ( ".1.3.6.1.4.1.232.22.2.5.1.1.1", [ "3", "16", "17", "10", "14", "15", "5" ] ) -snmp_scan_functions['hp_blade_psu'] = \ - lambda oid: ".11.5.7.1.2" in oid(".1.3.6.1.2.1.1.2.0") + +check_info["hp_blade_psu"] = { + 'check_function': check_hp_blade_psu, + 'inventory_function': inventory_hp_blade_psu, + 'service_description': 'PSU %s', + 'has_perfdata': True, + 'snmp_info': ('.1.3.6.1.4.1.232.22.2.5.1.1.1', ['3', '16', '17', '10', '14', '15', '5']), + 'snmp_scan_function': \ + lambda oid: ".11.5.7.1.2" in oid(".1.3.6.1.2.1.1.2.0"), +} diff -Nru check-mk-1.2.2p3/hp_eml_sum check-mk-1.2.6p12/hp_eml_sum --- check-mk-1.2.2p3/hp_eml_sum 2013-11-05 09:23:07.000000000 +0000 +++ check-mk-1.2.6p12/hp_eml_sum 2015-06-24 09:48:37.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. @@ -40,23 +40,27 @@ def check_hp_eml_sum(_no_item, _no_param, info): if not info or not info[0]: - return (3, "UNKNOWN - Summary status information missing") + return (3, "Summary status information missing") op_status, manufacturer, model, serial, version = info[0] status, status_txt = hp_eml_sum_map.get(op_status, (3, 'unhandled op_status (%s)' % op_status)) - return (status, '%s - Summary State is "%s", Manufacturer: %s, ' + return (status, 'Summary State is "%s", Manufacturer: %s, ' 'Model: %s, Serial: %s, Version: %s' % - (nagios_state_names[status], status_txt, manufacturer, model, serial, version)) + (status_txt, manufacturer, model, serial, version)) -check_info['hp_eml_sum'] = (check_hp_eml_sum, "Summary Status", 0, inventory_hp_eml_sum) -snmp_info['hp_eml_sum'] = (".1.3.6.1.4.1.11.2.36.1.1.5.1.1", [ - 3, # op_status - 7, # manufacturer - 9, # model - 10, # serial - 11, # version -]) - -snmp_scan_functions['hp_eml_sum'] \ - = lambda oid: oid(".1.3.6.1.2.1.1.2.0") == ".1.3.6.1.4.1.11.10.2.1.3.20" +check_info["hp_eml_sum"] = { + 'check_function': check_hp_eml_sum, + 'inventory_function': inventory_hp_eml_sum, + 'service_description': 'Summary Status', + 'snmp_info': ( + ".1.3.6.1.4.1.11.2.36.1.1.5.1.1", [ + 3, # op_status + 7, # manufacturer + 9, # model + 10, # serial + 11, # version + ]), + 'snmp_scan_function': \ + lambda oid: oid(".1.3.6.1.2.1.1.2.0") == ".1.3.6.1.4.1.11.10.2.1.3.20", +} diff -Nru check-mk-1.2.2p3/hp_procurve_cpu check-mk-1.2.6p12/hp_procurve_cpu --- check-mk-1.2.2p3/hp_procurve_cpu 2013-11-05 09:23:07.000000000 +0000 +++ check-mk-1.2.6p12/hp_procurve_cpu 2015-09-21 10:59:54.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . 
\ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. @@ -46,14 +46,19 @@ status = 1 output = ' (Above %d%%)' % params[0] - return (status, '%s - CPU utilization is %d%% %s' % - (nagios_state_names[status], cpu_util, output), + return (status, 'CPU utilization is %d%% %s' % (cpu_util, output), [('util', '%d%%' % cpu_util, params[0], params[1], 0, 100)]) - return (3, "UNKNOWN - Invalid information in snmp data") + return (3, "Invalid information in snmp data") -check_info['hp_procurve_cpu'] = (check_hp_procurve_cpu, "CPU utilization", 1, inventory_hp_procurve_cpu) -snmp_info['hp_procurve_cpu'] = ( ".1.3.6.1.4.1.11.2.14.11.5.1.9.6", [ "1" ] ) -snmp_scan_functions['hp_procurve_cpu'] = \ - lambda oid: ".11.2.3.7.11" in oid(".1.3.6.1.2.1.1.2.0") + +check_info["hp_procurve_cpu"] = { + 'check_function': check_hp_procurve_cpu, + 'inventory_function': inventory_hp_procurve_cpu, + 'service_description': 'CPU utilization', + 'has_perfdata': True, + 'snmp_info': ('.1.3.6.1.4.1.11.2.14.11.5.1.9.6', ['1']), + 'snmp_scan_function': \ + lambda oid: ".11.2.3.7.11" in oid(".1.3.6.1.2.1.1.2.0"), +} diff -Nru check-mk-1.2.2p3/hp_procurve_mem check-mk-1.2.6p12/hp_procurve_mem --- check-mk-1.2.2p3/hp_procurve_mem 2013-11-05 09:23:07.000000000 +0000 +++ check-mk-1.2.6p12/hp_procurve_mem 2015-09-21 10:59:54.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. @@ -51,14 +51,17 @@ status = 1 output = ' (Above %d%%)' % params[0] - return (status, '%s - Memory usage is %d%% %s' % - (nagios_state_names[status], perc, output), + return (status, 'Memory usage is %d%% %s' % (perc, output), [('bytes_used', '%db' % mem_used, params[0], params[1], 0, mem_total)]) - return (3, "UNKNOWN - Invalid information in snmp data") + return (3, "Invalid information in snmp data") - -check_info['hp_procurve_mem'] = (check_hp_procurve_mem, "Memory", 1, inventory_hp_procurve_mem) -snmp_info['hp_procurve_mem'] = ( ".1.3.6.1.4.1.11.2.14.11.5.1.1.2.1.1.1", [ "5", "7" ] ) -snmp_scan_functions['hp_procurve_mem'] = \ - lambda oid: ".11.2.3.7.11" in oid(".1.3.6.1.2.1.1.2.0") +check_info["hp_procurve_mem"] = { + 'check_function': check_hp_procurve_mem, + 'inventory_function': inventory_hp_procurve_mem, + 'service_description': 'Memory', + 'has_perfdata': True, + 'snmp_info': ('.1.3.6.1.4.1.11.2.14.11.5.1.1.2.1.1.1', ['5', '7']), + 'snmp_scan_function': \ + lambda oid: ".11.2.3.7.11" in oid(".1.3.6.1.2.1.1.2.0"), +} diff -Nru check-mk-1.2.2p3/hp_procurve_sensors check-mk-1.2.6p12/hp_procurve_sensors --- check-mk-1.2.2p3/hp_procurve_sensors 2013-11-05 09:23:07.000000000 +0000 +++ check-mk-1.2.6p12/hp_procurve_sensors 2015-09-21 10:59:54.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. 
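Perfdata tuples such as ('util', '%d%%' % cpu_util, warn, crit, 0, 100) in the hp_procurve checks above follow the Nagios plugin convention label=value;warn;crit;min;max. A small sketch of how such a tuple serializes; the formatting function is ours, shown under the assumption of plain semicolon-separated output:

# Sketch: render a check_mk-style perfdata list the way Nagios expects it.
def format_perfdata(perfdata):
    parts = []
    for entry in perfdata:
        label, value = entry[0], entry[1]
        # warn, crit, min and max are optional trailing fields
        rest = ";".join(str(x) for x in entry[2:])
        parts.append("%s=%s%s" % (label, value, ";" + rest if rest else ""))
    return " ".join(parts)

print(format_perfdata([('util', '37%', 80, 90, 0, 100)]))
# -> util=37%;80;90;0;100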
@@ -93,12 +93,17 @@ procurve_status = hp_procurve_status_map[line[2]] status = hp_procurve_status2nagios_map[procurve_status] - return (status, '%s - Condition of %s "%s" is %s' % - (nagios_state_names[status], get_hp_procurve_sensor_type(line[1]), line[3], procurve_status)) - return (3, "UNKNOWN - item not found in snmp data") + return (status, 'Condition of %s "%s" is %s' % + (get_hp_procurve_sensor_type(line[1]), line[3], procurve_status)) + return (3, "item not found in snmp data") -check_info['hp_procurve_sensors'] = (check_hp_procurve_sensors, "Sensor %s", 0, inventory_hp_procurve_sensors) -snmp_info['hp_procurve_sensors'] = ( ".1.3.6.1.4.1.11.2.14.11.1.2.6.1", [ "1", "2", "4", "7" ] ) -snmp_scan_functions['hp_procurve_sensors'] = \ - lambda oid: ".11.2.3.7.11" in oid(".1.3.6.1.2.1.1.2.0") + +check_info["hp_procurve_sensors"] = { + 'check_function': check_hp_procurve_sensors, + 'inventory_function': inventory_hp_procurve_sensors, + 'service_description': 'Sensor %s', + 'snmp_info': ('.1.3.6.1.4.1.11.2.14.11.1.2.6.1', ['1', '2', '4', '7']), + 'snmp_scan_function': \ + lambda oid: ".11.2.3.7.11" in oid(".1.3.6.1.2.1.1.2.0"), +} diff -Nru check-mk-1.2.2p3/hp_proliant check-mk-1.2.6p12/hp_proliant --- check-mk-1.2.2p3/hp_proliant 2013-11-05 09:23:07.000000000 +0000 +++ check-mk-1.2.6p12/hp_proliant 2015-09-21 10:59:54.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. @@ -24,29 +24,32 @@ # to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, # Boston, MA 02110-1301 USA. 
-# Author: Bastian Kuhn - # General Status: # '.1.3.6.1.4.1.232.2.2.2.1.0' => Serial Number of Server # '.1.3.6.1.4.1.232.11.2.14.1.1.5.0' => cpqHoFwVerVersion # '.1.3.6.1.4.1.232.11.1.3.0' => cpqHoMibCondition -hp_proliant_generel_status_map = { 1: 'Unknown', 2: 'Ok', 3: 'Degraded', 4: 'Failed' } -hp_proliant_generel_status2nagios_map = { 'Unknown': 0, 'Ok': 0, 'Degraded': 1, 'Failed': 2, } +hp_proliant_general_status_map = { 1: 'Unknown', 2: 'Ok', 3: 'Degraded', 4: 'Failed' } +hp_proliant_general_status2nagios_map = { 'Unknown': 0, 'Ok': 0, 'Degraded': 1, 'Failed': 2, } def inventory_proliant_general(info): if len(info) > 0 and len(info[0]) > 1: return [ (None, None) ] def check_proliant_general(item, no_params, info): - snmp_state = hp_proliant_generel_status_map[int(info[0][0])] - status = hp_proliant_generel_status2nagios_map[snmp_state] - return (status, "%s - General Status is %s (Firmware: %s, S/N: %s)" % - (nagios_state_names[status], snmp_state, info[0][1], info[0][2])) - - -check_info['hp_proliant'] = (check_proliant_general, "General Status", 0, inventory_proliant_general) -snmp_info['hp_proliant'] = ( ".1.3.6.1.4.1.232", [ "11.1.3.0", "11.2.14.1.1.5.0", "2.2.2.1.0" ] ) -snmp_scan_functions['hp_proliant'] = \ - lambda oid: "8072.3.2.10" in oid(".1.3.6.1.2.1.1.2.0") or \ - (".1.3.6.1.4.1.311.1.1.3.1.2" in oid(".1.3.6.1.2.1.1.2.0") and oid(".1.3.6.1.4.1.232.11.1.3.0")) + if not info: + return 3, "status not found in snmp data" + snmp_state = hp_proliant_general_status_map[int(info[0][0])] + status = hp_proliant_general_status2nagios_map[snmp_state] + return (status, "General Status is %s (Firmware: %s, S/N: %s)" % + (snmp_state, info[0][1], info[0][2])) + +check_info["hp_proliant"] = { + 'check_function': check_proliant_general, + 'inventory_function': inventory_proliant_general, + 'service_description': 'General Status', + 'snmp_info': ('.1.3.6.1.4.1.232', ['11.1.3.0', '11.2.14.1.1.5.0', '2.2.2.1.0']), + 'snmp_scan_function': \ + lambda oid: "8072.3.2.10" in oid(".1.3.6.1.2.1.1.2.0") or \ + (".1.3.6.1.4.1.311.1.1.3.1.2" in oid(".1.3.6.1.2.1.1.2.0") and oid(".1.3.6.1.4.1.232.11.1.3.0")), +} diff -Nru check-mk-1.2.2p3/hp_proliant_cpu check-mk-1.2.6p12/hp_proliant_cpu --- check-mk-1.2.2p3/hp_proliant_cpu 2013-11-05 09:23:07.000000000 +0000 +++ check-mk-1.2.6p12/hp_proliant_cpu 2015-09-21 10:59:54.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. 
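Every snmp_scan_function in this diff is a lambda that receives an oid() lookup callable and decides, usually from sysObjectID (.1.3.6.1.2.1.1.2.0) or a vendor OID probe, whether the check applies to a host. A sketch with a stubbed oid(); the stub and its walk data are made up for illustration, while the lambda itself mirrors the hp_proliant scan function above:

# Sketch: how a scan lambda is evaluated against SNMP data.
fake_snmp_data = {
    ".1.3.6.1.2.1.1.2.0":        ".1.3.6.1.4.1.311.1.1.3.1.2",
    ".1.3.6.1.4.1.232.11.1.3.0": "2",  # cpqHoMibCondition answers
}

def oid(name):
    # Check_MK hands the scan function a callable like this one.
    return fake_snmp_data.get(name)

scan = lambda oid: "8072.3.2.10" in oid(".1.3.6.1.2.1.1.2.0") or \
    (".1.3.6.1.4.1.311.1.1.3.1.2" in oid(".1.3.6.1.2.1.1.2.0")
     and oid(".1.3.6.1.4.1.232.11.1.3.0"))

print(bool(scan(oid)))  # True: Windows sysObjectID plus a Compaq MIB answer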
@@ -40,17 +40,22 @@ snmp_status = hp_proliant_status_map[int(status)] status = hp_proliant_status2nagios_map[snmp_status] - return (status, '%s - CPU%s "%s" in slot %s is in state "%s"' % - (nagios_state_names[status], index, name, slot, snmp_status)) - return (3, "UNKNOWN - item not found in snmp data") - - -check_info['hp_proliant_cpu'] = (check_hp_proliant_cpu, "HW CPU %s", 0, inventory_hp_proliant_cpu) -snmp_info['hp_proliant_cpu'] = ( ".1.3.6.1.4.1.232.1.2.2.1.1", [ "1", # cpqSeCpuUnitIndex - "2", # cpqSeCpuSlot - "3", # cpqSeCpuName - "6", # cpqSeCpuStatus - ] ) - -snmp_scan_functions['hp_proliant_cpu'] = \ - lambda oid: "proliant" in oid(".1.3.6.1.4.1.232.2.2.4.2.0").lower() + return (status, 'CPU%s "%s" in slot %s is in state "%s"' % + (index, name, slot, snmp_status)) + return (3, "item not found in snmp data") + +check_info["hp_proliant_cpu"] = { + 'check_function': check_hp_proliant_cpu, + 'inventory_function': inventory_hp_proliant_cpu, + 'service_description': 'HW CPU %s', + 'snmp_info': ( + '.1.3.6.1.4.1.232.1.2.2.1.1', [ + '1', # cpqSeCpuUnitIndex + '2', # cpqSeCpuSlot + '3', # cpqSeCpuName + '6', # cpqSeCpuStatus + ] + ), + 'snmp_scan_function': \ + lambda oid: "proliant" in oid(".1.3.6.1.4.1.232.2.2.4.2.0").lower(), +} diff -Nru check-mk-1.2.2p3/hp_proliant_da_cntlr check-mk-1.2.6p12/hp_proliant_da_cntlr --- check-mk-1.2.2p3/hp_proliant_da_cntlr 2013-11-05 09:23:07.000000000 +0000 +++ check-mk-1.2.6p12/hp_proliant_da_cntlr 2015-09-21 10:59:54.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. 
@@ -76,22 +76,26 @@ output.append('(Role: %s, Model: %s, Slot: %s, Serial: %s)' % (hp_proliant_da_cntlr_role_map.get(role, 'unknown'), model, slot, serial)) - return (sum_state, '%s - %s' % (nagios_state_names[sum_state], ', '.join(output))) - return (3, "UNKNOWN - Controller not found in snmp data") + return (sum_state, ', '.join(output)) + return (3, "Controller not found in snmp data") - -check_info['hp_proliant_da_cntlr'] = (check_hp_proliant_da_cntlr, "HW Controller %s", 0, inventory_hp_proliant_da_cntlr) -snmp_info['hp_proliant_da_cntlr'] = ( ".1.3.6.1.4.1.232.3.2.2.1.1", [ - "1", # cpqDaCntlrIndex - "2", # cpqDaCntlrModel - "5", # cpqDaCntlrSlot - "6", # cpqDaCntlrCondition - "8", # cpqDaCntlrPartnerSlot - "9", # cpqDaCntlrCurrentRole - "10", # cpqDaCntlrBoardStatus - "12", # cpqDaCntlrBoardCondition - "15", # cpqDaCntlrSerialNumber -] ) - -snmp_scan_functions['hp_proliant_da_cntlr'] = \ - lambda oid: "proliant" in oid(".1.3.6.1.4.1.232.2.2.4.2.0").lower() +check_info["hp_proliant_da_cntlr"] = { + 'check_function': check_hp_proliant_da_cntlr, + 'inventory_function': inventory_hp_proliant_da_cntlr, + 'service_description': 'HW Controller %s', + 'snmp_info': ( + ".1.3.6.1.4.1.232.3.2.2.1.1", [ + "1", # cpqDaCntlrIndex + "2", # cpqDaCntlrModel + "5", # cpqDaCntlrSlot + "6", # cpqDaCntlrCondition + "8", # cpqDaCntlrPartnerSlot + "9", # cpqDaCntlrCurrentRole + "10", # cpqDaCntlrBoardStatus + "12", # cpqDaCntlrBoardCondition + "15", # cpqDaCntlrSerialNumber + ] + ), + 'snmp_scan_function': \ + lambda oid: "proliant" in oid(".1.3.6.1.4.1.232.2.2.4.2.0").lower(), +} diff -Nru check-mk-1.2.2p3/hp_proliant_da_phydrv check-mk-1.2.6p12/hp_proliant_da_phydrv --- check-mk-1.2.2p3/hp_proliant_da_phydrv 2013-11-05 09:23:07.000000000 +0000 +++ check-mk-1.2.6p12/hp_proliant_da_phydrv 2015-09-21 10:59:54.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. 
@@ -65,25 +65,30 @@ condition = hp_proliant_da_phydrv_condition[int(condition)] status = hp_proliant_da_phydrv_condition_status2nagios[condition] - return (status, '%s - Bay: %s, BusNumber: %s, Status: %s, ' + return (status, 'Bay: %s, BusNumber: %s, Status: %s, ' 'SmartStatus: %s, RefHours: %s, Size: %sMB, Condition: %s' % - (nagios_state_names[status], bay, bus_number, snmp_status, + (bay, bus_number, snmp_status, hp_proliant_da_phydrv_smart_status[int(smart_status)], ref_hours, size, condition)) - return (3, "UNKNOWN - item not found in snmp data") + return (3, "item not found in snmp data") - -check_info['hp_proliant_da_phydrv'] = (check_hp_proliant_da_phydrv, "HW Phydrv %s", 0, inventory_hp_proliant_da_phydrv) -snmp_info['hp_proliant_da_phydrv'] = ( ".1.3.6.1.4.1.232.3.2.5.1.1", [ "1", # cpqDaPhyDrvCntlrIndex - "2", # cpqDaPhyDrvIndex - "5", # cpqDaPhyDrvBay - "6", # cpqDaPhyDrvStatus - "9", # cpqDaPhyDrvRefHours - "45", # cpqDaPhyDrvSize - "37", # cpqDaPhyDrvCondition - "50", # cpqDaPhyDrvBusNumber - "57", # cpqDaPhyDrvSmartStatus - ] ) - -snmp_scan_functions['hp_proliant_da_phydrv'] = \ - lambda oid: "proliant" in oid(".1.3.6.1.4.1.232.2.2.4.2.0").lower() +check_info["hp_proliant_da_phydrv"] = { + 'check_function': check_hp_proliant_da_phydrv, + 'inventory_function': inventory_hp_proliant_da_phydrv, + 'service_description': 'HW Phydrv %s', + 'snmp_info': ( + ".1.3.6.1.4.1.232.3.2.5.1.1", [ + "1", # cpqDaPhyDrvCntlrIndex + "2", # cpqDaPhyDrvIndex + "5", # cpqDaPhyDrvBay + "6", # cpqDaPhyDrvStatus + "9", # cpqDaPhyDrvRefHours + "45", # cpqDaPhyDrvSize + "37", # cpqDaPhyDrvCondition + "50", # cpqDaPhyDrvBusNumber + "57", # cpqDaPhyDrvSmartStatus + ] + ), + 'snmp_scan_function': \ + lambda oid: "proliant" in oid(".1.3.6.1.4.1.232.2.2.4.2.0").lower(), +} diff -Nru check-mk-1.2.2p3/hp_proliant_fans check-mk-1.2.6p12/hp_proliant_fans --- check-mk-1.2.2p3/hp_proliant_fans 2013-11-05 09:23:07.000000000 +0000 +++ check-mk-1.2.6p12/hp_proliant_fans 2015-09-21 10:59:54.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. 
@@ -73,22 +73,28 @@ detailOutput = ', RPM: %s' % currentSpeed perfdata = [('temp', int(currentSpeed))] - return (status, '%s - FAN Sensor %s "%s", Speed is %s, State is %s%s' % - (nagios_state_names[status], index, label, hp_proliant_speed_map[int(speed)], + return (status, 'FAN Sensor %s "%s", Speed is %s, State is %s%s' % + (index, label, hp_proliant_speed_map[int(speed)], snmp_status, detailOutput), perfdata) - return (3, "UNKNOWN - item not found in snmp data") + return (3, "item not found in snmp data") - -check_info['hp_proliant_fans'] = (check_hp_proliant_fans, "HW FAN%s", 1, inventory_hp_proliant_fans) -snmp_info['hp_proliant_fans'] = ( ".1.3.6.1.4.1.232.6.2.6.7.1", [ "1", # cpqHeFltTolFanChassis - "2", # cpqHeFltTolFanIndex - "3", # cpqHeFltTolFanLocale - "4", # cpqHeFltTolFanPresent - "6", # cpqHeFltTolFanSpeed - "9", # cpqHeFltTolFanCondition - "12", # cpqHeFltTolFanCurrentSpeed - ] ) - -snmp_scan_functions['hp_proliant_fans'] = \ - lambda oid: "proliant" in oid(".1.3.6.1.4.1.232.2.2.4.2.0").lower() +check_info["hp_proliant_fans"] = { + 'check_function': check_hp_proliant_fans, + 'inventory_function': inventory_hp_proliant_fans, + 'service_description': 'HW FAN%s', + 'has_perfdata': True, + 'snmp_info': ( + ".1.3.6.1.4.1.232.6.2.6.7.1", [ + "1", # cpqHeFltTolFanChassis + "2", # cpqHeFltTolFanIndex + "3", # cpqHeFltTolFanLocale + "4", # cpqHeFltTolFanPresent + "6", # cpqHeFltTolFanSpeed + "9", # cpqHeFltTolFanCondition + "12", # cpqHeFltTolFanCurrentSpeed + ] + ), + 'snmp_scan_function': \ + lambda oid: "proliant" in oid(".1.3.6.1.4.1.232.2.2.4.2.0").lower(), +} diff -Nru check-mk-1.2.2p3/hp_proliant_mem check-mk-1.2.6p12/hp_proliant_mem --- check-mk-1.2.2p3/hp_proliant_mem 2013-11-05 09:23:07.000000000 +0000 +++ check-mk-1.2.6p12/hp_proliant_mem 2015-09-21 10:59:54.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. 
@@ -34,12 +34,19 @@ 5: 'simm', 6: 'pcmcia', 7: 'compaq-specific', - 8: 'dimm', + 8: 'DIMM', 9: 'smallOutlineDimm', - 10: 'rimm', - 11: 'srimm', - 12: 'fb-dimm', + 10: 'RIMM', + 11: 'SRIMM', + 12: 'FB-DIMM', + 13: 'DIMM DDR', + 14: 'DIMM DDR2', + 15: 'DIMM DDR3', + 16: 'DIMM FBD2', + 17: 'FB-DIMM DDR2', + 18: 'FB-DIMM DDR3', } + hp_proliant_mem_status_map = { 1: "other", 2: "notPresent", @@ -52,7 +59,10 @@ 9: "notSupported", 10: "badConfig", 11: "degraded", + 12: "spare", + 13: "partial", } + hp_proliant_mem_status2nagios_map = { 'n/a': 3, 'other': 3, @@ -66,14 +76,24 @@ 'notSupported': 2, 'badConfig': 2, 'degraded': 2, + 'spare': 0, + 'partial': 1, +} + +hp_proliant_mem_condition_status2nagios_map = { + 'other' : 3, + 'ok' : 0, + 'degraded' : 2, + 'failed' : 2, + 'degradedModuleIndexUnknown' : 3 } -hp_proliant_mem_condition_status2nagios_map = { 'other': 3, 'ok': 0, 'degraded': 2, 'failed': 2 } hp_proliant_mem_condition_map = { - 0: 'n/a', - 1: 'other', - 2: 'ok', - 3: 'degraded', + 0: 'n/a', + 1: 'other', + 2: 'ok', + 3: 'degraded', + 4: 'degradedModuleIndexUnknown', } def inventory_hp_proliant_mem(info): @@ -98,34 +118,51 @@ detail_output = ', Status: %s ' % snmp_status status = hp_proliant_mem_status2nagios_map[snmp_status] - if status > 0: - detail_output += '(%s) ' % nagios_state_names[status] + if status == 0: + detail_output += '' + elif status == 1: + detail_output += '(!) ' + elif status == 2: + detail_output += '(!!) ' + else: + detail_output += '(?) ' condition = 'n/a' - if int(module_condition) in hp_proliant_mem_condition_map: - condition = hp_proliant_mem_condition_map[int(module_condition)] + if saveint(module_condition) in hp_proliant_mem_condition_map: + condition = hp_proliant_mem_condition_map[saveint(module_condition)] condition_status = hp_proliant_mem_condition_status2nagios_map[condition] detail_output += ', Condition: %s ' % condition - if condition_status > 0: - detail_output += '(%s) ' % nagios_state_names[condition_status] + if condition_status == 0: + detail_output += '' + elif condition_status == 1: + detail_output += '(!) ' + elif condition_status == 2: + detail_output += '(!!) ' + else: + detail_output += '(?) 
'
         if condition_status > status:
             status = condition_status
-        return (status, '%s - Board: %s, Num: %s, Type: %s, Size: %s MB%s' %
-                (nagios_state_names[status], board_index, module_index,
+        return (status, 'Board: %s, Num: %s, Type: %s, Size: %s MB%s' %
+                (board_index, module_index,
                  type, module_size_mb, detail_output))
-    return (3, "UNKNOWN - item not found in snmp data")
-
+    return (3, "item not found in snmp data")
 
-check_info['hp_proliant_mem'] = (check_hp_proliant_mem, "HW Mem %s", 0, inventory_hp_proliant_mem)
-snmp_info['hp_proliant_mem'] = ( ".1.3.6.1.4.1.232.6.2.14.13.1", [ "2", # cpqHeResMem2BoardNum
-                                                                   "1", # cpqHeResMem2Module
-                                                                   "6", # cpqHeResMem2ModuleSize
-                                                                   "7", # cpqHeResMem2ModuleType
-                                                                  "19", # cpqHeResMem2ModuleStatus
-                                                                  "20", # cpqHeResMem2ModuleCondition
-                                 ] )
-
-snmp_scan_functions['hp_proliant_mem'] = \
-    lambda oid: "proliant" in oid(".1.3.6.1.4.1.232.2.2.4.2.0").lower()
+check_info["hp_proliant_mem"] = {
+    'check_function': check_hp_proliant_mem,
+    'inventory_function': inventory_hp_proliant_mem,
+    'service_description': 'HW Mem %s',
+    'snmp_info': (
+        ".1.3.6.1.4.1.232.6.2.14.13.1", [
+            "2",  # cpqHeResMem2BoardNum
+            "1",  # cpqHeResMem2Module
+            "6",  # cpqHeResMem2ModuleSize
+            "7",  # cpqHeResMem2ModuleType
+            "19", # cpqHeResMem2ModuleStatus
+            "20", # cpqHeResMem2ModuleCondition
+        ]
+    ),
+    'snmp_scan_function': \
+        lambda oid: "proliant" in oid(".1.3.6.1.4.1.232.2.2.4.2.0").lower(),
+}
diff -Nru check-mk-1.2.2p3/hp_proliant_power check-mk-1.2.6p12/hp_proliant_power
--- check-mk-1.2.2p3/hp_proliant_power	1970-01-01 00:00:00.000000000 +0000
+++ check-mk-1.2.6p12/hp_proliant_power	2015-09-21 10:59:54.000000000 +0000
@@ -0,0 +1,68 @@
+#!/usr/bin/python
+# -*- encoding: utf-8; py-indent-offset: 4 -*-
+# +------------------------------------------------------------------+
+# |             ____ _               _        __  __ _  __           |
+# |            / ___| |__   ___  ___| | __   |  \/  | |/ /           |
+# |           | |   | '_ \ / _ \/ __| |/ /   | |\/| | ' /            |
+# |           | |___| | | |  __/ (__|   <    | |  | | . \            |
+# |            \____|_| |_|\___|\___|_|\_\___|_|  |_|_|\_\           |
+# |                                                                  |
+# | Copyright Mathias Kettner 2014             mk@mathias-kettner.de |
+# +------------------------------------------------------------------+
+#
+# This file is part of Check_MK.
+# The official homepage is at http://mathias-kettner.de/check_mk.
+#
+# check_mk is free software;  you can redistribute it and/or modify it
+# under the  terms of the  GNU General Public License  as published by
+# the Free Software Foundation in version 2.  check_mk is  distributed
+# in the hope that it will be useful, but WITHOUT ANY WARRANTY;  with-
+# out even the implied warranty of  MERCHANTABILITY  or  FITNESS FOR A
+# PARTICULAR PURPOSE. See the  GNU General Public License for more de-
+# tails. You should have  received  a copy of the  GNU  General Public
+# License along with GNU Make; see the file  COPYING.  If  not,  write
+# to the Free Software Foundation, Inc., 51 Franklin St,  Fifth Floor,
+# Boston, MA 02110-1301 USA.
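The if/elif ladders introduced for hp_proliant_mem above reproduce Check_MK's usual state markers: an empty string for OK, '(!)' for WARN, '(!!)' for CRIT and '(?)' for UNKNOWN. The same behavior, table-driven; a sketch, with names of our own choosing:

# Sketch: the marker ladder as a lookup table. Behaves like the diff's
# if/elif chain, including the trailing space after a non-empty marker.
STATE_MARKERS = {0: '', 1: '(!) ', 2: '(!!) ', 3: '(?) '}

def marker_for(status):
    return STATE_MARKERS.get(status, '(?) ')

assert marker_for(0) == ''
assert marker_for(2) == '(!!) '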
+
+hp_proliant_power_default_levels = ( 300, 400 )
+
+def inventory_hp_proliant_power(info):
+    if len(info) > 0 and info[0][0] != '0':
+        return [ ( None, 'hp_proliant_power_default_levels') ]
+
+def check_hp_proliant_power(item, params, info):
+    status_table = {
+        1 : "other",
+        3 : "absent",
+    }
+    status, reading = map(int, info[0])
+    if status != 2:
+        return 2, "Power Meter state: %s" % ( status_table[status] )
+    warn, crit = params
+    state = 0
+    levels = ''
+    if reading >= crit:
+        state = 2
+        levels = '( Warning/Critical %s/%s )' % ( warn, crit )
+    elif reading >= warn:
+        state = 1
+        levels = '( Warning/Critical %s/%s )' % ( warn, crit )
+
+    perf = [ ('watt', reading, warn, crit ) ]
+    return state, "Current reading: %d Watt" % reading + levels, perf
+
+check_info["hp_proliant_power"] = {
+    'group'               : 'epower_single',
+    'check_function'      : check_hp_proliant_power,
+    'inventory_function'  : inventory_hp_proliant_power,
+    'service_description' : 'HW Power Meter',
+    'snmp_info': (
+        ".1.3.6.1.4.1.232.6.2.15", [
+            "2", # cpqHePowerMeterStatus
+            "3", # cpqHePowerMeterCurrReading
+        ]
+    ),
+    'snmp_scan_function'  : \
+        lambda oid: "proliant" in oid(".1.3.6.1.4.1.232.2.2.4.2.0").lower(),
+    'has_perfdata'        : True
+}
diff -Nru check-mk-1.2.2p3/hp_proliant_psu check-mk-1.2.6p12/hp_proliant_psu
--- check-mk-1.2.2p3/hp_proliant_psu	2013-11-05 09:23:07.000000000 +0000
+++ check-mk-1.2.6p12/hp_proliant_psu	2015-09-21 10:59:54.000000000 +0000
@@ -7,7 +7,7 @@
 # |           | |___| | | |  __/ (__|   <    | |  | | . \            |
 # |            \____|_| |_|\___|\___|_|\_\___|_|  |_|_|\_\           |
 # |                                                                  |
-# | Copyright Mathias Kettner 2013             mk@mathias-kettner.de |
+# | Copyright Mathias Kettner 2014             mk@mathias-kettner.de |
 # +------------------------------------------------------------------+
 #
 # This file is part of Check_MK.
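check_hp_proliant_power above is a textbook upper-threshold check: state 2 at crit, state 1 at warn, state 0 otherwise, with the level text only emitted when a level fired. The same logic condensed into one helper; a sketch, and the function name is ours:

# Sketch of the warn/crit ladder used by check_hp_proliant_power.
def check_upper_levels(reading, warn, crit):
    if reading >= crit:
        state = 2
    elif reading >= warn:
        state = 1
    else:
        state = 0
    levels = '( Warning/Critical %s/%s )' % (warn, crit) if state else ''
    perf = [('watt', reading, warn, crit)]
    return state, "Current reading: %d Watt" % reading + levels, perf

print(check_upper_levels(350, 300, 400))
# -> (1, 'Current reading: 350 Watt( Warning/Critical 300/400 )', ...)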
@@ -42,18 +42,23 @@
             snmp_status = hp_proliant_psu_status_map[int(status)]
             status = hp_proliant_status2nagios_map[snmp_status]
-            return (status, '%s - PSU in chassis %s, bay %s is in state "%s"' %
-                    (nagios_state_names[status], chassis, bay, snmp_status))
-    return (3, "UNKNOWN - item not found in snmp data")
-
-
-check_info['hp_proliant_psu'] = (check_hp_proliant_psu, "HW PSU %s", 0, inventory_hp_proliant_psu)
-snmp_info['hp_proliant_psu'] = ( ".1.3.6.1.4.1.232.6.2.9.3.1", [ "1", # cpqHeFltTolPowerSupplyChassis
-                                                                 "2", # cpqHeFltTolPowerSupplyBay
-                                                                 "3", # cpqHeFltTolPowerSupplyPresent
-                                                                 "4", # cpqHeFltTolPowerSupplyCondition
-                                                                 "9", # cpqHeFltTolPowerSupplyRedundant
-                               ] )
-
-snmp_scan_functions['hp_proliant_psu'] = \
-    lambda oid: "proliant" in oid(".1.3.6.1.4.1.232.2.2.4.2.0").lower()
+            return (status, 'PSU in chassis %s, bay %s is in state "%s"' %
+                    (chassis, bay, snmp_status))
+    return (3, "item not found in snmp data")
+
+check_info["hp_proliant_psu"] = {
+    'check_function': check_hp_proliant_psu,
+    'inventory_function': inventory_hp_proliant_psu,
+    'service_description': 'HW PSU %s',
+    'snmp_info': (
+        ".1.3.6.1.4.1.232.6.2.9.3.1", [
+            "1", # cpqHeFltTolPowerSupplyChassis
+            "2", # cpqHeFltTolPowerSupplyBay
+            "3", # cpqHeFltTolPowerSupplyPresent
+            "4", # cpqHeFltTolPowerSupplyCondition
+            "9", # cpqHeFltTolPowerSupplyRedundant
+        ]
+    ),
+    'snmp_scan_function': \
+        lambda oid: "proliant" in oid(".1.3.6.1.4.1.232.2.2.4.2.0").lower(),
+}
diff -Nru check-mk-1.2.2p3/hp_proliant_raid check-mk-1.2.6p12/hp_proliant_raid
--- check-mk-1.2.2p3/hp_proliant_raid	1970-01-01 00:00:00.000000000 +0000
+++ check-mk-1.2.6p12/hp_proliant_raid	2015-09-21 10:59:54.000000000 +0000
@@ -0,0 +1,78 @@
+#!/usr/bin/python
+# -*- encoding: utf-8; py-indent-offset: 4 -*-
+# +------------------------------------------------------------------+
+# |             ____ _               _        __  __ _  __           |
+# |            / ___| |__   ___  ___| | __   |  \/  | |/ /           |
+# |           | |   | '_ \ / _ \/ __| |/ /   | |\/| | ' /            |
+# |           | |___| | | |  __/ (__|   <    | |  | | . \            |
+# |            \____|_| |_|\___|\___|_|\_\___|_|  |_|_|\_\           |
+# |                                                                  |
+# | Copyright Mathias Kettner 2014             mk@mathias-kettner.de |
+# +------------------------------------------------------------------+
+#
+# This file is part of Check_MK.
+# The official homepage is at http://mathias-kettner.de/check_mk.
+#
+# check_mk is free software;  you can redistribute it and/or modify it
+# under the  terms of the  GNU General Public License  as published by
+# the Free Software Foundation in version 2.  check_mk is  distributed
+# in the hope that it will be useful, but WITHOUT ANY WARRANTY;  with-
+# out even the implied warranty of  MERCHANTABILITY  or  FITNESS FOR A
+# PARTICULAR PURPOSE. See the  GNU General Public License for more de-
+# tails. You should have  received  a copy of the  GNU  General Public
+# License along with GNU Make; see the file  COPYING.  If  not,  write
+# to the Free Software Foundation, Inc., 51 Franklin St,  Fifth Floor,
+# Boston, MA 02110-1301 USA.
+
+
+def inventory_hp_proliant_raid(info):
+    if len(info) > 0:
+        inventory = []
+        for line in info:
+            inventory.append((line[0], None))
+        return inventory
+
+def check_hp_proliant_raid(item, _no_params, info):
+    hp_proliant_raid_statusmap = {
+        3  : "More physical drives have failed than the fault tolerance mode can handle.",
+        4  : "Logical drive is not configured.",
+        5  : "Recovery for at least one physical drive has failed. No data loss currently.",
+        6  : "Logical drive is ready for recovery but is still operating in Interim Recovery Mode.",
+        8  : "The wrong physical drive was replaced after failure.",
+        9  : "A physical drive is not responding.",
+        10 : "Enclosure is overheated but drives are still functioning and should be shut down.",
+        11 : "Enclosure is overheated and drives are shutdown.",
+        12 : "Logical drive is currently doing Automatic Data Expansion.",
+        13 : "Logical drive is currently unavailable.",
+        14 : "Logical drive is in the queue for expansion.",
+    }
+    for line in info:
+        if line[0] == item:
+            state, size_mb, percent_rebuild = map(saveint, line[1:])
+            drive_size = "Logical Volume Size: %s" % get_bytes_human_readable(size_mb * 1024 * 1024)
+            if state == 7:
+                return 1, "Rebuild: %d%% finished. %s" % ( percent_rebuild, drive_size )
+            if state == 2:
+                return 0, "In normal operation mode. " + drive_size
+            if state in [ 4, 5, 6, 12, 14 ]:
+                return 1, hp_proliant_raid_statusmap[state] + " " + drive_size
+            if state in [ 3, 8, 9, 10, 11, 13 ]:
+                return 2, hp_proliant_raid_statusmap[state] + " " + drive_size
+    return (3, "Drive not found or unknown state")
+
+check_info["hp_proliant_raid"] = {
+    'check_function': check_hp_proliant_raid,
+    'inventory_function': inventory_hp_proliant_raid,
+    'service_description': 'Logical Device %s',
+    'snmp_info': (
+        ".1.3.6.1.4.1.232.3.2.3.1.1", [
+            14, # Drive Name
+            4,  # Logical Drive status
+            9,  # Drive Size
+            12, # Percent rebuild
+        ]
+    ),
+    'snmp_scan_function': \
+        lambda oid: "proliant" in oid(".1.3.6.1.4.1.232.2.2.4.2.0").lower(),
+}
diff -Nru check-mk-1.2.2p3/hp_proliant_temp check-mk-1.2.6p12/hp_proliant_temp
--- check-mk-1.2.2p3/hp_proliant_temp	2013-11-05 09:23:07.000000000 +0000
+++ check-mk-1.2.6p12/hp_proliant_temp	2015-09-21 10:59:54.000000000 +0000
@@ -7,7 +7,7 @@
 # |           | |___| | | |  __/ (__|   <    | |  | | . \            |
 # |            \____|_| |_|\___|\___|_|\_\___|_|  |_|_|\_\           |
 # |                                                                  |
-# | Copyright Mathias Kettner 2013             mk@mathias-kettner.de |
+# | Copyright Mathias Kettner 2014             mk@mathias-kettner.de |
 # +------------------------------------------------------------------+
 #
 # This file is part of Check_MK.
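After de-duplicating the severity lists above (state 12, Automatic Data Expansion, is a warning only, and the otherwise unreachable state 11 belongs with the critical cases), the routing in check_hp_proliant_raid can be read as three disjoint sets. A sketch of that classification; the set names are ours:

# Sketch: severity routing for the logical drive status used above.
# 2 = ok, 7 = rebuilding (reported separately with the rebuild percentage).
RAID_WARN = {4, 5, 6, 12, 14}      # degraded but recoverable situations
RAID_CRIT = {3, 8, 9, 10, 11, 13}  # data at risk or drive unavailable

def raid_severity(state):
    if state == 2:
        return 0
    if state == 7 or state in RAID_WARN:
        return 1
    if state in RAID_CRIT:
        return 2
    return 3  # unknown state

assert raid_severity(12) == 1 and raid_severity(11) == 2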
@@ -59,21 +59,26 @@ if int(value) > int(threshold): detail_output += ' (Above %s)' % threshold - return (status, '%s - Temperature Sensor %s "%s"%s' % - (nagios_state_names[status], index, hp_proliant_temp_locale[int(name)], + return (status, 'Temperature Sensor %s "%s"%s' % + (index, hp_proliant_temp_locale[int(name)], detail_output), [('temp', value, threshold, threshold)]) - return (3, "UNKNOWN - item not found in snmp data") + return (3, "item not found in snmp data") - -check_info['hp_proliant_temp'] = (check_hp_proliant_temp, "Temperature %s", 1, inventory_hp_proliant_temp) -snmp_info['hp_proliant_temp'] = ( ".1.3.6.1.4.1.232.6.2.6.8.1", [ "1", # cpqHeTemperatureChassis - "2", # cpqHeTemperatureIndex - "3", # cpqHeTemperatureLocale - "4", # cpqHeTemperatureCelsius - "5", # cpqHeTemperatureThresholdCelsius - "6", # cpqHeTemperatureCondition - ] ) -snmp_scan_functions['hp_proliant_temp'] = \ - lambda oid: "proliant" in oid(".1.3.6.1.4.1.232.2.2.4.2.0").lower() -checkgroup_of["hp_proliant_temp"] = "temperature_auto" +check_info["hp_proliant_temp"] = { + 'check_function': check_hp_proliant_temp, + 'inventory_function': inventory_hp_proliant_temp, + 'service_description': 'Temperature %s', + 'has_perfdata': True, + 'snmp_info': ( ".1.3.6.1.4.1.232.6.2.6.8.1", [ + "1", # cpqHeTemperatureChassis + "2", # cpqHeTemperatureIndex + "3", # cpqHeTemperatureLocale + "4", # cpqHeTemperatureCelsius + "5", # cpqHeTemperatureThresholdCelsius + "6", # cpqHeTemperatureCondition + ] ), + 'snmp_scan_function': \ + lambda oid: "proliant" in oid(".1.3.6.1.4.1.232.2.2.4.2.0").lower(), + 'group': 'temperature_auto', +} diff -Nru check-mk-1.2.2p3/hp_sts_drvbox check-mk-1.2.6p12/hp_sts_drvbox --- check-mk-1.2.2p3/hp_sts_drvbox 2013-11-05 09:23:07.000000000 +0000 +++ check-mk-1.2.6p12/hp_sts_drvbox 2015-09-21 10:59:54.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. 
@@ -109,24 +109,27 @@ output.append('(Type: %s, Model: %s, Serial: %s, Location: %s)' % (hp_sts_drvbox_type_map.get(ty, 'unknown'), model, serial, loc)) - return (sum_state, '%s - %s' % (nagios_state_names[sum_state], ', '.join(output))) - return (3, "UNKNOWN - Controller not found in snmp data") + return (sum_state, ', '.join(output)) + return (3, "Controller not found in snmp data") - -check_info['hp_sts_drvbox'] = (check_hp_sts_drvbox, "Drive Box %s", 0, inventory_hp_sts_drvbox) -snmp_info['hp_sts_drvbox'] = ( ".1.3.6.1.4.1.232.8.2.1.1", [ - "1", # cpqSsBoxCntlrIndex - "2", # cpqSsBoxBusIndex - "3", # cpqSsBoxType - "4", # cpqSsBoxModel - "7", # cpqSsBoxFanStatus - "8", # cpqSsBoxCondition - "9", # cpqSsBoxTempStatus - "10", # cpqSsBoxSidePanelStatus - "11", # cpqSsBoxFltTolPwrSupplyStatus - "17", # cpqSsBoxSerialNumber - "23", # cpqSsBoxLocationString -] ) - -snmp_scan_functions['hp_sts_drvbox'] = \ - lambda oid: "proliant" in oid(".1.3.6.1.4.1.232.2.2.4.2.0").lower() +check_info["hp_sts_drvbox"] = { + 'check_function': check_hp_sts_drvbox, + 'inventory_function': inventory_hp_sts_drvbox, + 'service_description': 'Drive Box %s', + 'snmp_info': ( + '.1.3.6.1.4.1.232.8.2.1.1', [ + '1', # cpqSsBoxCntlrIndex + '2', # cpqSsBoxBusIndex + '3', # cpqSsBoxType + '4', # cpqSsBoxModel + '7', # cpqSsBoxFanStatus + '8', # cpqSsBoxCondition + '9', # cpqSsBoxTempStatus + '10', # cpqSsBoxSidePanelStatus + '11', # cpqSsBoxFltTolPwrSupplyStatus + '17', # cpqSsBoxSerialNumber + '23', # cpqSsBoxLocationString + ]), + 'snmp_scan_function': \ + lambda oid: "proliant" in oid(".1.3.6.1.4.1.232.2.2.4.2.0").lower(), +} diff -Nru check-mk-1.2.2p3/hpux/hpux_lunstats check-mk-1.2.6p12/hpux/hpux_lunstats --- check-mk-1.2.2p3/hpux/hpux_lunstats 2012-10-04 13:12:37.000000000 +0000 +++ check-mk-1.2.6p12/hpux/hpux_lunstats 1970-01-01 00:00:00.000000000 +0000 @@ -1,68 +0,0 @@ -#!/usr/bin/ksh - -# Put this file into /usr/lib/check_mk_agent/plugins. Then -# reinventorize your host. -# Actually querying these stats is quite slow since they freshly update -# on each call. If you have a few 1000 luns then this will not work. - -get_stats() -{ - scsimgr get_stat -D $LUN | tr '\=' ':' | grep -e 'STATISTICS FOR LUN' -e 'Bytes' -e 'Total I/Os processed' -e 'I/O failure' -e 'IO failures due -to' - return $? -} - - -# Ex: -#LUN PATH INFORMATION FOR LUN : /dev/pt/pt2 -#World Wide Identifier(WWID) = -#LUN PATH INFORMATION FOR LUN : /dev/rdisk/disk5 -#World Wide Identifier(WWID) = 0x60a98000572d44745634645076556357 -#LUN PATH INFORMATION FOR LUN : /dev/rdisk/disk6 - -get_lun_map() -{ -scsimgr lun_map | egrep '^[[:space:]]*(LUN PATH|World Wide Identifier)' | tr '\=' ':' -} - - -main() -{ -get_lun_map | while read line ; do - descr=$(echo $line | awk -F: '{print $1}') - val=$( echo $line | awk -F: '{print $2}') - case $descr in - LUN*) - if echo $val | grep /dev/rdisk 1>/dev/null; then - DMP=yes - LUN=$val - else - DMP=no - unset LUN - fi - ;; - World*) - if [ $DMP = "yes" ]; then - echo "WWID: $val" - get_stats $LUN - fi - ;; - *) - echo "Fehler:" - echo $line - echo $descr - echo $val - sleep 1 - ;; - esac -done -} - - - -# Verify the system is using new multipath device model. 
-if [ -d /dev/rdisk ] && [ -d /dev/disk ]; then
-    echo '<<<hpux_lunstats>>>'
-    main
-fi
-
diff -Nru check-mk-1.2.2p3/hpux/hpux_statgrab check-mk-1.2.6p12/hpux/hpux_statgrab
--- check-mk-1.2.2p3/hpux/hpux_statgrab	2012-10-04 13:12:37.000000000 +0000
+++ check-mk-1.2.6p12/hpux/hpux_statgrab	1970-01-01 00:00:00.000000000 +0000
@@ -1,23 +0,0 @@
-#!/bin/sh
-
-# this is for users who compiled statgrab on hp-ux.
-# note you'll need a 0.18+ version, from their github page at
-# https://github.com/i-scream/libstatgrab
-# flags used for compiling - disable documentation, examples and set*id
-
-
-if which statgrab > /dev/null ; then
-    if statgrab const. cpu. general. mem. page. proc. swap. user. > /tmp/statgrab.$$ 2>/dev/null
-    then
-        for s in proc cpu page
-        do
-            echo "<<<statgrab_$s>>>"
-            cat /tmp/statgrab.$$ | grep "^$s\." | cut -d. -f2-99 | sed 's/ *= */ /'
-        done
-
-        echo '<<<statgrab_mem>>>'
-        cat /tmp/statgrab.$$ | egrep "^(swap|mem)\." | sed 's/ *= */ /'
-
-    fi
-    [ -f /tmp/statgrab.$$ ] && rm -f /tmp/statgrab.$$
-fi
diff -Nru check-mk-1.2.2p3/hpux_cpu check-mk-1.2.6p12/hpux_cpu
--- check-mk-1.2.2p3/hpux_cpu	2013-11-05 09:23:07.000000000 +0000
+++ check-mk-1.2.6p12/hpux_cpu	2015-09-16 14:25:30.000000000 +0000
@@ -7,7 +7,7 @@
 # |           | |___| | | |  __/ (__|   <    | |  | | . \            |
 # |            \____|_| |_|\___|\___|_|\_\___|_|  |_|_|\_\           |
 # |                                                                  |
-# | Copyright Mathias Kettner 2013             mk@mathias-kettner.de |
+# | Copyright Mathias Kettner 2014             mk@mathias-kettner.de |
 # +------------------------------------------------------------------+
 #
 # This file is part of Check_MK.
@@ -28,7 +28,7 @@
 #    2:23pm  up 196 days,  4:47,  5 users,  load average: 0.05, 0.04, 0.04
 # 8 logical processors (2 per socket)
 
-cpuload_default_levels = (5, 10)
+cpuload_default_levels = (5.0, 10.0)
 
 def inventory_hpux_cpu_load(info):
     if len(info) >= 1 and 'load' in info[0]:
@@ -42,16 +42,14 @@
     except:
         num_cpus = 1
 
-    warn, crit = params # apply on 15min average, relative to number of CPUs
-    warn = warn * num_cpus
-    crit = crit * num_cpus
-    perfdata = [ ('load' + str(z), l, warn, crit, 0 ) for (z, l) in [ (1, load[0]), (5, load[1]), (15, load[2]) ] ]
-
-    if load[2] >= crit:
-        return (2, "CRIT - 15min Load %.2f at %s CPUs (critical at %.2f)" % (load[2], num_cpus, crit), perfdata)
-    elif load[2] >= warn:
-        return (1, "WARN - 15min Load %.2f at %s CPUs (warning at %.2f)" % (load[2], num_cpus, warn), perfdata)
-    else:
-        return (0, "OK - 15min Load %.2f at %s CPUs" % (load[2], num_cpus), perfdata)
+    return check_cpu_load_generic(params, load, num_cpus)
 
-check_info['hpux_cpu'] = (check_hpux_cpu_load, "CPU load", 1, inventory_hpux_cpu_load )
+
+check_info["hpux_cpu"] = {
+    'check_function': check_hpux_cpu_load,
+    'inventory_function': inventory_hpux_cpu_load,
+    'service_description': 'CPU load',
+    'has_perfdata': True,
+    "group": "cpu_load",
+    "includes": ["cpu_load.include"],
+}
diff -Nru check-mk-1.2.2p3/hpux_fchba check-mk-1.2.6p12/hpux_fchba
--- check-mk-1.2.2p3/hpux_fchba	2013-11-05 09:23:07.000000000 +0000
+++ check-mk-1.2.6p12/hpux_fchba	2015-06-24 09:48:37.000000000 +0000
@@ -7,7 +7,7 @@
 # |           | |___| | | |  __/ (__|   <    | |  | | . \            |
 # |            \____|_| |_|\___|\___|_|\_\___|_|  |_|_|\_\           |
 # |                                                                  |
-# | Copyright Mathias Kettner 2013             mk@mathias-kettner.de |
+# | Copyright Mathias Kettner 2014             mk@mathias-kettner.de |
 # +------------------------------------------------------------------+
 #
 # This file is part of Check_MK.
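The hpux_cpu hunk above replaces the hand-rolled 15-minute load comparison with check_cpu_load_generic from cpu_load.include; the key idea, kept from the removed code, is that the (5.0, 10.0) default levels apply per CPU. A standalone sketch of that scaling, with a function name of our own:

# Sketch: load levels relative to the number of CPUs, as in the removed
# check_hpux_cpu_load body (levels apply to the 15 min average).
def check_cpu_load(load, num_cpus, levels=(5.0, 10.0)):
    warn, crit = [l * num_cpus for l in levels]
    load15 = load[2]
    if load15 >= crit:
        return 2, "15min Load %.2f at %s CPUs (critical at %.2f)" % (load15, num_cpus, crit)
    elif load15 >= warn:
        return 1, "15min Load %.2f at %s CPUs (warning at %.2f)" % (load15, num_cpus, warn)
    return 0, "15min Load %.2f at %s CPUs" % (load15, num_cpus)

print(check_cpu_load((0.05, 0.04, 0.04), 8))
# -> (0, '15min Load 0.04 at 8 CPUs')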
@@ -62,7 +62,7 @@
 def check_hpux_fchba(item, _no_params, info):
     parsed = parse_hpux_fchba(info)
     if item not in parsed:
-        return (3, "UNKNOWN - HBA noch found")
+        return (3, "HBA not found")
 
     hba = parsed[item]
 
@@ -86,10 +86,7 @@
             infos.append("Driver-Firmware Dump Available(!!)")
             state = 2
-
-
-    infotext = ", ".join(infos)
-    return state, nagios_state_names[state] + " - " + infotext
+    return (state, ", ".join(infos))
 
 check_info['hpux_fchba'] = {
diff -Nru check-mk-1.2.2p3/hpux_if check-mk-1.2.6p12/hpux_if
--- check-mk-1.2.2p3/hpux_if	2013-11-05 09:23:07.000000000 +0000
+++ check-mk-1.2.6p12/hpux_if	2015-06-24 09:48:37.000000000 +0000
@@ -7,7 +7,7 @@
 # |           | |___| | | |  __/ (__|   <    | |  | | . \            |
 # |            \____|_| |_|\___|\___|_|\_\___|_|  |_|_|\_\           |
 # |                                                                  |
-# | Copyright Mathias Kettner 2013             mk@mathias-kettner.de |
+# | Copyright Mathias Kettner 2014             mk@mathias-kettner.de |
 # +------------------------------------------------------------------+
 #
 # This file is part of Check_MK.
@@ -116,7 +116,7 @@
         mult = 1000 * 1000 * 1000
     else:
         mult = 1000 * 1000
-    return int(parts[0]) * mult
+    return float(parts[0]) * mult
 
 def hpux_parse_operstatus(txt):
     if txt.lower() == "up":
@@ -131,6 +131,12 @@
     return check_if_common(item, params, hpux_convert_to_if64(info))
 
 check_includes['hpux_if'] = [ "if.include" ]
-check_info['hpux_if'] = (check_hpux_if, "NIC %s", 1, inventory_hpux_if )
-checkgroup_of['hpux_if'] = "if"
-check_default_levels['hpux_if'] = "if_default_levels"
+
+check_info["hpux_if"] = {
+    'check_function': check_hpux_if,
+    'inventory_function': inventory_hpux_if,
+    'service_description': 'NIC %s',
+    'has_perfdata': True,
+    'group': 'if',
+    'default_levels_variable': 'if_default_levels',
+}
diff -Nru check-mk-1.2.2p3/hpux_lunstats check-mk-1.2.6p12/hpux_lunstats
--- check-mk-1.2.2p3/hpux_lunstats	2013-11-05 09:23:07.000000000 +0000
+++ check-mk-1.2.6p12/hpux_lunstats	2015-06-24 09:48:37.000000000 +0000
@@ -7,7 +7,7 @@
 # |           | |___| | | |  __/ (__|   <    | |  | | . \            |
 # |            \____|_| |_|\___|\___|_|\_\___|_|  |_|_|\_\           |
 # |                                                                  |
-# | Copyright Mathias Kettner 2013             mk@mathias-kettner.de |
+# | Copyright Mathias Kettner 2014             mk@mathias-kettner.de |
 # +------------------------------------------------------------------+
 #
 # This file is part of Check_MK.
@@ -62,7 +62,7 @@
             bytes_read = int(right) / 512
         elif left == 'Bytes written':
             bytes_written = int(right) / 512
-            luns.append((lun, bytes_read, bytes_written))
+            luns.append((None, lun, bytes_read, bytes_written))
     return luns
 
 def check_hpux_lunstats(item, params, info):
@@ -71,6 +71,10 @@
 def inventory_hpux_lunstats(info):
     return inventory_diskstat_generic(hpux_lunstats_convert(info))
 
-check_info['hpux_lunstats'] = (check_hpux_lunstats, "Disk IO %s", 1, inventory_hpux_lunstats)
-checkgroup_of["diskstat"] = "disk_io"
-
+check_info["hpux_lunstats"] = {
+    'check_function': check_hpux_lunstats,
+    'inventory_function': inventory_hpux_lunstats,
+    'service_description': 'Disk IO %s',
+    'has_perfdata': True,
+    'group': 'disk_io',
+}
diff -Nru check-mk-1.2.2p3/hpux_lvm check-mk-1.2.6p12/hpux_lvm
--- check-mk-1.2.2p3/hpux_lvm	2013-11-05 09:23:07.000000000 +0000
+++ check-mk-1.2.6p12/hpux_lvm	2015-06-24 09:48:37.000000000 +0000
@@ -7,7 +7,7 @@
 # |           | |___| | | |  __/ (__|   <    | |  | | . \            |
 # |            \____|_| |_|\___|\___|_|\_\___|_|  |_|_|\_\           |
 # |                                                                  |
-# | Copyright Mathias Kettner 2013             mk@mathias-kettner.de |
+# | Copyright Mathias Kettner 2014             mk@mathias-kettner.de |
 # +------------------------------------------------------------------+
 #
 # This file is part of Check_MK.
@@ -57,12 +57,17 @@ lv_name = line[0].split("=")[1] if lv_name == item: status = line[1].split("=")[1] - infotext = " - status is %s (VG = %s)" % (status, vg_name) + infotext = "status is %s (VG = %s)" % (status, vg_name) if status == "available,syncd": - return (0, "OK" + infotext) + return (0, infotext) else: - return (2, "CRIT" + infotext) + return (2, infotext) - return (3, "UNKNOWN - no such volume found") + return (3, "no such volume found") -check_info['hpux_lvm'] = (check_hpux_lvm, "Logical Volume %s", 0, inventory_hpux_lvm ) + +check_info["hpux_lvm"] = { + 'check_function': check_hpux_lvm, + 'inventory_function': inventory_hpux_lvm, + 'service_description': 'Logical Volume %s', +} diff -Nru check-mk-1.2.2p3/hpux_multipath check-mk-1.2.6p12/hpux_multipath --- check-mk-1.2.2p3/hpux_multipath 2013-11-05 09:23:07.000000000 +0000 +++ check-mk-1.2.6p12/hpux_multipath 2015-06-24 09:48:37.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. @@ -60,7 +60,7 @@ # State = ACTIVE # State = STANDBY -hpux_multipath_pathstates = { "ACTIVE": 0, "STANDBY": 1, "FAILED": 2, "UNOPEN": 3, "CLOSING" : 1} +hpux_multipath_pathstates = { "ACTIVE": 0, "STANDBY": 1, "FAILED": 2, "UNOPEN": 3, "OPENING" : 0, "CLOSING" : 1} def parse_hpux_multipath(info): disks = {} @@ -95,19 +95,24 @@ def check_hpux_multipath(item, params, info): disks = parse_hpux_multipath(info) if item not in disks: - return (3, "UNKNOWN - no LUN with this WWID found") + return (3, "no LUN with this WWID found") disk, pathcounts = disks[item] if pathcounts[2] > 0: - return (2, "CRIT - %s: %d failed paths! (%s)" % (disk, pathcounts[2], hpux_multipath_format_pathstatus(pathcounts))) + return (2, "%s: %d failed paths! (%s)" % (disk, pathcounts[2], hpux_multipath_format_pathstatus(pathcounts))) elif list(pathcounts) != list(params): - return (1, "WARN - %s: Invalid path status %s (should be %s)" % + return (1, "%s: Invalid path status %s (should be %s)" % (disk, hpux_multipath_format_pathstatus(pathcounts), hpux_multipath_format_pathstatus(params))) else: - return (0, "OK - %s: %s" % (disk, hpux_multipath_format_pathstatus(pathcounts))) + return (0, "%s: %s" % (disk, hpux_multipath_format_pathstatus(pathcounts))) -check_info['hpux_multipath'] = (check_hpux_multipath, "Multipath %s", 0, inventory_hpux_multipath ) -checkgroup_of['hpux_multipath'] = "hpux_multipath" + +check_info["hpux_multipath"] = { + 'check_function': check_hpux_multipath, + 'inventory_function': inventory_hpux_multipath, + 'service_description': 'Multipath %s', + 'group': 'hpux_multipath', +} diff -Nru check-mk-1.2.2p3/hpux_serviceguard check-mk-1.2.6p12/hpux_serviceguard --- check-mk-1.2.2p3/hpux_serviceguard 2013-11-05 09:23:07.000000000 +0000 +++ check-mk-1.2.6p12/hpux_serviceguard 2015-06-24 09:48:37.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. 
@@ -62,8 +62,13 @@ code = 1 else: code = 2 - return (code, nagios_state_names[code] + " - state is %s" % status) + return (code, "state is %s" % status) - return (3, "UNKNOWN - No such item found") + return (3, "No such item found") -check_info['hpux_serviceguard'] = (check_hpux_serviceguard, "Serviceguard", 0, inventory_hpux_serviceguard ) + +check_info["hpux_serviceguard"] = { + 'check_function': check_hpux_serviceguard, + 'inventory_function': inventory_hpux_serviceguard, + 'service_description': 'Serviceguard', +} diff -Nru check-mk-1.2.2p3/hpux_snmp_cs check-mk-1.2.6p12/hpux_snmp_cs --- check-mk-1.2.2p3/hpux_snmp_cs 2013-11-05 09:23:07.000000000 +0000 +++ check-mk-1.2.6p12/hpux_snmp_cs 2015-06-24 09:48:37.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. @@ -61,7 +61,6 @@ def check_hpux_snmp_cpu(item, _no_params, info): parts = dict(info) - some_counter_wrapped = False this_time = time.time() total_rate = 0 rates = [] @@ -71,18 +70,12 @@ ( "idle", "15.0" ), ( "nice", "16.0" )]: value = int(parts[oid]) - try: - timedif, rate = get_counter("snmp_cpu_util.%s" % what, this_time, value) - total_rate += rate - rates.append(rate) - except MKCounterWrapped: - some_counter_wrapped = True - - if some_counter_wrapped: - raise MKCounterWrapped("", "Some counter wrapped, no data this time") + rate = get_rate("snmp_cpu_util.%s" % what, this_time, value) + total_rate += rate + rates.append(rate) if total_rate == 0: - raise MKCounterWrapped("", "No counter counted. Time has ceased to flow.") + raise MKCounterWrapped("No counter counted. Time has ceased to flow.") perfdata = [] infos = [] @@ -92,7 +85,7 @@ perfdata.append((what, perc, None, None, 0, 100)) infos.append("%s: %.0f%%" % (what, perc)) - return (0, "OK - " + ", ".join(infos), perfdata) + return (0, ", ".join(infos), perfdata) check_info['hpux_snmp_cs.cpu'] = (check_hpux_snmp_cpu, "CPU utilization", 1, inventory_hpux_snmp_cpu) diff -Nru check-mk-1.2.2p3/hpux_snmp_cs.cpu check-mk-1.2.6p12/hpux_snmp_cs.cpu --- check-mk-1.2.2p3/hpux_snmp_cs.cpu 2013-11-05 09:23:07.000000000 +0000 +++ check-mk-1.2.6p12/hpux_snmp_cs.cpu 2015-06-24 09:48:36.000000000 +0000 @@ -1,6 +1,6 @@ -title: Check CPU usage (utilisation) via SNMP on HP-UX +title: CPU utilization via SNMP on HP-UX agents: snmp -author: Mathias Kettner +catalog: os/kernel license: GPL distribution: check_mk description: diff -Nru check-mk-1.2.2p3/hpux_tunables check-mk-1.2.6p12/hpux_tunables --- check-mk-1.2.2p3/hpux_tunables 2013-11-05 09:23:07.000000000 +0000 +++ check-mk-1.2.6p12/hpux_tunables 2015-09-21 10:59:54.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. 
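The hpux_snmp_cs hunk above swaps the old get_counter/MKCounterWrapped bookkeeping for get_rate, which raises on the first sample instead of returning a wrapped flag. A self-contained sketch of what such a rate helper does; this is our simplified stand-in, not Check_MK's implementation:

import time

_counter_store = {}  # sketch: Check_MK persists this between check runs

class MKCounterWrapped(Exception):
    """Raised when no rate can be computed yet (first sample or reset)."""

def get_rate(name, this_time, value):
    last = _counter_store.get(name)
    _counter_store[name] = (this_time, value)
    if last is None:
        raise MKCounterWrapped("Counter %r initialized" % name)
    last_time, last_value = last
    if this_time <= last_time or value < last_value:
        raise MKCounterWrapped("Counter %r wrapped" % name)
    return (value - last_value) / (this_time - last_time)

# The first call primes the store, the second yields ticks per second.
try:
    get_rate("snmp_cpu_util.user", time.time(), 1000)
except MKCounterWrapped:
    pass
print(get_rate("snmp_cpu_util.user", time.time() + 60, 4000))  # ~50.0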
@@ -42,8 +42,6 @@ # Setting: 1073741824 # Percentage: 16.5 - - def parse_hpux_tunables(info): parsed = {} for line in info: @@ -68,11 +66,10 @@ def check_hpux_tunables(item, params, info, hpux_tunable, thingname): - parsed = parse_hpux_tunables(info) if hpux_tunable not in parsed: - return (3, "UNKNOWN - tunable not found in agent output") + return (3, "tunable not found in agent output") usage, threshold = parsed[hpux_tunable] mimi = float(threshold) / 100 @@ -91,8 +88,7 @@ else: perfdata = [ (thingname, usage, None, None, 0, threshold) ] - return (state, nagios_state_names[state] + \ - " - %.2f%% used" % pct + state * "!" + " (%d/%d %s) " % (usage, threshold, thingname), perfdata) + return (state, "%.2f%% used" % pct + state * "!" + " (%d/%d %s) " % (usage, threshold, thingname), perfdata) diff -Nru check-mk-1.2.2p3/hpux_tunables.maxfiles_lim check-mk-1.2.6p12/hpux_tunables.maxfiles_lim --- check-mk-1.2.2p3/hpux_tunables.maxfiles_lim 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/hpux_tunables.maxfiles_lim 2015-06-24 09:48:36.000000000 +0000 @@ -0,0 +1,22 @@ +title: Kernel tunables: Number of open files +agents: hpux +catalog: os/kernel +license: GPL +distribution: check_mk +description: + The check uses the output of {kcusage -l} for monitoring the + current number of open files (maxfiles_lim). Per default this + check is always {OK}, but you can set levels for the used + percentage. + + See check manpage for hpux_tunables too. + +perfdata: + See check manpage for hpux_tunables. + +inventory: + See check manpage for hpux_tunables. + +[parameters] +warn(float): See check manpage for hpux_tunables. +crit(float): See check manpage for hpux_tunables. diff -Nru check-mk-1.2.2p3/hpux_tunables.nkthread check-mk-1.2.6p12/hpux_tunables.nkthread --- check-mk-1.2.2p3/hpux_tunables.nkthread 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/hpux_tunables.nkthread 2015-06-24 09:48:36.000000000 +0000 @@ -0,0 +1,23 @@ +title: Kernel tunables: Number of threads +agents: hpux +catalog: os/kernel +license: GPL +distribution: check_mk +description: + The check uses the output of {kcusage -l} for monitoring the + current number of threads (nkthread). Per default this + check is always {OK}, but you can set levels for the used + percentage. + + See check manpage for hpux_tunables too. + +perfdata: + See check manpage for hpux_tunables. + +inventory: + See check manpage for hpux_tunables. + +[parameters] +warn(float): See check manpage for hpux_tunables. +crit(float): See check manpage for hpux_tunables. + diff -Nru check-mk-1.2.2p3/hpux_tunables.nproc check-mk-1.2.6p12/hpux_tunables.nproc --- check-mk-1.2.2p3/hpux_tunables.nproc 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/hpux_tunables.nproc 2015-06-24 09:48:36.000000000 +0000 @@ -0,0 +1,23 @@ +title: Kernel tunables: Number of Processes +agents: hpux +catalog: os/kernel +license: GPL +distribution: check_mk +description: + The check uses the output of {kcusage -l} for monitoring the + current number of processes (nproc). Per default this + check is always {OK}, but you can set levels for the used + percentage. + + See check manpage for hpux_tunables too. + +perfdata: + See check manpage for hpux_tunables. + +inventory: + See check manpage for hpux_tunables. + +[parameters] +warn(float): See check manpage for hpux_tunables. +crit(float): See check manpage for hpux_tunables. 
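
check_hpux_tunables above reduces to one comparison: the percentage of a kernel tunable in use against optional (warn, crit) levels. A standalone sketch of that level logic (function and perfdata names are illustrative, not the check's own helpers):

    def check_levels_pct(usage, threshold, params=None):
        # usage/threshold come from 'kcusage -l'; params is (warn, crit) in percent
        pct = 100.0 * usage / threshold
        state = 0
        if params:
            warn, crit = params
            if pct >= crit:
                state = 2
            elif pct >= warn:
                state = 1
        perfdata = [ ("usage", usage, None, None, 0, threshold) ]
        return (state, "%.2f%% used (%d/%d)" % (pct, usage, threshold), perfdata)

    # 1200 of 4096 semaphores in use, warn at 80%, crit at 90%:
    print(check_levels_pct(1200, 4096, (80.0, 90.0)))  # -> state 0, '29.30% used'
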
+ diff -Nru check-mk-1.2.2p3/hpux_tunables.semmni check-mk-1.2.6p12/hpux_tunables.semmni --- check-mk-1.2.2p3/hpux_tunables.semmni 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/hpux_tunables.semmni 2015-06-24 09:48:36.000000000 +0000 @@ -0,0 +1,23 @@ +title: Kernel tunables: Number of IPC Semaphore IDs +agents: hpux +catalog: os/kernel +license: GPL +distribution: check_mk +description: + The check uses the output of {kcusage -l} for monitoring the + current number of IPC semaphore IDs (semmni). Per default this + check is always {OK}, but you can set levels for the used + percentage. + + See check manpage for hpux_tunables too. + +perfdata: + See check manpage for hpux_tunables. + +inventory: + See check manpage for hpux_tunables. + +[parameters] +warn(float): See check manpage for hpux_tunables. +crit(float): See check manpage for hpux_tunables. + diff -Nru check-mk-1.2.2p3/hpux_tunables.semmns check-mk-1.2.6p12/hpux_tunables.semmns --- check-mk-1.2.2p3/hpux_tunables.semmns 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/hpux_tunables.semmns 2015-06-24 09:48:36.000000000 +0000 @@ -0,0 +1,23 @@ +title: Kernel tunables: Number of IPC Semaphores +agents: hpux +catalog: os/kernel +license: GPL +distribution: check_mk +description: + The check uses the output of {kcusage -l} for monitoring the + current number of IPC semaphores (semmns). Per default this + check is always {OK}, but you can set levels for the used + percentage. + + See check manpage for hpux_tunables too. + +perfdata: + See check manpage for hpux_tunables. + +inventory: + See check manpage for hpux_tunables. + +[parameters] +warn(float): See check manpage for hpux_tunables. +crit(float): See check manpage for hpux_tunables. + diff -Nru check-mk-1.2.2p3/hpux_tunables.shmseg check-mk-1.2.6p12/hpux_tunables.shmseg --- check-mk-1.2.2p3/hpux_tunables.shmseg 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/hpux_tunables.shmseg 2015-06-24 09:48:36.000000000 +0000 @@ -0,0 +1,23 @@ +title: Kernel tunables: Number of Shared Memory Segments +agents: hpux +catalog: os/kernel +license: GPL +distribution: check_mk +description: + The check uses the output of {kcusage -l} for monitoring the + current number of shared memory segments (shmseg). Per default this + check is always {OK}, but you can set levels for the used + percentage. + + See check manpage for hpux_tunables too. + +perfdata: + See check manpage for hpux_tunables. + +inventory: + See check manpage for hpux_tunables. + +[parameters] +warn(float): See check manpage for hpux_tunables. +crit(float): See check manpage for hpux_tunables. + diff -Nru check-mk-1.2.2p3/hr_cpu check-mk-1.2.6p12/hr_cpu --- check-mk-1.2.2p3/hr_cpu 2013-11-05 09:23:07.000000000 +0000 +++ check-mk-1.2.6p12/hr_cpu 2015-09-21 10:59:54.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. 
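
The kernel-tunable man pages above all document the same pair of float parameters. In the classic main.mk rule format of this Check_MK generation, setting such levels would look roughly like the following sketch (the 'hpux' host tag and the service descriptions are examples only, and ALL_HOSTS is defined by Check_MK when it evaluates main.mk):

    # Hypothetical main.mk snippet: warn at 80%, crit at 90% of a tunable's
    # limit, as a (params, tags, hosts, services) rule.
    check_parameters = [
        ( (80.0, 90.0), [ "hpux" ], ALL_HOSTS,
          [ "Number of threads", "Number of processes", "Number of open files" ] ),
    ]
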
@@ -36,24 +36,33 @@ util += int(line[0]) num_cpus += 1 if num_cpus == 0: - return (3, "UNKNOWN - no data found in SNMP output") + return (3, "no data found in SNMP output") util = float(util) / num_cpus - infotext = " - %2.1f%% utilization at %d CPUs" % (util, num_cpus) - warn, crit = params + infotext = "%2.1f%% utilization at %d CPUs" % (util, num_cpus) + if params: + warn, crit = params + else: + warn, crit = None, None perfdata = [("util", util, warn, crit, 0, 100)] - if util >= crit: - return (2, "CRIT" + infotext + " (critical at %d%%)" % crit, perfdata) - elif util >= warn: - return (1, "WARN" + infotext + " (warning at %d%%)" % warn, perfdata) + if crit and util >= crit: + return (2, infotext + " (critical at %d%%)" % crit, perfdata) + elif warn and util >= warn: + return (1, infotext + " (warning at %d%%)" % warn, perfdata) else: - return (0, "OK" + infotext, perfdata) + return (0, infotext, perfdata) -check_info["hr_cpu"] = (check_hr_cpu, "CPU utilization", 1, inventory_hr_cpu) -snmp_info["hr_cpu"] = ( ".1.3.6.1.2.1.25.3.3.1", [ 2 ] ) -checkgroup_of['hr_cpu'] = "cpu_utilization" # HOST-RESOURCES-MIB::hrSystemUptime.0 -snmp_scan_functions["hr_cpu"] = lambda oid: \ - not not oid('.1.3.6.1.2.1.25.1.1.0') and not \ - ("linux" in oid(".1.3.6.1.2.1.1.1.0").lower() - and oid(".1.3.6.1.4.1.2021.10.1.6.1")) + +check_info["hr_cpu"] = { + 'check_function': check_hr_cpu, + 'inventory_function': inventory_hr_cpu, + 'service_description': 'CPU utilization', + 'has_perfdata': True, + 'snmp_info': ('.1.3.6.1.2.1.25.3.3.1', [2]), + 'snmp_scan_function': lambda oid: \ + not not oid('.1.3.6.1.2.1.25.1.1.0') and not \ + ("linux" in oid(".1.3.6.1.2.1.1.1.0").lower() + and oid(".1.3.6.1.4.1.2021.10.1.6.1")), + 'group': 'cpu_utilization', +} diff -Nru check-mk-1.2.2p3/hr_fs check-mk-1.2.6p12/hr_fs --- check-mk-1.2.2p3/hr_fs 2013-11-05 09:23:07.000000000 +0000 +++ check-mk-1.2.6p12/hr_fs 2015-09-21 10:59:54.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. @@ -24,20 +24,31 @@ # to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, # Boston, MA 02110-1301 USA. -check_includes['hr_fs'] = [ "df.include" ] +# Juniper devices put information about the device into the +# field where we expect the mount point. Ugly. Remove that crap. 
+def fix_hr_fs_mountpoint(mp): + mp = snmp_decode_string(mp) + if "mounted on:" in mp: + return mp.rsplit(":",1)[-1].strip() + else: + return mp + def inventory_hr_fs(info): mplist = [] for hrtype, hrdescr, hrunits, hrsize, hrused in info: + hrdescr = fix_hr_fs_mountpoint(hrdescr) if hrtype in [ ".1.3.6.1.2.1.25.2.1.4" ] and \ hrdescr not in inventory_df_exclude_mountpoints and \ saveint(hrsize) != 0: mplist.append(hrdescr) return df_inventory(mplist) + def check_hr_fs(item, params, info): fslist = [] for hrtype, hrdescr, hrunits, hrsize, hrused in info: + hrdescr = fix_hr_fs_mountpoint(hrdescr) if "patterns" in params or item == hrdescr: unit_size = saveint(hrunits) hrsize = saveint(hrsize) @@ -56,17 +67,22 @@ return df_check_filesystem_list(item, params, fslist) -check_info["hr_fs"] = (check_hr_fs, "fs_%s", 1, inventory_hr_fs) -snmp_info["hr_fs"] = ( ".1.3.6.1.2.1.25.2.3.1", [ - 2, # hrStorageType - 3, # hrStorageDescr - 4, # hrStorageAllocationUnits - 5, # hrStorageSize - 6, # hrStorageUsed -] ) -checkgroup_of['hr_fs'] = "filesystem" - -snmp_scan_functions["hr_fs"] = lambda oid: \ - not not oid('.1.3.6.1.2.1.25.1.1.0') # HOST-RESOURCES-MIB::hrSystemUptime.0 - -check_default_levels["hr_fs"] = "filesystem_default_levels" +check_info["hr_fs"] = { + 'check_function': check_hr_fs, + 'inventory_function': inventory_hr_fs, + 'service_description': 'Filesystem %s', + 'has_perfdata': True, + 'snmp_info': ('.1.3.6.1.2.1.25.2.3.1', [ + 2, # hrStorageType + 3, # hrStorageDescr + 4, # hrStorageAllocationUnits + 5, # hrStorageSize + 6, # hrStorageUsed + ]), + # HOST-RESOURCES-MIB::hrSystemUptime.0 + 'snmp_scan_function': lambda oid: \ + not not oid('.1.3.6.1.2.1.25.1.1.0'), + 'group': 'filesystem', + 'default_levels_variable': 'filesystem_default_levels', + 'includes': [ "df.include" ], +} diff -Nru check-mk-1.2.2p3/hr_mem check-mk-1.2.6p12/hr_mem --- check-mk-1.2.2p3/hr_mem 2013-11-05 09:23:07.000000000 +0000 +++ check-mk-1.2.6p12/hr_mem 2015-06-24 09:48:37.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. @@ -34,10 +34,15 @@ for hrtype, hrdescr, hrunits, hrsize, hrused in info: if hrtype in [ ".1.3.6.1.2.1.25.2.1.2", ".1.3.6.1.2.1.25.2.1.3" ]: - if int(hrsize) > 0: # some device have zero (broken) values + if saveint(hrsize) > 1: # some device have zero (broken) values return [(None, "memused_default_levels")] -def check_hr_mem(item, params, info): +def check_hr_mem(_no_item, params, info): + # This check does not yet support averaging. We need to + # convert it to mem.include + if type(params) == dict: + params = params["levels"] + usage = {} for hrtype, hrdescr, hrunits, hrsize, hrused in info: if hrtype in [ ".1.3.6.1.2.1.25.2.1.2", @@ -47,7 +52,7 @@ # We use only the first entry of each type. We have # seen devices (pfSense), that have lots of additional # entries that are not useful. 
- if hrtype not in usage: + if hrtype not in usage and hrdescr != "Virtual memory": usage[hrtype] = (size, used) # Account for cached memory (this works at least for systems using @@ -55,14 +60,17 @@ cached_mb = 0 for hrtype, hrdescr, hrunits, hrsize, hrused in info: if hrdescr in [ "Cached memory", "Memory buffers" ]: - cached_mb += saveint(hrused) * saveint(hrunits) / 1048576.0 + hr_mem = saveint(hrused) + if hr_mem < 0: # some devices report negative used cache values... + hr_mem = saveint(hrsize) + cached_mb += hr_mem * saveint(hrunits) / 1048576.0 totalram_mb, ramused_mb = usage.get(".1.3.6.1.2.1.25.2.1.2", (0,0)) ramused_mb -= cached_mb totalvirt_mb, virtused_mb = usage.get(".1.3.6.1.2.1.25.2.1.3", (0,0)) totalmem_mb, totalused_mb = totalram_mb + totalvirt_mb, ramused_mb + virtused_mb - if totalmem_mb > 0: + if totalmem_mb > 0 and totalram_mb > 0: totalused_perc = 100 * totalused_mb / totalram_mb perfdata = [ @@ -77,30 +85,36 @@ perfdata.append(('memused', str(totalused_mb)+'MB', int(warn/100.0 * totalram_mb), int(crit/100.0 * totalram_mb), 0, totalvirt_mb)) if totalused_perc >= crit: - return (2, 'CRIT - %s, critical at %.1f%%' % (infotext, crit), perfdata) + return (2, '%s, critical at %.1f%%' % (infotext, crit), perfdata) elif totalused_perc >= warn: - return (1, 'WARN - %s, warning at %.1f%%' % (infotext, warn), perfdata) + return (1, '%s, warning at %.1f%%' % (infotext, warn), perfdata) else: - return (0, 'OK - %s' % infotext, perfdata) + return (0, '%s' % infotext, perfdata) else: perfdata.append(('memused', str(totalused_mb)+'MB', warn, crit, 0, totalram_mb)) if totalused_mb >= crit: - return (2, 'CRIT - %s, critical at %.2f GB' % (infotext, crit / 1024.0), perfdata) + return (2, '%s, critical at %.2f GB' % (infotext, crit / 1024.0), perfdata) elif totalused_mb >= warn: - return (1, 'WARN - %s, warning at %.2f GB' % (infotext, warn / 1024.0), perfdata) + return (1, '%s, warning at %.2f GB' % (infotext, warn / 1024.0), perfdata) else: - return (0, 'OK - %s' % infotext, perfdata) + return (0, '%s' % infotext, perfdata) return (3, "Invalid information. Total memory is empty.") -check_info["hr_mem"] = (check_hr_mem, "Memory used", 1, inventory_hr_mem) -snmp_info["hr_mem"] = ( ".1.3.6.1.2.1.25.2.3.1", [ - 2, # hrStorageType - 3, # hrStorageDescr - 4, # hrStorageAllocationUnits - 5, # hrStorageSize - 6, # hrStorageUsed -] ) -snmp_scan_functions["hr_mem"] = lambda oid: \ - not not oid('.1.3.6.1.2.1.25.1.1.0') # HOST-RESOURCES-MIB::hrSystemUptime.0 -checkgroup_of['hr_mem'] = "memory" +check_info["hr_mem"] = { + 'check_function': check_hr_mem, + 'inventory_function': inventory_hr_mem, + 'service_description': 'Memory used', + 'has_perfdata': True, + 'snmp_info': ('.1.3.6.1.2.1.25.2.3.1', [ + 2, # hrStorageType + 3, # hrStorageDescr + 4, # hrStorageAllocationUnits + 5, # hrStorageSize + 6, # hrStorageUsed + ]), + # HOST-RESOURCES-MIB::hrSystemUptime.0 + 'snmp_scan_function': lambda oid: \ + not not oid('.1.3.6.1.2.1.25.1.1.0'), + 'group': 'memory', +} diff -Nru check-mk-1.2.2p3/htdocs/actions.py check-mk-1.2.6p12/htdocs/actions.py --- check-mk-1.2.2p3/htdocs/actions.py 2013-11-05 09:23:08.000000000 +0000 +++ check-mk-1.2.6p12/htdocs/actions.py 2015-06-24 09:48:37.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . 
\ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. @@ -34,7 +34,7 @@ if action == "reschedule": action_reschedule() else: - raise MKGeneralException("Invalid action '%s'" % action) + raise MKGeneralException("Invalid action.") except Exception, e: html.write("['ERROR', %r]\n" % str(e)) @@ -57,10 +57,10 @@ if wait_svc: wait_spec = u'%s;%s' % (host, wait_svc) - add_filter = "Filter: service_description = %s\n" % wait_svc + add_filter = "Filter: service_description = %s\n" % lqencode(wait_svc) else: wait_spec = spec - add_filter = "Filter: service_description = %s\n" % service + add_filter = "Filter: service_description = %s\n" % lqencode(service) else: cmd = "HOST" what = "host" @@ -70,7 +70,7 @@ try: now = int(time.time()) - html.live.command("[%d] SCHEDULE_FORCED_%s_CHECK;%s;%d" % (now, cmd, spec, now), site) + html.live.command("[%d] SCHEDULE_FORCED_%s_CHECK;%s;%d" % (now, cmd, lqencode(spec), now), site) html.live.set_only_sites([site]) query = u"GET %ss\n" \ "WaitObject: %s\n" \ @@ -79,7 +79,7 @@ "WaitTrigger: check\n" \ "Columns: last_check state plugin_output\n" \ "Filter: host_name = %s\n%s" \ - % (what, wait_spec, now, config.reschedule_timeout * 1000, host, add_filter) + % (what, lqencode(wait_spec), now, config.reschedule_timeout * 1000, lqencode(host), add_filter) row = html.live.query_row(query) html.live.set_only_sites() last_check = row[0] diff -Nru check-mk-1.2.2p3/htdocs/bi.css check-mk-1.2.6p12/htdocs/bi.css --- check-mk-1.2.2p3/htdocs/bi.css 2013-11-05 09:23:08.000000000 +0000 +++ check-mk-1.2.6p12/htdocs/bi.css 2015-06-24 09:48:38.000000000 +0000 @@ -82,7 +82,7 @@ display:inline-block; } -.aggrtree img.assumption { +.aggrtree img.assumption, .aggrtree img.bi.icon { width: 13px; height: 13px; margin: 0; @@ -90,6 +90,13 @@ vertical-align: middle; } +.aggrtree img.bi.icon { + margin-right: 5px; + margin-left: 0px; + position: relative; + top: -1px; +} + b.bullet { color: #aaa; margin: 0px 5px; diff -Nru check-mk-1.2.2p3/htdocs/bi.py check-mk-1.2.6p12/htdocs/bi.py --- check-mk-1.2.2p3/htdocs/bi.py 2013-11-05 09:23:08.000000000 +0000 +++ check-mk-1.2.6p12/htdocs/bi.py 2015-09-21 10:59:54.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. @@ -24,8 +24,7 @@ # to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, # Boston, MA 02110-1301 USA. 
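
The actions.py hunk above routes every user-controlled value through lqencode() before embedding it in a Livestatus query. The point of such an encoder is to keep newlines out of values, since a newline would let a caller append arbitrary filter or command lines; a minimal sketch of the idea (the real helper may do more than this):

    def lqencode(s):
        # strip newlines so a value cannot inject additional query lines
        return s.replace("\n", "")

    service = "CPU load\nFilter: host_name = evil"   # hostile input
    query = "GET services\nFilter: description = %s\n" % lqencode(service)
    print(query)
    # GET services
    # Filter: description = CPU loadFilter: host_name = evil
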
-import config, re, pprint, time -import weblib, htmllib +import config, re, pprint, time, views from lib import * @@ -79,8 +78,8 @@ UNKNOWN = 3 UNAVAIL = 4 -service_state_names = { OK:"OK", WARN:"WARN", CRIT:"CRIT", UNKNOWN:"UNKNOWN", PENDING:"PENDING", UNAVAIL:"UNAVAILABLE"} -host_state_names = { 0:"UP", 1:"DOWN", 2:"UNREACHABLE" } +service_state_names = { OK:_("OK"), WARN:_("WARN"), CRIT:_("CRIT"), UNKNOWN:_("UNKNOWN"), PENDING:_("PENDING"), UNAVAIL:_("UNAVAILABLE")} +host_state_names = { 0:_("UP"), 1:_("DOWN"), 2:_("UNREACHABLE") } AGGR_HOST = 0 AGGR_MULTI = 1 @@ -254,6 +253,9 @@ used_cache = True return # In this case simply skip further compilations + if not config.aggregations and not config.host_aggregations: + return # nothing to do, BI not used + # If we have previously only partly compiled and now there is no # filter, then throw away partly compiled data. if (cache["compiled_hosts"] or cache["compiled_groups"]) \ @@ -315,8 +317,17 @@ single_affected_hosts = [] for aggr_type, aggregations in aggr_list: for entry in aggregations: + if entry[0] == config.DISABLED: + continue + + if entry[0] == config.HARD_STATES: + use_hard_states = True + entry = entry[1:] + else: + use_hard_states = False + if len(entry) < 3: - raise MKConfigError(_("

<h1>Invalid aggregation %s'</h1>" + raise MKConfigError(_("<h1>Invalid aggregation %s</h1>
" "Must have at least 3 entries (has %d)") % (entry, len(entry))) if type(entry[0]) == list: @@ -337,6 +348,7 @@ for this_entry in new_entries: remove_empty_nodes(this_entry) + this_entry["use_hard_states"] = use_hard_states new_entries = [ e for e in new_entries if len(e["nodes"]) > 0 ] @@ -518,7 +530,8 @@ if middle in g_services_by_hostname: entries = [ ((e[0], host_re), e[1]) for e in g_services_by_hostname[middle] ] host_re = "(.*)" - elif not honor_site and not '*' in host_re and not '$' in host_re and not '|' in host_re: + elif not honor_site and not '*' in host_re and not '$' in host_re \ + and not '|' in host_re and not '[' in host_re: # Exact host match entries = [ ((e[0], host_re), e[1]) for e in g_services_by_hostname.get(host_re, []) ] @@ -562,14 +575,14 @@ if host_matches != None: if what == config.FOREACH_CHILD: list_of_matches = [ host_matches + (child,) for child in childs ] - if what == config.FOREACH_PARENT: + elif what == config.FOREACH_PARENT: list_of_matches = [ host_matches + (parent,) for parent in parents ] else: list_of_matches = [ host_matches ] - for host_matches in list_of_matches: + for matched_host in list_of_matches: if service_re == config.HOST_STATE: - matches.add(host_matches) + matches.add(matched_host) else: for service in services: mo = (service_re, service) @@ -578,7 +591,7 @@ m = regex(service_re).match(service) if m: svc_matches = tuple(m.groups()) - matches.add(host_matches + svc_matches) + matches.add(matched_host + svc_matches) else: service_nomatch_cache.add(mo) @@ -694,14 +707,24 @@ global g_remaining_refs g_remaining_refs = [] + # Convert new dictionary style rule into old tuple based + # format + if type(rule) == dict: + rule = ( + rule.get("title", _("Untitled BI rule")), + rule.get("params", []), + rule.get("aggregation", "worst"), + rule.get("nodes", []) + ) + if len(rule) != 4: - raise MKConfigError(_("

<h1>Invalid aggregation rule</h1>" + raise MKConfigError(_("Invalid aggregation rule<br><br>
" "Aggregation rules must contain four elements: description, argument list, " "aggregation function and list of nodes. Your rule has %d elements: " "
<pre>%s</pre>
") % (len(rule), pprint.pformat(rule))) if lvl == 50: - raise MKConfigError(_("

<h1>Depth limit reached</h1>" + raise MKConfigError(_("Depth limit reached<br><br>
" "The nesting level of aggregations is limited to 50. You either configured " "too many levels or built an infinite recursion. This happened in rule
<pre>%s</pre>
") % pprint.pformat(rule)) @@ -710,7 +733,7 @@ # check arguments and convert into dictionary if len(arglist) != len(args): - raise MKConfigError(_("

<h1>Invalid rule usage</h1>" + raise MKConfigError(_("Invalid rule usage<br><br>
" "The rule '%s' needs %d arguments: %s
" "You've specified %d arguments: %s") % ( description, len(arglist), repr(arglist), len(args), repr(args))) @@ -872,7 +895,8 @@ def compile_leaf_node(host_re, service_re = config.HOST_STATE): found = [] honor_site = SITE_SEP in host_re - if not honor_site and not '*' in host_re and not '$' in host_re and not '|' in host_re: + if not honor_site and not '*' in host_re and not '$' in host_re \ + and not '|' in host_re and '[' not in host_re: entries = [ ((e[0], host_re), e[1]) for e in g_services_by_hostname.get(host_re, []) ] else: @@ -946,20 +970,6 @@ service_nomatch_cache = set([]) -regex_cache = {} -def regex(r): - rx = regex_cache.get(r) - if rx: - return rx - try: - rx = re.compile(r) - except Exception, e: - raise MKConfigError(_("Invalid regular expression '%s': %s") % (r, e)) - regex_cache[r] = rx - return rx - - - # _____ _ _ # | ____|_ _____ ___ _ _| |_(_) ___ _ __ @@ -984,19 +994,20 @@ # Execution of the trees. Returns a tree object reflecting # the states of all nodes def execute_tree(tree, status_info = None): + use_hard_states = tree["use_hard_states"] if status_info == None: required_hosts = tree["reqhosts"] status_info = get_status_info(required_hosts) - return execute_node(tree, status_info) + return execute_node(tree, status_info, use_hard_states) -def execute_node(node, status_info): +def execute_node(node, status_info, use_hard_states): if node["type"] == NT_LEAF: - return execute_leaf_node(node, status_info) + return execute_leaf_node(node, status_info, use_hard_states) else: - return execute_rule_node(node, status_info) + return execute_rule_node(node, status_info, use_hard_states) -def execute_leaf_node(node, status_info): +def execute_leaf_node(node, status_info, use_hard_states): site, host = node["host"] service = node.get("service") @@ -1004,8 +1015,13 @@ # Get current state of host and services status = status_info.get((site, host)) if status == None: - return ({ "state" : MISSING, "output" : _("Host %s not found") % host}, None, node) - host_state, host_output, service_state = status + return ({ + "state" : MISSING, + "output" : _("Host %s not found") % host, + "in_downtime" : False, + "acknowledged" : False, + }, None, node) + host_state, host_hard_state, host_output, host_in_downtime, host_acknowledged, service_state = status # Get state assumption from user if service: @@ -1018,32 +1034,64 @@ if service: for entry in service_state: # list of all services of that host if entry[0] == service: - state, has_been_checked, output = entry[1:] + state, has_been_checked, output, hard_state, attempt, max_attempts, downtime_depth, acknowledged = entry[1:9] if has_been_checked == 0: output = _("This service has not been checked yet") state = PENDING - state = {"state":state, "output":output} + if use_hard_states: + st = hard_state + else: + st = state + state = { + "state" : st, + "output" : output, + "in_downtime" : downtime_depth > 0, + "acknowledged" : not not acknowledged, + } if state_assumption != None: - assumed_state = {"state":state_assumption, - "output" : _("Assumed to be %s") % service_state_names[state_assumption]} + assumed_state = { + "state" : state_assumption, + "output" : _("Assumed to be %s") % service_state_names[state_assumption], + "in_downtime" : downtime_depth > 0, + "acknowledged" : not not acknowledged, + } + else: assumed_state = None return (state, assumed_state, node) - return ({"state":MISSING, "output": _("This host has no such service")}, None, node) + return ({ + "state" : MISSING, + "output" : _("This host has no such service"), + 
"in_downtime" : False, + "acknowledged" : False, + }, None, node) else: - aggr_state = {0:OK, 1:CRIT, 2:UNKNOWN, -1:PENDING}[host_state] - state = {"state":aggr_state, "output" : host_output} + if use_hard_states: + st = host_hard_state + else: + st = host_state + aggr_state = {0:OK, 1:CRIT, 2:UNKNOWN, -1:PENDING}[st] + state = { + "state" : aggr_state, + "output" : host_output, + "in_downtime" : host_in_downtime, + "acknowledged" : host_acknowledged, + } if state_assumption != None: - assumed_state = {"state": state_assumption, - "output" : _("Assumed to be %s") % host_state_names[state_assumption]} + assumed_state = { + "state" : state_assumption, + "output" : _("Assumed to be %s") % host_state_names[state_assumption], + "in_downtime" : host_in_downtime, + "acknowledged" : host_acknowledged, + } else: assumed_state = None return (state, assumed_state, node) -def execute_rule_node(node, status_info): +def execute_rule_node(node, status_info, use_hard_states): # get aggregation function funcspec = node["func"] parts = funcspec.split('!') @@ -1058,11 +1106,23 @@ subtrees = [] node_states = [] assumed_states = [] + downtime_states = [] + ack_states = [] # Needed for computing the acknowledgement of non-OK nodes one_assumption = False for n in node["nodes"]: - result = execute_node(n, status_info) # state, assumed_state, node [, subtrees] + result = execute_node(n, status_info, use_hard_states) # state, assumed_state, node [, subtrees] subtrees.append(result) + # Assume items in downtime as CRIT when computing downtime state + downtime_states.append(({"state": result[0]["in_downtime"] and 2 or 0, "output" : ""}, result[2])) + + # Assume non-OK nodes that are acked as OK + if result[0]["acknowledged"]: + acked_state = 0 + else: + acked_state = result[0]["state"] + ack_states.append(({"state": acked_state, "output" : ""}, result[2])) + node_states.append((result[0], result[2])) if result[1] != None: assumed_states.append((result[1], result[2])) @@ -1071,9 +1131,19 @@ # no assumption, take real state into assumption array assumed_states.append(node_states[-1]) + downtime_state = func(*([downtime_states] + funcargs)) state = func(*([node_states] + funcargs)) + state["in_downtime"] = downtime_state["state"] >= 2 + if state["state"] > 0: # Non-OK-State -> compute acknowledgedment + ack_state = func(*([ack_states] + funcargs)) + state["acknowledged"] = ack_state["state"] == 0 # would be OK if acked problems would be OK + else: + state["acknowledged"] = False + if one_assumption: assumed_state = func(*([assumed_states] + funcargs)) + assumed_state["in_downtime"] = state["in_downtime"] + assumed_state["acknowledged"] = state["acknowledged"] else: assumed_state = None return (state, assumed_state, node, subtrees) @@ -1101,7 +1171,7 @@ html.live.set_auth_domain('bi') data = html.live.query( "GET hosts\n" - "Columns: name state plugin_output services_with_info\n" + "Columns: name state hard_state plugin_output scheduled_downtime_depth acknowledged services_with_fullstate\n" + filter) html.live.set_auth_domain('read') tuples += [((site, e[0]), e[1:]) for e in data] @@ -1111,17 +1181,17 @@ # This variant of the function is configured not with a list of # hosts but with a livestatus filter header and a list of columns # that need to be fetched in any case -def get_status_info_filtered(filter_header, only_sites, limit, add_columns, fetch_parents = True): - columns = [ "name", "state", "plugin_output", "services_with_info", "parents" ] + add_columns +def get_status_info_filtered(filter_header, only_sites, 
limit, add_columns, fetch_parents = True, bygroup=False): + columns = [ "name", "host_name", "state", "hard_state", "plugin_output", "scheduled_downtime_depth", + "acknowledged", "services_with_fullstate", "parents" ] + add_columns html.live.set_only_sites(only_sites) html.live.set_prepend_site(True) - query = "GET hosts\n" + query = "GET hosts%s\n" % (bygroup and "bygroup" or "") query += "Columns: " + (" ".join(columns)) + "\n" query += filter_header - if config.debug_livestatus_queries \ and html.output_format == "html" and 'W' in html.display_options: html.write('
' @@ -1147,10 +1217,10 @@ if fetch_parents: parent_filter = [] for row in data: - parent_filter += [ 'Filter: name = %s\n' % p for p in row[5] ] + parent_filter += [ 'Filter: name = %s\n' % p for p in row[8] ] parent_filter_txt = ''.join(parent_filter) parent_filter_txt += 'Or: %d\n' % len(parent_filter) - for row in get_status_info_filtered(filter_header, only_sites, limit, add_columns, False): + for row in get_status_info_filtered(filter_header, only_sites, limit, add_columns, False, bygroup): if row['name'] not in hostnames: rows.append(row) @@ -1199,9 +1269,14 @@ return ll[n-1][1] -def aggr_nth_state(nodelist, n, worst_state): - states = [ i[0]["state"] for i in nodelist ] - state = x_best_state(states, n) +def aggr_nth_state(nodelist, n, worst_state, ignore_states = None): + states = [ i[0]["state"] for i in nodelist if not ignore_states or i[0]["state"] not in ignore_states ] + # In case of the ignored states it might happen that the states list is empty. Use the + # OK state in this case. + if not states: + state = OK + else: + state = x_best_state(states, n) # limit to worst state if state_weight(state) > state_weight(worst_state): @@ -1209,11 +1284,11 @@ return { "state" : state, "output" : "" } -def aggr_worst(nodes, n = 1, worst_state = CRIT): - return aggr_nth_state(nodes, -int(n), int(worst_state)) +def aggr_worst(nodes, n = 1, worst_state = CRIT, ignore_states = None): + return aggr_nth_state(nodes, -int(n), int(worst_state), ignore_states) -def aggr_best(nodes, n = 1, worst_state = CRIT): - return aggr_nth_state(nodes, int(n), int(worst_state)) +def aggr_best(nodes, n = 1, worst_state = CRIT, ignore_states = None): + return aggr_nth_state(nodes, int(n), int(worst_state), ignore_states) config.aggregation_functions["worst"] = aggr_worst config.aggregation_functions["best"] = aggr_best @@ -1226,18 +1301,33 @@ def aggr_countok(nodes, needed_for_ok=2, needed_for_warn=1): states = [ i[0]["state"] for i in nodes ] - num_ok = len([s for s in states if s == 0 ]) + num_ok = len([s for s in states if s == 0 ]) + num_nonok = len([s for s in states if s > 0 ]) + num_pending = len(states) - num_ok - num_nonok + num_nodes = num_ok + num_nonok + + # We need to handle the special case "PENDING" separately. + # Example: count is set to 50%. You have 10 nodes, all of + # which are PENDING, then the outcome must be PENDING, not + # CRIT. + if num_nodes == 0: # All are pending + return { "state": -1, "output": "" } # counts can be specified as integer (e.g. '2') or # as percentages (e.g. '70%'). 
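# Example: with ten usable nodes a count spec of '70%' converts to an
# absolute ok_count of 7 (assuming aggr_countok_convert truncates to an
# integer), while an integer spec such as 7 is used unchanged.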
+ ok_count = aggr_countok_convert(needed_for_ok, num_nodes) + warn_count = aggr_countok_convert(needed_for_warn, num_nodes) + + # Enough nodes are OK -> state is OK + if num_ok >= ok_count: + return { "state": 0, "output": "" } + # Enough nodes OK in order to trigger warn level -> WARN + elif num_ok >= warn_count: + return { "state": 1, "output": "" } - if num_ok >= aggr_countok_convert(needed_for_ok, len(states)): - return { "state" : 0, "output" : "" } - elif num_ok >= aggr_countok_convert(needed_for_warn, len(states)): - return { "state" : 1, "output" : "" } else: - return { "state" : 2, "output" : "" } + return { "state": 2, "output": "" } config.aggregation_functions["count_ok"] = aggr_countok @@ -1315,23 +1405,23 @@ save_assumptions() def ajax_save_treestate(): - path_id = html.var("path") + path_id = html.var_utf8("path") current_ex_level, path = path_id.split(":", 1) current_ex_level = int(current_ex_level) saved_ex_level = load_ex_level() if saved_ex_level != current_ex_level: - weblib.set_tree_states('bi', {}) - weblib.set_tree_state('bi', path, html.var("state") == "open") - weblib.save_tree_states() + html.set_tree_states('bi', {}) + html.set_tree_state('bi', path, html.var("state") == "open") + html.save_tree_states() save_ex_level(current_ex_level) def ajax_render_tree(): - aggr_group = html.var("group") + aggr_group = html.var_utf8("group") reqhosts = [ tuple(sitehost.split('#')) for sitehost in html.var("reqhosts").split(',') ] - aggr_title = html.var("title") + aggr_title = html.var_utf8("title") omit_root = not not html.var("omit_root") boxes = not not html.var("boxes") only_problems = not not html.var("only_problems") @@ -1367,11 +1457,11 @@ def render_tree_foldable(row, boxes, omit_root, expansion_level, only_problems, lazy): saved_expansion_level = load_ex_level() - treestate = weblib.get_tree_states('bi') + treestate = html.get_tree_states('bi') if expansion_level != saved_expansion_level: treestate = {} - weblib.set_tree_states('bi', treestate) - weblib.save_tree_states() + html.set_tree_states('bi', treestate) + html.save_tree_states() def render_subtree(tree, path, show_host): is_leaf = len(tree) == 3 @@ -1444,7 +1534,7 @@ h += aggr_render_node(tree, tree[2]["title"], mc, show_host) if not is_empty: - h += '
\n") + end_footnote_links() def iconbutton(what, url, target="side", handler="", name="", css_class = ""): if target == "side": @@ -103,58 +115,115 @@ def nagioscgilink(text, target): html.write("" % \ - (defaults.url_prefix, target, htmllib.attrencode(text))) + (defaults.url_prefix, target, html.attrencode(text))) def heading(text): - html.write("

<h3>%s</h3>\n" % htmllib.attrencode(text)) + html.write("<h3>%s</h3>
\n" % html.attrencode(text)) +# Load current state of user's sidebar. Convert from +# old format (just a snapin list) to the new format +# (dictionary) on the fly def load_user_config(): path = config.user_confdir + "/sidebar.mk" try: user_config = eval(file(path).read()) + if type(user_config) == list: + user_config = { + "snapins" : user_config, + "fold": False, + } except: - user_config = config.sidebar + user_config = { + "snapins": config.sidebar, + "fold": False, + } # Remove entries the user is not allowed for or which have state "off" (from legacy version) # silently skip configured but not existant snapins - return [ entry for entry in user_config - if entry[0] in sidebar_snapins - and entry[1] != "off" - and config.may("sidesnap." + entry[0])] + user_config["snapins"] = [ + entry for entry in user_config["snapins"] + if entry[0] in sidebar_snapins + and entry[1] != "off" + and config.may("sidesnap." + entry[0])] + + return user_config def save_user_config(user_config): if config.may("general.configure_sidebar"): config.save_user_file("sidebar", user_config) def sidebar_head(): - html.write('
' - '' + html.write('\n' % (_("Go to main overview"), config.start_url, defaults.check_mk_version)) + '
\n' % (_("Go to main overview"), html.attrencode(config.user.get("start_url") or config.start_url), defaults.check_mk_version)) + +def render_messages(): + for msg in notify.get_gui_messages(): + if 'gui_hint' in msg['methods']: + html.write('\n') + if 'gui_popup' in msg['methods']: + html.javascript('alert(\'%s\'); mark_message_read("%s")' % + (html.attrencode(msg['text']).replace('\n', '\\n'), msg['id'])) + +def ajax_get_messages(): + render_messages() + +def ajax_message_read(): + try: + notify.delete_gui_message(html.var('id')) + html.write("OK") + except: + if config.debug: + raise + html.write("ERROR") def sidebar_foot(): html.write('') + + if load_user_config()["fold"]: + html.final_javascript("foldSidebar();") + # Standalone sidebar def page_side(): if not config.may("general.see_sidebar"): return + if config.sidebar_notify_interval is not None: + interval = config.sidebar_notify_interval + else: + interval = 'null' html.html_head(_("Check_MK Sidebar"), javascripts=["sidebar"], stylesheets=["sidebar", "status"]) - html.write('\n') + html.write('\n' % interval) html.write('
\n') views.load_views() @@ -163,8 +232,12 @@ refresh_snapins = [] restart_snapins = [] - html.write('
') - for name, state in user_config: + scrolling = '' + if config.sidebar_show_scrollbar: + scrolling = ' class=scroll' + + html.write('
' % scrolling) + for name, state in user_config["snapins"]: if not name in sidebar_snapins or not config.may("sidesnap." + name): continue # Performs the initial rendering and might return an optional refresh url, @@ -188,10 +261,11 @@ html.write("refresh_snapins = %r;\n" % refresh_snapins) html.write("restart_snapins = %r;\n" % restart_snapins) html.write("sidebar_scheduler();\n") - html.write("window.onresize = function() { setSidebarHeight(); }\n") + html.write("window.onresize = function() { setSidebarHeight(); };\n") + html.write("if (contentFrameAccessible()) { update_content_location(); };\n") html.write("\n") - html.write("\n") + html.body_end() def render_snapin(name, state): snapin = sidebar_snapins.get(name) @@ -265,17 +339,27 @@ "

<h2>%s</h2>

\n" "

<p>%s</p>

" % (_('Error'), e)) +def ajax_fold(): + config = load_user_config() + config["fold"] = not not html.var("fold") + save_user_config(config) + + def ajax_openclose(): config = load_user_config() - new_config = [] - for name, usage in config: + new_snapins = [] + for name, usage in config["snapins"]: if html.var("name") == name: usage = html.var("state") if usage != "off": - new_config.append((name, usage)) - save_user_config(new_config) + new_snapins.append((name, usage)) + config["snapins"] = new_snapins + save_user_config(config) def ajax_snapin(): + # Update online state of the user (if enabled) + userdb.update_user_access_time(config.user_id) + snapname = html.var("name") if snapname: snapnames = [ snapname ] @@ -291,8 +375,8 @@ snapin = sidebar_snapins.get(snapname) # When restart snapins are about to be refreshed, only render - # them, when core restarted after they have been redendered - # before + # them, when the core has been restarted after their initial + # rendering if not snapin.get('refresh') and snapin.get('restart'): since = float(html.var('since', 0)) newest = since @@ -326,11 +410,11 @@ snapname_to_move = html.var("name") beforename = html.var("before") - snapin_config = load_user_config() + user_config = load_user_config() # Get current state of snaping being moved (open, closed) snap_to_move = None - for name, state in snapin_config: + for name, state in user_config["snapins"]: if name == snapname_to_move: snap_to_move = name, state if not snap_to_move: @@ -338,29 +422,32 @@ # Build new config by removing snaping at current position # and add before "beforename" or as last if beforename is not set - new_config = [] - for name, state in snapin_config: + new_snapins = [] + for name, state in user_config["snapins"]: if name == snapname_to_move: continue # remove at this position elif name == beforename: - new_config.append(snap_to_move) - new_config.append( (name, state) ) + new_snapins.append(snap_to_move) + new_snapins.append( (name, state) ) if not beforename: # insert as last - new_config.append(snap_to_move) - save_user_config(new_config) + new_snapins.append(snap_to_move) + + user_config["snapins"] = new_snapins + save_user_config(user_config) def page_add_snapin(): if not config.may("general.configure_sidebar"): raise MKGeneralException(_("You are not allowed to change the sidebar.")) html.header(_("Available snapins"), stylesheets=["pages", "sidebar", "status"]) - used_snapins = [name for (name, state) in load_user_config()] + used_snapins = [name for (name, state) in load_user_config()["snapins"]] addname = html.var("name") if addname in sidebar_snapins and addname not in used_snapins and html.check_transaction(): - user_config = load_user_config() + [(addname, "open")] + user_config = load_user_config() + user_config["snapins"].append((addname, "open")) save_user_config(user_config) - used_snapins = [name for (name, state) in load_user_config()] + used_snapins = [name for (name, state) in load_user_config()["snapins"]] html.reload_sidebar() names = sidebar_snapins.keys() @@ -375,7 +462,7 @@ snapin = sidebar_snapins[name] title = snapin["title"] description = snapin.get("description", "") - transid = html.fresh_transid() + transid = html.get_transid() url = 'sidebar_add_snapin.py?name=%s&_transid=%s&pos=top' % (name, transid) html.write('
number of services - num_svcs = dict(html.live.query_table( - "GET services\n" - "Columns: host_name\n" - "Stats: check_command ~ ^check_mk-")) - - for host_name, check_interval in intervals: - num_services = num_svcs.get(host_name, 0) - scheduled_rate += float(num_services) / check_interval / 60.0 + "Stats: suminv check_interval\n")[0] / 60.0 percentage = 100.0 * current_rate / scheduled_rate; - title = _("Scheduled check rate: %.1f/s, current rate: %.1f/s, that is " + title = _("Scheduled service check rate: %.1f/s, current rate: %.1f/s, that is " "%.0f%% of the scheduled rate" % (scheduled_rate, current_rate, percentage)) @@ -468,6 +526,7 @@ html.write(repr([scheduled_rate, program_start, percentage, last_perc, str(title)])) + def ajax_switch_masterstate(): site = html.var("site") column = html.var("switch") @@ -479,6 +538,8 @@ ( "execute_service_checks", 0) : "STOP_EXECUTING_SVC_CHECKS", ( "execute_host_checks", 1) : "START_EXECUTING_HOST_CHECKS", ( "execute_host_checks", 0) : "STOP_EXECUTING_HOST_CHECKS", + ( "enable_flap_detection", 1) : "ENABLE_FLAP_DETECTION", + ( "enable_flap_detection", 0) : "DISABLE_FLAP_DETECTION", ( "process_performance_data", 1) : "ENABLE_PERFORMANCE_DATA", ( "process_performance_data", 0) : "DISABLE_PERFORMANCE_DATA", ( "enable_event_handlers", 1) : "ENABLE_EVENT_HANDLERS", @@ -494,7 +555,7 @@ html.live.set_only_sites() render_master_control() else: - html.write(_("Command %s/%d not found") % (column, state)) + html.write(_("Command %s/%d not found") % (html.attrencode(column), state)) def ajax_del_bookmark(): try: @@ -505,7 +566,7 @@ try: del bookmarks[num] except IndexError: - raise MKGeneralException(_("Unknown bookmark id: %d. This is probably a problem with reload or browser history. Please try again.") % htmllib.attrencode(num)) + raise MKGeneralException(_("Unknown bookmark id: %d. This is probably a problem with reload or browser history. Please try again.") % html.attrencode(num)) save_bookmarks(bookmarks) render_bookmarks() @@ -514,20 +575,44 @@ href = html.var("href") if title and href: bookmarks = load_bookmarks() - # We try to remove http://hostname/some/path/check_mk from the - # URI. That keeps the configuration files (bookmarks) portable. - # Problem here: We have not access to our own URL, only to the - # path part. The trick: we use the Referrer-field from our - # request. That points to the sidebar. referer = html.req.headers_in.get("Referer") + if referer: - while '/' in referer and referer.split('/')[0] == href.split('/')[0]: - referer = referer.split('/', 1)[1] - href = href.split('/', 1)[1] + ref_p = urlparse.urlsplit(referer) + url_p = urlparse.urlsplit(href) + + # If http/https or user, pw, host, port differ, don't try to shorten + # the URL to be linked. Simply use the full URI + if ref_p.scheme == url_p.scheme and ref_p.netloc == url_p.netloc: + # We try to remove http://hostname/some/path/check_mk from the + # URI. That keeps the configuration files (bookmarks) portable. + # Problem here: We have not access to our own URL, only to the + # path part. The trick: we use the Referrer-field from our + # request. That points to the sidebar. + referer = ref_p.path + href = url_p.path + if url_p.query: + href += '?' + url_p.query + removed = 0 + while '/' in referer and referer.split('/')[0] == href.split('/')[0]: + referer = referer.split('/', 1)[1] + href = href.split('/', 1)[1] + removed += 1 + + if removed == 1: + # removed only the first "/". This should be an absolute path. 
+ href = '/' + href + elif '/' in referer: + # there is at least one other directory layer in the path, make + # the link relative to the sidebar.py's topdir. e.g. for pnp + # links in OMD setups + href = '../' + href + bookmarks.append((title, href)) save_bookmarks(bookmarks) render_bookmarks() + def page_edit_bookmark(): html.header(_("Edit Bookmark")) try: @@ -536,7 +621,7 @@ raise MKGeneralException(_("Invalid bookmark id.")) bookmarks = load_bookmarks() if n >= len(bookmarks): - raise MKGeneralException(_("Unknown bookmark id: %d. This is probably a problem with reload or browser history. Please try again.") % htmllib.attrencode(n)) + raise MKGeneralException(_("Unknown bookmark id: %d. This is probably a problem with reload or browser history. Please try again.") % html.attrencode(n)) if html.var("save") and html.check_transaction(): title = html.var("title") @@ -569,3 +654,371 @@ html.end_form() html.footer() + +def ajax_tag_tree(): + newconf = int(html.var("conf")) + tree_conf = config.load_user_file("virtual_host_tree", {"tree": 0, "cwd": {}}) + if type(tree_conf) == int: + tree_conf = {"cwd":{}} # convert from old style + tree_conf["tree"] = newconf + config.save_user_file("virtual_host_tree", tree_conf) + +def ajax_tag_tree_enter(): + path = html.var("path") and html.var("path").split("|") or [] + tree_conf = config.load_user_file("virtual_host_tree", {"tree": 0, "cwd": {}}) + tree_conf["cwd"][tree_conf["tree"]] = path + config.save_user_file("virtual_host_tree", tree_conf) + + +#. +# .--Quicksearch---------------------------------------------------------. +# | ___ _ _ _ | +# | / _ \ _ _(_) ___| | _____ ___ __ _ _ __ ___| |__ | +# | | | | | | | | |/ __| |/ / __|/ _ \/ _` | '__/ __| '_ \ | +# | | |_| | |_| | | (__| <\__ \ __/ (_| | | | (__| | | | | +# | \__\_\\__,_|_|\___|_|\_\___/\___|\__,_|_| \___|_| |_| | +# | | +# +----------------------------------------------------------------------+ +# | Handles ajax search requests (like issued by the quicksearch dialog) | +# '----------------------------------------------------------------------' + +def parse_search_query(s): + types = {'h': 'hosts', 'hg': 'hostgroups', 's': 'services', 'sg': 'servicegroups'} + + found_filters = [] + if ":" in s: + regex = "(^((hg)|h|(sg)|s)| (hg|h|sg|s)):" + found = [] + matches = re.finditer(regex, s) + for match in matches: + found.append((match.group(1), match.start())) + + found_filters = [] + current_string = s + for token_type, token_offset in found[-1::-1]: + found_filters.append( (types[token_type.lstrip()], + to_regex(current_string[token_offset+len(token_type)+1:]).strip()) ) + current_string = current_string[:token_offset] + + if found_filters: + return found_filters + else: + return [("hosts", to_regex(s))] + +# Ensures the provided search string is a regex, does some basic conversion +# and then tries to verify it is a regex +def to_regex(s): + s = s.replace('*', '.*') + try: + re.compile(s) + except re.error: + raise MKGeneralException(_('Your search statement is not valid. You need to provide a regular ' 'expression (regex). <br>
For example you need to use \\\\ instead of \\ ' 'if you want to search for a single backslash.')) return s + +def is_ipaddress(s): + try: + octets = map(int, s.strip(".").split(".")) + for o in octets: + if o < 0 or o > 255: + return False + return True + except: + return False + +def plugin_matches_filters(plugin, used_filters): + if not ((len(used_filters) > 1) == (plugin.get("required_types") != None)): + return False + + if len(used_filters) == 1: # Simple filters + if plugin.get("lq_table", plugin.get("id")) != used_filters[0][0]: + return False + else: # Multi filters + # used_filters example [ ('services', 'CPU'), ('hosts', 'localhost'), ('services', 'Mem') ] + search_types = list(set(map(lambda x: x[0], used_filters))) + # Only allow plugins with specified "required_types" + if not plugin.get("required_types"): + return False + + # If search_types does not include all required fields -> do not use + for entry in plugin["required_types"]: + if entry not in search_types: + return False + + # If there are unknown types in the search -> do not use + for entry in search_types: + if entry not in plugin["required_types"] + plugin.get("optional_types", []): + return False + return True + +def search_url_tmpl(used_filters, row, exact = True): + if not row: + def find_plugin(filters): + for entry in search_plugins: + if plugin_matches_filters(entry, filters): + return entry, {}, {} + return None, None, None + plugin, row_options, row_data = find_plugin(used_filters) + if not plugin: # find a plugin for the first used filter + plugin, row_options, row_data = find_plugin([used_filters[0]]) + if not plugin: + return "" # shouldn't happen.. + else: + plugin, row_options, row_data = row + + def find_tmpl(): + if exact: # Get the match template + if plugin.get("match_url_tmpl_func"): + return False, plugin['match_url_tmpl_func'](used_filters, row_data) + if plugin.get("match_url_tmpl"): + return False, plugin.get("match_url_tmpl") + + # Default match templates + ty = plugin.get("dftl_url_tmpl", plugin.get("id")) + if ty == 'hosts': + return False, 'view.py?view_name=host&host=%(name)s&site=%(site)s' + elif ty == 'hostgroups': + return False, 'view.py?view_name=hostgroup&hostgroup=%(name)s&site=%(site)s' + elif ty == 'servicegroups': + return False, 'view.py?view_name=servicegroup&servicegroup=%(name)s&site=%(site)s' + elif ty == 'services': + return True, 'view.py?view_name=allservices&service_regex=%(name)s&site=%(site)s' + else: # Get the search template + if plugin.get("search_url_tmpl_func"): + return False, plugin['search_url_tmpl_func'](used_filters, row_data) + if plugin.get("search_url_tmpl"): + return False, plugin.get("search_url_tmpl") + + # Default search templates + ty = plugin.get("dftl_url_tmpl", plugin.get("id")) + if ty == 'hosts': + return False, 'view.py?view_name=searchhost&host_regex=%(name)s&filled_in=filter' + elif ty == 'hostgroups': + return False, 'view.py?view_name=hostgroups&hostgroup_regex=%(name)s&site=%(site)s' + elif ty == 'servicegroups': + return False, 'view.py?view_name=svcgroups&servicegroup_name=%(name)s&site=%(site)s' + elif ty == 'services': + return False, 'view.py?view_name=allservices&service_regex=%(name)s&site=%(site)s' + + # Search the template + escape_regex, url_tmpl = find_tmpl() + + # Some templates with single filters contain %(name)s, %(search)s, %(site)s + if len(used_filters) == 1: + if exact: + site = row_data.get("site") + name = row_data.get(get_row_name(row)) + # In case of an exact match, not the original search statement is used, 
# instead the name of the row provided by livestatus is used. This needs + # to be escaped as it is no regex + if escape_regex: + name = name.replace('\\', '\\\\') + else: + site = "" + name = used_filters[0][1] + + url_tmpl = url_tmpl % { + 'name' : html.urlencode(name), + 'search' : html.urlencode(name), + 'site' : site, + } + + return url_tmpl + + +def search_livestatus(used_filters): + try: + limit = config.quicksearch_dropdown_limit + except: + limit = 80 + + # We need to know which plugin lead to finding a particular host, so it + # is neccessary to make one query for each plugin - sorry. For example + # for the case, that a host can be found via alias or name. + data = [] + + html.live.set_prepend_site(True) + for plugin in search_plugins: + if 'filter_func' not in plugin: + continue + + if not plugin_matches_filters(plugin, used_filters): + continue + + lq_filter = plugin['filter_func'](used_filters) + if lq_filter: + lq_table = plugin.get("lq_table", plugin.get("id")) + lq_columns = plugin.get("lq_columns") + lq = "GET %s\nCache: reload\nColumns: %s\n%sLimit: %d\n" % \ + (lq_table, " ".join(lq_columns), lq_filter, limit) + #html.debug("
%s" % lq.replace("\n", "
")) + + lq_columns = [ "site" ] + lq_columns + for row in html.live.query(lq): + # Put result columns into a dict + row_dict = {} + for idx, col in enumerate(row): + row_dict[lq_columns[idx]] = col + + # The plugin itself might add more info to the row + # This is saved into an extra dict named options + options = {} + if plugin.get("match_url_tmpl_func"): + options["url"] = plugin["match_url_tmpl_func"](used_filters, row_dict) + + data.append([ plugin ] + [ options ] + [ row_dict ]) + if len(data) >= limit: + break + + for plugin in search_plugins: + if "search_func" in plugin and plugin_matches_filters(plugin, used_filters): + for row in plugin['search_func'](used_filters): + row_options, row_data = row + data.append((plugin, row_options, row_data)) + + html.live.set_prepend_site(False) + + # Apply the limit once again (search_funcs of plugins could have added some results) + data = data[:limit] + + used_keys = [] + + # Function to create a unqiue hashable key from a row + def get_key(row): + plugin, row_options, row_data = row + name = row_data.get(get_row_name(row)) + return (row_data.get("site"), row_data.get("host_name"), name) + + # Remove duplicate rows + used_keys = [] + new_data = [] + for row in data: + row_key = get_key(row) + if row_key not in used_keys: + new_data.append(row) + used_keys.append(row_key) + data = new_data + + # Sort data if its not a host filter + def sort_data(data): + sorted_data = data + def sort_fctn(a, b): + return cmp(get_key(a), get_key(b)) + data.sort(cmp = sort_fctn) + return sorted_data + + if len(used_filters) == 1 and used_filters[0][0] != "hosts": + data = sort_data(data) + + return data + + +def format_result(row, render_options): + plugin, row_options, row_data = row + name_column = get_row_name(row) + name = row_data.get(name_column) + url = row_options["url"] + css = plugin.get("css_class", plugin["id"]) + + name_append = "" + if render_options.get("display_site"): + name_append += " (%s)" % row_data.get("site") + if render_options.get("display_host"): + # Don't append the host name if its already the display name.. + if not name_column == "host_name" and row_data.get("host_name"): + name_append += " <%s>" % row_data.get("host_name") + if name_append: + name = "%s %s" % (name, name_append) + + escaped_name = name.replace('\\', '\\\\') + html.write('%s' % + (escaped_name, css, url, name)) + html.write('\n') + + +def get_row_name(row): + plugin, row_options, row_data = row + if plugin.get("qs_show"): + return plugin.get("qs_show") + elif plugin.get("lq_columns"): + return plugin.get("lq_columns")[0] + return "" + +def render_search_results(used_filters, objects, format_func = format_result): + # When results contain infos from several sites or hosts, display + # display that info in the result text + options = {} + values = {} + for row in objects: + plugin, row_options, row_data = row + name = get_row_name(row) + + for action, name in [ ("display_site", "site"), + ("display_host", "host_name") ]: + if row_data.get(name): + values.setdefault(action, row_data.get(name)) + # If this values differs from the default setting -> set is as option + if values.get(action) != row_data.get(name): + options[action] = True + + # Remove duplicate entries, i.e. with the same name and the same URL. 
+ unique = set([]) + for row in objects: + plugin, row_options, row_data = row + # Find missing urls + name = get_row_name(row) + if "url" not in row_options: + row_options["url"] = search_url_tmpl(used_filters, row) + + obj_id = (row_options["url"], name) + if obj_id not in unique: + format_func(row, options) + unique.add(obj_id) + +def process_search(q): + used_filters = parse_search_query(q) + + data = search_livestatus(used_filters) + if len(used_filters) == 1 and used_filters[0][0] == "hosts" and not data: + # When asking for hosts and no host found, try searching services instead + data = search_livestatus([("services", used_filters[0][1])]) + return data, [("services", used_filters[0][1])] + + return data, used_filters + +def ajax_search(): + q = html.var('q').strip() + if not q: + return + + data, used_filters = process_search(q) + if not data: + return + + try: + render_search_results(used_filters, data) + except Exception, e: + html.write("error") + import traceback + html.write(traceback.format_exc()) + html.write(repr(e)) + + +def search_open(): + q = html.var('q').strip() + if not q: + return + + data, used_filters = process_search(q) + if not used_filters: + return + + if data and len(data) == 1: + url = search_url_tmpl(used_filters, data[0]) + else: + url = search_url_tmpl(used_filters, data and data[0] or None, exact = False) + + html.set_http_header('Location', url) + from mod_python import apache + raise apache.SERVER_RETURN, apache.HTTP_MOVED_TEMPORARILY diff -Nru check-mk-1.2.2p3/htdocs/status.css check-mk-1.2.6p12/htdocs/status.css --- check-mk-1.2.2p3/htdocs/status.css 2013-11-05 09:23:08.000000000 +0000 +++ check-mk-1.2.6p12/htdocs/status.css 2015-09-21 10:59:54.000000000 +0000 @@ -43,6 +43,19 @@ width: 32px; } +td.svcstate.stale { + background-image: url("images/white_60percent.png"); + background-repeat: repeat; +} + +table.data tr.data td.stale, +table.data td.perfometer.stale div.title { + color: #888; + text-shadow: 0.8px 0.8px 0.8px #fff; +} + + + /* OK/UP */ .state0, .hstate0, .state0 a, .hstate0 a { background-color: #0b3; color: #fff; @@ -78,11 +91,20 @@ /* inline state markers in check output */ b.stmark { - margin-left: 4px; + margin-left: 2px; padding: 1px 3px; border-radius: 4px; font-size: 7pt; border: 1px solid #666; + position: relative; + top: -1px; +} + +/* inline icon in plugin output for URLs */ +img.pluginurl { + width: 16px; + height: 16px; + vertical-align: middle; } /* *some* problem */ @@ -216,20 +238,20 @@ } /* Row coloring in case of the different states */ -tr.odd0 { background-color: #eee; } -tr.even0 { background-color: #ddd; } +tr.odd0, tr.oddp { background-color: #eee; } +tr.even0, tr.evenp { background-color: #ddd; } td.odd0 { background-color: #eee; } td.even0 { background-color: #ddd; } -tr.odd1 { background-color: #ffc; } -tr.even1 { background-color: #ffa; } +tr.odd1 { background-color: #ffc; } +tr.even1 { background-color: #ffa; } -tr.odd2 { background-color: #fcc; } -tr.even2 { background-color: #faa; } +tr.odd2 { background-color: #fcc; } +tr.even2 { background-color: #faa; } -tr.odd3 { background-color: #ffe0a0; } -tr.even3 { background-color: #ffefaf; } +tr.odd3 { background-color: #ffe0a0; } +tr.even3 { background-color: #ffefaf; } /* Row headers in single dataset layout */ @@ -260,6 +282,10 @@ color: #666; white-space: nowrap; } +table.data td.age.staletime { + color: red; + font-weight: bold; +} table.data td.age.recent { font-weight: bold; @@ -281,8 +307,12 @@ table.data td.buttons { white-space: nowrap; + width: 1%; } 
+table.data td.buttons.visuals { + width: 75px; +} table.data td.count { text-align: right; font-weight: bold; @@ -294,15 +324,23 @@ table.data td.number { text-align: right; + white-space: nowrap; } table.data td.nobr { white-space: nowrap; } +table.data td.center { + text-align: center; +} + td.narrow { width: 10px; } +td.wide { + width: 100%; +} td.icons { white-space: nowrap; @@ -334,7 +372,7 @@ padding: 0; } -tr.data td.pnpgraph img { +tr.data td.pnpgraph div.graph > a > img { border: 1px solid #888; margin: 4px 4px 4px 4px; box-shadow: 1px 1px 3px #000000; @@ -349,3 +387,26 @@ height: 24px; border: 1px solid #666; } + +table.customvars { + border-collapse: collapse; + padding: 0px; +} +table.data tr.data td table.customvars td { + /* border: 1px solid #ccc; */ + padding: 0px 5px 0px 0px; +} +table.data tr.data td table.customvars td:last-child { + border-left: 1px solid #bbb; + padding-left: 5px; +} + +td.matchgroups span { + margin-right: 3px; + background-color: #ffffff; + border: 1px solid #eeeeee; + border-radius: 3px; + box-shadow: 0px 0px 1px #888; + padding: 0px 3px; + font-family: monospace; +} diff -Nru check-mk-1.2.2p3/htdocs/table.py check-mk-1.2.6p12/htdocs/table.py --- check-mk-1.2.2p3/htdocs/table.py 2013-11-05 09:23:08.000000000 +0000 +++ check-mk-1.2.6p12/htdocs/table.py 2015-09-21 10:59:54.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. @@ -24,21 +24,43 @@ # to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, # Boston, MA 02110-1301 USA. 
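For orientation before the table.py hunks below: pages drive the reworked module through begin()/row()/cell()/end(), and searching, sorting, CSV export and the row limit are all applied inside end(). A hypothetical caller (row() itself is unchanged context and therefore not visible in this diff; the begin() keywords are the ones introduced below):

    import table

    def render_users_page(users):
        # users: hypothetical list of (name, alias) pairs
        table.begin("users", title = "Users",
                    searchable = True, sortable = True, limit = 100)
        for name, alias in users:
            table.row()
            table.cell("Name", name)
            table.cell("Alias", alias)
        table.end()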
-table = None -mode = None +import config +from lib import num_split + +table = None +mode = None next_func = None -row_css = None +row_css = None -def begin(title=None, **kwargs): +def begin(table_id=None, title=None, **kwargs): global table, mode, next_func - if table: - end() + # Use our pagename as table id if none is specified + if table_id == None: + table_id = html.myfile + + try: + limit = config.table_row_limit + except: + pass + + limit = kwargs.get('limit', limit) + if html.var('limit') == 'none' or kwargs.get("output_format", "html") != "html": + limit = None table = { - "title": title, - "headers" : [], - "rows" : [], + "id" : table_id, + "title" : title, + "headers" : [], + "collect_headers" : False, # also: True, "finished" + "rows" : [], + "limit" : limit, + "omit_if_empty" : kwargs.get("omit_if_empty", False), + "omit_headers" : kwargs.get("omit_headers", False), + "searchable" : kwargs.get("searchable", True), + "sortable" : kwargs.get("sortable", True), + "next_header" : None, + "output_format" : kwargs.get("output_format", "html"), # possible: html, csv, fetch } if kwargs.get("empty_text"): table["empty_text"] = kwargs["empty_text"] @@ -67,9 +89,24 @@ next_func = add_row next_args = posargs, kwargs -def add_row(css=None): - table["rows"].append(([], css)) - +def add_row(css=None, state=0, collect_headers=True, fixed=False): + if table["next_header"]: + table["rows"].append((table["next_header"], None, "header", True)) + table["next_header"] = None + table["rows"].append(([], css, state, fixed)) + if collect_headers: + if table["collect_headers"] == False: + table["collect_headers"] = True + elif table["collect_headers"] == True: + table["collect_headers"] = "finished" + elif not collect_headers and table["collect_headers"]: + table["collect_headers"] = False + +# Intermediate title, shown as soon as there is a following row. +# We store the group headers in the list of rows, with css None +# and state set to "header" +def groupheader(title): + table["next_header"] = title def cell(*posargs, **kwargs): finish_previous() @@ -77,53 +114,249 @@ next_func = add_cell next_args = posargs, kwargs -def add_cell(title, text="", css=None): +def add_cell(title="", text="", css=None, help=None, colspan=None, sortable=True): if type(text) != unicode: text = str(text) htmlcode = text + html.drain() - if len(table["rows"]) == 1: # first row -> pick headers - table["headers"].append(title) - table["rows"][-1][0].append((htmlcode, css)) + if table["collect_headers"] == True: + # small helper to make sorting introducion easier. Cells which contain + # buttons are never sortable + if css and 'buttons' in css and sortable: + sortable = False + table["headers"].append((title, help, sortable)) + table["rows"][-1][0].append((htmlcode, css, colspan)) def end(): global table finish_previous() html.unplug() - if table["title"]: + + if not table: + return + + # Output-Format "fetch" simply means that all data is being + # returned as Python-values to be rendered somewhere else. + if table["output_format"] == "fetch": + return table["headers"], table["rows"] + + if table["output_format"] == "csv": + do_csv = True + csv_separator = html.var("csv_separator", ";") + else: + do_csv = False + + if not table["rows"] and table["omit_if_empty"]: + table = None + return + + if table["title"] and not do_csv: html.write("

<h3>%s</h3>
" % table["title"]) - if table.get("help"): + if table.get("help") and not do_csv: html.help(table["help"]) - if not table["rows"]: + if not table["rows"] and not do_csv: html.write("
<div class=info>%s</div>
" % table["empty_text"]) table = None return - html.write('\n') - html.write(" ") - for header in table["headers"]: - html.write(" \n" % header) - html.write(" \n") + table_id = table['id'] + rows = table["rows"] - odd = "even" - # TODO: Sorting - for row, css in table["rows"]: - # TODO: Filtering - odd = odd == "odd" and "even" or "odd" - html.write(' case insensitive + search_term = html.var('_%s_search' % table_id, table_opts.get('search', '')).lower() + if search_term: + html.set_var('_%s_search' % table_id, search_term) + table_opts['search'] = search_term # persist + filtered_rows = [] + for row, css, state, fixed in rows: + if state == "header" or fixed: + continue # skip filtering of headers or fixed rows + for cell_content, css_classes, colspan in row: + if fixed or search_term in cell_content.lower(): + filtered_rows.append((row, css, state, fixed)) + break # skip other cells when matched + rows = filtered_rows + + if html.var('_%s_reset_sorting' % table_id): + html.del_var('_%s_sort' % table_id) + if 'sort' in table_opts: + del table_opts['sort'] # persist + + if table["sortable"]: + # Now apply eventual sorting settings + sort = html.var('_%s_sort' % table_id, table_opts.get('sort')) + if sort != None: + html.set_var('_%s_sort' % table_id, sort) + table_opts['sort'] = sort # persist + sort_col, sort_reverse = map(int, sort.split(',', 1)) + + # remove and remind fixed rows, add to separate list + fixed_rows = [] + for index, row in enumerate(rows[:]): + if row[3] == True: + rows.remove(row) + fixed_rows.append((index, row)) + + # Then use natural sorting to sort the list + rows.sort(cmp=lambda a, b: cmp(num_split(a[0][sort_col][0]), + num_split(b[0][sort_col][0])), + reverse=sort_reverse==1) + + # Now re-add the removed "fixed" rows to the list again + if fixed_rows: + for index, row in fixed_rows: + rows.insert(index, row) + + num_rows_unlimited = len(rows) + num_cols = len(table["headers"]) + + # Apply limit after search / sorting etc. + limit = table['limit'] + if limit is not None: + rows = rows[:limit] + + if not do_csv: + html.write('
%s
\n') - for cell_content, css_classes in row: - html.write(" " % (css_classes and (" class='%s'" % css_classes) or "")) - html.write(cell_content) - html.write("\n") - html.write("\n") - html.write("
\n") + + def render_headers(): + if table["omit_headers"]: + return + + if do_csv: + html.write(csv_separator.join([html.strip_tags(header) or "" for (header, help, sortable) in table["headers"]]) + "\n") + else: + html.write(" ") + first_col = True + for nr, (header, help, sortable) in enumerate(table["headers"]): + text = header + if help: + header = '%s' % (html.attrencode(help), header) + if not table["sortable"] or not sortable: + html.write(" ") + else: + reverse = 0 + sort = html.var('_%s_sort' % table_id) + if sort: + sort_col, sort_reverse = map(int, sort.split(',', 1)) + if sort_col == nr: + reverse = sort_reverse == 0 and 1 or 0 + html.write(" " % + (_('Sort by %s') % text, html.makeactionuri([('_%s_sort' % table_id, '%d,%d' % (nr, reverse))]))) + + # Add the table action link + if first_col: + if actions_enabled: + if actions_visible: + state = '0' + help = _('Hide table actions') + img = 'table_actions_on' + else: + state = '1' + help = _('Display table actions') + img = 'table_actions_off' + html.icon_button(html.makeuri([('_%s_actions' % table_id, state)]), + help, img, cssclass = 'toggle_actions') + first_col = False + + html.write("%s\n" % header) + html.write(" \n") + + # If we have no group headers then paint the headers now + if table["rows"] and table["rows"][0][2] != "header": + render_headers() + + if actions_enabled and actions_visible and not do_csv: + html.write('' % num_cols) + if not html.in_form(): + html.begin_form("%s_actions" % table_id) + + if table["searchable"]: + html.write("\n") + + if html.has_var('_%s_sort' % table_id): + html.write("
") + html.button("_%s_reset_sorting" % table_id, _("Reset sorting")) + html.write("
\n") + + if not html.in_form(): + html.begin_form("%s_actions" % table_id) + + html.hidden_fields() + html.end_form() + html.write('') + + odd = "even" + for nr, (row, css, state, fixed) in enumerate(rows): + if do_csv: + html.write(csv_separator.join([ html.strip_tags(cell_content) for cell_content, css_classes, colspan in row ])) + html.write("\n") + + else: # HTML output + # Intermediate header + if state == "header": + # Show the header only, if at least one (non-header) row follows + if nr < len(rows) - 1 and rows[nr+1][2] != "header": + html.write('
%s' % (num_cols, row)) + odd = "even" + render_headers() + continue + + odd = odd == "odd" and "even" or "odd" + html.write(' \n') + for cell_content, css_classes, colspan in row: + colspan = colspan and (' colspan="%d"' % colspan) or '' + html.write(" " % (css_classes and (" class='%s'" % css_classes) or "", colspan)) + html.write(cell_content) + html.write("\n") + html.write("\n") + + if table["searchable"] and search_term and not rows and not do_csv: + html.write('%s' % + (num_cols, _('Found no matching rows. Please try another search term.'))) + + if not do_csv: + html.write("\n") + + if limit is not None and num_rows_unlimited > limit and not do_csv: + html.message(_('This table is limited to show only %d of %d rows. ' + 'Click here to disable the limitation.') % + (limit, num_rows_unlimited, html.makeuri([('limit', 'none')]))) + + if actions_enabled and not do_csv: + config.save_user_file("tableoptions", user_opts) + table = None diff -Nru check-mk-1.2.2p3/htdocs/userdb.py check-mk-1.2.6p12/htdocs/userdb.py --- check-mk-1.2.2p3/htdocs/userdb.py 2013-11-05 09:23:08.000000000 +0000 +++ check-mk-1.2.6p12/htdocs/userdb.py 2015-09-21 10:59:54.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. @@ -34,17 +34,35 @@ # Custom user attributes user_attributes = {} +builtin_user_attribute_names = [] -# Load all login plugins +# Load all userdb plugins def load_plugins(): + global user_attributes + global multisite_user_connectors + global builtin_user_attribute_names + + # Do not cache the custom user attributes. They can be created by the user + # during runtime, means they need to be loaded during each page request. + # But delete the old definitions before to also apply removals of attributes + if user_attributes: + for attr_name in user_attributes.keys(): + if attr_name not in builtin_user_attribute_names: + del user_attributes[attr_name] + declare_custom_user_attrs() + global loaded_with_language if loaded_with_language == current_language: return # declare & initialize global vars - global multisite_user_connectors ; multisite_user_connectors = [] + user_attributes = {} + multisite_user_connectors = [] load_web_plugins("userdb", globals()) + builtin_user_attribute_names = user_attributes.keys() + declare_custom_user_attrs() + hook_load() # This must be set after plugin loading to make broken plugins raise # exceptions all the time and not only the first time (when the plugins @@ -104,7 +122,7 @@ return new_user def create_non_existing_user(connector_id, username): - users = load_users() + users = load_users(lock = True) if username in users: return # User exists. Nothing to do... @@ -114,16 +132,67 @@ # Call the sync function for this new user hook_sync(connector_id = connector_id, only_username = username) +# FIXME: Can we improve this easily? Would be nice not to have to call "load_users". +# Maybe a directory listing of profiles or a list of a small file would perform better +# than having to load the users, contacts etc. 
during each http request to multisite +def user_exists(username): + return username in load_users().keys() + def user_locked(username): users = load_users() return users[username].get('locked', False) +def update_user_access_time(username): + if not config.save_user_access_times: + return + save_custom_attr(username, 'last_seen', repr(time.time())) +def on_succeeded_login(username): + num_failed = load_custom_attr(username, 'num_failed', saveint) + if num_failed != None and num_failed != 0: + save_custom_attr(username, 'num_failed', '0') + + update_user_access_time(username) + +# userdb.need_to_change_pw returns either False or the reason description why the +# password needs to be changed +def need_to_change_pw(username): + if load_custom_attr(username, 'enforce_pw_change', saveint) == 1: + return 'enforced' + + last_pw_change = load_custom_attr(username, 'last_pw_change', saveint) + max_pw_age = config.password_policy.get('max_age') + if max_pw_age: + if not last_pw_change: + # The age of the password is unknown. Assume the user has just set + # the password to have the first access after enabling password aging + # as starting point for the password period. This bewares all users + # from needing to set a new password after enabling aging. + save_custom_attr(username, 'last_pw_change', str(int(time.time()))) + return False + elif time.time() - last_pw_change > max_pw_age: + return 'expired' + return False + +def on_failed_login(username): + users = load_users(lock = True) + if username in users: + if "num_failed" in users[username]: + users[username]["num_failed"] += 1 + else: + users[username]["num_failed"] = 1 + + if config.lock_on_logon_failures: + if users[username]["num_failed"] >= config.lock_on_logon_failures: + users[username]["locked"] = True + + save_users(users) root_dir = defaults.check_mk_configdir + "/wato/" multisite_dir = defaults.default_config_dir + "/multisite.d/wato/" -# .--Users---------------------------------------------------------------. +#. +# .-Users----------------------------------------------------------------. # | _ _ | # | | | | |___ ___ _ __ ___ | # | | | | / __|/ _ \ '__/ __| | @@ -132,20 +201,39 @@ # | | # +----------------------------------------------------------------------+ +def declare_user_attribute(name, vs, user_editable = True, permission = None, + show_in_table = False, topic = None, add_custom_macro = False, + domain = "multisite"): + + user_attributes[name] = { + 'valuespec' : vs, + 'user_editable' : user_editable, + 'show_in_table' : show_in_table, + 'topic' : topic and topic or 'personal', + 'add_custom_macro' : add_custom_macro, + 'domain' : domain, + } + + # Permission needed for editing this attribute + if permission: + user_attributes[name]["permission"] = permission + def get_user_attributes(): return user_attributes.items() -def reset_user_attributes(): - global user_attributes - user_attributes = {} - -def load_users(): +def load_users(lock = False): filename = root_dir + "contacts.mk" - # Make sure that the file exists without modifying it, *if* it exists. - # Note the lock will be released at end of page request automatically. - file(filename, "a") - aquire_lock(filename) + if lock: + # Make sure that the file exists without modifying it, *if* it exists + # to be able to lock and realease the file properly. + # Note: the lock will be released on next save_users() call or at + # end of page request automatically. 
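A minimal flock()-based sketch of the aquire_lock()/release_lock() contract the comment above describes (the real helpers come from lib and are called right below; the point illustrated here is that the file descriptor stays open until the lock is released, either by save_users() or implicitly at the end of the request):

    import fcntl

    _locked_files = {}

    def aquire_lock_sketch(path):
        if path in _locked_files:
            return                      # already locked by this request
        f = open(path, "a")             # keep the fd open while locked
        fcntl.flock(f.fileno(), fcntl.LOCK_EX)
        _locked_files[path] = f

    def release_lock_sketch(path):
        f = _locked_files.pop(path, None)
        if f:
            fcntl.flock(f.fileno(), fcntl.LOCK_UN)
            f.close()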
+ file(filename, "a") + aquire_lock(filename) + + if html.is_cached('users'): + return html.get_cached('users') # First load monitoring contacts from Check_MK's world. If this is # the first time, then the file will be empty, which is no problem. @@ -154,6 +242,8 @@ vars = { "contacts" : {} } execfile(filename, vars, vars) contacts = vars["contacts"] + except IOError: + contacts = {} # a not existing file is ok, start with empty data except Exception, e: if config.debug: raise MKGeneralException(_("Cannot read configuration file %s: %s" % @@ -165,20 +255,19 @@ # Now add information about users from the Web world filename = multisite_dir + "users.mk" - if os.path.exists(filename): - try: - vars = { "multisite_users" : {} } - execfile(filename, vars, vars) - users = vars["multisite_users"] - except Exception, e: - if config.debug: - raise MKGeneralException(_("Cannot read configuration file %s: %s" % - (filename, e))) - else: - html.log('load_users: Problem while loading users (%s - %s). ' - 'Initializing structure...' % (filename, e)) - users = {} - else: + try: + vars = { "multisite_users" : {} } + execfile(filename, vars, vars) + users = vars["multisite_users"] + except IOError: + users = {} # not existing is ok -> empty structure + except Exception, e: + if config.debug: + raise MKGeneralException(_("Cannot read configuration file %s: %s" % + (filename, e))) + else: + html.log('load_users: Problem while loading users (%s - %s). ' + 'Initializing structure...' % (filename, e)) users = {} # Merge them together. Monitoring users not known to Multisite @@ -206,41 +295,45 @@ # they are getting according to the multisite old-style # configuration variables. + def readlines(f): + try: + return file(f) + except IOError: + return [] + filename = defaults.htpasswd_file - if os.path.exists(filename): - for line in file(filename): - line = line.strip() - if ':' in line: - id, password = line.strip().split(":")[:2] - if password.startswith("!"): - locked = True - password = password[1:] - else: - locked = False - if id in result: - result[id]["password"] = password - result[id]["locked"] = locked - else: - # Create entry if this is an admin user - new_user = { - "roles" : config.roles_of_user(id), - "password" : password, - "locked" : False, - } - result[id] = new_user - # Make sure that the user has an alias - result[id].setdefault("alias", id) - # Other unknown entries will silently be dropped. Sorry... + for line in readlines(filename): + line = line.strip() + if ':' in line: + id, password = line.strip().split(":")[:2] + if password.startswith("!"): + locked = True + password = password[1:] + else: + locked = False + if id in result: + result[id]["password"] = password + result[id]["locked"] = locked + else: + # Create entry if this is an admin user + new_user = { + "roles" : config.roles_of_user(id), + "password" : password, + "locked" : False, + } + result[id] = new_user + # Make sure that the user has an alias + result[id].setdefault("alias", id) + # Other unknown entries will silently be dropped. Sorry... 
# Now read the serials, only process for existing users serials_file = '%s/auth.serials' % os.path.dirname(defaults.htpasswd_file) - if os.path.exists(serials_file): - for line in file(serials_file): - line = line.strip() - if ':' in line: - user_id, serial = line.split(':')[:2] - if user_id in result: - result[user_id]['serial'] = saveint(serial) + for line in readlines(serials_file): + line = line.strip() + if ':' in line: + user_id, serial = line.split(':')[:2] + if user_id in result: + result[user_id]['serial'] = saveint(serial) # Now read the user specific files dir = defaults.var_dir + "/web/" @@ -248,11 +341,25 @@ if d[0] != '.': id = d + # read special values from own files + if id in result: + for attr, conv_func in [ + ('num_failed', saveint), + ('last_pw_change', saveint), + ('last_seen', savefloat), + ('enforce_pw_change', lambda x: bool(saveint(x))), + ]: + val = load_custom_attr(id, attr, conv_func) + if val != None: + result[id][attr] = val + # read automation secrets and add them to existing # users or create new users automatically - secret_file = dir + d + "/automation.secret" - if os.path.exists(secret_file): - secret = file(secret_file).read().strip() + try: + secret = file(dir + d + "/automation.secret").read().strip() + except IOError: + secret = None + if secret: if id in result: result[id]["automation_secret"] = secret else: @@ -261,14 +368,44 @@ "automation_secret" : secret, } + # populate the users cache + html.set_cache('users', result) + return result +def load_custom_attr(userid, key, conv_func, default = None): + basedir = defaults.var_dir + "/web/" + userid + try: + return conv_func(file(basedir + '/' + key + '.mk').read().strip()) + except IOError: + return default + +def save_custom_attr(userid, key, val): + basedir = defaults.var_dir + "/web/" + userid + make_nagios_directory(basedir) + create_user_file('%s/%s.mk' % (basedir, key), 'w').write('%s\n' % val) + +def get_online_user_ids(): + online_threshold = time.time() - config.user_online_maxage + users = [] + for user_id, user in load_users(lock = False).items(): + if user.get('last_seen', 0) >= online_threshold: + users.append(user_id) + return users + def split_dict(d, keylist, positive): return dict([(k,v) for (k,v) in d.items() if (k in keylist) == positive]) - def save_users(profiles): - custom_values = user_attributes.keys() + + # Add custom macros + core_custom_macros = [ k for k,o in user_attributes.items() if o.get('add_custom_macro') ] + for user in profiles.keys(): + for macro in core_custom_macros: + if profiles[user].get(macro): + profiles[user]['_'+macro] = profiles[user][macro] + + multisite_custom_values = [ k for k,v in user_attributes.items() if v["domain"] == "multisite" ] # Keys not to put into contact definitions for Check_MK non_contact_keys = [ @@ -279,7 +416,11 @@ "language", "serial", "connector", - ] + custom_values + "num_failed", + "enforce_pw_change", + "last_pw_change", + "last_seen", + ] + multisite_custom_values # Keys to put into multisite configuration multisite_keys = [ @@ -289,7 +430,7 @@ "alias", "language", "connector", - ] + custom_values + ] + multisite_custom_values # Remove multisite keys in contacts. 
contacts = dict( @@ -305,31 +446,46 @@ for p, val in profile.items() if p in multisite_keys + multisite_attributes(profile.get('connector'))]) - filename = root_dir + "contacts.mk" # Check_MK's monitoring contacts + filename = root_dir + "contacts.mk.new" out = create_user_file(filename, "w") out.write("# Written by Multisite UserDB\n# encoding: utf-8\n\n") out.write("contacts.update(\n%s\n)\n" % pprint.pformat(contacts)) out.close() + os.rename(filename, filename[:-4]) # Users with passwords for Multisite + filename = multisite_dir + "users.mk.new" make_nagios_directory(multisite_dir) - filename = multisite_dir + "users.mk" out = create_user_file(filename, "w") out.write("# Written by Multisite UserDB\n# encoding: utf-8\n\n") out.write("multisite_users = \\\n%s\n" % pprint.pformat(users)) out.close() + os.rename(filename, filename[:-4]) # Execute user connector save hooks hook_save(profiles) # Write out the users serials - serials_file = '%s/auth.serials' % os.path.dirname(defaults.htpasswd_file) - out = create_user_file(serials_file, "w") + serials_file = '%s/auth.serials.new' % os.path.dirname(defaults.htpasswd_file) + rename_file = True + try: + out = create_user_file(serials_file, "w") + except: + rename_file = False + out = create_user_file(serials_file[:-4], "w") + + def encode_utf8(value): + if type(value) == unicode: + value = value.encode("utf-8") + return value + for user_id, user in profiles.items(): - out.write('%s:%d\n' % (user_id, user.get('serial', 0))) + out.write('%s:%d\n' % (encode_utf8(user_id), user.get('serial', 0))) out.close() + if rename_file: + os.rename(serials_file, serials_file[:-4]) # Write user specific files for id, user in profiles.items(): @@ -343,9 +499,17 @@ elif os.path.exists(auth_file): os.remove(auth_file) - # Write out the users serial - serial_file = user_dir + '/serial.mk' - create_user_file(serial_file, 'w').write('%d\n' % user.get('serial', 0)) + # Write out user attributes which are written to dedicated files in the user + # profile directory. The primary reason to have separate files, is to reduce + # the amount of data to be loaded during regular page processing + save_custom_attr(id, 'serial', str(user.get('serial', 0))) + save_custom_attr(id, 'num_failed', str(user.get('num_failed', 0))) + save_custom_attr(id, 'enforce_pw_change', str(int(user.get('enforce_pw_change', False)))) + save_custom_attr(id, 'last_pw_change', str(user.get('last_pw_change', int(time.time())))) + + # Write out the last seent time + if 'last_seen' in user: + save_custom_attr(id, 'last_seen', repr(user['last_seen'])) # Remove settings directories of non-existant users. # Beware: we removed this since it leads to violent destructions @@ -366,9 +530,18 @@ if os.path.isdir(entry) and os.path.exists(entry + '/automation.secret'): os.unlink(entry + '/automation.secret') + # Release the lock to make other threads access possible again asap + # This lock is set by load_users() only in the case something is expected + # to be written (like during user syncs, wato, ...) + release_lock(root_dir + "contacts.mk") + + # populate the users cache + html.set_cache('users', profiles) + # Call the users_saved hook - hooks.call("users-saved", users) + hooks.call("users-saved", profiles) +#. # .-Roles----------------------------------------------------------------. 
# | ____ _ | # | | _ \ ___ | | ___ ___ | @@ -392,9 +565,6 @@ for id in config.builtin_role_ids ]) filename = multisite_dir + "roles.mk" - if not os.path.exists(filename): - return roles - try: vars = { "roles" : roles } execfile(filename, vars, vars) @@ -414,6 +584,8 @@ return vars["roles"] + except IOError: + return roles # Use empty structure, not existing file is ok! except Exception, e: if config.debug: raise MKGeneralException(_("Cannot read configuration file %s: %s" % @@ -423,6 +595,7 @@ 'Initializing structure...' % (filename, e)) return roles +#. # .-Groups---------------------------------------------------------------. # | ____ | # | / ___|_ __ ___ _ _ _ __ ___ | @@ -434,18 +607,39 @@ def load_group_information(): try: - filename = root_dir + "groups.mk" - if not os.path.exists(filename): - return {} - + # Load group information from Check_MK world vars = {} for what in ["host", "service", "contact" ]: vars["define_%sgroups" % what] = {} - execfile(filename, vars, vars) + filename = root_dir + "groups.mk" + try: + execfile(filename, vars, vars) + except IOError: + return {} # skip on not existing file + + # Now load information from the Web world + multisite_vars = {} + for what in ["host", "service", "contact" ]: + multisite_vars["multisite_%sgroups" % what] = {} + + filename = multisite_dir + "groups.mk" + try: + execfile(filename, multisite_vars, multisite_vars) + except IOError: + pass + + # Merge information from Check_MK and Multisite worlds together groups = {} for what in ["host", "service", "contact" ]: - groups[what] = vars.get("define_%sgroups" % what, {}) + groups[what] = {} + for id, alias in vars['define_%sgroups' % what].items(): + groups[what][id] = { + 'alias': alias + } + if id in multisite_vars['multisite_%sgroups' % what]: + groups[what][id].update(multisite_vars['multisite_%sgroups' % what][id]) + return groups except Exception, e: @@ -457,7 +651,57 @@ 'Initializing structure...' % (filename, e)) return {} -# .----------------------------------------------------------------------. +#. +# .-Custom-Attrs.--------------------------------------------------------. +# | ____ _ _ _ _ | +# | / ___| _ ___| |_ ___ _ __ ___ / \ | |_| |_ _ __ ___ | +# | | | | | | / __| __/ _ \| '_ ` _ \ _____ / _ \| __| __| '__/ __| | +# | | |__| |_| \__ \ || (_) | | | | | |_____/ ___ \ |_| |_| | \__ \_ | +# | \____\__,_|___/\__\___/|_| |_| |_| /_/ \_\__|\__|_| |___(_) | +# | | +# +----------------------------------------------------------------------+ +# | Mange custom attributes of users (in future hosts etc.) | +# '----------------------------------------------------------------------' + +def load_custom_attrs(): + try: + filename = multisite_dir + "custom_attrs.mk" + if not os.path.exists(filename): + return {} + + vars = { + 'wato_user_attrs': [], + } + execfile(filename, vars, vars) + + attrs = {} + for what in [ "user" ]: + attrs[what] = vars.get("wato_%s_attrs" % what, []) + return attrs + + except Exception, e: + if config.debug: + raise MKGeneralException(_("Cannot read configuration file %s: %s" % + (filename, e))) + else: + html.log('load_custom_attrs: Problem while loading custom attributes (%s - %s). ' + 'Initializing structure...' 
% (filename, e)) + return {} + +def declare_custom_user_attrs(): + all_attrs = load_custom_attrs() + attrs = all_attrs.setdefault('user', []) + for attr in attrs: + vs = globals()[attr['type']](title = attr['title'], help = attr['help']) + declare_user_attribute(attr['name'], vs, + user_editable = attr['user_editable'], + show_in_table = attr.get('show_in_table', False), + topic = attr.get('topic', 'personal'), + add_custom_macro = attr.get('add_custom_macro', False ) + ) + +#. +# .-Hooks----------------------------------------------------------------. # | _ _ _ | # | | | | | ___ ___ | | _____ | # | | |_| |/ _ \ / _ \| |/ / __| | @@ -504,35 +748,44 @@ # Is called on: # a) before rendering the user management page in WATO # b) a user is created during login (only for this user) -def hook_sync(connector_id = None, add_to_changelog = False, only_username = None): +# c) Before activating the changes in WATO +def hook_sync(connector_id = None, add_to_changelog = False, only_username = None, raise_exc = False): if connector_id: connectors = [ get_connector(connector_id) ] else: connectors = enabled_connectors() + no_errors = True for connector in connectors: handler = connector.get('sync', None) if handler: try: handler(add_to_changelog, only_username) except MKLDAPException, e: + if raise_exc: + raise if config.debug: import traceback html.show_error( - "

" + _("Error executing sync hook") + "

" + "

" + _("Error during sync") + "

" "
%s
" % (traceback.format_exc()) ) else: html.show_error( - "

" + _("Error executing sync hook") + "

" + "

" + _("Error during sync") + "

" "
%s
" % (e) ) + no_errors = False except: + if raise_exc: + raise import traceback html.show_error( - "

" + _("Error executing sync hook") + "

" + "

" + _("Error during sync") + "

" "
%s
" % (traceback.format_exc()) ) + no_errors = False + return no_errors # Hook function can be registered here to be executed during saving of the # new user construct @@ -547,12 +800,18 @@ if config.debug: import traceback html.show_error( - "

" + _("Error executing sync hook") + "

" + "

" + _("Error during saving") + "

" "
%s
" % (traceback.format_exc()) ) else: raise +def hook_load(): + for connector in enabled_connectors(): + handler = connector.get('load', None) + if handler: + handler() + # This function registers general stuff, which is independet of the single # connectors to each page load. It is exectued AFTER all other page hooks. def general_page_hook(): @@ -568,13 +827,16 @@ # Create initial auth.serials file, same issue as auth.php above serials_file = '%s/auth.serials' % os.path.dirname(defaults.htpasswd_file) if not os.path.exists(serials_file) or os.path.getsize(serials_file) == 0: - save_users(load_users()) + save_users(load_users(lock = True)) # Hook function can be registered here to execute actions on a "regular" base without # user triggered action. This hook is called on each page load. # Catch all exceptions and log them to apache error log. Let exceptions raise trough # when debug mode is enabled. def hook_page(): + if 'page' not in config.userdb_automatic_sync: + return + for connector in enabled_connectors(): handler = connector.get('page', None) if not handler: @@ -590,3 +852,13 @@ (connector['id'], traceback.format_exc())) general_page_hook() + +def ajax_sync(): + try: + hook_sync(add_to_changelog = False, raise_exc = True) + html.write('OK\n') + except Exception, e: + if config.debug: + raise + else: + html.write('ERROR %s\n' % e) diff -Nru check-mk-1.2.2p3/htdocs/valuespec.py check-mk-1.2.6p12/htdocs/valuespec.py --- check-mk-1.2.2p3/htdocs/valuespec.py 2013-11-05 09:23:08.000000000 +0000 +++ check-mk-1.2.6p12/htdocs/valuespec.py 2015-09-21 10:59:54.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. @@ -24,23 +24,23 @@ # to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, # Boston, MA 02110-1301 USA. -import math, os, time, re, sre_constants, urlparse, forms, htmllib +import math, os, time, re, sre_constants, urlparse, forms from lib import * def type_name(v): try: return type(v).__name__ except: - return htmllib.attrencode(type(v)) + return html.attrencode(type(v)) # Abstract base class of all value declaration classes. class ValueSpec: def __init__(self, **kwargs): self._title = kwargs.get("title") self._help = kwargs.get("help") - self._attrencode = kwargs.get("attrencode", False) if "default_value" in kwargs: self._default_value = kwargs.get("default_value") + self._validate = kwargs.get("validate") def title(self): return self._title @@ -110,7 +110,17 @@ # has been returned by from_html_vars() or because it has # been checked with validate_datatype()). def validate_value(self, value, varprefix): - pass + self.custom_validate(value, varprefix) + + # Needed for implementation of customer validation + # functions that are configured by the user argument + # validate = .... Problem: this function must be + # called by *every* validate_value() function in all + # subclasses - explicitely. + def custom_validate(self, value, varprefix): + if self._validate: + self._validate(value, varprefix) + # A fixed non-editable value, e.g. 
to be use in "Alternative" class FixedValue(ValueSpec): @@ -142,15 +152,21 @@ def validate_value(self, value, varprefix): self.validate_datatype(value, varprefix) + ValueSpec.custom_validate(self, value, varprefix) # Time in seconds class Age(ValueSpec): def __init__(self, **kwargs): ValueSpec.__init__(self, **kwargs) self._label = kwargs.get("label") + self._minvalue = kwargs.get("minvalue") + self._display = kwargs.get("display", ["days", "hours", "minutes", "seconds"]) def canonical_value(self): - return 0 + if self._minvalue: + return self._minvalue + else: + return 0 def render_input(self, varprefix, value): days, rest = divmod(value, 60*60*24) @@ -158,35 +174,61 @@ minutes, seconds = divmod(rest, 60) html.write("
") - html.number_input(varprefix+'_days', days, 2) - html.write(" %s " % _("days")) - html.number_input(varprefix+'_hours', hours, 2) - html.write(" %s " % _("hours")) - html.number_input(varprefix+'_minutes', minutes, 2) - html.write(" %s " % _("min")) - html.number_input(varprefix+'_seconds', seconds, 2) - html.write(" %s " % _("sec")) + if self._label: + html.write(self._label + " ") + + takeover = 0 + first = True + for uid, title, value, tkovr_fac in [ ("days", _("days"), days, 24), + ("hours", _("hours"), hours, 60), + ("minutes", _("mins"), minutes, 60), + ("seconds", _("secs"), seconds, 60) ]: + if uid in self._display: + value += takeover + takeover = 0 + html.number_input(varprefix + "_" + uid, value, first and 3 or 2) + html.write(" %s " % title) + first = False + else: + takeover = (takeover + value) * tkovr_fac html.write("
") def from_html_vars(self, varprefix): - return ( - saveint(html.var(varprefix+'_days')) * 3600 * 24 - + saveint(html.var(varprefix+'_hours')) * 3600 - + saveint(html.var(varprefix+'_minutes')) * 60 - + saveint(html.var(varprefix+'_seconds')) - ) + return ( + saveint(html.var(varprefix+'_days', 0)) * 3600 * 24 + + saveint(html.var(varprefix+'_hours',0)) * 3600 + + saveint(html.var(varprefix+'_minutes',0)) * 60 + + saveint(html.var(varprefix+'_seconds',0)) + ) def value_to_text(self, value): days, rest = divmod(value, 60*60*24) hours, rest = divmod(rest, 60*60) minutes, seconds = divmod(rest, 60) - return "%sd %sh %sm %ss" % (days, hours, minutes, seconds) + parts = [] + for title, count in [ + ( _("days"), days, ), + ( _("hours"), hours, ), + ( _("minutes"), minutes, ), + ( _("seconds"), seconds, )]: + if count: + parts.append("%d %s" % (count, title)) + + if parts: + return " ".join(parts) + else: + return _("no time") + def validate_datatype(self, value, varprefix): if type(value) != int: raise MKUserError(varprefix, _("The value %r has type %s, but must be of type int") % (value, type_name(value))) + def validate_value(self, value, varprefix): + if self._minvalue != None and value < self._minvalue: + raise MKUserError(varprefix, _("%s is too low. The minimum allowed value is %s." % ( + value, self._minvalue))) # Editor for a single integer class Integer(ValueSpec): @@ -211,7 +253,7 @@ else: return 0 - def render_input(self, varprefix, value): + def render_input(self, varprefix, value, convfunc = saveint): if self._label: html.write(self._label) html.write(" ") @@ -219,7 +261,10 @@ style = "text-align: right;" else: style = "" - html.number_input(varprefix, str(value), size = self._size, style = style) + if value == "": # This is needed for ListOfIntegers + html.text_input(varprefix, "", "number", size = self._size, style = style) + else: + html.number_input(varprefix, self._display_format % convfunc(value), size = self._size, style = style) if self._unit: html.write(" ") html.write(self._unit) @@ -247,7 +292,7 @@ return text def validate_datatype(self, value, varprefix): - if type(value) != int: + if type(value) not in [ int, long ]: raise MKUserError(varprefix, _("The value %r has the wrong type %s, but must be of type int") % (value, type_name(value))) @@ -258,6 +303,8 @@ if self._maxvalue != None and value > self._maxvalue: raise MKUserError(varprefix, _("%s is too high. The maximum allowed value is %s." 
% ( value, self._maxvalue))) + ValueSpec.custom_validate(self, value, varprefix) + # Filesize in Byte,Kbyte,Mbyte,Gigatbyte, Terrabyte class Filesize(Integer): def __init__(self, **kwargs): @@ -276,7 +323,8 @@ exp, count = self.get_exponent(value) html.number_input(varprefix + '_size', count, size = self._size) html.write(" ") - html.select(varprefix + '_unit', enumerate(self._names), exp) + choices = [ (str(nr), name) for (nr, name) in enumerate(self._names) ] + html.select(varprefix + '_unit', choices, str(exp)) def from_html_vars(self, varprefix): try: @@ -294,15 +342,22 @@ def __init__(self, **kwargs): ValueSpec.__init__(self, **kwargs) self._label = kwargs.get("label") - self._size = kwargs.get("size", 25) + self._size = kwargs.get("size", 25) # also possible: "max" + self._cssclass = kwargs.get("cssclass", "text") self._strip = kwargs.get("strip", True) - self._allow_empty = kwargs.get("allow_empty", True) + self._attrencode = kwargs.get("attrencode", True) + self._allow_empty = kwargs.get("allow_empty", _("none")) + self._empty_text = kwargs.get("empty_text", "") + self._read_only = kwargs.get("read_only") self._none_is_empty = kwargs.get("none_is_empty", False) + self._forbidden_chars = kwargs.get("forbidden_chars", "") self._regex = kwargs.get("regex") self._regex_error = kwargs.get("regex_error", _("Your input does not match the required format.")) + self._minlen = kwargs.get('minlen', None) if type(self._regex) == str: self._regex = re.compile(self._regex) + self._prefix_buttons = kwargs.get("prefix_buttons", []) def canonical_value(self): return "" @@ -310,18 +365,37 @@ def render_input(self, varprefix, value): if value == None: value = "" + elif type(value) != unicode: + value = str(value) if self._label: html.write(self._label) html.write(" ") - html.text_input(varprefix, str(value), size = self._size) + + if self._prefix_buttons: + html.write('
')
+        html.text_input(varprefix, value, size = self._size, read_only = self._read_only, cssclass = self._cssclass)
+        self.render_buttons()
+        if self._prefix_buttons:
+            html.write('
') + + def render_buttons(self): + if self._prefix_buttons: + html.write(" ") + for icon, textfunc, help in self._prefix_buttons: + try: + text = textfunc() + except: + text = textfunc + html.icon_button("#", help, icon, onclick="vs_textascii_button(this, '%s', 'prefix');" % text) + def value_to_text(self, value): - if value == None: - return _("none") + if not value: + return self._empty_text else: if self._attrencode: - return htmllib.attrencode(value) + return html.attrencode(value) else: return value @@ -343,6 +417,14 @@ type_name(value)) def validate_value(self, value, varprefix): + try: + unicode(value) + except: + raise MKUserError(varprefix, _("Non-ASCII characters are not allowed here.")) + if self._forbidden_chars: + for c in self._forbidden_chars: + if c in value: + raise MKUserError(varprefix, _("The character %s is not allowed here.") % c) if self._none_is_empty and value == "": raise MKUserError(varprefix, _("An empty value must be represented with None here.")) if not self._allow_empty and value.strip() == "": @@ -351,13 +433,15 @@ if not self._regex.match(value): raise MKUserError(varprefix, self._regex_error) + if self._minlen != None and len(value) < self._minlen: + raise MKUserError(varprefix, _("You need to provide at least %d characters.") % self._minlen) + + ValueSpec.custom_validate(self, value, varprefix) + class TextUnicode(TextAscii): def __init__(self, **kwargs): TextAscii.__init__(self, **kwargs) - def render_input(self, varprefix, value): - html.text_input(varprefix, value, size = self._size) - def from_html_vars(self, varprefix): return html.var_utf8(varprefix, "").strip() @@ -374,39 +458,65 @@ self._regex = re.compile('^[a-zA-Z_][-a-zA-Z0-9_]*$') self._regex_error = _("An identifier must only consist of letters, digits, dash and underscore and it must start with a letter or underscore.") +# Same as the ID class, but allowing unicode objects +class UnicodeID(TextUnicode): + def __init__(self, **kwargs): + TextAscii.__init__(self, **kwargs) + self._regex = re.compile(r'^[\w][-\w0-9_]*$', re.UNICODE) + self._regex_error = _("An identifier must only consist of letters, digits, dash and underscore and it must start with a letter or underscore.") + class RegExp(TextAscii): def __init__(self, **kwargs): - TextAscii.__init__(self, attrencode = True, **kwargs) + TextAscii.__init__(self, attrencode = True, cssclass = 'text regexp', **kwargs) + self._mingroups = kwargs.get("mingroups", 0) + self._maxgroups = kwargs.get("maxgroups") def validate_value(self, value, varprefix): TextAscii.validate_value(self, value, varprefix) # Check if the string is a valid regex try: - re.compile(value) + compiled = re.compile(value) except sre_constants.error, e: raise MKUserError(varprefix, _('Invalid regular expression: %s') % e) -class RegExpUnicode(TextUnicode): + if compiled.groups < self._mingroups: + raise MKUserError(varprefix, _("Your regular expression containes %d groups. " + "You need at least %d groups.") % (compiled.groups, self._mingroups)) + if self._maxgroups != None and compiled.groups > self._maxgroups: + raise MKUserError(varprefix, _("Your regular expression containes %d groups. 
" + "It must have at most %d groups.") % (compiled.groups, self._maxgroups)) + + ValueSpec.custom_validate(self, value, varprefix) + + +class RegExpUnicode(TextUnicode, RegExp): def __init__(self, **kwargs): TextUnicode.__init__(self, attrencode = True, **kwargs) + RegExp.__init__(self, **kwargs) def validate_value(self, value, varprefix): TextUnicode.validate_value(self, value, varprefix) - - # Check if the string is a valid regex - try: - re.compile(value) - except sre_constants.error, e: - raise MKUserError(varprefix, _('Invalid regular expression: %s') % e) + RegExp.validate_value(self, value, varprefix) + ValueSpec.custom_validate(self, value, varprefix) class EmailAddress(TextAscii): def __init__(self, **kwargs): + kwargs.setdefault("size", 40) TextAscii.__init__(self, **kwargs) - self._regex = re.compile('^[A-Z0-9._%+-]+@[A-Z0-9.-]+\.[A-Z]{2,4}$', re.I) + # The "new" top level domains are very unlimited in length. Theoretically they can be + # up to 63 chars long. But currently the longest is 24 characters. Check this out with: + # wget -qO - http://data.iana.org/TLD/tlds-alpha-by-domain.txt | tail -n+2 | wc -L + self._regex = re.compile('^[A-Z0-9._%+-]+@(localhost|[A-Z0-9.-]+\.[A-Z]{2,24})$', re.I) + self._make_clickable = kwargs.get("make_clickable", False) def value_to_text(self, value): - return '%s' % (value, value) + if not value: + return TextAscii.value_to_text(self, value) + elif self._make_clickable: + return '%s' % (html.attrencode(value), html.attrencode(value)) + else: + return value # Network as used in routing configuration, such as @@ -441,6 +551,9 @@ if l & (2 ** (31-b)) != 0: raise MKUserError(varprefix, _("Please make sure that only the %d non-network bits are non-zero") % bits) + ValueSpec.custom_validate(self, value, varprefix) + + def validate_ipaddress(self, value, varprefix): try: octets = map(int, value.split(".")) @@ -463,6 +576,20 @@ def validate_value(self, value, varprefix): self.validate_ipaddress(value, varprefix) + ValueSpec.custom_validate(self, value, varprefix) + +# A host name with or without domain part. Also allow IP addresses +class Hostname(TextAscii): + def __init__(self, **kwargs): + TextAscii.__init__(self, **kwargs) + self._regex = re.compile('^[-0-9a-zA-Z_.]+$') + self._regex_error = _("Please enter a valid hostname or IPv4 address.") + +class AbsoluteDirname(TextAscii): + def __init__(self, **kwargs): + TextAscii.__init__(self, **kwargs) + self._regex = re.compile('^(/|(/[^/]+)+)$') + self._regex_error = _("Please enter a valid absolut pathname with / as a path separator.") # Valuespec for a HTTP Url (not HTTPS), that @@ -477,6 +604,7 @@ if value: if not value.startswith("http://"): raise MKUserError(varprefix, _("The URL must begin with http://")) + ValueSpec.custom_validate(self, value, varprefix) def from_html_vars(self, varprefix): value = TextAscii.from_html_vars(self, varprefix) @@ -501,36 +629,53 @@ # any path component return '%s' % ( (self._target and 'target="%s" ' % self._target or ""), - url, text) - + html.attrencode(url), html.attrencode(text)) class TextAreaUnicode(TextUnicode): def __init__(self, **kwargs): TextUnicode.__init__(self, **kwargs) self._cols = kwargs.get("cols", 60) self._rows = kwargs.get("rows", 20) # Allowed: "auto" -> Auto resizing + self._minrows = kwargs.get("minrows", 0) # Minimum number of initial rows when "auto" + self._monospaced = kwargs.get("monospaced", False) # select TT font def value_to_text(self, value): - return "
<pre>%s</pre>" % value
+        if self._monospaced:
+            return "<pre>%s</pre>" % html.attrencode(value)
+        else:
+            return html.attrencode(value).replace("\n", "<br>
") def render_input(self, varprefix, value): + if value == None: + value = "" # should never happen, but avoids exception for invalid input if self._rows == "auto": - attrs = { "onkeyup" : 'valuespec_textarea_resize(this);' } + func = 'valuespec_textarea_resize(this);' + attrs = { "onkeyup" : func, "onmousedown" : func, "onmouseup" : func, "onmouseout" : func } if html.has_var(varprefix): rows = len(html.var(varprefix).splitlines()) else: rows = len(value.splitlines()) + rows = max(rows, self._minrows) else: attrs = {} rows = self._rows + + if self._monospaced: + attrs["class"] = "tt" + html.text_area(varprefix, value, rows=rows, cols=self._cols, attrs = attrs) + # Overridded because we do not want to strip() here and remove '\r' def from_html_vars(self, varprefix): - return html.var_utf8(varprefix, "").replace('\r', '') + text = html.var_utf8(varprefix, "").replace('\r', '') + if text and not text.endswith("\n"): + text += "\n" # force newline at end + return text # A variant of TextAscii() that validates a path to a filename that # lies in an existing directory. +# TODO: Rename the valuespec here to ExistingFilename or somehting similar class Filename(TextAscii): def __init__(self, **kwargs): TextAscii.__init__(self, **kwargs) @@ -540,11 +685,21 @@ self._default_path = kwargs["default"] else: self._default_path = "/tmp/foo" + if "trans_func" in kwargs: + self._trans_func = kwargs["trans_func"] + else: + self._trans_func = None def canonical_value(self): return self._default_path def validate_value(self, value, varprefix): + # The transformation function only changes the value for validation. This is + # usually a function which is later also used within the code which uses + # this variable to e.g. replace macros + if self._trans_func: + value = self._trans_func(value) + if len(value) == 0: raise MKUserError(varprefix, _("Please enter a filename.")) @@ -562,12 +717,20 @@ # permissions and the file might be created with Nagios permissions (on OMD this # is the same, but for others not) + ValueSpec.custom_validate(self, value, varprefix) + class ListOfStrings(ValueSpec): def __init__(self, **kwargs): ValueSpec.__init__(self, **kwargs) - self._valuespec = kwargs.get("valuespec", TextAscii()) + if "valuespec" in kwargs: + self._valuespec = kwargs.get("valuespec") + elif "size" in kwargs: + self._valuespec = TextAscii(size=kwargs["size"]) + else: + self._valuespec = TextAscii() self._vertical = kwargs.get("orientation", "vertical") == "vertical" self._allow_empty = kwargs.get("allow_empty", True) + self._empty_text = kwargs.get("empty_text", "") def render_input(self, vp, value): # Form already submitted? 
@@ -593,6 +756,9 @@ return [] def value_to_text(self, value): + if not value: + return self._empty_text + if self._vertical: s = '' for v in value: @@ -604,10 +770,12 @@ def from_html_vars(self, vp): value = [] nr = 0 - while html.has_var(vp + "_%d" % nr): - s = html.var(vp + "_%d" % nr).strip() - if s: - value.append(s) + while True: + varname = vp + "_%d" % nr + if not html.has_var(varname): + break + if html.var(varname, "").strip(): + value.append(self._valuespec.from_html_vars(varname)) nr += 1 return value @@ -620,9 +788,29 @@ def validate_value(self, value, vp): if len(value) == 0 and not self._allow_empty: - raise MKUserError(vp + "_0", _("Please specify at least one value")) - for nr, s in enumerate(value): - self._valuespec.validate_value(s, vp + "_%d" % nr) + if self._empty_text: + msg = self._empty_text + else: + msg = _("Please specify at least one value") + raise MKUserError(vp + "_0", msg) + if self._valuespec: + for nr, s in enumerate(value): + self._valuespec.validate_value(s, vp + "_%d" % nr) + ValueSpec.custom_validate(self, value, vp) + +class ListOfIntegers(ListOfStrings): + def __init__(self, **kwargs): + int_args = {} + for key in [ "minvalue", "maxvalue" ]: + if key in kwargs: + int_args[key] = kwargs[key] + int_args["display_format"] = "%s" + int_args["convfunc"] = lambda x: x != "" and saveint(x) or "" + int_args["minvalue"] = 17 + int_args["default_value"] = 34 + valuespec = Integer(**int_args) + kwargs["valuespec"] = valuespec + ListOfStrings.__init__(self, **kwargs) # Generic list-of-valuespec ValueSpec with Javascript-based # add/delete/move @@ -633,13 +821,17 @@ self._magic = kwargs.get("magic", "@!@") self._rowlabel = kwargs.get("row_label") self._add_label = kwargs.get("add_label", _("Add new element")) + self._del_label = kwargs.get("del_label", _("Delete this entry")) self._movable = kwargs.get("movable", True) self._totext = kwargs.get("totext") self._allow_empty = kwargs.get("allow_empty", True) + self._empty_text = kwargs.get("empty_text") + if not self._empty_text: + self._empty_text = _("Please specify at least on entry") def del_button(self, vp, nr): js = "valuespec_listof_delete(this, '%s', '%s')" % (vp, nr) - html.icon_button("#", _("Delete this entry"), "delete", onclick=js) + html.icon_button("#", self._del_label, "delete", onclick=js) def move_button(self, vp, nr, where): js = "valuespec_listof_move(this, '%s', '%s', '%s')" % (vp, nr, where) @@ -669,7 +861,7 @@ # Render reference element for cloning html.write('
' % varprefix) html.write('') + + if self._include_time: + if self._show_titles: + html.write('') + + if self._show_titles: + html.write('') def set_focus(self, varprefix): html.set_focus(varprefix + "_year") @@ -1404,29 +1935,52 @@ def from_html_vars(self, varprefix): parts = [] - for what, mmin, mmax in [ + entries = [ ("year", 1970, 2038), ("month", 1, 12), - ("day", 1, 31)]: + ("day", 1, 31)] + if self._include_time: + entries += [ + ("hour", 0, 23), + ("min", 0, 59), + ("sec", 0, 59), + ] + + for what, mmin, mmax in entries: try: varname = varprefix + "_" + what part = int(html.var(varname)) except: - raise MKUserError(varname, _("Please enter a correct number")) + if self._allow_empty: + return None + else: + raise MKUserError(varname, _("Please enter a correct number")) if part < mmin or part > mmax: - raise MKUserError(varname, _("The value for %s must be between %d and %d" % (mmin, mmax))) + raise MKUserError(varname, _("The value for %s must be between %d and %d" % (_(what), mmin, mmax))) parts.append(part) - parts += [0] * 6 + + # Construct broken time from input fields. Assume no-dst + parts += [0] * (self._include_time and 3 or 6) + # Convert to epoch + epoch = time.mktime(tuple(parts)) + # Convert back to localtime in order to know DST setting + localtime = time.localtime(epoch) + # Enter DST setting of that time + parts[-1] = localtime.tm_isdst + # Convert to epoch again return time.mktime(tuple(parts)) def validate_datatype(self, value, varprefix): + if value == None and self._allow_empty: + return if type(value) not in [ int, float ]: raise MKUserError(varprefix, _("The type of the timestamp must be int or float, but is %s") % type_name(value)) def validate_value(self, value, varprefix): - if value < 0 or int(value) > (2**31-1): + if (not self._allow_empty and value == None) or value < 0 or int(value) > (2**31-1): return MKUserError(varprefix, _("%s is not a valid UNIX timestamp") % value) + ValueSpec.custom_validate(self, value, varprefix) # Valuespec for entering times like 00:35 or 16:17. Currently @@ -1458,7 +2012,7 @@ def from_html_vars(self, varprefix): # Fully specified - text = html.var(varprefix).strip() + text = html.var(varprefix, "").strip() if not text: return None @@ -1498,6 +2052,7 @@ raise MKUserError(varprefix, _("The time must not be greater than %02d:%02d." 
% max_value)) elif value[0] < 0 or value[1] < 0 or value[0] > 24 or value[1] > 59: raise MKUserError(varprefix, _("Hours/Minutes out of range")) + ValueSpec.custom_validate(self, value, varprefix) # Range like 00:15 - 18:30 @@ -1557,6 +2112,186 @@ self._bounds[1].validate_value(value[1], varprefix + "_until") if value[0] > value[1]: raise MKUserError(varprefix + "_until", _("The from time must not be greater then the until time.")) + ValueSpec.custom_validate(self, value, varprefix) + +month_names = [ + _("January"), _("February"), _("March"), _("April"), + _("May"), _("June"), _("July"), _("August"), + _("September"), _("October"), _("November"), _("December") +] + +class Timerange(CascadingDropdown): + def __init__(self, **kwargs): + self._title = _('Time range') + + if 'choices' not in kwargs: + kwargs['choices'] = [] + + if kwargs.get('allow_empty', False): + kwargs['choices'] += [ + (None, ''), + ] + + kwargs['choices'] += [ + ( "d0", _("Today") ), + ( "d1", _("Yesterday") ), + + ( "w0", _("This week") ), + ( "w1", _("Last week") ), + + ( "m0", _("This month") ), + ( "m1", _("Last month") ), + + ( "y0", _("This year") ), + ( "y1", _("Last year") ), + + ( "age", _("The last..."), Age() ), + ( "date", _("Explicit date..."), + Tuple( + orientation = "horizontal", + title_br = False, + elements = [ + AbsoluteDate(title = _("From:")), + AbsoluteDate(title = _("To:")), + ], + ), + ), + ] + + if kwargs.get('include_time', False): + kwargs['choices'].append( + ( "time", _("Explicit time..."), + Tuple( + orientation = "horizontal", + title_br = False, + elements = [ + AbsoluteDate( + title = _("From:"), + include_time = True, + ), + AbsoluteDate( + title = _("To:"), + include_time = True, + ), + ], + ), + ) + ) + + CascadingDropdown.__init__(self, **kwargs) + + def compute_range(self, rangespec): + now = time.time() + if rangespec[0] == 'age': + from_time = now - rangespec[1] + until_time = now + title = _("The last ") + Age().value_to_text(rangespec[1]) + return (from_time, until_time), title + + elif rangespec[0] in [ 'date', 'time' ]: + from_time, until_time = rangespec[1] + if from_time > until_time: + raise MKUserError("avo_rangespec_9_0_year", _("The end date must be after the start date")) + if rangespec[0] == 'date': + until_time += 86400 # Consider *end* of this day + title = AbsoluteDate().value_to_text(from_time) + " ... 
" + \ + AbsoluteDate().value_to_text(until_time) + return (from_time, until_time), title + + else: + # year, month, day_of_month, hour, minute, second, day_of_week, day_of_year, is_daylightsavingtime + broken = list(time.localtime(now)) + broken[3:6] = 0, 0, 0 # set time to 00:00:00 + midnight = time.mktime(broken) + + until_time = now + if rangespec[0] == 'd': # this/last Day + from_time = time.mktime(broken) + titles = _("Today"), _("Yesterday") + + elif rangespec[0] == 'w': # week + from_time = midnight - (broken[6]) * 86400 + titles = _("This week"), _("Last week") + + elif rangespec[0] == 'm': # month + broken[2] = 1 + from_time = time.mktime(broken) + last_year = broken[0] - ((broken[1] == 1) and 1 or 0) + titles = month_names[broken[1] - 1] + " " + str(broken[0]), \ + month_names[(broken[1] + 10) % 12] + " " + str(last_year) + + elif rangespec[0] == 'y': # year + broken[1:3] = [1, 1] + from_time = time.mktime(broken) + titles = str(broken[0]), str(broken[0]-1) + + if rangespec[1] == '0': + return (from_time, now), titles[0] + + else: # last (previous) + if rangespec[0] == 'd': + return (from_time - 86400, from_time), titles[1] + elif rangespec[0] == 'w': + return (from_time - 7 * 86400, from_time), titles[1] + + until_time = from_time + from_broken = list(time.localtime(from_time)) + if rangespec[0] == 'y': + from_broken[0] -= 1 + else: # m + from_broken[1] -= 1 + if from_broken[1] == 0: + from_broken[1] = 12 + from_broken[0] -= 1 + return (time.mktime(from_broken), until_time), titles[1] + +class PNPTimerange(Timerange): + def __init__(self, **kwargs): + choosable = [ + ("0", _("4 Hours")), ("1", _("25 Hours")), + ("2", _("One Week")), ("3", _("One Month")), + ("4", _("One Year")), + ] + + if kwargs.get('allow_all', True): + choosable.append(("", _("All"))) + + kwargs['choices'] = [ + ('pnp_view', _("PNP View"), DropdownChoice( + default_value = '1', + choices = choosable, + )), + ] + Timerange.__init__(self, **kwargs) + + +# A selection of various date formats +def DateFormat(**args): + args.setdefault("title", _("Date format")) + args.setdefault("default_value", "%Y-%m-%d") + args["choices"] = [ + ("%Y-%m-%d", "1970-12-18"), + ("%d.%m.%Y", "18.12.1970"), + ("%m/%d/%Y", "12/18/1970"), + ("%d.%m.", "18.12."), + ("%m/%d", "12/18"), + ] + return DropdownChoice(**args) + + +def TimeFormat(**args): + args.setdefault("title", _("Time format")) + args.setdefault("default_value", "%H:%M:%S") + args["choices"] = [ + ("%H:%M:%S", "18:27:36"), + ("%l:%M:%S %p", "12:27:36 PM"), + ("%H:%M", "18:27"), + ("%l:%M %p", "6:27 PM"), + ("%H", "18"), + ("%l %p", "6 PM"), + ] + return DropdownChoice(**args) + # Make a configuration value optional, i.e. it may be None. @@ -1587,7 +2322,7 @@ html.write("") - if self._label: + if self._label is not None: label = self._label elif self.title(): label = _(self.title()) @@ -1642,6 +2377,63 @@ def validate_value(self, value, varprefix): if value != self._none_value: self._valuespec.validate_value(value, varprefix + "_value") + ValueSpec.custom_validate(self, value, varprefix) + +# Makes a configuration value optional, while displaying the current +# value as text with a checkbox in front of it. When the checkbox is being checked, +# the text hides and the encapsulated valuespec is being shown. 
+class OptionalEdit(Optional): + def __init__(self, valuespec, **kwargs): + Optional.__init__(self, valuespec, **kwargs) + self._label = '' + + def render_input(self, varprefix, value): + div_id = "option_" + varprefix + checked = html.get_checkbox(varprefix + "_use") + if checked == None: + if self._negate: + checked = True + else: + checked = False + + html.write("") + + if self._label is not None: + label = self._label + elif self.title(): + label = _(self.title()) + elif self._negate: + label = _(" Ignore this option") + else: + label = _(" Activate this option") + + html.checkbox(varprefix + "_use" , checked, + onclick="valuespec_toggle_option(this, %r, %r);valuespec_toggle_option(this, %r, %r)" % + (div_id + '_on', self._negate and 1 or 0, + div_id + '_off', self._negate and 0 or 1), + label = label) + + html.write(" ") + html.write("") + + display_on = checked == self._negate and 'none' or '' + display_off = checked != self._negate and 'none' or '' + + if value == None: + value = self._valuespec.default_value() + + html.write('' % (div_id, display_off)) + html.write(value) + html.write('') + + html.write('' % (div_id, display_on)) + if self._valuespec.title(): + html.write(self._valuespec.title() + " ") + self._valuespec.render_input(varprefix + "_value", value) + html.write('\n') + + def from_html_vars(self, varprefix): + return self._valuespec.from_html_vars(varprefix + "_value") # Handle case when there are several possible allowed formats # for the value (e.g. strings, 4-tuple or 6-tuple like in SNMP-Communities) @@ -1652,6 +2444,8 @@ ValueSpec.__init__(self, **kwargs) self._elements = kwargs["elements"] self._match = kwargs.get("match") # custom match function + self._style = kwargs.get("style", "radio") # alternative: "dropdown" + self._show_alternative_title = kwargs.get("show_alternative_title") # Return the alternative (i.e. valuespec) # that matches the datatype of a given value. 
We assume @@ -1668,6 +2462,38 @@ pass def render_input(self, varprefix, value): + if self._style == "radio": + self.render_input_radio(varprefix, value) + else: + self.render_input_dropdown(varprefix, value) + + def render_input_dropdown(self, varprefix, value): + mvs = self.matching_alternative(value) + options = [] + sel_option = html.var(varprefix + "_use") + for nr, vs in enumerate(self._elements): + if not sel_option and vs == mvs: + sel_option = str(nr) + options.append((str(nr), vs.title())) + onchange="valuespec_cascading_change(this, '%s', %d);" % (varprefix, len(options)) + html.select(varprefix + "_use", options, sel_option, onchange) + html.write(" ") + + for nr, vs in enumerate(self._elements): + if str(nr) == sel_option: + disp = "" + cur_val = value + else: + disp = "none" + cur_val = vs.default_value() + + html.write('' % + (varprefix, nr, disp)) + html.help(vs.help()) + vs.render_input(varprefix + "_%d" % nr, cur_val) + html.write("") + + def render_input_radio(self, varprefix, value): mvs = self.matching_alternative(value) for nr, vs in enumerate(self._elements): if html.has_var(varprefix + "_use"): @@ -1686,7 +2512,7 @@ if vs == mvs: val = value else: - val = vs.canonical_value() + val = vs.default_value() vs.render_input(varprefix + "_%d" % nr, val) if title: html.write("\n") @@ -1698,12 +2524,24 @@ def canonical_value(self): return self._elements[0].canonical_value() + def default_value(self): + try: + if type(self._default_value) == type(lambda:True): + return self._default_value() + else: + return self._default_value + except: + return self._elements[0].default_value() + def value_to_text(self, value): vs = self.matching_alternative(value) if vs: - return vs.value_to_text(value) + output = "" + if self._show_alternative_title and vs.title(): + output = "%s
" % vs.title() + return output + vs.value_to_text(value) else: - return _("invalid:") + " " + str(value) + return _("invalid:") + " " + html.attrencode(str(value)) def from_html_vars(self, varprefix): nr = int(html.var(varprefix + "_use")) @@ -1726,6 +2564,7 @@ for nr, v in enumerate(self._elements): if vs == v: vs.validate_value(value, varprefix + "_%d" % nr) + ValueSpec.custom_validate(self, value, varprefix) # Edit a n-tuple (with fixed size) of values @@ -1735,6 +2574,7 @@ self._elements = kwargs["elements"] self._show_titles = kwargs.get("show_titles", True) self._orientation = kwargs.get("orientation", "vertical") + self._title_br = kwargs.get("title_br", True) def canonical_value(self): return tuple([x.canonical_value() for x in self._elements]) @@ -1772,7 +2612,11 @@ elif self._orientation == "horizontal": html.write("%s" % title) html.help(element.help()) - html.write("
") + html.write("
") + if self._title_br: + html.write("
") + else: + html.write(" ") else: html.write(" ") html.help(element.help()) @@ -1808,6 +2652,7 @@ for no, (element, val) in enumerate(zip(self._elements, value)): vp = varprefix + "_" + str(no) element.validate_value(val, vp) + ValueSpec.custom_validate(self, value, varprefix) def validate_datatype(self, value, varprefix): if type(value) != tuple: @@ -1827,6 +2672,8 @@ self._elements = kwargs["elements"] self._empty_text = kwargs.get("empty_text", _("(no parameters)")) self._required_keys = kwargs.get("required_keys", []) + self._ignored_keys = kwargs.get("ignored_keys", []) + self._default_keys = kwargs.get("default_keys", []) # keys present in default value if "optional_keys" in kwargs: ok = kwargs["optional_keys"] if type(ok) == list: @@ -1839,22 +2686,42 @@ self._optional_keys = False else: self._optional_keys = True + if "hidden_keys" in kwargs: + self._hidden_keys = kwargs["hidden_keys"] + else: + self._hidden_keys = [] self._columns = kwargs.get("columns", 1) # possible: 1 or 2 self._render = kwargs.get("render", "normal") # also: "form" -> use forms.section() self._form_narrow = kwargs.get("form_narrow", False) # used if render == "form" - self._headers = kwargs.get("headers") # "sup" -> small headers in online mode + self._form_isopen = kwargs.get("form_isopen", True) # used if render == "form" + self._headers = kwargs.get("headers") # "sup" -> small headers in oneline mode + self._migrate = kwargs.get("migrate") # value migration from old tuple version + self._indent = kwargs.get("indent", True) + + def migrate(self, value): + if self._migrate: + return self._migrate(value) + else: + return value def _get_elements(self): - if type(self._elements) == list: + if type(self._elements) == type(lambda: None): + return self._elements() + elif type(self._elements) == list: return self._elements else: - return self._elements() + return [] - def render_input(self, varprefix, value): + # Additional variale form allows to specify the rendering + # style right now + def render_input(self, varprefix, value, form=None): + value = self.migrate(value) if type(value) != dict: value = {} # makes code simpler in complain phase - if self._render == "form": + if form == True: + self.render_input_form(varprefix, value) + elif self._render == "form" and form == None: self.render_input_form(varprefix, value) else: self.render_input_normal(varprefix, value, self._render == "oneline") @@ -1866,17 +2733,24 @@ if headers_sup: html.write('') for param, vs in self._get_elements(): + if param in self._hidden_keys: + continue if not oneline: html.write('') div_id = varprefix + "_d_" + param vp = varprefix + "_p_" + param + colon_printed = False if self._optional_keys and param not in self._required_keys: visible = html.get_checkbox(vp + "_USE") if visible == None: visible = param in value + label = vs.title() + if self._columns == 2: + label += ":" + colon_printed = True html.checkbox(vp + "_USE", param in value, onclick="valuespec_toggle_option(this, %r)" % div_id, - label=vs.title()) + label=label) else: visible = True if vs.title(): @@ -1890,7 +2764,7 @@ html.write(": ") if self._columns == 2: - if vs.title(): + if vs.title() and not colon_printed: html.write(':') html.help(vs.help()) if not oneline: @@ -1899,7 +2773,8 @@ if not oneline: html.write("
") - html.write('
' % ( + html.write('
' % ( + ((self._indent and self._columns == 1) and " indent" or ""), div_id, not visible and "none" or (oneline and "inline-block" or ""))) if self._columns == 1: html.help(vs.help()) @@ -1926,11 +2801,14 @@ for header, sections in self._headers: self.render_input_form_header(varprefix, value, header, sections) else: - self.render_input_form_header(varprefix, value, self.title(), None) + self.render_input_form_header(varprefix, value, self.title() or _("Properties"), None) def render_input_form_header(self, varprefix, value, title, sections): - forms.header(title, narrow=self._form_narrow) + forms.header(title, isopen=self._form_isopen, narrow=self._form_narrow) for param, vs in self._get_elements(): + if param in self._hidden_keys: + continue + if sections and param not in sections: continue @@ -1970,12 +2848,13 @@ def default_value(self): def_val = {} for name, vs in self._get_elements(): - if name in self._required_keys or not self._optional_keys: + if name in self._required_keys or not self._optional_keys or name in self._default_keys: def_val[name] = vs.default_value() return def_val def value_to_text(self, value): + value = self.migrate(value) oneline = self._render == "oneline" if not value: return self._empty_text @@ -2009,6 +2888,8 @@ return value def validate_datatype(self, value, varprefix): + value = self.migrate(value) + if type(value) != dict: raise MKUserError(varprefix, _("The type must be a dictionary, but it is a %s") % type_name(value)) @@ -2018,24 +2899,29 @@ try: vs.validate_datatype(value[param], vp) except MKUserError, e: - raise MKUserError(e.varname, _("%s: %s") % (vs.title(), e.message)) + raise MKUserError(e.varname, _("%s: %s") % (vs.title(), e)) elif not self._optional_keys or param in self._required_keys: - raise MKUserError(varprefix, _("The entry %s is missing") % vp.title()) + raise MKUserError(varprefix, _("The entry %s is missing") % vs.title()) # Check for exceeding keys allowed_keys = [ p for (p,v) in self._get_elements() ] + if self._ignored_keys: + allowed_keys += self._ignored_keys for param in value.keys(): if param not in allowed_keys: raise MKUserError(varprefix, _("Undefined key '%s' in the dictionary. 
Allowed are %s.") % (param, ", ".join(allowed_keys))) def validate_value(self, value, varprefix): + value = self.migrate(value) + for param, vs in self._get_elements(): if param in value: vp = varprefix + "_p_" + param vs.validate_value(value[param], vp) elif not self._optional_keys or param in self._required_keys: raise MKUserError(varprefix, _("The entry %s is missing") % vs.title()) + ValueSpec.custom_validate(self, value, varprefix) # Base class for selection of a Nagios element out @@ -2049,6 +2935,7 @@ ValueSpec.__init__(self, **kwargs) self._loaded_at = None self._label = kwargs.get("label") + self._empty_text = kwargs.get("empty_text", _("There are not defined any elements for this selection yet.")) def load_elements(self): if self._loaded_at != id(html): @@ -2059,14 +2946,11 @@ self.load_elements() if len(self._elements) > 0: return self._elements.keys()[0] - else: - raise MKUserError(None, - _("There are not defined any elements for this selection yet.")) def render_input(self, varprefix, value): self.load_elements() if len(self._elements) == 0: - html.write(_("There are not defined any elements for this selection yet.")) + html.write(self._empty_text) else: if self._label: html.write("%s " % self._label) @@ -2082,10 +2966,10 @@ def validate_value(self, value, varprefix): self.load_elements() if len(self._elements) == 0: - raise MKUserError(varprefix, - _("You cannot save this rule. There are not defined any elements for this selection yet.")) + raise MKUserError(varprefix, _("You cannot save this rule.") + ' ' + self._empty_text) if value not in self._elements: raise MKUserError(varprefix, _("%s is not an existing element in this selection.") % (value,)) + ValueSpec.custom_validate(self, value, varprefix) def validate_datatype(self, value, varprefix): if type(value) != str: @@ -2157,6 +3041,7 @@ def validate_value(self, value, varprefix): self._valuespec.validate_value(value, varprefix) + ValueSpec.custom_validate(self, value, varprefix) # Transforms the value from one representation to @@ -2185,17 +3070,23 @@ else: return value + def title(self): + if self._title: + return self._title + else: + return self._valuespec.title() + def render_input(self, varprefix, value): - self._valuespec.render_input( varprefix, self.forth(value)) + self._valuespec.render_input(varprefix, self.forth(value)) def set_focus(self, *args): self._valuespec.set_focus(*args) def canonical_value(self): - return self._back(self._valuespec.canonical_value()) + return self.back(self._valuespec.canonical_value()) def default_value(self): - return self._back(self._valuespec.default_value()) + return self.back(self._valuespec.default_value()) def value_to_text(self, value): return self._valuespec.value_to_text(self.forth(value)) @@ -2208,8 +3099,9 @@ def validate_value(self, value, varprefix): self._valuespec.validate_value(self.forth(value), varprefix) + ValueSpec.custom_validate(self, value, varprefix) -class LDAPDistinguishedName(TextAscii): +class LDAPDistinguishedName(TextUnicode): def __init__(self, **kwargs): TextAscii.__init__(self, **kwargs) self.enforce_suffix = kwargs.get('enforce_suffix') @@ -2220,6 +3112,7 @@ # Check wether or not the given DN is below a base DN if self.enforce_suffix and value and not value.lower().endswith(self.enforce_suffix.lower()): raise MKUserError(varprefix, _('Does not ends with "%s".') % self.enforce_suffix) + ValueSpec.custom_validate(self, value, varprefix) class Password(TextAscii): @@ -2251,4 +3144,125 @@ html.icon_button("#", _(u"Randomize password"), "random", 
onclick="vs_passwordspec_randomize(this);") +class FileUpload(ValueSpec): + def __init__(self, **kwargs): + ValueSpec.__init__(self, **kwargs) + self._allow_empty = kwargs.get('allow_empty', True) + + def canonical_value(self): + if self._allow_empty: + return None + else: + return '' + + def validate_value(self, value, varprefix): + if not self._allow_empty and value == None: + raise MKUserError(varprefix, _('Please select a file.')) + + def render_input(self, varprefix, value): + html.upload_file(varprefix) + + def from_html_vars(self, varprefix): + # returns a triple of (filename, mime-type, content) + return html.uploaded_file(varprefix) + +class IconSelector(ValueSpec): + def __init__(self, **kwargs): + ValueSpec.__init__(self, **kwargs) + self._prefix = kwargs.get('prefix', 'icon_') + self._subdir = kwargs.get('subdir', '') + self._num_cols = kwargs.get('num_cols', 12) + self._allow_empty = kwargs.get('allow_empty', True) + if self._subdir: + self._html_path = os.path.join('images', self._subdir) + else: + self._html_path = 'images' + self._empty_img = kwargs.get('emtpy_img', 'empty') + + self._exclude = [ + 'trans', + 'empty', + ] + + def available_icons(self): + if defaults.omd_root: + dirs = [ + os.path.join(defaults.omd_root, "local/share/check_mk/web/htdocs/images", self._subdir), + os.path.join(defaults.omd_root, "share/check_mk/web/htdocs/images", self._subdir), + ] + else: + dirs = [ os.path.join(defaults.web_dir, "htdocs/images", self._subdir) ] + icons = set([]) + for dir in dirs: + if os.path.exists(dir): + icons.update([ i[len(self._prefix):-4] for i in os.listdir(dir) + if i[-4:] == '.png' and os.path.isfile(dir + "/" + i) + and i.startswith(self._prefix) ]) + + for exclude in self._exclude: + try: + icons.remove(exclude) + except KeyError: + pass + + icons = list(icons) + icons.sort() + return icons + + def render_icon(self, icon, onclick = '', title = '', id = ''): + path = "%s/%s%s.png" % (self._html_path, self._prefix, html.attrencode(icon)) + + if onclick: + html.write('' % onclick) + html.write('' % (id, title, path)) + if onclick: + html.write('') + + def render_input(self, varprefix, value): + if not value: + value = self._empty_img + + html.write('') + + def from_html_vars(self, varprefix): + icon = html.var(varprefix + '_value') + if icon == 'empty': + return None + else: + return icon + + def value_to_text(self, value): + self.render_icon(value) + + def validate_datatype(self, value, varprefix): + if value is not None and type(value) != str: + raise MKUserError(varprefix, _("The type is %s, but should be str") % type(value)) + + def validate_value(self, value, varprefix): + if value and value not in self.available_icons(): + raise MKUserError(varprefix, _("The selected icon image does not exist.")) diff -Nru check-mk-1.2.2p3/htdocs/views.css check-mk-1.2.6p12/htdocs/views.css --- check-mk-1.2.2p3/htdocs/views.css 2013-11-05 09:23:08.000000000 +0000 +++ check-mk-1.2.6p12/htdocs/views.css 2015-09-21 10:59:54.000000000 +0000 @@ -70,6 +70,7 @@ div.floatfilter.double { height: 110px; + display: table-cell; } div.floatfilter input[type=text] { @@ -92,10 +93,13 @@ height: 30px; margin-top: 0px; margin-left: 0px; - vertical-align: middle; display: table-cell; padding-left: 4px; text-align: left; + padding-top: 5px; +} +div.floatfilter.double .content { + height: 82px; } div.floatfilter .content > select { width: 300px; @@ -107,13 +111,52 @@ width: 240px; } +div.floatfilter .content .multigroup table { + margin-top: -6px; +} + +div.floatfilter .content .multigroup 
select { + width: 148px; + height: 66px; +} + +div.floatfilter .content .multigroup select.large { + width: 228px; +} + +div.floatfilter .content .multigroup select.small { + width: 68px; +} + div.floatfilter .content .filtertime select { width: 136px; } -div.floatfilter.double .content { - height: 72px; +/* Improving styling of floatfilters that are contained in a ListOf() */ +table.valuespec_listof.filter { + width: 300px; + border-spacing: 0px; +} +select.vlof_filter { + width: 312px; +} +table.valuespec_listof td.vlof_content.filter { + width: 1px; + border-style: none; +} +table.valuespec_listof td.vlof_buttons.filter { + width: 1px; + border-style: none; + vertical-align: top; } +table.valuespec_listof td.vlof_buttons.filter img { + position: relative; + top: 2px; + left: -28px; + width: 16px; + height: 16px; +} + table.filtertime td { vertical-align: middle; } @@ -364,52 +407,6 @@ div.clear { clear: left; } -div.filtersetting > select { - float: left; - margin-right: 10px; - height: 26px; -} -div.filtersetting.show > select { border: 3px solid #ff4; } -div.filtersetting.hide > select { border: 3px solid #f8f; } -div.filtersetting.off > select { border: 3px solid #444; } -div.filtersetting.hard > select { border: 3px solid #45f; } - -div.filtersetting.off > div, div.filtersetting.hide > div { - display: none; -} - - -/* The column editor is part of the view editor */ -div.columneditor { - margin-bottom: 6px; -} - -div.columneditor table { - padding: 0; - margin: 0; - background-image: url("images/form_background.png"); - background-repeat: repeat; - width: 100%; -} - -div.columneditor td.cebuttons { - width: 33px; -} -div.columneditor td.celeft { - width: 60px; -} - -div.columneditor td { - padding: 0 4px; -} - -div.columneditor select { - width: 100%; -} - -/*div.columneditor td, div.columneditor select { - font-size: 0.9em; -}*/ input#try{ float:left; @@ -469,3 +466,251 @@ vertical-align: middle; background-color: #8EAEB8; } + +/* Availability */ +table.data.availability { + width: 100%; +} + +table.data.availability td.number { + width: 60px; +} + +table.data.availability td.unused { + color: #888; + text-shadow: 1px 1px 0.5px #fff; +} + +table.data.availability tr.summary { + background-color: #bbb; +} +table.data.availability tr.summary td { + border-top: 1px solid black; + font-weight: bold; + border-bottom-style: none; + padding-bottom: 1px; + padding-top: 1px; +} + +table.data td.flapping, table.timeline td.flapping, div.avlegend.timeline div.state.flapping { + /* background-image: url("images/bg_flapping.png"); + background-repeat: repeat; */ + background-color: #f0f; +} + +table.data td.downtime, table.timeline td.downtime, div.avlegend.timeline div.state.downtime { + background-color: #0af; +} +table.data td.hostdown, table.timeline td.hostdown, div.avlegend.timeline div.state.hostdown { + background-color: #048; + color: white; +} +table.data td.unmonitored, table.timeline td.unmonitored, div.avlegend.timeline div.state.unmonitored { + background-color: #cacaca; + color: #888; +} +table.timeline td.ooservice, div.avlegend.timeline div.state.ooservice { + background-color: #cacaca; + background-image: url("images/ooservice.png"); + background-repeat: repeat; +} +table.data.availability tr.odd0 td.number { + background-image: url("images/white_30percent.png"); + background-repeat: repeat; +} + +table.data.availability td.number.stats { + background-image: url("images/white_30percent.png"); +} + +table.data.availability tr.odd0 td.number.stats { + background-image: 
url("images/white_50percent.png"); +} + + +div.floatfilter.rangespec table.valuespec_tuple { + width: 100%; +} + +div.floatfilter.rangespec table.valuespec_tuple span.title { + position: relative; + top: 7px; +} + +div#avoptions_on { + margin-right: 3px; +} + +table.data tr.data > td.timeline { + width: 500px; + padding: 0px; +} + +table.timeline { + height: 30px; + border-spacing: 0px; + border-collapse: collapse; +} + +table.timeline.standalone { + box-shadow: 0px 0px 1px #ccf; + margin-bottom: 20px; + width: 800px; +} + +table.timeline.inline { + border-style: none; + width: 500px; +} +table.timeline.inline { + border-style: none; + width: 500px; + box-shadow: 0px 0px 1px #555 inset; +} + +table.timeline td { + padding: 0px; + border-style: none; +} + +table.timeline td.chaos { + background-color: #8844ff; +} + +table.timeline.standalone td { + border: 1px solid #444; +} + +div.timelinerange { + position: relative; +} + +/* We render the small vertical lines in a way that only + the left border is visible but the thing is thick 5 pixels. + That makes hovering over it more easy. */ +div.timelinerange div.timelinechoord { + position: absolute; + top: 0px; + width: 5px; + height: 30px; + border-style: none none none solid; + border-width: 0px 0px 0px 1px; + border-color: #666; +} + +table.timelineevents { + width: 800px; +} +table.timelineevents tr.hilite { + background-color: #6ab; + color: white; +} +div.timelinerange { + width: 800px; + float: none; +} +div.timelinerange.inline { + width: 500px; +} +div.timelinerange div { + width: 300px; + float: left; +} +div.timelinerange div.until { + text-align: right; + float: right; +} + +table.data.timewarp { + width: 800px; + margin-bottom: 20px; +} + +div.avlegend { + background-image: url("images/white_20percent.png"); + background-repeat: repeat; + padding: 5px 15px; + border-radius: 5px; + margin-top: 10px; + text-align: right; +} +div.avlegend h3 { + margin-top: 0px; + font-size: 13px; + color: black; +} +div.avlegend div { + display: inline-block; + width: 50px; + white-space: nowrap; + margin-right: 5px; +} +div.avlegend div.state { + text-align: center; + padding: 2px 0px; + font-size: 12px; +} +div.avlegend div:last-child { + margin-right: 0px; +} +div.avlegend.levels div.level { + margin-right: 15px; + color: black; +} + +div.avlegend.timeline div.state { + width: 75px; +} +div.avlegend.timeline div.state.ooservice { + color: black; +} + + +/*--Inventory--------------------------------------------------------------. 
+| ___ _ | +| |_ _|_ ____ _____ _ __ | |_ ___ _ __ _ _ | +| | || '_ \ \ / / _ \ '_ \| __/ _ \| '__| | | | | +| | || | | \ V / __/ | | | || (_) | | | |_| | | +| |___|_| |_|\_/ \___|_| |_|\__\___/|_| \__, | | +| |___/ | ++--------------------------------------------------------------------------+ +| Styles for displaying HW/SW-Inventory data | +'-------------------------------------------------------------------------*/ + +td.invtree { + width: 100%; +} + +td.invtree b.treeangle.title { + color: black; + font-weight: normal; +} + +td.invtree ul.treeangle { + margin: 0px; + margin-bottom: 3px; +} + +td.invtree table { + border-collapse: collapse; + border: 0.5px solid black; + box-shadow: 0.5px 0.5px 1px #ccc; + margin-top: 3px; +} + +td.invtree table td, +td.invtree table th, +table.data.single tr.data td.invtree table td, +table.data.single tr.data td.invtree table th +{ + border: 1px solid #888; + padding: 1px 5px; + height: 14px; +} +td.invtree table th { + text-align: left; +} +td.invtree table td { + background-color: #fff; +} + diff -Nru check-mk-1.2.2p3/htdocs/views.py check-mk-1.2.6p12/htdocs/views.py --- check-mk-1.2.2p3/htdocs/views.py 2013-11-05 09:23:08.000000000 +0000 +++ check-mk-1.2.6p12/htdocs/views.py 2015-09-21 10:59:54.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. @@ -24,13 +24,9 @@ # to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, # Boston, MA 02110-1301 USA. -import config, defaults, livestatus, htmllib, time, os, re, pprint, time, copy -import weblib, traceback, forms +import config, defaults, livestatus, time, os, re, pprint, time +import weblib, traceback, forms, valuespec, inventory, visuals from lib import * -from pagefunctions import * - -max_display_columns = 12 -max_sort_columns = 5 # Python 2.3 does not have 'set' in normal namespace. 
# But it can be imported from 'sets' @@ -45,23 +41,27 @@ # Load all view plugins def load_plugins(): global loaded_with_language + if loaded_with_language == current_language: + # always reload the hosttag painters, because new hosttags might have been + # added during runtime + load_host_tag_painters() return global multisite_datasources ; multisite_datasources = {} - global multisite_filters ; multisite_filters = {} global multisite_layouts ; multisite_layouts = {} global multisite_painters ; multisite_painters = {} global multisite_sorters ; multisite_sorters = {} global multisite_builtin_views ; multisite_builtin_views = {} global multisite_painter_options ; multisite_painter_options = {} global multisite_commands ; multisite_commands = [] - global ubiquitary_filters ; ubiquitary_filters = [] # Always show this filters global view_hooks ; view_hooks = {} + global inventory_displayhints ; inventory_displayhints = {} - config.declare_permission_section("action", _("Commands on host and services")) + config.declare_permission_section("action", _("Commands on host and services"), do_sort = True) load_web_plugins("views", globals()) + load_host_tag_painters() # This must be set after plugin loading to make broken plugins raise # exceptions all the time and not only the first time (when the plugins @@ -69,24 +69,645 @@ loaded_with_language = current_language # Declare permissions for builtin views - config.declare_permission_section("view", _("Builtin views")) + config.declare_permission_section("view", _("Multisite Views"), do_sort = True) for name, view in multisite_builtin_views.items(): config.declare_permission("view.%s" % name, - view["title"], - view["description"], + _u(view["title"]), + _u(view["description"]), config.builtin_role_ids) + # Make sure that custom views also have permissions + config.declare_dynamic_permissions(lambda: visuals.declare_custom_permissions('views')) + # Add painter names to painter objects (e.g. for JSON web service) for n, p in multisite_painters.items(): p["name"] = n +# Load all views - users or builtins +def load_views(): + global multisite_views, available_views + # Skip views which do not belong to known datasources + multisite_views = visuals.load('views', multisite_builtin_views, + skip_func = lambda v: v['datasource'] not in multisite_datasources) + available_views = visuals.available('views', multisite_views) + transform_old_views() + +def permitted_views(): + return available_views + +def all_views(): + return multisite_views + +# Convert views that are saved in the pre 1.2.6-style +# FIXME: Can be removed one day. Mark as incompatible change or similar. +def transform_old_views(): + + for view in multisite_views.values(): + ds_name = view['datasource'] + datasource = multisite_datasources[ds_name] + + if "context" not in view: # legacy views did not have this explicitly + view.setdefault("user_sortable", True) + + if 'context_type' in view: + # This code transforms views from user_views.mk which have been migrated with + # daily snapshots from 2014-08 till beginning 2014-10. 
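+ # (Illustration, hypothetical data: a view dict still carrying the
+ # intermediate 'context_type' key is rewritten in place by
+ # visuals.transform_old_visual() into the current
+ # 'single_infos'/'context' scheme used by the rest of this function.)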
+ visuals.transform_old_visual(view) + + elif 'single_infos' not in view: + # This tries to map the datasource and additional settings of the + # views to get the correct view context + # + # This code transforms views from views.mk (legacy format) to the current format + try: + hide_filters = view.get('hide_filters') + + if 'service' in hide_filters and 'host' in hide_filters: + view['single_infos'] = ['service', 'host'] + elif 'service' in hide_filters and 'host' not in hide_filters: + view['single_infos'] = ['service'] + elif 'host' in hide_filters: + view['single_infos'] = ['host'] + elif 'hostgroup' in hide_filters: + view['single_infos'] = ['hostgroup'] + elif 'servicegroup' in hide_filters: + view['single_infos'] = ['servicegroup'] + elif 'aggr_service' in hide_filters: + view['single_infos'] = ['service'] + elif 'aggr_name' in hide_filters: + view['single_infos'] = ['aggr'] + elif 'aggr_group' in hide_filters: + view['single_infos'] = ['aggr_group'] + elif 'log_contact_name' in hide_filters: + view['single_infos'] = ['contact'] + elif 'event_host' in hide_filters: + view['single_infos'] = ['host'] + elif hide_filters == ['event_id', 'history_line']: + view['single_infos'] = ['history'] + elif 'event_id' in hide_filters: + view['single_infos'] = ['event'] + elif 'aggr_hosts' in hide_filters: + view['single_infos'] = ['host'] + else: + # For all other context types assume the view is showing multiple objects + # and the datasource can simply be gathered from the datasource + view['single_infos'] = [] + except: # Exceptions can happen for views saved with certain GIT versions + if config.debug: + raise + + # Convert from show_filters, hide_filters, hard_filters and hard_filtervars + # to context construct + if 'context' not in view: + view['show_filters'] = view['hide_filters'] + view['hard_filters'] + view['show_filters'] + + single_keys = visuals.get_single_info_keys(view) + + # First get vars for the classic filters + context = {} + filtervars = dict(view['hard_filtervars']) + all_vars = {} + for filter_name in view['show_filters']: + if filter_name in single_keys: + continue # skip conflictings vars / filters + + context.setdefault(filter_name, {}) + try: + f = visuals.get_filter(filter_name) + except: + # The exact match filters have been removed. They where used only as + # link filters anyway - at least by the builtin views. + continue + + for var in f.htmlvars: + # Check whether or not the filter is supported by the datasource, + # then either skip or use the filter vars + if var in filtervars and f.info in datasource['infos']: + value = filtervars[var] + all_vars[var] = value + context[filter_name][var] = value + + # We changed different filters since the visuals-rewrite. This must be treated here, since + # we need to transform views which have been created with the old filter var names. 
+ # Changes which have been made so far: + changed_filter_vars = { + 'serviceregex': { # Name of the filter + # old var name: new var name + 'service': 'service_regex', + }, + 'hostregex': { + 'host': 'host_regex', + }, + 'hostgroupnameregex': { + 'hostgroup_name': 'hostgroup_regex', + }, + 'servicegroupnameregex': { + 'servicegroup_name': 'servicegroup_regex', + }, + 'opthostgroup': { + 'opthostgroup': 'opthost_group', + 'neg_opthostgroup': 'neg_opthost_group', + }, + 'optservicegroup': { + 'optservicegroup': 'optservice_group', + 'neg_optservicegroup': 'neg_optservice_group', + }, + 'hostgroup': { + 'hostgroup': 'host_group', + 'neg_hostgroup': 'neg_host_group', + }, + 'servicegroup': { + 'servicegroup': 'service_group', + 'neg_servicegroup': 'neg_service_group', + }, + 'host_contactgroup': { + 'host_contactgroup': 'host_contact_group', + 'neg_host_contactgroup': 'neg_host_contact_group', + }, + 'service_contactgroup': { + 'service_contactgroup': 'service_contact_group', + 'neg_service_contactgroup': 'neg_service_contact_group', + }, + } + + if filter_name in changed_filter_vars and f.info in datasource['infos']: + for old_var, new_var in changed_filter_vars[filter_name].items(): + if old_var in filtervars: + value = filtervars[old_var] + all_vars[new_var] = value + context[filter_name][new_var] = value + + # Now, when there are single object infos specified, add these keys to the + # context + for single_key in single_keys: + if single_key in all_vars: + context[single_key] = all_vars[single_key] + + view['context'] = context + + # Cleanup unused attributes + for k in [ 'hide_filters', 'hard_filters', 'show_filters', 'hard_filtervars' ]: + try: + del view[k] + except KeyError: + pass + +def save_views(us): + visuals.save('views', multisite_views) + +#. +# .--Table of views------------------------------------------------------. +# | _____ _ _ __ _ | +# | |_ _|_ _| |__ | | ___ ___ / _| __ _(_) _____ _____ | +# | | |/ _` | '_ \| |/ _ \ / _ \| |_ \ \ / / |/ _ \ \ /\ / / __| | +# | | | (_| | |_) | | __/ | (_) | _| \ V /| | __/\ V V /\__ \ | +# | |_|\__,_|_.__/|_|\___| \___/|_| \_/ |_|\___| \_/\_/ |___/ | +# | | +# +----------------------------------------------------------------------+ +# | Show list of all views with buttons for editing | +# '----------------------------------------------------------------------' + +def page_edit_views(): + load_views() + cols = [ (_('Datasource'), lambda v: multisite_datasources[v["datasource"]]['title']) ] + visuals.page_list('views', _("Edit Views"), multisite_views, cols) + +#. +# .--Create View---------------------------------------------------------. 
+# | ____ _ __ ___ | +# | / ___|_ __ ___ __ _| |_ ___ \ \ / (_) _____ __ | +# | | | | '__/ _ \/ _` | __/ _ \ \ \ / /| |/ _ \ \ /\ / / | +# | | |___| | | __/ (_| | || __/ \ V / | | __/\ V V / | +# | \____|_| \___|\__,_|\__\___| \_/ |_|\___| \_/\_/ | +# | | +# +----------------------------------------------------------------------+ +# | Select the view type of the new view | +# '----------------------------------------------------------------------' + +# First step: Select the data source + +# Create datasource selection valuespec, also for other modules +# FIXME: Sort the datasources by (assumed) common usage +def DatasourceSelection(): + # FIXME: Sort the datasources by (assumed) common usage + datasources = [] + for ds_name, ds in multisite_datasources.items(): + datasources.append((ds_name, ds['title'])) + + return DropdownChoice( + title = _('Datasource'), + help = _('The datasources define which type of objects should be displayed with this view.'), + choices = datasources, + sorted = True, + columns = 1, + default_value = 'services', + ) + +def page_create_view(next_url = None): + + vs_ds = DatasourceSelection() + + ds = 'services' # Default selection + html.header(_('Create View'), stylesheets=["pages"]) + html.begin_context_buttons() + back_url = html.var("back", "") + html.context_button(_("Back"), back_url or "edit_views.py", "back") + html.end_context_buttons() + + if html.var('save') and html.check_transaction(): + try: + ds = vs_ds.from_html_vars('ds') + vs_ds.validate_value(ds, 'ds') + if not next_url: + next_url = html.makeuri([('datasource', ds)], filename = "create_view_infos.py") + else: + next_url = next_url + '&datasource=%s' % ds + html.http_redirect(next_url) + return + + except MKUserError, e: + html.write("
<div class=error>%s</div>
\n" % e) + html.add_user_error(e.varname, e) -################################################################################## -# Layouts -################################################################################## + html.begin_form('create_view') + html.hidden_field('mode', 'create') + + forms.header(_('Select Datasource')) + forms.section(vs_ds.title()) + vs_ds.render_input('ds', ds) + html.help(vs_ds.help()) + forms.end() + html.button('save', _('Continue'), 'submit') + + html.hidden_fields() + html.end_form() + html.footer() + +def page_create_view_infos(): + ds_name = html.var('datasource') + if ds_name not in multisite_datasources: + raise MKGeneralException(_('The given datasource is not supported')) + + visuals.page_create_visual('views', multisite_datasources[ds_name]['infos'], + next_url = 'edit_view.py?mode=create&datasource=%s&single_infos=%%s' % ds_name) + +#. +# .--Edit View-----------------------------------------------------------. +# | _____ _ _ _ __ ___ | +# | | ____|__| (_) |_ \ \ / (_) _____ __ | +# | | _| / _` | | __| \ \ / /| |/ _ \ \ /\ / / | +# | | |__| (_| | | |_ \ V / | | __/\ V V / | +# | |_____\__,_|_|\__| \_/ |_|\___| \_/\_/ | +# | | +# +----------------------------------------------------------------------+ +# | | +# '----------------------------------------------------------------------' + +# Return list of available datasources (used to render filters) +def get_view_infos(view): + ds_name = view.get('datasource', html.var('datasource')) + return multisite_datasources[ds_name]['infos'] + +def page_edit_view(): + load_views() + + visuals.page_edit_visual('views', multisite_views, + custom_field_handler = render_view_config, + load_handler = transform_view_to_valuespec_value, + create_handler = create_view_from_valuespec, + info_handler = get_view_infos, + try_handler = lambda view: show_view(view, False, False) + ) + +def view_choices(only_with_hidden = False): + choices = [("", "")] + for name, view in available_views.items(): + if not only_with_hidden or view['single_infos']: + if view.get('mobile', False): + title = _('Mobile: ') + _u(view["title"]) + else: + title = _u(view["title"]) + choices.append(("%s" % name, title)) + return choices + +def view_editor_options(): + return [ + ('mobile', _('Show this view in the Mobile GUI')), + ('mustsearch', _('Show data only on search')), + ('force_checkboxes', _('Always show the checkboxes')), + ('user_sortable', _('Make view sortable by user')), + ('play_sounds', _('Play alarm sounds')), + # FIXME + #html.help(_("If enabled and the view shows at least one host or service problem " + # "the a sound will be played by the browser. 
Please consult the %s for details.") + # % docu_link("multisite_sounds", _("documentation"))) + ] + +def view_editor_specs(ds_name, general_properties=True): + load_views() # make sure that available_views is present + specs = [] + if general_properties: + specs.append( + ('view', Dictionary( + title = _('View Properties'), + render = 'form', + optional_keys = None, + elements = [ + ('datasource', FixedValue(ds_name, + title = _('Datasource'), + totext = multisite_datasources[ds_name]['title'], + help = _('The datasource of a view cannot be changed.'), + )), + ('options', ListChoice( + title = _('Options'), + choices = view_editor_options(), + default_value = ['user_sortable'], + )), + ('browser_reload', Integer( + title = _('Automatic page reload'), + unit = _('seconds'), + minvalue = 0, + help = _('Leave this empty or at 0 for no automatic reload.'), + )), + ('layout', DropdownChoice( + title = _('Basic Layout'), + choices = [ (k, v["title"]) for k,v in multisite_layouts.items() if not v.get("hide")], + default_value = 'table', + sorted = True, + )), + ('num_columns', Integer( + title = _('Number of Columns'), + default_value = 1, + minvalue = 1, + maxvalue = 50, + )), + ('column_headers', DropdownChoice( + title = _('Column Headers'), + choices = [ + ("off", _("off")), + ("pergroup", _("once per group")), + ("repeat", _("repeat every 20'th row")), + ], + default_value = 'pergroup', + )), + ], + )) + ) + + allowed = allowed_for_datasource(multisite_sorters, ds_name) + + def column_spec(ident, title, ds_name): + allowed = allowed_for_datasource(multisite_painters, ds_name) + collist = collist_of_collection(allowed) + + allow_empty = True + empty_text = None + if ident == 'columns': + allow_empty = False + empty_text = _("Please add at least one column to your view.") + + vs_column = Tuple( + title = _('Column'), + elements = [ + DropdownChoice( + title = _('Column'), + choices = collist, + sorted = True, + no_preselect = True, + ), + DropdownChoice( + title = _('Link'), + choices = view_choices, + sorted = True, + ), + DropdownChoice( + title = _('Tooltip'), + choices = [(None, "")] + collist, + ), + ] + ) + + joined = allowed_for_joined_datasource(multisite_painters, ds_name) + if ident == 'columns' and joined: + joined_cols = collist_of_collection(joined, collist) + + vs_column = Alternative( + elements = [ + vs_column, + + Tuple( + title = _('Joined column'), + elements = [ + DropdownChoice( + title = _('Column'), + choices = joined_cols, + sorted = True, + no_preselect = True, + ), + TextUnicode( + title = _('of Service'), + allow_empty = False, + ), + DropdownChoice( + title = _('Link'), + choices = view_choices, + sorted = True, + ), + DropdownChoice( + title = _('Tooltip'), + choices = [(None, "")] + joined_cols, + ), + TextUnicode( + title = _('Title'), + ), + ], + ), + ], + style = 'dropdown', + match = lambda x: x != None and len(x) == 5 and 1 or 0, + ) + + return (ident, Dictionary( + title = title, + render = 'form', + optional_keys = None, + elements = [ + (ident, ListOf(vs_column, + title = title, + add_label = _('Add column'), + allow_empty = allow_empty, + empty_text = empty_text, + )), + ], + )) + + specs.append(column_spec('columns', _('Columns'), ds_name)) + + specs.append( + ('sorting', Dictionary( + title = _('Sorting'), + render = 'form', + optional_keys = None, + elements = [ + ('sorters', ListOf( + Tuple( + elements = [ + DropdownChoice( + title = _('Column'), + choices = [ (name, p["title"]) for name, p in allowed.items() ], + sorted = True, + no_preselect = True, 
+ ), + DropdownChoice( + title = _('Order'), + choices = [(False, _("Ascending")), + (True, _("Descending"))], + ), + ], + orientation = 'horizontal', + ), + title = _('Sorting'), + add_label = _('Add column'), + )), + ], + )), + ) + + specs.append(column_spec('grouping', _('Grouping'), ds_name)) + + return specs + +def render_view_config(view, general_properties=True): + ds_name = view.get("datasource", html.var("datasource")) + if not ds_name: + raise MKInternalError(_("No datasource defined.")) + if ds_name not in multisite_datasources: + raise MKInternalError(_('The given datasource is not supported.')) + + view['datasource'] = ds_name + + for ident, vs in view_editor_specs(ds_name, general_properties): + vs.render_input(ident, view.get(ident)) + +# Is used to change the view structure to be compatible to +# the valuespec This needs to perform the inverted steps of the +# transform_valuespec_value_to_view() function. FIXME: One day we should +# rewrite this to make no transform needed anymore +def transform_view_to_valuespec_value(view): + view["view"] = {} # Several global variables are put into a sub-dict + # Only copy our known keys. Reporting element, etc. might have their own keys as well + for key in [ "datasource", "browser_reload", "layout", "num_columns", "column_headers" ]: + if key in view: + view["view"][key] = view[key] + + view["view"]['options'] = [] + for key, title in view_editor_options(): + if view.get(key): + view['view']['options'].append(key) + + view['visibility'] = [] + for key in [ 'hidden', 'hidebutton', 'public' ]: + if view.get(key): + view['visibility'].append(key) + + view['grouping'] = { "grouping" : view.get('group_painters', []) } + view['sorting'] = { "sorters" : view.get('sorters', {}) } + + columns = [] + view['columns'] = { "columns" : columns } + for entry in view.get('painters', []): + if len(entry) == 5: + pname, viewname, tooltip, join_index, col_title = entry + columns.append((pname, join_index, viewname, tooltip, col_title)) + + elif len(entry) == 4: + pname, viewname, tooltip, join_index = entry + columns.append((pname, join_index, viewname, tooltip, '')) + + elif len(entry) == 3: + pname, viewname, tooltip = entry + columns.append((pname, viewname, tooltip)) + + else: + pname, viewname = entry + columns.append((pname, viewname, '')) + + +def transform_valuespec_value_to_view(view): + for ident, attrs in view.items(): + # Transform some valuespec specific options to legacy view + # format. We do not want to change the view data structure + # at the moment. 
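+ # Illustration (hypothetical painter names): in the 'columns' branch
+ # below, a plain valuespec column ('host_name', 'hostview', '') stays a
+ # 3-tuple painter, while a joined 5-tuple column
+ # ('svc_state', 'CPU load', 'serviceview', '', u'CPU') is reordered to
+ # the legacy painter tuple ('svc_state', 'serviceview', '', 'CPU load', u'CPU').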
+ if ident == 'view': + if "options" in attrs: + for option in attrs['options']: + view[option] = True + del attrs['options'] + view.update(attrs) + del view["view"] + + elif ident == 'sorting': + view.update(attrs) + del view["sorting"] + + elif ident == 'grouping': + view['group_painters'] = attrs['grouping'] + del view["grouping"] + + elif ident == 'columns': + painters = [] + for column in attrs['columns']: + if len(column) == 5: + pname, join_index, viewname, tooltip, col_title = column + else: + pname, viewname, tooltip = column + join_index, col_title = None, None + + viewname = viewname and viewname or None + + if join_index and col_title: + painters.append((pname, viewname, tooltip, join_index, col_title)) + elif join_index: + painters.append((pname, viewname, tooltip, join_index)) + else: + painters.append((pname, viewname, tooltip)) + view['painters'] = painters + del view["columns"] + + +# Extract properties of view from HTML variables and construct +# view object, to be used for saving or displaying +# +# old_view is the old view dict which might be loaded from storage. +# view is the new dict object to be updated. +def create_view_from_valuespec(old_view, view): + ds_name = old_view.get('datasource', html.var('datasource')) + view['datasource'] = ds_name + datasource = multisite_datasources[ds_name] + vs_value = {} + for ident, vs in view_editor_specs(ds_name): + attrs = vs.from_html_vars(ident) + vs.validate_value(attrs, ident) + vs_value[ident] = attrs + + transform_valuespec_value_to_view(vs_value) + view.update(vs_value) + return view + +#. +# .--Display View--------------------------------------------------------. +# | ____ _ _ __ ___ | +# | | _ \(_)___ _ __ | | __ _ _ _ \ \ / (_) _____ __ | +# | | | | | / __| '_ \| |/ _` | | | | \ \ / /| |/ _ \ \ /\ / / | +# | | |_| | \__ \ |_) | | (_| | |_| | \ V / | | __/\ V V / | +# | |____/|_|___/ .__/|_|\__,_|\__, | \_/ |_|\___| \_/\_/ | +# | |_| |___/ | +# +----------------------------------------------------------------------+ +# | | +# '----------------------------------------------------------------------' def show_filter(f): if not f.visible(): @@ -101,6 +722,7 @@ html.write("
") html.write("
") + def show_filter_form(is_open, filters): # Table muss einen anderen Namen, als das Formular html.write('
' @@ -134,908 +756,24 @@ html.write("
") + def show_painter_options(painter_options): html.write('') - - -################################################################################## -# Filters -################################################################################## - -def declare_filter(sort_index, f, comment = None): - multisite_filters[f.name] = f - f.comment = comment - f.sort_index = sort_index - -# Base class for all filters -# name: The unique id of that filter. This id is e.g. used in the -# persisted view configuration -# title: The title of the filter visible to the user. This text -# may be localized -# info: The datasource info this filter needs to work. If this -# is "service", the filter will also be available in tables -# showing service information. "host" is available in all -# service and host views. The log datasource provides both -# "host" and "service". Look into datasource.py for which -# datasource provides which information -# htmlvars: HTML variables this filter uses -# link_columns: If this filter is used for linking (state "hidden"), then -# these Livestatus columns are needed to fill the filter with -# the proper information. In most cases, this is just []. Only -# a few filters are useful for linking (such as the host_name and -# service_description filters with exact match) -class Filter: - def __init__(self, name, title, info, htmlvars, link_columns): - self.name = name - self.info = info - self.title = title - self.htmlvars = htmlvars - self.link_columns = link_columns - - # Some filters can be unavailable due to the configuration (e.g. - # the WATO Folder filter is only available if WATO is enabled. - def available(self): - return True - - # Some filters can be invisible. This is useful to hide filters which have always - # the same value but can not be removed using available() because the value needs - # to be set during runtime. - # A good example is the "site" filter which does not need to be available to the - # user in single site setups. - def visible(self): - return True - - # More complex filters need more height in the HTML layout - def double_height(self): - return False - - def display(self): - raise MKInternalError(_("Incomplete implementation of filter %s '%s': missing display()") % \ - (self.name, self.title)) - html.write(_("FILTER NOT IMPLEMENTED")) - - def filter(self, tablename): - return "" - - # post-Livestatus filtering (e.g. for BI aggregations) - def filter_table(self, rows): - return rows - - def variable_settings(self, row): - return [] # return pairs of htmlvar and name according to dataset in row - - def infoprefix(self, infoname): - if self.info == infoname: - return "" - else: - return self.info[:-1] + "_" - - # Hidden filters may contribute to the pages headers of the views - def heading_info(self, infoname): - return None - - -# Load all views - users or builtins -def load_views(): - html.multisite_views = {} - - # first load builtins. Set username to '' - for name, view in multisite_builtin_views.items(): - view["owner"] = '' # might have been forgotten on copy action - view["public"] = True - view["name"] = name - html.multisite_views[('', name)] = view - - # Now scan users subdirs for files "views.mk" - subdirs = os.listdir(config.config_dir) - for user in subdirs: - try: - dirpath = config.config_dir + "/" + user - if os.path.isdir(dirpath): - path = dirpath + "/views.mk" - if not os.path.exists(path): - continue - f = file(path, "r", 65536) - sourcecode = f.read() - t = 0 - while sourcecode == "": # This should never happen. 
But it happened. Don't know why.
-                    # It's just a plain file. No fsync or stuff helped. Hack around a bit.
-                    time.sleep(0.2)
-                    sourcecode = f.read()
-                    t += 1
-                    if t > 10:
-                        raise MKGeneralException(_("Cannot load views from %s/view.mk: file empty or not flushed") % dirpath)
-                views = eval(sourcecode)
-                for name, view in views.items():
-                    view["owner"] = user
-                    view["name"] = name
-                    html.multisite_views[(user, name)] = view
-        except SyntaxError, e:
-            raise MKGeneralException(_("Cannot load views from %s/views.mk: %s") % (dirpath, e))
-
-    html.available_views = available_views()
-
-# Get the list of views which are available to the user
-# (which could be retrieved with get_view)
-def available_views():
-    user = config.user_id
-    views = {}
-
-    # 1. user's own views, if allowed to edit views
-    if config.may("general.edit_views"):
-        for (u, n), view in html.multisite_views.items():
-            if u == user:
-                views[n] = view
-
-    # 2. views of special users allowed to globally override builtin views
-    for (u, n), view in html.multisite_views.items():
-        if n not in views and view["public"] and config.user_may(u, "general.force_views"):
-            # Honor original permissions for the current user
-            permname = "view.%s" % n
-            if config.permission_exists(permname) \
-                and not config.may(permname):
-                continue
-            views[n] = view
-
-    # 3. Builtin views, if allowed.
-    for (u, n), view in html.multisite_views.items():
-        if u == '' and n not in views and config.may("view.%s" % n):
-            views[n] = view
-
-    # 4. other users views, if public. Sill make sure we honor permission
-    #    for builtin views
-    for (u, n), view in html.multisite_views.items():
-        if n not in views and view["public"] and config.user_may(u, "general.publish_views"):
-            # Is there a builtin view with the same name? If yes, honor permissions.
-            permname = "view.%s" % n
-            if config.permission_exists(permname) \
-                and not config.may(permname):
-                continue
-            views[n] = view
-
-    return views
-
-
-def save_views(us):
-    userviews = {}
-    for (user, name), view in html.multisite_views.items():
-        if us == user:
-            userviews[name] = view
-    config.save_user_file("views", userviews)
-
-
-# ----------------------------------------------------------------------
-#  _____     _     _               __         _
-# |_   _|_ _| |__ | | ___    ___  / _| __   _(_) _____      _____
-#   | |/ _` | '_ \| |/ _ \  / _ \| |_  \ \ / / |/ _ \ \ /\ / / __|
-#   | | (_| | |_) | |  __/ | (_) |  _|  \ V /| |  __/\ V  V /\__ \
-#   |_|\__,_|_.__/|_|\___|  \___/|_|     \_/ |_|\___| \_/\_/ |___/
-#
-# ----------------------------------------------------------------------
-# Show list of all views with buttons for editing
-def page_edit_views(msg=None):
-    if not config.may("general.edit_views"):
-        raise MKAuthException(_("You are not allowed to edit views."))
-
-    html.header(_("Edit views"), stylesheets=["pages","views","status"])
-    html.help(_("Here you can create and edit customizable views. A view "
-                "displays monitoring status or log data by combining filters, sortings, "
-                "groupings and other aspects."))
-
-    if msg: # called from page_edit_view() after saving
-        html.message(msg)
-
-    load_views()
-
-    # Deletion of views
-    delname = html.var("_delete")
-    if delname and html.confirm(_("Please confirm the deletion of the view %s.") % delname):
-        del html.multisite_views[(config.user_id, delname)]
-        save_views(config.user_id)
-        html.reload_sidebar();
-
-    html.begin_form("create_view", "edit_view.py")
-
-    html.button("create", _("Create New View"))
-    html.write(_(" for datasource: "))
-    html.sorted_select("datasource", [ (k, v["title"]) for k, v in multisite_datasources.items() ])
-
-    html.write('<h3>' + _("Existing Views") + '</h3>')
-    html.write('<table class=data>')
-    html.write("<tr>")
-    html.write("<th>%s</th>" % _("Actions"))
-    html.write("<th>%s</th>" % _("Link Name"))
-    html.write("<th>%s</th>" % _("Title"))
-    html.write("<th>%s</th>" % _("Datasource"))
-    html.write("<th>%s</th>" % _("Owner"))
-    html.write("<th>%s</th>" % _("Public"))
-    html.write("<th>%s</th>" % _("Hidden"))
-    html.write("</tr>")
-
-    keys_sorted = html.multisite_views.keys()
-    keys_sorted.sort(cmp = lambda a,b: -cmp(a[0],b[0]) or cmp(a[1], b[1]))
-
-    odd = "odd"
-    for (owner, viewname) in keys_sorted:
-        if owner == "" and not config.may("view.%s" % viewname):
-            continue
-        view = html.multisite_views[(owner, viewname)]
-        if owner == config.user_id or (view["public"] \
-            and (owner == "" or config.user_may(owner, "general.publish_views"))):
-
-            odd = odd == "odd" and "even" or "odd"
-            html.write('<tr class="data %s0">' % odd)
-
-            # Actions
-            html.write('<td class=buttons>')
-
-            # Edit
-            if owner == config.user_id:
-                html.icon_button("edit_view.py?load_view=%s" % viewname, _("Edit"), "edit")
-
-            # Clone / Customize
-            buttontext = not owner and _("Customize this view") \
-                         or _("Create a clone of this view")
-            backurl = htmllib.urlencode(html.makeuri([]))
-            clone_url = "edit_view.py?clonefrom=%s&load_view=%s&back=%s" \
-                        % (owner, viewname, backurl)
-            html.icon_button(clone_url, buttontext, "clone")
-
-            # Delete
-            if owner == config.user_id:
-                html.icon_button("edit_views.py?_delete=%s"
-                                 % viewname, _("Delete this view!"), "delete")
-            html.write('</td>')
-
-            # Link name
-            html.write('<td class=content>%s</td>' % viewname)
-
-            # Title
-            html.write('<td class=content>')
-            if not view["hidden"]:
-                html.write("<a href=\"view.py?view_name=%s\">%s</a>"
-                           % (viewname, view["title"]))
-            else:
-                html.write(view["title"])
-            html.help(view.get("description"))
-            html.write("</td>")
-
-            # Datasource
-            html.write("<td class=content>%s</td>\n" % multisite_datasources[view["datasource"]]['title'])
-
-            # Owner
-            if owner == "":
-                ownertxt = "<i>" + _("builtin") + "</i>"
-            else:
-                ownertxt = owner
-            html.write("<td class=content>%s</td>" % ownertxt)
-            html.write("<td class=content>%s</td>" % (view["public"] and _("yes") or _("no")))
-            html.write("<td class=content>%s</td>" % (view["hidden"] and _("yes") or _("no")))
-            html.write("</tr>\n")
-
-    html.write("</table>
\n") - html.end_form() - html.footer() - - -def select_view(varname, only_with_hidden = False): - choices = [("", "")] - for name, view in html.available_views.items(): - if not only_with_hidden or len(view["hide_filters"]) > 0: - if view.get('mobile', False): - title = _('Mobile: ') + view["title"] - else: - title = view["title"] - choices.append(("%s" % name, title)) - html.sorted_select(varname, choices, "") - -# ------------------------------------------------------------------------- -# _____ _ _ _ __ ___ -# | ____|__| (_) |_ \ \ / (_) _____ __ -# | _| / _` | | __| \ \ / /| |/ _ \ \ /\ / / -# | |__| (_| | | |_ \ V / | | __/\ V V / -# |_____\__,_|_|\__| \_/ |_|\___| \_/\_/ -# Edit one view -# ------------------------------------------------------------------------- -def page_edit_view(): - if not config.may("general.edit_views"): - raise MKAuthException(_("You are not allowed to edit views.")) - - load_views() - view = None - - # Load existing view from disk - and create a copy if 'clonefrom' is set - viewname = html.var("load_view") - oldname = viewname - if viewname: - cloneuser = html.var("clonefrom") - if cloneuser != None: - view = copy.copy(html.multisite_views.get((cloneuser, viewname), None)) - # Make sure, name is unique - if cloneuser == config.user_id: # Clone own view - newname = viewname + "_clone" - else: - newname = viewname - # Name conflict -> try new names - n = 1 - while (config.user_id, newname) in html.multisite_views: - n += 1 - newname = viewname + "_clone%d" % n - view["name"] = newname - viewname = newname - oldname = None # Prevent renaming - if cloneuser == config.user_id: - view["title"] += _(" (Copy)") - else: - view = html.multisite_views.get((config.user_id, viewname)) - if not view: - view = html.multisite_views.get(('', viewname)) # load builtin view - - datasourcename = view["datasource"] - if view: - load_view_into_html_vars(view) - - # set datasource name if a new view is being created - elif html.var("datasource"): - datasourcename = html.var("datasource") - else: - raise MKInternalError(_("No view name and not datasource defined.")) - - - # handle case of save or try or press on search button - if html.var("save") or html.var("try") or html.var("search"): - try: - view = create_view() - if html.var("save"): - if html.check_transaction(): - load_views() - html.multisite_views[(config.user_id, view["name"])] = view - oldname = html.var("old_name") - # Handle renaming of views - if oldname and oldname != view["name"]: - # -> delete old entry - if (config.user_id, oldname) in html.multisite_views: - del html.multisite_views[(config.user_id, oldname)] - # -> change view_name in back parameter - if html.has_var('back'): - html.set_var('back', html.var('back', '').replace('view_name=' + oldname, - 'view_name=' + view["name"])) - save_views(config.user_id) - return page_message_and_forward(_("Your view has been saved."), "edit_views.py", - "\n") - - except MKUserError, e: - html.write("
<div class=error>%s</div>
\n" % e.message) - html.add_user_error(e.varname, e.message) - - html.header(_("Edit view"), stylesheets=["pages", "views", "status", "bi"]) - html.begin_context_buttons() - back_url = html.var("back", "") - if back_url: - html.context_button(_("Back"), back_url, "back") - html.context_button(_("All Views"), "edit_views.py") - html.end_context_buttons() - - html.begin_form("view") - html.hidden_field("back", back_url) - html.hidden_field("old_name", viewname) # safe old name in case user changes it - - forms.header(_("Basic Settings")) - - forms.section(_("Title")) - html.text_input("view_title", size=50) - - forms.section(_("Link Name")) - html.text_input("view_name", size=12) - html.help(_("The link name will be used in URLs that point to a view, e.g. " - "view.py?view_name=myview. It will also be used " - "internally for identifying a view. You can create several views " - "with the same title but only one per link name. If you create a " - "view that has the same link name as a builtin view, then your " - "view will override that (shadowing it).")) - - forms.section(_("Datasource"), simple=True) - datasource_title = multisite_datasources[datasourcename]["title"] - html.write("%s: %s
\n" % (_('Datasource'), datasource_title)) - html.hidden_field("datasource", datasourcename) - html.help(_("The datasource of a view cannot be changed.")) - - forms.section(_("Topic")) - html.text_input("view_topic", _("Other"), size=50) - html.help(_("The view will be sorted under this topic in the Views snapin. ")) - - forms.section(_("Buttontext")) - html.text_input("view_linktitle", size=26) - html.write(_("  Icon: ")) - html.text_input("view_icon", size=14) - html.help(_("If you define a text here, then it will be used in " - "buttons to the view instead of of view title.")) - - forms.section(_("Description")) - html.text_area("view_description", "", rows=4, cols=50) - - forms.section(_("Visibility")) - if config.may("general.publish_views"): - html.checkbox("public", label=_('make this view available for all users')) - html.write("
\n") - html.checkbox("hidden", label=_('hide this view from the sidebar')) - html.write("
\n") - html.checkbox("mobile", label=_('show this view in the Mobile GUI')) - html.write("
\n") - html.checkbox("mustsearch", label=_('show data only on search') + "
") - html.checkbox("hidebutton", label=_('do not show a context button to this view')) - - forms.section(_("Browser reload")) - html.write(_("Reload page every ")) - html.number_input("browser_reload", 0) - html.write(_(" seconds")) - html.help(_("Leave this empty or at 0 for now automatic reload.")) - - forms.section(_("Audible alarm sounds"), simple=True) - html.checkbox("play_sounds", False, label=_("Play alarm sounds")) - html.help(_("If enabled and the view shows at least one host or service problem " - "the a sound will be played by the browser. Please consult the %s for details.") - % docu_link("multisite_sounds", _("documentation"))) - - forms.header(_("Filters"), isopen=False) - allowed_filters = filters_allowed_for_datasource(datasourcename) - - # sort filters according to title - s = [(filt.sort_index, filt.title, fname, filt) - for fname, filt in allowed_filters.items() - if fname not in ubiquitary_filters ] - s.sort() - - # Construct a list of other filters which conflict with this filter. A filter uses one or - # several http variables for transporting the filter data. There are several filters which - # have overlaping vars which must not be used at the same time. Those filters must exclude - # eachother. This is done in the JS code. When activating one filter it checks which other - # filters to disable and makes the "mode" dropdowns unchangable. - filter_htmlvars = {} - for sortindex, title, fname, filt in s: - for htmlvar in filt.htmlvars: - if htmlvar not in filter_htmlvars: - filter_htmlvars[htmlvar] = [] - filter_htmlvars[htmlvar].append(fname) - - filter_groups = {} - for sortindex, title, fname, filt in s: - filter_groups[fname] = set([]) - for htmlvar in filt.htmlvars: - filter_groups[fname].update(filter_htmlvars[htmlvar]) - filter_groups[fname].remove(fname) - filter_groups[fname] = list(filter_groups[fname]) - - shown_help = False - for sortindex, title, fname, filt in s: - forms.section(title, hide = not filt.visible()) - if not shown_help: - html.help(_("Please configure, which of the available filters will be used in this " - "view.

Show to user: the user will be able to see and modify these " - "filters. You can define default values.

Hardcode: these filters " - "will be in effect but not visible to the user.

Use for linking: " - "These filters (usually site, host name and service) are needed for views " - "that have a context (such as a host or a service). Such views can be used " - "as targets for columns. Whenever the context is available, a button to that " - "view will be displayed in related views.")) - shown_help = True - - html.write('
' % html.var("filter_%s" % fname, "off")) - html.sorted_select("filter_%s" % fname, - [("off", _("Don't use")), - ("show", _("Show to user")), - ("hide", _("Use for linking")), - ("hard", _("Hardcode"))], - "off", "filter_activation(this)") - show_filter(filt) - html.write('
') - html.write('
') - html.help(filt.comment) - - html.write("\n") - - - def sorter_selection(title, var_prefix, maxnum, data): - allowed = allowed_for_datasource(data, datasourcename) - forms.header(title, isopen=False) - # make sure, at least 3 selection boxes are free for new columns - while html.has_var("%s%d" % (var_prefix, maxnum - 2)): - maxnum += 1 - for n in range(1, maxnum + 1): - forms.section(_("%d. Column") % n) - collist = [ ("", "") ] + [ (name, p["title"]) for name, p in allowed.items() ] - html.sorted_select("%s%d" % (var_prefix, n), collist) - html.write(" ") - html.select("%sorder_%d" % (var_prefix, n), [("asc", _("Ascending")), ("dsc", _("Descending"))]) - - def column_selection(title, var_prefix, data): - allowed = allowed_for_datasource(data, datasourcename) - - joined = [] - if var_prefix == 'col_': - joined = allowed_for_joined_datasource(data, datasourcename) - - forms.header(title, isopen=False) - forms.section(_('Columns')) - # make sure, at least 3 selection boxes are free for new columns - maxnum = 1 - while html.has_var("%s%d" % (var_prefix, maxnum)): - maxnum += 1 - html.write('
')
-        for n in range(1, maxnum):
-            view_edit_column(n, var_prefix, maxnum, allowed, joined)
-        html.write('</div>
') - html.jsbutton('add_column', _("Add Column"), "add_view_column(this, '%s', '%s')" % (datasourcename, var_prefix)) - - # [4] Sorting - sorter_selection(_("Sorting"), "sort_", max_sort_columns, multisite_sorters) - - # [5] Grouping - column_selection(_("Grouping"), "group_", multisite_painters) - - # [6] Columns (painters) - column_selection(_("Columns"), "col_", multisite_painters) - - # [7] Layout - forms.header(_("Layout"), isopen=False) - forms.section(_("Basic Layout")) - html.sorted_select("layout", [ (k, v["title"]) for k,v in multisite_layouts.items() if not v.get("hide")]) - - forms.section(_("Number of Columns")) - html.number_input("num_columns", 1) - forms.section(_('Column headers')) + html.button("painter_options", _("Submit"), "submit") - # 1.1.11i3: Fix deprecated column_header option: perpage -> pergroup - # This should be cleaned up someday - if html.var("column_headers") == 'perpage': - html.set_var("column_headers", 'pergroup') - - html.select("column_headers", [ - ("off", _("off")), - ("pergroup", _("once per group")), - ("repeat", _("repeat every 20'th row")) ]) - - forms.section(_('Sortable by user'), simple=True) - html.checkbox('user_sortable', True, label=_("Make view sortable by user")) - - forms.end() - - html.button("save", _("Save")) - html.write(" ") - html.button("try", _("Try out")) + html.hidden_fields() html.end_form() - - # html.write("
\n") - - if html.has_var("try") or html.has_var("search"): - html.set_var("search", "on") - if view: - bi.reset_cache_status() - show_view(view, False, False) - return # avoid second html footer - - - html.footer() - -def view_edit_column(n, var_prefix, maxnum, allowed, joined = []): - - collist = [ ("", "") ] + collist_of_collection(allowed) - if joined: - collist += [ ("-", "---") ] + collist_of_collection(joined, collist) - - html.write("
" % (var_prefix, n)) - - # Buttons for deleting and moving around - html.write('') - - # Actual column editor - html.write('
') - html.icon_button("javascript:void(0)", _("Delete this column"), "delete", onclick="delete_view_column(this);") - display = n == 1 and 'display:none;' or '' - html.icon_button("javascript:void(0)", _("Move this column up"), "up", onclick="move_column_up(this);", - id="%sup_%d" % (var_prefix, n), style=display) - - display = n == maxnum - 1 and 'display:none;' or '' - html.icon_button("javascript:void(0)", _("Move this column down"), "down", onclick="move_column_down(this);", - id="%sdown_%d" % (var_prefix, n), style=display) - html.write('%s:' % (var_prefix, n, _('Column'))) - html.select("%s%d" % (var_prefix, n), collist, "", "toggle_join_fields('%s', %d, this)" % (var_prefix, n)) - display = 'none' - if joined and is_joined_value(collist, "%s%d" % (var_prefix, n)): - display = '' - html.write("
%s:" % - (var_prefix, n, display, _('of Service'))) - html.text_input("%sjoin_index_%d" % (var_prefix, n), id = var_prefix + "join_index_%d" % n) - html.write("
%s:" % _('Link')) - select_view("%slink_%d" % (var_prefix, n)) - html.write("
%s:" % _('Tooltip')) - html.select("%stooltip_%d" % (var_prefix, n), collist) - html.write("
%s:" % - (var_prefix, n, display, _('Title'))) - html.text_input("%stitle_%d" % (var_prefix, n), id = var_prefix + "title_%d" % n) - html.write("
") - html.write("
") - -def ajax_get_edit_column(): - if not config.may("general.edit_views"): - raise MKAuthException(_("You are not allowed to edit views.")) - - if not html.has_var('ds') or not html.has_var('num') or not html.has_var('pre'): - raise MKInternalError(_("Missing attributes")) - - load_views() - - allowed = allowed_for_datasource(multisite_painters, html.var('ds')) - - joined = [] - if html.var('pre') == 'col_': - joined = allowed_for_joined_datasource(multisite_painters, html.var('ds')) - - num = int(html.var('num', 0)) - - html.form_vars = [] - view_edit_column(num, html.var('pre'), num + 1, allowed, joined) - -# Called by edit function in order to prefill HTML form -def load_view_into_html_vars(view): - # view is well formed, not checks neccessary - html.set_var("view_title", view["title"]) - html.set_var("view_topic", view.get("topic", _("Other"))) - html.set_var("view_linktitle", view.get("linktitle", view["title"])) - html.set_var("view_icon", view.get("icon")), - html.set_var("view_description", view.get("description", "")) - html.set_var("view_name", view["name"]) - html.set_var("datasource", view["datasource"]) - html.set_var("column_headers", view.get("column_headers", "off")) - html.set_var("layout", view["layout"]) - html.set_var("num_columns", view.get("num_columns", 1)) - html.set_var("browser_reload", view.get("browser_reload", 0)) - html.set_var("play_sounds", view.get("play_sounds", False) and "on" or "") - html.set_var("public", view["public"] and "on" or "") - html.set_var("hidden", view["hidden"] and "on" or "") - html.set_var("mobile", view.get("mobile") and "on" or "") - html.set_var("mustsearch", view["mustsearch"] and "on" or "") - html.set_var("hidebutton", view.get("hidebutton", False) and "on" or "") - html.set_var("user_sortable", view.get("user_sortable", True) and "on" or "") - - # [3] Filters - for name, filt in multisite_filters.items(): - if name not in ubiquitary_filters: - if name in view["show_filters"]: - html.set_var("filter_%s" % name, "show") - elif name in view["hard_filters"]: - html.set_var("filter_%s" % name, "hard") - elif name in view["hide_filters"]: - html.set_var("filter_%s" % name, "hide") - - for varname, value in view["hard_filtervars"]: - if not html.has_var(varname): - html.set_var(varname, value) - - # [4] Sorting - n = 1 - for name, desc in view["sorters"]: - html.set_var("sort_%d" % n, name) - if desc: - value = "dsc" - else: - value = "asc" - html.set_var("sort_order_%d" % n, value) - n +=1 - - # [5] Grouping - n = 1 - for entry in view["group_painters"]: - name = entry[0] - viewname = entry[1] - tooltip = len(entry) > 2 and entry[2] or None - html.set_var("group_%d" % n, name) - if viewname: - html.set_var("group_link_%d" % n, viewname) - if tooltip: - html.set_var("group_tooltip_%d" % n, tooltip) - n += 1 - - # [6] Columns - n = 1 - for entry in view["painters"]: - name = entry[0] - viewname = entry[1] - tooltip = len(entry) > 2 and entry[2] or None - join_index = len(entry) > 3 and entry[3] or None - col_title = len(entry) > 4 and entry[4] or None - html.set_var("col_%d" % n, name) - if viewname: - html.set_var("col_link_%d" % n, viewname) - if tooltip: - html.set_var("col_tooltip_%d" % n, tooltip) - if join_index: - html.set_var("col_join_index_%d" % n, join_index) - if col_title: - html.set_var("col_title_%d" % n, col_title) - n += 1 - - # Make sure, checkboxes with default "on" do no set "on". 
Otherwise they - # would always be on - html.set_var("filled_in", "create_view") - -# Extract properties of view from HTML variables and construct -# view object, to be used for saving or displaying -def create_view(): - name = html.var("view_name").strip() - if name == "": - raise MKUserError("view_name", _("Please supply a unique name for the view, this will be used to specify that view in HTTP links.")) - if not re.match("^[a-zA-Z0-9_]+$", name): - raise MKUserError("view_name", _("The name of the view may only contain letters, digits and underscores.")) - title = html.var_utf8("view_title").strip() - if title == "": - raise MKUserError("view_title", _("Please specify a title for your view.")) - linktitle = html.var("view_linktitle").strip() - if not linktitle: - linktitle = title - icon = html.var("view_icon") - if not icon: - icon = None - - topic = html.var_utf8("view_topic") - if not topic: - topic = _("Other") - datasourcename = html.var("datasource") - datasource = multisite_datasources[datasourcename] - tablename = datasource["table"] - layoutname = html.var("layout") - try: - num_columns = int(html.var("num_columns", 1)) - if num_columns < 1: num_columns = 1 - if num_columns > 50: num_columns = 50 - except: - num_columns = 1 - - try: - browser_reload = int(html.var("browser_reload", 0)) - if browser_reload < 0: browser_reload = 0 - except: - browser_reload = 0 - - play_sounds = html.var("play_sounds", "") != "" - public = html.var("public", "") != "" and config.may("general.publish_views") - hidden = html.var("hidden", "") != "" - mobile = html.var("mobile", "") != "" - mustsearch = html.var("mustsearch", "") != "" - hidebutton = html.var("hidebutton", "") != "" - column_headers = html.var("column_headers") - user_sortable = html.var("user_sortable") - - show_filternames = [] - hide_filternames = [] - hard_filternames = [] - hard_filtervars = [] - - for fname, filt in multisite_filters.items(): - usage = html.var("filter_%s" % fname) - if usage == "show": - show_filternames.append(fname) - elif usage == "hide": - hide_filternames.append(fname) - elif usage == "hard": - hard_filternames.append(fname) - if usage in [ "show", "hard" ]: - for varname in filt.htmlvars: - hard_filtervars.append((varname, html.var(varname, ""))) - - sorternames = [] - for n in range(1, max_sort_columns+1): - sname = html.var("sort_%d" % n) - if sname: - reverse = html.var("sort_order_%d" % n) == "dsc" - sorternames.append((sname, reverse)) - - group_painternames = [] - # User can set more than max_display_columns. We cannot easily know - # how many variables he has set since holes are allowed. Let's silently - # assume that 500 columns are enough. This surely is a hack, but if you - # have read this comment you might want to mail me a (simple) patch for - # doing this more cleanly... - for n in range(1, 500): - pname = html.var("group_%d" % n) - viewname = html.var("group_link_%d" % n) - tooltip = html.var("group_tooltip_%d" % n) - if pname: - if viewname not in html.available_views: - viewname = None - group_painternames.append((pname, viewname, tooltip)) - - painternames = [] - # User can set more than max_display_columns. We cannot easily know - # how many variables he has set since holes are allowed. Let's silently - # assume that 500 columns are enough. This surely is a hack, but if you - # have read this comment you might want to mail me a (simple) patch for - # doing this more cleanly... 
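
# [Sketch, not part of this patch] The loop below scans numbered HTML
# variables that may contain holes (col_1, col_3, ...). A standalone
# illustration of that pattern, with a hypothetical get_var() standing
# in for html.var():
def collect_numbered_vars(get_var, prefix, bound=500):
    # Probe every index up to the bound instead of stopping at the
    # first gap, since deleted columns leave holes in the numbering.
    entries = []
    for n in range(1, bound):
        value = get_var("%s%d" % (prefix, n))
        if value:
            entries.append((n, value))
    return entries
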
- for n in range(1, 500): - pname = html.var("col_%d" % n) - viewname = html.var("col_link_%d" % n) - tooltip = html.var("col_tooltip_%d" % n) - join_index = html.var('col_join_index_%d' % n) - col_title = html.var('col_title_%d' % n) - if pname and pname != '-': - if viewname not in html.available_views: - viewname = None - - allowed_cols = collist_of_collection(allowed_for_datasource(multisite_painters, datasourcename)) - joined_cols = collist_of_collection(allowed_for_joined_datasource(multisite_painters, datasourcename), allowed_cols) - if is_joined_value(joined_cols, "col_%d" % n) and not join_index: - raise MKUserError('col_join_index_%d' % n, _("Please specify the service to show the data for")) - - if join_index and col_title: - painternames.append((pname, viewname, tooltip, join_index, col_title)) - elif join_index: - painternames.append((pname, viewname, tooltip, join_index)) - else: - painternames.append((pname, viewname, tooltip)) - - if len(painternames) == 0: - raise MKUserError("col_1", _("Please add at least one column to your view.")) - - return { - "name" : name, - "owner" : config.user_id, - "title" : title, - "topic" : topic, - "linktitle" : linktitle, - "icon" : icon, - "description" : html.var_utf8("view_description", ""), - "datasource" : datasourcename, - "public" : public, - "hidden" : hidden, - "mobile" : mobile, - "mustsearch" : mustsearch, - "hidebutton" : hidebutton, - "layout" : layoutname, - "num_columns" : num_columns, - "browser_reload" : browser_reload, - "play_sounds" : play_sounds, - "column_headers" : column_headers, - "user_sortable" : user_sortable, - "show_filters" : show_filternames, - "hide_filters" : hide_filternames, - "hard_filters" : hard_filternames, - "hard_filtervars" : hard_filtervars, - "sorters" : sorternames, - "group_painters" : group_painternames, - "painters" : painternames, - } + html.write('
') -# --------------------------------------------------------------------- -# __ ___ _ -# \ \ / (_) _____ __ __ _(_) _____ __ -# \ \ / /| |/ _ \ \ /\ / / \ \ / / |/ _ \ \ /\ / / -# \ V / | | __/\ V V / \ V /| | __/\ V V / -# \_/ |_|\___| \_/\_/ \_/ |_|\___| \_/\_/ -# -# --------------------------------------------------------------------- -# Show one view filled with data def page_view(): bi.reset_cache_status() # needed for status icon @@ -1043,9 +781,16 @@ view_name = html.var("view_name") if view_name == None: raise MKGeneralException(_("Missing the variable view_name in the URL.")) - view = html.available_views.get(view_name) + view = available_views.get(view_name) if not view: - raise MKGeneralException(("No view defined with the name '%s'.") % htmllib.attrencode(view_name)) + raise MKGeneralException(_("No view defined with the name '%s'.") % html.attrencode(view_name)) + + # Gather the page context which is needed for the "add to visual" popup menu + # to add e.g. views to dashboards or reports + datasource = multisite_datasources[view['datasource']] + context = visuals.get_context_from_uri_vars(datasource['infos']) + context.update(visuals.get_singlecontext_html_vars(view)) + html.set_page_context(context) show_view(view, True, True, True) @@ -1056,18 +801,29 @@ # columns that need to fetch information from another table # (e.g. from the services table while we are in a hosts view) # If join_columns is False, we only return the "normal" columns. -def get_needed_columns(painters): +def get_needed_columns(view, painters): + # Make sure that the information about the available views is present. If + # called via the reporting, than this might not be the case + try: + available_views + except: + load_views() + columns = [] for entry in painters: - p = entry[0] - v = entry[1] - columns += p["columns"] - if v: - linkview = html.available_views.get(v) + painter = entry[0] + linkview_name = entry[1] + columns += painter["columns"] + if linkview_name: + linkview = available_views.get(linkview_name) if linkview: - for ef in linkview["hide_filters"]: - f = multisite_filters[ef] - columns += f.link_columns + for filt in [ visuals.get_filter(fn) for fn in visuals.get_single_info_keys(linkview) ]: + columns += filt.link_columns + + # The site attribute is no column. Filter it out here + #if 'site' in columns: + # columns.remove('site') + if len(entry) > 2 and entry[2]: tt = entry[2] columns += multisite_painters[tt]["columns"] @@ -1134,8 +890,6 @@ display_options = html.var("_display_options", "") display_options = apply_display_option_defaults(display_options) html.display_options = display_options - # Dont do this!! This garbles up the title links after a reload. - #html.title_display_options = display_options # But there is one special case: The sorter links! These links need to know # about the provided display_option parameter. The links could use @@ -1161,66 +915,115 @@ # Display view with real data. This is *the* function everying # is about. def show_view(view, show_heading = False, show_buttons = True, - show_footer = True, render_function = None, only_count=False): + show_footer = True, render_function = None, only_count=False, + all_filters_active=False, limit=None): + if html.var("mode") == "availability" and html.has_var("av_aggr_name") and html.var("timeline"): + bi.page_timeline() + return + display_options = prepare_display_options() # User can override the layout settings via HTML variables (buttons) - # which are safed persistently. 
This is known as "view options" - vo = view_options(view["name"]) + # which are safed persistently. This is known as "view options". Note: a few + # can be anonymous (e.g. when embedded into a report). In that case there + # are no display options. + if "name" in view: + vo = view_options(view["name"]) + else: + vo = {} num_columns = vo.get("num_columns", view.get("num_columns", 1)) browser_reload = vo.get("refresh", view.get("browser_reload", None)) - show_checkboxes = html.var('show_checkboxes', '0') == '1' + force_checkboxes = view.get("force_checkboxes", False) + show_checkboxes = force_checkboxes or html.var('show_checkboxes', '0') == '1' # Get the datasource (i.e. the logical table) datasource = multisite_datasources[view["datasource"]] tablename = datasource["table"] - # Filters to show in the view - show_filters = [ multisite_filters[fn] for fn in view["show_filters"] ] - - # add ubiquitary_filters that are possible for this datasource - for fn in ubiquitary_filters: - # Disable 'wato_folder' filter, if WATO is disabled or there is a single host view - if fn == "wato_folder" and (not config.wato_enabled or "host" in view["hide_filters"]): - continue - filter = multisite_filters[fn] - if not filter.info or filter.info in datasource["infos"]: - show_filters.append(filter) - - hide_filters = [ multisite_filters[fn] for fn in view["hide_filters"] ] - hard_filters = [ multisite_filters[fn] for fn in view["hard_filters"] ] - - for varname, value in view["hard_filtervars"]: - # shown filters are set, if form is fresh and variable not supplied in URL - if only_count or (html.var("filled_in") != "filter" and not html.has_var(varname)): - html.set_var(varname, value) + # Filters to use in the view + # In case of single object views, the needed filters are fixed, but not always present + # in context. In this case, take them from the context type definition. + use_filters = visuals.filters_of_visual(view, datasource['infos'], + all_filters_active, datasource.get('link_filters', {})) + + # Not all filters are really shown later in show_filter_form(), because filters which + # have a hardcoded value are not changeable by the user + show_filters = visuals.visible_filters_of_visual(view, use_filters) + + # FIXME TODO HACK to make grouping single contextes possible on host/service infos + # Is hopefully cleaned up soon. + if view['datasource'] in ['hosts', 'services']: + if 'hostgroup' in view['single_infos']: + html.set_var('opthost_group', html.var('hostgroup')) + if 'servicegroup' in view['single_infos']: + html.set_var('optservice_group', html.var('servicegroup')) + + # Now populate the HTML vars with context vars from the view definition. Hard + # coded default values are treated differently: + # + # a) single context vars of the view are enforced + # b) multi context vars can be overwritten by existing HTML vars + visuals.add_context_to_uri_vars(view, datasource["infos"], only_count) + + # Check that all needed information for configured single contexts are available + visuals.verify_single_contexts('views', view, datasource.get('link_filters', {})) + + # Af any painter, sorter or filter needs the information about the host's + # inventory, then we load it and attach it as column "host_inventory" + need_inventory_data = False # Prepare Filter headers for Livestatus + # TODO: When this is used by the reporting then *all* filters are + # active. That way the inventory data will always be loaded. When + # we convert this to the visuals principle the we need to optimize + # this. 
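
# [Sketch, not part of this patch] The code below concatenates one
# "Filter:" header fragment per active filter into a single Livestatus
# query. A minimal standalone illustration (table, columns and filter
# values are invented):
def build_query(table, columns, headers):
    query = "GET %s\n" % table
    query += "Columns: %s\n" % " ".join(columns)
    for header in headers:
        query += header  # e.g. "Filter: host_name = myhost\n"
    return query

# build_query("services", ["host_name", "description", "state"],
#             ["Filter: state = 2\n", "Filter: host_name ~ ^srv\n"])
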
filterheaders = "" - only_sites = None - all_active_filters = [ f for f in show_filters + hide_filters + hard_filters if f.available() ] + all_active_filters = [ f for f in use_filters if f.available() ] for filt in all_active_filters: header = filt.filter(tablename) - if header.startswith("Sites:"): - only_sites = header.strip().split(" ")[1:] - else: - filterheaders += header + filterheaders += header + if filt.need_inventory(): + need_inventory_data = True + + # Apply the site hint / filter + if html.var("site"): + only_sites = [html.var("site")] + else: + only_sites = None + + # Prepare limit: + # We had a problem with stats queries on the logtable where + # the limit was not applied on the resulting rows but on the + # lines of the log processed. This resulted in wrong stats. + # For these datasources we ignore the query limits. + if limit == None: # Otherwise: specified as argument + if not datasource.get('ignore_limit', False): + limit = get_limit() + + # Fork to availability view. We just need the filter headers, since we do not query the normal + # hosts and service table, but "statehist". This is *not* true for BI availability, though (see later) + if html.var("mode") == "availability" and ( + "aggr" not in datasource["infos"] or html.var("timeline_aggr")): + return render_availability(view, datasource, filterheaders, display_options, only_sites, limit) query = filterheaders + view.get("add_headers", "") # Sorting - use view sorters and URL supplied sorters if not only_count: sorter_list = html.has_var('sort') and parse_url_sorters(html.var('sort')) or view["sorters"] - sorters = [ (multisite_sorters[s[0]],) + s[1:] for s in sorter_list ] + sorters = [ (multisite_sorters[s[0]],) + s[1:] for s in sorter_list + if s[0] in multisite_sorters ] else: sorters = [] - # Prepare gropuing information - group_painters = [ (multisite_painters[e[0]],) + e[1:] for e in view["group_painters"] ] + # Prepare grouping information + group_painters = [ (multisite_painters[e[0]],) + e[1:] for e in view["group_painters"] + if e[0] in multisite_painters ] # Prepare columns to paint - painters = [ (multisite_painters[e[0]],) + e[1:] for e in view["painters"] ] + painters = [ (multisite_painters[e[0]],) + e[1:] for e in view["painters"] + if e[0] in multisite_painters ] # Now compute the list of all columns we need to query via Livestatus. 
# Those are: (1) columns used by the sorters in use, (2) columns use by @@ -1232,8 +1035,8 @@ all_painters = group_painters + painters join_painters = [ p for p in all_painters if len(p) >= 4 ] master_painters = [ p for p in all_painters if len(p) < 4 ] - columns = get_needed_columns(master_painters) - join_columns = get_needed_columns(join_painters) + columns = get_needed_columns(view, master_painters) + join_columns = get_needed_columns(view, join_painters) # Columns needed for sorters for s in sorters: @@ -1241,6 +1044,8 @@ columns += s[0]["columns"] else: join_columns += s[0]["columns"] + if s[0].get("load_inv"): + need_inventory_data = True # Add key columns, needed for executing commands columns += datasource["keys"] @@ -1248,31 +1053,30 @@ # Add idkey columns, needed for identifying the row columns += datasource["idkeys"] + # BI availability needs aggr_tree + if html.var("mode") == "availability" and "aggr" in datasource["infos"]: + columns = [ "aggr_tree", "aggr_name", "aggr_group" ] + # Make column list unique and remove (implicit) site column colset = set(columns) if "site" in colset: colset.remove("site") columns = list(colset) - # We had a problem with stats queries on the logtable where - # the limit was not applied on the resulting rows but on the - # lines of the log processed. This resulted in wrong stats. - # For these datasources we ignore the query limits. - limit = None - if not datasource.get('ignore_limit', False): - limit = get_limit() - # Get list of painter options we need to display (such as PNP time range # or the format being used for timestamp display) painter_options = [] for entry in all_painters: p = entry[0] painter_options += p.get("options", []) + if p.get("load_inv"): + need_inventory_data = True + painter_options = list(set(painter_options)) painter_options.sort() # Fetch data. Some views show data only after pressing [Search] - if (only_count or (not view["mustsearch"]) or html.var("filled_in") in ["filter", 'actions', 'confirm']): + if (only_count or (not view.get("mustsearch")) or html.var("filled_in") in ["filter", 'actions', 'confirm']): # names for additional columns (through Stats: headers) add_columns = datasource.get("add_columns", []) @@ -1288,6 +1092,12 @@ if len(join_painters) > 0: do_table_join(datasource, rows, filterheaders, join_painters, join_columns, only_sites) + # Add inventory data if one of the painters or filters needs it + if need_inventory_data: + for row in rows: + if "host_name" in row: + row["host_inventory"] = inventory.host(row["host_name"]) + sort_data(rows, sorters) else: rows = [] @@ -1296,10 +1106,15 @@ for filter in all_active_filters: rows = filter.filter_table(rows) + if html.var("mode") == "availability": + render_bi_availability(view_title(view), rows) + return + # TODO: Use livestatus Stats: instead of fetching rows! if only_count: - for varname, value in view["hard_filtervars"]: - html.del_var(varname) + for fname, filter_vars in view["context"].items(): + for varname, value in filter_vars.items(): + html.del_var(varname) return len(rows) # Set browser reload @@ -1307,9 +1122,15 @@ html.set_browser_reload(browser_reload) # The layout of the view: it can be overridden by several specifying - # an output format (like json or python). + # an output format (like json or python). Note: the layout is not + # always needed. In case of an embedded view in the reporting this + # field is simply missing, because the rendering is done by the + # report itself. 
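
# [Sketch, not part of this patch] Layout resolution as described in the
# comment above: HTML output uses the view's own layout (which embedded
# report views may lack), while other output formats look up a layout
# registered under the format's name. The "json" fallback is an
# assumption for illustration only.
def resolve_layout(view, output_format, layouts):
    if output_format == "html":
        return layouts[view["layout"]] if "layout" in view else None
    return layouts.get(output_format, layouts.get("json"))
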
if html.output_format == "html": - layout = multisite_layouts[view["layout"]] + if "layout" in view: + layout = multisite_layouts[view["layout"]] + else: + layout = None else: layout = multisite_layouts.get(html.output_format) if not layout: @@ -1323,7 +1144,7 @@ render_function(view, rows, datasource, group_painters, painters, display_options, painter_options, show_heading, show_buttons, - show_checkboxes, layout, num_columns, show_filters, show_footer, hide_filters, + show_checkboxes, layout, num_columns, show_filters, show_footer, browser_reload) @@ -1331,9 +1152,11 @@ # then please also do this in htdocs/mobile.py! def render_view(view, rows, datasource, group_painters, painters, display_options, painter_options, show_heading, show_buttons, - show_checkboxes, layout, num_columns, show_filters, show_footer, hide_filters, + show_checkboxes, layout, num_columns, show_filters, show_footer, browser_reload): + if html.transaction_valid() and html.do_actions(): + html.set_browser_reload(0) # Show heading (change between "preview" mode and full page mode) if show_heading: @@ -1359,18 +1182,20 @@ can_display_checkboxes = layout.get('checkboxes', False) if show_buttons: - show_context_links(view, hide_filters, show_filters, display_options, + show_context_links(view, show_filters, display_options, painter_options, # Take into account: permissions, display_options row_count > 0 and command_form, # Take into account: layout capabilities - can_display_checkboxes, show_checkboxes) + can_display_checkboxes and not view.get("force_checkboxes"), show_checkboxes, + # Show link to availability + datasource["table"] in [ "hosts", "services" ] or "aggr" in datasource["infos"]) # User errors in filters html.show_user_errors() # Filter form - filter_isopen = html.var("filled_in") != "filter" and view["mustsearch"] + filter_isopen = html.var("filled_in") != "filter" and view.get("mustsearch") if 'F' in display_options and len(show_filters) > 0: show_filter_form(filter_isopen, show_filters) @@ -1384,11 +1209,11 @@ if html.do_actions() and html.transaction_valid(): # submit button pressed, no reload try: # Create URI with all actions variables removed - backurl = html.makeuri([]) + backurl = html.makeuri([], delvars=['filled_in', 'actions']) has_done_actions = do_actions(view, datasource["infos"][0], rows, backurl) except MKUserError, e: - html.show_error(e.message) - html.add_user_error(e.varname, e.message) + html.show_error(e) + html.add_user_error(e.varname, e) if 'C' in display_options: show_command_form(True, datasource) @@ -1429,8 +1254,8 @@ if show_buttons: update_context_links( # don't take display_options into account here ('c' is set during reload) - row_count > 0 and should_show_command_form('C', datasource) \ - and not html.do_actions(), + row_count > 0 and should_show_command_form('C', datasource), + # and not html.do_actions(), can_display_checkboxes ) @@ -1461,6 +1286,13 @@ if bi.reused_compilation(): html.add_status_icon("aggrcomp", _("Reused cached compiled BI aggregations (PID %d)") % pid) + if config.may('wato.users'): + try: + msg = file(defaults.var_dir + '/web/ldap_sync_fail.mk').read() + html.add_status_icon("ldap", _('Last LDAP sync failed! %s') % html.attrencode(msg)) + except IOError: + pass + html.bottom_focuscode() if 'Z' in display_options: html.bottom_footer() @@ -1468,35 +1300,59 @@ if 'H' in display_options: html.body_end() +# We should rename this into "painter_options". Also the saved file. def view_options(viewname): # Options are stored per view. 
Get all options for all views vo = config.load_user_file("viewoptions", {}) + # Now get options for the view in question v = vo.get(viewname, {}) must_save = False + # Now override the loaded options with new option settings that are + # provided by the URL. Our problem: we do not know the URL variables + # that a valuespec expects. But we know the common prefix of all + # variables for each option. if config.may("general.painter_options"): - for on, opt in multisite_painter_options.items(): - if html.has_var(on): - must_save = True - # Make sure only allowed values are returned - value = html.var(on) - for val, title in opt["values"]: - if value == val: - v[on] = value - elif on not in v: - v[on] = opt["default"] - opt["value"] = v[on] - + for option_name, opt in multisite_painter_options.items(): + have_old_value = option_name in v + if have_old_value: + old_value = v.get(option_name) + + # Are there settings for this painter option present? + var_prefix = 'po_' + option_name + if html.has_var_prefix(var_prefix): + + # Get new value for the option from the value spec + vs = opt['valuespec'] + value = vs.from_html_vars(var_prefix) + + v[option_name] = value + opt['value'] = value # make globally present for painters + + if not have_old_value or v[option_name] != old_value: + must_save = True + + elif have_old_value: + opt['value'] = old_value # make globally present for painters + elif 'value' in opt: + del opt['value'] + + # If the user has no permission for changing painter options + # (or has *lost* his permission) then we need to remove all + # of the options. But we do not save. else: for on, opt in multisite_painter_options.items(): if on in v: del v[on] - opt["value"] = None + must_save = True + if 'value' in opt: + del opt['value'] if must_save: vo[viewname] = v config.save_user_file("viewoptions", vo) + return v @@ -1554,35 +1410,7 @@ return config.soft_query_limit def view_title(view): - extra_titles = [ ] - datasource = multisite_datasources[view["datasource"]] - tablename = datasource["table"] - hide_filters = [ multisite_filters[fn] for fn in view["hide_filters"] ] - for filt in hide_filters: - heading = filt.heading_info(tablename) - if heading: - extra_titles.append(heading) - - title = view["title"] + " " + ", ".join(extra_titles) - - for fn in ubiquitary_filters: - # Disable 'wato_folder' filter, if WATO is disabled or there is a single host view - if fn == "wato_folder" and (not config.wato_enabled or "host" in view["hide_filters"]): - continue - filt = multisite_filters[fn] - heading = filt.heading_info(tablename) - if heading: - title = heading + " - " + title - - return title - -# Return title for context link buttons -def view_linktitle(view): - t = view.get("linktitle") - if not t: - return view_title(view) - else: - return t + return visuals.visual_title('view', view) def view_optiondial(view, option, choices, help): vo = view_options(view["name"]) @@ -1605,16 +1433,6 @@ def view_optiondial_off(option): html.write('
<div class="optiondial off %s"></div>' % option)
-
-def view_option_toggler(id, view, option, icon, help, hidden = False):
-    vo = view_options(view["name"])
-    value = vo.get(option, view.get(option, False))
-    html.begin_context_buttons() # just to be sure
-    hide = hidden and ' style="display:none"' or ''
-    html.write('
' % ( - id, help, icon, value and "down" or "up", view["name"], option, hide)) - def toggler(id, icon, help, onclick, value, hidden = False): html.begin_context_buttons() # just to be sure hide = hidden and ' style="display:none"' or '' @@ -1657,14 +1475,15 @@ html.write('
' % (id, icon, cssclass, help, id, hide)) -def show_context_links(thisview, active_filters, show_filters, display_options, - painter_options, enable_commands, enable_checkboxes, show_checkboxes): +def show_context_links(thisview, show_filters, display_options, + painter_options, enable_commands, enable_checkboxes, show_checkboxes, + show_availability): # html.begin_context_buttons() called automatically by html.context_button() # That way if no button is painted we avoid the empty container if 'B' in display_options: execute_hooks('buttons-begin') - filter_isopen = html.var("filled_in") != "filter" and thisview["mustsearch"] + filter_isopen = html.var("filled_in") != "filter" and thisview.get("mustsearch") if 'F' in display_options: if len(show_filters) > 0: if html.var("filled_in") == "filter": @@ -1689,9 +1508,10 @@ togglebutton_off("commands", "commands", hidden = enable_commands) selection_enabled = enable_commands and enable_checkboxes - toggler("checkbox", "checkbox", _("Enable/Disable checkboxes for selecting rows for commands"), - "location.href='%s';" % html.makeuri([('show_checkboxes', show_checkboxes and '0' or '1')]), - show_checkboxes, hidden = not selection_enabled) + if not thisview.get("force_checkboxes"): + toggler("checkbox", "checkbox", _("Enable/Disable checkboxes for selecting rows for commands"), + "location.href='%s';" % html.makeuri([('show_checkboxes', show_checkboxes and '0' or '1')]), + show_checkboxes, hidden = True) # not selection_enabled) togglebutton_off("checkbox", "checkbox", hidden = selection_enabled) html.javascript('g_selection_enabled = %s;' % (selection_enabled and 'true' or 'false')) @@ -1709,8 +1529,8 @@ view_optiondial_off("refresh") - # WATO: If we have a host context, then show button to WATO, if permissions allow this if 'B' in display_options: + # WATO: If we have a host context, then show button to WATO, if permissions allow this if html.has_var("host") \ and config.wato_enabled \ and config.may("wato.use") \ @@ -1718,27 +1538,34 @@ and wato.using_wato_hosts(): host = html.var("host") if host: - url = wato.api.link_to_host(host) + url = wato.link_to_host(host) else: - url = wato.api.link_to_path(html.var("wato_folder", "")) + url = wato.link_to_path(html.var("wato_folder", "")) html.context_button(_("WATO"), url, "wato", id="wato", bestof = config.context_buttons_to_show) - links = collect_context_links(thisview, active_filters) - for view, linktitle, uri, icon, buttonid in links: - if not view.get("mobile"): - html.context_button(linktitle, url=uri, icon=icon, id=buttonid, bestof=config.context_buttons_to_show) + # Button for creating an instant report (if reporting is available) + if config.reporting_available(): + html.context_button(_("Export as PDF"), html.makeuri([], filename="report_instant.py"), "report") + + # Buttons to other views, dashboards, etc. 
+ links = visuals.collect_context_links(thisview) + for linktitle, uri, icon, buttonid in links: + html.context_button(linktitle, url=uri, icon=icon, id=buttonid, bestof=config.context_buttons_to_show) # Customize/Edit view button if 'E' in display_options and config.may("general.edit_views"): - backurl = htmllib.urlencode(html.makeuri([])) + backurl = html.urlencode(html.makeuri([])) if thisview["owner"] == config.user_id: - url = "edit_view.py?load_view=%s&back=%s" % (thisview["name"], backurl) + url = "edit_view.py?load_name=%s&back=%s" % (thisview["name"], backurl) else: - url = "edit_view.py?clonefrom=%s&load_view=%s&back=%s" % \ + url = "edit_view.py?load_user=%s&load_name=%s&back=%s" % \ (thisview["owner"], thisview["name"], backurl) html.context_button(_("Edit View"), url, "edit", id="edit", bestof=config.context_buttons_to_show) + if 'E' in display_options and show_availability: + html.context_button(_("Availability"), html.makeuri([("mode", "availability")]), "availability") + if 'B' in display_options: execute_hooks('buttons-end') @@ -1748,59 +1575,6 @@ html.javascript("update_togglebutton('commands', %d);" % (enable_command_toggle and 1 or 0)) html.javascript("update_togglebutton('checkbox', %d);" % (enable_command_toggle and enable_checkbox_toggle and 1 or 0, )) -# Collect all views that share a context with thisview. For example -# if a view has an active filter variable specifying a host, then -# all host-related views are relevant. -def collect_context_links(thisview, active_filters): - # compute list of html variables used actively by hidden or shown - # filters. - active_filter_vars = set([]) - for filt in active_filters: - for var in filt.htmlvars: - if html.has_var(var): - active_filter_vars.add(var) - - context_links = [] - # sort view buttons somehow - sorted_views = html.available_views.values() - sorted_views.sort(cmp = lambda b,a: cmp(a.get('icon'), b.get('icon'))) - - for view in sorted_views: - name = view["name"] - linktitle = view.get("linktitle") - if not linktitle: - linktitle = view["title"] - if view == thisview: - continue - if view.get("hidebutton", False): - continue # this view does not want a button to be displayed - hidden_filternames = view["hide_filters"] - used_contextvars = [] - skip = False - for fn in hidden_filternames: - filt = multisite_filters[fn] - contextvars = filt.htmlvars - # now extract those variables which are honored by this - # view, regardless if used by hardcoded, shown or hidden filters. 
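
# [Sketch, not part of this patch] The removed loop below implemented
# this eligibility test: a view qualifies as a context link only when
# every HTML variable of each of its hidden filters is currently set.
# Standalone version of that rule:
def view_qualifies_as_context_link(hidden_filter_vars, active_vars):
    # hidden_filter_vars: one variable list per hidden filter,
    # e.g. [["site"], ["site", "host"]]; active_vars: set of set vars
    for contextvars in hidden_filter_vars:
        for var in contextvars:
            if var not in active_vars:
                return False
    return True
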
- for var in contextvars: - if var not in active_filter_vars: - skip = var - break - used_contextvars += contextvars - if skip: - break - if skip: - continue - - # add context link to this view - if len(used_contextvars): - vars_values = [ (var, html.var(var)) for var in set(used_contextvars) ] - uri = html.makeuri_contextless(vars_values + [("view_name", name)]) - icon = view.get("icon") - buttonid = "cb_" + name - context_links.append((view, linktitle, uri, icon, buttonid)) - return context_links - def ajax_count_button(): id = html.var("id") @@ -1845,8 +1619,12 @@ # Remove columns which are implicitely added by the datasource columns = [ c for c in columns if c not in add_columns ] - query = "GET %s\n" % tablename + return do_query_data(query, columns, add_columns, merge_column, + add_headers, only_sites, limit) + +def do_query_data(query, columns, add_columns, merge_column, + add_headers, only_sites, limit): query += "Columns: %s\n" % " ".join(columns) query += add_headers html.live.set_prepend_site(True) @@ -1854,7 +1632,7 @@ html.live.set_limit(limit + 1) # + 1: We need to know, if limit is exceeded if config.debug_livestatus_queries \ and html.output_format == "html" and 'W' in html.display_options: - html.write('
<div class=message>'
+        html.write('<div class="livestatus message" onmouseover="this.style.display=\'none\';">'
                   '<tt>%s</tt></div>\n' % (query.replace('\n', '<br>
\n'))) if only_sites: @@ -1875,6 +1653,7 @@ return rows + # Merge all data rows with different sites but the same value # in merge_column. We require that all column names are prefixed # with the tablename. The column with the merge key is required @@ -1931,7 +1710,7 @@ return # Handle case where join columns are not present for all rows - def save_compare(compfunc, row1, row2): + def save_compare(compfunc, row1, row2, args): if row1 == None and row2 == None: return 0 elif row1 == None: @@ -1939,7 +1718,10 @@ elif row2 == None: return 1 else: - return compfunc(row1, row2) + if args: + return compfunc(row1, row2, *args) + else: + return compfunc(row1, row2) sort_cmps = [] for s in sorters: @@ -1949,34 +1731,22 @@ joinkey = s[2] # e.g. service description else: joinkey = None - sort_cmps.append((cmpfunc, negate, joinkey)) + sort_cmps.append((cmpfunc, negate, joinkey, s[0].get('args'))) def multisort(e1, e2): - for func, neg, joinkey in sort_cmps: + for func, neg, joinkey, args in sort_cmps: if joinkey: # Sorter for join column, use JOIN info - c = neg * save_compare(func, e1["JOIN"].get(joinkey), e2["JOIN"].get(joinkey)) + c = neg * save_compare(func, e1["JOIN"].get(joinkey), e2["JOIN"].get(joinkey), args) else: - c = neg * func(e1, e2) + if args: + c = neg * func(e1, e2, *args) + else: + c = neg * func(e1, e2) if c != 0: return c return 0 # equal data.sort(multisort) -# Create a list of filters allowed for a certain data source. -# Each filter is valid for a special info, e.g. "host" or -# "service". or always (info is None in that case). -# Each datasource provides a list of info. The datasource "services" -# provides "service" and "host", for example. -def filters_allowed_for_datasource(datasourcename): - datasource = multisite_datasources[datasourcename] - infos = datasource["infos"] - allowed = {} - for fname, filt in multisite_filters.items(): - if filt.info == None or filt.info in infos: - allowed[fname] = filt - return allowed - - # Filters a list of sorters or painters and decides which of # those are available for a certain data source def allowed_for_datasource(collection, datasourcename): @@ -1997,10 +1767,6 @@ return {} return allowed_for_datasource(collection, multisite_datasources[datasourcename]['join'][0]) -def is_joined_value(collection, varname): - selected_label = [ label for name, label in collection if name == html.var(varname, '') ] - return selected_label and selected_label[0].startswith(_('SERVICE:')) - def collist_of_collection(collection, join_target = []): def sort_list(l): # Sort the lists but don't mix them up @@ -2011,9 +1777,10 @@ if not join_target: return sort_list([ (name, p["title"]) for name, p in collection.items() ]) else: - return sort_list([ (name, _('SERVICE:') + ' ' + p["title"]) for name, p in collection.items() if (name, p["title"]) not in join_target ]) + return sort_list([ (name, p["title"]) for name, p in collection.items() if (name, p["title"]) not in join_target ]) -# .----------------------------------------------------------------------. +#. +# .--Commands------------------------------------------------------------. # | ____ _ | # | / ___|___ _ __ ___ _ __ ___ __ _ _ __ __| |___ | # | | | / _ \| '_ ` _ \| '_ ` _ \ / _` | '_ \ / _` / __| | @@ -2070,6 +1837,10 @@ by_group = {} for command in multisite_commands: if what in command["tables"] and config.may(command["permission"]): + # Some special commands can be shown on special views using this option. + # It is currently only used in custom views, not shipped with check_mk. 
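
# [Sketch, not part of this patch] What a command plugin using the new
# 'only_view' key handled below might look like. The dict fields follow
# the keys this file reads; the permission, view name and action are
# invented for illustration:
multisite_commands.append({
    "tables"     : [ "host", "service" ],
    "permission" : "action.addcomment",        # hypothetical permission
    "title"      : "Add comment",
    "group"      : "Various Commands",
    "only_view"  : "my_custom_view",           # offered only on this view
    # action returns (list of core commands, confirmation title)
    "action"     : lambda cmdtag, spec, row:
                       ([ "ADD_%s_COMMENT;%s;1;editor;note" % (cmdtag, spec) ],
                        "add a comment to"),
})
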
+ if command.get('only_view') and html.var('view_name') != command['only_view']: + continue group = command.get("group", _("Various Commands")) by_group.setdefault(group, []).append(command) @@ -2090,7 +1861,7 @@ # (host name, service description, downtime/commands id) and # construct one or several core command lines and a descriptive # title. -def core_command(what, row): +def core_command(what, row, row_nr, total_rows): host = row.get("host_name") descr = row.get("service_description") @@ -2110,13 +1881,21 @@ cmdtag = "HOST" commands = None + title = None # Call all command actions. The first one that detects # itself to be executed (by examining the HTML variables) # will return a command to execute and a title for the # confirmation dialog. for cmd in multisite_commands: if config.may(cmd["permission"]): - result = cmd["action"](cmdtag, spec, row) + + # Does the command need information about the total number of rows + # and the number of the current row? Then specify that + if cmd.get("row_stats"): + result = cmd["action"](cmdtag, spec, row, row_nr, total_rows) + else: + result = cmd["action"](cmdtag, spec, row) + if result: executor = cmd.get("executor", command_executor_livestatus) commands, title = result @@ -2164,19 +1943,22 @@ return False # no actions done command = None - title, executor = core_command(what, action_rows[0])[1:3] # just get the title and executor + title, executor = core_command(what, action_rows[0], 0, len(action_rows))[1:3] # just get the title and executor if not html.confirm(_("Do you really want to %(title)s the following %(count)d %(what)s?") % { "title" : title, "count" : len(action_rows), "what" : _(what + "s"), }, method = 'GET'): return False count = 0 - for row in action_rows: - core_commands, title, executor = core_command(what, row) + already_executed = set([]) + for nr, row in enumerate(action_rows): + core_commands, title, executor = core_command(what, row, nr, len(action_rows)) for command in core_commands: - if type(command) == unicode: - command = command.encode("utf-8") - executor(command, row["site"]) - count += 1 + if (row["site"], command) not in already_executed: + if type(command) == unicode: + command = command.encode("utf-8") + executor(command, row["site"]) + already_executed.add((row["site"], command)) + count += 1 message = None if command: @@ -2189,6 +1971,11 @@ if message: if html.output_format == "html": # sorry for this hack message += '
<br><a href="%s">%s</a>' % (backurl, _('Back to view'))
+            if html.var("show_checkboxes") == "1":
+                html.del_var("selection")
+                weblib.selection_id()
+                backurl += "&selection=" + html.var("selection")
+                message += '<br><a href="%s">
%s' % (backurl, _('Back to view with checkboxes reset')) html.message(message) return True @@ -2202,36 +1989,33 @@ def get_context_link(user, viewname): - if viewname in html.available_views: + if viewname in available_views: return "view.py?view_name=%s" % viewname else: return None def ajax_export(): load_views() - for name, view in html.available_views.items(): + for name, view in available_views.items(): view["owner"] = '' view["public"] = True - html.write(pprint.pformat(html.available_views)) + html.write(pprint.pformat(available_views)) +def get_view_by_name(view_name): + load_views() + return available_views[view_name] -def page_message_and_forward(message, default_url, addhtml=""): - url = html.var("back") - if not url: - url = default_url - - html.set_browser_redirect(1, url) - html.header("Multisite") - html.message(message) - html.write(addhtml) - html.footer() - -# ____ _ _ _ _ _ -# | _ \| |_ _ __ _(_)_ __ | | | | ___| |_ __ ___ _ __ ___ -# | |_) | | | | |/ _` | | '_ \ _____| |_| |/ _ \ | '_ \ / _ \ '__/ __| -# | __/| | |_| | (_| | | | | |_____| _ | __/ | |_) | __/ | \__ \ -# |_| |_|\__,_|\__, |_|_| |_| |_| |_|\___|_| .__/ \___|_| |___/ -# |___/ |_| +#. +# .--Plugin Helpers------------------------------------------------------. +# | ____ _ _ _ _ _ | +# | | _ \| |_ _ __ _(_)_ __ | | | | ___| |_ __ ___ _ __ ___ | +# | | |_) | | | | |/ _` | | '_ \ | |_| |/ _ \ | '_ \ / _ \ '__/ __| | +# | | __/| | |_| | (_| | | | | | | _ | __/ | |_) | __/ | \__ \ | +# | |_| |_|\__,_|\__, |_|_| |_| |_| |_|\___|_| .__/ \___|_| |___/ | +# | |___/ |_| | +# +----------------------------------------------------------------------+ +# | | +# '----------------------------------------------------------------------' def register_hook(hook, func): if not hook in view_hooks: @@ -2251,19 +2035,42 @@ else: pass +def paint(p, row, tdattrs=""): + tdclass, content = prepare_paint(p, row) + + if tdclass: + html.write("%s\n" % (tdattrs, tdclass, content)) + else: + html.write("%s" % (tdattrs, content)) + return content != "" + +def paint_painter(painter, row): + if not row: + return "", "" # no join information available for that column + + if "args" in painter: + return painter["paint"](row, *painter["args"]) + else: + return painter["paint"](row) + +def join_row(row, p): + join_key = len(p) >= 4 and p[3] or None + if join_key != None: + return row.get("JOIN", {}).get(join_key) + else: + return row + def prepare_paint(p, row): painter = p[0] linkview = p[1] tooltip = len(p) > 2 and p[2] or None - if len(p) >= 4: - join_key = p[3] - row = row.get("JOIN", {}).get(p[3]) - if not row: - return "", "" # no join information available for that column - tdclass, content = painter["paint"](row) + row = join_row(row, p) + tdclass, content = paint_painter(painter, row) + if tdclass == "" and content == "": + return tdclass, content - content = htmllib.utf8_to_entities(content) + content = html.utf8_to_entities(content) # Create contextlink to other view if content and linkview: @@ -2272,29 +2079,54 @@ # Tooltip if content != '' and tooltip: cla, txt = multisite_painters[tooltip]["paint"](row) - tooltiptext = htmllib.strip_tags(txt) + tooltiptext = html.utf8_to_entities(html.strip_tags(txt)) content = '%s' % (tooltiptext, content) return tdclass, content -def link_to_view(content, row, linkview): +def link_to_view(content, row, view_name): if 'I' not in html.display_options: return content - view = html.available_views.get(linkview) + view = available_views.get(view_name) if view: - filters = [ multisite_filters[fn] for fn in 
view["hide_filters"] ] - vars = [] - for filt in filters: - vars += filt.variable_settings(row) + # Get the context type of the view to link to, then get the parameters of this + # context type and try to construct the context from the data of the row + url_vars = [] + datasource = multisite_datasources[view['datasource']] + for info_key in datasource['infos']: + if info_key in view['single_infos']: + # Determine which filters (their names) need to be set + # for specifying in order to select correct context for the + # target view. + for filter_name in visuals.info_params(info_key): + filter_object = visuals.get_filter(filter_name) + # Get the list of URI vars to be set for that filter + new_vars = filter_object.variable_settings(row) + url_vars += new_vars + + # See get_link_filter_names() comment for details + for src_key, dst_key in visuals.get_link_filter_names(view, datasource['infos'], + datasource.get('link_filters', {})): + url_vars += visuals.get_filter(src_key).variable_settings(row) + url_vars += visuals.get_filter(dst_key).variable_settings(row) + + # Some special handling for the site filter which is meant as optional hint + # Always add the site filter var when some useful information is available + add_site_hint = True + for filter_key in datasource.get('multiple_site_filters', []): + if filter_key in dict(url_vars): + add_site_hint = False + + if add_site_hint and row.get('site'): + url_vars.append(('site', row['site'])) + do = html.var("display_options") if do: - vars.append(("display_options", do)) + url_vars.append(("display_options", do)) filename = html.mobile and "mobile_view.py" or "view.py" - uri = filename + "?" + htmllib.urlencode_vars([("view_name", linkview)] + vars) + uri = filename + "?" + html.urlencode_vars([("view_name", view_name)] + url_vars) content = "%s" % (uri, content) -# rel = 'view.py?view_name=hoststatus&site=local&host=erdb-lit&display_options=htbfcoezrsix' -# content = '%s' % (rel, uri, content) return content def docu_link(topic, text): @@ -2310,13 +2142,12 @@ key += '~%s' % row[col] return str(hash(key)) -def paint(p, row, tdattrs=""): - tdclass, content = prepare_paint(p, row) - if tdclass: - html.write("%s\n" % (tdattrs, tdclass, content)) +def paint_stalified(row, text): + if is_stale(row): + return "stale", text else: - html.write("%s" % (tdattrs, content)) - return content != "" + return "", text + def substract_sorters(base, remove): for s in remove: @@ -2447,18 +2278,15 @@ # Important for links: # - Add the display options (Keeping the same display options as current) # - Link to _self (Always link to the current frame) - # - Keep the _body_class variable (e.g. 
for dashlets) thclass = '' onclick = '' title = '' if 'L' in html.display_options \ - and view.get('user_sortable', True) \ + and view.get('user_sortable', False) \ and get_sorter_name_of_painter(painter) is not None: params = [ ('sort', sort_url(view, painter, join_index)), ] - if html.has_var('_body_class'): - params.append(('_body_class', html.var('_body_class'))) if hasattr(html, 'title_display_options'): params.append(('display_options', html.title_display_options)) @@ -2484,7 +2312,10 @@ for p in group_painters: groupvalfunc = p[0].get("groupby") if groupvalfunc: - group.append(groupvalfunc(row)) + if "args" in p[0]: + group.append(groupvalfunc(row, *p[0]["args"])) + else: + group.append(groupvalfunc(row)) else: for c in p[0]["columns"]: group.append(row[c]) @@ -2492,17 +2323,46 @@ def get_painter_option(name): opt = multisite_painter_options[name] - if not config.may("general.painter_options"): - return opt["default"] - return opt.get("value", opt["default"]) + if "forced_value" in opt: + return opt["forced_value"] + elif not config.may("general.painter_options"): + return opt['valuespec'].default_value() + else: + return opt.get("value", opt['valuespec'].default_value()) def get_host_tags(row): + if type(row.get("host_custom_variables")) == dict: + return row["host_custom_variables"].get("TAGS", "") + + if type(row.get("host_custom_variable_names")) != list: + return "" + for name, val in zip(row["host_custom_variable_names"], row["host_custom_variable_values"]): if name == "TAGS": return val return "" +# Get the definition of a tag group +g_taggroups_by_id = {} +def get_tag_group(tgid): + # Build a cache + if not g_taggroups_by_id: + for entry in config.wato_host_tags: + g_taggroups_by_id[entry[0]] = (entry[1], entry[2]) + + return g_taggroups_by_id.get(tgid) + +def get_custom_var(row, key): + for name, val in zip(row["custom_variable_names"], + row["custom_variable_values"]): + if name == key: + return val + return "" + +def is_stale(row): + return row.get('service_staleness', row.get('host_staleness', 0)) >= config.staleness_threshold + def cmp_insensitive_string(v1, v2): c = cmp(v1.lower(), v2.lower()) # force a strict order in case of equal spelling but different @@ -2513,10 +2373,25 @@ return c # Sorting +def cmp_ip_address(column, r1, r2): + def split_ip(ip): + try: + return tuple(int(part) for part in ip.split('.')) + except: + return ip + v1, v2 = split_ip(r1.get(column, '')), split_ip(r2.get(column, '')) + return cmp(v1, v2) + + def cmp_simple_string(column, r1, r2): v1, v2 = r1.get(column, ''), r2.get(column, '') return cmp_insensitive_string(v1, v2) +def cmp_num_split(column, r1, r2): + c1 = r1[column] + c2 = r2[column] + return cmp(num_split(c1) + (c1,), num_split(c2) + (c2,)) + def cmp_string_list(column, r1, r2): v1 = ''.join(r1.get(column, [])) v2 = ''.join(r2.get(column, [])) @@ -2525,6 +2400,9 @@ def cmp_simple_number(column, r1, r2): return cmp(r1.get(column), r2.get(column)) +def cmp_custom_variable(r1, r2, key, cmp_func): + return cmp(get_custom_var(r1, key), get_custom_var(r2, key)) + def declare_simple_sorter(name, title, column, func): multisite_sorters[name] = { "title" : title, @@ -2544,3 +2422,18 @@ multisite_sorters[painter_name]["cmp"] = \ lambda r1, r2: func(multisite_painters[painter_name]['columns'][col_num], r2, r1) return painter_name + + + +# Ajax call for fetching parts of the tree +def ajax_inv_render_tree(): + hostname = html.var("host") + invpath = html.var("path") + tree = inventory.host(hostname) + node = inventory.get(tree, invpath) + if 
not node: + html.show_error(_("Invalid path %s in inventory tree") % invpath) + else: + render_inv_subtree_container(hostname, invpath, node) + + diff -Nru check-mk-1.2.2p3/htdocs/visuals.py check-mk-1.2.6p12/htdocs/visuals.py --- check-mk-1.2.2p3/htdocs/visuals.py 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/htdocs/visuals.py 2015-09-21 10:59:54.000000000 +0000 @@ -0,0 +1,1396 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. + +import os, copy + +try: + import simplejson as json +except ImportError: + import json + +from lib import * +from valuespec import * +import config, table + +# .--Plugins-------------------------------------------------------------. +# | ____ _ _ | +# | | _ \| |_ _ __ _(_)_ __ ___ | +# | | |_) | | | | |/ _` | | '_ \/ __| | +# | | __/| | |_| | (_| | | | | \__ \ | +# | |_| |_|\__,_|\__, |_|_| |_|___/ | +# | |___/ | +# +----------------------------------------------------------------------+ +# | | +# '----------------------------------------------------------------------' + +loaded_with_language = False + +def load_plugins(): + global loaded_with_language + if loaded_with_language == current_language: + return + + global visual_types + visual_types = { + 'views': { + 'show_url' : 'view.py', + 'ident_attr' : 'view_name', + 'title' : _("view"), + 'plural_title' : _("views"), + 'module_name' : 'views', + 'multicontext_links' : False, + }, + 'dashboards': { + 'show_url' : 'dashboard.py', + 'ident_attr' : 'name', + 'title' : _("dashboard"), + 'plural_title' : _("dashboards"), + 'module_name' : 'dashboard', + 'popup_add_handler' : 'popup_list_dashboards', + 'add_visual_handler' : 'popup_add_dashlet', + 'multicontext_links' : False, + }, + } + + global title_functions ; title_functions = [] + global infos ; infos = {} + global multisite_filters ; multisite_filters = {} + global ubiquitary_filters ; ubiquitary_filters = [] # Always show these filters + + load_web_plugins('visuals', globals()) + loaded_with_language = current_language + +#. +# .--Save/Load-----------------------------------------------------------. 
+# | ____ ___ _ | +# | / ___| __ ___ _____ / / | ___ __ _ __| | | +# | \___ \ / _` \ \ / / _ \ / /| | / _ \ / _` |/ _` | | +# | ___) | (_| |\ V / __// / | |__| (_) | (_| | (_| | | +# | |____/ \__,_| \_/ \___/_/ |_____\___/ \__,_|\__,_| | +# | | +# +----------------------------------------------------------------------+ +# | | +# '----------------------------------------------------------------------' + +def save(what, visuals): + uservisuals = {} + for (user_id, name), visual in visuals.items(): + if config.user_id == user_id: + uservisuals[name] = visual + config.save_user_file('user_' + what, uservisuals, unlock=True) + +# FIXME: Currently all user visual files of this type are locked. We could optimize +# this not to lock all files but only lock the files the user is about to modify. +def load(what, builtin_visuals, skip_func = None, lock=False): + visuals = {} + + # first load builtins. Set username to '' + for name, visual in builtin_visuals.items(): + visual["owner"] = '' # might have been forgotten on copy action + visual["public"] = True + visual["name"] = name + + # Dashboards had not all COMMON fields in previous versions. Add them + # here to be compatible for a specific time. Seamless migration, yeah. + visual.setdefault('description', '') + visual.setdefault('hidden', False) + + visuals[('', name)] = visual + + # Now scan users subdirs for files "visuals.mk" + subdirs = os.listdir(config.config_dir) + for user in subdirs: + try: + dirpath = config.config_dir + "/" + user + if not os.path.isdir(dirpath): + continue + + # Be compatible to old views.mk. The views.mk contains customized views + # in an old format which will be loaded, transformed and when saved stored + # in users_views.mk. When this file exists only this file is used. + path = "%s/user_%s.mk" % (dirpath, what) + if what == 'views' and not os.path.exists(path): + path = "%s/%s.mk" % (dirpath, what) + + if not os.path.exists(path): + continue + + if lock: + aquire_lock(path) + + user_visuals = eval(file(path).read()) + for name, visual in user_visuals.items(): + visual["owner"] = user + visual["name"] = name + + if skip_func and skip_func(visual): + continue + + # Maybe resolve inherited attributes. This was a feature for several versions + # to make the visual texts localizable. This has been removed because the visual + # texts can now be localized using the custom localization strings. + # This is needed for backward compatibility to make the visuals without these + # attributes get the attributes from their builtin visual. 
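+            # (Hedged example: a user-saved copy of the builtin "allhosts" view that
+            # was stored without a 'title' would get the builtin title filled in here.)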
+ builtin_visual = visuals.get(('', name)) + if builtin_visual: + for attr in [ 'title', 'linktitle', 'topic', 'description' ]: + if attr not in visual and attr in builtin_visual: + visual[attr] = builtin_visual[attr] + + # Repair visuals with missing 'title' or 'description' + visual.setdefault("title", name) + visual.setdefault("description", "") + + # Declare custom permissions + declare_visual_permission(what, name, visual) + + visuals[(user, name)] = visual + + + except SyntaxError, e: + raise MKGeneralException(_("Cannot load %s from %s: %s") % (what, path, e)) + + return visuals + +def declare_visual_permission(what, name, visual): + permname = "%s.%s" % (what[:-1], name) + if visual["public"] and not config.permission_exists(permname): + config.declare_permission(permname, visual["title"], + visual["description"], ['admin','user','guest']) + +# Load all users visuals just in order to declare permissions of custom visuals +def declare_custom_permissions(what): + subdirs = os.listdir(config.config_dir) + for user in subdirs: + try: + dirpath = config.config_dir + "/" + user + if os.path.isdir(dirpath): + path = "%s/%s.mk" % (dirpath, what) + if not os.path.exists(path): + continue + visuals = eval(file(path).read()) + for name, visual in visuals.items(): + declare_visual_permission(what, name, visual) + except: + if config.debug: + raise + +# Get the list of visuals which are available to the user +# (which could be retrieved with get_visual) +def available(what, all_visuals): + user = config.user_id + visuals = {} + permprefix = what[:-1] + + # 1. user's own visuals, if allowed to edit visuals + if config.may("general.edit_" + what): + for (u, n), visual in all_visuals.items(): + if u == user: + visuals[n] = visual + + # 2. visuals of special users allowed to globally override builtin visuals + for (u, n), visual in all_visuals.items(): + if n not in visuals and visual["public"] and config.user_may(u, "general.force_" + what): + # Honor original permissions for the current user + permname = "%s.%s" % (permprefix, n) + if config.permission_exists(permname) \ + and not config.may(permname): + continue + visuals[n] = visual + + # 3. Builtin visuals, if allowed. + for (u, n), visual in all_visuals.items(): + if u == '' and n not in visuals and config.may("%s.%s" % (permprefix, n)): + visuals[n] = visual + + # 4. other users visuals, if public. Sill make sure we honor permission + # for builtin visuals. Also the permission "general.see_user_visuals" is + # necessary. + if config.may("general.see_user_" + what): + for (u, n), visual in all_visuals.items(): + if n not in visuals and visual["public"] and config.user_may(u, "general.publish_" + what): + # Is there a builtin visual with the same name? If yes, honor permissions. + permname = "%s.%s" % (permprefix, n) + if config.permission_exists(permname) \ + and not config.may(permname): + continue + visuals[n] = visual + + return visuals + +#. +# .--Listing-------------------------------------------------------------. 
+# | _ _ _ _ | +# | | | (_)___| |_(_)_ __ __ _ | +# | | | | / __| __| | '_ \ / _` | | +# | | |___| \__ \ |_| | | | | (_| | | +# | |_____|_|___/\__|_|_| |_|\__, | | +# | |___/ | +# +----------------------------------------------------------------------+ +# | Show a list of all visuals with actions to delete/clone/edit | +# '----------------------------------------------------------------------' + +def page_list(what, title, visuals, custom_columns = [], + render_custom_buttons = None, + render_custom_columns = None, + render_custom_context_buttons = None, + check_deletable_handler = None): + + what_s = what[:-1] + if not config.may("general.edit_" + what): + raise MKAuthException(_("Sorry, you lack the permission for editing this type of visuals.")) + + html.header(title, stylesheets=["pages", "views", "status"]) + + html.begin_context_buttons() + html.context_button(_('New'), 'create_%s.py' % what_s, "new") + if render_custom_context_buttons: + render_custom_context_buttons() + for other_what, info in visual_types.items(): + if what != other_what: + html.context_button(info["plural_title"].title(), 'edit_%s.py' % other_what, other_what[:-1]) + html.end_context_buttons() + + # Deletion of visuals + delname = html.var("_delete") + if delname and html.transaction_valid(): + deltitle = visuals[(config.user_id, delname)]['title'] + + try: + if check_deletable_handler: + check_deletable_handler(visuals, delname) + + c = html.confirm(_("Please confirm the deletion of \"%s\".") % deltitle) + if c: + del visuals[(config.user_id, delname)] + save(what, visuals) + html.reload_sidebar() + elif c == False: + html.footer() + return + except MKUserError, e: + html.write("
<div class=error>%s</div>
\n" % e) + html.add_user_error(e.varname, e) + + keys_sorted = visuals.keys() + keys_sorted.sort(cmp = lambda a,b: -cmp(a[0],b[0]) or cmp(a[1], b[1])) + + custom = [] + builtin = [] + for (owner, visual_name) in keys_sorted: + if owner == "" and not config.may("%s.%s" % (what_s, visual_name)): + continue # not allowed to see this view + + visual = visuals[(owner, visual_name)] + if owner == config.user_id or \ + (visual["public"] and owner != '' and config.user_may(owner, "general.publish_" + what)): + custom.append((owner, visual_name, visual)) + elif visual["public"] and owner == "": + builtin.append((owner, visual_name, visual)) + + for title, items in [ (_('Custom'), custom), (_('Builtin'), builtin) ]: + html.write('

<h3>' + title + '</h3>

') + + table.begin(css = 'data', limit = None) + + for owner, visual_name, visual in items: + table.row(css = 'data') + + # Actions + table.cell(_('Actions'), css = 'buttons visuals') + + # Edit + if owner == config.user_id: + html.icon_button("edit_%s.py?load_name=%s" % (what_s, visual_name), _("Edit"), "edit") + + # Clone / Customize + buttontext = _("Create a customized copy of this") + backurl = html.urlencode(html.makeuri([])) + clone_url = "edit_%s.py?load_user=%s&load_name=%s&back=%s" \ + % (what_s, owner, visual_name, backurl) + html.icon_button(clone_url, buttontext, "clone") + + # Delete + if owner == config.user_id: + html.icon_button(html.makeactionuri([('_delete', visual_name)]), + _("Delete!"), "delete") + + # Custom buttons - visual specific + if render_custom_buttons: + render_custom_buttons(visual_name, visual) + + # visual Name + table.cell(_('ID'), visual_name) + + # Title + table.cell(_('Title')) + title = _u(visual['title']) + if not visual["hidden"]: + html.write("%s" % + (what_s, visual_types[what]['ident_attr'], visual_name, html.attrencode(title))) + else: + html.write(html.attrencode(title)) + html.help(html.attrencode(_u(visual['description']))) + + # Custom cols + for title, renderer in custom_columns: + table.cell(title, renderer(visual)) + + # Owner + if owner == "": + ownertxt = "" + _("builtin") + "" + else: + ownertxt = owner + table.cell(_('Owner'), ownertxt) + table.cell(_('Public'), visual["public"] and _("yes") or _("no")) + table.cell(_('Hidden'), visual["hidden"] and _("yes") or _("no")) + + if render_custom_columns: + render_custom_columns(visual_name, visual) + + table.end() + + html.footer() + +#. +# .--Create Visual-------------------------------------------------------. +# | ____ _ __ ___ _ | +# | / ___|_ __ ___ __ _| |_ ___ \ \ / (_)___ _ _ __ _| | | +# | | | | '__/ _ \/ _` | __/ _ \ \ \ / /| / __| | | |/ _` | | | +# | | |___| | | __/ (_| | || __/ \ V / | \__ \ |_| | (_| | | | +# | \____|_| \___|\__,_|\__\___| \_/ |_|___/\__,_|\__,_|_| | +# | | +# +----------------------------------------------------------------------+ +# | Realizes the steps before getting to the editor (context type) | +# '----------------------------------------------------------------------' + +def page_create_visual(what, info_keys, next_url = None): + title = visual_types[what]['title'] + what_s = what[:-1] + + # FIXME: Sort by (assumed) common usage + info_choices = [] + for key in info_keys: + info_choices.append((key, _('Show information of a single %s') % infos[key]['title'])) + + vs_infos = SingleInfoSelection(info_keys) + + html.header(_('Create %s') % title, stylesheets=["pages"]) + html.begin_context_buttons() + back_url = html.var("back", "") + html.context_button(_("Back"), back_url or "edit_%s.py" % what, "back") + html.end_context_buttons() + + html.write('
<p>
') + html.write( + _('Depending on the choosen datasource a %s can list multiple or single objects. ' + 'For example the services datasource can be used to simply create a list ' + 'of multiple services, a list of multiple services of a single host or even ' + 'a list of services with the same name on multiple hosts. When you just want to ' + 'create a list of objects, you do not need to make any selection in this dialog. ' + 'If you like to create a view for one specific object of a specific type, select the ' + 'object type below and continue.') % what_s) + html.write('
</p>
') + + if html.var('save') and html.check_transaction(): + try: + single_infos = vs_infos.from_html_vars('single_infos') + vs_infos.validate_value(single_infos, 'single_infos') + + if not next_url: + next_url = 'edit_'+what_s+'.py?mode=create&single_infos=%s' % ','.join(single_infos) + else: + next_url += '&single_infos=%s' % ','.join(single_infos) + html.http_redirect(next_url) + return + + except MKUserError, e: + html.write("
<div class=error>%s</div>
\n" % e) + html.add_user_error(e.varname, e) + + html.begin_form('create_visual') + html.hidden_field('mode', 'create') + + forms.header(_('Select specific object type')) + forms.section(vs_infos.title()) + vs_infos.render_input('single_infos', '') + html.help(vs_infos.help()) + forms.end() + + html.button('save', _('Continue'), 'submit') + + html.hidden_fields() + html.end_form() + html.footer() + +#. +# .--Edit Visual---------------------------------------------------------. +# | _____ _ _ _ __ ___ _ | +# | | ____|__| (_) |_ \ \ / (_)___ _ _ __ _| | | +# | | _| / _` | | __| \ \ / /| / __| | | |/ _` | | | +# | | |__| (_| | | |_ \ V / | \__ \ |_| | (_| | | | +# | |_____\__,_|_|\__| \_/ |_|___/\__,_|\__,_|_| | +# | | +# +----------------------------------------------------------------------+ +# | Edit global settings of the visual | +# '----------------------------------------------------------------------' + +def get_context_specs(visual, info_handler): + context_specs = [] + info_keys = info_handler and info_handler(visual) or infos.keys() + for info_key in info_keys: + info = infos[info_key] + + if info_key in visual['single_infos']: + params = info['single_spec'] + optional = True + isopen = True + vs = Dictionary( + title = info['title'], + # render = 'form', + form_isopen = isopen, + optional_keys = optional, + elements = params, + ) + else: + filter_list = VisualFilterList([info_key], title=info['title']) + filter_names = filter_list.filter_names() + + if not filter_names: + continue # Skip infos which have no filters available + + params = [ + ('filters', filter_list), + ] + optional = None + # Make it open by default when at least one filter is used + isopen = bool([ fn for fn in visual.get('context', {}).keys() + if fn in filter_names ]) + vs = filter_list + + + # Single info context specifications should be listed first + if info_key in visual['single_infos']: + context_specs.insert(0, (info_key, vs)) + else: + context_specs.append((info_key, vs)) + return context_specs + +def process_context_specs(context_specs): + context = {} + for info_key, spec in context_specs: + ident = 'context_' + info_key + + attrs = spec.from_html_vars(ident) + spec.validate_value(attrs, ident) + context.update(attrs) + return context + +def render_context_specs(visual, context_specs): + forms.header(_("Context / Search Filters")) + for info_key, spec in context_specs: + forms.section(spec.title()) + ident = 'context_' + info_key + # Trick: the field "context" contains a dictionary with + # all filter settings, from which the value spec will automatically + # extract those that it needs. 
+ value = visual.get('context', {}) + spec.render_input(ident, value) + +def page_edit_visual(what, all_visuals, custom_field_handler = None, + create_handler = None, try_handler = None, + load_handler = None, info_handler = None, + sub_pages = []): + visual_type = visual_types[what] + + visual_type = visual_types[what] + if not config.may("general.edit_" + what): + raise MKAuthException(_("You are not allowed to edit %s.") % visual_type["plural_title"]) + what_s = what[:-1] + + visual = {} + + # Load existing visual from disk - and create a copy if 'load_user' is set + visualname = html.var("load_name") + oldname = visualname + mode = html.var('mode', 'edit') + if visualname: + cloneuser = html.var("load_user") + if cloneuser: + mode = 'clone' + visual = copy.deepcopy(all_visuals.get((cloneuser, visualname), None)) + if not visual: + raise MKUserError('cloneuser', _('The %s does not exist.') % visual_type["title"]) + + # Make sure, name is unique + if cloneuser == config.user_id: # Clone own visual + newname = visualname + "_clone" + else: + newname = visualname + # Name conflict -> try new names + n = 1 + while (config.user_id, newname) in all_visuals: + n += 1 + newname = visualname + "_clone%d" % n + visual["name"] = newname + visualname = newname + oldname = None # Prevent renaming + if cloneuser == config.user_id: + visual["title"] += _(" (Copy)") + else: + visual = all_visuals.get((config.user_id, visualname)) + if not visual: + visual = all_visuals.get(('', visualname)) # load builtin visual + mode = 'clone' + if not visual: + raise MKGeneralException(_('The requested %s does not exist.') % visual_types[what]['title']) + + single_infos = visual['single_infos'] + + if load_handler: + load_handler(visual) + + else: + mode = 'create' + single_infos = [] + single_infos_raw = html.var('single_infos') + if single_infos_raw: + single_infos = single_infos_raw.split(',') + for key in single_infos: + if key not in infos: + raise MKUserError('single_infos', _('The info %s does not exist.') % key) + visual['single_infos'] = single_infos + + if mode == 'clone': + title = _('Clone %s') % visual_type["title"] + elif mode == 'create': + title = _('Create %s') % visual_type["title"] + else: + title = _('Edit %s') % visual_type["title"] + + html.header(title, stylesheets=["pages", "views", "status", "bi"]) + html.begin_context_buttons() + back_url = html.var("back", "") + html.context_button(_("Back"), back_url or "edit_%s.py" % what, "back") + + # Extra buttons to sub modules. These are used for things to edit about + # this visual that are more complex to be done in one value spec. + if mode not in [ "clone", "create" ]: + for title, pagename, icon in sub_pages: + uri = html.makeuri_contextless([(visual_types[what]['ident_attr'], visualname)], + filename = pagename + '.py') + html.context_button(title, uri, icon) + html.end_context_buttons() + + # A few checkboxes concerning the visibility of the visual. These will + # appear as boolean-keys directly in the visual dict, but encapsulated + # in a list choice in the value spec. 
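+    # (E.g. a visual stored with visual['hidden'] == True is shown as the entry
+    # 'hidden' in the 'visibility' ListChoice and unpacked back into boolean
+    # keys on save further below.)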
+ visibility_choices = [ + ('hidden', _('Hide this %s from the sidebar') % visual_type["title"]), + ('hidebutton', _('Do not show a context button to this %s') % visual_type["title"]), + ] + if config.may("general.publish_" + what): + visibility_choices.append( + ('public', _('Make this %s available for all users') % visual_type["title"])) + + vs_general = Dictionary( + title = _("General Properties"), + render = 'form', + optional_keys = None, + elements = [ + single_infos_spec(single_infos), + ('name', TextAscii( + title = _('Unique ID'), + help = _("The ID will be used in URLs that point to a view, e.g. " + "view.py?view_name=myview. It will also be used " + "internally for identifying a view. You can create several views " + "with the same title but only one per view name. If you create a " + "view that has the same view name as a builtin view, then your " + "view will override that (shadowing it)."), + regex = '^[a-zA-Z0-9_]+$', + regex_error = _('The name of the view may only contain letters, digits and underscores.'), + size = 24, allow_empty = False)), + ('title', TextUnicode( + title = _('Title') + '*', + size = 50, allow_empty = False)), + ('topic', TextUnicode( + title = _('Topic') + '*', + size = 50)), + ('description', TextAreaUnicode( + title = _('Description') + '*', + rows = 4, cols = 50)), + ('linktitle', TextUnicode( + title = _('Button Text') + '*', + help = _('If you define a text here, then it will be used in ' + 'context buttons linking to the %s instead of the regular title.') % visual_type["title"], + size = 26)), + ('icon', IconSelector( + title = _('Button Icon'), + )), + ('visibility', ListChoice( + title = _('Visibility'), + choices = visibility_choices, + )), + ], + ) + + context_specs = get_context_specs(visual, info_handler) + + # handle case of save or try or press on search button + save_and_go = None + for nr, (title, pagename, icon) in enumerate(sub_pages): + if html.var("save%d" % nr): + save_and_go = pagename + + if save_and_go or html.var("save") or html.var("try") or html.var("search"): + try: + general_properties = vs_general.from_html_vars('general') + vs_general.validate_value(general_properties, 'general') + + if not general_properties['linktitle']: + general_properties['linktitle'] = general_properties['title'] + if not general_properties['topic']: + general_properties['topic'] = _("Other") + + old_visual = visual + visual = {} + + # The dict of the value spec does not match exactly the dict + # of the visual. We take over some keys... 
+ for key in ['single_infos', 'name', 'title', + 'topic', 'description', 'linktitle', 'icon']: + visual[key] = general_properties[key] + + # ...and import the visibility flags directly into the visual + for key, title in visibility_choices: + visual[key] = key in general_properties['visibility'] + + if not config.may("general.publish_" + what): + visual['public'] = False + + if create_handler: + visual = create_handler(old_visual, visual) + + visual['context'] = process_context_specs(context_specs) + + if html.var("save") or save_and_go: + if save_and_go: + back = html.makeuri_contextless([(visual_types[what]['ident_attr'], visual['name'])], + filename = save_and_go + '.py') + else: + back = html.var('back') + if not back: + back = 'edit_%s.py' % what + + if html.check_transaction(): + all_visuals[(config.user_id, visual["name"])] = visual + # Handle renaming of visuals + if oldname and oldname != visual["name"]: + # -> delete old entry + if (config.user_id, oldname) in all_visuals: + del all_visuals[(config.user_id, oldname)] + # -> change visual_name in back parameter + if back: + varstring = visual_type["ident_attr"] + "=" + back = back.replace(varstring + oldname, varstring + visual["name"]) + save(what, all_visuals) + + html.immediate_browser_redirect(1, back) + html.message(_('Your %s has been saved.') % visual_type["title"]) + html.reload_sidebar() + html.footer() + return + + except MKUserError, e: + html.write("
<div class=error>%s</div>
\n" % e) + html.add_user_error(e.varname, e) + + html.begin_form("visual", method = "POST") + html.hidden_field("back", back_url) + html.hidden_field("mode", mode) + html.hidden_field("load_user", html.var("load_user", "")) # safe old name in case user changes it + html.hidden_field("load_name", oldname) # safe old name in case user changes it + + # FIXME: Hier werden die Flags aus visbility nicht korrekt geladen. Wäre es nicht besser, + # diese in einem Unter-Dict zu lassen, anstatt diese extra umzukopieren? + visib = [] + for key, title in visibility_choices: + if visual.get(key): + visib.append(key) + visual["visibility"] = visib + + vs_general.render_input("general", visual) + + if custom_field_handler: + custom_field_handler(visual) + + render_context_specs(visual, context_specs) + + forms.end() + html.show_localization_hint() + + html.button("save", _("Save")) + for nr, (title, pagename, icon) in enumerate(sub_pages): + html.button("save%d" % nr, _("Save and go to ") + title) + html.hidden_fields() + + if try_handler: + html.write(" ") + html.button("try", _("Try out")) + html.end_form() + + if (html.has_var("try") or html.has_var("search")) and not html.has_user_errors(): + html.set_var("search", "on") + if visual: + import bi + bi.reset_cache_status() + try_handler(visual) + return # avoid second html footer + else: + html.end_form() + + html.footer() + +#. +# .--Filters-------------------------------------------------------------. +# | _____ _ _ _ | +# | | ___(_) | |_ ___ _ __ ___ | +# | | |_ | | | __/ _ \ '__/ __| | +# | | _| | | | || __/ | \__ \ | +# | |_| |_|_|\__\___|_| |___/ | +# | | +# +----------------------------------------------------------------------+ +# | | +# '----------------------------------------------------------------------' + +def declare_filter(sort_index, f, comment = None): + multisite_filters[f.name] = f + f.comment = comment + f.sort_index = sort_index + +# Base class for all filters +# name: The unique id of that filter. This id is e.g. used in the +# persisted view configuration +# title: The title of the filter visible to the user. This text +# may be localized +# info: The datasource info this filter needs to work. If this +# is "service", the filter will also be available in tables +# showing service information. "host" is available in all +# service and host views. The log datasource provides both +# "host" and "service". Look into datasource.py for which +# datasource provides which information +# htmlvars: HTML variables this filter uses +# link_columns: If this filter is used for linking (state "hidden"), then +# these Livestatus columns are needed to fill the filter with +# the proper information. In most cases, this is just []. Only +# a few filters are useful for linking (such as the host_name and +# service_description filters with exact match) +class Filter: + def __init__(self, name, title, info, htmlvars, link_columns): + self.name = name + self.info = info + self.title = title + self.htmlvars = htmlvars + self.link_columns = link_columns + + # Some filters can be unavailable due to the configuration (e.g. + # the WATO Folder filter is only available if WATO is enabled. + def available(self): + return True + + # Some filters can be invisible. This is useful to hide filters which have always + # the same value but can not be removed using available() because the value needs + # to be set during runtime. + # A good example is the "site" filter which does not need to be available to the + # user in single site setups. 
+ def visible(self): + return True + + # More complex filters need more height in the HTML layout + def double_height(self): + return False + + def display(self): + raise MKInternalError(_("Incomplete implementation of filter %s '%s': missing display()") % \ + (self.name, self.title)) + html.write(_("FILTER NOT IMPLEMENTED")) + + def filter(self, infoname): + return "" + + # Wether this filter needs to load host inventory data + def need_inventory(self): + return False + + # post-Livestatus filtering (e.g. for BI aggregations) + def filter_table(self, rows): + return rows + + def variable_settings(self, row): + return [] # return pairs of htmlvar and name according to dataset in row + + def infoprefix(self, infoname): + if self.info == infoname: + return "" + else: + return self.info[:-1] + "_" + + # Hidden filters may contribute to the pages headers of the views + def heading_info(self): + return None + + # Returns the current representation of the filter settings from the HTML + # var context. This can be used to persist the filter settings. + def value(self): + val = {} + for varname in self.htmlvars: + val[varname] = html.var(varname, '') + return val + + # Is used to populate a value, for example loaded from persistance, into + # the HTML context where it can be used by e.g. the display() method. + def set_value(self, value): + val = {} + for varname in self.htmlvars: + html.set_var(varname, value.get(varname)) + +def get_filter(name): + return multisite_filters[name] + +def filters_allowed_for_info(info): + allowed = {} + for fname, filt in multisite_filters.items(): + if filt.info == None or info == filt.info: + allowed[fname] = filt + return allowed + +# For all single_infos which are configured for a view which datasource +# does not provide these infos, try to match the keys of the single_info +# attributes to a filter which can then be used to filter the data of +# the available infos. +# This is needed to make the "hostgroup" single_info possible on datasources +# which do not have the "hostgroup" info, but the "host" info. This +# is some kind of filter translation between a filter of the "hostgroup" info +# and the "hosts" info. 
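+# (Assumed example: for a visual with single_infos == ['hostgroup'] on a datasource
+# with infos == ['host'] and link_filters == {'hostgroup': 'opthostgroup'}, this
+# returns [('hostgroup', 'opthostgroup')], so the hostgroup can still be selected
+# via the optional hostgroup filter of the host info.)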
+def get_link_filter_names(visual, info_keys, link_filters): + names = [] + for info_key in visual['single_infos']: + if info_key not in info_keys: + for key in info_params(info_key): + if key in link_filters: + names.append((key, link_filters[key])) + return names + +# Collects all filters to be used for the given visual +def filters_of_visual(visual, info_keys, show_all=False, link_filters=[]): + filters = [] + + # Collect all available filters for these infos + all_possible_filters = [] + for filter_name, filter in multisite_filters.items(): + if filter.info in info_keys: + all_possible_filters.append(filter) + + for info_key in info_keys: + if info_key in visual['single_infos']: + for key in info_params(info_key): + filters.append(get_filter(key)) + elif not show_all: + for key, val in visual['context'].items(): + if type(val) == dict: # this is a real filter + filters.append(get_filter(key)) + + # See get_link_filter_names() comment for details + for key, dst_key in get_link_filter_names(visual, info_keys, link_filters): + filters.append(get_filter(dst_key)) + + if show_all: # add *all* available filters of these infos + filters += all_possible_filters + + # add ubiquitary_filters that are possible for these infos + for fn in ubiquitary_filters: + # Disable 'wato_folder' filter, if WATO is disabled or there is a single host view + filter = get_filter(fn) + if fn == "wato_folder" and (not filter.available() or 'host' in visual['single_infos']): + continue + if not filter.info or filter.info in info_keys: + filters.append(filter) + + return list(set(filters)) # remove duplicates + +# Reduces the list of the visuals used filters. The result are the ones +# which are really presented to the user later. +# For the moment we only remove the single context filters which have a +# hard coded default value which is treated as enforced value. +def visible_filters_of_visual(visual, use_filters): + show_filters = [] + + single_keys = get_single_info_keys(visual) + + for f in use_filters: + if f.name not in single_keys or \ + not visual['context'].get(f.name): + show_filters.append(f) + + return show_filters + +def add_context_to_uri_vars(visual, only_infos=None, only_count=False): + if only_infos == None: + only_infos = infos.keys() # all datasources! + + # Populate the HTML vars with missing context vars. The context vars set + # in single context are enforced (can not be overwritten by URL). The normal + # filter vars in "multiple" context are not enforced. + for key in get_single_info_keys(visual): + if key in visual['context']: + html.set_var(key, visual['context'][key]) + + # Now apply the multiple context filters + for info_key in only_infos: + for filter_name, filter_vars in visual['context'].items(): + if type(filter_vars) == dict: # this is a multi-context filter + # We add the filter only if *none* if its HTML variables are present on the URL + # This important because checkbox variables are not present if the box is not checked. + skip = False + for uri_varname, value in filter_vars.items(): + if html.has_var(uri_varname): + skip = True + break + if not skip or only_count: + for uri_varname, value in filter_vars.items(): + html.set_var(uri_varname, value) + +# Vice versa: find all filters that belong to the current URI variables +# and create a context dictionary from that. 
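+# (Sketch with assumed URL variables host=srv01&site=mysite and
+# single_infos == ['host']: the result would be roughly
+#     {'host': 'srv01', 'siteopt': {'site': 'mysite'}}
+# since 'host' is a single info here and 'siteopt' a multi-context filter.)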
+def get_context_from_uri_vars(only_infos=None, single_infos=[]): + context = {} + for filter_name, filter_object in multisite_filters.items(): + if only_infos == None or filter_object.info in only_infos: + this_filter_vars = {} + for varname in filter_object.htmlvars: + if html.has_var(varname): + if filter_object.info in single_infos: + context[filter_name] = html.var(varname) + break + else: + this_filter_vars[varname] = html.var(varname) + if this_filter_vars: + context[filter_name] = this_filter_vars + return context + + +# Compute Livestatus-Filters based on a given context. Returns +# the only_sites list and a string with the filter headers +def get_filter_headers(datasource, context): + # Prepare Filter headers for Livestatus + filter_headers = "" + only_sites = None + html.stash_vars() + for filter_name, filter_vars in context.items(): + # first set the HTML variables. Sorry - the filters need this + if type(filter_vars) == dict: # this is a multi-context filter + for uri_varname, value in filter_vars.items(): + html.set_var(uri_varname, value) + else: + html.set_var(filter_name, filter_vars) + + # Now compute filter headers for all infos of the used datasource + our_infos = datasource["infos"] + for filter_name, filter_object in multisite_filters.items(): + if filter_object.info in our_infos: + header = filter_object.filter(datasource["table"]) + if header.startswith("Sites:"): + only_sites = header.strip().split(" ")[1:] + else: + filter_headers += header + html.unstash_vars() + return filter_headers, only_sites + + +#. +# .--ValueSpecs----------------------------------------------------------. +# | __ __ _ ____ | +# | \ \ / /_ _| |_ _ ___/ ___| _ __ ___ ___ ___ | +# | \ \ / / _` | | | | |/ _ \___ \| '_ \ / _ \/ __/ __| | +# | \ V / (_| | | |_| | __/___) | |_) | __/ (__\__ \ | +# | \_/ \__,_|_|\__,_|\___|____/| .__/ \___|\___|___/ | +# | |_| | +# +----------------------------------------------------------------------+ +# | | +# '----------------------------------------------------------------------' + +# Implements a list of available filters for the given infos. By default no +# filter is selected. The user may select a filter to be activated, then the +# filter is rendered and the user can provide a default value. +class VisualFilterList(ListOfMultiple): + def __init__(self, infos, **kwargs): + self._infos = infos + + # First get all filters useful for the infos, then create VisualFilter + # valuespecs from them and then sort them + fspecs = {} + self._filters = {} + for info in self._infos: + for fname, filter in filters_allowed_for_info(info).items(): + if fname not in fspecs and fname not in ubiquitary_filters: + fspecs[fname] = VisualFilter(fname, + title = filter.title, + ) + self._filters[fname] = fspecs[fname]._filter + + # Convert to list and sort them! + fspecs = sorted(fspecs.items(), key=lambda x: (x[1]._filter.sort_index, x[1].title())) + + kwargs.setdefault('title', _('Filters')) + kwargs.setdefault('add_label', _('Add filter')) + kwargs.setdefault('del_label', _('Remove filter')) + kwargs["delete_style"] = "filter" + + ListOfMultiple.__init__(self, fspecs, **kwargs) + + def filter_names(self): + return self._filters.keys() + +# Realizes a Multisite/visual filter in a valuespec. It can render the filter form, get +# the filled in values and provide the filled in information for persistance. 
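+# (Usage sketch, assuming the standard 'host' filter is registered:
+#     vs = VisualFilter('host', title=_('Hostname'))
+#     vs.render_input('context_host', {'host': 'srv01'})
+#     value = vs.from_html_vars('context_host')   # -> e.g. {'host': 'srv01'}
+# The varprefix is unused because a filter can appear only once per page.)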
+class VisualFilter(ValueSpec): + def __init__(self, name, **kwargs): + self._name = name + self._filter = multisite_filters[name] + + ValueSpec.__init__(self, **kwargs) + + def title(self): + return self._filter.title + + def canonical_value(self): + return {} + + def render_input(self, varprefix, value): + # kind of a hack to make the current/old filter API work. This should + # be cleaned up some day + if value != None: + self._filter.set_value(value) + + # A filter can not be used twice on a page, because the varprefix is not used + html.write('
<div class="floatfilter %s">' % (self._filter.double_height() and "double" or "single"))
+        html.write('<div class=legend>%s</div>' % self._filter.title)
+        html.write('<div class=content>')
+        self._filter.display()
+        html.write("</div>")
+        html.write("</div>
") + + def value_to_text(self, value): + # FIXME: optimize. Needed? + return repr(value) + + def from_html_vars(self, varprefix): + # A filter can not be used twice on a page, because the varprefix is not used + return self._filter.value() + + def validate_datatype(self, value, varprefix): + if type(value) != dict: + raise MKUserError(varprefix, _("The value must be of type dict, but it has type %s") % + type_name(value)) + + def validate_value(self, value, varprefix): + ValueSpec.custom_validate(self, value, varprefix) + + +def SingleInfoSelection(info_keys, **args): + info_choices = [] + for key in info_keys: + info_choices.append((key, _('Show information of a single %s') % infos[key]['title'])) + + args.setdefault("title", _('Specific objects')) + args["choices"] = info_choices + return ListChoice(**args) + +# Converts a context from the form { filtername : { ... } } into +# the for { infoname : { filtername : { } } for editing. +def pack_context_for_editing(visual, info_handler): + # We need to pack all variables into dicts with the name of the + # info. Since we have no mapping from info the the filter variable, + # we pack into every info every filter. The dict valuespec will + # pick out what it needs. Yurks. + packed_context = {} + info_keys = info_handler and info_handler(visual) or infos.keys() + for info_name in info_keys: + packed_context[info_name] = visual.get('context', {}) + return packed_context + +def unpack_context_after_editing(packed_context): + context = {} + for info_type, its_context in packed_context.items(): + context.update(its_context) + return context + + + +#. +# .--Misc----------------------------------------------------------------. +# | __ __ _ | +# | | \/ (_)___ ___ | +# | | |\/| | / __|/ __| | +# | | | | | \__ \ (__ | +# | |_| |_|_|___/\___| | +# | | +# +----------------------------------------------------------------------+ +# | | +# '----------------------------------------------------------------------' + +def is_single_site_info(info_key): + return infos[info_key].get('single_site', True) + +def single_infos_spec(single_infos): + return ('single_infos', FixedValue(single_infos, + title = _('Show information of single'), + totext = single_infos and ', '.join(single_infos) \ + or _('Not restricted to showing a specific object.'), + )) + +def verify_single_contexts(what, visual, link_filters): + for k, v in get_singlecontext_html_vars(visual).items(): + if v == None and k not in link_filters: + raise MKUserError(k, _('This %s can not be displayed, because the ' + 'necessary context information "%s" is missing.') % + (visual_types[what]['title'], k)) + +def visual_title(what, visual): + extra_titles = [] + + # Beware: if a single context visual is being visited *without* a context, then + # the value of the context variable(s) is None. In order to avoid exceptions, + # we simply drop these here. + extra_titles = [ v for k, v in get_singlecontext_html_vars(visual).items() if v != None ] + # FIXME: Is this really only needed for visuals without single infos? 
+ if not visual['single_infos']: + used_filters = [ multisite_filters[fn] for fn in visual["context"].keys() ] + for filt in used_filters: + heading = filt.heading_info() + if heading: + extra_titles.append(heading) + + title = _u(visual["title"]) + if extra_titles: + title += " " + ", ".join(extra_titles) + + for fn in ubiquitary_filters: + # Disable 'wato_folder' filter, if WATO is disabled or there is a single host view + if fn == "wato_folder" and (not config.wato_enabled or 'host' in visual['single_infos']): + continue + heading = get_filter(fn).heading_info() + if heading: + title = heading + " - " + title + + # Execute title plugin functions which might be added by the user to + # the visuals plugins. When such a plugin function returns None, the regular + # title of the page is used, otherwise the title returned by the plugin + # function is used. + for func in title_functions: + result = func(what, visual, title) + if result != None: + return result + + return title + +# Determines the names of HTML variables to be set in order to +# specify a specify row in a datasource with a certain info. +# Example: the info "history" (Event Console History) needs +# the variables "event_id" and "history_line" to be set in order +# to exactly specify one history entry. +def info_params(info_key): + return dict(infos[info_key]['single_spec']).keys() + +def get_single_info_keys(visual): + keys = [] + for info_key in visual.get('single_infos', []): + keys += info_params(info_key) + return list(set(keys)) + +def get_singlecontext_vars(visual): + vars = {} + for key in get_single_info_keys(visual): + vars[key] = visual['context'].get(key) + return vars + +def get_singlecontext_html_vars(visual): + vars = get_singlecontext_vars(visual) + for key in get_single_info_keys(visual): + val = html.var_utf8(key) + if val != None: + vars[key] = val + return vars + +# Collect all visuals that share a context with visual. For example +# if a visual has a host context, get all relevant visuals. 
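+# (E.g. while a view with single_infos == ['host'] is shown and the 'host' HTML
+# variable is set, every other visual requiring just the host context gets a
+# context button linking to it with that host filled in.)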
+def collect_context_links(this_visual, mobile = False, only_types = []): + # compute list of html variables needed for this visual + active_filter_vars = set([]) + for var, val in get_singlecontext_html_vars(this_visual).items(): + if html.has_var(var): + active_filter_vars.add(var) + + context_links = [] + for what in visual_types.keys(): + if not only_types or what in only_types: + context_links += collect_context_links_of(what, this_visual, active_filter_vars, mobile) + return context_links + +def collect_context_links_of(visual_type_name, this_visual, active_filter_vars, mobile): + context_links = [] + + # FIXME: Make this cross module access cleaner + visual_type = visual_types[visual_type_name] + module_name = visual_type["module_name"] + thing_module = __import__(module_name) + load_func_name = 'load_%s'% visual_type_name + if load_func_name not in thing_module.__dict__: + return context_links # in case of exception in "reporting", the load function might be missing + thing_module.__dict__['load_%s'% visual_type_name]() + available = thing_module.__dict__['permitted_%s' % visual_type_name]() + + # sort buttons somehow + visuals = available.values() + visuals.sort(cmp = lambda b,a: cmp(a.get('icon'), b.get('icon'))) + + for visual in visuals: + name = visual["name"] + linktitle = visual.get("linktitle") + if not linktitle: + linktitle = visual["title"] + if visual == this_visual: + continue + if visual.get("hidebutton", False): + continue # this visual does not want a button to be displayed + + if not mobile and visual.get('mobile') \ + or mobile and not visual.get('mobile'): + continue + + # For dashboards and views we currently only show a link button, + # if the target dashboard/view shares a single info with the + # current visual. + if not visual['single_infos'] and not visual_type["multicontext_links"]: + continue # skip non single visuals for dashboard, views + + # We can show a button only if all single contexts of the + # target visual are known currently + needed_vars = get_singlecontext_html_vars(visual).items() + skip = False + vars_values = [] + for var, val in needed_vars: + if var not in active_filter_vars: + skip = True # At least one single context missing + break + vars_values.append((var, val)) + + # When all infos of the target visual are showing single site data, add + # the site hint when available + if html.var('site') and all([ is_single_site_info(info_key)for info_key in visual['single_infos']]): + vars_values.append(('site', html.var('site'))) + + if not skip: + # add context link to this visual. For reports we put in + # the *complete* context, even the non-single one. 
+ if visual_type["multicontext_links"]: + uri = html.makeuri([(visual_type['ident_attr'], name)], + filename = visual_type["show_url"]) + + # For views and dashboards currently the current filter + # settings + else: + uri = html.makeuri_contextless(vars_values + [(visual_type['ident_attr'], name)], + filename = visual_type["show_url"]) + icon = visual.get("icon") + buttonid = "cb_" + name + context_links.append((_u(linktitle), uri, icon, buttonid)) + + return context_links + +def transform_old_visual(visual): + if 'context_type' in visual: + if visual['context_type'] in [ 'host', 'service', 'hostgroup', 'servicegroup' ]: + visual['single_infos'] = [visual['context_type']] + else: + visual['single_infos'] = [] # drop the context type and assume a "multiple visual" + del visual['context_type'] + elif 'single_infos' not in visual: + visual['single_infos'] = [] + + visual.setdefault('context', {}) + + +#. +# .--Popup Add-----------------------------------------------------------. +# | ____ _ _ _ | +# | | _ \ ___ _ __ _ _ _ __ / \ __| | __| | | +# | | |_) / _ \| '_ \| | | | '_ \ / _ \ / _` |/ _` | | +# | | __/ (_) | |_) | |_| | |_) | / ___ \ (_| | (_| | | +# | |_| \___/| .__/ \__,_| .__/ /_/ \_\__,_|\__,_| | +# | |_| |_| | +# +----------------------------------------------------------------------+ +# | Handling of popup for adding a visual element to a dashboard, etc. | +# '----------------------------------------------------------------------' + +def ajax_popup_add(): + html.write("
    ") + + for visual_type_name, visual_type in visual_types.items(): + if "popup_add_handler" in visual_type: + module_name = visual_type["module_name"] + visual_module = __import__(module_name) + handler = visual_module.__dict__[visual_type["popup_add_handler"]] + visuals = handler() + html.write('
  • %s %s:
  • ' % (_('Add to'), visual_type["title"])) + for name, title in sorted(handler(), key=lambda x: x[1]): + html.write('
  • %s
  • ' % + (visual_type_name, name, visual_type_name.rstrip('s'), title)) + html.write('
\n') + + +def ajax_add_visual(): + visual_type = html.var('visual_type') # dashboards / views / ... + visual_type = visual_types[visual_type] + module_name = visual_type["module_name"] + visual_module = __import__(module_name) + handler = visual_module.__dict__[visual_type["add_visual_handler"]] + + visual_name = html.var("visual_name") # add to this visual + + # type of the visual to add (e.g. view) + element_type = html.var("type") + + # Context and params are | separated lists of : separated triples + # of name, datatype and value. Datatype is int or string + extra_data = [] + for what in [ 'context', 'params' ]: + value = html.var(what) + if value: + extra_data.append(json.loads(value)) + + handler(visual_name, element_type, *extra_data) diff -Nru check-mk-1.2.2p3/htdocs/wato.css check-mk-1.2.6p12/htdocs/wato.css --- check-mk-1.2.2p3/htdocs/wato.css 2013-11-05 09:23:08.000000000 +0000 +++ check-mk-1.2.6p12/htdocs/wato.css 2015-09-21 10:59:54.000000000 +0000 @@ -60,9 +60,6 @@ border-left-style: none; border-radius: 4px 0px 0px 0px; } -.wato table.data td { - /* border-style: dashed; */ -} .wato table.data td.select { text-align: center; @@ -150,7 +147,7 @@ margin: 0 5px 0 0; padding: 0; border-style: none; - float: left; + float: none; } .wato table.foreignchanges { @@ -222,7 +219,6 @@ } .wato table.data tr.data td.bulksearch { - text-align: right; padding-right: 0px; white-space: nowrap; } @@ -232,6 +228,10 @@ margin-right: 5px; } +.wato table.data tr.data td.bulksearch div.togglebutton.checkbox { + margin-left: 0px; +} + /* Path to current folder at top of screen */ .wato div.folderpath { @@ -362,6 +362,7 @@ position: absolute; top: 28px; right: 19px; + z-index: 500; } .wato div.floatfolder div.infos img { @@ -393,7 +394,11 @@ position: absolute; bottom: 23px; left: 20px; + width: 140px; color: #bde; + overflow:hidden; + white-space: nowrap; + text-overflow: ellipsis; } .wato div.floatfolder .title { @@ -418,35 +423,24 @@ top: -2px; } -.wato div.move_dialog { - padding:10px; - background-color: #45829D; +.wato img.authicon { + width: 28px; + height: 28px; + margin-right: 10px; + vertical-align: middle; +} +.wato div.popup.move_dialog { position: relative; left:105px; top:35px; - z-index: 200; height: 190px; - - width: -moz-max-content; - width: intrinsic; - - -webkit-border-radius: 5px; - border-radius: 5px; - border-radius: 5px; - border: 1px solid #fff; } -.wato div.move_dialog select[multiple] { +.wato div.popup.move_dialog select[multiple] { height: 170px; } -.wato div.move_dialog span { - display: block; - margin-bottom: 2px; - color: white; -} - .wato div.hoverarea { width: 154px; height: 40px; @@ -584,6 +578,10 @@ width: 90px; } +.wato form.rule_editor table.hosttags td.title { + min-width: 200px; +} + .wato form.rule_editor td.tag_sel { vertical-align: middle; display: table-cell; @@ -611,10 +609,6 @@ text-shadow: 0px 1px 0px #fff; } -.wato table.data.ruleset td.condition { - width: 360px; -} - /* Special styling for Timeperiods editor */ .wato table.timeperiod td.name { width: 195px; @@ -776,6 +770,16 @@ /* color: #444; */ } +.wato .rulesets div.ruleset.nofloat { + float: none; + clear: left; +} + +.wato .rulesets div.ruleset div.help { + height: auto; + clear: left; +} + /* Page edit_ruleset */ div.varname { color: white; @@ -793,9 +797,21 @@ .wato div.globalvars td.simple a.modified, .wato div.globalvars a.modified table { border-radius: 5px 5px 5px 5px; - box-shadow: 3px 3px 5px #A2BECD inset; + border: 1px solid #cdf; + box-shadow: 3px 3px 5px #eee inset; margin-left: 
-5px; - padding: 0 5px; + padding-bottom: 18px; + padding: 1px 4px 2px 4px; +} + +.wato div.globalvars a.modified table.vs_dict_text { + border-collapse: separate; +} + +.wato div.globalvars div.title a.modified { + box-shadow: none; + border: none; + /* font-weight: bold; */ } .wato div.globalvars td.content img.iconbutton { @@ -1042,3 +1058,200 @@ table.multisite_conn_method td input { height: 18px; } + +/* BI Editor */ + +.wato .biruletree div { + float: none; + display: inline-block; + vertical-align: top; +} + +.wato .biruletree .birule { + background-color: #ccc; + border: 1px solid #666; + box-shadow: 0.5px 0.5px 2px #000; + color: black; + width: 150px; + border-radius: 4px; + padding-left: 4px; + height: 20px; + margin-bottom: 3px; + padding-top: 2px; + margin-top: 5px; +} + +.wato .biruletree .birule img { + position: relative; + top: -1px; +} + + +.wato .biruletree .birule:hover { + background-color: #eee; +} + +.wato .biruletree .birule a { + color: black; + text-decoration: none; + display: block; +} + +.wato .biruletree .arrow { + border-style: none none solid solid; + border-color: #134; + border-width: 2px; + margin-left: 40px; + margin-right: 3px; + width: 50px; + height: 16px; + border-radius: 0px 10px; +} + +.wato .biruletree .node { + margin-top: 0px; +} + +/*-Host Diag-----------------------------------------------------------. +| _ _ _ ____ _ | +| | | | | ___ ___| |_ | _ \(_) __ _ __ _ | +| | |_| |/ _ \/ __| __| | | | | |/ _` |/ _` | | +| | _ | (_) \__ \ |_ | |_| | | (_| | (_| | | +| |_| |_|\___/|___/\__| |____/|_|\__,_|\__, | | +| |___/ | ++---------------------------------------------------------------------*/ + +.wato .diag_host div.success { + margin-top: 0; +} + +.wato .diag_host td > h3 { + margin-top: 0; +} + +.wato .diag_host table.nform { + width: 212px; +} + +.wato .diag_host .test { + margin-bottom:15px; +} + +.wato .diag_host .log { + height: 113px; + width: 700px; + font-family: monospace, sans-serif; + box-shadow: 1px 1px 3px #888 inset; + overflow-y: scroll; + padding: 5px 5px; + vertical-align: top; + background-color: #fff; + /* below resize option is css3 */ + resize:both; +} +.wato .diag_host .log.diag_success { + background-color: #efe; +} +.wato .diag_host .log.diag_failed { + background-color: #fee; +} + +.wato .diag_host table.test td { + padding: 0; +} + +.wato .diag_host table.test td.icons { + padding: 2px; +} + +.wato .diag_host td.icons div { + position: relative; + width: 22px; + height: 120px; +} + +.wato .diag_host td.icons div img { + margin-left: 2px; +} + +.wato .diag_host td.icons img.retry { + position: absolute; + bottom: 0; + left: 0; +} + +/*--Settings---------------------------------------------------------------. 
+| ____ _ _ _ | +| / ___| ___| |_| |_(_)_ __ __ _ ___ | +| \___ \ / _ \ __| __| | '_ \ / _` / __| | +| ___) | __/ |_| |_| | | | | (_| \__ \ | +| |____/ \___|\__|\__|_|_| |_|\__, |___/ | +| |___/ | ++--------------------------------------------------------------------------+ +| Settings for hosts and services (rule analysis) | +'-------------------------------------------------------------------------*/ +table.setting td.reason { + float: none; + width: 180px; + display: inline-block; + color: white; + font-style: italic; +} + +table.setting td.reason i { /* default value */ + color: #b6c8d1; +} + +table.setting td.reason a { + text-decoration: none; +} +table.setting td.reason a:hover { + text-decoration: underline; +} + +table.setting td.settingvalue.used { +} +td.settingvalue.unused, td.settingvalue.unused td { + font-style: italic; + color: #256; +} +.wato table.nform div.title a { + color: black; + text-decoration: none; +} +.wato table.nform div.title a:hover { + text-decoration: underline; +} + +table.nform.rulesettings td.legend { + width: 300px; +} +table.nform.rulesettings td.legend div.title { + max-width: 300px; +} + + +/*--Profile Repl-----------------------------------------------------------. +| ____ __ _ _ ____ _ | +| | _ \ _ __ ___ / _(_) | ___ | _ \ ___ _ __ | | | +| | |_) | '__/ _ \| |_| | |/ _ \ | |_) / _ \ '_ \| | | +| | __/| | | (_) | _| | | __/ | _ < __/ |_) | | | +| |_| |_| \___/|_| |_|_|\___| |_| \_\___| .__/|_| | +| |_| | +'-------------------------------------------------------------------------*/ + +#profile_repl .site { + margin: 0 5px 5px 0; + padding: 2px; + border-radius: 1ex; + box-shadow: 0 0 3px #444; + background-color: #9dbecd; + border-color: #080; + + width: 158px; + text-overflow: ellipsis; +} + +#profile_repl .site img, #profile_repl .site span { + vertical-align: middle; +} diff -Nru check-mk-1.2.2p3/htdocs/wato.py check-mk-1.2.6p12/htdocs/wato.py --- check-mk-1.2.2p3/htdocs/wato.py 2013-11-05 09:23:08.000000000 +0000 +++ check-mk-1.2.6p12/htdocs/wato.py 2015-09-21 10:59:54.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. @@ -24,7 +24,7 @@ # to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, # Boston, MA 02110-1301 USA. -# .-README---------------------------------------------------------------. +# .--README--------------------------------------------------------------. # | ____ _ | # | | _ \ ___ __ _ __| | _ __ ___ ___ | # | | |_) / _ \/ _` |/ _` | | '_ ` _ \ / _ \ | @@ -92,7 +92,7 @@ #. -# .-Init-----------------------------------------------------------------. +# .--Init----------------------------------------------------------------. 
# | ___ _ _ | # | |_ _|_ __ (_) |_ | # | | || '_ \| | __| | @@ -104,8 +104,9 @@ # `----------------------------------------------------------------------' import sys, pprint, socket, re, subprocess, time, datetime, \ - shutil, tarfile, StringIO, math, fcntl, pickle -import config, htmllib, table, multitar, userdb, hooks, weblib + shutil, tarfile, cStringIO, math, fcntl, pickle, random +import config, table, multitar, userdb, hooks, weblib, login +from hashlib import sha256 from lib import * from valuespec import * import forms @@ -122,7 +123,7 @@ var_dir = defaults.var_dir + "/wato/" log_dir = var_dir + "log/" snapshot_dir = var_dir + "snapshots/" -sync_snapshot_file = defaults.tmp_dir + "/sync_snapshot.tar.gz" +php_api_dir = var_dir + "php-api/" repstatus_file = var_dir + "replication_status.mk" @@ -144,7 +145,7 @@ g_html_head_open = False #. -# .-Main-----------------------------------------------------------------. +# .--Main----------------------------------------------------------------. # | __ __ _ | # | | \/ | __ _(_)_ __ | # | | |\/| |/ _` | | '_ \ | @@ -170,23 +171,18 @@ wato_styles = [ "pages", "wato", "status" ] def page_handler(): - - # Distributed WATO: redirect to better peer, if possible. Only the - # Sites administration is available locally. - peer = preferred_peer() - if do_peer_redirect(peer): - return - global g_html_head_open g_html_head_open = False if not config.wato_enabled: raise MKGeneralException(_("WATO is disabled. Please set wato_enabled = True" " in your multisite.mk if you want to use WATO.")) - if not config.may("wato.use"): + current_mode = html.var("mode") or "main" + modeperms, modefunc = modes.get(current_mode, ([], None)) + + if modeperms != None and not config.may("wato.use"): raise MKAuthException(_("You are not allowed to use WATO.")) - current_mode = html.var("mode") or "main" # If we do an action, we aquire an exclusive lock on the complete # WATO. @@ -205,7 +201,6 @@ else: raise - modeperms, modefunc = modes.get(current_mode, ([], None)) if modefunc == None: html.header(_("Sorry"), stylesheets=wato_styles) html.begin_context_buttons() @@ -216,9 +211,11 @@ return # Check general permission for this mode - if not config.may("wato.seeall"): + if modeperms != None and not config.may("wato.seeall"): for pname in modeperms: - config.need_permission("wato." + pname) + if '.' not in pname: + pname = "wato." + pname + config.need_permission(pname) # Do actions (might switch mode) action_message = None @@ -229,10 +226,11 @@ # Even if the user has seen this mode because auf "seeall", # he needs an explicit access permission for doing changes: if config.may("wato.seeall"): - for pname in modeperms: - if '.' not in pname: - pname = "wato." + pname - config.need_permission(pname) + if modeperms: + for pname in modeperms: + if '.' not in pname: + pname = "wato." + pname + config.need_permission(pname) result = modefunc("action") if type(result) == tuple: @@ -261,15 +259,15 @@ html.set_var("mode", newmode) # will be used by makeuri # Check general permissions for the new mode - if not config.may("wato.seeall"): + if modeperms != None and not config.may("wato.seeall"): for pname in modeperms: if '.' not in pname: pname = "wato." + pname config.need_permission(pname) except MKUserError, e: - action_message = e.message - html.add_user_error(e.varname, e.message) + action_message = "%s" % e + html.add_user_error(e.varname, action_message) except MKAuthException, e: action_message = e.reason @@ -280,13 +278,6 @@ html.write("") html.write("
\n")

-        if peer == False:
-            html.show_error("<h1>%s</h1>%s" % (
-                _("Primary system unreachable"),
-                _("The primary system is currently unreachable. Please make sure "
-                  "that you synchronize changes back as soon as it is available "
-                  "again.")))
-
     try:
         # Show contexts buttons
         html.begin_context_buttons()
@@ -331,6 +322,9 @@
     if g_need_sidebar_reload == id(html):
         html.reload_sidebar()
 
+    if config.wato_use_git and html.is_transaction():
+        do_git_commit()
+
     html.footer()
 
 
@@ -366,9 +360,56 @@
 def lock_exclusive():
     aquire_lock(defaults.default_config_dir + "/multisite.mk")
 
+def unlock_exclusive():
+    release_lock(defaults.default_config_dir + "/multisite.mk")
+
+
+def git_command(args):
+    encoded_args = " ".join([ a.encode("utf-8") for a in args ])
+    command = "cd '%s' && git %s 2>&1" % (defaults.default_config_dir, encoded_args)
+    p = os.popen(command)
+    output = p.read()
+    status = p.close()
+    if status != None:
+        raise MKGeneralException(_("Error executing GIT command <tt>%s</tt>:<br><br>%s") %
+                (command.decode('utf-8'), output.replace("\n", "<br>
\n"))) + +def shell_quote(s): + return "'" + s.replace("'", "'\"'\"'") + "'" + +def do_git_commit(): + author = shell_quote("%s <%s>" % (config.user_id, config.user_alias)) + git_dir = defaults.default_config_dir + "/.git" + if not os.path.exists(git_dir): + git_command(["init"]) + + # Set git repo global user/mail. seems to be needed to prevent warning message + # on at least ubuntu 15.04: "Please tell me who you are. Run git config ..." + # The individual commits by users override the author on their own + git_command(["config", "user.email", "check_mk"]) + git_command(["config", "user.name", "check_mk"]) + + # Make sure that .gitignore-files are present and uptodate + file(defaults.default_config_dir + "/.gitignore", "w").write("*\n!*.d\n!.gitignore\n*swp\n") + for subdir in os.listdir(defaults.default_config_dir): + if subdir.endswith(".d"): + file(defaults.default_config_dir + "/" + subdir + "/.gitignore", "w").write("*\n!wato\n!wato/*\n") + + git_command(["add", ".gitignore", "*.d/wato"]) + git_command(["commit", "--untracked-files=no", "--author", author, "-m", shell_quote(_("Initialized GIT for Check_MK"))]) + + # Only commit, if something is changed + if os.popen("cd '%s' && git status --untracked-files=no --porcelain" % defaults.default_config_dir).read().strip(): + git_command(["add", "*.d/wato"]) + message = ", ".join(g_git_messages) + if not message: + message = _("Unknown configuration change") + git_command(["commit", "--author", author, "-m", shell_quote(message)]) + + #. -# .-Load/Save------------------------------------------------------------. +# .--Load/Save-----------------------------------------------------------. # | _ _ ______ | # | | | ___ __ _ __| | / / ___| __ ___ _____ | # | | | / _ \ / _` |/ _` | / /\___ \ / _` \ \ / / _ \ | @@ -407,19 +448,6 @@ if not folder.get(".lock_hosts"): save_hosts(folder) - -# Removed in version 1.2.1i2 - unused, and might cause -# trouble when saving locked folders -## Save a folder and all of its subfolders (recursively) -#def save_folders(folder): -# save_folder(folder) -# for subfolder in folder[".folders"].values(): -# save_folders(subfolder) -# -# -#def save_all_folders(): -# save_folders(g_root_folder) - def folder_config_exists(dir): return os.path.exists(dir + "/.wato") @@ -530,16 +558,17 @@ filename = root_dir + folder[".path"] + "/hosts.mk" if os.path.exists(filename): variables = { - "FOLDER_PATH" : "", - "ALL_HOSTS" : ALL_HOSTS, - "all_hosts" : [], - "clusters" : {}, - "ipaddresses" : {}, - "extra_host_conf" : { "alias" : [] }, - "extra_service_conf" : { "_WATO" : [] }, - "host_attributes" : {}, - "host_contactgroups" : [], - "_lock" : False, + "FOLDER_PATH" : "", + "ALL_HOSTS" : ALL_HOSTS, + "all_hosts" : [], + "clusters" : {}, + "ipaddresses" : {}, + "explicit_snmp_communities" : {}, + "extra_host_conf" : { "alias" : [] }, + "extra_service_conf" : { "_WATO" : [] }, + "host_attributes" : {}, + "host_contactgroups" : [], + "_lock" : False, } execfile(filename, variables, variables) nodes_of = {} @@ -568,8 +597,9 @@ alias = aliases[0] else: alias = None - host["alias"] = alias - host["ipaddress"] = ipaddress + host["alias"] = alias + host["ipaddress"] = ipaddress + host["snmp_community"] = variables["explicit_snmp_communities"].get(hostname) # Retrieve setting for each individual host tag tags = set([ tag for tag in parts[1:] if tag != 'wato' and not tag.endswith('.mk') ]) @@ -633,6 +663,7 @@ all_hosts = [] # list of [Python string for all_hosts] clusters = [] # tuple list of (Python string, nodes) ipaddresses = {} + 
explicit_snmp_communities = {} hostnames = hosts.keys() hostnames.sort() custom_macros = {} # collect value for attributes that are to be present in Nagios @@ -644,7 +675,8 @@ host = cleaned_hosts[hostname] effective = effective_attributes(host, folder) - ipaddress = effective.get("ipaddress") + ipaddress = effective.get("ipaddress") + snmp_community = effective.get("snmp_community") # Compute tags from settings of each individual tag. We've got # the current value for each individual tag. Also other attributes @@ -670,6 +702,8 @@ if ipaddress: ipaddresses[hostname] = ipaddress + if snmp_community: + explicit_snmp_communities[hostname] = snmp_community # Create contact group rule entries for hosts with explicitely set values # Note: since the type if this entry is a list, not a single contact group, all other list @@ -677,7 +711,9 @@ # precedence over the folder entries. if "contactgroups" in host: - use, cgs = host["contactgroups"] + cgconfig = convert_cgroups_from_tuple(host["contactgroups"]) + cgs = cgconfig["groups"] + use = cgconfig["use"] if use and cgs: out.write("\nhost_contactgroups += [\n") for cg in cgs: @@ -712,6 +748,12 @@ out.write("\n# Explicit IP addresses\n") out.write("ipaddresses.update(") out.write(pprint.pformat(ipaddresses)) + out.write(")\n") + + if len(explicit_snmp_communities) > 0: + out.write("\n# Explicit SNMP communities\n") + out.write("explicit_snmp_communities.update(") + out.write(pprint.pformat(explicit_snmp_communities)) out.write(")") out.write("\n") @@ -727,11 +769,10 @@ # If the contact groups of the host are set to be used for the monitoring, # we create an according rule for the folder and an according rule for # each host that has an explicit setting for that attribute. - effective_folder_attributes = effective_attributes(None, folder) - use, cgs = effective_folder_attributes.get("contactgroups", (False, [])) - if use and cgs: + perm_groups, contact_groups = collect_folder_groups(folder) + if contact_groups: out.write("\nhost_contactgroups.append(\n" - " ( %r, [ '/' + FOLDER_PATH + '/' ], ALL_HOSTS ))\n" % cgs) + " ( %r, [ '/' + FOLDER_PATH + '/' ], ALL_HOSTS ))\n" % list(contact_groups)) # Write information about all host attributes into special variable - even @@ -775,7 +816,7 @@ return ' / '.join(aliaspath) #. -# .-Folders--------------------------------------------------------------. +# .--Folders-------------------------------------------------------------. # | _____ _ _ | # | | ___|__ | | __| | ___ _ __ ___ | # | | |_ / _ \| |/ _` |/ _ \ '__/ __| | @@ -788,24 +829,31 @@ def mode_folder(phase): global g_folder + + auth_message = check_folder_permissions(g_folder, "read", False) + auth_read = auth_message == True + auth_write = check_folder_permissions(g_folder, "write", False) == True + if phase == "title": return g_folder["title"] elif phase == "buttons": global_buttons() - # html.write("
<br><br><br>
") if config.may("wato.rulesets") or config.may("wato.seeall"): html.context_button(_("Rulesets"), make_link([("mode", "ruleeditor")]), "rulesets") - html.context_button(_("Folder Properties"), make_link_to([("mode", "editfolder")], g_folder), "edit") - if not g_folder.get(".lock_subfolders") and config.may("wato.manage_folders"): + html.context_button(_("Manual Checks"), make_link([("mode", "static_checks")]), "static_checks") + if auth_read: + html.context_button(_("Folder Properties"), make_link_to([("mode", "editfolder")], g_folder), "edit") + if not g_folder.get(".lock_subfolders") and config.may("wato.manage_folders") and auth_write: html.context_button(_("New folder"), make_link([("mode", "newfolder")]), "newfolder") - if not g_folder.get(".lock_hosts") and config.may("wato.manage_hosts"): + if not g_folder.get(".lock_hosts") and config.may("wato.manage_hosts") and auth_write: html.context_button(_("New host"), make_link([("mode", "newhost")]), "new") html.context_button(_("New cluster"), make_link([("mode", "newcluster")]), "new_cluster") + html.context_button(_("Bulk Import"), make_link_to([("mode", "bulk_import")], g_folder), "bulk_import") if config.may("wato.services"): - html.context_button(_("Bulk Inventory"), make_link([("mode", "bulkinventory"), ("all", "1")]), + html.context_button(_("Bulk Discovery"), make_link([("mode", "bulkinventory"), ("all", "1")]), "inventory") - if not g_folder.get(".lock_hosts") and config.may("wato.parentscan"): + if not g_folder.get(".lock_hosts") and config.may("wato.parentscan") and auth_write: html.context_button(_("Parent scan"), make_link([("mode", "parentscan"), ("all", "1")]), "parentscan") search_button() @@ -873,6 +921,10 @@ if not html.transaction_valid(): return + # Host table: No error message on search filter reset + if html.var("_hosts_reset_sorting") or html.var("_hosts_sort"): + return + selected_hosts = get_hostnames_from_checkboxes() if len(selected_hosts) == 0: raise MKUserError(None, @@ -894,7 +946,7 @@ elif html.var("_bulk_move"): config.need_permission("wato.edit_hosts") config.need_permission("wato.move_hosts") - target_folder_name = html.var("bulk_moveto") + target_folder_name = html.var("bulk_moveto", html.var("_top_bulk_moveto")) if target_folder_name == "@": raise MKUserError("bulk_moveto", _("Please select the destination folder")) target_folder = g_folders[target_folder_name] @@ -916,6 +968,9 @@ else: render_folder_path() + if not auth_read: + html.message(HTML(' %s' % html.attrencode(auth_message))) + lock_messages = [] if g_folder.get(".lock_hosts"): if g_folder[".lock_hosts"] == True: @@ -943,7 +998,7 @@ if True == check_folder_permissions(g_folder, "read", False): have_something = show_hosts(g_folder) or have_something - if not have_something: + if not have_something and auth_write: menu_items = [] if not g_folder.get(".lock_hosts"): menu_items.extend([ @@ -986,8 +1041,8 @@ if config.may("wato.all_folders"): return True host = folder[".hosts"][hostname] - effective = effective_attributes(host, folder) - use, cgs = effective.get("contactgroups", (None, [])) + perm_groups, contact_groups = collect_host_groups(host, folder) + # Get contact groups of user users = userdb.load_users() if config.user_id not in users: @@ -996,12 +1051,12 @@ user_cgs = users[config.user_id].get("contactgroups",[]) for c in user_cgs: - if c in cgs: + if c in perm_groups: return True reason = _("Sorry, you have no permission on the host '%s'. 
The host's contact " "groups are %s, your contact groups are %s.") % \ - (hostname, ", ".join(cgs), ", ".join(user_cgs)) + (hostname, ", ".join(perm_groups), ", ".join(user_cgs)) if exception: raise MKAuthException(reason) return reason @@ -1014,18 +1069,26 @@ for child in folder.get('.folders', {}).itervalues(): get_flat_folders(child) - get_flat_folders(api.get_folder_tree()) + get_flat_folders(get_folder_tree()) permissions = {} users = userdb.load_users() for username in users.iterkeys(): - permissions[username] = {} + perms = {} for folder_path, folder in folders.iteritems(): - permissions[username][folder_path] = { - 'read': check_folder_permissions(folder, 'read', False, username, users) == True, - 'write': check_folder_permissions(folder, 'write', False, username, users) == True, - } + readable = check_folder_permissions(folder, 'read', False, username, users) == True + writable = check_folder_permissions(folder, 'write', False, username, users) == True + + if readable or writable: + perms[folder_path] = {} + if readable: + perms[folder_path]['read'] = True + if writable: + perms[folder_path]['write'] = True + + if perms: + permissions[username] = perms return permissions def check_folder_permissions(folder, how, exception=True, user = None, users = None): @@ -1041,8 +1104,7 @@ return True # Get contact groups of that folder - effective = effective_attributes(None, folder) - use, cgs = effective.get("contactgroups", (None, [])) + perm_groups, cgs = collect_folder_groups(folder) if not user: user = config.user_id @@ -1056,18 +1118,19 @@ user_cgs = users[user].get("contactgroups", []) for c in user_cgs: - if c in cgs: + if c in perm_groups: return True - reason = _("Sorry, you have no permission on the folder '%s'. " % folder["title"]) - if not cgs: - reason += _("The folder has no contact groups assigned to.") + reason = _("Sorry, you have no permissions to the folder %s. ") % folder["title"] + if not perm_groups: + reason += _("The folder is not permitted for any contact group.") else: - reason += _("The folder's contact groups are %s. " % ", ".join(cgs)) + reason += _("The folder's permitted contact groups are %s. ") % ", ".join(perm_groups) if user_cgs: reason += _("Your contact groups are %s.") % ", ".join(user_cgs) else: reason += _("But you are not a member of any contact group.") + reason += _("You may enter the folder as you might have permission on a subfolders, though.") if exception: raise MKAuthException(reason) @@ -1081,7 +1144,8 @@ if config.may("wato.all_folders"): return - use, cgs = cgspec + cgconf = convert_cgroups_from_tuple(cgspec) + cgs = cgconf["groups"] users = userdb.load_users() if config.user_id not in users: user_cgs = [] @@ -1094,6 +1158,53 @@ ( c, ", ".join(user_cgs))) +def get_folder_cgconf_from_attributes(attributes): + v = attributes.get("contactgroups", ( False, [] )) + cgconf = convert_cgroups_from_tuple(v) + return cgconf + +# Get all contact groups of a folder, while honoring recursive +# groups and permissions. Returns a pair of +# 1. The folders permitted groups (for WATO permissions) +# 2. 
The folders contact groups (for hosts) +def collect_folder_groups(folder, host=None): + perm_groups = set([]) + host_groups = set([]) + effective_folder_attributes = effective_attributes(host, folder) + cgconf = get_folder_cgconf_from_attributes(effective_folder_attributes) + + # First set explicit groups + perm_groups.update(cgconf["groups"]) + if cgconf["use"]: + host_groups.update(cgconf["groups"]) + + # Now consider recursion + if host: + parent = folder + elif ".parent" in folder: + parent = folder['.parent'] + else: + parent = None + + while parent: + effective_folder_attributes = effective_attributes(None, parent) + parconf = get_folder_cgconf_from_attributes(effective_folder_attributes) + parent_perm_groups, parent_host_groups = collect_folder_groups(parent) + + if parconf["recurse_perms"]: # Parent gives us its permissions + perm_groups.update(parent_perm_groups) + + if parconf["recurse_use"]: # Parent give us its contact groups + host_groups.update(parent_host_groups) + + parent = parent.get(".parent") + + return perm_groups, host_groups + + +def collect_host_groups(host, folder): + return collect_folder_groups(folder, host) + def show_subfolders(folder): if len(folder[".folders"]) == 0: @@ -1101,7 +1212,7 @@ html.write('
') - for entry in api.sort_by_title(folder[".folders"].values()): + for entry in sort_by_title(folder[".folders"].values()): enter_url = make_link_to([("mode", "folder")], entry) edit_url = make_link_to([("mode", "editfolder"), ("backfolder", g_folder[".path"])], entry) delete_url = make_action_link([("mode", "folder"), ("_delete_folder", entry[".name"])]) @@ -1113,18 +1224,24 @@ html.write('
') # Only make folder openable when permitted to edit - if auth_read: - html.write( - '
' - ) + if not auth_read: + html.write('' % \ + (html.strip_tags(auth_message))) + + if True: # auth_read: + if not auth_read: + html.write('
') + + else: + html.write( + '
' + ) - if auth_read: html.icon_button( edit_url, _("Edit the properties of this folder"), @@ -1145,7 +1262,7 @@ style = 'display:none', onclick = 'wato_toggle_move_folder(event, this);' ) - html.write('') - else: - html.write('' % \ - (htmllib.strip_tags(auth_message))) html.write('
') - # Show contact groups of the folder - effective = effective_attributes(None, entry) - use, cgs = effective.get("contactgroups", (None, [])) - group_info = userdb.load_group_information().get("contact", {}) - for num, cg in enumerate(cgs): - cgalias = group_info.get(cg,cg) - html.icon(_("Contactgroup assign to this folder"), "contactgroups") + groups = userdb.load_group_information().get("contact", {}) + perm_groups, contact_groups = collect_folder_groups(entry) + for num, pg in enumerate(perm_groups): + cgalias = groups.get(pg, {'alias': pg})['alias'] + html.icon(_("Contactgroups that have permission on this folder"), "contactgroups") html.write(' %s
<br>' % cgalias)
-            if num > 1 and len(cgs) > 4:
-                html.write(_('%d more contact groups<br>
') % (len(cgs) - num - 1))
+            if num > 1 and len(perm_groups) > 4:
+                html.write(_('%d more contact groups<br>
') % (len(perm_groups) - num - 1)) break + num_hosts = num_hosts_in(entry, recurse=True) if num_hosts == 1: html.write(_("1 Host")) @@ -1212,22 +1325,16 @@ html.write("
<h3>" + _("Hosts") + "</h3>
") hostnames = folder[".hosts"].keys() - hostnames.sort() + hostnames.sort(cmp = lambda a, b: cmp(num_split(a), num_split(b))) search_text = html.var("search") # Helper function for showing bulk actions. This is needed at the bottom # of the table of hosts and - if there are more than just a few - also # at the top of the table. search_shown = False - def bulk_actions(at_least_one_imported, top, withsearch, colspan, odd, show_checkboxes): - html.write('' % odd) - html.write("") - if withsearch: - html.text_input(top and "search" or "search") - html.button("_search", _("Search")) - html.set_focus("search") - html.write('') - html.write("" % (colspan-3)) + def bulk_actions(at_least_one_imported, top, withsearch, colspan, show_checkboxes): + table.row(collect_headers=False, fixed=True) + table.cell(css="bulksearch", colspan=3) if not show_checkboxes: html.write('
' % ( @@ -1239,29 +1346,32 @@ 'onclick="location.href=\'%s\'">
' % ( 'checkbox', _('Hide Checkboxes and bulk actions'), 'checkbox', html.makeuri([('show_checkboxes', '0')]))) + if withsearch: + html.text_input(top and "search" or "search") + html.button("_search", _("Search")) + html.set_focus("search") + table.cell(css="bulkactions", colspan=colspan-3) + html.write(' ' + _("Selected hosts:\n")) - html.write(' ' + _("Selected hosts:\n")) - - if not g_folder.get(".lock_hosts"): - if config.may("wato.manage_hosts"): - html.button("_bulk_delete", _("Delete")) - if config.may("wato.edit_hosts"): - html.button("_bulk_edit", _("Edit")) - html.button("_bulk_cleanup", _("Cleanup")) - if config.may("wato.services"): - html.button("_bulk_inventory", _("Inventory")) - if not g_folder.get(".lock_hosts"): - if config.may("wato.parentscan"): - html.button("_parentscan", _("Parentscan")) - if config.may("wato.edit_hosts") and config.may("wato.move_hosts"): - move_to_folder_combo("host", None, top) - if at_least_one_imported: - html.button("_bulk_movetotarget", _("Move to Target Folders")) - html.write("\n") + if not g_folder.get(".lock_hosts"): + if config.may("wato.manage_hosts"): + html.button("_bulk_delete", _("Delete")) + if config.may("wato.edit_hosts"): + html.button("_bulk_edit", _("Edit")) + html.button("_bulk_cleanup", _("Cleanup")) + if config.may("wato.services"): + html.button("_bulk_inventory", _("Discovery")) + if not g_folder.get(".lock_hosts"): + if config.may("wato.parentscan"): + html.button("_parentscan", _("Parentscan")) + if config.may("wato.edit_hosts") and config.may("wato.move_hosts"): + move_to_folder_combo("host", None, top) + if at_least_one_imported: + html.button("_bulk_movetotarget", _("Move to Target Folders")) # Show table of hosts in this folder - html.begin_form("hosts", None, "POST") - html.write("\n") + html.begin_form("hosts", method = "POST") + table.begin("hosts", searchable=False) # Remember if that host has a target folder (i.e. was imported with # a folder information but not yet moved to that folder). 
If at least @@ -1282,42 +1392,26 @@ more_than_ten_items = True # Compute colspan for bulk actions - colspan = 5 + colspan = 6 for attr, topic in host_attributes: if attr.show_in_table(): colspan += 1 - if config.may("wato.edit_hosts") and config.may("wato.move_hosts"): + if not g_folder.get(".lock_hosts") and config.may("wato.edit_hosts") and config.may("wato.move_hosts"): + colspan += 1 + if show_checkboxes: colspan += 1 # Add the bulk action buttons also to the top of the table when this # list shows more than 10 rows if more_than_ten_items and \ (config.may("wato.edit_hosts") or config.may("wato.manage_hosts")): - bulk_actions(at_least_one_imported, True, True, colspan, "even", show_checkboxes) + bulk_actions(at_least_one_imported, True, True, colspan, show_checkboxes) search_shown = True - # Header line - html.write("") - if show_checkboxes: - html.write("") - html.write("") - if not config.wato_hide_hosttags: - html.write("") - - for attr, topic in host_attributes: - if attr.show_in_table(): - html.write("" % attr.title()) - - if not g_folder.get(".lock_hosts") and config.may("wato.edit_hosts") and config.may("wato.move_hosts"): - html.write("") - - html.write("\n") - odd = "odd" + contact_group_names = userdb.load_group_information().get("contact", {}) + def render_contact_group(c): + display_name = contact_group_names.get(c, {'alias': c})['alias'] + return '%s' % (c, display_name) host_errors = validate_all_hosts(hostnames) rendered_hosts = [] @@ -1330,19 +1424,19 @@ host = g_folder[".hosts"][hostname] effective = effective_attributes(host, g_folder) - # Rows with alternating odd/even styles - html.write('' % odd) - odd = odd == "odd" and "even" or "odd" + table.row() # Column with actions (buttons) edit_url = make_link([("mode", "edithost"), ("host", hostname)]) + params_url = make_link([("mode", "object_parameters"), ("host", hostname)]) services_url = make_link([("mode", "inventory"), ("host", hostname)]) clone_url = make_link([("mode", host.get(".nodes") and "newcluster" or "newhost"), ("clone", hostname)]) delete_url = make_action_link([("mode", "folder"), ("_delete_host", hostname)]) if show_checkboxes: - html.write('\n') - html.write("\n") # Hostname with link to details page (edit host) - html.write('') + # Show attributes + for attr, topic in host_attributes: + if attr.show_in_table(): + attrname = attr.name() + if attrname in host: + tdclass, tdcontent = attr.paint(host.get(attrname), hostname) + else: + tdclass, tdcontent = attr.paint(effective.get(attrname), hostname) + tdclass += " inherited" + table.cell(attr.title(), tdcontent, css=tdclass) # Am I authorized? auth = check_host_permissions(hostname, False) @@ -1391,8 +1493,14 @@ title = _("You have permission to this host.") else: icon = "autherr" - title = htmllib.strip_tags(auth) - html.write('' % (icon, title)) + title = html.strip_tags(auth) + + table.cell(_('Auth'), '' % (icon, title), sortable=False) + + # Permissions and Contact groups - through complete recursion and inhertance + perm_groups, contact_groups = collect_host_groups(host, folder) + table.cell(_("Permissions"), ", ".join(map(render_contact_group, perm_groups))) + table.cell(_("Contact Groups"), ", ".join(map(render_contact_group, contact_groups))) if not config.wato_hide_hosttags: # Raw tags @@ -1401,33 +1509,18 @@ # 1. add round the single tags to prevent wrap within tags # 2. 
add "zero width space" (​) tag_title = "|".join([ '%s' % t for t in host[".tags"] ]) - html.write("" % (tag_title, "|​".join( - [ '%s' % t for t in host[".tags"] ]))) - - # Show attributes - for attr, topic in host_attributes: - if attr.show_in_table(): - attrname = attr.name() - if attrname in host: - tdclass, tdcontent = attr.paint(host.get(attrname), hostname) - else: - tdclass, tdcontent = attr.paint(effective.get(attrname), hostname) - tdclass += " inherited" - html.write('\n") + table.cell(_("Tags"), help=tag_title, css="tag-ellipsis") + html.write("|​".join([ '%s' % t for t in host[".tags"] ])) # Move to if not g_folder.get(".lock_hosts") and config.may("wato.edit_hosts") and config.may("wato.move_hosts"): - html.write("\n") - html.write("\n") if config.may("wato.edit_hosts") or config.may("wato.manage_hosts"): - bulk_actions(at_least_one_imported, False, not search_shown, colspan, odd, show_checkboxes) - html.write("
") - html.write("" % _('X')) - html.write(""+_("Actions")+"" - + _("Hostname") + "" - + _("Auth") + "" + _("Tags") + "%s" + _("Move To") + "
') + table.cell("" % _('X'), sortable=False) # Use CSS class "failed" in order to provide information about # selective toggling inventory-failed hosts for Javascript if host.get("inventory_failed"): @@ -1350,25 +1444,24 @@ else: css_class = "" html.write("" % (css_class, hostname, colspan)) - html.write('") + table.cell(_("Actions"), css="buttons", sortable=False) html.icon_button(edit_url, _("Edit the properties of this host"), "edit") + html.icon_button(params_url, _("View the rule based parameters of this host"), "rulesets") if check_host_permissions(hostname, False) == True: - msg = _("Edit the services of this host, do an inventory") + msg = _("Edit the services of this host, do a service discovery") image = "services" if host.get("inventory_failed"): image = "inventory_failed" - msg += ". " + _("The inventory of this host failed during a previous bulk inventory.") + msg += ". " + _("The service discovery of this host failed during a previous bulk service discovery.") html.icon_button(services_url, msg, image) if not g_folder.get(".lock_hosts") and config.may("wato.manage_hosts"): if config.may("wato.clone_hosts"): html.icon_button(clone_url, _("Create a clone of this host"), "insert") html.icon_button(delete_url, _("Delete this host"), "delete") - html.write("') + table.cell(_("Hostname")) errors = host_errors.get(hostname,[]) + validate_host(host, g_folder) if errors: msg = _("Warning: This host has an invalid configuration: ") @@ -1381,8 +1474,17 @@ if ".nodes" in host: html.write(" ") html.icon(_("This host is a cluster of %s") % ", ".join(host[".nodes"]), "cluster") - html.write('%s' % tdclass) - html.write(tdcontent) - html.write("") + table.cell(_("Move To"), css="right", sortable=False) move_to_folder_combo("host", hostname) - html.write("
\n") + bulk_actions(at_least_one_imported, False, not search_shown, colspan, show_checkboxes) + table.end() html.hidden_fields() html.end_form() @@ -1594,12 +1687,15 @@ elif target_folder.get(".lock_subfolders"): raise MKUserError(None, _("Cannot move folder: Target folder is locked.")) + new_dir = folder_dir(target_folder) + if os.path.exists(new_dir): + raise MKUserError(None, _("Cannot move folder: A folder with this name already exists in the target folder.")) + old_parent = what_folder[".parent"] old_dir = folder_dir(what_folder) del old_parent[".folders"][what_folder[".name"]] target_folder[".folders"][what_folder[".name"]] = what_folder what_folder[".parent"] = target_folder - new_dir = folder_dir(target_folder) shutil.move(old_dir, new_dir) def delete_folder_after_confirm(del_folder): @@ -1628,23 +1724,26 @@ # Create list of all hosts that are select with checkboxes in the current file. # This is needed for bulk operations. def get_hostnames_from_checkboxes(filterfunc = None): + show_checkboxes = html.var("show_checkboxes") == "1" + entries = g_folder[".hosts"].items() entries.sort() - selected = weblib.get_rowselection('wato-folder-/'+g_folder['.path']) + if show_checkboxes: + selected = weblib.get_rowselection('wato-folder-/'+g_folder['.path']) selected_hosts = [] search_text = html.var("search") for hostname, host in entries: if (not search_text or (search_text.lower() in hostname.lower())) \ - and ('_c_' + hostname) in selected: + and (not show_checkboxes or ('_c_' + hostname) in selected): if filterfunc == None or \ filterfunc(host): selected_hosts.append(hostname) return selected_hosts #. -# .-Edit Folder----------------------------------------------------------. +# .--Edit Folder---------------------------------------------------------. # | _____ _ _ _ _____ _ _ | # | | ____|__| (_) |_ | ___|__ | | __| | ___ _ __ | # | | _| / _` | | __| | |_ / _ \| |/ _` |/ _ \ '__| | @@ -1715,32 +1814,12 @@ if new: check_folder_permissions(g_folder, "write") check_user_contactgroups(attributes.get("contactgroups", (False, []))) - if g_folder[".path"]: - newpath = g_folder[".path"] + "/" + name - else: - newpath = name - new_folder = { - ".name" : name, - ".path" : newpath, - "title" : title, - "attributes" : attributes, - ".folders" : {}, - ".hosts" : {}, - "num_hosts" : 0, - ".lock" : False, - } - g_folders[newpath] = new_folder - g_folder[".folders"][name] = new_folder - save_folder(new_folder) - reload_folder(new_folder) - call_hook_folder_created(new_folder) - # Note: sites are not marked as dirty. Only peers will be synced. - # The creation of a folder without hosts has not effect on the - # monitoring. - log_pending(AFFECTED, new_folder, "new-folder", _("Created new folder %s") % title) + create_wato_folder(g_folder, name, title, attributes) else: - cgs_changed = attributes.get("contactgroups") != g_folder["attributes"].get("contactgroups") + # TODO: migrate this block into own function edit_wato_folder(..) + cgs_changed = get_folder_cgconf_from_attributes(attributes) != \ + get_folder_cgconf_from_attributes(g_folder["attributes"]) other_changed = attributes != g_folder["attributes"] and not cgs_changed if other_changed: check_folder_permissions(g_folder, "write") @@ -1765,7 +1844,7 @@ # in Nagios-relevant attributes. rewrite_config_files_below(g_folder) # due to inherited attributes save_folder(g_folder) - # This updats g_folder and g_folders[...] + # This updates g_folder and g_folders[...] 
g_folder = reload_folder(g_folder) mark_affected_sites_dirty(g_folder) @@ -1794,7 +1873,7 @@ if len(lock_message) > 0: html.write("
" + lock_message + "
") - html.begin_form("edithost") + html.begin_form("edithost", method = "POST") # title forms.header(_("Title")) @@ -1833,9 +1912,10 @@ html.end_form() -def check_wato_foldername(htmlvarname, name): - if name in g_folder[".folders"]: +def check_wato_foldername(htmlvarname, name, just_name = False): + if not just_name and name in g_folder: raise MKUserError(htmlvarname, _("A folder with that name already exists.")) + if not name: raise MKUserError(htmlvarname, _("Please specify a name.")) if not re.match("^[-a-z0-9A-Z_]*$", name): @@ -1879,7 +1959,7 @@ #. -# .-Edit-Host------------------------------------------------------------. +# .--Edit-Host-----------------------------------------------------------. # | _____ _ _ _ _ _ _ | # | | ____|__| (_) |_ | | | | ___ ___| |_ | # | | _| / _` | | __| | |_| |/ _ \/ __| __| | @@ -1905,11 +1985,14 @@ host = g_folder[".hosts"][clonename] cluster = ".nodes" in host mode = "clone" + check_host_permissions(clonename) + elif not new and hostname in g_folder[".hosts"]: - title = _("Edit host") + " " + hostname + title = _("Properties of host") + " " + hostname host = g_folder[".hosts"][hostname] cluster = ".nodes" in host mode = "edit" + check_host_permissions(hostname) else: if cluster: title = _("Create new cluster") @@ -1918,21 +2001,43 @@ title = _("Create new host") host = {} mode = "new" + new = True + check_new_host_permissions(g_folder, host, hostname) if phase == "title": return title elif phase == "buttons": - if not new: - host_status_button(hostname, "hoststatus") html.context_button(_("Folder"), make_link([("mode", "folder")]), "back") if not new: + host_status_button(hostname, "hoststatus") + html.context_button(_("Services"), make_link([("mode", "inventory"), ("host", hostname)]), "services") - html.context_button(_("Rulesets"), - make_link([("mode", "ruleeditor"), ("host", hostname), ("local", "on")]), "rulesets") + html.context_button(_("Parameters"), + make_link([("mode", "object_parameters"), ("host", hostname)]), "rulesets") + if not g_folder.get(".lock_hosts"): + html.context_button(_("Rename %s") % (cluster and _("Cluster") or _("Host")), + make_link([("mode", "rename_host"), ("host", hostname)]), "rename_host") + if not cluster: + html.context_button(_("Diagnostic"), + make_link([("mode", "diag_host"), ("host", hostname)]), "diagnose") + html.context_button(_("Update DNS Cache"), + html.makeactionuri([("_update_dns_cache", "1")]), "update") elif phase == "action": + if html.var("_update_dns_cache"): + if html.check_transaction(): + config.need_permission("wato.update_dns_cache") + num_updated, failed_hosts = check_mk_automation(host[".siteid"], "update-dns-cache", []) + infotext = _("Successfully updated IP addresses of %d hosts.") % num_updated + if failed_hosts: + infotext += "
<br><br>
Hostnames failed to lookup: " + ", ".join(["%s" % h for h in failed_hosts]) + return None, infotext + else: + return None + + if not new and html.var("delete"): # Delete this host config.need_permission("wato.manage_hosts") check_folder_permissions(g_folder, "write") @@ -1955,61 +2060,46 @@ if new: if not html.transaction_valid(): return "folder" - config.need_permission("wato.manage_hosts") - check_folder_permissions(g_folder, "write") - check_user_contactgroups(host.get("contactgroups", (False, []))) - if not hostname: - raise MKUserError("host", _("Please specify a host name.")) - elif hostname in g_folder[".hosts"]: - raise MKUserError("host", _("A host with this name already exists.")) - elif not re.match("^[a-zA-Z0-9-_.]+$", hostname): - raise MKUserError("host", _("Invalid host name: must contain only characters, digits, dash, underscore and dot.")) + check_new_host_permissions(g_folder, host, hostname) else: - config.need_permission("wato.edit_hosts") - - # Check which attributes have changed. For a change in the contact groups - # we need permissions on the folder. For a change in the rest we need - # permissions on the host - old_host = dict(g_folder[".hosts"][hostname].items()) - del old_host[".tags"] # not contained in new host - cgs_changed = host.get("contactgroups") != old_host.get("contactgroups") - other_changed = old_host != host and not cgs_changed - if other_changed: - check_host_permissions(hostname) - if cgs_changed \ - and True != check_folder_permissions(g_folder, "write", False): - raise MKAuthException(_("Sorry. In order to change the permissions of a host you need write " - "access to the folder it is contained in.")) - if cgs_changed: - check_user_contactgroups(host.get("contactgroups", (False, []))) + check_edit_host_permissions(g_folder, host, hostname) if hostname: go_to_services = html.var("services") + go_to_diag = html.var("diag_host") if html.check_transaction(): if new: - g_folder[".hosts"][hostname] = host - mark_affected_sites_dirty(g_folder, hostname) - message = _("Created new host %s.") % hostname - log_pending(AFFECTED, hostname, "create-host", message) - g_folder["num_hosts"] += 1 + add_hosts_to_folder(g_folder, {hostname: host}) else: - # The site attribute might have changed. In that case also - # the old site of the host must be marked dirty. - mark_affected_sites_dirty(g_folder, hostname) - g_folder[".hosts"][hostname] = host - mark_affected_sites_dirty(g_folder, hostname) - log_pending(AFFECTED, hostname, "edit-host", _("Edited properties of host [%s]") % hostname) - save_folder_and_hosts(g_folder) - reload_hosts(g_folder) - call_hook_hosts_changed(g_folder) + update_hosts_in_folder(g_folder, {hostname: {"set": host}}) errors = validate_all_hosts([hostname]).get(hostname, []) + validate_host(g_folder[".hosts"][hostname], g_folder) if errors: # keep on this page if host does not validate return elif new: - return go_to_services and "firstinventory" or "folder" + if host.get('tag_agent') != 'ping': + create_result = 'folder', _('Successfully created the host. 
Now you should do a ' + 'service discovery in order to auto-configure ' + 'all services to be checked on this host.') % \ + make_link([("mode", "inventory"), ("host", hostname)]) + else: + create_result = 'folder' + + if go_to_services: + return "firstinventory" + elif go_to_diag: + html.set_var("_try", "1") + return "diag_host" + else: + return create_result else: - return go_to_services and "inventory" or "folder" + if go_to_services: + return "inventory" + elif go_to_diag: + html.set_var("_try", "1") + return "diag_host" + else: + return "folder" else: # Show outcome of host validation. Do not validate new hosts @@ -2041,7 +2131,7 @@ if len(lock_message) > 0: html.write("
" + lock_message + "
") - html.begin_form("edithost") + html.begin_form("edithost", method="POST") # host name forms.header(_("General Properties")) @@ -2067,6 +2157,8 @@ if not g_folder.get(".lock_hosts"): html.image_button("services", _("Save & go to Services"), "submit") html.image_button("save", _("Save & Finish"), "submit") + if not cluster: + html.image_button("diag_host", _("Save & Test"), "submit") if not new: html.image_button("delete", _("Delete host!"), "submit") html.hidden_fields() @@ -2094,909 +2186,2116 @@ else: return None # browser reload +def check_new_hostname(varname, hostname): + if not hostname: + raise MKUserError(varname, _("Please specify a host name.")) + elif hostname in g_folder[".hosts"]: + raise MKUserError(varname, _("A host with this name already exists.")) + elif not re.match("^[a-zA-Z0-9-_.]+$", hostname): + raise MKUserError(varname, _("Invalid host name: must contain only characters, digits, dash, underscore and dot.")) + +def check_new_host_permissions(folder, host, hostname): + config.need_permission("wato.manage_hosts") + check_folder_permissions(folder, "write") + check_user_contactgroups(host.get("contactgroups", (False, []))) + if hostname != None: # otherwise: name not known yet + check_new_hostname("host", hostname) + +def check_edit_host_permissions(folder, host, hostname): + config.need_permission("wato.edit_hosts") + + # Check which attributes have changed. For a change in the contact groups + # we need permissions on the folder. For a change in the rest we need + # permissions on the host + old_host = dict(folder[".hosts"][hostname].items()) + del old_host[".tags"] # not contained in new host + cgs_changed = get_folder_cgconf_from_attributes(host) != \ + get_folder_cgconf_from_attributes(old_host) + other_changed = old_host != host and not cgs_changed + if other_changed: + check_host_permissions(hostname, folder = folder) + if cgs_changed \ + and True != check_folder_permissions(folder, "write", False): + raise MKAuthException(_("Sorry. In order to change the permissions of a host you need write " + "access to the folder it is contained in.")) + if cgs_changed: + check_user_contactgroups(host.get("contactgroups", (False, []))) + #. -# .-Inventory & Services-------------------------------------------------. -# | ____ _ | -# | / ___| ___ _ ____ _(_) ___ ___ ___ | -# | \___ \ / _ \ '__\ \ / / |/ __/ _ \/ __| | -# | ___) | __/ | \ V /| | (_| __/\__ \ | -# | |____/ \___|_| \_/ |_|\___\___||___/ | +# .--Rename Host---------------------------------------------------------. +# | ____ _ _ _ | +# | | _ \ ___ _ __ __ _ _ __ ___ ___ | | | | ___ ___| |_ | +# | | |_) / _ \ '_ \ / _` | '_ ` _ \ / _ \ | |_| |/ _ \/ __| __| | +# | | _ < __/ | | | (_| | | | | | | __/ | _ | (_) \__ \ |_ | +# | |_| \_\___|_| |_|\__,_|_| |_| |_|\___| |_| |_|\___/|___/\__| | # | | # +----------------------------------------------------------------------+ -# | Mode for doing the inventory on a single host and/or showing and | -# | editing the current services of a host. | +# | Mode for renaming an existing host. 
| # '----------------------------------------------------------------------' -def mode_inventory(phase, firsttime): +def mode_rename_host(phase): hostname = html.var("host") + if hostname not in g_folder[".hosts"]: - raise MKGeneralException(_("You called this page for a non-existing host.")) + raise MKGeneralException(_("You called this page with an invalid host name.")) + + check_host_permissions(hostname) + host = g_folder[".hosts"][hostname] + is_cluster = ".nodes" in host if phase == "title": - title = _("Services of host %s") % hostname - if html.var("_scan"): - title += _(" (live scan)") - else: - title += _(" (cached data)") - return title + return _("Rename %s %s") % (is_cluster and _("Cluster") or _("Host"), hostname) elif phase == "buttons": - host_status_button(hostname, "host") - html.context_button(_("Folder"), - make_link([("mode", "folder")]), "back") - html.context_button(_("Host properties"), - make_link([("mode", "edithost"), ("host", hostname)]), "host") - html.context_button(_("Full Scan"), html.makeuri([("_scan", "yes")])) + global_buttons() + html.context_button(_("Host Properties"), make_link([("mode", "edithost"), ("host", hostname)]), "back") + return elif phase == "action": - config.need_permission("wato.services") - check_host_permissions(hostname) - if html.check_transaction(): - cache_options = not html.var("_scan") and [ '--cache' ] or [] - table = check_mk_automation(host[".siteid"], "try-inventory", cache_options + [hostname]) - table.sort() - active_checks = {} - new_target = "folder" - for st, ct, checkgroup, item, paramstring, params, descr, state, output, perfdata in table: - if (html.has_var("_cleanup") or html.has_var("_fixall")) \ - and st in [ "vanished", "obsolete" ]: - pass - elif (html.has_var("_activate_all") or html.has_var("_fixall")) \ - and st == "new": - active_checks[(ct, item)] = paramstring - else: - varname = "_%s_%s" % (ct, item) - if html.var(varname, "") != "": - active_checks[(ct, item)] = paramstring + if g_folder.get(".lock_hosts"): + raise MKGeneralException(_("This folder is locked. You cannot rename a host here.")) - check_mk_automation(host[".siteid"], "set-autochecks", [hostname], active_checks) - if host.get("inventory_failed"): - del host["inventory_failed"] - save_hosts() - message = _("Saved check configuration of host [%s] with %d services") % \ - (hostname, len(active_checks)) - log_pending(LOCALRESTART, hostname, "set-autochecks", message) - mark_affected_sites_dirty(g_folder, hostname, sync=False, restart=True) - return new_target, message - return "folder" + if parse_audit_log("pending"): + raise MKGeneralException(_("You cannot rename a host while you have pending changes.")) - else: - show_service_table(host, firsttime) + newname = html.var("newname") + check_new_hostname("newname", newname) + c = wato_confirm(_("Confirm renaming of host"), + _("Are you sure you want to rename the host %s into %s? " + "This involves a restart of the monitoring core!") % + (hostname, newname)) + if c: + # Creating pending entry. That makes the site dirty and that will force a sync of + # the config to that site before the automation is being done. + log_pending(AFFECTED, newname, "rename-host", _("Renamed host %s into %s") % (hostname, newname)) + actions = rename_host(host, newname) # Already activates the changes! + log_commit_pending() # All activated by the underlying rename automation + html.set_var("host", newname) + action_txt = "".join([ "
<li>%s</li>" % a for a in actions ])
+            return "edithost", HTML(_("Renamed host %s into %s at the following places:<ul>%s</ul>
    ") % ( + hostname, newname, action_txt)) + elif c == False: # not yet confirmed + return "" + return + html.help(_("The renaming of hosts is a complex operation since a host's name is being " + "used as a unique key in various places. It also involves stopping and starting " + "of the monitoring core. You cannot rename a host while you have pending changes.")) + + html.begin_form("rename_host", method="POST") + forms.header(_("Rename to host %s") % hostname) + forms.section(_("Current name")) + html.write(hostname) + forms.section(_("New name")) + html.text_input("newname", "") + forms.end() + html.set_focus("newname") + html.image_button("rename", _("Rename host!"), "submit") + html.hidden_fields() + html.end_form() -def show_service_table(host, firsttime): - hostname = host[".name"] +def rename_host_in_list(thelist, oldname, newname): + did_rename = False + for nr, element in enumerate(thelist): + if element == oldname: + thelist[nr] = newname + did_rename = True + elif element == '!'+oldname: + thelist[nr] = '!'+newname + did_rename = True + return did_rename + +def rename_host(host, newname): + + actions = [] + + # 1. Fix WATO configuration itself ---------------- + + # Hostname itself in the current folder + oldname = host[".name"] + g_folder[".hosts"][newname] = host + host[".name"] = newname + del g_folder[".hosts"][oldname] + save_folder_and_hosts(g_folder) + mark_affected_sites_dirty(g_folder) + actions.append(_("The WATO folder")) - # Read current check configuration - cache_options = not html.var("_scan") and [ '--cache' ] or [] + # Is this host node of a cluster? + all_hosts = load_all_hosts() + clusters = [] + parents = [] + for somehost in all_hosts.values(): + if ".nodes" in somehost: + nodes = somehost[".nodes"] + if rename_host_in_list(somehost[".nodes"], oldname, newname): + clusters.append(somehost[".name"]) + folder = somehost['.folder'] + save_folder_and_hosts(folder) + mark_affected_sites_dirty(folder) - # We first try using the Cache (if the user has not pressed Full Scan). - # If we do not find any data, we omit the cache and immediately try - # again without using the cache. 
- try: - table = check_mk_automation(host[".siteid"], "try-inventory", cache_options + [hostname]) - if len(table) == 0 and cache_options != []: - table = check_mk_automation(host[".siteid"], "try-inventory", [hostname]) - html.set_var("_scan", "on") - except Exception, e: - if config.debug: - raise - html.show_error("Inventory failed for this host: %s" % e) - return + if somehost.get("parents"): + if rename_host_in_list(somehost["parents"], oldname, newname): + parents.append(somehost[".name"]) + folder = somehost['.folder'] + save_folder_and_hosts(folder) + mark_affected_sites_dirty(folder) - table.sort() + if clusters: + actions.append(_("The following cluster definitions: %s") % (", ".join(clusters))) - html.begin_form("checks", None, "POST") - fixall = 0 - if config.may("wato.services"): - for entry in table: - if entry[0] == 'new' and not html.has_var("_activate_all") and not firsttime: - html.button("_activate_all", _("Activate missing")) - fixall += 1 - break - for entry in table: - if entry[0] in [ 'obsolete', 'vanished', ]: - html.button("_cleanup", _("Remove exceeding")) - fixall += 1 - break + if parents: + actions.append(_("The parents of the following hosts: %s") % (", ".join(parents))) - if fixall == 2: - html.button("_fixall", _("Fix all missing/exceeding")) + # Rules that explicitely name that host (no regexes) + changed_rulesets = [] + def rename_host_in_folder_rules(folder): + rulesets = load_rulesets(folder) + changed = False + for varname, rules in rulesets.items(): + rulespec = g_rulespecs[varname] + for nr, rule in enumerate(rules): + value, tag_specs, host_list, item_list, rule_options = parse_rule(rulespec, rule) + if rename_host_in_list(host_list, oldname, newname): + newrule = construct_rule(rulespec, value, tag_specs, host_list, item_list, rule_options) + rules[nr] = newrule + changed_rulesets.append(varname) + changed = True + if changed: + save_rulesets(folder, rulesets) + mark_affected_sites_dirty(folder) + + for subfolder in folder['.folders'].values(): + rename_host_in_folder_rules(subfolder) + + rename_host_in_folder_rules(g_root_folder) + if changed_rulesets: + unique = set(changed_rulesets) + for varname in unique: + actions.append(_("%d WATO rules in ruleset %s") % ( + changed_rulesets.count(varname), g_rulespecs[varname]["title"])) + + # Business Intelligence rules + num_bi = rename_host_in_bi(oldname, newname) + if num_bi: + actions.append(_("%d BI rules and aggregations") % num_bi) + + # Now make sure that the remote site that contains that host is being + # synced. + + # 3. Check_MK stuff ------------------------------------------------ + # Things like autochecks, counters, etc. This has to be done via an + # automation, since it might have to happen on a remote site. During + # this automation the core will be stopped, after the renaming has + # taken place a new configuration will be created and the core started + # again. 
+    ip_lookup_failed = True
+    for what in check_mk_automation(host[".siteid"], "rename-host", [oldname, newname]):
+        if what == "cache":
+            actions.append(_("Cached output of monitoring agents"))
+        elif what == "counters":
+            actions.append(_("Files with performance counters"))
+        elif what == "piggyback-load":
+            actions.append(_("Piggyback information from other host"))
+        elif what == "piggyback-pig":
+            actions.append(_("Piggyback information for other hosts"))
+        elif what == "autochecks":
+            actions.append(_("Auto-discovered services of the host"))
+        elif what == "logwatch":
+            actions.append(_("Logfile information of logwatch plugin"))
+        elif what == "snmpwalk":
+            actions.append(_("A stored SNMP walk"))
+        elif what == "rrd":
+            actions.append(_("RRD databases with performance data"))
+        elif what == "rrdcached":
+            actions.append(_("RRD updates in journal of RRD Cache"))
+        elif what == "pnpspool":
+            actions.append(_("Spool files of PNP4Nagios"))
+        elif what == "nagvis":
+            actions.append(_("NagVis maps"))
+        elif what == "history":
+            actions.append(_("Monitoring history entries (events and availability)"))
+        elif what == "retention":
+            actions.append(_("The current monitoring state (including acknowledgements and downtimes)"))
+        elif what == "ipfail":
+            actions.append("<b>%s</b>" % (_("WARNING: the IP address lookup of "
+                "%s has failed. The core has been started by using the address 0.0.0.0 for now. "
+                "You will not be able to activate any changes until you have either updated your "
+                "DNS or configured an explicit address for %s.") % (newname, newname)))
+
+    # Notification settings ----------------------------------------------
+    # Notification rules - both global and users' ones
+    def rename_in_notification_rules(rules):
+        num_changed = 0
+        for rule in rules:
+            for key in [ "match_hosts", "match_exclude_hosts" ]:
+                if rule.get(key):
+                    if rename_host_in_list(rule[key], oldname, newname):
+                        num_changed += 1
+        return num_changed
+
+    users = userdb.load_users(lock = True)
+    some_user_changed = False
+    for userid, user in users.items():
+        if user.get("notification_rules"):
+            rules = user["notification_rules"]
+            num_changed = rename_in_notification_rules(rules)
+            if num_changed:
+                actions.append("%d notification rules of user %s" % (num_changed, userid))
+                some_user_changed = True
+
+    rules = load_notification_rules()
+    num_changed = rename_in_notification_rules(rules)
+    if num_changed:
+        actions.append(_("%d global notification rules") % num_changed)
+        save_notification_rules(rules)
+
+    # Notification channels of flexible notifications also can have host conditions
+    for userid, user in users.items():
+        method = user.get("notification_method")
+        if method and type(method) == tuple and method[0] == "flexible":
+            channels_changed = 0
+            for channel in method[1]:
+                if channel.get("only_hosts"):
+                    num_changed = rename_host_in_list(channel["only_hosts"], oldname, newname)
+                    if num_changed:
+                        channels_changed += 1
+                        some_user_changed = True
+            if channels_changed:
+                actions.append("%d flexible notification configurations of user %s" % (channels_changed, userid))

-    if len(table) > 0:
-        html.button("_save", _("Save manual check configuration"))
+    if some_user_changed:
+        userdb.save_users(users)

-    html.hidden_fields()
-    if html.var("_scan"):
-        html.hidden_field("_scan", "on")
+    # State of Multisite ---------------------------------------
+    # Favorites of users and maybe other settings. We simply walk through
+    # all directories rather than through the user database. That way we
+    # are sure that also currently non-existent users are being found and
+    # also only users that really have a profile.
+    users_changed = 0
+    total_changed = 0
+    for userid in os.listdir(config.config_dir):
+        if userid[0] != '.':
+            favpath = config.config_dir + "/" + userid + "/favorites.mk"
+            if os.path.exists(favpath):
+                try:
+                    num_changed = 0
+                    favorites = eval(file(favpath).read())
+                    for nr, entry in enumerate(favorites):
+                        if entry == oldname:
+                            favorites[nr] = newname
+                            num_changed += 1
+                        elif entry.startswith(oldname + ";"):
+                            favorites[nr] = newname + ";" + entry.split(";")[1]
+                            num_changed += 1
+                    if num_changed:
+                        file(favpath, "w").write(repr(favorites) + "\n")
+                        users_changed += 1
+                        total_changed += num_changed
+                except:
+                    if config.debug:
+                        raise
+    if users_changed:
+        actions.append(_("%d favorite entries of %d users") % (total_changed, users_changed))

-    html.write("<table class=data>\n")
+    call_hook_hosts_changed(g_root_folder)
+    return actions

-    for state_name, state_type, checkbox in [
-        ( _("Available (missing) services"), "new", firsttime ),
-        ( _("Already configured services"), "old", True, ),
-        ( _("Obsolete services (being checked, but should be ignored)"), "obsolete", True ),
-        ( _("Ignored services (configured away by admin)"), "ignored", False ),
-        ( _("Vanished services (checked, but no longer exist)"), "vanished", True ),
-        ( _("Active checks"), "active", None ),
-        ( _("Manual services (defined in main.mk)"), "manual", None ),
-        ( _("Legacy services (defined in main.mk)"), "legacy", None )
-        ]:
-        first = True
-        trclass = "even"
-        for st, ct, checkgroup, item, paramstring, params, descr, state, output, perfdata in table:
-            item = htmllib.attrencode(item or 'None')
-            if state_type != st:
-                continue
-            if first:
-                html.write('<tr class=groupheader><td colspan=7><br>%s</td></tr>\n' % state_name)
-                html.write("<tr><th>" + _("Status") + "</th><th>" + _("Checktype") + "</th><th>" + _("Item") + "</th><th>" + _("Service Description") + "</th><th>"
-                           + _("Current check") + "</th><th></th><th></th></tr>\n")
-                first = False
-            trclass = trclass == "even" and "odd" or "even"
-            statename = nagios_short_state_names.get(state, "PEND")
-            if statename == "PEND":
-                stateclass = "state svcstate statep"
-                state = 0 # for tr class
-            else:
-                stateclass = "state svcstate state%s" % state
-            html.write('<tr class="data %s%d">' % (trclass, state))
-            # Status, Checktype, Item, Description, Check Output
-            html.write('<td class="%s">%s</td><td>%s</td><td>%s</td><td>%s</td><td>%s</td>' %
-                       (stateclass, statename, ct, item,
-                        htmllib.attrencode(descr), htmllib.attrencode(output)))
-            # Icon for Rule editor, Check parameters
-            html.write("<td>")
-            varname = None
-            if checkgroup:
-                varname = "checkgroup_parameters:" + checkgroup
-            elif state_type == "active":
-                varname = "active_checks:" + ct
-
-            if varname and varname in g_rulespecs:
-                rulespec = g_rulespecs[varname]
-                url = make_link([("mode", "edit_ruleset"),
-                                 ("varname", varname),
-                                 ("host", hostname),
-                                 ("item", mk_repr(item))])
-                try:
-                    rulespec["valuespec"].validate_datatype(params, "")
-                    rulespec["valuespec"].validate_value(params, "")
-                    paramtext = rulespec["valuespec"].value_to_text(params)
-                except Exception, e:
-                    paramtext = _("Invalid check parameter: %s!") % e
-                    paramtext += _(" The parameter is: %r") % (params,)
-
-                # Strip all html code from the paramtext
-                paramtext = paramtext.replace('</td>', '\t')
-                paramtext = paramtext.replace('</tr>', '\n')
-                paramtext = htmllib.strip_tags(paramtext)
-
-                title = _("Check parameters for this service") + ": \n" + paramtext
-                html.write('<a href="%s"><img class=icon src="images/icon_rulesets.png" title="%s"></a>' %
-                           (url, title))
-
-            html.write("</td>")
-
-            # Checkbox
-            html.write("<td>")
-            if checkbox != None:
-                varname = "_%s_%s" % (ct, item)
-                html.checkbox(varname, checkbox)
-            html.write("</td></tr>\n")
-    html.end_form()
+#.
+# .--Host & Services Parameters Overview pages---------------------------.
+# |  ____                                _                                |
+# | |  _ \ __ _ _ __ __ _ _ __ ___   ___| |_ ___ _ __ ___                 |
+# | | |_) / _` | '__/ _` | '_ ` _ \ / _ \ __/ _ \ '__/ __|                |
+# | |  __/ (_| | | | (_| | | | | | |  __/ ||  __/ |  \__ \                |
+# | |_|   \__,_|_|  \__,_|_| |_| |_|\___|\__\___|_|  |___/                |
+# |                                                                       |
+# +----------------------------------------------------------------------+
+# | Mode for displaying and modifying the rule based host and service    |
+# | parameters. This is a host/service overview page over all things     |
+# | that can be modified via rules.                                      |
+# '----------------------------------------------------------------------'
+def mode_object_parameters(phase):
+    hostname = html.var("host") # may be empty in new/clone mode
+    host = g_folder[".hosts"][hostname]
+    is_cluster = ".nodes" in host
+    service = html.var("service")

-#.
-# .-Search---------------------------------------------------------------.
-# |                ____                      _                            |
-# |               / ___|  ___  __ _ _ __ ___| |__                         |
-# |               \___ \ / _ \/ _` | '__/ __| '_ \                        |
-# |                ___) |  __/ (_| | | | (__| | | |                       |
-# |               |____/ \___|\__,_|_|  \___|_| |_|                       |
-# |                                                                       |
-# +----------------------------------------------------------------------+
-# | Dialog for searching for hosts - globally in all files               |
-# '----------------------------------------------------------------------'
+    if hostname:
+        check_host_permissions(hostname)

-def mode_search(phase):
     if phase == "title":
-        return _("Search for hosts")
+        title = _("Parameters of") + " " + hostname
+        if service:
+            title += " / " + service
+        return title

     elif phase == "buttons":
-        global_buttons()
+        if service:
+            prefix = _("Host-")
+        else:
+            prefix = ""
         html.context_button(_("Folder"), make_link([("mode", "folder")]), "back")
+        if service:
+            service_status_button(hostname, service)
+        else:
+            host_status_button(hostname, "hoststatus")
+        html.context_button(prefix + _("Properties"), make_link([("mode", "edithost"), ("host", hostname)]), "edit")
+        html.context_button(_("Services"), make_link([("mode", "inventory"), ("host", hostname)]), "services")
+        if not is_cluster:
+            html.context_button(prefix + _("Diagnostic"),
+                      make_link([("mode", "diag_host"), ("host", hostname)]), "diagnose")
         return

     elif phase == "action":
-        return "search_results"
+        return

-    render_folder_path()
-
-    ## # Show search form
-    html.begin_form("edithost")
-    forms.header(_("General Properties"))
-    forms.section(_("Hostname"))
-    html.text_input("host")
-    html.set_focus("host")
+    # Now we collect all rulesets that apply to hosts, except those specifying
+    # new active or static checks
+    all_rulesets = load_all_rulesets()
+    groupnames = [ gn for gn, rulesets in g_rulespec_groups
+                   if not gn.startswith("static/") and
+                      not gn.startswith("checkparams/") and
+                      gn != "activechecks" ]
+    groupnames.sort()

-    # Attributes
-    configure_attributes(False, {}, "search", parent = None)
-
-    # Button
-    forms.end()
-    html.button("_global", _("Search globally"), "submit")
-    html.button("_local", _("Search in %s") % g_folder["title"], "submit")
-    html.hidden_fields()
-    html.end_form()
+    def render_rule_reason(title, title_url, reason, reason_url, is_default, setting):
+        if title_url:
+            title = '<a href="%s">%s</a>' % (title_url, title)
+        forms.section(title)
+
+        if reason:
+            reason = '<a href="%s">%s</a>' % (reason_url, reason)
+        if is_default:
+            reason = '<i>' + reason + '</i>'
+        html.write("<table class=setting><tr><td class=reason>%s</td>" % reason)
+        html.write('<td class="settingvalue %s">%s</td></tr></table>' % (is_default and "unused" or "used", setting))
+
+
+    # For services we do special handling for the origin and parameters
+    # of that service!
+    if service:
+        serviceinfo = check_mk_automation(host[".siteid"], "analyse-service", [hostname, service])
+        if serviceinfo:
+            forms.header(_("Check Origin and Parameters"), isopen = True, narrow=True, css="rulesettings")
+            origin = serviceinfo["origin"]
+            origin_txt = {
+                "active"  : _("Active check"),
+                "static"  : _("Manual check"),
+                "auto"    : _("Inventorized check"),
+                "classic" : _("Classical check"),
+            }[origin]
+            render_rule_reason(_("Type of check"), None, "", "", False, origin_txt)
+
+            # First case: discovered checks. They come from var/check_mk/autochecks/HOST.
+            if origin == "auto":
+                checkgroup = serviceinfo["checkgroup"]
+                checktype = serviceinfo["checktype"]
+                if not checkgroup:
+                    render_rule_reason(_("Parameters"), None, "", "", True, _("This check is not configurable via WATO"))
+
+                # Logwatch needs a special handling, since it is not configured
+                # via checkgroup_parameters but via "logwatch_rules" in a special
+                # WATO module.
+                elif checkgroup == "logwatch":
+                    rulespec = g_rulespecs["logwatch_rules"]
+                    output_analysed_ruleset(all_rulesets, rulespec, hostname,
+                                            serviceinfo["item"], serviceinfo["parameters"])
+                else:
+                    # Note: some discovered checks have a check group but
+                    # *no* ruleset for discovered checks. One example is "ps".
+                    # That can be configured as a manual check or created by
+                    # inventory. But in the latter case all parameters are set
+                    # by the inventory. This will be changed in a later version,
+                    # but we need to address it anyway.
+                    grouprule = "checkgroup_parameters:" + checkgroup
+                    if grouprule not in g_rulespecs:
+                        rulespec = g_rulespecs.get("static_checks:" + checkgroup)
+                        if rulespec:
+                            url = make_link([('mode', 'edit_ruleset'), ('varname', "static_checks:" + checkgroup), ('host', hostname)])
+                            render_rule_reason(_("Parameters"), url, _("Determined by discovery"), None, False,
+                                        rulespec["valuespec"]._elements[2].value_to_text(serviceinfo["parameters"]))
+                        else:
+                            render_rule_reason(_("Parameters"), None, "", "", True, _("This check is not configurable via WATO"))

-def mode_search_results(phase):
-    if phase == "title":
-        return _("Search results")
+                    else:
+                        rulespec = g_rulespecs[grouprule]
+                        output_analysed_ruleset(all_rulesets, rulespec, hostname,
+                                                serviceinfo["item"], serviceinfo["parameters"])
+
+            elif origin == "static":
+                checkgroup = serviceinfo["checkgroup"]
+                checktype = serviceinfo["checktype"]
+                if not checkgroup:
+                    html.write(_("This check is not configurable via WATO"))
+                else:
+                    rulespec = g_rulespecs["static_checks:" + checkgroup]
+                    itemspec = rulespec["itemspec"]
+                    if itemspec:
+                        item_text = itemspec.value_to_text(serviceinfo["item"])
+                        title = rulespec["itemspec"].title()
+                    else:
+                        item_text = serviceinfo["item"]
+                        title = _("Item")
+                    render_rule_reason(title, None, "", "", False, item_text)
+                    output_analysed_ruleset(all_rulesets, rulespec, hostname,
+                                            serviceinfo["item"], PARAMETERS_OMIT)
+                    html.write(rulespec["valuespec"]._elements[2].value_to_text(serviceinfo["parameters"]))
+                    html.write("</td></tr></table>")
+
+
+            elif origin == "active":
+                checktype = serviceinfo["checktype"]
+                rulespec = g_rulespecs["active_checks:" + checktype]
+                output_analysed_ruleset(all_rulesets, rulespec, hostname, None, serviceinfo["parameters"])
+
+            elif origin == "classic":
+                rule = all_rulesets["custom_checks"][serviceinfo["rule_nr"]]
+                # Find relative rule number in folder
+                old_folder = None
+                rel_nr = -1
+                for r in all_rulesets["custom_checks"]:
+                    if old_folder != r[0]:
+                        old_folder = r[0]
+                        rel_nr = -1
+                    rel_nr += 1
+                    if r is rule:
+                        break
+                url = make_link([('mode', 'edit_ruleset'), ('varname', "custom_checks"), ('host', hostname)])
+                forms.section('<a href="%s">%s</a>' % (url, _("Command Line")))
+                url = make_link([
+                    ('mode', 'edit_rule'),
+                    ('varname', "custom_checks"),
+                    ('rule_folder', rule[0][".path"]),
+                    ('rulenr', rel_nr),
+                    ('host', hostname)])
+
+                html.write('<table class=setting><tr><td class=reason><a href="%s">%s %d %s %s</a></td>' % (
+                    url, _("Rule"), rel_nr + 1, _("in"), rule[0]["title"]))
+                html.write("<td class=settingvalue used>")
+                if "command_line" in serviceinfo:
+                    html.write("<tt>%s</tt>" % serviceinfo["command_line"])
+                else:
+                    html.write(_("(no command line, passive check)"))
+                html.write("</td></tr></table>")

-    elif phase == "buttons":
-        global_buttons()
-        html.context_button(_("New Search"), html.makeuri([("mode", "search")]), "back")
-        return
+    last_maingroup = None
+    for groupname in groupnames:
+        maingroup = groupname.split("/")[0]
+        # Show information about a ruleset
+        # Sort rulesets according to their title
+        g_rulespec_group[groupname].sort(
+            cmp = lambda a, b: cmp(a["title"], b["title"]))

-    elif phase == "action":
-        return
+        for rulespec in g_rulespec_group[groupname]:
+            if (rulespec["itemtype"] == 'service') == (not service):
+                continue # This rule is not for hosts/services

-    crit = { ".name" : html.var("host") }
-    crit.update(collect_attributes(do_validate = False))
+            # Open form for that group here, if we know that we have at least one rule
+            if last_maingroup != maingroup:
+                last_maingroup = maingroup
+                grouptitle, grouphelp = g_rulegroups.get(maingroup, (maingroup, ""))
+                forms.header(grouptitle, isopen = maingroup == "monconf", narrow=True, css="rulesettings")
+                html.help(grouphelp)

-    if html.has_var("_local"):
-        folder = g_folder
-    else:
-        folder = g_root_folder
+            output_analysed_ruleset(all_rulesets, rulespec, hostname, service)

-    if not search_hosts_in_folders(folder, crit):
-        html.message(_("No matching hosts found."))
+    forms.end()

+PARAMETERS_UNKNOWN = []
+PARAMETERS_OMIT = []
+def output_analysed_ruleset(all_rulesets, rulespec, hostname, service, known_settings=PARAMETERS_UNKNOWN):
+    def rule_url(rule):
+        rule_folder, rule_nr = rule
+        return make_link([
+            ('mode', 'edit_rule'),
+            ('varname', varname),
+            ('rule_folder', rule_folder[".path"]),
+            ('rulenr', rule_nr),
+            ('host', hostname),
+            ('item', service and mk_repr(service) or '')])

-def search_hosts_in_folders(folder, crit):
-    num_found = 0
+    varname = rulespec["varname"]
+    valuespec = rulespec["valuespec"]
+    url = make_link([('mode', 'edit_ruleset'), ('varname', varname), ('host', hostname), ('item', mk_repr(service))])
+    forms.section('<a href="%s">%s</a>' % (url, rulespec["title"]))
+    setting, rules = analyse_ruleset(rulespec, all_rulesets[varname], hostname, service)
+    html.write('<table class="setting"><tr>')
+    html.write('<td class="reason">')

-    num_found = search_hosts_in_folder(folder, crit)
-    for f in folder[".folders"].values():
-        num_found += search_hosts_in_folders(f, crit)
+    # Show reason for the determined value
+    if len(rules) == 1:
+        rule_folder, rule_nr = rules[0]
+        url = rule_url(rules[0])
+        html.write('<a href="%s">%s</a>' % (rule_url(rules[0]), _("Rule %d in %s") % (rule_nr + 1, rule_folder["title"])))
+    elif len(rules) > 1:
+        html.write('<a href="%s">%d %s</a>' % (url, len(rules), _("Rules")))
+    else:
+        html.write("<i>" + _("Default Value") + "</i>")
+    html.write('</td><td class="settingvalue %s">' % (len(rules) > 0 and "used" or "unused"))

-    return num_found
+    # In some cases we know the settings from a check_mk automation
+    if known_settings is PARAMETERS_OMIT:
+        return

+    # Special handling for logwatch: The check parameter is always None. The actual
+    # patterns are configured in logwatch_rules. We do not have access to the actual
+    # patterns here but just to the useless "None". In order not to complicate things
+    # we simply display nothing here.
+    elif varname == "logwatch_rules":
+        pass

-def search_hosts_in_folder(folder, crit):
-    found = []
-    hosts = load_hosts(folder)
-    for hostname, host in hosts.items():
-        if crit[".name"] and crit[".name"].lower() not in hostname.lower():
+    elif known_settings is not PARAMETERS_UNKNOWN:
+        try:
+            html.write(valuespec.value_to_text(known_settings))
+        except Exception, e:
+            if config.debug:
+                raise
+            html.write(_("Invalid parameter %r: %s") % (known_settings, e))
+
+    else:
+        # For match type "dict" it can be the case the rule define some of the keys
+        # while other keys are taken from the factory defaults. We need to show the
+        # complete resulting value here.
+        if rules and rulespec["match"] == "dict":
+            if rulespec["factory_default"] is not NO_FACTORY_DEFAULT \
+                and rulespec["factory_default"] is not FACTORY_DEFAULT_UNUSED:
+                fd = rulespec["factory_default"].copy()
+                fd.update(setting)
+                setting = fd
+
+        if valuespec and not rules: # show the default value
+            # Some rulesets are ineffective if they are empty
+            if rulespec["factory_default"] is FACTORY_DEFAULT_UNUSED:
+                html.write(_("(unused)"))
+
+            # If there is a factory default then show that one
+            elif rulespec["factory_default"] is not NO_FACTORY_DEFAULT:
+                setting = rulespec["factory_default"]
+                html.write(valuespec.value_to_text(setting))
+
+            # Rulesets that build lists are empty if no rule matches
+            elif rulespec["match"] in ("all", "list"):
+                html.write(_("(no entry)"))
+
+            # Else we use the default value of the valuespec
+            else:
+                html.write(valuespec.value_to_text(valuespec.default_value()))
+
+        # We have a setting
+        elif valuespec:
+            if rulespec["match"] in ( "all", "list" ):
+                html.write(", ".join([valuespec.value_to_text(e) for e in setting]))
+            else:
+                html.write(valuespec.value_to_text(setting))
+
+        # Binary rule, no valuespec, outcome is True or False
+        else:
+            html.write('<img align=absmiddle class=icon title="%s" src="images/rule_%s%s.png">' % (
+                setting and _("yes") or _("no"), setting and "yes" or "no", not rules and "_off" or ""))
+
+    html.write("</td></tr></table>")
+
+# Returns the resulting value or None and
+# a list of matching rules. These are pairs
+# of rule_folder and rule_number
+def analyse_ruleset(rulespec, ruleset, hostname, service):
+    resultlist = []
+    resultdict = {}
+    effectiverules = []
+    old_folder = None
+    nr = -1
+    for ruledef in ruleset:
+        folder, rule = ruledef
+        if folder != old_folder:
+            old_folder = folder
+            nr = -1 # Start counting again in the new folder
+        nr += 1
+        value, tag_specs, host_list, item_list, rule_options = parse_rule(rulespec, rule)
+        if rule_options.get("disabled"):
             continue

-        # Compute inheritance
-        effective = effective_attributes(host, folder)
+        if True != rule_matches_host_and_item(rulespec, tag_specs, host_list, item_list, folder, g_folder, hostname, service):
+            continue

-        # Check attributes
-        dont_match = False
-        for attr, topic in host_attributes:
-            attrname = attr.name()
-            if attrname in crit and \
-               not attr.filter_matches(crit[attrname], effective.get(attrname), hostname):
-                dont_match = True
-                break
-        if dont_match:
-            continue
+        if rulespec["match"] == "all":
+            resultlist.append(value)
+            effectiverules.append((folder, nr))
+
+        elif rulespec["match"] == "list":
+            resultlist += value
+            effectiverules.append((folder, nr))
+
+        elif rulespec["match"] == "dict":
+            new_result = value.copy()
+            new_result.update(resultdict)
+            resultdict = new_result
+            effectiverules.append((folder, nr))
+
+        else:
+            return value, [(folder, nr)]
+
+    if rulespec["match"] in ("list", "all"):
+        return resultlist, effectiverules
+
+    elif rulespec["match"] == "dict":
+        return resultdict, effectiverules
+
+    else:
+        return None, [] # No match

-        found.append((hostname, host, effective))
-
-    if found:
-        render_folder_path(folder, True)
-        found.sort()
-        table.begin("");
-        for hostname, host, effective in found:
-            host_url = make_link_to([("mode", "edithost"), ("host", hostname)], folder)
-            table.row()
-            table.cell(_("Hostname"), '<a href="%s">%s</a>' % (host_url, hostname))
-            for attr, topic in host_attributes:
-                attrname = attr.name()
-                if attr.show_in_table():
-                    if attrname in host:
-                        tdclass, content = attr.paint(host[attrname], hostname)
-                    else:
-                        tdclass, content = attr.paint(effective[attrname], hostname)
-                        tdclass += " inherited"
-                    table.cell(attr.title(), content, css=tdclass)
-        table.end()
-    return len(found)

 #.
-# .-CSV-Import-----------------------------------------------------------.
-# |          ____ ______     __   ___                            _        |
-# |         / ___/ ___\ \   / /  |_ _|_ __ ___  _ __   ___  _ __| |_      |
-# |        | |   \___ \\ \ / /____| || '_ ` _ \| '_ \ / _ \| '__| __|     |
-# |        | |___ ___) |\ V /_____| || | | | | | |_) | (_) | |  | |_      |
-# |         \____|____/  \_/     |___|_| |_| |_| .__/ \___/|_|   \__|     |
-# |                                            |_|                        |
+# .--Host Diag-----------------------------------------------------------.
+# |              _   _           _     ____  _                            |
+# |             | | | | ___  ___| |_  |  _ \(_) __ _  __ _                |
+# |             | |_| |/ _ \/ __| __| | | | | |/ _` |/ _` |               |
+# |             |  _  | (_) \__ \ |_  | |_| | | (_| | (_| |               |
+# |             |_| |_|\___/|___/\__| |____/|_|\__,_|\__, |               |
+# |                                                  |___/                |
 # +----------------------------------------------------------------------+
-# | The following functions help implementing an import of hosts from    |
-# | third party applications, such as from CVS files. The import itsself |
-# | is not yet coded, but functions for dealing with the imported hosts. |
+# | Verify or find out a host's agent-related configuration.             |
 # '----------------------------------------------------------------------'

-def move_to_imported_folders(hosts):
-    c = wato_confirm(
-              _("Confirm moving hosts"),
-              _('You are going to move the selected hosts to folders '
-                'representing their original folder location in the system '
-                'you did the import from. Please make sure that you have '
-                'done an inventory before moving the hosts.'))
-    if c == False: # not yet confirmed
-        return ""
-    elif not c:
-        return None # browser reload
+def diag_host_tests():
+    return [
+        ('ping',          _('Ping')),
+        ('agent',         _('Agent')),
+        ('snmpv1',        _('SNMPv1')),
+        ('snmpv2',        _('SNMPv2c')),
+        ('snmpv2_nobulk', _('SNMPv2c (without Bulkwalk)')),
+        ('traceroute',    _('Traceroute')),
+    ]

-    # Create groups of hosts with the same target folder
-    targets = {}
-    for hostname in hosts:
-        host = g_folder[".hosts"][hostname]
-        effective = effective_attributes(host, g_folder)
-        imported_folder = effective.get('imported_folder')
-        if imported_folder == None:
-            continue
-        targets.setdefault(imported_folder, []).append(hostname)
+def mode_diag_host(phase):
+    hostname = html.var("host")
+    if not hostname:
+        raise MKGeneralException(_('The hostname is missing.'))

-        # Remove target folder information, now that the hosts are
-        # at their target position.
-        del host['imported_folder']
+    check_host_permissions(hostname)

-    # Now handle each target folder
-    num_moved = 0
-    for imported_folder, hosts in targets.items():
-        # Next problem: The folder path in imported_folder refers
-        # to the Alias of the folders, not to the internal file
-        # name. And we need to create folders not yet existing.
-        target_folder = create_target_folder_from_aliaspath(imported_folder)
-        num_moved += move_hosts_to(hosts, target_folder[".path"])
-        save_folder(target_folder)
-    save_folder(g_folder)
-    log_pending(AFFECTED, g_folder, "move-hosts", _("Moved %d imported hosts to their original destination.") % num_moved)
-    return None, _("Successfully moved %d hosts to their original folder destinations.") % num_moved
+    if phase == 'title':
+        return _('Diagnostic of host') + " " + hostname

+    elif phase == 'buttons':
+        html.context_button(_("Folder"), make_link([("mode", "folder")]), "back")
+        host_status_button(hostname, "hoststatus")
+        html.context_button(_("Properties"),
+                            make_link([("mode", "edithost"), ("host", hostname)]), "edit")
+        html.context_button(_("Parameters"),
+                            make_link([("mode", "object_parameters"), ("host", hostname)]), "rulesets")
+        html.context_button(_("Services"),
+                            make_link([("mode", "inventory"), ("host", hostname)]), "services")
+        return

-def create_target_folder_from_aliaspath(aliaspath):
-    # The alias path is a '/' separated path of folder titles.
-    # An empty path is interpreted as root path. The actual file
-    # name is the host list with the name "Hosts".
-    if aliaspath == "" or aliaspath == "/":
-        folder = g_root_folder
-    else:
-        parts = aliaspath.strip("/").split("/")
-        folder = g_root_folder
-        while len(parts) > 0:
-            # Look in current folder for subfolder with the target name
-            for name, f in folder.get(".folders", {}).items():
-                if f["title"] == parts[0]:
-                    folder = f
-                    parts = parts[1:]
-                    break
-            else: # not found. Create this folder
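# Editor's note: illustrative sketch, not part of the patch. It isolates the
# "dict" merge rule used by analyse_ruleset() a bit further up: rules are
# visited in order and a rule seen earlier keeps precedence over later ones.
# The toy settings are made up.

def _demo_combine_dict(values):
    resultdict = {}
    for value in values:
        new_result = value.copy()
        new_result.update(resultdict)  # rules seen earlier win
        resultdict = new_result
    return resultdict

assert _demo_combine_dict([{"timeout": 10}, {"timeout": 5, "retries": 2}]) == \
       {"timeout": 10, "retries": 2}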
-                name = create_wato_foldername(parts[0], folder)
-                new_path = folder[".path"]
-                if new_path:
-                    new_path += "/"
-                new_path += name
+    vs_host = Dictionary(
+        required_keys = ['hostname'],
+        elements = [
+            ('hostname', FixedValue(hostname,
+                title = _('Hostname'),
+                allow_empty = False
+            )),
+            ('ipaddress', IPv4Address(
+                title = _('IP address'),
+                allow_empty = False
+            )),
+            ('snmp_community', TextAscii(
+                title = _("SNMP Community"),
+                allow_empty = False
+            )),
+        ]
+    )

-                new_folder = {
-                    ".name"      : name,
-                    ".path"      : new_path,
-                    "title"      : parts[0],
-                    "attributes" : {},
-                    ".folders"   : {},
-                    ".files"     : {},
-                    ".parent"    : folder,
-                }
+    vs_rules = Dictionary(
+        optional_keys = False,
+        elements = [
+            ('agent_port', Integer(
+                minvalue = 1,
+                maxvalue = 65535,
+                default_value = 6556,
+                title = _("Check_MK Agent Port (<a href=\"%s\">Rules</a>)") % \
+                    make_link([('mode', 'edit_ruleset'), ('varname', 'agent_ports')]),
+                help = _("This variable allows to specify the TCP port to "
+                         "be used to connect to the agent on a per-host-basis.")
+            )),
+            ('snmp_timeout', Integer(
+                title = _("SNMP-Timeout (<a href=\"%s\">Rules</a>)") % \
+                    make_link([('mode', 'edit_ruleset'), ('varname', 'snmp_timing')]),
+                help = _("After a request is sent to the remote SNMP agent we will wait up to this "
+                         "number of seconds until assuming the answer got lost and retrying."),
+                default_value = 1,
+                minvalue = 1,
+                maxvalue = 60,
+                unit = _("sec"),
+            )),
+            ('snmp_retries', Integer(
+                title = _("SNMP-Retries (<a href=\"%s\">Rules</a>)") % \
+                    make_link([('mode', 'edit_ruleset'), ('varname', 'snmp_timing')]),
+                default_value = 5,
+                minvalue = 0,
+                maxvalue = 50,
+            )),
+            ('datasource_program', TextAscii(
+                title = _("Datasource Program (<a href=\"%s\">Rules</a>)") % \
+                    make_link([('mode', 'edit_ruleset'), ('varname', 'datasource_programs')]),
+                help = _("For agent based checks Check_MK allows you to specify an alternative "
+                         "program that should be called by Check_MK instead of connecting the agent "
+                         "via TCP. That program must output the agent's data on standard output in "
+                         "the same format the agent would do. This is for example useful for monitoring "
+                         "via SSH. The command line may contain the placeholders <IP> and "
+                         "<HOST>.")
+            ))
+        ]
+    )

-                if '.siteid' in folder:
-                    new_folder['.siteid'] = folder[".siteid"]
+    host = g_folder[".hosts"].get(hostname)

-                folder[".folders"][name] = new_folder
-                g_folders[new_path] = new_folder
-                folder = new_folder
-                parts = parts[1:]
-                save_folder(folder) # make sure, directory is created
-                reload_folder(folder)
+    if not host:
+        raise MKGeneralException(_('The given host does not exist.'))
+    if ".nodes" in host:
+        raise MKGeneralException(_('This view does not support cluster hosts.'))

-    return folder
+    if phase == 'action':
+        if not html.check_transaction():
+            return

+        if html.var('_save'):
+            # Save the ipaddress and/or community
+            mark_affected_sites_dirty(g_folder, hostname)
+
+            new = vs_host.from_html_vars('vs_host')
+            vs_host.validate_value(new, 'vs_host')
+            if 'ipaddress' in new:
+                host['ipaddress'] = new['ipaddress']
+            if 'snmp_community' in new:
+                host['snmp_community'] = new['snmp_community']
+            log_pending(AFFECTED, hostname, "edit-host", _("Edited properties of host via diagnose [%s]") % hostname)
+            save_folder_and_hosts(g_folder)
+            reload_hosts(g_folder)
+            call_hook_hosts_changed(g_folder)
+
+            html.del_all_vars()
+            html.set_var("host", hostname)
+            html.set_var("folder", g_folder[".path"])
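# Editor's note: illustrative sketch, not part of the patch. With the
# vs_rules defaults above (snmp_timeout = 1 sec, snmp_retries = 5), the
# longest an SNMP test should wait before giving up is roughly
# timeout * (retries + 1), assuming classic net-snmp retry behaviour
# (initial request plus one re-send per retry).

def _worst_case_snmp_wait(timeout_sec, retries):
    return timeout_sec * (retries + 1)

assert _worst_case_snmp_wait(1, 5) == 6  # seconds, with the dialog defaults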
-#.
-# .-Bulk-Inventory-------------------------------------------------------.
-# |  ____        _ _      ___                      _                      |
-# | | __ ) _   _| | | __ |_ _|_ ____   _____ _ __ | |_ ___  _ __ _   _   |
-# | |  _ \| | | | | |/ /  | || '_ \ \ / / _ \ '_ \| __/ _ \| '__| | | |  |
-# | | |_) | |_| | |   <   | || | | \ V /  __/ | | | || (_) | |  | |_| |  |
-# | |____/ \__,_|_|_|\_\ |___|_| |_|\_/ \___|_| |_|\__\___/|_|   \__, |  |
-# |                                                              |___/   |
-# +----------------------------------------------------------------------+
-# | When the user wants to scan the services of multiple hosts at once   |
-# | this function is used. There is no fine-tuning possibility. We       |
-# | simply do something like -I or -II on the list of hosts.             |
-# '----------------------------------------------------------------------'
+            return "edithost"
+        return

-def mode_bulk_inventory(phase):
-    if phase == "title":
-        return _("Bulk service detection (inventory)")
-
-    elif phase == "buttons":
-        html.context_button(_("Folder"), make_link([("mode", "folder")]), "back")
-        return
+    html.write('<table class="diag_host">')
+    html.write('<tr><td>')
+    html.begin_form('diag_host', method = "POST")
+    forms.header(_('Host Properties'))
+
+    forms.section(legend = False)
+    vs_host.render_input("vs_host", host)
+    html.help(vs_host.help())

-    elif phase == "action":
-        if html.var("_item"):
-            how = html.var("how")
-            try:
-                folderpath, hostname = html.var("_item").split("|")
-                folder = g_folders[folderpath]
-                load_hosts(folder)
-                host = folder[".hosts"][hostname]
-                eff = effective_attributes(host, folder)
-                site_id = eff.get("site")
-                counts = check_mk_automation(site_id, "inventory", [how, hostname])
-                #counts = ( 1, 2, 3, 4 )
-                result = repr([ 'continue', 1, 0 ] + list(counts)) + "\n"
-                result += _("Inventorized %s<br>\n") % hostname
-                mark_affected_sites_dirty(folder, hostname, sync=False, restart=True)
-                log_pending(AFFECTED, hostname, "bulk-inventory",
-                    _("Inventorized host: %d added, %d removed, %d kept, %d total services") % counts)
-                if "inventory_failed" in host:
-                    del host["inventory_failed"]
-                    save_hosts(folder) # Could be optimized, but difficult here
+    forms.end()

-            except Exception, e:
-                result = repr([ 'failed', 1, 1, 0, 0, 0, 0, ]) + "\n"
-                if site_id:
-                    msg = _("Error during inventory of %s on site %s: %s") % (hostname, site_id, e)
-                else:
-                    msg = _("Error during inventory of %s: %s") % (hostname, e)
-                if config.debug:
-                    msg += "<br><pre>%s</pre>" % format_exception().replace("\n", "<br>")
-                result += msg
-                if not host.get("inventory_failed"):
-                    host["inventory_failed"] = True
-                    save_hosts(folder)
-            html.write(result)
-            return ""
-        return
+    html.write('<div class="buttons">')
+    html.button("_save", _("Save & Exit"))
+    html.write('</div>')

-    # interactive progress is *not* done in action phase. It
-    # renders the page content itself.
+    forms.header(_('Options'))

-    def recurse_hosts(folder, recurse, only_failed):
-        entries = []
-        hosts = load_hosts(folder)
-        for hostname, host in hosts.items():
-            if not only_failed or host.get("inventory_failed"):
-                entries.append((hostname, folder))
-        if recurse:
-            for f in folder[".folders"].values():
-                entries += recurse_hosts(f, recurse, only_failed)
-        return entries
+    value = {}
+    forms.section(legend = False)
+    vs_rules.render_input("vs_rules", value)
+    html.help(vs_rules.help())
+    forms.end()

-    config.need_permission("wato.services")
+    html.button("_try", _("Test"))

+    html.hidden_fields()
+    html.end_form()

-    # 'all' not set -> only inventorize checked hosts
-    if not html.var("all"):
-        complete_folder = False
-        if html.get_checkbox("only_failed"):
-            filterfunc = lambda host: host.get("inventory_failed")
-        else:
-            filterfunc = None
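# Editor's note: illustrative sketch, not part of the patch. The
# ajax_diag_host() endpoint further down answers with two fields separated
# by a single space - a numeric state (0 = success, 1 = failed) followed by
# the text output - so a caller can split the response like this:

def _parse_diag_response(response):
    state, _sep, text = response.partition(" ")
    return int(state), text

assert _parse_diag_response("0 OK - 127.0.0.1 responds") == (0, "OK - 127.0.0.1 responds")
assert _parse_diag_response("1 timed out") == (1, "timed out")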
+    html.write('</td><td>')

-        hostnames = get_hostnames_from_checkboxes(filterfunc)
-        items = [ "%s|%s" % (g_folder[".path"], hostname)
-                  for hostname in hostnames ]
-        for hostname in hostnames:
-            check_host_permissions(hostname)
+    if not html.var('_try'):
+        html.message(_('You can diagnose the connection to a specific host using this dialog. '
+                       'You can either test whether your current configuration is still working '
+                       'or investigate in which ways a host can be reached. Simply configure the '
+                       'connection options you like to try on the right side of the screen and '
+                       'press the "Test" button. The results will be displayed here.'))
+    else:
+        for ident, title in diag_host_tests():
+            html.write('<h3>%s</h3>' % title)
+            html.write('<table class="data test">')
+            html.write('<tr class="data">')
+            html.write('<td class="icons">')
+            html.write('<img class="icon" id="%s_img">' % ident)
+            html.write('<a href="javascript:start_host_diag_test(\'%s\', \'%s\');">'
+                       '<img class="icon retry" id="%s_retry" title="%s"></a>' %
+                (ident, hostname, ident, _('Retry this test')))
+            html.write('<div class="log" id="%s_log"></div>' % ident)
+            html.write('</td></tr></table>')
+            html.javascript('start_host_diag_test("%s", "%s")' % (ident, hostname))

-    # all host in this folder, maybe recursively
-    else:
-        complete_folder = True
-        entries = recurse_hosts(g_folder, html.get_checkbox("recurse"), html.get_checkbox("only_failed"))
-        items = []
-        hostnames = []
-        for hostname, folder in entries:
-            check_host_permissions(hostname, folder=folder)
-            items.append("%s|%s" % (folder[".path"], hostname))
-            hostnames.append(hostname)
+    html.write('</td>')
+    html.write('</tr></table>')

+def ajax_diag_host():
+    try:
+        prepare_folder_info()

-    if html.var("_start"):
-        # Start interactive progress
-        interactive_progress(
-            items,
-            _("Bulk inventory"),  # title
-            [ (_("Total hosts"),      0),
-              (_("Failed hosts"),     0),
-              (_("Services added"),   0),
-              (_("Services removed"), 0),
-              (_("Services kept"),    0),
-              (_("Total services"),   0) ], # stats table
-            [ ("mode", "folder") ], # URL for "Stop/Finish" button
-            50, # ms to sleep between two steps
-            fail_stats = [ 1 ],
-        )
+        if not config.may('wato.diag_host'):
+            raise MKAuthException(_('You are not permitted to perform this action.'))

-    else:
-        html.begin_form("bulkinventory", None, "POST")
-        html.hidden_fields()
+        hostname = html.var("host")
+        if not hostname:
+            raise MKGeneralException(_('The hostname is missing.'))

-        # Mode of action
-        html.write("<p>\n")
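# Editor's note: illustrative sketch, not part of the patch. The removed
# bulk-inventory code nearby encodes each work item for the interactive
# progress dialog as "folderpath|hostname" and the worker splits it again
# with split("|"); the sample values are made up.

def _encode_item(folderpath, hostname):
    return "%s|%s" % (folderpath, hostname)

def _decode_item(item):
    folderpath, hostname = item.split("|")
    return folderpath, hostname

assert _decode_item(_encode_item("linux/web", "web01")) == ("linux/web", "web01")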
-        if not complete_folder:
-            html.write(_("You have selected %d hosts for bulk inventory. ") % len(hostnames))
-        html.write(_("Check_MK inventory will automatically find and configure "
-                     "services to be checked on your hosts.<br><br>"))
-        forms.header(_("Bulk Inventory"))
-        forms.section(_("Mode"))
-        html.radiobutton("how", "new",     True,  _("Find only new services") + "<br>")
-        html.radiobutton("how", "remove",  False, _("Remove obsolete services") + "<br>")
-        html.radiobutton("how", "fixall",  False, _("Find new & remove obsolete") + "<br>")
-        html.radiobutton("how", "refresh", False, _("Refresh all services (tabula rasa)") + "<br>")
+        host = g_folder[".hosts"].get(hostname)

-        forms.section(_("Selection"))
-        if complete_folder:
-            html.checkbox("recurse", True, label=_("Include all subfolders"))
-            html.write("<br>")
-        html.checkbox("only_failed", False, label=_("Only include hosts that failed on previous inventory"))
+        if not host:
+            raise MKGeneralException(_('The given host does not exist.'))
+        if ".nodes" in host:
+            raise MKGeneralException(_('This view does not support cluster hosts.'))

-        # Start button
-        forms.end()
-        html.button("_start", _("Start"))
+        _test = html.var('_test')
+        if not _test:
+            raise MKGeneralException(_('The test is missing.'))
+
+        # Execute a specific test
+        if _test not in dict(diag_host_tests()).keys():
+            raise MKGeneralException(_('Invalid test.'))
+        args = [
+            html.var('ipaddress'),
+            html.var('snmp_community'),
+            html.var('agent_port'),
+            html.var('snmp_timeout'),
+            html.var('snmp_retries'),
+            html.var('datasource_program'),
+        ]
+        result = check_mk_automation(host[".siteid"], "diag-host", [hostname, _test] + args)
+        # API is defined as follows: Two data fields, separated by space.
+        # First is the state: 0 or 1, 0 means success, 1 means failed.
+        # Second is treated as text output
+        html.write("%s %s" % (result[0], html.attrencode(result[1])))
+    except Exception, e:
+        import traceback
+        html.write("1 %s" % _("Exception: %s") % html.attrencode(traceback.format_exc()))

 #.
-# .-Bulk-Edit------------------------------------------------------------.
-# |  ____        _ _        _____    _ _ _                                |
-# | | __ ) _   _| | | __   | ____|__| (_) |_                              |
-# | |  _ \| | | | | |/ /   |  _| / _` | | __|                             |
-# | | |_) | |_| | |   <    | |__| (_| | | |_                              |
-# | |____/ \__,_|_|_|\_\   |_____\__,_|_|\__|                             |
-# |                                                                       |
+# .--Inventory & Services------------------------------------------------.
+# |          ____                  _                                      |
+# |         / ___|  ___ _ ____   _(_) ___ ___  ___                        |
+# |         \___ \ / _ \ '__\ \ / / |/ __/ _ \/ __|                       |
+# |          ___) |  __/ |   \ V /| | (_|  __/\__ \                       |
+# |         |____/ \___|_|    \_/ |_|\___\___||___/                       |
+# |                                                                       |
 # +----------------------------------------------------------------------+
-# | Change the attributes of a number of selected host at once. Also the |
-# | cleanup is implemented here: the bulk removal of explicit attribute  |
-# | values.                                                              |
+# | Mode for doing the inventory on a single host and/or showing and     |
+# | editing the current services of a host.                              |
 # '----------------------------------------------------------------------'
-def mode_bulk_edit(phase):
+def mode_inventory(phase, firsttime):
+    hostname = html.var("host")
+    if hostname not in g_folder[".hosts"]:
+        raise MKGeneralException(_("You called this page for a non-existing host."))
+    host = g_folder[".hosts"][hostname]
+
+    check_host_permissions(hostname)
+
     if phase == "title":
-        return _("Bulk edit hosts")
+        title = _("Services of host %s") % hostname
+        if html.var("_scan"):
+            title += _(" (live scan)")
+        else:
+            title += _(" (might be cached data)")
+        return title

     elif phase == "buttons":
-        html.context_button(_("Folder"), make_link([("mode", "folder")]), "back")
-        return
+        html.context_button(_("Folder"),
+                            make_link([("mode", "folder")]), "back")
+        host_status_button(hostname, "host")
+        html.context_button(_("Properties"), make_link([("mode", "edithost"), ("host", hostname)]), "edit")
+        html.context_button(_("Parameters"),
+                            make_link([("mode", "object_parameters"), ("host", hostname)]), "rulesets")
+        if ".nodes" not in host:
+            # only display for non cluster hosts
+            html.context_button(_("Diagnostic"),
+                                make_link([("mode", "diag_host"), ("host", hostname)]), "diagnose")
+        html.context_button(_("Full Scan"), html.makeuri([("_scan", "yes")]))

     elif phase == "action":
+        config.need_permission("wato.services")
+        check_host_permissions(hostname)
         if html.check_transaction():
-            config.need_permission("wato.edit_hosts")
-            changed_attributes = collect_attributes()
-            if "contactgroups" in changed_attributes:
-                if True != check_folder_permissions(g_folder, "write", False):
-                    raise MKAuthException(_("Sorry. In order to change the permissions of a host you need write "
-                                            "access to the folder it is contained in."))
+            # Settings for showing parameters
+            if html.var("_show_parameters"):
+                parameter_column = True
+                config.save_user_file("parameter_column", True)
+                return
+            elif html.var("_hide_parameters"):
+                parameter_column = False
+                config.save_user_file("parameter_column", False)
+                return

-            hostnames = get_hostnames_from_checkboxes()
-            # Check all permissions for doing any edit
-            for hostname in hostnames:
-                check_host_permissions(hostname)
+            cache_options = html.var("_scan") and [ '@scan' ] or [ '@noscan' ]
+            new_target = "folder"

-            for hostname in hostnames:
-                host = g_folder[".hosts"][hostname]
-                mark_affected_sites_dirty(g_folder, hostname)
-                host.update(changed_attributes)
-                mark_affected_sites_dirty(g_folder, hostname)
-                log_pending(AFFECTED, hostname, "bulk-edit", _("Changed attributes of host %s in bulk mode") % hostname)
-            save_folder_and_hosts(g_folder)
-            reload_hosts() # indirect host tag changes
-            call_hook_hosts_changed(g_folder)
-            return "folder"
-        return
+            if html.var("_refresh"):
+                counts, failed_hosts = check_mk_automation(host[".siteid"], "inventory", [ "@scan", "refresh", hostname ])
+                count_added, count_removed, count_kept, count_new = counts[hostname]
+                message = _("Refreshed check configuration of host [%s] with %d services") % \
+                            (hostname, count_added)
+                log_pending(LOCALRESTART, hostname, "refresh-autochecks", message)

-    hostnames = get_hostnames_from_checkboxes()
-    hosts = dict([(hn, g_folder[".hosts"][hn]) for hn in hostnames])
+            else:
+                table = check_mk_automation(host[".siteid"], "try-inventory", cache_options + [hostname])
+                table.sort()
+                active_checks = {}
+                for st, ct, checkgroup, item, paramstring, params, descr, state, output, perfdata in table:
+                    if (html.has_var("_cleanup") or html.has_var("_fixall")) \
+                        and st in [ "vanished", "obsolete" ]:
+                        pass
+                    elif (html.has_var("_activate_all") or html.has_var("_fixall")) \
+                        and st == "new":
+                        active_checks[(ct, item)] = paramstring
+                    else:
+                        varname = "_%s_%s" % (ct, html.varencode(item))
+                        if html.var(varname, "") != "":
+                            active_checks[(ct, item)] = paramstring
+                    if st.startswith("clustered"):
+                        active_checks[(ct, item)] = paramstring

-    html.write("<p>" + _("You have selected %d hosts for bulk edit. You can now change "
-               "host attributes for all selected hosts at once. ") % len(hostnames))
-    html.write(_("If a select is set to don't change then currenty not all selected "
-                 "hosts share the same setting for this attribute. If you leave that selection, all hosts "
-                 "will keep their individual settings.") + "<br><br>")
+                check_mk_automation(host[".siteid"], "set-autochecks", [hostname], active_checks)
+                if host.get("inventory_failed"):
+                    del host["inventory_failed"]
+                    save_hosts()
+                message = _("Saved check configuration of host [%s] with %d services") % \
+                            (hostname, len(active_checks))
+                log_pending(LOCALRESTART, hostname, "set-autochecks", message)

-    html.begin_form("edithost", None, "POST")
-    configure_attributes(False, hosts, "bulk", parent = g_folder)
-    forms.end()
-    html.button("_save", _("Save & Finish"))
-    html.hidden_fields()
-    html.end_form()
+            mark_affected_sites_dirty(g_folder, hostname, sync=False, restart=True)
+            return new_target, message
+        return "folder"

-#.
-# .-Bulk-Cleanup---------------------------------------------------------.
-# |  ____        _ _        ____ _                                        |
-# | | __ ) _   _| | | __   / ___| | ___  __ _ _ __  _   _ _ __           |
-# | |  _ \| | | | | |/ /  | |   | |/ _ \/ _` | '_ \| | | | '_ \          |
-# | | |_) | |_| | |   <   | |___| |  __/ (_| | | | | |_| | |_) |         |
-# | |____/ \__,_|_|_|\_\   \____|_|\___|\__,_|_| |_|\__,_| .__/          |
-# |                                                       |_|             |
-# +----------------------------------------------------------------------+
-# | Mode for removing attributes from host in bulk mode.                  |
-# '----------------------------------------------------------------------'
+    else:
+        show_service_table(host, firsttime)

-def mode_bulk_cleanup(phase):
-    if phase == "title":
-        return _("Bulk removal of explicit attributes")
+def show_service_table(host, firsttime):
+    hostname = host[".name"]

-    elif phase == "buttons":
-        html.context_button(_("Folder"), make_link([("mode", "folder")]), "back")
+    # Read current check configuration
+    cache_options = html.var("_scan") and [ '@scan' ] or [ '@noscan' ]
+    parameter_column = config.load_user_file("parameter_column", False)

+    # We first try using the Cache (if the user has not pressed Full Scan).
+    # If we do not find any data, we omit the cache and immediately try
+    # again without using the cache.
+    try:
+        checktable = check_mk_automation(host[".siteid"], "try-inventory", cache_options + [hostname])
+        if len(checktable) == 0 and cache_options != []:
+            checktable = check_mk_automation(host[".siteid"], "try-inventory", [ '@scan', hostname ])
+            html.set_var("_scan", "on")
+    except Exception, e:
+        if config.debug:
+            raise
+        html.show_error(_("Service discovery failed for this host: %s") % e)
         return

-    elif phase == "action":
-        if html.check_transaction():
-            config.need_permission("wato.edit_hosts")
-            to_clean = bulk_collect_cleaned_attributes()
-            if "contactgroups" in to_clean:
-                if True != check_folder_permissions(g_folder, "write", False):
-                    raise MKAuthException(_("Sorry. In order to change the permissions of a host you need write "
-                                            "access to the folder it is contained in."))
+    checktable.sort()

-            hostnames = get_hostnames_from_checkboxes()
+    html.begin_form("checks", method = "POST")
+    fixall = 0
+    if config.may("wato.services"):
+        for entry in checktable:
+            if entry[0] == 'new' and not html.has_var("_activate_all") and not firsttime:
+                html.button("_activate_all", _("Activate missing"))
+                fixall += 1
+                break
+        for entry in checktable:
+            if entry[0] in [ 'obsolete', 'vanished', ]:
+                html.button("_cleanup", _("Remove exceeding"))
+                fixall += 1
+                break

-            # Check all permissions for doing any edit
-            for hostname in hostnames:
-                check_host_permissions(hostname)
+        if fixall == 2:
+            html.button("_fixall", _("Fix all missing/exceeding"))

-            for hostname in hostnames:
-                mark_affected_sites_dirty(g_folder, hostname)
-                host = g_folder[".hosts"][hostname]
-                num_cleaned = 0
-                for attrname in to_clean:
-                    num_cleaned += 1
-                    if attrname in host:
-                        del host[attrname]
-                if num_cleaned > 0:
-                    log_pending(AFFECTED, hostname, "bulk-cleanup", _("Cleaned %d attributes of host %s in bulk mode") % (
-                        num_cleaned, hostname))
-                    mark_affected_sites_dirty(g_folder, hostname)
-            save_hosts(g_folder)
-            reload_hosts() # indirect host tag changes
-            return "folder"
-        return
+        if len(checktable) > 0:
+            html.button("_save", _("Save manual check configuration"))
+            html.button("_refresh", _("Automatic Refresh (Tabula Rasa)"))

-    hostnames = get_hostnames_from_checkboxes()
-    hosts = dict([(hn, g_folder[".hosts"][hn]) for hn in hostnames])
+        html.write("   ")
+        if parameter_column:
+            html.button("_hide_parameters", _("Hide Check Parameters"))
+        else:
+            html.button("_show_parameters", _("Show Check Parameters"))

-    html.write("<p>" + _("You have selected %d hosts for bulk cleanup. This means removing "
-               "explicit attribute values from hosts. The hosts will then inherit attributes "
-               "configured at the host list or folders or simply fall back to the builtin "
-               "default values.") % len(hostnames))
-    html.write("<br><br>")
+    html.hidden_fields()
+    if html.var("_scan"):
+        html.hidden_field("_scan", "on")

-    html.begin_form("bulkcleanup", None, "POST")
-    forms.header(_("Attributes to remove from hosts"))
-    if not bulk_cleanup_attributes(g_folder, hosts):
-        forms.end()
-        html.write(_("The selected hosts have no explicit attributes"))
-    else:
-        forms.end()
-        html.button("_save", _("Save & Finish"))
-    html.hidden_fields()
-    html.end_form()
+    table.begin(css ="data", searchable = False, limit = None, sortable = False)

+    # This option will later be switchable somehow
+    divid = 0
+    for state_name, check_source, checkbox in [
+        ( _("Available (missing) services"), "new", firsttime ),
+        ( _("Already configured services"), "old", True, ),
+        ( _("Obsolete services (being checked, but should be ignored)"), "obsolete", True ),
+        ( _("Disabled services (configured away by admin)"), "ignored", None),
+        ( _("Vanished services (checked, but no longer exist)"), "vanished", True ),
+        ( _("Active checks"), "active", None ),
+        ( _("Manual services (defined in main.mk)"), "manual", None ),
+        ( _("Legacy services (defined in main.mk)"), "legacy", None ),
+        ( _("Custom checks (defined via rule)"), "custom", None ),
+        ( _("Already configured clustered services (located on cluster host)"), "clustered_old", None ),
+        ( _("Available clustered services"), "clustered_new", None ),
+        ]:

-def bulk_collect_cleaned_attributes():
-    to_clean = []
-    for attr, topic in host_attributes:
-        attrname = attr.name()
-        if html.get_checkbox("_clean_" + attrname) == True:
-            to_clean.append(attrname)
-    return to_clean
+        first = True
+        for st, ct, checkgroup, item, paramstring, params, descr, state, output, perfdata in checktable:
+            item = html.attrencode(item or 'None')
+            if check_source != st:
+                continue
+            if first:
+                table.groupheader(state_name)
+                first = False
+            statename = nagios_short_state_names.get(state, _("PEND"))
+            if statename == _("PEND"):
+                stateclass = "state svcstate statep"
+                state = 0 # for tr class
+            else:
+                stateclass = "state svcstate state%s" % state
+            # html.write('<tr class="%s state%d">' % (trclass, state))

-def bulk_cleanup_attributes(the_file, hosts):
-    num_shown = 0
-    for attr, topic in host_attributes:
-        attrname = attr.name()
-
-        # only show attributes that at least on host have set
-        num_haveit = 0
-        for hostname, host in hosts.items():
-            if attrname in host:
-                num_haveit += 1
-
-        if num_haveit == 0:
-            continue
-
-        # If the attribute is mandatory and no value is inherited
-        # by file or folder, the attribute cannot be cleaned.
-        container = the_file
-        is_inherited = False
-        while container:
-            if "attributes" in container and attrname in container["attributes"]:
-                is_inherited = True
-                inherited_value = container["attributes"][attrname]
-                break
-            container = container.get(".parent")
-
-        num_shown += 1
-
-        # Legend and Help
-        forms.section(attr.title())
-
-        if attr.is_mandatory() and not is_inherited:
-            html.write(_("This attribute is mandatory and there is no value "
-                         "defined in the host list or any parent folder."))
-        else:
-            label = "clean this attribute on %s hosts" % \
-                (num_haveit == len(hosts) and "all selected" or str(num_haveit))
-            html.checkbox("_clean_%s" % attrname, False, label=label)
-        html.help(attr.help())
-
-    return num_shown > 0
+            table.row(css="data", state=state)
+
+            # Status, Checktype, Item, Description, Check Output
+            table.cell(_("Status"), statename, css=stateclass)
+            table.cell(_("Checkplugin"), ct)
+            table.cell(_("Item"), item)
+            table.cell(_("Service Description"), html.attrencode(descr))
+            table.cell(_("Plugin output"))
+
+            if defaults.omd_root and check_source in ( "custom", "active" ):
+                divid += 1
+                html.write("<div id='activecheck%d'><img class=icon title='%s' src='images/icon_loading.gif'></div>" % (divid, hostname))
+                html.final_javascript("execute_active_check('%s', '%s', '%s', '%s', 'activecheck%d');" % (
+                    host[".siteid"] or '', hostname, ct, item.replace("'", "\'"), divid))
+            else:
+                html.write(html.attrencode(output))
+
+            # Icon for Rule editor, Check parameters
+            varname = None
+            if checkgroup:
+                varname = "checkgroup_parameters:" + checkgroup
+            elif check_source == "active":
+                varname = "active_checks:" + ct
+
+            if parameter_column:
+                table.cell(_("Check Parameters"))
+                if varname and varname in g_rulespecs:
+                    rulespec = g_rulespecs[varname]
+                    try:
+                        rulespec["valuespec"].validate_datatype(params, "")
+                        rulespec["valuespec"].validate_value(params, "")
+                        paramtext = rulespec["valuespec"].value_to_text(params)
+                    except Exception, e:
+                        paramtext = _("Invalid check parameter: %s!") % e
+                        paramtext += _(" The parameter is: %r") % (params,)
+                        paramtext += _(" The varname is: %s") % varname
+
+                    html.write(paramtext)
+
+            # Icon for Service parameters. Not for missing services!
+            table.cell(css='buttons')
+            if check_source not in [ "new", "ignored" ]:
+                # Link to list of all rulesets affecting this service
+                params_url = make_link([("mode", "object_parameters"),
+                                        ("host", hostname),
+                                        ("service", descr)])
+                html.icon_button(params_url, _("View and edit the parameters for this service"), "rulesets")
+                url = make_link([("mode", "edit_ruleset"),
+                                 ("varname", varname),
+                                 ("host", hostname),
+                                 ("item", mk_repr(item))])
+                html.icon_button(url, _("Edit and analyze the check parameters of this service"), "check_parameters")
+
+            if check_source == "ignored":
+                url = make_link([("mode", "edit_ruleset"),
+                                 ("varname", "ignored_services"),
+                                 ("host", hostname),
+                                 ("item", mk_repr(descr))])
+                html.icon_button(url, _("Edit and analyze the disabled services rules"), "ignore")
+
+            # Permanently disable icon
+            if check_source in ['new', 'old']:
+                url = make_link([
+                    ('mode', 'edit_ruleset'),
+                    ('varname', 'ignored_services'),
+                    ('host', hostname),
+                    ('item', mk_repr(descr)),
+                    ('mode', 'new_rule'),
+                    ('_new_host_rule', '1'),
+                    ('filled_in', 'new_rule'),
+                    ('rule_folder', ''),
+                    ('back_mode', 'inventory'),
+                ])
+                html.icon_button(url, _("Create rule to permanently disable this service"), "ignore")
+
+            # Temporary ignore checkbox
+            table.cell()
+            if checkbox != None:
+                varname = "_%s_%s" % (ct, html.varencode(item))
+                html.checkbox(varname, checkbox, add_attr = ['title="%s"' % _('Temporarily ignore this service')])
+
+    table.end()
+    html.end_form()
"active-check", [ hostname, checktype, item ]) + except Exception, e: + status = 1 + output = str(e) + statename = nagios_short_state_names.get(status, "UNKN") + html.write("%d\n%s\n%s" % (status, statename, output)) #. -# .-Parentscan-----------------------------------------------------------. -# | ____ _ | -# | | _ \ __ _ _ __ ___ _ __ | |_ ___ ___ __ _ _ __ | -# | | |_) / _` | '__/ _ \ '_ \| __/ __|/ __/ _` | '_ \ | -# | | __/ (_| | | | __/ | | | |_\__ \ (_| (_| | | | | | -# | |_| \__,_|_| \___|_| |_|\__|___/\___\__,_|_| |_| | +# .--Search--------------------------------------------------------------. +# | ____ _ | +# | / ___| ___ __ _ _ __ ___| |__ | +# | \___ \ / _ \/ _` | '__/ __| '_ \ | +# | ___) | __/ (_| | | | (__| | | | | +# | |____/ \___|\__,_|_| \___|_| |_| | # | | # +----------------------------------------------------------------------+ -# | Automatic scan for parents (similar to cmk --scan-parents) | +# | Dialog for searching for hosts - globally in all files | # '----------------------------------------------------------------------' -def mode_parentscan(phase): + +def mode_search(phase): if phase == "title": - return _("Parent scan") + return _("Search for hosts") elif phase == "buttons": + global_buttons() html.context_button(_("Folder"), make_link([("mode", "folder")]), "back") return - # Ignored during initial form display - settings = { - "where" : html.var("where"), - "alias" : html.var_utf8("alias", "").strip() or None, - "recurse" : html.get_checkbox("recurse"), - "select" : html.var("select"), - "timeout" : saveint(html.var("timeout")) or 8, - "probes" : saveint(html.var("probes")) or 2, - "max_ttl" : saveint(html.var("max_ttl")) or 10, - "force_explicit" : html.get_checkbox("force_explicit"), - "ping_probes" : saveint(html.var("ping_probes")) or 0, - } + elif phase == "action": + return "search_results" - if phase == "action": - if html.var("_item"): - try: - # TODO: We could improve the performance by scanning - # in parallel. The automation already can do this. - # We would need to cluster hosts into bulks here. - folderpath, hostname = html.var("_item").split("|") - folder = g_folders[folderpath] - load_hosts(folder) - host = folder[".hosts"][hostname] - eff = effective_attributes(host, folder) - site_id = eff.get("site") - params = map(str, [ settings["timeout"], settings["probes"], settings["max_ttl"], settings["ping_probes"] ]) - gateways = check_mk_automation(site_id, "scan-parents", params + [hostname]) - gateway, state, skipped_gateways, error = gateways[0] + render_folder_path() - if state in [ "direct", "root", "gateway" ]: - message, pconf, gwcreat = \ - configure_gateway(state, site_id, folder, host, eff, gateway) - else: - message = error - pconf = False - gwcreat = False + ## # Show search form + html.begin_form("edithost", method = "POST") + forms.header(_("General Properties")) + forms.section(_("Hostname")) + html.text_input("host") + html.set_focus("host") - # Possible values for state are: - # failed, dnserror, garbled, root, direct, notfound, gateway - counts = [ 'continue', - 1, # Total hosts - gateway and 1 or 0, # Gateways found - state in [ "direct", "root" ] and 1 or 0, # Directly reachable hosts - skipped_gateways, # number of failed PING probes - state == "notfound" and 1 or 0, # No gateway found - pconf and 1 or 0, # New parents configured - gwcreat and 1 or 0, # Gateway hosts created - state in [ "failed", "dnserror", "garbled" ] and 1 or 0, # Errors - ] - result = "%r\n%s: %s
    \n" % (counts, hostname, message) + # Attributes + configure_attributes(False, {}, "search", parent = None) - except Exception, e: - result = repr([ 'failed', 1, 0, 0, 0, 0, 0, 1 ]) + "\n" - if site_id: - msg = _("Error during parent scan of %s on site %s: %s") % (hostname, site_id, e) - else: - msg = _("Error during parent scan of %s: %s") % (hostname, e) - if config.debug: - msg += "
    %s
    " % format_exception().replace("\n", "
    ") - result += msg + "\n
    " - html.write(result) - return "" + # Button + forms.end() + html.button("_global", _("Search globally"), "submit") + html.button("_local", _("Search in %s") % g_folder["title"], "submit") + html.hidden_fields() + html.end_form() + + +def mode_search_results(phase): + if phase == "title": + return _("Search results") + + elif phase == "buttons": + global_buttons() + html.context_button(_("New Search"), html.makeuri([("mode", "search")]), "back") return + elif phase == "action": + return - config.need_permission("wato.parentscan") + crit = { ".name" : html.var("host") } + crit.update(collect_attributes(do_validate = False)) - # interactive progress is *not* done in action phase. It - # renders the page content itself. + if html.has_var("_local"): + folder = g_folder + else: + folder = g_root_folder - # select: 'noexplicit' -> no explicit parents - # 'no' -> no implicit parents - # 'ignore' -> not important - def include_host(folder, host, select): - if select == 'noexplicit' and "parents" in host: - return False - elif select == 'no': - effective = effective_attributes(host, folder) - if effective.get("parents"): - return False - return True + if not search_hosts_in_folders(folder, crit): + html.message(_("No matching hosts found.")) - def recurse_hosts(folder, recurse, select): - entries = [] - hosts = load_hosts(folder) - for hostname, host in hosts.items(): - if include_host(folder, host, select): - entries.append((hostname, folder)) - if recurse: - for f in folder[".folders"].values(): - entries += recurse_hosts(f, recurse, select) - return entries - # 'all' not set -> only scan checked hosts in current folder, no recursion - if not html.var("all"): - complete_folder = False - items = [] - for hostname in get_hostnames_from_checkboxes(): - host = g_folder[".hosts"][hostname] - if include_host(g_folder, host, settings["select"]): - items.append("%s|%s" % (g_folder[".path"], hostname)) - # all host in this folder, maybe recursively - else: - complete_folder = True - entries = recurse_hosts(g_folder, settings["recurse"], settings["select"]) - items = [] - for hostname, folder in entries: - items.append("%s|%s" % (folder[".path"], hostname)) +def search_hosts_in_folders(folder, crit): + num_found = 0 + num_found = search_hosts_in_folder(folder, crit) + for f in folder[".folders"].values(): + num_found += search_hosts_in_folders(f, crit) - if html.var("_start"): - # Persist settings - config.save_user_file("parentscan", settings) + return num_found - # Start interactive progress - interactive_progress( - items, - _("Parent scan"), # title - [ (_("Total hosts"), 0), - (_("Gateways found"), 0), +def search_hosts_in_folder(folder, crit): + found = [] + + if check_folder_permissions(folder, "read", exception=False) != True: + return 0 + + hosts = load_hosts(folder) + for hostname, host in hosts.items(): + if crit[".name"] and crit[".name"].lower() not in hostname.lower(): + continue + + # Compute inheritance + effective = effective_attributes(host, folder) + + # Check attributes + dont_match = False + for attr, topic in host_attributes: + attrname = attr.name() + if attrname in crit and \ + not attr.filter_matches(crit[attrname], effective.get(attrname), hostname): + dont_match = True + break + if dont_match: + continue + + found.append((hostname, host, effective)) + + if found: + render_folder_path(folder, True) + found.sort(cmp = lambda a,b: cmp(num_split(a[0]), num_split(b[0]))) + + table.begin("search_hosts", ""); + for hostname, host, effective in found: + host_url = 
make_link_to([("mode", "edithost"), ("host", hostname)], folder) + table.row() + table.cell(_("Hostname"), '%s' % (host_url, hostname)) + for attr, topic in host_attributes: + attrname = attr.name() + if attr.show_in_table(): + if attrname in host: + tdclass, content = attr.paint(host[attrname], hostname) + else: + tdclass, content = attr.paint(effective[attrname], hostname) + tdclass += " inherited" + table.cell(attr.title(), content, css=tdclass) + table.end() + + return len(found) + +#. +# .--CSV-Import----------------------------------------------------------. +# | ____ ______ __ ___ _ | +# | / ___/ ___\ \ / / |_ _|_ __ ___ _ __ ___ _ __| |_ | +# | | | \___ \\ \ / /____| || '_ ` _ \| '_ \ / _ \| '__| __| | +# | | |___ ___) |\ V /_____| || | | | | | |_) | (_) | | | |_ | +# | \____|____/ \_/ |___|_| |_| |_| .__/ \___/|_| \__| | +# | |_| | +# +----------------------------------------------------------------------+ +# | The following functions help implement an import of hosts from | +# | third party applications, such as from CSV files. The import itself | +# | is not yet coded, but functions for dealing with the imported hosts. | +# '----------------------------------------------------------------------' + +def move_to_imported_folders(hosts): + c = wato_confirm( + _("Confirm moving hosts"), + _('You are going to move the selected hosts to folders ' + 'representing their original folder location in the system ' + 'you did the import from. Please make sure that you have ' + 'done an inventory before moving the hosts.')) + if c == False: # not yet confirmed + return "" + elif not c: + return None # browser reload + + # Create groups of hosts with the same target folder + targets = {} + for hostname in hosts: + host = g_folder[".hosts"][hostname] + effective = effective_attributes(host, g_folder) + imported_folder = effective.get('imported_folder') + if imported_folder == None: + continue + targets.setdefault(imported_folder, []).append(hostname) + + # Remove target folder information, now that the hosts are + # at their target position. + del host['imported_folder'] + + # Now handle each target folder + num_moved = 0 + for imported_folder, hosts in targets.items(): + # Next problem: The folder path in imported_folder refers + # to the Alias of the folders, not to the internal file + # name. And we need to create folders not yet existing. + target_folder = create_target_folder_from_aliaspath(imported_folder) + num_moved += move_hosts_to(hosts, target_folder[".path"]) + save_folder(target_folder) + save_folder(g_folder) + log_pending(AFFECTED, g_folder, "move-hosts", _("Moved %d imported hosts to their original destination.") % num_moved) + return None, _("Successfully moved %d hosts to their original folder destinations.") % num_moved + + +def create_target_folder_from_aliaspath(aliaspath): + # The alias path is a '/' separated path of folder titles. + # An empty path is interpreted as root path. The actual file + # name is the host list with the name "Hosts". + if aliaspath == "" or aliaspath == "/": + folder = g_root_folder + else: + parts = aliaspath.strip("/").split("/") + folder = g_root_folder + while len(parts) > 0: + # Look in current folder for subfolder with the target name + for name, f in folder.get(".folders", {}).items(): + if f["title"] == parts[0]: + folder = f + parts = parts[1:] + break + else: # not found.
Create this folder + name = create_wato_foldername(parts[0], folder) + new_path = folder[".path"] + if new_path: + new_path += "/" + new_path += name + + new_folder = { + ".name" : name, + ".path" : new_path, + "title" : parts[0], + "attributes" : {}, + ".folders" : {}, + ".files" : {}, + ".parent" : folder, + } + + if '.siteid' in folder: + new_folder['.siteid'] = folder[".siteid"] + + folder[".folders"][name] = new_folder + g_folders[new_path] = new_folder + folder = new_folder + parts = parts[1:] + save_folder(folder) # make sure, directory is created + reload_folder(folder) + + return folder + +#. +# .--Bulk Import---------------------------------------------------------. +# | ____ _ _ ___ _ | +# | | __ ) _ _| | | __ |_ _|_ __ ___ _ __ ___ _ __| |_ | +# | | _ \| | | | | |/ / | || '_ ` _ \| '_ \ / _ \| '__| __| | +# | | |_) | |_| | | < | || | | | | | |_) | (_) | | | |_ | +# | |____/ \__,_|_|_|\_\ |___|_| |_| |_| .__/ \___/|_| \__| | +# | |_| | +# +----------------------------------------------------------------------+ +# | Realizes a simple page with a single textbox which one can insert | +# | several hostnames, separated by different chars and submit it to | +# | create these hosts in the current folder. | +# '----------------------------------------------------------------------' + +def mode_bulk_import(phase): + if phase == "title": + return _('Bulk Host Import') + + elif phase == "buttons": + html.context_button(_("Folder"), make_link([("mode", "folder")]), "back") + + elif phase == "action": + if not html.check_transaction(): + return "folder" + + attributes = collect_attributes() + + config.need_permission("wato.manage_hosts") + check_folder_permissions(g_folder, "write") + check_user_contactgroups(attributes.get("contactgroups", (False, []))) + + hosts = html.var('_hosts') + if not hosts: + raise MKUserError('_hosts', _('Please specify at least one hostname.')) + + created = 0 + skipped = 0 + selected = [] + + # Split by all possible separators + hosts = hosts.replace(' ', ';').replace(',', ';').replace('\n', ';').replace('\r', '') + for hostname in hosts.split(';'): + if hostname in g_folder['.hosts']: + skipped += 1 + continue + elif not re.match('^[a-zA-Z0-9-_.]+$', hostname): + skipped += 1 + continue + + new_host = { + '.name' : hostname, + '.folder' : g_folder, + } + g_folder[".hosts"][hostname] = new_host + mark_affected_sites_dirty(g_folder, hostname) + + message = _("Created new host %s.") % hostname + log_pending(AFFECTED, hostname, "create-host", message) + g_folder["num_hosts"] += 1 + created += 1 + selected.append('_c_%s' % hostname) + + if not created: + return 'folder', _('No host has been imported.') + + else: + save_folder_and_hosts(g_folder) + reload_hosts(g_folder) + call_hook_hosts_changed(g_folder) + + if html.get_checkbox('_do_service_detection'): + # Create a new selection + weblib.set_rowselection('wato-folder-/'+g_folder['.path'], selected, 'set') + html.set_var('mode', 'bulkinventory') + html.set_var('show_checkboxes', '1') + return 'bulkinventory' + + result_txt = '' + if skipped > 0: + result_txt = _('Imported %d hosts, but skipped %d hosts. Hosts might ' + 'be skipped when they already exist or contain illegal chars.') % (created, skipped) + else: + result_txt = _('Imported %d hosts.') % created + + return 'folder', result_txt + + else: + html.begin_form("bulkimport", method = "POST") + + html.write('

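
# Illustration (not part of the patch): create_target_folder_from_aliaspath()
# above walks a '/'-separated path of folder *titles* and creates every level
# that does not exist yet. On plain nested dicts the same walk looks like
# this; ensure_path() and the simplified key naming are invented for the
# example (the real code derives the key via create_wato_foldername()):
def ensure_path(root, titles):
    folder = root
    for title in titles:
        for sub in folder[".folders"].values():
            if sub["title"] == title:
                folder = sub
                break
        else:
            # title not found on this level -> create an empty subfolder
            new_folder = { "title" : title, ".folders" : {} }
            folder[".folders"][title.lower()] = new_folder
            folder = new_folder
    return folder

# ensure_path({".folders": {}}, ["Datacenter", "Rack 1"]) creates both
# levels and returns the innermost folder dict.
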
') + html.write(_('Using this page you can import several hosts at once into ' + 'the chosen folder. You can paste a list of hostnames, separated ' + 'by comma, semicolon, space or newlines. These hosts will then be ' + 'added to the folder using the default attributes. If some of the ' + 'host names cannot be resolved via DNS, you must manually edit ' + 'those hosts later and add explicit IP addresses.')) + html.write('

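
# Illustration (not part of the patch): the import loop in mode_bulk_import()
# above normalizes all accepted separators to ';' and checks each name
# against a whitelist pattern. Standalone, with an invented helper name:
import re

def split_hostnames(text):
    normalized = text.replace(' ', ';').replace(',', ';') \
                     .replace('\n', ';').replace('\r', '')
    valid, invalid = [], []
    for name in normalized.split(';'):
        if not name:
            continue  # doubled separators produce empty fragments
        if re.match('^[a-zA-Z0-9-_.]+$', name):
            valid.append(name)
        else:
            invalid.append(name)
    return valid, invalid

# split_hostnames("web01, web02\ndb-1 bad!host")
# -> (['web01', 'web02', 'db-1'], ['bad!host'])
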
    ') + forms.header(_('Bulk Host Import')) + forms.section(_('Hosts')) + html.text_area('_hosts', cols = 70, rows = 10) + + forms.section(_('Options')) + html.checkbox('_do_service_detection', False, label = _('Perform automatic service discovery')) + forms.end() + + html.button('_import', _('Import')) + + html.hidden_fields() + html.end_form() + +#. +# .--Bulk-Inventory------------------------------------------------------. +# | ____ _ _ ___ _ | +# | | __ ) _ _| | | __ |_ _|_ ____ _____ _ __ | |_ ___ _ __ _ _ | +# | | _ \| | | | | |/ / | || '_ \ \ / / _ \ '_ \| __/ _ \| '__| | | | | +# | | |_) | |_| | | < | || | | \ V / __/ | | | || (_) | | | |_| | | +# | |____/ \__,_|_|_|\_\ |___|_| |_|\_/ \___|_| |_|\__\___/|_| \__, | | +# | |___/ | +# +----------------------------------------------------------------------+ +# | When the user wants to scan the services of multiple hosts at once | +# | this function is used. There is no fine-tuning possibility. We | +# | simply do something like -I or -II on the list of hosts. | +# '----------------------------------------------------------------------' + +def mode_bulk_inventory(phase): + if phase == "title": + return _("Bulk Service Discovery") + + elif phase == "buttons": + html.context_button(_("Folder"), make_link([("mode", "folder")]), "back") + return + + elif phase == "action": + if html.var("_item"): + if not html.check_transaction(): + html.write(repr([ 'failed', 0, 0, 0, 0, 0, 0, ]) + "\n") + html.write(_("Error during discovery: Maximum number of retries reached. " + "You need to restart the bulk service discovery")) + return "" + + how = html.var("how") + try: + site_id, folderpath, hostnamesstring = html.var("_item").split("|") + hostnames = hostnamesstring.split(";") + num_hosts = len(hostnames) + folder = g_folders[folderpath] + load_hosts(folder) + arguments = [how,] + hostnames + if html.var("use_cache"): + arguments = [ "@cache" ] + arguments + if html.var("do_scan"): + arguments = [ "@scan" ] + arguments + + unlock_exclusive() # Avoid freezing WATO when hosts do not respond timely + counts, failed_hosts = check_mk_automation(site_id, "inventory", arguments) + lock_exclusive() + load_hosts(folder) + + # sum up host individual counts to have a total count + sum_counts = [ 0, 0, 0, 0 ] # added, removed, kept, new + result_txt = '' + for hostname in hostnames: + sum_counts[0] += counts[hostname][0] + sum_counts[1] += counts[hostname][1] + sum_counts[2] += counts[hostname][2] + sum_counts[3] += counts[hostname][3] + host = folder[".hosts"][hostname] + if hostname in failed_hosts: + result_txt += _("Failed to inventorize %s: %s
    ") % (hostname, failed_hosts[hostname]) + if not host.get("inventory_failed") and not host.get(".folder", {}).get("_lock_hosts"): + host["inventory_failed"] = True + save_hosts(folder) + else: + result_txt += _("Inventorized %s
    \n") % hostname + mark_affected_sites_dirty(folder, hostname, sync=False, restart=True) + log_pending(AFFECTED, hostname, "bulk-inventory", + _("Inventorized host: %d added, %d removed, %d kept, %d total services") % + tuple(counts[hostname])) + + if "inventory_failed" in host and not host.get(".folder", {}).get("_lock_hosts"): + del host["inventory_failed"] + save_hosts(folder) # Could be optimized, but difficult here + + result = repr([ 'continue', num_hosts, len(failed_hosts) ] + sum_counts) + "\n" + result_txt + + except Exception, e: + result = repr([ 'failed', num_hosts, num_hosts, 0, 0, 0, 0, ]) + "\n" + if site_id: + msg = _("Error during inventory of %s on site %s
    %s%s
    ") % (", ".join(hostnames), e) + if config.debug: + msg += "
    %s

    " % html.attrencode(format_exception().replace("\n", "
    ")) + result += msg + html.write(result) + return "" + return + + + # interactive progress is *not* done in action phase. It + # renders the page content itself. + + def recurse_hosts(folder, recurse, only_failed): + entries = [] + hosts = load_hosts(folder) + for hostname, host in hosts.items(): + if not only_failed or host.get("inventory_failed"): + entries.append((hostname, folder)) + if recurse: + for f in folder[".folders"].values(): + entries += recurse_hosts(f, recurse, only_failed) + return entries + + config.need_permission("wato.services") + + if html.get_checkbox("only_failed_invcheck"): + restrict_to_hosts = find_hosts_with_failed_inventory_check() + else: + restrict_to_hosts = None + + if html.get_checkbox("only_ok_agent"): + skip_hosts = find_hosts_with_failed_agent() + else: + skip_hosts = [] + + # 'all' not set -> only inventorize checked hosts + hosts_to_inventorize = [] + + if not html.var("all"): + complete_folder = False + if html.get_checkbox("only_failed"): + filterfunc = lambda host: host.get("inventory_failed") + else: + filterfunc = None + + hostnames = get_hostnames_from_checkboxes(filterfunc) + for hostname in hostnames: + if restrict_to_hosts and hostname not in restrict_to_hosts: + continue + if hostname in skip_hosts: + continue + check_host_permissions(hostname) + host = g_folder[".hosts"][hostname] + eff = effective_attributes(host, g_folder) + site_id = eff.get("site") + hosts_to_inventorize.append( (site_id, g_folder[".path"], hostname) ) + + # all host in this folder, maybe recursively. New: we always group + # a bunch of subsequent hosts of the same folder into one item. + # That saves automation calls and speeds up mass inventories. + else: + complete_folder = True + entries = recurse_hosts(g_folder, html.get_checkbox("recurse"), html.get_checkbox("only_failed")) + items = [] + hostnames = [] + current_folder = None + num_hosts_in_current_chunk = 0 + for hostname, folder in entries: + if restrict_to_hosts != None and hostname not in restrict_to_hosts: + continue + if hostname in skip_hosts: + continue + check_host_permissions(hostname, folder=folder) + host = folder[".hosts"][hostname] + eff = effective_attributes(host, folder) + site_id = eff.get("site") + hosts_to_inventorize.append( (site_id, folder[".path"], hostname) ) + + # Create a list of items for the progress bar, where we group + # subsequent hosts that are in the same folder and site + hosts_to_inventorize.sort() + + current_site_and_folder = None + items = [] + hosts_in_this_item = 0 + bulk_size = int(html.var("bulk_size", 10)) + for site_id, folder_path, hostname in hosts_to_inventorize: + if not items or (site_id, folder_path) != current_site_and_folder or hosts_in_this_item >= bulk_size: + items.append("%s|%s|%s" % (site_id, folder_path, hostname)) + hosts_in_this_item = 1 + else: + items[-1] += ";" + hostname + hosts_in_this_item += 1 + current_site_and_folder = site_id, folder_path + + + if html.var("_start"): + # Start interactive progress + interactive_progress( + items, + _("Bulk Service Discovery"), # title + [ (_("Total hosts"), 0), + (_("Failed hosts"), 0), + (_("Services added"), 0), + (_("Services removed"), 0), + (_("Services kept"), 0), + (_("Total services"), 0) ], # stats table + [ ("mode", "folder") ], # URL for "Stop/Finish" button + 50, # ms to sleep between two steps + fail_stats = [ 1 ], + ) + + else: + html.begin_form("bulkinventory", method = "POST") + html.hidden_fields() + + # Mode of action + html.write("

    ") + if not complete_folder: + html.write(_("You have selected %d hosts for bulk discovery. ") % len(hostnames)) + html.write(_("Check_MK service discovery will automatically find and configure " + "services to be checked on your hosts.")) + forms.header(_("Bulk Discovery")) + forms.section(_("Mode")) + html.radiobutton("how", "new", True, _("Find only new services") + "
    ") + html.radiobutton("how", "remove", False, _("Remove obsolete services") + "
    ") + html.radiobutton("how", "fixall", False, _("Find new & remove obsolete") + "
    ") + html.radiobutton("how", "refresh", False, _("Refresh all services (tabula rasa)") + "
    ") + + forms.section(_("Selection")) + if complete_folder: + html.checkbox("recurse", True, label=_("Include all subfolders")) + html.write("
    ") + html.checkbox("only_failed", False, label=_("Only include hosts that failed on previous discovery")) + html.write("
    ") + html.checkbox("only_failed_invcheck", False, label=_("Only include hosts with a failed discovery check")) + html.write("
    ") + html.checkbox("only_ok_agent", False, label=_("Exclude hosts where the agent is unreachable")) + + forms.section(_("Performance options")) + html.checkbox("use_cache", True, label=_("Use cached data if present")) + html.write("
    ") + html.checkbox("do_scan", True, label=_("Do full SNMP scan for SNMP devices")) + html.write("
    ") + html.write(_("Number of hosts to handle at once:") + " ") + html.number_input("bulk_size", 10, size=3) + + # Start button + forms.end() + html.button("_start", _("Start")) + +def find_hosts_with_failed_inventory_check(): + return html.live.query_column( + "GET services\n" + "Filter: description = Check_MK inventory\n" # FIXME: Remove this one day + "Filter: description = Check_MK Discovery\n" + "Or: 2\n" + "Filter: state > 0\n" + "Columns: host_name") + +def find_hosts_with_failed_agent(): + return html.live.query_column( + "GET services\n" + "Filter: description = Check_MK\n" + "Filter: state >= 2\n" + "Columns: host_name") + +#. +# .--Bulk-Edit-----------------------------------------------------------. +# | ____ _ _ _____ _ _ _ | +# | | __ ) _ _| | | __ | ____|__| (_) |_ | +# | | _ \| | | | | |/ / | _| / _` | | __| | +# | | |_) | |_| | | < | |__| (_| | | |_ | +# | |____/ \__,_|_|_|\_\ |_____\__,_|_|\__| | +# | | +# +----------------------------------------------------------------------+ +# | Change the attributes of a number of selected host at once. Also the | +# | cleanup is implemented here: the bulk removal of explicit attribute | +# | values. | +# '----------------------------------------------------------------------' + +def mode_bulk_edit(phase): + if phase == "title": + return _("Bulk edit hosts") + + elif phase == "buttons": + html.context_button(_("Folder"), make_link([("mode", "folder")]), "back") + return + + elif phase == "action": + if html.check_transaction(): + config.need_permission("wato.edit_hosts") + + changed_attributes = collect_attributes() + if "contactgroups" in changed_attributes: + if True != check_folder_permissions(g_folder, "write", False): + raise MKAuthException(_("Sorry. In order to change the permissions of a host you need write " + "access to the folder it is contained in.")) + + hostnames = get_hostnames_from_checkboxes() + # Check all permissions for doing any edit + for hostname in hostnames: + check_host_permissions(hostname) + + for hostname in hostnames: + host = g_folder[".hosts"][hostname] + mark_affected_sites_dirty(g_folder, hostname) + host.update(changed_attributes) + mark_affected_sites_dirty(g_folder, hostname) + log_pending(AFFECTED, hostname, "bulk-edit", _("Changed attributes of host %s in bulk mode") % hostname) + save_folder_and_hosts(g_folder) + reload_hosts() # indirect host tag changes + call_hook_hosts_changed(g_folder) + return "folder" + return + + hostnames = get_hostnames_from_checkboxes() + hosts = dict([(hn, g_folder[".hosts"][hn]) for hn in hostnames]) + + html.write("

    " + _("You have selected %d hosts for bulk edit. You can now change " + "host attributes for all selected hosts at once. ") % len(hostnames)) + html.write(_("If a select is set to don't change then currenty not all selected " + "hosts share the same setting for this attribute. If you leave that selection, all hosts " + "will keep their individual settings.") + "

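
# Illustration (not part of the patch): bulk edit collects one dict of
# changed attributes and merges it into every selected host via
# dict.update(), so attributes the user left untouched keep their value.
# In isolation, with an invented helper name:
def bulk_update(hosts, changed_attributes):
    for hostname, host in hosts.items():
        host.update(changed_attributes)
    return hosts

# bulk_update({"web01": {"site": "muc"}, "web02": {}},
#             {"tag_criticality": "prod"})
# adds the tag to both hosts while web01 keeps its explicit site.
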
    ") + + html.begin_form("edithost", method = "POST") + configure_attributes(False, hosts, "bulk", parent = g_folder) + forms.end() + html.button("_save", _("Save & Finish")) + html.hidden_fields() + html.end_form() + + +#. +# .--Bulk-Cleanup--------------------------------------------------------. +# | ____ _ _ ____ _ | +# | | __ ) _ _| | | __ / ___| | ___ __ _ _ __ _ _ _ __ | +# | | _ \| | | | | |/ / | | | |/ _ \/ _` | '_ \| | | | '_ \ | +# | | |_) | |_| | | < | |___| | __/ (_| | | | | |_| | |_) | | +# | |____/ \__,_|_|_|\_\ \____|_|\___|\__,_|_| |_|\__,_| .__/ | +# | |_| | +# +----------------------------------------------------------------------+ +# | Mode for removing attributes from host in bulk mode. | +# '----------------------------------------------------------------------' + +def mode_bulk_cleanup(phase): + if phase == "title": + return _("Bulk removal of explicit attributes") + + elif phase == "buttons": + html.context_button(_("Folder"), make_link([("mode", "folder")]), "back") + return + + elif phase == "action": + if html.check_transaction(): + config.need_permission("wato.edit_hosts") + to_clean = bulk_collect_cleaned_attributes() + if "contactgroups" in to_clean: + if True != check_folder_permissions(g_folder, "write", False): + raise MKAuthException(_("Sorry. In order to change the permissions of a host you need write " + "access to the folder it is contained in.")) + hostnames = get_hostnames_from_checkboxes() + + # Check all permissions for doing any edit + for hostname in hostnames: + check_host_permissions(hostname) + + for hostname in hostnames: + mark_affected_sites_dirty(g_folder, hostname) + host = g_folder[".hosts"][hostname] + num_cleaned = 0 + for attrname in to_clean: + num_cleaned += 1 + if attrname in host: + del host[attrname] + if num_cleaned > 0: + log_pending(AFFECTED, hostname, "bulk-cleanup", _("Cleaned %d attributes of host %s in bulk mode") % ( + num_cleaned, hostname)) + mark_affected_sites_dirty(g_folder, hostname) + save_hosts(g_folder) + reload_hosts() # indirect host tag changes + return "folder" + return + + hostnames = get_hostnames_from_checkboxes() + hosts = dict([(hn, g_folder[".hosts"][hn]) for hn in hostnames]) + + html.write("

    " + _("You have selected %d hosts for bulk cleanup. This means removing " + "explicit attribute values from hosts. The hosts will then inherit attributes " + "configured at the host list or folders or simply fall back to the builtin " + "default values.") % len(hostnames)) + html.write("

    ") + + html.begin_form("bulkcleanup", method = "POST") + forms.header(_("Attributes to remove from hosts")) + if not bulk_cleanup_attributes(g_folder, hosts): + forms.end() + html.write(_("The selected hosts have no explicit attributes")) + else: + forms.end() + html.button("_save", _("Save & Finish")) + html.hidden_fields() + html.end_form() + + +def bulk_collect_cleaned_attributes(): + to_clean = [] + for attr, topic in host_attributes: + attrname = attr.name() + if html.get_checkbox("_clean_" + attrname) == True: + to_clean.append(attrname) + return to_clean + + +def bulk_cleanup_attributes(the_file, hosts): + num_shown = 0 + for attr, topic in host_attributes: + attrname = attr.name() + + # only show attributes that at least on host have set + num_haveit = 0 + for hostname, host in hosts.items(): + if attrname in host: + num_haveit += 1 + + if num_haveit == 0: + continue + + # If the attribute is mandatory and no value is inherited + # by file or folder, the attribute cannot be cleaned. + container = the_file + is_inherited = False + while container: + if "attributes" in container and attrname in container["attributes"]: + is_inherited = True + inherited_value = container["attributes"][attrname] + break + container = container.get(".parent") + + + num_shown += 1 + + # Legend and Help + forms.section(attr.title()) + + if attr.is_mandatory() and not is_inherited: + html.write(_("This attribute is mandatory and there is no value " + "defined in the host list or any parent folder.")) + else: + label = "clean this attribute on %s hosts" % \ + (num_haveit == len(hosts) and "all selected" or str(num_haveit)) + html.checkbox("_clean_%s" % attrname, False, label=label) + html.help(attr.help()) + + return num_shown > 0 + + + +#. +# .--Parentscan----------------------------------------------------------. +# | ____ _ | +# | | _ \ __ _ _ __ ___ _ __ | |_ ___ ___ __ _ _ __ | +# | | |_) / _` | '__/ _ \ '_ \| __/ __|/ __/ _` | '_ \ | +# | | __/ (_| | | | __/ | | | |_\__ \ (_| (_| | | | | | +# | |_| \__,_|_| \___|_| |_|\__|___/\___\__,_|_| |_| | +# | | +# +----------------------------------------------------------------------+ +# | Automatic scan for parents (similar to cmk --scan-parents) | +# '----------------------------------------------------------------------' +def mode_parentscan(phase): + if phase == "title": + return _("Parent scan") + + elif phase == "buttons": + html.context_button(_("Folder"), make_link([("mode", "folder")]), "back") + return + + # Ignored during initial form display + settings = { + "where" : html.var("where"), + "alias" : html.var_utf8("alias", "").strip() or None, + "recurse" : html.get_checkbox("recurse"), + "select" : html.var("select"), + "timeout" : saveint(html.var("timeout")) or 8, + "probes" : saveint(html.var("probes")) or 2, + "max_ttl" : saveint(html.var("max_ttl")) or 10, + "force_explicit" : html.get_checkbox("force_explicit"), + "ping_probes" : saveint(html.var("ping_probes")) or 0, + } + + if phase == "action": + if html.var("_item"): + try: + # TODO: We could improve the performance by scanning + # in parallel. The automation already can do this. + # We would need to cluster hosts into bulks here. 
+ folderpath, hostname = html.var("_item").split("|") + folder = g_folders[folderpath] + load_hosts(folder) + host = folder[".hosts"][hostname] + eff = effective_attributes(host, folder) + site_id = eff.get("site") + params = map(str, [ settings["timeout"], settings["probes"], settings["max_ttl"], settings["ping_probes"] ]) + gateways = check_mk_automation(site_id, "scan-parents", params + [hostname]) + gateway, state, skipped_gateways, error = gateways[0] + + if state in [ "direct", "root", "gateway" ]: + message, pconf, gwcreat = \ + configure_gateway(state, site_id, folder, host, eff, gateway) + else: + message = error + pconf = False + gwcreat = False + + # Possible values for state are: + # failed, dnserror, garbled, root, direct, notfound, gateway + counts = [ 'continue', + 1, # Total hosts + gateway and 1 or 0, # Gateways found + state in [ "direct", "root" ] and 1 or 0, # Directly reachable hosts + skipped_gateways, # number of failed PING probes + state == "notfound" and 1 or 0, # No gateway found + pconf and 1 or 0, # New parents configured + gwcreat and 1 or 0, # Gateway hosts created + state in [ "failed", "dnserror", "garbled" ] and 1 or 0, # Errors + ] + result = "%r\n%s: %s
    \n" % (counts, hostname, message) + + except Exception, e: + result = repr([ 'failed', 1, 0, 0, 0, 0, 0, 1 ]) + "\n" + if site_id: + msg = _("Error during parent scan of %s on site %s: %s") % (hostname, site_id, e) + else: + msg = _("Error during parent scan of %s: %s") % (hostname, e) + if config.debug: + msg += "
    %s
    " % html.attrencode(format_exception().replace("\n", "
    ")) + result += msg + "\n
    " + html.write(result) + return "" + return + + + config.need_permission("wato.parentscan") + + # interactive progress is *not* done in action phase. It + # renders the page content itself. + + # select: 'noexplicit' -> no explicit parents + # 'no' -> no implicit parents + # 'ignore' -> not important + def include_host(folder, host, select): + if select == 'noexplicit' and "parents" in host: + return False + elif select == 'no': + effective = effective_attributes(host, folder) + if effective.get("parents"): + return False + return True + + def recurse_hosts(folder, recurse, select): + entries = [] + hosts = load_hosts(folder) + for hostname, host in hosts.items(): + if include_host(folder, host, select): + entries.append((hostname, folder)) + + if recurse: + for f in folder[".folders"].values(): + entries += recurse_hosts(f, recurse, select) + return entries + + # 'all' not set -> only scan checked hosts in current folder, no recursion + if not html.var("all"): + complete_folder = False + items = [] + for hostname in get_hostnames_from_checkboxes(): + host = g_folder[".hosts"][hostname] + if include_host(g_folder, host, settings["select"]): + items.append("%s|%s" % (g_folder[".path"], hostname)) + + # all host in this folder, maybe recursively + else: + complete_folder = True + entries = recurse_hosts(g_folder, settings["recurse"], settings["select"]) + items = [] + for hostname, folder in entries: + items.append("%s|%s" % (folder[".path"], hostname)) + + + if html.var("_start"): + # Persist settings + config.save_user_file("parentscan", settings) + + + # Start interactive progress + interactive_progress( + items, + _("Parent scan"), # title + [ (_("Total hosts"), 0), + (_("Gateways found"), 0), (_("Directly reachable hosts"), 0), (_("Unreachable gateways"), 0), (_("No gateway found"), 0), @@ -3010,2393 +4309,5908 @@ ) else: - html.begin_form("parentscan", None, "POST") - html.hidden_fields() + html.begin_form("parentscan", method = "POST") + html.hidden_fields() + + # Mode of action + html.write("

    ") + if not complete_folder: + html.write(_("You have selected %d hosts for parent scan. ") % len(items)) + html.write("

    " + + _("The parent scan will try to detect the last gateway " + "on layer 3 (IP) before a host. This will be done by " + "calling traceroute. If a gateway is found by " + "that way and its IP address belongs to one of your " + "monitored hosts, that host will be used as the hosts " + "parent. If no such host exists, an artifical ping-only " + "gateway host will be created if you have not disabled " + "this feature.") + "

    ") + + forms.header(_("Settings for Parent Scan")) + + settings = config.load_user_file("parentscan", { + "where" : "subfolder", + "alias" : _("Created by parent scan"), + "recurse" : True, + "select" : "noexplicit", + "timeout" : 8, + "probes" : 2, + "ping_probes" : 5, + "max_ttl" : 10, + "force_explicit" : False, + }) + + # Selection + forms.section(_("Selection")) + if complete_folder: + html.checkbox("recurse", settings["recurse"], label=_("Include all subfolders")) + html.write("
    ") + html.radiobutton("select", "noexplicit", settings["select"] == "noexplicit", + _("Skip hosts with explicit parent definitions (even if empty)") + "
    ") + html.radiobutton("select", "no", settings["select"] == "no", + _("Skip hosts hosts with non-empty parents (also if inherited)") + "
    ") + html.radiobutton("select", "ignore", settings["select"] == "ignore", + _("Scan all hosts") + "
    ") + + # Performance + forms.section(_("Performance")) + html.write("" % _("sec")) + html.write("') + html.write("') + html.write('') + html.write('
    ") + html.write(_("Timeout for responses") + ":") + html.number_input("timeout", settings["timeout"], size=2) + html.write(" %s
    ") + html.write(_("Number of probes per hop") + ":") + html.number_input("probes", settings["probes"], size=2) + html.write('
    ") + html.write(_("Maximum distance (TTL) to gateway") + ":") + html.number_input("max_ttl", settings["max_ttl"], size=2) + html.write('
    ') + html.write(_("Number of PING probes") + ":") + html.help(_("After a gateway has been found, Check_MK checks if it is reachable " + "via PING. If not, it is skipped and the next gateway nearer to the " + "monitoring core is being tried. You can disable this check by setting " + "the number of PING probes to 0.")) + html.write("") + html.number_input("ping_probes", settings.get("ping_probes", 5), size=2) + html.write('
    ') + + # Configuring parent + forms.section(_("Configuration")) + html.checkbox("force_explicit", + settings["force_explicit"], label=_("Force explicit setting for parents even if setting matches that of the folder")) + + # Gateway creation + forms.section(_("Creation of gateway hosts")) + html.write(_("Create gateway hosts in
      ")) + html.radiobutton("where", "subfolder", settings["where"] == "subfolder", + _("in the subfolder %s/Parents") % g_folder["title"]) + html.write("
      ") + html.radiobutton("where", "here", settings["where"] == "here", + _("directly in the folder %s") % g_folder["title"]) + html.write("
      ") + html.radiobutton("where", "there", settings["where"] == "there", + _("in the same folder as the host")) + html.write("
      ") + html.radiobutton("where", "nowhere", settings["where"] == "nowhere", + _("do not create gateway hosts")) + html.write("
    ") + html.write(_("Alias for created gateway hosts") + ": ") + html.text_input("alias", settings["alias"]) + + # Start button + forms.end() + html.button("_start", _("Start")) + + +def configure_gateway(state, site_id, folder, host, effective, gateway): + # Settings for configuration and gateway creation + force_explicit = html.get_checkbox("force_explicit") + where = html.var("where") + alias = html.var("alias") + + # If we have found a gateway, we need to know a matching + # host name from our configuration. If there is none, + # we can create one, if the users wants this. The automation + # for the parent scan already tries to find such a host + # within the site. + gwcreat = False + + if gateway: + gw_host, gw_ip, dns_name = gateway + if not gw_host: + if where == "nowhere": + return _("No host %s configured, parents not set") % gw_ip, \ + False, False + + # Determine folder where to create the host. + elif where == "here": # directly in current folder + gw_folder = g_folder + elif where == "subfolder": + # Put new gateways in subfolder "Parents" of current + # folder. Does this folder already exist? + if "parents" in g_folder[".folders"]: + gw_folder = g_folder[".folders"]["parents"] + load_hosts(gw_folder) + else: + # Create new gateway folder + config.need_permission("wato.manage_folders") + check_folder_permissions(g_folder, "write") + gw_folder = { + ".name" : "parents", + ".parent" : g_folder, + ".path" : g_folder[".path"] + "/parents", + "title" : _("Parents"), + "attributes" : {}, + ".folders" : {}, + ".hosts" : {}, + "num_hosts" : 0, + } + g_folders[gw_folder[".path"]] = gw_folder + g_folder[".folders"]["parent"] = gw_folder + save_folder(gw_folder) + call_hook_folder_created(gw_folder) + log_pending(AFFECTED, gw_folder, "new-folder", + _("Created new folder %s during parent scant") + % gw_folder[".path"]) + elif where == "there": # In same folder as host + gw_folder = folder + load_hosts(gw_folder) + + # Create gateway host + config.need_permission("wato.manage_hosts") + check_folder_permissions(gw_folder, "write") + if dns_name: + gw_host = dns_name + elif site_id: + gw_host = "gw-%s-%s" % (site_id, gw_ip.replace(".", "-")) + else: + gw_host = "gw-%s" % (gw_ip.replace(".", "-")) + + new_host = { + ".name" : gw_host, + "ipaddress" : gw_ip, + ".folder" : gw_folder, + } + if alias: + new_host["alias"] = alias + + # Important: set the "site" attribute for the new host, but + # only set it explicitely if it differs from the id of the + # folder. 
+ e = effective_attributes(new_host, gw_folder) + if "site" in e and e["site"] != site_id: + new_host["site"] = site_id + + gw_folder[".hosts"][new_host[".name"]] = new_host + save_hosts(gw_folder) + reload_hosts(gw_folder) + save_folder(gw_folder) + mark_affected_sites_dirty(gw_folder, gw_host) + log_pending(AFFECTED, gw_host, "new-host", + _("Created new host %s during parent scan") % gw_host) + + reload_folder(gw_folder) + gwcreat = True + + parents = [ gw_host ] + + else: + parents = [] + + if effective["parents"] == parents: + return _("Parents unchanged at %s") % \ + (parents and ",".join(parents) or _("none")), False, gwcreat + + + config.need_permission("wato.edit_hosts") + check_host_permissions(host[".name"], folder=folder) + + if force_explicit: + host["parents"] = parents + else: + # Check which parents the host would have inherited + if "parents" in host: + del host["parents"] + effective = effective_attributes(host, folder) + if effective["parents"] != parents: + host["parents"] = parents + + if parents: + message = _("Set parents to %s") % ",".join(parents) + else: + message = _("Removed parents") + + mark_affected_sites_dirty(folder, host[".name"]) + save_hosts(folder) + log_pending(AFFECTED, host[".name"], "set-gateway", message) + return message, True, gwcreat + + +#. +# .--Random Hosts--------------------------------------------------------. +# | ____ _ _ _ _ | +# | | _ \ __ _ _ __ __| | ___ _ __ ___ | | | | ___ ___| |_ ___ | +# | | |_) / _` | '_ \ / _` |/ _ \| '_ ` _ \ | |_| |/ _ \/ __| __/ __| | +# | | _ < (_| | | | | (_| | (_) | | | | | | | _ | (_) \__ \ |_\__ \ | +# | |_| \_\__,_|_| |_|\__,_|\___/|_| |_| |_| |_| |_|\___/|___/\__|___/ | +# | | +# +----------------------------------------------------------------------+ +# | This module allows the creation of large numbers of random hosts, | +# | for test and development. | +# '----------------------------------------------------------------------' +def mode_random_hosts(phase): + if phase == "title": + return _("Random Hosts") + + elif phase == "buttons": + html.context_button(_("Folder"), make_link([("mode", "folder")]), "back") + return + + elif phase == "action": + if html.check_transaction(): + count = int(html.var("count")) + folders = int(html.var("folders")) + levels = int(html.var("levels")) + created = create_random_hosts(g_folder, count, folders, levels) + log_pending(AFFECTED, g_folder, "create-random-hosts", + _("Created %d random hosts in %d folders") % (created, folders)) + return "folder", _("Created %d random hosts.") % created + else: + return "folder" + + html.begin_form("random") + forms.header(_("Create Random Hosts")) + forms.section(_("Number to create")) + html.write("%s: " % _("Hosts to create in each folder")) + html.number_input("count", 10) + html.set_focus("count") + html.write("
    %s: " % _("Number of folders to create in each level")) + html.number_input("folders", 10) + html.write("
    %s: " % _("Levels of folders to create")) + html.number_input("levels", 1) + + forms.end() + html.button("start", _("Start!"), "submit") + html.hidden_fields() + html.end_form() + +def create_random_hosts(folder, count, folders, levels): + if levels == 0: + created = 0 + while created < count: + name = "random_%010d" % int(random.random() * 10000000000) + host = {"ipaddress" : "127.0.0.1"} + folder[".hosts"][name] = host + created += 1 + folder["num_hosts"] += count + save_folder_and_hosts(folder) + mark_affected_sites_dirty(folder) + reload_hosts() + return count + else: + total_created = 0 + if folder[".path"]: + prefixpath = folder[".path"] + "/" + else: + prefixpath = "" + created = 0 + while created < folders: + created += 1 + i = 1 + while True: + name = "folder_%02d" % i + if name not in folder[".folders"]: + break + i += 1 + title = "Subfolder %02d" % i + path = prefixpath + name + subfolder = { + ".parent" : folder, + ".name" : name, + ".folders" : {}, + ".hosts" : {}, + ".path" : path, + ".siteid" : None, + "attributes" : {}, + "num_hosts" : 0, + "title" : title, + } + g_folders[path] = subfolder + folder[".folders"][name] = subfolder + save_folder(subfolder) + total_created += create_random_hosts(subfolder, count, folders, levels - 1) + save_folder(folder) + return total_created + +#. +# .--Auditlog------------------------------------------------------------. +# | _ __ _ _ | +# | | | ___ __ _ / _(_) | ___ | +# | | | / _ \ / _` | |_| | |/ _ \ | +# | | |__| (_) | (_| | _| | | __/ | +# | |_____\___/ \__, |_| |_|_|\___| | +# | |___/ | +# +----------------------------------------------------------------------+ +# | Handling of the audit logfiles | +# '----------------------------------------------------------------------' +def mode_auditlog(phase): + if phase == "title": + return _("Audit Log") + + elif phase == "buttons": + home_button() + changelog_button() + if log_exists("audit") and config.may("wato.auditlog") and config.may("wato.edit"): + html.context_button(_("Download"), + html.makeactionuri([("_action", "csv")]), "download") + if config.may("wato.edit"): + html.context_button(_("Clear Log"), + html.makeactionuri([("_action", "clear")]), "trash") + return + + elif phase == "action": + if html.var("_action") == "clear": + config.need_permission("wato.auditlog") + config.need_permission("wato.edit") + return clear_audit_log_after_confirm() + + elif html.var("_action") == "csv": + config.need_permission("wato.auditlog") + return export_audit_log() + + audit = parse_audit_log("audit") + if len(audit) == 0: + html.write("
    " + _("The audit log is empty.") + "
    ") + else: + render_audit_log(audit, "audit") + +#. +# .--Pending & Replication-----------------------------------------------. +# | ____ _ _ | +# | | _ \ ___ _ __ __| (_)_ __ __ _ | +# | | |_) / _ \ '_ \ / _` | | '_ \ / _` | | +# | | __/ __/ | | | (_| | | | | | (_| | | +# | |_| \___|_| |_|\__,_|_|_| |_|\__, | | +# | |___/ | +# +----------------------------------------------------------------------+ +# | Mode for activating pending changes. Does also replication with | +# | remote sites in distributed WATO. | +# '----------------------------------------------------------------------' + +def mode_changelog(phase): + # See below for the usage of this weird variable... + global sitestatus_do_async_replication + try: + sitestatus_do_async_replication + except: + sitestatus_do_async_replication = False + + if phase == "title": + return _("Pending changes to activate") + + elif phase == "buttons": + home_button() + # Commit pending log right here, if all sites are up-to-date + if is_distributed() and global_replication_state() == "clean": + log_commit_pending() + + if config.may("wato.activate") and ( + (not is_distributed() and log_exists("pending")) + or (is_distributed() and global_replication_state() == "dirty")): + html.context_button(_("Activate Changes!"), + html.makeactionuri([("_action", "activate")]), + "apply", True, id="act_changes_button") + if get_last_wato_snapshot_file(): + html.context_button(_("Discard Changes!"), + html.makeactionuri([("_action", "discard")]), + "discard", id="discard_changes_button") + + if is_distributed(): + html.context_button(_("Site Configuration"), make_link([("mode", "sites")]), "sites") + + if config.may("wato.auditlog"): + html.context_button(_("Audit Log"), make_link([("mode", "auditlog")]), "auditlog") + + elif phase == "action": + action = html.var("_action", html.var("_siteaction")) + if action == "activate": + # Let host validators do their work + defective_hosts = validate_all_hosts([], force_all = True) + if defective_hosts: + raise MKUserError(None, _("You cannot activate changes while some hosts have " + "an invalid configuration: ") + ", ".join( + [ '%s' % (make_link([("mode", "edithost"), ("host", hn)]), hn) + for hn in defective_hosts.keys() ])) + + # If there are changes by other users, we need a confirmation + transaction_already_checked = False + changes = foreign_changes() + if changes: + table = "" + for user_id, count in changes.items(): + table += '' % \ + (config.alias_of_user(user_id), count, _("changes")) + table += '
    %s: %d %s
    ' + + if action in [ "activate", "sync_restart", "restart" ]: + title = _("Confirm activating foreign changes") + text = _("There are some changes made by your colleagues that you will " + "activate if you proceed:") + elif action == "sync": + title = _("Confirm synchronizing foreign changes") + text = _("There are some changes made by your colleagues that you will " + "synchronize if you proceed:") + else: + title = _("Confirm discarding foreign changes") + text = _("There are some changes made by your colleagues that you will " + "discard if you proceed:") + + c = wato_confirm(title, + HTML('' + text + table + + _("Do you really want to proceed?"))) + if c == False: + return "" + elif not c: + return None + transaction_already_checked = True + + if changes and not config.may("wato.activateforeign"): + raise MKAuthException(_("Sorry, you are not allowed to activate " + "changes of other users.")) + + if action == "discard": + # Now remove all currently pending changes by simply restoring the last automatically + # taken snapshot. Then activate the configuration. This should revert all pending changes. + file_to_restore = get_last_wato_snapshot_file() + if not file_to_restore: + raise MKUserError(None, _('There is no WATO snapshot to be restored.')) + log_pending(LOCALRESTART, None, "changes-discarded", + _("Discarded pending changes (Restored %s)") % html.attrencode(file_to_restore)) + extract_snapshot(file_to_restore) + activate_changes() + log_commit_pending() + return None, _("Successfully discarded all pending changes.") + + # Give hooks chance to do some pre-activation things (and maybe stop + # the activation) + try: + call_hook_pre_distribute_changes() + except Exception, e: + if config.debug: + raise + else: + raise MKUserError(None, "

    %s

    %s" % (_("Cannot activate changes"), e)) + + sitestatus_do_async_replication = False # see below + if html.has_var("_siteaction"): + config.need_permission("wato.activate") + site_id = html.var("_site") + action = html.var("_siteaction") + if transaction_already_checked or html.check_transaction(): + try: + # If the site has no pending changes but just needs restart, + # the button text is just "Restart". We do a sync anyway. This + # can be optimized in future but is the save way for now. + site = config.site(site_id) + if action in [ "sync", "sync_restart" ]: + response = synchronize_site(site, restart = action == "sync_restart") + else: + try: + restart_site(site) + response = True + except Exception, e: + response = str(e) + + if response == True: + return + else: + raise MKUserError(None, _("Error on remote access to site: %s") % response) + + except MKAutomationException, e: + raise MKUserError(None, _("Remote command on site %s failed:
    %s
    ") % (site_id, e)) + except Exception, e: + if config.debug: + raise + raise MKUserError(None, _("Remote command on site %s failed:
    %s
    ") % (site_id, e)) + + elif transaction_already_checked or html.check_transaction(): + config.need_permission("wato.activate") + create_snapshot({"comment": "Activated changes by %s" % config.user_id}) + + # Do nothing here, but let site status table be shown in a mode + # were in each site that is not up-to-date an asynchronus AJAX + # job is being startet that updates that site + sitestatus_do_async_replication = True + + else: # phase: regular page rendering + changes_activated = False + + if is_distributed(): + # Distributed WATO: Show replication state of each site + + html.write("

    %s

    " % _("Distributed WATO - Replication Status")) + repstatus = load_replication_status() + sites = [(name, config.site(name)) for name in config.sitenames() ] + sort_sites(sites) + html.write("") + html.write("") + html.write("" % _("ID") + + "" % _("Alias")) + html.write("" % _("Livestatus")) + html.write("" % + (sitestatus_do_async_replication and 3 or 6, _("Replication"))) + html.write("" + + "" % _("Status") + + "" % _("Version") + + "" % _("Core") + + "" % _("Ho.") + + "" % _("Sv.") + + "" % _("Uptime") + + "" % _("Multisite URL") + + "" % _("Type")) + if sitestatus_do_async_replication: + html.write("" % _("Replication result")) + else: + html.write("" % _("State") + + "" % _("Actions") + + "" % _("Last result")) + html.write("") + + odd = "odd" + num_replsites = 0 # for detecting end of bulk replication + for site_id, site in sites: + is_local = site_is_local(site_id) + + if not is_local and not site.get("replication"): + continue + + if site.get("disabled"): + ss = {} + status = "disabled" + else: + ss = html.site_status.get(site_id, {}) + status = ss.get("state", "unknown") + + srs = repstatus.get(site_id, {}) + + # Make row red, if site status is not online + html.write('' % odd) + odd = odd == "odd" and "even" or "odd" + + # ID & Alias + html.write("" % + (make_link([("mode", "edit_site"), ("edit", site_id)]), site_id)) + html.write("" % site.get("alias", "")) + + # Livestatus + html.write('' % (status)) + + # Livestatus-Version + html.write('' % ss.get("livestatus_version", "")) + + # Core-Version + html.write('' % ss.get("program_version", "")) + + # Hosts/services + html.write('' % + (site_id, ss.get("num_hosts", ""))) + html.write('' % + (site_id, ss.get("num_services", ""))) + + # Uptime / Last restart + if "program_start" in ss: + age_text = html.age_text(time.time() - ss["program_start"]) + else: + age_text = "" + html.write('' % age_text) + + # Multisite-URL + html.write("" % (not is_local + and "%s" % tuple([site.get("multisiteurl")]*2) or "")) + + # Type + sitetype = '' + if is_local: + sitetype = _("local") + elif site["replication"] == "slave": + sitetype = _("Slave") + html.write("" % sitetype) + + need_restart = srs.get("need_restart") + need_sync = srs.get("need_sync") and not site_is_local(site_id) + uptodate = not (need_restart or need_sync) + + # Start asynchronous replication + if sitestatus_do_async_replication: + html.write("") + else: + # State + html.write("") + + # Actions + html.write("") + + # Last result + result = srs.get("result", "") + if len(result) > 20: + result = html.strip_tags(result) + result = '%s...' % \ + (html.attrencode(result), result[:20]) + html.write("" % result) + + html.write("") + html.write("
    %s%s%s%s
    %s%s%s%s%s%s%s%s%s%s%s%s
    %s%s%s%s%s%s%s%s%s") + # Do only include sites that are known to be up + if not site_is_local(site_id) and not "secret" in site: + html.write("%s" % _("Not logged in.")) + else: + html.write('
    %s
    ' % + (site_id, uptodate and _("nothing to do") or "")) + if not uptodate: + if need_restart and need_sync: + what = "sync+restart" + elif need_restart: + what = "restart" + else: + what = "sync" + estimated_duration = srs.get("times", {}).get(what, 2.0) + html.javascript("wato_do_replication('%s', %d);" % + (site_id, int(estimated_duration * 1000.0))) + num_replsites += 1 + html.write("
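
# Illustration (not part of the patch): the duration estimate above picks
# one of three job kinds from the two per-site flags; the returned strings
# match the keys of the per-site "times" dict. Standalone, with an invented
# helper name (only meaningful while the site is not up-to-date):
def replication_job(need_sync, need_restart):
    if need_restart and need_sync:
        return "sync+restart"
    elif need_restart:
        return "restart"
    else:
        return "sync"

# replication_job(True, False) == "sync"; the duration stored under that
# key then paces the progress animation.
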
    ") + if srs.get("need_sync") and not site_is_local(site_id): + html.write('' % + _("This site is not update and needs a replication.")) + if srs.get("need_restart"): + html.write('' % + _("This site needs a restart for activating the changes.")) + if uptodate: + html.write('' % + _("This site is up-to-date.")) + html.write("") + sync_url = make_action_link([("mode", "changelog"), + ("_site", site_id), ("_siteaction", "sync")]) + restart_url = make_action_link([("mode", "changelog"), + ("_site", site_id), ("_siteaction", "restart")]) + sync_restart_url = make_action_link([("mode", "changelog"), + ("_site", site_id), ("_siteaction", "sync_restart")]) + if not site_is_local(site_id) and "secret" not in site: + html.write("%s" % _("Not logged in.")) + elif not uptodate: + if not site_is_local(site_id): + if srs.get("need_sync"): + html.buttonlink(sync_url, _("Sync")) + if srs.get("need_restart"): + html.buttonlink(sync_restart_url, _("Sync & Restart")) + else: + html.buttonlink(restart_url, _("Restart")) + else: + html.buttonlink(restart_url, _("Restart")) + html.write("%s
    ") + # The Javascript world needs to know, how many asynchronous + # replication jobs it should wait to be finished. + if sitestatus_do_async_replication and num_replsites > 0: + html.javascript("var num_replsites = %d;\n" % num_replsites) + + elif sitestatus_do_async_replication: + # Single site setup + if cmc_rush_ahead_activation(): + html.message(_("All changes have been activated.")) + changes_activated = True + else: + # Is rendered on the page after hitting the "activate" button + # Renders the html to show the progress and starts the sync via javascript + html.write("") + html.write("" % (_('Progress'), _('Status'))) + html.write('') + html.write('
    %s%s
    %s
    ' % _('activating...')) + + srs = load_replication_status().get(None, {}) + estimated_duration = srs.get("times", {}).get('act', 2.0) + html.javascript("wato_do_activation(%d);" % + (int(estimated_duration * 1000.0))) + + sitestatus_do_async_replication = None # could survive in global context! + + pending = parse_audit_log("pending") + if len(pending) == 0: + if not changes_activated: + html.write("
    " + _("There are no pending changes.") + "
    ") + else: + html.write('
    ') + render_audit_log(pending, "pending", hilite_others=True) + html.write('
    ') + +def get_last_wato_snapshot_file(): + for snapshot_file in get_snapshots(): + status = get_snapshot_status(snapshot_file) + if status['type'] == 'automatic' and not status['broken']: + return snapshot_file + +# Determine if other users have made pending changes +def foreign_changes(): + changes = {} + for t, linkinfo, user, action, text in parse_audit_log("pending"): + if user != '-' and user != config.user_id: + changes.setdefault(user, 0) + changes[user] += 1 + return changes + + +def log_entry(linkinfo, action, message, logfilename, user_id = None): + if type(message) == unicode: + message = message.encode("utf-8") + message = message.strip() + + # linkinfo is either a folder, or a hostname or None + if type(linkinfo) == dict and linkinfo[".path"] in g_folders: + link = linkinfo[".path"] + ":" + elif linkinfo == None: + link = "-" + elif linkinfo and ".hosts" in g_folder and linkinfo in g_folder[".hosts"]: # hostname in current folder + link = g_folder[".path"] + ":" + linkinfo + else: + link = ":" + linkinfo + + if user_id == None: + user_id = config.user_id + elif user_id == '': + user_id = '-' + + log_file = log_dir + logfilename + make_nagios_directory(log_dir) + f = create_user_file(log_file, "ab") + f.write("%d %s %s %s %s\n" % (int(time.time()), link, user_id, action, message)) + + +def log_audit(linkinfo, what, message, user_id = None): + if config.wato_use_git: + g_git_messages.append(message) + log_entry(linkinfo, what, message, "audit.log", user_id) + +# status is one of: +# SYNC -> Only sync neccessary +# RESTART -> Restart and sync neccessary +# SYNCRESTART -> Do sync and restart +# AFFECTED -> affected sites are already marked for sync+restart +# by mark_affected_sites_dirty(). +# LOCALRESTART-> Called after inventory. In distributed mode, affected +# sites have already been marked for restart. Do nothing here. +# In non-distributed mode mark for restart +def log_pending(status, linkinfo, what, message, user_id = None): + log_audit(linkinfo, what, message, user_id) + need_sidebar_reload() + + # On each change to the Check_MK configuration mark the agents to be rebuild + if 'need_to_bake_agents' in globals(): + need_to_bake_agents() + + # The latter one condition applies to slave sites + # Otherwise slave sites would trigger the cmcrushd + if not is_distributed() and not has_distributed_wato_file(): + if status != SYNC: + log_entry(linkinfo, what, message, "pending.log", user_id) + cmc_rush_ahead() + + + # Currently we add the pending to each site, regardless if + # the site is really affected. This needs to be optimized + # in future. 
+ else: + log_entry(linkinfo, what, message, "pending.log", user_id) + for siteid, site in config.sites.items(): + + changes = {} + + # Local site can never have pending changes to be synced + if site_is_local(siteid): + if status in [ RESTART, SYNCRESTART ]: + changes["need_restart"] = True + else: + if status in [ SYNC, SYNCRESTART ]: + changes["need_sync"] = True + if status in [ RESTART, SYNCRESTART ]: + changes["need_restart"] = True + update_replication_status(siteid, changes) + + # Make sure that a new snapshot for syncing will be created + # when the time comes to sync + remove_sync_snapshot(siteid) + +def cmc_rush_ahead(): + if defaults.omd_root: + socket_path = defaults.omd_root + "/tmp/run/cmcrush" + if os.path.exists(socket_path): + try: + changeid = str(random.randint(1, 100000000000000000)) + file(log_dir + "changeid", "w").write(changeid + "\n") + socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM) \ + .sendto(changeid, socket_path) + except: + if config.debug: + raise + + +def log_commit_pending(): + pending = log_dir + "pending.log" + if os.path.exists(pending): + os.remove(pending) + need_sidebar_reload() + +def clear_audit_log(): + path = log_dir + "audit.log" + if os.path.exists(path): + newpath = path + time.strftime(".%Y-%m-%d") + if os.path.exists(newpath): + n = 1 + while True: + n += 1 + with_num = newpath + "-%d" % n + if not os.path.exists(with_num): + newpath = with_num + break + os.rename(path, newpath) + +def clear_audit_log_after_confirm(): + c = wato_confirm(_("Confirm deletion of audit log"), + _("Do you really want to clear the audit log?")) + if c: + clear_audit_log() + return None, _("Cleared audit log.") + elif c == False: # not yet confirmed + return "" + else: + return None # browser reload + +def parse_audit_log(what): + path = log_dir + what + ".log" + if os.path.exists(path): + entries = [] + for line in file(path): + line = line.rstrip().decode("utf-8") + splitted = line.split(None, 4) + if len(splitted) == 5 and is_integer(splitted[0]): + splitted[0] = int(splitted[0]) + entries.append(splitted) + entries.reverse() + return entries + return [] + +def is_integer(i): + try: + int(i) + return True + except: + return False + +def log_exists(what): + path = log_dir + what + ".log" + return os.path.exists(path) + +def render_linkinfo(linkinfo): + if ':' in linkinfo: # folder:host + path, hostname = linkinfo.split(':', 1) + if path in g_folders: + folder = g_folders[path] + if hostname: + hosts = load_hosts(folder) + if hostname in hosts: + url = html.makeuri_contextless([("mode", "edithost"), + ("folder", path), ("host", hostname)]) + title = hostname + else: + return hostname + else: # only folder + url = html.makeuri_contextless([("mode", "folder"), ("folder", path)]) + title = g_folders[path]["title"] + else: + return linkinfo + else: + return "" + + return '<a href="%s">%s</a>' % (url, title) + +def get_timerange(t): + st = time.localtime(int(t)) + start = int(time.mktime(time.struct_time((st[0], st[1], st[2], 0, 0, 0, st[6], st[7], st[8])))) + end = start + 86399 + return start, end + +def fmt_date(t): + return time.strftime('%Y-%m-%d', time.localtime(t)) + +def fmt_time(t): + return time.strftime('%H:%M:%S', time.localtime(t)) + +def fmt_bytes(num): + for x in ['Bytes', 'KB', 'MB', 'GB', 'TB']: + if num < 1024.0: + if x == "Bytes": + return "%d %s" % (num, x) + else: + return "%3.1f %s" % (num, x) + num /= 1024.0 + +def paged_log(log): + start = int(html.var('start', 0)) + if not start: + start = int(time.time()) + + while True: + log_today, times = 
paged_log_from(log, start) + if len(log) == 0 or len(log_today) > 0: + return log_today, times + else: # No entries today, but log not empty -> go back in time + start -= 24 * 3600 + + +def paged_log_from(log, start): + start_time, end_time = get_timerange(start) + previous_log_time = None + next_log_time = None + first_log_index = None + last_log_index = None + for index, (t, linkinfo, user, action, text) in enumerate(log): + if t >= end_time: + # This log is too new + continue + elif first_log_index is None \ + and t < end_time \ + and t >= start_time: + # This is a log for this day. Save the first index + if first_log_index is None: + first_log_index = index + + # When possible save the timestamp of the previous log + if index > 0: + next_log_time = int(log[index - 1][0]) + + elif t < start_time and last_log_index is None: + last_log_index = index + # This is the next log after this day + previous_log_time = int(log[index][0]) + # Finished! + break + + if last_log_index is None: + last_log_index = len(log) + + return log[first_log_index:last_log_index], (start_time, end_time, previous_log_time, next_log_time) + +def display_paged((start_time, end_time, previous_log_time, next_log_time)): + html.write('
    ') + + if next_log_time is not None: + html.icon_button(html.makeuri([('start', get_timerange(int(time.time()))[0])]), + _("Most recent events"), "start") + html.icon_button(html.makeuri([('start', next_log_time)]), + '%s: %s' % (_("Newer events"), fmt_date(next_log_time)), + "back") + else: + html.empty_icon_button() + html.empty_icon_button() + + if previous_log_time is not None: + html.icon_button(html.makeuri([('start', previous_log_time)]), + '%s: %s' % (_("Older events"), fmt_date(previous_log_time)), + "forth") + else: + html.empty_icon_button() + html.write('
    ') + + +def render_audit_log(log, what, with_filename = False, hilite_others=False): + htmlcode = '' + if what == 'audit': + log, times = paged_log(log) + empty_msg = _("The log is empty. No host has been created or changed yet.") + elif what == 'pending': + empty_msg = _("No pending changes, monitoring server is up to date.") + + if len(log) == 0: + html.write("
    %s
    " % empty_msg) + return + + elif what == 'audit': + htmlcode += "

    " + _("Audit log for %s") % fmt_date(times[0]) + "

    " + + elif what == 'pending': + if is_distributed(): + htmlcode += "

    " + _("Changes that are not activated on all sites:") + "

    " + else: + htmlcode += "

    " + _("Changes that are not yet activated:") + "

    " + + if what == 'audit': + display_paged(times) + + htmlcode += '' % what + even = "even" + for t, linkinfo, user, action, text in log: + even = even == "even" and "odd" or "even" + hilite = hilite_others and user != '-' and config.user_id != user + htmlcode += '' % (even, hilite and 2 or 0) + htmlcode += '' % render_linkinfo(linkinfo) + htmlcode += '' % fmt_date(float(t)) + htmlcode += '' % fmt_time(float(t)) + htmlcode += '' + + htmlcode += '\n' % text + htmlcode += "
    %s%s%s' + user = user == '-' and ('%s' % _('internal')) or user + if hilite: + htmlcode += '' \ + % _("This change has been made by another user") + htmlcode += user + '%s
    " + + if what == 'audit': + html.write(htmlcode) + display_paged(times) + else: + html.write(htmlcode) + +def export_audit_log(): + html.req.content_type = "text/csv; charset=UTF-8" + filename = 'wato-auditlog-%s_%s.csv' % (fmt_date(time.time()), fmt_time(time.time())) + html.req.headers_out['Content-Disposition'] = 'attachment; filename=%s' % filename + titles = ( + _('Date'), + _('Time'), + _('Linkinfo'), + _('User'), + _('Action'), + _('Text'), + ) + html.write(','.join(titles) + '\n') + for t, linkinfo, user, action, text in parse_audit_log("audit"): + if linkinfo == '-': + linkinfo = '' + html.write(','.join((fmt_date(int(t)), fmt_time(int(t)), linkinfo, + user, action, '"' + text + '"')) + '\n') + return False + +#. +# .--Automation----------------------------------------------------------. +# | _ _ _ _ | +# | / \ _ _| |_ ___ _ __ ___ __ _| |_(_) ___ _ __ | +# | / _ \| | | | __/ _ \| '_ ` _ \ / _` | __| |/ _ \| '_ \ | +# | / ___ \ |_| | || (_) | | | | | | (_| | |_| | (_) | | | | | +# | /_/ \_\__,_|\__\___/|_| |_| |_|\__,_|\__|_|\___/|_| |_| | +# | | +# +----------------------------------------------------------------------+ +# | This code section deals with the interaction of Check_MK. It is used | +# | for doing inventory, showing the services of a host, deletion of a | +# | host and similar things. | +# '----------------------------------------------------------------------' + +def check_mk_automation(siteid, command, args=[], indata=""): + if not siteid or site_is_local(siteid): + return check_mk_local_automation(command, args, indata) + else: + return check_mk_remote_automation(siteid, command, args, indata) + + +def check_mk_local_automation(command, args=[], indata=""): + # Gather the command to use for executing --automation calls to check_mk + # - First try to use the check_mk_automation option from the defaults + # - When not set try to detect the command for OMD or non OMD installations + # - OMD 'own' apache mode or non OMD: check_mk --automation + # - OMD 'shared' apache mode: Full path to the binary and the defaults + sudoline = None + if defaults.check_mk_automation: + commandargs = defaults.check_mk_automation.split() + cmd = commandargs + [ command, '--' ] + args + else: + omd_mode, omd_site = html.omd_mode() + if not omd_mode or omd_mode == 'own': + commandargs = [ 'check_mk', '--automation' ] + cmd = commandargs + [ command, '--' ] + args + else: # OMD shared mode + commandargs = [ 'sudo', '/bin/su', '-', omd_site, '-c', 'check_mk --automation' ] + cmd = commandargs[:-1] + [ commandargs[-1] + ' ' + ' '.join([ command, '--' ] + args) ] + sudoline = "%s ALL = (root) NOPASSWD: /bin/su - %s -c check_mk\\ --automation\\ *" % (html.apache_user(), omd_site) + + sudo_msg = '' + if commandargs[0] == 'sudo': + if not sudoline: + if commandargs[1] == '-u': # skip -u USER in /etc/sudoers + sudoline = "%s ALL = (%s) NOPASSWD: %s *" % (html.apache_user(), commandargs[2], " ".join(commandargs[3:])) + else: + sudoline = "%s ALL = (root) NOPASSWD: %s *" % (html.apache_user(), commandargs[0], " ".join(commandargs[1:])) + + sudo_msg = ("

    The webserver is running as a user which has no rights on the " + "needed Check_MK/Nagios files.
    Please ensure you have set up " + "the sudo environment correctly, e.g. proceed as follows:

    \n" + "
    1. install sudo package
    2. \n" + "
    3. Append the following to the /etc/sudoers file:\n" + "
      # Needed for WATO - the Check_MK Web Administration Tool\n"
      +                    "Defaults:%s !requiretty\n"
      +                    "%s\n"
      +                    "
    4. \n" + "
    5. Retry this operation
    \n" % + (html.apache_user(), sudoline)) + + if command in [ 'restart', 'reload' ]: + try: + call_hook_pre_activate_changes() + except Exception, e: + if config.debug: + raise + html.show_error(_("

    Cannot activate changes

    %s") % e) + return + + try: + # This debug output makes problems when doing bulk inventory, because + # it garbles the non-HTML response output + # if config.debug: + # html.write("
    Running %s
    \n" % " ".join(cmd)) + p = subprocess.Popen(cmd, + stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, close_fds=True) + except Exception, e: + if commandargs[0] == 'sudo': + raise MKGeneralException("Cannot execute %s: %s

    %s" % (commandargs[0], e, sudo_msg)) + else: + raise MKGeneralException("Cannot execute %s: %s" % (commandargs[0], e)) + p.stdin.write(repr(indata)) + p.stdin.close() + outdata = p.stdout.read() + exitcode = p.wait() + if exitcode != 0: + if config.debug: + raise MKGeneralException("Error running %s (exit code %d):
    %s
    %s" % + (" ".join(cmd), exitcode, hilite_errors(outdata), outdata.lstrip().startswith('sudo:') and sudo_msg or '')) + else: + raise MKGeneralException(hilite_errors(outdata)) + + + # On successful "restart" command execute the activate changes hook + if command in [ 'restart', 'reload' ]: + call_hook_activate_changes() + + try: + return eval(outdata) + except Exception, e: + raise MKGeneralException("Error running %s. Invalid output from webservice (%s):
    %s
    " % + (" ".join(cmd), e, outdata)) + + +def hilite_errors(outdata): + return re.sub("\nError: *([^\n]*)", "\n
    Error: \\1
    ", outdata) + + +#. +# .--Progress------------------------------------------------------------. +# | ____ | +# | | _ \ _ __ ___ __ _ _ __ ___ ___ ___ | +# | | |_) | '__/ _ \ / _` | '__/ _ \/ __/ __| | +# | | __/| | | (_) | (_| | | | __/\__ \__ \ | +# | |_| |_| \___/ \__, |_| \___||___/___/ | +# | |___/ | +# +----------------------------------------------------------------------+ +# | Bulk inventory and other longer procedures are separated in single | +# | steps and run by an JavaScript scheduler showing a progress bar and | +# | buttons for aborting and pausing. | +# '----------------------------------------------------------------------' + +# success_stats: Fields from the stats list to use for checking if something has been found +# fail_stats: Fields from the stats list to used to count failed elements +def interactive_progress(items, title, stats, finishvars, timewait, success_stats = [], termvars = [], fail_stats = []): + if not termvars: + termvars = finishvars; + html.write("
    ") + html.write("") + html.write("" % title) + html.write("") + html.write("") + html.write("") + html.write("") + html.write("
    %s
    ") + html.write(" " + "
    ") + html.write("
    ") + html.write(" ") + html.write("
    ") + html.write(" ") + for num, (label, value) in enumerate(stats): + html.write(" " % (label, num, value)) + html.write("
    %s%d
    ") + html.write("
    ") + html.jsbutton('progress_pause', _('Pause'), 'javascript:progress_pause()') + html.jsbutton('progress_proceed', _('Proceed'), 'javascript:progress_proceed()', 'display:none') + html.jsbutton('progress_finished', _('Finish'), 'javascript:progress_end()', 'display:none') + html.jsbutton('progress_retry', _('Retry Failed Hosts'), 'javascript:progress_retry()', 'display:none') + html.jsbutton('progress_restart', _('Restart'), 'javascript:location.reload()') + html.jsbutton('progress_abort', _('Abort'), 'javascript:progress_end()') + html.write("
    ") + html.write("
    ") + json_items = '[ %s ]' % ',\n'.join([ "'" + h + "'" for h in items ]) + success_stats = '[ %s ]' % ','.join(map(str, success_stats)) + fail_stats = '[ %s ]' % ','.join(map(str, fail_stats)) + # Remove all sel_* variables. We do not need them for our ajax-calls. + # They are just needed for the Abort/Finish links. Those must be converted + # to POST. + base_url = html.makeuri([], remove_prefix = "sel") + finish_url = make_link([("mode", "folder")] + finishvars) + term_url = make_link([("mode", "folder")] + termvars) + + # Reserve a certain amount of transids for the progress scheduler + # Each json item requires one transid. Additionally, each "Retry failed hosts" eats + # up another one. We reserve 20 additional transids for the retry function + # Note: The "retry option" ignores the bulk size + transids = [] + for i in range(len(items) + 20): + transids.append(html.fresh_transid()) + json_transids = '[ %s ]' % ',\n'.join([ "'" + h + "'" for h in transids]) + html.javascript(('progress_scheduler("%s", "%s", 50, %s, %s, "%s", %s, %s, "%s", "' + _("FINISHED.") + '");') % + (html.var('mode'), base_url, json_items, json_transids, finish_url, + success_stats, fail_stats, term_url)) + + +#. +# .--Attributes----------------------------------------------------------. +# | _ _ _ _ _ _ | +# | / \ | |_| |_ _ __(_) |__ _ _| |_ ___ ___ | +# | / _ \| __| __| '__| | '_ \| | | | __/ _ \/ __| | +# | / ___ \ |_| |_| | | | |_) | |_| | || __/\__ \ | +# | /_/ \_\__|\__|_| |_|_.__/ \__,_|\__\___||___/ | +# | | +# +----------------------------------------------------------------------+ +# | Attributes of hosts are based on objects and are extendable via | +# | WATO plugins. | +# '----------------------------------------------------------------------' + +class Attribute: + # The constructor stores name and title. If those are + # dynamic than leave them out and override name() and + # title() + def __init__(self, name=None, title=None, help=None, default_value=None): + self._name = name + self._title = title + self._help = help + self._default_value = default_value + + # Return the name (= identifier) of the attribute + def name(self): + return self._name + + # Return the name of the Nagios configuration variable + # if this is a Nagios-bound attribute (e.g. "alias" or "_SERIAL") + def nagios_name(self): + return None + + # Return the title to be displayed to the user + def title(self): + return self._title + + # Return an optional help text + def help(self): + return self._help + + # Return the default value for new hosts + def default_value(self): + return self._default_value + + # Render HTML code displaying a value + def paint(self, value, hostname): + return "", value + + # Wether or not to show this attribute in tables. + # This value is set by declare_host_attribute + def show_in_table(self): + return self._show_in_table + + # Wether or not to show this attribute in the edit form. + # This value is set by declare_host_attribute + def show_in_form(self): + return self._show_in_form + + # Wether or not to make this attribute configurable in + # files and folders (as defaule value for the hosts) + def show_in_folder(self): + return self._show_in_folder + + # Wether or not this attribute can be edited after creation + # of the object + def editable(self): + return self._editable + + # Wether it is allowed that a host has no explicit + # value here (inherited or direct value). An mandatory + # has *no* default value. 
+ def is_mandatory(self): + return False + + # Return information about the user roles we depend on. + # The method is usually not overridden, but the variable + # _depends_on_roles is set by declare_host_attribute(). + def depends_on_roles(self): + try: + return self._depends_on_roles + except: + return [] + + # Return information about the host tags we depend on. + # The method is usually not overridden, but the variable + # _depends_on_tags is set by declare_host_attribute(). + def depends_on_tags(self): + try: + return self._depends_on_tags + except: + return [] + + # Render HTML input fields displaying the value and + # make it editable. If filter == True, then the field + # is to be displayed in filter mode (as part of the + # search filter) + def render_input(self, value): + pass + + # Create value from HTML variables. + def from_html_vars(self): + return None + + + # Check whether this attribute needs to be validated at all + # Attributes might be permanently hidden (show_in_form = False) + # or dynamically hidden by the depends_on_tags, editable features + def needs_validation(self): + if not self._show_in_form: + return False + return html.var('attr_display_%s' % self._name, "1") == "1" + + # Check if the value entered by the user is valid. + # This method may raise MKUserError in case of invalid user input. + def validate_input(self): + pass + + # If this attribute should be present in Nagios as + # a host custom macro, then the value of that macro + # should be returned here - otherwise None + def to_nagios(self, value): + return None + + # Checks if the given value matches the search attributes + # that are represented by the current HTML variables. + def filter_matches(self, crit, value, hostname): + return crit == value + + # Host tags to set for this host + def get_tag_list(self, value): + return [] + + +# A simple text attribute. It is stored in +# a Python unicode string +class TextAttribute(Attribute): + def __init__(self, name, title, help = None, default_value="", mandatory=False, allow_empty=True, size=25): + Attribute.__init__(self, name, title, help, default_value) + self._mandatory = mandatory + self._allow_empty = allow_empty + self._size = size + + def paint(self, value, hostname): + if not value: + return "", "" + else: + return "", value + + def is_mandatory(self): + return self._mandatory + + def render_input(self, value): + if value == None: + value = "" + html.text_input("attr_" + self.name(), value, size = self._size) + + def from_html_vars(self): + value = html.var_utf8("attr_" + self.name()) + if value == None: + value = "" + return value.strip() + + def validate_input(self): + value = self.from_html_vars() + if self._mandatory and not value: + raise MKUserError("attr_" + self.name(), + _("Please specify a value for %s") % self.title()) + if not self._allow_empty and value.strip() == "": + raise MKUserError("attr_" + self.name(), + _("%s may be missing, but it must not be empty if it is set.") % self.title()) + + + def filter_matches(self, crit, value, hostname): + if value == None: # Host does not have this attribute + value = "" + return crit.lower() in value.lower() + +# A simple text attribute that is not editable by the user. +# It can be used to store context information from other +# systems (e.g. during an import of a host database from +# another system).
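The filter semantics of TextAttribute.filter_matches() above amount to a case-insensitive substring test; a tiny standalone sketch with hypothetical inputs:

    def text_filter_matches(crit, value):
        # Mirrors TextAttribute.filter_matches(): a missing attribute value
        # behaves like the empty string; matching is case-insensitive.
        if value is None:
            value = ""
        return crit.lower() in value.lower()

    assert text_filter_matches("web", "Webserver Munich")
    assert not text_filter_matches("db", None)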
+class FixedTextAttribute(TextAttribute): + def __init__(self, name, title, help = None): + TextAttribute.__init__(self, name, title, help, None) + self._mandatory = False + + def render_input(self, value): + if value != None: + html.hidden_field("attr_" + self.name(), value) + html.write(value) + + def from_html_vars(self): + return html.var("attr_" + self.name()) + + +# A text attribute that is stored in a Nagios custom macro +class NagiosTextAttribute(TextAttribute): + def __init__(self, name, nag_name, title, help = None, default_value="", mandatory = False, allow_empty=True): + TextAttribute.__init__(self, name, title, help, default_value, mandatory, allow_empty) + self.nag_name = nag_name + + def nagios_name(self): + return self.nag_name + + def to_nagios(self, value): + if value: + return value + else: + return None + +# An attribute for selecting one item out of list using +# a drop down box (' % checkbox_name + checkbox_code += '' % checkbox_name + else: + onclick = "wato_fix_visibility(); wato_toggle_attribute(this, '%s');" % attrname + checkbox_code = '' % ( + checkbox_name, active and "CHECKED" or "", disabled and "DISABLED" or "", onclick) + + forms.section(_u(attr.title()), checkbox=checkbox_code, id="attr_" + attrname) + html.help(attr.help()) + + if len(values) == 1: + defvalue = values[0] + else: + defvalue = attr.default_value() + + if not new and not attr.editable(): + # In edit mode only display non editable values, don't show the + # input fields + html.write('') + + html.write('
    ' % (attrname)) + + else: + # Now comes the input fields and the inherited / default values + # as two DIV elements, one of which is visible at one time. + + # DIV with the input elements + html.write('
    ' + % (attrname, (not active) and "display: none" or "")) + + attr.render_input(defvalue) + html.write("
    ") + + html.write('
    ' + % (attrname, active and "display: none" or "")) + + # + # DIV with actual / inherited / default value + # + + # in bulk mode we show inheritance only if *all* hosts inherit + explanation = "" + if for_what == "bulk": + if num_haveit == 0: + explanation = " (" + inherited_from + ")" + value = inherited_value + elif not unique: + explanation = _("This value differs between the selected hosts.") + else: + value = values[0] + + elif for_what in [ "host", "folder" ]: + if not new and not attr.editable() and active: + value = values[0] + else: + explanation = " (" + inherited_from + ")" + value = inherited_value + + if for_what != "search" and not (for_what == "bulk" and not unique): + tdclass, content = attr.paint(value, "") + if not content: + content = _("empty") + html.write("" + _u(content) + "") + + html.write(explanation) + html.write("
    ") + + + if len(topics) > 1: + if topic_is_volatile: + volatile_topics.append((topic or _("Basic settings")).encode('utf-8')) + + def dump_json(obj): + return repr(obj).replace('None', 'null') + + forms.end() + # Provide Javascript world with the tag dependency information + # of all attributes. + html.javascript("var inherited_tags = %s;\n"\ + "var wato_check_attributes = %s;\n"\ + "var wato_depends_on_tags = %s;\n"\ + "var wato_depends_on_roles = %s;\n"\ + "var volatile_topics = %s;\n"\ + "var user_roles = %s;\n"\ + "var hide_attributes = %s;\n"\ + "wato_fix_visibility();\n" % ( + dump_json(inherited_tags), + dump_json(list(set(dependency_mapping_tags.keys()+dependency_mapping_roles.keys()+hide_attributes))), + dump_json(dependency_mapping_tags), + dump_json(dependency_mapping_roles), + dump_json(volatile_topics), + dump_json(config.user_role_ids), + dump_json(hide_attributes))) + + +# Check if at least one host in a folder (or its subfolders) +# has not set a certain attribute. This is needed for the validation +# of mandatory attributes. +def some_host_hasnt_set(folder, attrname): + # Check subfolders + for subfolder in folder[".folders"].values(): + # If the attribute is not set in the subfolder, we need + # to check all hosts and that folder. + if attrname not in subfolder["attributes"] \ + and some_host_hasnt_set(subfolder, attrname): + return True + + # Check hosts in this folder + load_hosts(folder) # make sure hosts are loaded + for host in folder[".hosts"].values(): + if attrname not in host: + return True + + return False + +# Compute effective (explicit and inherited) attributes +# for a host. This returns a dictionary with a value for +# each host attribute +def effective_attributes(host, folder): + if host: + chain = [ host ] + else: + chain = [ ] - # Mode of action - html.write("

    ") - if not complete_folder: - html.write(_("You have selected %d hosts for parent scan. ") % len(items)) - html.write("

    " + - _("The parent scan will try to detect the last gateway " - "on layer 3 (IP) before a host. This will be done by " - "calling traceroute. If a gateway is found by " - "that way and its IP address belongs to one of your " - "monitored hosts, that host will be used as the hosts " - "parent. If no such host exists, an artifical ping-only " - "gateway host will be created if you have not disabled " - "this feature.") + "

    ") + while folder: + chain.append(folder.get("attributes", {})) + folder = folder.get(".parent") - forms.header(_("Settings for Parent Scan")) + eff = {} + for a in chain[::-1]: + eff.update(a) - settings = config.load_user_file("parentscan", { - "where" : "subfolder", - "alias" : _("Created by parent scan"), - "recurse" : True, - "select" : "noexplicit", - "timeout" : 8, - "probes" : 2, - "ping_probes" : 5, - "max_ttl" : 10, - "force_explicit" : False, - }) + # now add default values of attributes for all missing values + for attr, topic in host_attributes: + attrname = attr.name() + if attrname not in eff: + eff.setdefault(attrname, attr.default_value()) - # Selection - forms.section(_("Selection")) - if complete_folder: - html.checkbox("recurse", settings["recurse"], label=_("Include all subfolders")) - html.write("
    ") - html.radiobutton("select", "noexplicit", settings["select"] == "noexplicit", - _("Skip hosts with explicit parent definitions (even if empty)") + "
    ") - html.radiobutton("select", "no", settings["select"] == "no", - _("Skip hosts hosts with non-empty parents (also if inherited)") + "
    ") - html.radiobutton("select", "ignore", settings["select"] == "ignore", - _("Scan all hosts") + "
    ") + return eff - # Performance - forms.section(_("Performance")) - html.write("" % _("sec")) - html.write("') - html.write("') - html.write('') - html.write('
    ") - html.write(_("Timeout for responses") + ":") - html.number_input("timeout", settings["timeout"], size=2) - html.write(" %s
    ") - html.write(_("Number of probes per hop") + ":") - html.number_input("probes", settings["probes"], size=2) - html.write('
    ") - html.write(_("Maximum distance (TTL) to gateway") + ":") - html.number_input("max_ttl", settings["max_ttl"], size=2) - html.write('
    ') - html.write(_("Number of PING probes") + ":") - html.help(_("After a gateway has been found, Check_MK checks if it is reachable " - "via PING. If not, it is skipped and the next gateway nearer to the " - "monitoring core is being tried. You can disable this check by setting " - "the number of PING probes to 0.")) - html.write("") - html.number_input("ping_probes", settings.get("ping_probes", 5), size=2) - html.write('
    ') - # Configuring parent - forms.section(_("Configuration")) - html.checkbox("force_explicit", - settings["force_explicit"], label=_("Force explicit setting for parents even if setting matches that of the folder")) +#. +# .--Snapshots-----------------------------------------------------------. +# | ____ _ _ | +# | / ___| _ __ __ _ _ __ ___| |__ ___ | |_ ___ | +# | \___ \| '_ \ / _` | '_ \/ __| '_ \ / _ \| __/ __| | +# | ___) | | | | (_| | |_) \__ \ | | | (_) | |_\__ \ | +# | |____/|_| |_|\__,_| .__/|___/_| |_|\___/ \__|___/ | +# | |_| | +# +----------------------------------------------------------------------+ +# | Mode for backup/restore/creation of snapshots | +# '----------------------------------------------------------------------' - # Gateway creation - forms.section(_("Creation of gateway hosts")) - html.write(_("Create gateway hosts in
      ")) - html.radiobutton("where", "subfolder", settings["where"] == "subfolder", - _("in the subfolder %s/Parents") % g_folder["title"]) - html.write("
      ") - html.radiobutton("where", "here", settings["where"] == "here", - _("directly in the folder %s") % g_folder["title"]) - html.write("
      ") - html.radiobutton("where", "there", settings["where"] == "there", - _("in the same folder as the host")) - html.write("
      ") - html.radiobutton("where", "nowhere", settings["where"] == "nowhere", - _("do not create gateway hosts")) - html.write("
    ") - html.write(_("Alias for created gateway hosts") + ": ") - html.text_input("alias", settings["alias"]) +# Returns status information for snapshots or snapshots in progress +def get_snapshot_status(snapshot, validate_checksums = False): + if type(snapshot) == tuple: + name, file_stream = snapshot + else: + name = snapshot + file_stream = None + + # Defaults of available keys + status = { + "name" : "", + "total_size" : 0, + "type" : None, + "files" : {}, + "comment" : "", + "created_by" : "", + "broken" : False, + "progress_status" : "", + } - # Start button - forms.end() - html.button("_start", _("Start")) + def access_snapshot(handler): + if file_stream: + file_stream.seek(0) + return handler(file_stream) + else: + return handler(snapshot_dir + name) + + def check_size(): + if file_stream: + file_stream.seek(0, os.SEEK_END) + size = file_stream.tell() + else: + statinfo = os.stat(snapshot_dir + name) + size = statinfo.st_size + if size < 256: + raise MKGeneralException(_("Invalid snapshot (too small)")) + else: + status["total_size"] = size + + def check_extension(): + # Check snapshot extension: tar or tar.gz + if name.endswith(".tar.gz"): + status["type"] = "legacy" + status["comment"] = _("Snapshot created with old version") + elif not name.endswith(".tar"): + raise MKGeneralException(_("Invalid snapshot (incorrect file extension)")) + + def check_content(): + status["files"] = access_snapshot(multitar.list_tar_content) + + if status.get("type") == "legacy": + allowed_files = map(lambda x: "%s.tar" % x[1], backup_paths) + for tarname in status["files"].keys(): + if tarname not in allowed_files: + raise MKGeneralException(_("Invalid snapshot (contains invalid tarfile %s)") % tarname) + else: # new snapshots + for entry in ["comment", "created_by", "type"]: + if entry in status["files"]: + status[entry] = access_snapshot(lambda x: multitar.get_file_content(x, entry)) + else: + raise MKGeneralException(_("Invalid snapshot (missing file: %s)") % entry) + + def check_core(): + if not defaults.omd_root: + return # Do not perform this check in non OMD environments + cmk_tar = cStringIO.StringIO(access_snapshot(lambda x: multitar.get_file_content(x, 'check_mk.tar.gz'))) + files = multitar.list_tar_content(cmk_tar) + using_cmc = os.path.exists(defaults.omd_root + '/etc/check_mk/conf.d/microcore.mk') + snapshot_cmc = 'conf.d/microcore.mk' in files + if using_cmc and not snapshot_cmc: + raise MKGeneralException(_('You are currently using the Check_MK Micro Core, but this snapshot does not use the ' + 'Check_MK Micro Core. If you need to migrate your data, you could consider changing ' + 'the core, restoring the snapshot and changing the core back again.')) + elif not using_cmc and snapshot_cmc: + raise MKGeneralException(_('You are currently not using the Check_MK Micro Core, but this snapshot uses the ' + 'Check_MK Micro Core. 
    If you need to migrate your data, you could consider changing ' + 'the core, restoring the snapshot and changing the core back again.')) + + def snapshot_secret(): + path = defaults.default_config_dir + '/snapshot.secret' + try: + return file(path).read() + except IOError: + return '' # validation will fail in this case + + def check_checksums(): + for f in status["files"].values(): + f['checksum'] = None + + # checksums field might contain three states: + # a) None - This is a legacy snapshot, no checksum file available + # b) False - No or invalid checksums + # c) True - Checksums successfully validated + if status['type'] == 'legacy': + status['checksums'] = None + return + + if 'checksums' not in status['files'].keys(): + status['checksums'] = False + return + + # Extract all available checksums from the snapshot + checksums_raw = access_snapshot(lambda x: multitar.get_file_content(x, 'checksums')) + checksums = {} + for l in checksums_raw.split('\n'): + line = l.strip() + if ' ' in line: + parts = line.split(' ') + if len(parts) == 3: + checksums[parts[0]] = (parts[1], parts[2]) + + # Now loop over all known backup domains and check whether they request + # checksum validation, whether a checksum is available, and whether it is valid + status['checksums'] = True + for domain_id, domain in backup_domains.items(): + filename = domain_id + '.tar.gz' + if not domain.get('checksum', True) or filename not in status['files']: + continue + + if filename not in checksums: + continue + + checksum, signed = checksums[filename] + + # Get hashes of file in question + subtar = access_snapshot(lambda x: multitar.get_file_content(x, filename)) + subtar_hash = sha256(subtar).hexdigest() + subtar_signed = sha256(subtar_hash + snapshot_secret()).hexdigest() + + status['files'][filename]['checksum'] = checksum == subtar_hash and signed == subtar_signed + status['checksums'] &= status['files'][filename]['checksum'] + + try: + if len(name) > 35: + status["name"] = "%s %s" % (name[14:24], name[25:33].replace("-",":")) + else: + status["name"] = name + + if not file_stream: + # Check if the snapshot build is still in progress...
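The signing scheme used by check_checksums() above can be exercised in isolation: the plain SHA-256 of a domain tarball must match the recorded checksum, and the hash chained with the site-local snapshot secret must match the recorded signature. A minimal sketch, with the tarball bytes and the secret as hypothetical inputs:

    from hashlib import sha256

    def verify_subtar(subtar_bytes, checksum, signed, secret):
        # Recompute both hashes exactly as check_checksums() does.
        subtar_hash = sha256(subtar_bytes).hexdigest()
        subtar_signed = sha256(subtar_hash + secret).hexdigest()
        return checksum == subtar_hash and signed == subtar_signed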
+ path_status = "%s/workdir/%s/%s.status" % (snapshot_dir, name, name) + path_pid = "%s/workdir/%s/%s.pid" % (snapshot_dir, name, name) + + # Check if this process is still running + if os.path.exists(path_pid): + if os.path.exists(path_pid) and not os.path.exists("/proc/%s" % open(path_pid).read()): + status["progress_status"] = _("ERROR: Snapshot progress no longer running!") + raise MKGeneralException(_("Error: The process responsible for creating the snapshot is no longer running!")) + else: + status["progress_status"] = _("Snapshot build currently in progress") + + # Read snapshot status file (regularly updated by snapshot process) + if os.path.exists(path_status): + lines = file(path_status, "r").readlines() + status["comment"] = lines[0].split(":", 1)[1] + file_info = {} + for filename in lines[1:]: + name, info = filename.split(":", 1) + text, size = info[:-1].split(":", 1) + file_info[name] = {"size" : saveint(size), "text": text} + status["files"] = file_info + return status + + # Snapshot exists and is finished - do some basic checks + check_size() + check_extension() + check_content() + check_core() + + if validate_checksums: + check_checksums() + + except Exception, e: + if config.debug: + import traceback + status["broken_text"] = traceback.format_exc() + status["broken"] = True + else: + status["broken_text"] = '%s' % e + status["broken"] = True + return status + +def mode_snapshot_detail(phase): + snapshot_name = html.var("_snapshot_name") + + if ".." in snapshot_name or "/" in snapshot_name: + raise MKUserError("_snapshot_name", _("Invalid snapshot requested")) + if not os.path.exists(snapshot_dir + '/' + snapshot_name): + raise MKUserError("_snapshot_name", _("The requested snapshot does not exist")) + + if phase not in ["buttons", "action"]: + status = get_snapshot_status(snapshot_name, validate_checksums = True) + + if phase == "title": + return _("Snapshot details of %s") % html.attrencode(status["name"]) + elif phase == "buttons": + home_button() + html.context_button(_("Back"), make_link([("mode", "snapshot")]), "back") + return + elif phase == "action": + return + + other_content = [] + + if status.get("broken"): + html.add_user_error('broken', _ ('This snapshot is broken!')) + html.add_user_error('broken_text', status.get("broken_text")) + html.show_user_errors() + + html.begin_form("snapshot_details", method="POST") + forms.header(_("Snapshot %s") % html.attrencode(snapshot_name)) + + for entry in [ ("comment", _("Comment")), ("created_by", _("Created by")) ]: + if status.get(entry[0]): + forms.section(entry[1]) + html.write(status.get(entry[0])) + + forms.section(_("Content")) + files = status["files"] + if not files: + html.write(_("Snapshot is empty!")) + else: + html.write("") + html.write("" + "" + "" % (_("Description"), _("Size"), _("Trusted"))) + + domains = [] + other_content = [] + for filename, values in files.items(): + if filename in ["comment", "type", "created_by", "checksums"]: + continue + domain_key = filename[:-7] + if domain_key in backup_domains.keys(): + verify_checksum = backup_domains.get('checksum', True) # is checksum check enabled here? 
+ domains.append((backup_domains[domain_key]["title"], verify_checksum, filename, values)) + else: + other_content.append((_("Other"), filename, values)) + domains.sort() + + for (title, verify_checksum, filename, values) in domains: + extra_info = "" + if values.get("text"): + extra_info = "%s - " % values["text"] + html.write("" % (extra_info, title)) + html.write("" % fmt_bytes(values["size"])) + + html.write("") + + html.write("") + + if other_content: + html.write("" % _("Other content")) + for (title, filename, values) in other_content: + html.write("" % html.attrencode(filename)) + html.write("" % fmt_bytes(values["size"])) + html.write("") + html.write("") + html.write("
    %s%s%s
    %s%s%s") + if verify_checksum: + if values.get('checksum') == True: + checksum_title = _('Checksum valid and signed') + checksum_icon = '' + elif values.get('checksum') == False: + checksum_title = _('Checksum invalid and not signed') + checksum_icon = 'p' + else: + checksum_title = _('Checksum not available') + checksum_icon = 'n' + html.icon(checksum_title, 'snapshot_%schecksum' % checksum_icon) + html.write("
    %s
    %s%s
    ") + + forms.end() + + if snapshot_name != "uploaded_snapshot": + delete_url = make_action_link([("mode", "snapshot"), ("_delete_file", snapshot_name)]) + html.buttonlink(delete_url, _("Delete Snapshot")) + download_url = make_action_link([("mode", "snapshot"), ("_download_file", snapshot_name)]) + html.buttonlink(download_url, _("Download Snapshot")) + + if not status.get("progress_status") and not status.get("broken"): + restore_url = make_action_link([("mode", "snapshot"), ("_restore_snapshot", snapshot_name)]) + html.buttonlink(restore_url, _("Restore Snapshot")) + +def get_snapshots(): + snapshots = [] + try: + for f in os.listdir(snapshot_dir): + if os.path.isfile(snapshot_dir + f): + snapshots.append(f) + snapshots.sort(reverse=True) + except OSError: + pass + return snapshots + +def extract_snapshot(snapshot_file): + multitar.extract_from_file(snapshot_dir + snapshot_file, backup_domains) + +def mode_snapshot(phase): + if phase == "title": + return _("Backup & Restore") + elif phase == "buttons": + home_button() + changelog_button() + html.context_button(_("Factory Reset"), + make_action_link([("mode", "snapshot"),("_factory_reset","Yes")]), "factoryreset") + return + + # Cleanup incompletely processed snapshot upload + if os.path.exists(snapshot_dir) and not html.var("_restore_snapshot") \ + and os.path.exists("%s/uploaded_snapshot" % snapshot_dir): + os.remove("%s/uploaded_snapshot" % snapshot_dir) + + snapshots = get_snapshots() + + # Generate valuespec for snapshot options + # Sort domains by group + domains_grouped = {} + for domainname, data in backup_domains.items(): + if not data.get("deprecated"): + domains_grouped.setdefault(data.get("group","Other"), {}).update({domainname: data}) + backup_groups = [] + for idx, key in enumerate(sorted(domains_grouped.keys())): + value = domains_grouped[key] + choices = [] + default_values = [] + for entry in sorted(value.keys()): + choices.append( (entry,value[entry]["title"]) ) + if value[entry].get("default"): + default_values.append(entry) + choices.sort(key = lambda x: x[1]) + backup_groups.append( ("group_%d" % idx, ListChoice(title = key, choices = choices, default_value = default_values) ) ) + + # Optional snapshot comment + backup_groups.append(("comment", TextUnicode(title = _("Comment"), size=80))) + snapshot_vs = Dictionary( + elements = backup_groups, + optional_keys = [] + ) + + + if phase == "action": + if html.has_var("_download_file"): + download_file = html.var("_download_file") + + # Find the latest snapshot file + if download_file == 'latest': + if not snapshots: + return False + download_file = snapshots[-1] + elif download_file not in snapshots: + raise MKUserError(None, _("Invalid download file specified.")) + + download_path = os.path.join(snapshot_dir, download_file) + if os.path.exists(download_path): + html.req.headers_out['Content-Disposition'] = 'Attachment; filename=' + download_file + html.req.headers_out['content_type'] = 'application/x-tar' + html.write(open(download_path).read()) + return False + + # create snapshot + elif html.has_var("_create_snapshot"): + if html.check_transaction(): + # create snapshot + store_domains = {} + + snapshot_options = snapshot_vs.from_html_vars("snapshot_options") + snapshot_vs.validate_value(snapshot_options, "snapshot_options") + + for key, value in snapshot_options.items(): + if key.startswith("group_"): + for entry in value: + store_domains[entry] = backup_domains[entry] + + snapshot_data = {} + snapshot_name = "wato-snapshot-%s.tar" % \ + 
time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime(time.time())) + snapshot_data["comment"] = snapshot_options.get("comment") \ + or _("Snapshot created by %s") % config.user_id + snapshot_data["type"] = "manual" + snapshot_data["snapshot_name"] = snapshot_name + snapshot_data["domains"] = store_domains + + return None, _("Created snapshot %s.") % create_snapshot(snapshot_data) + + # upload snapshot + elif html.uploads.get("_upload_file"): + uploaded_file = html.uploaded_file("_upload_file") + filename = uploaded_file[0] + + if ".." in filename or "/" in filename: + raise MKUserError("_upload_file", _("Invalid filename")) + filename = os.path.basename(filename) + + if uploaded_file[0] == "": + raise MKUserError(None, _("Please select a file for upload.")) + + if html.check_transaction(): + file_stream = cStringIO.StringIO(uploaded_file[2]) + status = get_snapshot_status((filename, file_stream), validate_checksums = True) + + if status.get("broken"): + raise MKUserError("_upload_file", _("This is not a Check_MK snapshot!
    %s") % \ + status.get("broken_text")) + elif not status.get("checksums") and not config.wato_upload_insecure_snapshots: + if status["type"] == "legacy": + raise MKUserError("_upload_file", _('The integrity of this snapshot could not be verified. ' + 'You are restoring a legacy snapshot which can not be verified. The snapshot contains ' + 'files which contain code that will be executed during runtime of the monitoring. ' + 'The upload of insecure snapshots is currently disabled in WATO. If you want to allow ' + 'the upload of insecure snapshots you can activate it in the Global Settings under ' + 'Configuration GUI (WATO) -> Allow upload of insecure WATO snapshots')) + else: + raise MKUserError("_upload_file", _('The integrity of this snapshot could not be verified.

    ' + 'If you restore a snapshot on the same site as where it was created, the checksum should ' + 'always be OK. If not, it is likely that something has been modified in the snapshot.
    ' + 'When you restore the snapshot on a different site, the checksum check will always fail. ' + 'The snapshot contains files which contain code that will be executed during runtime ' + 'of the monitoring.

    ' + 'The upload of insecure snapshots is currently disabled in WATO. If you want to allow ' + 'the upload of insecure snapshots you can activate it in the Global Settings under
    ' + 'Configuration GUI (WATO) -> Allow upload of insecure WATO snapshots')) + else: + file(snapshot_dir + filename, "w").write(uploaded_file[2]) + html.set_var("_snapshot_name", filename) + return "snapshot_detail" + + # delete file + elif html.has_var("_delete_file"): + delete_file = html.var("_delete_file") + + if delete_file not in snapshots: + raise MKUserError(None, _("Invalid file specified.")) + + c = wato_confirm(_("Confirm deletion of snapshot"), + _("Are you sure you want to delete the snapshot

    %s?") % + html.attrencode(delete_file) + ) + if c: + os.remove(os.path.join(snapshot_dir, delete_file)) + # Remove any files in workdir + for ext in [ ".pid", ".status", ".subtar", ".work" ]: + tmp_name = "%s/workdir/%s%s" % (snapshot_dir, os.path.basename(delete_file), ext) + if os.path.exists(tmp_name): + os.remove(tmp_name) + return None, _("Snapshot deleted.") + elif c == False: # not yet confirmed + return "" + # restore snapshot + elif html.has_var("_restore_snapshot"): + snapshot_file = html.var("_restore_snapshot") -def configure_gateway(state, site_id, folder, host, effective, gateway): - # Settings for configuration and gateway creation - force_explicit = html.get_checkbox("force_explicit") - where = html.var("where") - alias = html.var("alias") + if snapshot_file not in snapshots: + raise MKUserError(None, _("Invalid file specified.")) - # If we have found a gateway, we need to know a matching - # host name from our configuration. If there is none, - # we can create one, if the users wants this. The automation - # for the parent scan already tries to find such a host - # within the site. - gwcreat = False + status = get_snapshot_status(snapshot_file, validate_checksums = True) - if gateway: - gw_host, gw_ip, dns_name = gateway - if not gw_host: - if where == "nowhere": - return _("No host %s configured, parents not set") % gw_ip, \ - False, False + if status['checksums'] == True: + q = _("Are you sure you want to restore the snapshot %s?") % \ + html.attrencode(snapshot_file) + + elif status["type"] == "legacy" and status['checksums'] == None: + q = _('The integrity of this snapshot could not be verified.

    ' + 'You are restoring a legacy snapshot which cannot be verified. The snapshot contains ' + 'files which contain code that will be executed during runtime of the monitoring. Please ' + 'ensure that the snapshot is a legitimate, unmanipulated file.

    ' + 'Do you want to continue restoring the snapshot?') + + else: + q = _('The integrity of this snapshot could not be verified.

    ' + 'If you restore a snapshot on the same site as where it was created, the checksum should ' + 'always be OK. If not, it is likely that something has been modified in the snapshot.
    ' + 'When you restore the snapshot on a different site, the checksum check will always fail.

    ' + 'The snapshot contains files which contain code that will be executed during runtime ' + 'of the monitoring. Please ensure that the snapshot is a legitimate, unmanipulated file.

    ' + 'Do you want to ignore the failed integrity check and restore the snapshot?') - # Determine folder where to create the host. - elif where == "here": # directly in current folder - gw_folder = g_folder - elif where == "subfolder": - # Put new gateways in subfolder "Parents" of current - # folder. Does this folder already exist? - if "parents" in g_folder[".folders"]: - gw_folder = g_folder[".folders"]["parents"] - load_hosts(gw_folder) + c = wato_confirm(_("Confirm restore snapshot"), q) + if c: + if status["type"] == "legacy": + multitar.extract_from_file(snapshot_dir + snapshot_file, backup_paths) else: - # Create new gateway folder - config.need_permission("wato.manage_folders") - check_folder_permissions(g_folder, "write") - gw_folder = { - ".name" : "parents", - ".parent" : g_folder, - ".path" : g_folder[".path"] + "/parents", - "title" : _("Parents"), - "attributes" : {}, - ".folders" : {}, - ".hosts" : {}, - "num_hosts" : 0, - } - g_folders[gw_folder[".path"]] = gw_folder - g_folder[".folders"]["parent"] = gw_folder - save_folder(gw_folder) - call_hook_folder_created(gw_folder) - log_pending(AFFECTED, gw_folder, "new-folder", - _("Created new folder %s during parent scant") - % gw_folder[".path"]) - elif where == "there": # In same folder as host - gw_folder = folder - load_hosts(gw_folder) + extract_snapshot(snapshot_file) + log_pending(SYNCRESTART, None, "snapshot-restored", + _("Restored snapshot %s") % html.attrencode(snapshot_file)) + return None, _("Successfully restored snapshot.") + elif c == False: # not yet confirmed + return "" - # Create gateway host - config.need_permission("wato.manage_hosts") - check_folder_permissions(gw_folder, "write") - if dns_name: - gw_host = dns_name - elif site_id: - gw_host = "gw-%s-%s" % (site_id, gw_ip.replace(".", "-")) - else: - gw_host = "gw-%s" % (gw_ip.replace(".", "-")) + elif html.has_var("_factory_reset"): + c = wato_confirm(_("Confirm factory reset"), + _("If you proceed now, all hosts, folders, rules and other configurations " + "done with WATO will be deleted! Please consider making a snapshot before " + "you do this. Snapshots will not be deleted. Also the password of the currently " + "logged in user (%s) will be kept.

    " + "Do you really want to delete all or your configuration data?") % config.user_id) + if c: + factory_reset() + return None, _("Resetted WATO, wiped all configuration.") + elif c == False: # not yet confirmed + return "" + return None - new_host = { - ".name" : gw_host, - "ipaddress" : gw_ip, - ".folder" : gw_folder, - } - if alias: - new_host["alias"] = alias + else: + snapshots = get_snapshots() - # Important: set the "site" attribute for the new host, but - # only set it explicitely if it differs from the id of the - # folder. - e = effective_attributes(new_host, gw_folder) - if "site" in e and e["site"] != site_id: - new_host["site"] = site_id + # Render snapshot domain options + html.begin_form("create_snapshot", method="POST") + forms.header(_("Create snapshot")) + forms.section(_("Elements to save")) + forms.input(snapshot_vs, "snapshot_options", {}) + html.write("

    ") + html.hidden_fields() + forms.end() + html.button("_create_snapshot", _("Create snapshot"), "submit") + html.end_form() + html.write("
    ") - gw_folder[".hosts"][new_host[".name"]] = new_host - save_hosts(gw_folder) - reload_hosts(gw_folder) - save_folder(gw_folder) - mark_affected_sites_dirty(gw_folder, gw_host) - log_pending(AFFECTED, gw_host, "new-host", - _("Created new host %s during parent scan") % gw_host) + html.write("

    " + _("Restore from uploaded file") + "

    ") + html.write(_("Only supports snapshots up to 100MB. If your snapshot is larger than 100MB please copy it into the sites " + "backup directory %s/wato/snapshots. It will then show up in the snapshots table.") % defaults.var_dir) + html.begin_form("upload_form", method = "POST") + html.upload_file("_upload_file") + html.button("upload_button", _("Restore from file"), "submit") + html.hidden_fields() + html.end_form() - reload_folder(gw_folder) - gwcreat = True + table.begin("snapshots", _("Snapshots"), empty_text=_("There are no snapshots available.")) + for name in snapshots: + if name == "uploaded_snapshot": + continue + status = get_snapshot_status(name) + table.row() + # Snapshot name + table.cell(_("From"), '%s' % + (make_link([("mode","snapshot_detail"),("_snapshot_name", name)]), status["name"])) - parents = [ gw_host ] + # Comment + table.cell(_("Comment"), status.get("comment","")) - else: - parents = [] + # Age and Size + st = os.stat(snapshot_dir + name) + age = time.time() - st.st_mtime + table.cell(_("Size"), fmt_bytes(st.st_size), css="number"), - if effective["parents"] == parents: - return _("Parents unchanged at %s") % \ - (parents and ",".join(parents) or _("none")), False, gwcreat + # Status icons + table.cell(_("Status")) + if status.get("broken"): + html.icon(status.get("broken_text",_("This snapshot is broken")), "validation_error") + elif status.get("progress_status"): + html.icon( status.get("progress_status"), "timeperiods") + table.end() +def get_backup_domains(modes, extra_domains = {}): + domains = {} + for mode in modes: + for domain, value in backup_domains.items(): + if mode in value and not value.get("deprecated"): + domains.update({domain: value}) + domains.update(extra_domains) + return domains - config.need_permission("wato.edit_hosts") - check_host_permissions(host[".name"], folder=folder) +def do_snapshot_maintenance(): + snapshots = [] + for f in os.listdir(snapshot_dir): + if f.startswith('wato-snapshot-'): + status = get_snapshot_status(f) + # only remove automatic and legacy snapshots + if status.get("type") in [ "automatic", "legacy" ]: + snapshots.append(f) - if force_explicit: - host["parents"] = parents - else: - # Check which parents the host would have inherited - if "parents" in host: - del host["parents"] - effective = effective_attributes(host, folder) - if effective["parents"] != parents: - host["parents"] = parents + snapshots.sort(reverse=True) + while len(snapshots) > config.wato_max_snapshots: + log_audit(None, "snapshot-removed", _("Removed snapshot %s") % snapshots[-1]) + os.remove(snapshot_dir + snapshots.pop()) - if parents: - message = _("Set parents to %s") % ",".join(parents) - else: - message = _("Removed parents") - mark_affected_sites_dirty(folder, host[".name"]) - save_hosts(folder) - log_pending(AFFECTED, host[".name"], "set-gateway", message) - return message, True, gwcreat +def create_snapshot(data = {}): + import copy + def remove_functions(snapshot_data): + snapshot_data_copy = copy.deepcopy(snapshot_data) + for dom_key, dom_values in snapshot_data.items(): + for key, value in dom_values.items(): + if hasattr(value, '__call__'): + del snapshot_data_copy[dom_key][key] + return snapshot_data_copy + make_nagios_directory(snapshot_dir) -#. -# .-Random Hosts---------------------------------------------------------. 
-# | ____ _ _ _ _ | -# | | _ \ __ _ _ __ __| | ___ _ __ ___ | | | | ___ ___| |_ ___ | -# | | |_) / _` | '_ \ / _` |/ _ \| '_ ` _ \ | |_| |/ _ \/ __| __/ __| | -# | | _ < (_| | | | | (_| | (_) | | | | | | | _ | (_) \__ \ |_\__ \ | -# | |_| \_\__,_|_| |_|\__,_|\___/|_| |_| |_| |_| |_|\___/|___/\__|___/ | -# | | -# +----------------------------------------------------------------------+ -# | This module allows the creation of large numbers of random hosts, | -# | for test and development. | -# '----------------------------------------------------------------------' -def mode_random_hosts(phase): - if phase == "title": - return _("Random Hosts") + snapshot_name = data.get("name") or "wato-snapshot-%s.tar" % \ + time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime(time.time())) + snapshot_data = {} + snapshot_data["comment"] = data.get("comment", _("Snapshot created by %s") % config.user_id) + snapshot_data["created_by"] = data.get("created_by", config.user_id) + snapshot_data["type"] = data.get("type", "automatic") + snapshot_data["snapshot_name"] = snapshot_name + snapshot_data["domains"] = remove_functions(data.get("domains", get_backup_domains(["default"]))) - elif phase == "buttons": - html.context_button(_("Folder"), make_link([("mode", "folder")]), "back") - return + check_mk_local_automation("create-snapshot", [], snapshot_data) - elif phase == "action": - if html.check_transaction(): - count = int(html.var("count")) - folders = int(html.var("folders")) - levels = int(html.var("levels")) - created = create_random_hosts(g_folder, count, folders, levels) - return "folder", _("Created %d random hosts.") % created - else: - return "folder" + log_audit(None, "snapshot-created", _("Created snapshot %s") % snapshot_name) + do_snapshot_maintenance() - html.begin_form("random") - forms.header(_("Create Random Hosts")) - forms.section(_("Number to create")) - html.write("%s: " % _("Hosts to create in each folder")) - html.number_input("count", 10) - html.set_focus("count") - html.write("
    %s: " % _("Number of folders to create in each level")) - html.number_input("folders", 10) - html.write("
    %s: " % _("Levels of folders to create")) - html.number_input("levels", 1) + return snapshot_name - forms.end() - html.button("start", _("Start!"), "submit") - html.hidden_fields() - html.end_form() -def create_random_hosts(folder, count, folders, levels): - import random - if levels == 0: - created = 0 - while created < count: - name = "random_%010d" % int(random.random() * 10000000000) - host = {"ipaddress" : "127.0.0.1"} - folder[".hosts"][name] = host - created += 1 - folder["num_hosts"] += count - save_folder_and_hosts(folder) - reload_hosts() - return count - else: - total_created = 0 - if folder[".path"]: - prefixpath = folder[".path"] + "/" - else: - prefixpath = "" - created = 0 - while created < folders: - created += 1 - i = 1 - while True: - name = "folder_%02d" % i - if name not in folder[".folders"]: - break - i += 1 - title = "Subfolder %02d" % i - path = prefixpath + name - subfolder = { - ".parent" : folder, - ".name" : name, - ".folders" : {}, - ".hosts" : {}, - ".path" : path, - "attributes" : {}, - "num_hosts" : 0, - "title" : title, - } - g_folders[path] = subfolder - folder[".folders"][name] = subfolder - save_folder(subfolder) - total_created += create_random_hosts(subfolder, count, folders, levels - 1) - save_folder(folder) - return total_created +def factory_reset(): + # Darn. What makes things complicated here is that we need to conserve htpasswd, + # at least the account of the currently logged in user. + users = userdb.load_users(lock = True) + for id in users.keys(): + if id != config.user_id: + del users[id] + + to_delete = [ path for c,n,path + in backup_paths + if n != "auth.secret" ] + [ log_dir ] + for path in to_delete: + if os.path.isdir(path): + shutil.rmtree(path) + elif os.path.exists(path): + os.remove(path) + + make_nagios_directory(multisite_dir) + make_nagios_directory(root_dir) + + userdb.save_users(users) # make sure, omdadmin is present after this + log_pending(SYNCRESTART, None, "factory-reset", _("Complete reset to factory settings.")) + #. -# .-Auditlog-------------------------------------------------------------. -# | _ __ _ _ | -# | | | ___ __ _ / _(_) | ___ | -# | | | / _ \ / _` | |_| | |/ _ \ | -# | | |__| (_) | (_| | _| | | __/ | -# | |_____\___/ \__, |_| |_|_|\___| | -# | |___/ | +# .--Value-Editor--------------------------------------------------------. +# | __ __ _ _____ _ _ _ | +# | \ \ / /_ _| |_ _ ___ | ____|__| (_) |_ ___ _ __ | +# | \ \ / / _` | | | | |/ _ \ | _| / _` | | __/ _ \| '__| | +# | \ V / (_| | | |_| | __/ | |__| (_| | | || (_) | | | +# | \_/ \__,_|_|\__,_|\___| |_____\__,_|_|\__\___/|_| | +# | | # +----------------------------------------------------------------------+ -# | Handling of the audit logfiles | +# | The value editor is used in the configuration and rules module for | +# | editing single values (e.g. configuration parameter for main.mk or | +# | check parameters). 
| # '----------------------------------------------------------------------' -def mode_auditlog(phase): - if phase == "title": - return _("Audit logfile") - elif phase == "buttons": - home_button() - changelog_button() - if log_exists("audit") and config.may("wato.auditlog") and config.may("wato.edit"): - html.context_button(_("Download"), - html.makeactionuri([("_action", "csv")]), "download") - if config.may("wato.edit"): - html.context_button(_("Clear Logfile"), - html.makeactionuri([("_action", "clear")]), "trash") - return - elif phase == "action": - if html.var("_action") == "clear": - config.need_permission("wato.auditlog") - config.need_permission("wato.edit") - return clear_audit_log_after_confirm() +class CheckTypeSelection(DualListChoice): + def __init__(self, **kwargs): + DualListChoice.__init__(self, **kwargs) - elif html.var("_action") == "csv": - config.need_permission("wato.auditlog") - return export_audit_log() + def get_elements(self): + checks = check_mk_local_automation("get-check-information") + elements = [ (cn, (cn + " - " + c["title"])[:60]) for (cn, c) in checks.items()] + elements.sort() + return elements + + +def edit_value(valuespec, value, title=""): + if title: + title = title + "
    " + help = valuespec.help() or "" + html.write('') + html.write('%s' % title) + html.help(help) + html.write("") + + valuespec.render_input("ve", value) + html.write("") + +def get_edited_value(valuespec): + value = valuespec.from_html_vars("ve") + valuespec.validate_value(value, "ve") + return value - audit = parse_audit_log("audit") - if len(audit) == 0: - html.write("
    " + _("The audit logfile is empty.") + "
    ") - else: - render_audit_log(audit, "audit") #. -# .-Pending & Replication------------------------------------------------. -# | ____ _ _ | -# | | _ \ ___ _ __ __| (_)_ __ __ _ | -# | | |_) / _ \ '_ \ / _` | | '_ \ / _` | | -# | | __/ __/ | | | (_| | | | | | (_| | | -# | |_| \___|_| |_|\__,_|_|_| |_|\__, | | -# | |___/ | +# .--Configuration-------------------------------------------------------. +# | ____ __ _ _ _ | +# | / ___|___ _ __ / _(_) __ _ _ _ _ __ __ _| |_(_) ___ _ __ | +# | | | / _ \| '_ \| |_| |/ _` | | | | '__/ _` | __| |/ _ \| '_ \ | +# | | |__| (_) | | | | _| | (_| | |_| | | | (_| | |_| | (_) | | | | | +# | \____\___/|_| |_|_| |_|\__, |\__,_|_| \__,_|\__|_|\___/|_| |_| | +# | |___/ | # +----------------------------------------------------------------------+ -# | Mode for activating pending changes. Does also replication with | -# | remote sites in distributed WATO. | +# | Main entry page for configuration of global variables, rules, groups,| +# | timeperiods, users, etc. | # '----------------------------------------------------------------------' -def mode_changelog(phase): - # See below for the usage of this weird variable... - global sitestatus_do_async_replication - try: - sitestatus_do_async_replication - except: - sitestatus_do_async_replication = False - +def mode_main(phase): if phase == "title": - return _("Pending changes to activate") + return _("WATO - Check_MK's Web Administration Tool") elif phase == "buttons": - home_button() - # Commit pending log right here, if all sites are up-to-date - if is_distributed() and global_replication_state() == "clean": - log_commit_pending() + changelog_button() + return - if config.may("wato.activate") and ( - (not is_distributed() and log_exists("pending")) - or (is_distributed() and global_replication_state() == "dirty")): - html.context_button(_("Activate Changes!"), - html.makeactionuri([("_action", "activate")]), - "apply", True, id="act_changes_button") + elif phase == "action": + return - if is_distributed(): - html.context_button(_("Site Configuration"), make_link([("mode", "sites")]), "sites") + render_main_menu(modules) - elif phase == "action": +def render_main_menu(some_modules, columns = 2): + html.write('") +#. +# .--LDAP Config---------------------------------------------------------. +# | _ ____ _ ____ ____ __ _ | +# | | | | _ \ / \ | _ \ / ___|___ _ __ / _(_) __ _ | +# | | | | | | |/ _ \ | |_) | | | / _ \| '_ \| |_| |/ _` | | +# | | |___| |_| / ___ \| __/ | |__| (_) | | | | _| | (_| | | +# | |_____|____/_/ \_\_| \____\___/|_| |_|_| |_|\__, | | +# | |___/ | +# +----------------------------------------------------------------------+ +# | LDAP configuration and diagnose page | +# '----------------------------------------------------------------------' - # Give hooks chance to do some pre-activation things (and maybe stop - # the activation) - try: - call_hook_pre_distribute_changes() - except Exception, e: - if config.debug: - raise - else: - raise MKUserError(None, "

<h1>%s</h1>
    %s" % (_("Cannot activate changes"), e)) +def mode_ldap_config(phase): + if phase == 'title': + return _('LDAP Configuration') - sitestatus_do_async_replication = False # see below - if html.has_var("_siteaction"): - config.need_permission("wato.activate") - site_id = html.var("_site") - action = html.var("_siteaction") - if transaction_already_checked or html.check_transaction(): - try: - # If the site has no pending changes but just needs restart, - # the button text is just "Restart". We do a sync anyway. This - # can be optimized in future but is the save way for now. - site = config.site(site_id) - if action in [ "sync", "sync_restart" ]: - response = synchronize_site(site, restart = action == "sync_restart") - else: - try: - restart_site(site) - response = True - except Exception, e: - response = str(e) + elif phase == 'buttons': + global_buttons() + html.context_button(_("Users"), make_link([("mode", "users")]), "users") + return - if response == True: - return None - else: - raise MKUserError(None, _("Error on remote access to site: %s") % response) + config_vars = [ + 'ldap_connection', + 'ldap_userspec', + 'ldap_groupspec', + 'ldap_active_plugins', + 'ldap_cache_livetime', + 'ldap_debug_log', + ] + vs = [ (v, g_configvars[v][1]) for v in config_vars ] - except MKAutomationException, e: - raise MKUserError(None, _("Remote command on site %s failed:
<br><pre>%s</pre>
    ") % (site_id, e)) - except Exception, e: - if config.debug: - raise - raise MKUserError(None, _("Remote command on site %s failed:
<br><pre>%s</pre>
    ") % (site_id, e)) + current_settings = load_configuration_settings() - elif transaction_already_checked or html.check_transaction(): - config.need_permission("wato.activate") - create_snapshot() + if not userdb.connector_enabled('ldap'): + html.message(_('The LDAP user connector is disabled. You need to enable it to be able ' + 'to configure the LDAP settings.')) + return - # Do nothing here, but let site status table be shown in a mode - # were in each site that is not up-to-date an asynchronus AJAX - # job is being startet that updates that site - sitestatus_do_async_replication = True + if phase == 'action': + if not html.check_transaction(): + return - else: # phase: regular page rendering + for (varname, valuespec) in vs: + valuespec = dict(vs)[varname] + new_value = valuespec.from_html_vars(varname) + valuespec.validate_value(new_value, varname) + if current_settings.get(varname) != new_value: + msg = _("Changed LDAP configuration variable %s to %s.") \ + % (varname, valuespec.value_to_text(new_value)) + log_pending(SYNC, None, "edit-configvar", msg) + current_settings[varname] = new_value - if is_distributed(): - # Distributed WATO: Show replication state of each site + save_configuration_settings(current_settings) + config.load_config() # make new configuration active + return - # During bulk replication we rather create the sync snapshot now. Otherwise - # there is the danger, that it is created multiple times in parallel, thus - # wasting time. - if sitestatus_do_async_replication: - create_sync_snapshot() + userdb.ldap_test_module() - html.write("

-            html.write("<h3>%s</h3>" % _("Distributed WATO - Replication Status"))
-            repstatus = load_replication_status()
-            sites = [(name, config.site(name)) for name in config.sitenames() ]
-            sort_sites(sites)
-            html.write("<table class=data>")
-            html.write("<tr class=dualheader>")
-            html.write("<th rowspan=2>%s</th>" % _("ID") +
-                       "<th rowspan=2>%s</th>" % _("Alias"))
-            html.write("<th colspan=6>%s</th>" % _("Livestatus"))
-            html.write("<th colspan=%d>%s</th>" %
-                       (sitestatus_do_async_replication and 3 or 6, _("Replication")))
-            html.write("<tr>" +
-                       "<th>%s</th>" % _("Status") +
-                       "<th>%s</th>" % _("Version") +
-                       "<th>%s</th>" % _("Core") +
-                       "<th>%s</th>" % _("Ho.") +
-                       "<th>%s</th>" % _("Sv.") +
-                       "<th>%s</th>" % _("Uptime") +
-                       "<th>%s</th>" % _("Multisite URL") +
-                       "<th>%s</th>" % _("Type"))
-            if sitestatus_do_async_replication:
-                html.write("<th>%s</th>" % _("Replication result"))
-            else:
-                html.write("<th>%s</th>" % _("State") +
-                           "<th>%s</th>" % _("Actions") +
-                           "<th>%s</th>" % _("Last result"))
-            html.write("</tr>")
+    #
+    # Regular page rendering
+    #
+
+    html.write('
    ') + html.write('
    %s%s%s%s
    %s%s%s%s%s%s%s%s%s%s%s%s
' % odd)
-        odd = odd == "odd" and "even" or "odd"

+    for title, test in tests:
+        table.row()
+        try:
+            state, msg = test(address)
+        except Exception, e:
+            state = False
+            msg = _('Exception: %s') % html.attrencode(e)

-        # ID & Alias
-        html.write("<td><a href='%s'>%s</a></td>" %
-           (make_link([("mode", "edit_site"), ("edit", site_id)]), site_id))
-        html.write("<td>%s</td>" % site.get("alias", ""))

+        if state:
+            img = html.render_icon("success", _('Success'))
+        else:
+            img = html.render_icon("failed", _("Failed"))

-        # Livestatus
-        html.write('<td>%s</td>' % (status))

+        table.cell(_("Test"), title)
+        table.cell(_("State"), img)
+        table.cell(_("Details"), msg)

-        # Livestatus-Version
-        html.write('<td>%s</td>' % ss.get("livestatus_version", ""))

+    table.end()

-        # Core-Version
-        html.write('<td>%s</td>' % ss.get("program_version", ""))

+    userdb.ldap_disconnect()

-        # Hosts/services
-        html.write('' %
-             (site_id, ss.get("num_hosts", "")))
-        html.write('' %
-             (site_id, ss.get("num_services", "")))
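(Not part of the diff: each diagnostic in the loop above is a callable that
takes the server address and returns a (state, msg) pair. A hypothetical
additional test would follow the same contract; test_bind_credentials is a
made-up name, while userdb.ldap_connect is the helper the existing tests use.)

    # Sketch of an extra LDAP diagnostic obeying the (state, msg) contract.
    def test_bind_credentials(address):
        try:
            userdb.ldap_connect(enforce_new = True, enforce_server = address)
        except Exception, e:
            return (False, _('Could not connect: %s') % e)
        return (True, _('Bind with the configured credentials succeeded.'))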
+    html.write('')
+    html.begin_form('ldap_config', method = "POST", action = 'wato.py?mode=ldap_config')
+    need_header = True
+    for (var, valuespec) in vs:
+        value = current_settings.get(var, valuespec.default_value())
+        if isinstance(valuespec, Dictionary):
+            valuespec._render = "form"
+        else:
+            if need_header:
+                forms.header(_('Other Settings'))
+                need_header = False
+            forms.section(valuespec.title())
+        valuespec.render_input(var, value)
+        html.help(valuespec.help())
+    forms.end()

+    html.button("_save", _("Save"))
+    html.button("_test", _("Save & Test"))
+    html.hidden_fields()
+    html.end_form()
+    html.write('')

+    html.write('<h2>' + _('Diagnostics') + '</h2>
    ') + if not html.var('_test'): + html.message(HTML('

<p>%s</p>
<p>%s</p>

    ' % + (_('You can verify the single parts of your ldap configuration using this ' + 'dialog. Simply make your configuration in the form on the left side and ' + 'hit the "Save & Test" button to execute the tests. After ' + 'the page reload, you should see the results of the test here.'), + _('If you need help during configuration or experience problems, please refer ' + 'to the Multisite ' + 'LDAP Documentation.')))) + else: + def test_connect(address): + conn, msg = userdb.ldap_connect_server(address) + if conn: + return (True, _('Connection established. The connection settings seem to be ok.')) + else: + return (False, msg) + + def test_user_base_dn(address): + if not userdb.ldap_user_base_dn_configured(): + return (False, _('The User Base DN is not configured.')) + userdb.ldap_connect(enforce_new = True, enforce_server = address) + if userdb.ldap_user_base_dn_exists(): + return (True, _('The User Base DN could be found.')) + elif userdb.ldap_bind_credentials_configured(): + return (False, _('The User Base DN could not be found. Maybe the provided ' + 'user (provided via bind credentials) has no permission to ' + 'access the Base DN or the credentials are wrong.')) + else: + return (False, _('The User Base DN could not be found. Seems you need ' + 'to configure proper bind credentials.')) + + def test_user_count(address): + if not userdb.ldap_user_base_dn_configured(): + return (False, _('The User Base DN is not configured.')) + userdb.ldap_connect(enforce_new = True, enforce_server = address) + try: + ldap_users = userdb.ldap_get_users() + msg = _('Found no user object for synchronization. Please check your filter settings.') + except Exception, e: + ldap_users = None + msg = "%s" % e + if 'successful bind must be completed' in msg: + if not userdb.ldap_bind_credentials_configured(): + return (False, _('Please configure proper bind credentials.')) + else: + return (False, _('Maybe the provided user (provided via bind credentials) has not ' + 'enough permissions or the credentials are wrong.')) + + if ldap_users and len(ldap_users) > 0: + return (True, _('Found %d users for synchronization.') % len(ldap_users)) + else: + return (False, msg) + + def test_group_base_dn(address): + if not userdb.ldap_group_base_dn_configured(): + return (False, _('The Group Base DN is not configured, not fetching any groups.')) + userdb.ldap_connect(enforce_new = True, enforce_server = address) + if userdb.ldap_group_base_dn_exists(): + return (True, _('The Group Base DN could be found.')) + else: + return (False, _('The Group Base DN could not be found.')) + + def test_group_count(address): + if not userdb.ldap_group_base_dn_configured(): + return (False, _('The Group Base DN is not configured, not fetching any groups.')) + userdb.ldap_connect(enforce_new = True, enforce_server = address) + try: + ldap_groups = userdb.ldap_get_groups() + msg = _('Found no group object for synchronization. 
Please check your filter settings.') + except Exception, e: + ldap_groups = None + msg = "%s" % e + if 'successful bind must be completed' in msg: + if not userdb.ldap_bind_credentials_configured(): + return (False, _('Please configure proper bind credentials.')) + else: + return (False, _('Maybe the provided user (provided via bind credentials) has not ' + 'enough permissions or the credentials are wrong.')) + if ldap_groups and len(ldap_groups) > 0: + return (True, _('Found %d groups for synchronization.') % len(ldap_groups)) + else: + return (False, msg) + + def test_groups_to_roles(address): + if 'groups_to_roles' not in config.ldap_active_plugins: + return True, _('Skipping this test (Plugin is not enabled)') + + userdb.ldap_connect(enforce_new = True, enforce_server = address) + num = 0 + for role_id, dn in config.ldap_active_plugins['groups_to_roles'].items(): + if type(dn) in [str, unicode]: + num += 1 + try: + ldap_groups = userdb.ldap_get_groups(dn) + if not ldap_groups: + return False, _('Could not find the group specified for role %s') % role_id + except Exception, e: + return False, _('Error while fetching group for role %s: %s') % (role_id, e) + return True, _('Found all %d groups.') % num - if site.get("disabled"): - ss = {} - status = "disabled" - else: - ss = html.site_status.get(site_id, {}) - status = ss.get("state", "unknown") + tests = [ + (_('Connection'), test_connect), + (_('User Base-DN'), test_user_base_dn), + (_('Count Users'), test_user_count), + (_('Group Base-DN'), test_group_base_dn), + (_('Count Groups'), test_group_count), + (_('Sync-Plugin: Roles'), test_groups_to_roles), + ] - srs = repstatus.get(site_id, {}) + for address in userdb.ldap_servers(): + html.write('

<h3>%s: %s</h3>
    ' % (_('Server'), address)) + table.begin('test', searchable = False) - # Make row red, if site status is not online - html.write('
    %s%s%s%s%s%s
    ') + html.write('
    ') - # Uptime / Last restart - if "program_start" in ss: - age_text = html.age_text(time.time() - ss["program_start"]) - else: - age_text = "" - html.write('%s' % age_text) +#. +# .--Global-Settings-----------------------------------------------------. +# | ____ _ _ _ __ __ | +# | / ___| | ___ | |__ __ _| | \ \ / /_ _ _ __ ___ | +# | | | _| |/ _ \| '_ \ / _` | | \ \ / / _` | '__/ __| | +# | | |_| | | (_) | |_) | (_| | | \ V / (_| | | \__ \ | +# | \____|_|\___/|_.__/ \__,_|_| \_/ \__,_|_| |___/ | +# | | +# +----------------------------------------------------------------------+ +# | Editor for global settings in main.mk | +# '----------------------------------------------------------------------' - # Multisite-URL - html.write("%s" % (not is_local - and "%s" % tuple([site.get("multisiteurl")]*2) or "")) +def mode_globalvars(phase): + search = html.var_utf8("search") + if search != None: + search = search.strip().lower() - # Type - if is_local: - sitetype = _("local") - elif site["replication"] == "slave": - sitetype = _("Slave") - else: - sitetype = _("Peer") - html.write("%s" % sitetype) + if phase == "title": + if search: + return _("Global configuration settings matching %s") % html.attrencode(search) + else: + return _("Global configuration settings for Check_MK") - need_restart = srs.get("need_restart") - need_sync = srs.get("need_sync") and not site_is_local(site_id) - uptodate = not (need_restart or need_sync) + elif phase == "buttons": + global_buttons() + return - # Start asynchronous replication - if sitestatus_do_async_replication: - html.write("") - # Do only include sites that are known to be up - if not site_is_local(site_id) and not "secret" in site: - html.write("%s" % _("Not logged in.")) - else: - html.write('
    %s
    ' % - (site_id, uptodate and _("nothing to do") or "")) - if not uptodate: - if need_restart and need_sync: - what = "sync+restart" - elif need_restart: - what = "restart" - else: - what = "sync" - estimated_duration = srs.get("times", {}).get(what, 2.0) - html.javascript("wato_do_replication('%s', %d);" % - (site_id, int(estimated_duration * 1000.0))) - num_replsites += 1 - html.write("") - else: - # State - html.write("") - if srs.get("need_sync") and not site_is_local(site_id): - html.write('' % - _("This site is not update and needs a replication.")) - if srs.get("need_restart"): - html.write('' % - _("This site needs a restart for activating the changes.")) - if uptodate: - html.write('' % - _("This site is up-to-date.")) - html.write("") + # Get default settings of all configuration variables of interest in the domain + # "check_mk". (this also reflects the settings done in main.mk) + check_mk_vars = [ varname for (varname, var) in g_configvars.items() if var[0] == "check_mk" ] + default_values = check_mk_local_automation("get-configuration", [], check_mk_vars) + current_settings = load_configuration_settings() - # Actions - html.write("") - sync_url = make_action_link([("mode", "changelog"), - ("_site", site_id), ("_siteaction", "sync")]) - restart_url = make_action_link([("mode", "changelog"), - ("_site", site_id), ("_siteaction", "restart")]) - sync_restart_url = make_action_link([("mode", "changelog"), - ("_site", site_id), ("_siteaction", "sync_restart")]) - if not site_is_local(site_id) and "secret" not in site: - html.write("%s" % _("Not logged in.")) - elif not uptodate: - if not site_is_local(site_id): - if srs.get("need_sync"): - html.buttonlink(sync_url, _("Sync")) - if srs.get("need_restart"): - html.buttonlink(sync_restart_url, _("Sync & Restart")) - else: - html.buttonlink(restart_url, _("Restart")) - else: - html.buttonlink(restart_url, _("Restart")) - html.write("") + if phase == "action": + varname = html.var("_varname") + action = html.var("_action") + if varname: + domain, valuespec, need_restart, allow_reset, in_global_settings = g_configvars[varname] + def_value = default_values.get(varname, valuespec.default_value()) - # Last result - result = srs.get("result", "") - if len(result) > 20: - result = htmllib.strip_tags(result) - result = '%s...' % \ - (htmllib.attrencode(result), result[:20]) - html.write("%s" % result) + if action == "reset" and not is_a_checkbox(valuespec): + c = wato_confirm( + _("Resetting configuration variable"), + _("Do you really want to reset the configuration variable %s " + "back to the default value of %s?") % + (varname, valuespec.value_to_text(def_value))) + else: + if not html.check_transaction(): + return + c = True # no confirmation for direct toggle - html.write("") - html.write("") - # The Javascript world needs to know, how many asynchronous - # replication jobs it should wait to be finished. - if sitestatus_do_async_replication and num_replsites > 0: - html.javascript("var num_replsites = %d;\n" % num_replsites) + if c: + # if action == "reset": + # del current_settings[varname] + # msg = _("Resetted configuration variable %s to its default.") % varname + # else: + if varname in current_settings: + current_settings[varname] = not current_settings[varname] + else: + current_settings[varname] = not def_value + msg = _("Changed Configuration variable %s to %s." 
% (varname, + current_settings[varname] and "on" or "off")) + save_configuration_settings(current_settings) + pending_func = g_configvar_domains[domain].get("pending") + if pending_func: + pending_func(msg) + else: + log_pending(need_restart and SYNCRESTART or SYNC, None, "edit-configvar", msg) + if action == "_reset": + return "globalvars", msg + else: + return "globalvars" + elif c == False: + return "" + else: + return None + else: + return - elif sitestatus_do_async_replication: - # Single site setup + render_global_configuration_variables(default_values, current_settings, search=search) - # Is rendered on the page after hitting the "activate" button - # Renders the html to show the progress and starts the sync via javascript - html.write("") - html.write("" % (_('Progress'), _('Status'))) - html.write('') - html.write('
    %s%s
    %s
    ' % _('activating...')) - - srs = load_replication_status().get(None, {}) - estimated_duration = srs.get("times", {}).get('act', 2.0) - html.javascript("wato_do_activation(%d);" % - (int(estimated_duration * 1000.0))) +def render_global_configuration_variables(default_values, current_settings, show_all=False, search=None): + groupnames = g_configvar_groups.keys() + groupnames.sort() - sitestatus_do_async_replication = None # could survive in global context! + search_form(_("Search for settings:")) - pending = parse_audit_log("pending") - if len(pending) == 0: - html.write("
    " + _("There are no pending changes.") + "
    ") - else: - html.write('
    ') - render_audit_log(pending, "pending", hilite_others=True) - html.write('
    ') + at_least_one_painted = False + html.write('
    ') + for groupname in groupnames: + header_is_painted = False # needed for omitting empty groups -# Determine if other users have made pending changes -def foreign_changes(): - changes = {} - for t, linkinfo, user, action, text in parse_audit_log("pending"): - if user != config.user_id: - changes.setdefault(user, 0) - changes[user] += 1 - return changes + for domain, varname, valuespec in g_configvar_groups[groupname]: + if not show_all and (not g_configvars[varname][4] + or not g_configvar_domains[domain].get('in_global_settings', True)): + continue # do not edit via global settings + if domain == "check_mk" and varname not in default_values: + if config.debug: + raise MKGeneralException("The configuration variable %s is unknown to " + "your local Check_MK installation" % varname) + else: + continue + help_text = valuespec.help() or '' + title_text = valuespec.title() -def log_entry(linkinfo, action, message, logfilename): - if type(message) == unicode: - message = message.encode("utf-8") - message = message.strip() + if search and search not in groupname \ + and search not in domain \ + and search not in varname \ + and search not in help_text \ + and search not in title_text: + continue # skip variable when search is performed and nothing matches + at_least_one_painted = True + + if not header_is_painted: + # always open headers when searching + forms.header(groupname, isopen=search) + header_is_painted = True - # linkinfo is either a folder, or a hostname or None - if type(linkinfo) == dict and linkinfo[".path"] in g_folders: - link = linkinfo[".path"] + ":" - elif linkinfo == None: - link = "-" - elif linkinfo and ".hosts" in g_folder and linkinfo in g_folder[".hosts"]: # hostname in current folder - link = g_folder[".path"] + ":" + linkinfo - else: - link = ":" + linkinfo + defaultvalue = default_values.get(varname, valuespec.default_value()) - log_file = log_dir + logfilename - make_nagios_directory(log_dir) - f = create_user_file(log_file, "ab") - f.write("%d %s %s %s " % (int(time.time()), link, config.user_id, action)) - f.write(message) - f.write("\n") + edit_url = make_link([("mode", "edit_configvar"), ("varname", varname), ("site", html.var("site", ""))]) + title = '%s' % \ + (edit_url, varname in current_settings and '"modified"' or '""', + html.strip_tags(help_text), title_text) + if varname in current_settings: + to_text = valuespec.value_to_text(current_settings[varname]) + else: + to_text = valuespec.value_to_text(defaultvalue) -def log_audit(linkinfo, what, message): - log_entry(linkinfo, what, message, "audit.log") + # Is this a simple (single) value or not? change styling in these cases... + simple = True + if '\n' in to_text or '' in to_text: + simple = False + forms.section(title, simple=simple) -# status is one of: -# SYNC -> Only sync neccessary -# RESTART -> Restart and sync neccessary -# SYNCRESTART -> Do sync and restart -# AFFECTED -> affected sites are already marked for sync+restart -# by mark_affected_sites_dirty(). But we need to -# mark our peers for sync, regardless of any affected -# sites. Peers need always to be up-to-date. -# LOCALRESTART-> Called after inventory. In distributed mode, affected -# sites have already been marked for restart. Do nothing here. 
-# In non-distributed mode mark for restart -def log_pending(status, linkinfo, what, message): - log_entry(linkinfo, what, message, "audit.log") - need_sidebar_reload() + toggle_url = html.makeactionuri([("_action", "toggle"), ("_varname", varname)]) + if varname in current_settings: + if is_a_checkbox(valuespec): + html.icon_button(toggle_url, _("Immediately toggle this setting"), + "snapin_switch_" + (current_settings[varname] and "on" or "off"), + cssclass="modified") + else: + html.write('%s' % (edit_url, to_text)) + else: + if is_a_checkbox(valuespec): + html.icon_button(toggle_url, _("Immediately toggle this setting"), + # "snapin_greyswitch_" + (defaultvalue and "on" or "off")) + "snapin_switch_" + (defaultvalue and "on" or "off")) + else: + html.write('%s' % (edit_url, to_text)) - if not is_distributed(): - if status != SYNC: - log_entry(linkinfo, what, message, "pending.log") + if header_is_painted: + forms.end() + if not at_least_one_painted: + html.message(_('Did not find any global setting matching your search.')) + html.write('
    ') - # Currently we add the pending to each site, regardless if - # the site is really affected. This needs to be optimized - # in future. +def mode_edit_configvar(phase, what = 'globalvars'): + siteid = html.var("site") + if siteid: + sites = load_sites() + site = sites[siteid] + + if phase == "title": + if what == 'mkeventd': + return _("Event Console Configuration") + elif siteid: + return _("Site-specific global configuration for %s" % siteid) + else: + return _("Global configuration settings for Check_MK") + + elif phase == "buttons": + if what == 'mkeventd': + html.context_button(_("Abort"), make_link([("mode", "mkeventd_config")]), "abort") + elif siteid: + html.context_button(_("Abort"), make_link([("mode", "edit_site_globals"), ("site", siteid)]), "abort") + else: + html.context_button(_("Abort"), make_link([("mode", "globalvars")]), "abort") + return + + varname = html.var("varname") + domain, valuespec, need_restart, allow_reset, in_global_settings = g_configvars[varname] + if siteid: + current_settings = site.setdefault("globals", {}) else: - log_entry(linkinfo, what, message, "pending.log") - for siteid, site in config.sites.items(): + current_settings = load_configuration_settings() - changes = {} + is_on_default = varname not in current_settings - # Local site can never have pending changes to be synced - if site_is_local(siteid): - if status in [ RESTART, SYNCRESTART ]: - changes["need_restart"] = True - elif site.get("replication") == "peer" and status == AFFECTED: - changes["need_sync"] = True - else: - if status in [ SYNC, SYNCRESTART ]: - changes["need_sync"] = True - if status in [ RESTART, SYNCRESTART ]: - changes["need_restart"] = True - update_replication_status(siteid, changes) + if phase == "action": + if html.var("_reset"): + if not is_a_checkbox(valuespec): + c = wato_confirm( + _("Resetting configuration variable"), + _("Do you really want to reset this configuration variable " + "back to its default value?")) + if c == False: + return "" + elif c == None: + return None - # Make sure that a new snapshot for syncing will be created - # when times comes to syncing - remove_sync_snapshot() + del current_settings[varname] + msg = _("Resetted configuration variable %s to its default.") % varname + else: + new_value = get_edited_value(valuespec) + current_settings[varname] = new_value + msg = _("Changed global configuration variable %s to %s.") \ + % (varname, valuespec.value_to_text(new_value)) + if siteid: + save_sites(sites, activate=False) + changes = { "need_sync" : True } + if need_restart: + changes["need_restart"] = True + update_replication_status(siteid, changes) + log_pending(AFFECTED, None, "edit-configvar", msg) + return "edit_site_globals" + else: + save_configuration_settings(current_settings) + if need_restart: + status = SYNCRESTART + else: + status = SYNC -def log_commit_pending(): - pending = log_dir + "pending.log" - if os.path.exists(pending): - os.remove(pending) - need_sidebar_reload() + pending_func = g_configvar_domains[domain].get("pending") + if pending_func: + pending_func(msg) + else: + log_pending(status, None, "edit-configvar", msg) + if what == 'mkeventd': + return 'mkeventd_config' + else: + return "globalvars" -def clear_audit_log(): - path = log_dir + "audit.log" - if os.path.exists(path): - newpath = path + time.strftime(".%Y-%m-%d") - if os.path.exists(newpath): - n = 1 - while True: - n += 1 - with_num = newpath + "-%d" % n - if not os.path.exists(with_num): - newpath = with_num - break - os.rename(path, newpath) + 
check_mk_vars = check_mk_local_automation("get-configuration", [], [varname]) -def clear_audit_log_after_confirm(): - c = wato_confirm(_("Confirm deletion of audit logfile"), - _("Do you really want to clear audit logfile?")) - if c: - clear_audit_log() - return None, _("Cleared audit logfile.") - elif c == False: # not yet confirmed - return "" + if varname in current_settings: + value = current_settings[varname] else: - return None # browser reload + if siteid: + globalsettings = load_configuration_settings() + check_mk_vars.update(globalsettings) + value = check_mk_vars.get(varname, valuespec.default_value()) -def parse_audit_log(what): - path = log_dir + what + ".log" - if os.path.exists(path): - entries = [] - for line in file(path): - line = line.rstrip().decode("utf-8") - splitted = line.split(None, 4) - if len(splitted) == 5 and is_integer(splitted[0]): - splitted[0] = int(splitted[0]) - entries.append(splitted) - entries.reverse() - return entries - return [] + if siteid: + defvalue = check_mk_vars.get(varname, valuespec.default_value()) + else: + defvalue = valuespec.default_value() -def is_integer(i): - try: - int(i) - return True - except: - return False -def log_exists(what): - path = log_dir + what + ".log" - return os.path.exists(path) + html.begin_form("value_editor", method="POST") + forms.header(valuespec.title()) + if not config.wato_hide_varnames: + forms.section(_("Variable for %s.mk" % + { "check_mk" : "main" }.get(domain, domain))) + html.write("%s" % varname) -def render_linkinfo(linkinfo): - if ':' in linkinfo: # folder:host - path, hostname = linkinfo.split(':', 1) - if path in g_folders: - folder = g_folders[path] - if hostname: - hosts = load_hosts(folder) - if hostname in hosts: - url = html.makeuri_contextless([("mode", "edithost"), - ("folder", path), ("host", hostname)]) - title = hostname - else: - return hostname - else: # only folder - url = html.makeuri_contextless([("mode", "folder"), ("folder", path)]) - title = g_folders[path]["title"] - else: - return linkinfo + forms.section(_("Current setting")) + valuespec.render_input("ve", value) + valuespec.set_focus("ve") + html.help(valuespec.help()) + + forms.section(_("Default setting")) + if is_on_default: + html.write(_("This variable is at factory settings.")) else: - return "" + curvalue = current_settings[varname] + if curvalue == defvalue: + html.write(_("Your setting and factory settings are identical.")) + else: + html.write(valuespec.value_to_text(defvalue)) - return '%s' % (url, title) + forms.end() + html.button("save", _("Save")) + if allow_reset and not is_on_default: + curvalue = current_settings[varname] + html.button("_reset", curvalue == defvalue and _("Remove explicit setting") or _("Reset to default")) + html.hidden_fields() + html.end_form() -def get_timerange(t): - st = time.localtime(int(t)) - start = int(time.mktime(time.struct_time((st[0], st[1], st[2], 0, 0, 0, st[6], st[7], st[8])))) - end = start + 86399 - return start, end +# domain is one of "check_mk", "multisite" or "nagios" +def register_configvar(group, varname, valuespec, domain="check_mk", + need_restart=False, allow_reset=True, in_global_settings=True): + g_configvar_groups.setdefault(group, []).append((domain, varname, valuespec)) + g_configvars[varname] = domain, valuespec, need_restart, allow_reset, in_global_settings -def fmt_date(t): - return time.strftime('%Y-%m-%d', time.localtime(t)) +g_configvar_domains = { + "check_mk" : { + "configdir" : root_dir, + }, + "multisite" : { + "configdir" : multisite_dir, + }, +} 
-def fmt_time(t):
-    return time.strftime('%H:%M:%S', time.localtime(t))

+# The following keys are available:
+# configdir:          Directory to store the global.mk in (applies to check_mk, multisite, mkeventd)
+# pending:            Handler function to create the pending log entry
+# load:               Optional handler to load/parse the file
+# save:               Optional handler to save the file
+# in_global_settings: Set to False to hide whole section from global settings dialog
+def register_configvar_domain(domain, configdir = None, pending = None, save = None, load = None, in_global_settings = True):
+    g_configvar_domains[domain] = {
+        'in_global_settings': in_global_settings,
+    }
+    for k in [ 'configdir', 'pending', 'save', 'load' ]:
+        if locals()[k] is not None:
+            g_configvar_domains[domain][k] = locals()[k]

-def paged_log(log):
-    start = int(html.var('start', 0))
-    if not start:
-        start = int(time.time())

+# Persistence: how the values are stored
+# - WATO stores its variables for main.mk in conf.d/wato/global.mk
+# - Values that the user enters in main.mk must also be known to WATO.
+#   They are used as default values.
+# - Values that the user enters in final.mk or local.mk are completely
+#   ignored by WATO. The admin can override values there that can then
+#   not be changed with WATO anymore. And it is not even visible that
+#   the value cannot be changed.
+# - WATO somehow needs to learn from Check_MK which default values the
+#   variables have, and which settings these variables end up with after
+#   main.mk has been evaluated.
+# - WATO cannot read main.mk on its own, because the context would be
+#   missing (default values of the variables from Check_MK and from the
+#   checks)
+# - --> We implement an automation that outputs all configuration
+#       variables.

-    while True:
-        log_today, times = paged_log_from(log, start)
-        if len(log) == 0 or len(log_today) > 0:
-            return log_today, times
-        else: # No entries today, but log not empty -> go back in time
-            start -= 24 * 3600

+def load_configuration_settings():
+    settings = {}
+    for domain, domain_info in g_configvar_domains.items():
+        if 'load' in domain_info:
+            domain_info['load'](settings)
+        else:
+            load_configuration_vars(domain_info["configdir"] + "global.mk", settings)
+    return settings

-def paged_log_from(log, start):
-    start_time, end_time = get_timerange(start)
-    previous_log_time = None
-    next_log_time = None
-    first_log_index = None
-    last_log_index = None
-    for index, (t, linkinfo, user, action, text) in enumerate(log):
-        if t >= end_time:
-            # This log is too new
+def load_configuration_vars(filename, settings):
+    if not os.path.exists(filename):
+        return {}
+    try:
+        execfile(filename, settings, settings)
+        for varname in settings.keys():
+            if varname not in g_configvars:
+                del settings[varname]
+        return settings
+    except Exception, e:
+        if config.debug:
+            raise MKGeneralException(_("Cannot read configuration file %s: %s" %
+                                       (filename, e)))
+        return {}
+
+
+def save_configuration_settings(vars):
+    per_domain = {}
+    for varname, (domain, valuespec, need_restart, allow_reset, in_global_settings) in g_configvars.items():
+        if varname not in vars:
             continue
-        elif first_log_index is None \
-                and t < end_time \
-                and t >= start_time:
-            # This is a log for this day.
Save the first index - if first_log_index is None: - first_log_index = index + per_domain.setdefault(domain, {})[varname] = vars[varname] - # When possible save the timestamp of the previous log - if index > 0: - next_log_time = int(log[index - 1][0]) + # The global setting wato_enabled is not registered in the configuration domains + # since the user must not change it directly. It is set by D-WATO on slave sites. + if "wato_enabled" in vars: + per_domain.setdefault("multisite", {})["wato_enabled"] = vars["wato_enabled"] - elif t < start_time and last_log_index is None: - last_log_index = index - # This is the next log after this day - previous_log_time = int(log[index][0]) - # Finished! - break + for domain, domain_info in g_configvar_domains.items(): + if 'save' in domain_info: + domain_info['save'](per_domain.get(domain, {})) + else: + dir = domain_info["configdir"] + make_nagios_directory(dir) + save_configuration_vars(per_domain.get(domain, {}), dir + "global.mk") - if last_log_index is None: - last_log_index = len(log) +def save_configuration_vars(vars, filename): + out = create_user_file(filename, 'w') + out.write("# Written by WATO\n# encoding: utf-8\n\n") + for varname, value in vars.items(): + out.write("%s = %s\n" % (varname, pprint.pformat(value))) + +#. +# .--Groups--------------------------------------------------------------. +# | ____ | +# | / ___|_ __ ___ _ _ _ __ ___ | +# | | | _| '__/ _ \| | | | '_ \/ __| | +# | | |_| | | | (_) | |_| | |_) \__ \ | +# | \____|_| \___/ \__,_| .__/|___/ | +# | |_| | +# +----------------------------------------------------------------------+ +# | Mode for editing host-, service- and contact groups | +# '----------------------------------------------------------------------' - return log[first_log_index:last_log_index], (start_time, end_time, previous_log_time, next_log_time) +def find_usages_of_group_in_rules(name, varnames): + used_in = [] + rulesets = load_all_rulesets() + for varname in varnames: + ruleset = rulesets[varname] + rulespec = g_rulespecs[varname] + for folder, rule in ruleset: + value, tag_specs, host_list, item_list, rule_options = parse_rule(rulespec, rule) + if value == name: + used_in.append(("%s: %s" % (_("Ruleset"), g_rulespecs[varname]["title"]), + make_link([("mode", "edit_ruleset"), ("varname", varname)]))) + return used_in -def display_paged((start_time, end_time, previous_log_time, next_log_time)): - html.write('
')

+# Check if a group is currently in use and cannot be deleted
+# Returns a list of occurrences.
+# Possible usages:
+#   - 1. rules: host to contactgroups, services to contactgroups
+#   - 2. user memberships
+def find_usages_of_contact_group(name):
+    # Part 1: Rules
+    used_in = find_usages_of_group_in_rules(name, [ 'host_contactgroups', 'service_contactgroups' ])

-    if next_log_time is not None:
-        html.icon_button(html.makeuri([('start', get_timerange(int(time.time()))[0])]),
-                         _("Most recent events"), "start")
-        html.icon_button(html.makeuri([('start', next_log_time)]),
-                         '%s: %s' % (_("Newer events"), fmt_date(next_log_time)),
-                         "back")
-    else:
-        html.empty_icon_button()
-        html.empty_icon_button()

+    # Is the contactgroup assigned to a user?
+    users = filter_hidden_users(userdb.load_users())
+    entries = users.items()
+    entries.sort(cmp = lambda a, b: cmp(a[1].get("alias"), b[1].get("alias")))
+    for userid, user in entries:
+        cgs = user.get("contactgroups", [])
+        if name in cgs:
+            used_in.append(('%s: %s' % (_('User'), user.get('alias')),
+                           make_link([('mode', 'edit_user'), ('edit', userid)])))

-    if previous_log_time is not None:
-        html.icon_button(html.makeuri([('start', previous_log_time)]),
-                         '%s: %s' % (_("Older events"), fmt_date(previous_log_time)),
-                         "forth")
-    else:
-        html.empty_icon_button()
-    html.write('
    ') + global_config = load_configuration_settings() + # Used in default_user_profile? + domain, valuespec, need_restart, allow_reset, in_global_settings = g_configvars['default_user_profile'] + configured = global_config.get('default_user_profile', {}) + default_value = valuespec.default_value() + if (configured and name in configured['contactgroups']) \ + or name in default_value['contactgroups']: + used_in.append(('%s' % (_('Default User Profile')), + make_link([('mode', 'edit_configvar'), ('varname', 'default_user_profile')]))) -def render_audit_log(log, what, with_filename = False, hilite_others=False): - htmlcode = '' - if what == 'audit': - log, times = paged_log(log) - empty_msg = _("The logfile is empty. No host has been created or changed yet.") - elif what == 'pending': - empty_msg = _("No pending changes, monitoring server is up to date.") + # Is the contactgroup used in mkeventd notify (if available)? + if 'mkeventd_notify_contactgroup' in g_configvars: + domain, valuespec, need_restart, allow_reset, in_global_settings = g_configvars['mkeventd_notify_contactgroup'] + configured = global_config.get('mkeventd_notify_contactgroup') + default_value = valuespec.default_value() + if (configured and name == configured) \ + or name == default_value: + used_in.append(('%s' % (valuespec.title()), + make_link([('mode', 'edit_configvar'), ('varname', 'mkeventd_notify_contactgroup')]))) - if len(log) == 0: - html.write("
<div class=info>%s</div>
    " % empty_msg) - return + return used_in - elif what == 'audit': - htmlcode += "

    " + _("Audit logfile for %s") % fmt_date(times[0]) + "

    " +def find_usages_of_host_group(name): + return find_usages_of_group_in_rules(name, [ 'host_groups' ]) - elif what == 'pending': - if is_distributed(): - htmlcode += "

    " + _("Changes that are not activated on all sites:") + "

    " - else: - htmlcode += "

    " + _("Changes that are not yet activated:") + "

    " +def find_usages_of_service_group(name): + return find_usages_of_group_in_rules(name, [ 'service_groups' ]) - if what == 'audit': - display_paged(times) +def get_nagvis_maps(): + # Find all NagVis maps in the local installation to register permissions + # for each map. When no maps can be found skip this problem silently. + # This only works in OMD environments. + maps = [] + if defaults.omd_root: + nagvis_maps_path = defaults.omd_root + '/etc/nagvis/maps' + for f in os.listdir(nagvis_maps_path): + if f[0] != '.' and f.endswith('.cfg'): + maps.append((f[:-4], f[:-4])) + return maps - htmlcode += '' % what - even = "even" - for t, linkinfo, user, action, text in log: - even = even == "even" and "odd" or "even" - hilite = hilite_others and config.user_id != user - htmlcode += '' % (even, hilite and 2 or 0) - htmlcode += '' % render_linkinfo(linkinfo) - htmlcode += '' % fmt_date(float(t)) - htmlcode += '' % fmt_time(float(t)) - htmlcode += '' +def mode_groups(phase, what): + if what == "host": + what_name = _("host groups") + elif what == "service": + what_name = _("service groups") + elif what == "contact": + what_name = _("contact groups") - htmlcode += '\n' % text - htmlcode += "
    %s%s%s' - if hilite: - htmlcode += '' \ - % _("This change has been made by another user") - htmlcode += user + '%s
    " + if phase == "title": + return what_name.title() - if what == 'audit': - html.write(htmlcode) - display_paged(times) - else: - html.write(htmlcode) + elif phase == "buttons": + global_buttons() + if what == "host": + html.context_button(_("Service groups"), make_link([("mode", "service_groups")]), "hostgroups") + html.context_button(_("New host group"), make_link([("mode", "edit_host_group")]), "new") + elif what == "service": + html.context_button(_("Host groups"), make_link([("mode", "host_groups")]), "servicegroups") + html.context_button(_("New service group"), make_link([("mode", "edit_service_group")]), "new") + else: + html.context_button(_("New contact group"), make_link([("mode", "edit_contact_group")]), "new") + if what == "contact": + html.context_button(_("Rules"), make_link([("mode", "rulesets"), + ("filled_in", "search"), ("search", _("contact group"))]), "rulesets") + else: + varname = what + "_groups" + html.context_button(_("Rules"), make_link([("mode", "edit_ruleset"), ("varname", varname)]), "rulesets") + return -def export_audit_log(): - html.req.content_type = "text/csv; charset=UTF-8" - filename = 'wato-auditlog-%s_%s.csv' % (fmt_date(time.time()), fmt_time(time.time())) - html.req.headers_out['Content-Disposition'] = 'attachment; filename=%s' % filename - titles = ( - _('Date'), - _('Time'), - _('Linkinfo'), - _('User'), - _('Action'), - _('Text'), - ) - html.write(','.join(titles) + '\n') - for t, linkinfo, user, action, text in parse_audit_log("audit"): - if linkinfo == '-': - linkinfo = '' - html.write(','.join((fmt_date(int(t)), fmt_time(int(t)), linkinfo, - user, action, '"' + text + '"')) + '\n') - return False + all_groups = userdb.load_group_information() + groups = all_groups.get(what, {}) + if phase == "action": + if html.var('_delete'): + delname = html.var("_delete") -#. -# .-Automation-----------------------------------------------------------. -# | _ _ _ _ | -# | / \ _ _| |_ ___ _ __ ___ __ _| |_(_) ___ _ __ | -# | / _ \| | | | __/ _ \| '_ ` _ \ / _` | __| |/ _ \| '_ \ | -# | / ___ \ |_| | || (_) | | | | | | (_| | |_| | (_) | | | | | -# | /_/ \_\__,_|\__\___/|_| |_| |_|\__,_|\__|_|\___/|_| |_| | -# | | -# +----------------------------------------------------------------------+ -# | This code section deals with the interaction of Check_MK. It is used | -# | for doing inventory, showing the services of a host, deletion of a | -# | host and similar things. | -# '----------------------------------------------------------------------' + if what == 'contact': + usages = find_usages_of_contact_group(delname) + elif what == 'host': + usages = find_usages_of_host_group(delname) + elif what == 'service': + usages = find_usages_of_service_group(delname) -def check_mk_automation(siteid, command, args=[], indata=""): - if not siteid or site_is_local(siteid): - return check_mk_local_automation(command, args, indata) - else: - return check_mk_remote_automation(siteid, command, args, indata) + if usages: + message = "%s
<br>%s:<ul>" % \
+                (_("You cannot delete this %s group.") % what,
+                 _("It is still in use by"))
+            for title, link in usages:
+                message += '<li><a href="%s">%s</a></li>\n' % (link, title)
+            message += "</ul>
    " + raise MKUserError(None, message) + confirm_txt = _('Do you really want to delete the %s group "%s"?') % (what, delname) -def check_mk_local_automation(command, args=[], indata=""): - # Gather the command to use for executing --automation calls to check_mk - # - First try to use the check_mk_automation option from the defaults - # - When not set try to detect the command for OMD or non OMD installations - # - OMD 'own' apache mode or non OMD: check_mk --automation - # - OMD 'shared' apache mode: Full path to the binary and the defaults - sudoline = None - if defaults.check_mk_automation: - commandargs = defaults.check_mk_automation.split() - cmd = commandargs + [ command, '--' ] + args - else: - omd_mode, omd_site = html.omd_mode() - if not omd_mode or omd_mode == 'own': - commandargs = [ 'check_mk', '--automation' ] - cmd = commandargs + [ command, '--' ] + args - else: # OMD shared mode - commandargs = [ 'sudo', '/bin/su', '-', omd_site, '-c', 'check_mk --automation' ] - cmd = commandargs[:-1] + [ commandargs[-1] + ' ' + ' '.join([ command, '--' ] + args) ] - sudoline = "%s ALL = (root) NOPASSWD: /bin/su - %s -c check_mk\\ --automation\\ *" % (html.apache_user(), omd_site) + c = wato_confirm(_("Confirm deletion of group \"%s\"" % delname), confirm_txt) + if c: + del groups[delname] + save_group_information(all_groups) + if what == 'contact': + hooks.call('contactgroups-saved', all_groups) + log_pending(SYNCRESTART, None, "edit-%sgroups", _("Deleted %s group %s" % (what, delname))) + elif c == False: + return "" - sudo_msg = '' - if commandargs[0] == 'sudo': - if not sudoline: - if commandargs[1] == '-u': # skip -u USER in /etc/sudoers - sudoline = "%s ALL = (%s) NOPASSWD: %s *" % (html.apache_user(), commandargs[2], " ".join(commandargs[3:])) - else: - sudoline = "%s ALL = (root) NOPASSWD: %s *" % (html.apache_user(), commandargs[0], " ".join(commandargs[1:])) + return None - sudo_msg = ("

-    sudo_msg = ("<p>The webserver is running as user which has no rights on the "
-                "needed Check_MK/Nagios files.<br>Please ensure you have set-up "
-                "the sudo environment correctly. e.g. proceed as follows:</p>\n"
-                "<ol><li>install sudo package</li>\n"
-                "<li>Append the following to the /etc/sudoers file:\n"
-                "<pre># Needed for WATO - the Check_MK Web Administration Tool\n"
-                "Defaults:%s !requiretty\n"
-                "%s\n"
-                "</pre></li>\n"
-                "<li>Retry this operation</li></ol>\n" %
-                (html.apache_user(), sudoline))

+    sorted = groups.items()
+    sorted.sort(cmp = lambda a,b: cmp(a[1]['alias'], b[1]['alias']))
+    if len(sorted) == 0:
+        if what == "contact":
+            render_main_menu([
+                ( "edit_contact_group", _("Create new contact group"), "new",
+                  what == "contact" and "users" or "groups",
+                  _("Contact groups are needed for assigning hosts and services to people (contacts)"))])
+        else:
+            html.write("
    " + _("No groups are defined yet.") + "
    ") + return - if command == 'restart': - try: - call_hook_pre_activate_changes() - except Exception, e: - if config.debug: - raise - html.show_error("

    Cannot activate changes

    %s" % e) - return + # Show member of contact groups + if what == "contact": + users = filter_hidden_users(userdb.load_users()) + members = {} + for userid, user in users.items(): + cgs = user.get("contactgroups", []) + for cg in cgs: + members.setdefault(cg, []).append((userid, user.get('alias', userid))) - if config.debug: - log_audit(None, "automation", "Automation: %s" % " ".join(cmd)) - try: - # This debug output makes problems when doing bulk inventory, because - # it garbles the non-HTML response output - # if config.debug: - # html.write("
    Running %s
    \n" % " ".join(cmd)) - p = subprocess.Popen(cmd, - stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) - except Exception, e: - if commandargs[0] == 'sudo': - raise MKGeneralException("Cannot execute %s: %s

    %s" % (commandargs[0], e, sudo_msg)) - else: - raise MKGeneralException("Cannot execute %s: %s" % (commandargs[0], e)) - p.stdin.write(repr(indata)) - p.stdin.close() - outdata = p.stdout.read() - exitcode = p.wait() - if exitcode != 0: - if config.debug: - log_audit(None, "automation", "Automation command %s failed with exit code %d: %s" % (" ".join(cmd), exitcode, outdata)) - raise MKGeneralException("Error running %s (exit code %d):
    %s
    %s" % - (" ".join(cmd), exitcode, hilite_errors(outdata), outdata.lstrip().startswith('sudo:') and sudo_msg or '')) - else: - raise MKGeneralException("

    %s

    %s" % (_("Error"), hilite_errors(outdata))) + table.begin(what + "groups") + for name, group in sorted: + table.row() + table.cell(_("Actions"), css="buttons") + edit_url = make_link([("mode", "edit_%s_group" % what), ("edit", name)]) + delete_url = html.makeactionuri([("_delete", name)]) + clone_url = make_link([("mode", "edit_%s_group" % what), ("clone", name)]) + html.icon_button(edit_url, _("Properties"), "edit") + html.icon_button(clone_url, _("Create a copy of this group"), "clone") + html.icon_button(delete_url, _("Delete"), "delete") - # On successful "restart" command execute the activate changes hook - if command == 'restart': - call_hook_activate_changes() + table.cell(_("Name"), name) + table.cell(_("Alias"), group['alias']) - try: - if config.debug: - log_audit(None, "automation", "Result from automation: %s" % outdata) - return eval(outdata) - except Exception, e: - if config.debug: - log_audit(None, "automation", "Automation command %s failed: invalid output: %s" % (" ".join(cmd), outdata)) - raise MKGeneralException("Error running %s. Invalid output from webservice (%s):
    %s
    " % - (" ".join(cmd), e, outdata)) + if what == "contact": + table.cell(_("Members")) + html.write(", ".join( + [ '%s' % (make_link([("mode", "edit_user"), ("edit", userid)]), alias) + for userid, alias in members.get(name, [])])) + table.end() -def hilite_errors(outdata): - return re.sub("\nError: *([^\n]*)", "\n
    Error: \\1
    ", outdata) +def mode_edit_group(phase, what): + name = html.var("edit") # missing -> new group + new = name == None -#. -# .-Progress-------------------------------------------------------------. -# | ____ | -# | | _ \ _ __ ___ __ _ _ __ ___ ___ ___ | -# | | |_) | '__/ _ \ / _` | '__/ _ \/ __/ __| | -# | | __/| | | (_) | (_| | | | __/\__ \__ \ | -# | |_| |_| \___/ \__, |_| \___||___/___/ | -# | |___/ | -# +----------------------------------------------------------------------+ -# | Bulk inventory and other longer procedures are separated in single | -# | steps and run by an JavaScript scheduler showing a progress bar and | -# | buttons for aborting and pausing. | -# '----------------------------------------------------------------------' + if phase == "title": + if new: + if what == "host": + return _("Create new host group") + elif what == "service": + return _("Create new service group") + elif what == "contact": + return _("Create new contact group") + else: + if what == "host": + return _("Edit host group") + elif what == "service": + return _("Edit service group") + elif what == "contact": + return _("Edit contact group") -# success_stats: Fields from the stats list to use for checking if something has been found -# fail_stats: Fields from the stats list to used to count failed elements -def interactive_progress(items, title, stats, finishvars, timewait, success_stats = [], termvars = [], fail_stats = []): - if not termvars: - termvars = finishvars; - html.write("
    ") - html.write("") - html.write("" % title) - html.write("") - html.write("") - html.write("") - html.write("") - html.write("
    %s
    ") - html.write(" " - "
    ") - html.write("
    ") - html.write(" ") - html.write("
    ") - html.write(" ") - for num, (label, value) in enumerate(stats): - html.write(" " % (label, num, value)) - html.write("
    %s%d
    ") - html.write("
    ") - html.jsbutton('progress_pause', _('Pause'), 'javascript:progress_pause()') - html.jsbutton('progress_proceed', _('Proceed'), 'javascript:progress_proceed()', 'display:none') - html.jsbutton('progress_finished', _('Finish'), 'javascript:progress_end()', 'display:none') - html.jsbutton('progress_retry', _('Retry Failed Hosts'), 'javascript:progress_retry()', 'display:none') - html.jsbutton('progress_restart', _('Restart'), 'javascript:location.reload()') - html.jsbutton('progress_abort', _('Abort'), 'javascript:progress_end()') - html.write("
    ") - html.write("
    ") - json_items = '[ %s ]' % ',\n'.join([ "'" + h + "'" for h in items ]) - success_stats = '[ %s ]' % ','.join(map(str, success_stats)) - fail_stats = '[ %s ]' % ','.join(map(str, fail_stats)) - # Remove all sel_* variables. We do not need them for our ajax-calls. - # They are just needed for the Abort/Finish links. Those must be converted - # to POST. - base_url = html.makeuri([], remove_prefix = "sel") - finish_url = make_link([("mode", "folder")] + finishvars) - term_url = make_link([("mode", "folder")] + termvars) + elif phase == "buttons": + html.context_button(_("All groups"), make_link([("mode", "%s_groups" % what)]), "back") + return - html.javascript(('progress_scheduler("%s", "%s", 50, %s, "%s", %s, %s, "%s", "' + _("FINISHED.") + '");') % - (html.var('mode'), base_url, json_items, finish_url, - success_stats, fail_stats, term_url)) + all_groups = userdb.load_group_information() + groups = all_groups.setdefault(what, {}) + edit_nagvis_map_permissions = what == 'contact' and defaults.omd_root + if edit_nagvis_map_permissions: + vs_nagvis_maps = ListChoice( + title = _('NagVis Maps'), + choices = get_nagvis_maps, + toggle_all = True, + ) -#. -# .-Attributes-----------------------------------------------------------. -# | _ _ _ _ _ _ | -# | / \ | |_| |_ _ __(_) |__ _ _| |_ ___ ___ | -# | / _ \| __| __| '__| | '_ \| | | | __/ _ \/ __| | -# | / ___ \ |_| |_| | | | |_) | |_| | || __/\__ \ | -# | /_/ \_\__|\__|_| |_|_.__/ \__,_|\__\___||___/ | -# | | -# +----------------------------------------------------------------------+ -# | Attributes of hosts are based on objects and are extendable via | -# | WATO plugins. | -# '----------------------------------------------------------------------' + if not new: + permitted_maps = groups[name].get('nagvis_maps', []) + else: + permitted_maps = [] -class Attribute: - # The constructor stores name and title. If those are - # dynamic than leave them out and override name() and - # title() - def __init__(self, name=None, title=None, help=None, default_value=None): - self._name = name - self._title = title - self._help = help - self._default_value = default_value + if phase == "action": + if html.check_transaction(): + alias = html.var_utf8("alias").strip() + if not alias: + raise MKUserError("alias", _("Please specify an alias name.")) - # Return the name (= identifier) of the attribute - def name(self): - return self._name + unique, info = is_alias_used(what, name, alias) + if not unique: + raise MKUserError("alias", info) - # Return the name of the Nagios configuration variable - # if this is a Nagios-bound attribute (e.g. "alias" or "_SERIAL") - def nagios_name(self): - return None + if new: + name = html.var("name").strip() + if len(name) == 0: + raise MKUserError("name", _("Please specify a name of the new group.")) + if ' ' in name: + raise MKUserError("name", _("Sorry, spaces are not allowed in group names.")) + if not re.match("^[-a-z0-9A-Z_]*$", name): + raise MKUserError("name", _("Invalid group name. 
Only the characters a-z, A-Z, 0-9, _ and - are allowed.")) + if name in groups: + raise MKUserError("name", _("Sorry, there is already a group with that name")) + groups[name] = { + 'alias': alias, + } + log_pending(SYNCRESTART, None, "edit-%sgroups" % what, _("Create new %s group %s") % (what, name)) + else: + groups[name] = { + 'alias': alias, + } + log_pending(SYNCRESTART, None, "edit-%sgroups" % what, _("Updated properties of %s group %s") % (what, name)) - # Return the title to be displayed to the user - def title(self): - return self._title + if edit_nagvis_map_permissions: + permitted_maps = vs_nagvis_maps.from_html_vars('nagvis_maps') + vs_nagvis_maps.validate_value(permitted_maps, 'nagvis_maps') + if permitted_maps: + groups[name]['nagvis_maps'] = permitted_maps - # Return an optional help text - def help(self): - return self._help + save_group_information(all_groups) + if what == 'contact': + hooks.call('contactgroups-saved', all_groups) - # Return the default value for new hosts - def default_value(self): - return self._default_value + return what + "_groups" - # Render HTML code displaying a value - def paint(self, value, hostname): - return "", value - # Wether or not to show this attribute in tables. - # This value is set by declare_host_attribute - def show_in_table(self): - return self._show_in_table + html.begin_form("group") + forms.header(_("Properties")) + forms.section(_("Name"), simple = not new) + html.help(_("The name of the group is used as an internal key. It cannot be " + "changed later. It is also visible in the status GUI.")) + if new: + clone_group = html.var("clone") + html.text_input("name", clone_group or "") + html.set_focus("name") + else: + clone_group = None + html.write(name) + html.set_focus("alias") - # Wether or not to show this attribute in the edit form. - # This value is set by declare_host_attribute - def show_in_form(self): - return self._show_in_form + forms.section(_("Alias")) + html.help(_("An Alias or description of this group.")) + alias = groups.get(name, {}).get('alias', '') + if not alias: + if clone_group: + alias = groups.get(clone_group, {}).get('alias', '') + else: + alias = name + html.text_input("alias", alias) - # Wether or not to make this attribute configurable in - # files and folders (as defaule value for the hosts) - def show_in_folder(self): - return self._show_in_folder + # Show permissions for NagVis maps if any of those exist + if edit_nagvis_map_permissions and get_nagvis_maps(): + forms.header(_("Permissions")) + forms.section(_("Access to NagVis Maps")) + html.help(_("Configure access permissions to NagVis maps.")) + vs_nagvis_maps.render_input('nagvis_maps', permitted_maps) - # Wether or not this attribute can be edited after creation - # of the object - def editable(self): - return self._editable + forms.end() + html.button("save", _("Save")) + html.hidden_fields() + html.end_form() - # Wether it is allowed that a host has no explicit - # value here (inherited or direct value). An mandatory - # has *no* default value. 
- def is_mandatory(self): - return False +def save_group_information(all_groups): + # Split groups data into Check_MK/Multisite parts + check_mk_groups = {} + multisite_groups = {} + + for what, groups in all_groups.items(): + check_mk_groups[what] = {} + for gid, group in groups.items(): + check_mk_groups[what][gid] = group['alias'] + + for attr, value in group.items(): + if attr != 'alias': + multisite_groups.setdefault(what, {}) + multisite_groups[what].setdefault(gid, {}) + multisite_groups[what][gid][attr] = value - # Return information about the user roles we depend on. - # The method is usually not overridden, but the variable - # _depends_on_roles is set by declare_host_attribute(). - def depends_on_roles(self): - try: - return self._depends_on_roles - except: - return [] + # Save Check_MK world related parts + make_nagios_directory(root_dir) + out = create_user_file(root_dir + "groups.mk", "w") + out.write("# Written by WATO\n# encoding: utf-8\n\n") + for what in [ "host", "service", "contact" ]: + if what in check_mk_groups and len(check_mk_groups[what]) > 0: + out.write("if type(define_%sgroups) != dict:\n define_%sgroups = {}\n" % (what, what)) + out.write("define_%sgroups.update(%s)\n\n" % (what, pprint.pformat(check_mk_groups[what]))) - # Return information about the host tags we depend on. - # The method is usually not overridden, but the variable - # _depends_on_tags is set by declare_host_attribute(). - def depends_on_tags(self): - try: - return self._depends_on_tags - except: - return [] + # Save the Multisite-specific parts of the group definitions + filename = multisite_dir + "groups.mk.new" + make_nagios_directory(multisite_dir) + out = create_user_file(filename, "w") + out.write("# Written by WATO\n# encoding: utf-8\n\n") + for what in [ "host", "service", "contact" ]: + if what in multisite_groups and len(multisite_groups[what]) > 0: + out.write("multisite_%sgroups = \\\n%s\n\n" % (what, pprint.pformat(multisite_groups[what]))) + out.close() + os.rename(filename, filename[:-4]) - # Render HTML input fields displaying the value and - # make it editable. If filter == True, then the field - # is to be displayed in filter mode (as part of the - # search filter) - def render_input(self, value): - pass +class GroupSelection(ElementSelection): + def __init__(self, what, **kwargs): + kwargs.setdefault('empty_text', _('You have not defined any %s group yet. Please ' + 'create at least one first.') % what) + ElementSelection.__init__(self, **kwargs) + self._what = what + # Allow to have "none" entry with the following title + self._no_selection = kwargs.get("no_selection") + + def get_elements(self): + all_groups = userdb.load_group_information() + this_group = all_groups.get(self._what, {}) + # replace the title with the key if the title is empty + elements = [ (k, t['alias'] and t['alias'] or k) for (k, t) in this_group.items() ] + if self._no_selection: + # Beware: ElementSelection currently can only handle string + # keys, so we cannot take 'None' as a value. + elements.append(('', self._no_selection)) + return dict(elements) - # Create value from HTML variables. 
- def from_html_vars(self): - return None +class CheckTypeGroupSelection(ElementSelection): + def __init__(self, checkgroup, **kwargs): + ElementSelection.__init__(self, **kwargs) + self._checkgroup = checkgroup - # Check whether this attribute needs to be validated at all - # Attributes might be permanently hidden (show_in_form = False) - # or dynamically hidden by the depends_on_tags, editable features - def needs_validation(self): - if not self._show_in_form: - return False - return html.var('attr_display_%s' % self._name, "1") == "1" + def get_elements(self): + checks = check_mk_local_automation("get-check-information") + elements = dict([ (cn, "%s - %s" % (cn, c["title"])) for (cn, c) in checks.items() + if c.get("group") == self._checkgroup ]) + return elements - # Check if the value entered by the user is valid. - # This method may raise MKUserError in case of invalid user input. - def validate_input(self): - pass + def value_to_text(self, value): + return "%s" % value - # If this attribute should be present in Nagios as - # a host custom macro, then the value of that macro - # should be returned here - otherwise None - def to_nagios(self, value): - return None - # Checks if the give value matches the search attributes - # that are represented by the current HTML variables. - def filter_matches(self, crit, value, hostname): - return crit == value +#. +# .--Notifications-(Rule Based)------------------------------------------. +# | _ _ _ _ __ _ _ _ | +# | | \ | | ___ | |_(_)/ _(_) ___ __ _| |_(_) ___ _ __ ___ | +# | | \| |/ _ \| __| | |_| |/ __/ _` | __| |/ _ \| '_ \/ __| | +# | | |\ | (_) | |_| | _| | (_| (_| | |_| | (_) | | | \__ \ | +# | |_| \_|\___/ \__|_|_| |_|\___\__,_|\__|_|\___/|_| |_|___/ | +# | | +# +----------------------------------------------------------------------+ +# | Module for managing the new rule based notifications. | +# '----------------------------------------------------------------------' - # Host tags to set for this host - def get_tag_list(self, value): +def load_notification_rules(): + filename = root_dir + "notifications.mk" + if not os.path.exists(filename): + return [] + try: + vars = { "notification_rules" : [] } + execfile(filename, vars, vars) + notification_rules = vars["notification_rules"] + # Convert to new plugin configuration format + for rule in notification_rules: + if "notify_method" in rule: + method = rule["notify_method"] + plugin = rule["notify_plugin"] + del rule["notify_method"] + rule["notify_plugin"] = ( plugin, method ) + return notification_rules + except Exception, e: + if config.debug: + raise MKGeneralException(_("Cannot read configuration file %s: %s" % + (filename, e))) return [] +def save_notification_rules(rules): + make_nagios_directory(root_dir) + file(root_dir + "notifications.mk", "w").write("notification_rules += %s\n" % pprint.pformat(rules)) -# A simple text attribute. 
It is stored in -# a Python unicode string -class TextAttribute(Attribute): - def __init__(self, name, title, help = None, default_value="", mandatory=False, allow_empty=True, size=25): - Attribute.__init__(self, name, title, help, default_value) - self._mandatory = mandatory - self._allow_empty = allow_empty - self._size = size - - def paint(self, value, hostname): - if not value: - return "", "" - else: - return "", value - def is_mandatory(self): - return self._mandatory +def FolderChoice(**kwargs): + kwargs["choices"] = folder_selection(g_root_folder) + kwargs.setdefault("title", _("Folder")) + return DropdownChoice(**kwargs) - def render_input(self, value): - if value == None: - value = "" - html.text_input("attr_" + self.name(), value, size = self._size) - def from_html_vars(self): - value = html.var_utf8("attr_" + self.name()) - if value == None: - value = "" - return value.strip() +class GroupChoice(DualListChoice): + def __init__(self, what, **kwargs): + DualListChoice.__init__(self, **kwargs) + self.what = what + self._choices = lambda: self.load_groups() - def validate_input(self): - value = self.from_html_vars() - if self._mandatory and not value: - raise MKUserError("attr_" + self.name(), - _("Please specify a value for %s") % self.title()) - if value.strip() == "" and not self._allow_empty: - raise MKUserError("attr_" + self.name(), - _("%s may be missing, if must not be empty if it is set.") % self.title()) + def load_groups(self): + all_groups = userdb.load_group_information() + this_group = all_groups.get(self.what, {}) + return [ (k, t['alias'] and t['alias'] or k) for (k, t) in this_group.items() ] +def vs_notification_bulkby(): + return ListChoice( + title = _("Create separate notification bulks based on"), + choices = [ + ( "folder", _("Folder") ), + ( "host", _("Host") ), + ( "service", _("Service description") ), + ( "sl", _("Service level") ), + ( "check_type", _("Check type") ), + ( "state", _("Host/Service state") ), + ], + default_value = [ "host" ], + ) - def filter_matches(self, crit, value, hostname): - if value == None: # Host does not have this attribute - value = "" - return crit.lower() in value.lower() +def vs_notification_scripts(): + return DropdownChoice( + title = _("Notification Script"), + choices = notification_script_choices, + default_value = "mail" + ) -# A simple text attribute that is not editable by the user. -# It can be used to store context information from other -# systems (e.g. during an import of a host database from -# another system). 
-class FixedTextAttribute(TextAttribute): - def __init__(self, name, title, help = None): - TextAttribute.__init__(self, name, title, help, None) - self._mandatory = False +def vs_notification_methods(): + return CascadingDropdown( + title = _("Notification Method"), + choices = notification_script_choices_with_parameters, + default_value = ( "mail", {} ) + ) - def render_input(self, value): - if value != None: - html.hidden_field("attr_" + self.name(), value) - html.write(value) +def vs_notification_rule(userid = None): + if userid: + contact_headers = [] + section_contacts = [] + section_override = [] + else: + contact_headers = [ + ( _("Contact Selection"), [ "contact_all", "contact_all_with_email", "contact_object", + "contact_users", "contact_groups", "contact_emails" ] ), + ] + section_contacts = [ + # Contact selection + ( "contact_object", + Checkbox( + title = _("All contacts of the notified object"), + label = _("Notify all contacts of the notified host or service."), + default_value = True, + ) + ), + ( "contact_all", + Checkbox( + title = _("All users"), + label = _("Notify all users"), + ) + ), + ( "contact_all_with_email", + Checkbox( + title = _("All users with an email address"), + label = _("Notify all users that have configured an email address in their profile"), + ) + ), + ( "contact_users", + ListOf( + UserSelection(only_contacts = False), + title = _("The following users"), + help = _("Enter a list of user IDs to be notified here. These users need to be members " + "of at least one contact group in order to be notified."), + movable = False, + ) + ), + ( "contact_groups", + ListOf( + GroupSelection("contact"), + title = _("The members of certain contact groups"), + movable = False, + ) + ), + ( "contact_emails", + ListOfStrings( + valuespec = EmailAddress(size = 44), + title = _("The following explicit email addresses"), + orientation = "vertical", + ) + ), + ] + section_override = [ + ( "allow_disable", + Checkbox( + title = _("Overriding by users"), + help = _("If you uncheck this option then users are not allowed to deactivate notifications " + "that are created by this rule."), + label = _("allow users to deactivate this notification"), + default_value = True, + ) + ), + ] - def from_html_vars(self): - return html.var("attr_" + self.name()) + return Dictionary( + title = _("Rule Properties"), + elements = [ + # General Properties + ( "description", + TextUnicode( + title = _("Description"), + help = _("You can use this description for commenting your rules. 
It has no influence on the notification."), + size = 64, + attrencode = True, + allow_empty = False, + )), + ( "comment", + TextAreaUnicode( + title = _("Comment"), + help = _("An optional comment that explains the purpose of this rule."), + rows = 5, + ) + ), + ( "disabled", + Checkbox( + title = _("Rule activation"), + help = _("Disabled rules are kept in the configuration but are not applied."), + label = _("do not apply this rule"), + ) + ), + ] + section_override + + [ + # Matching + ( "match_folder", + FolderChoice( + help = _("This condition makes the rule match only hosts that are managed " + "via WATO and that are contained in this folder - either directly " + "or in one of its subfolders."), + ), + ), + ( "match_hosttags", + HostTagCondition( + title = _("Match Host Tags")) + ), + ( "match_hostgroups", + GroupChoice("host", + title = _("Match Host Groups"), + help = _("The host must be in one of the selected host groups"), + allow_empty = False, + ) + ), + ( "match_hosts", + ListOfStrings( + title = _("Match only the following hosts"), + size = 24, + orientation = "horizontal", + allow_empty = False, + empty_text = _("Please specify at least one host. Disable the option if you want to allow all hosts."), + ) + ), + ( "match_exclude_hosts", + ListOfStrings( + title = _("Exclude the following hosts"), + size = 24, + orientation = "horizontal", + ) + ), + ( "match_services", + ListOfStrings( + title = _("Match only the following services"), + help = _("Specify a list of regular expressions that must match the beginning of the " + "service name in order for the rule to match. Note: Host notifications never match this " + "rule if this option is being used."), + valuespec = TextUnicode(size = 32), + orientation = "horizontal", + allow_empty = False, + empty_text = _("Please specify at least one service regex. Disable the option if you want to allow all services."), + ) + ), + ( "match_servicegroups", + GroupChoice("service", + title = _("Match Service Groups"), + help = _("The service must be in one of the selected service groups"), + allow_empty = False, + ) + ), + ( "match_contactgroups", + GroupChoice("contact", + title = _("Match Contact Groups (CMC only)"), + help = _("The host/service must be in one of the selected contact groups. This only works with Check_MK Micro Core. " \ + "If you don't use the CMC, this filter will not apply."), + allow_empty = False, + ) + ), + ( "match_exclude_services", + ListOfStrings( + title = _("Do not match the following services"), + valuespec = TextUnicode(size = 32), + orientation = "horizontal", + ) + ), + ( "match_plugin_output", + RegExp( + title = _("Match the output of the check plugin"), + help = _("This text is a regular expression that is being searched in the output " + "of the check plugins that produced the alert. It is not a prefix but an infix match."), + ), + ), + ( "match_checktype", + CheckTypeSelection( + title = _("Match the following check types"), + help = _("Only apply the rule if the notification originates from certain types of check plugins. 
" + "Note: Host notifications never match this rule if this option is being used."), + ) + ), + ( "match_timeperiod", + TimeperiodSelection( + title = _("Match only during timeperiod"), + help = _("Match this rule only during times where the selected timeperiod from the monitoring " + "system is active."), + ), + ), + ( "match_escalation", + Tuple( + title = _("Restrict to nth to mth notification"), + orientation = "float", + elements = [ + Integer( + label = _("from"), + help = _("Let through notifications counting from this number. " + "For normal alerts the first notification has the number 1. " + "For custom notifications the number is 0."), + default_value = 0, + minvalue = 0, + maxvalue = 999999, + ), + Integer( + label = _("to"), + help = _("Let through notifications counting upto this number"), + default_value = 999999, + minvalue = 1, + maxvalue = 999999, + ), + ], + ), + ), + ( "match_escalation_throttle", + Tuple( + title = _("Throttle periodic notifications"), + help = _("This match option allows you to throttle periodic notifications after " + "a certain number of notifications have been created by the monitoring " + "core. If you for example select 10 as the beginning and 5 as the rate " + "then you will receive the notification 1 through 10 and then 15, 20, " + "25.. and so on."), + orientation = "float", + elements = [ + Integer( + label = _("beginning from notifcation number"), + default_value = 10, + minvalue = 1, + ), + Integer( + label = _("send only every"), + default_value = 5, + unit = _("th notification"), + minvalue = 1, + ) + ], + ) + ), + ( "match_sl", + Tuple( + title = _("Match service level"), + help = _("Host or service must be in the following service level to get notification"), + orientation = "horizontal", + show_titles = False, + elements = [ + DropdownChoice(label = _("from:"), choices = service_levels, prefix_values = True), + DropdownChoice(label = _(" to:"), choices = service_levels, prefix_values = True), + ], + ), + ), + ( "match_host_event", + ListChoice( + title = _("Match host event type"), + help = _("Select the host event types and transitions this rule should handle. Note: " + "If you activate this option and do not also specify service event " + "types then this rule will never hold for service notifications!"), + choices = [ + ( 'rd', _("UP") + u" ➤ " + _("DOWN")), + ( 'dr', _("DOWN") + u" ➤ " + _("UP")), + ( 'ru', _("UP") + u" ➤ " + _("UNREACHABLE")), + ( 'du', _("DOWN") + u" ➤ " + _("UNREACHABLE")), + ( 'ud', _("UNREACHABLE") + u" ➤ " + _("DOWN")), + ( 'ur', _("UNREACHABLE") + u" ➤ " + _("UP")), + ( 'f', _("Start or end of flapping state")), + ( 's', _("Start or end of a scheduled downtime ")), + ( 'x', _("Acknowledgement of host problem")), + ], + default_value = [ 'rd', 'dr', 'f', 's', 'x' ], + ) + ), + ( "match_service_event", + ListChoice( + title = _("Match service event type"), + help = _("Select the service event types and transitions this rule should handle. 
Note: " + "If you activate this option and do not also specify host event " + "types then this rule will never hold for host notifications!"), + choices = [ + ( 'rw', _("OK") + u" ➤ " + _("WARN")), + ( 'rc', _("OK") + u" ➤ " + _("CRIT")), + ( 'ru', _("OK") + u" ➤ " + _("UNKNOWN")), + + ( 'wr', _("WARN") + u" ➤ " + _("OK")), + ( 'wc', _("WARN") + u" ➤ " + _("CRIT")), + ( 'wu', _("WARN") + u" ➤ " + _("UNKNOWN")), + + ( 'cr', _("CRIT") + u" ➤ " + _("OK")), + ( 'cw', _("CRIT") + u" ➤ " + _("WARN")), + ( 'cu', _("CRIT") + u" ➤ " + _("UNKNOWN")), + + ( 'ur', _("UNKNOWN") + u" ➤ " + _("OK")), + ( 'uw', _("UNKNOWN") + u" ➤ " + _("WARN")), + ( 'uc', _("UNKNOWN") + u" ➤ " + _("CRIT")), + + ( 'f', _("Start or end of flapping state")), + ( 's', _("Start or end of a scheduled downtime")), + ( 'x', _("Acknowledgement of service problem")), + ], + default_value = [ 'rw', 'rc', 'ru', 'wc', 'wu', 'uc', 'f', 's', 'x' ], + ) + ), + ( "match_ec", + Alternative( + title = _("Event Console alerts"), + help = _("The Event Console can have events create notifications in Check_MK. " + "These notifications will be processed by the rule based notification " + "system of Check_MK. This matching option helps you distinguishing " + "and also gives you access to special event fields."), + style = "dropdown", + elements = [ + FixedValue(False, title = _("Do not match Event Console alerts"), totext=""), + Dictionary( + title = _("Match only Event Console alerts"), + elements = [ + ( "match_rule_id", + ID(title = _("Match event rule"), label = _("Rule ID:"), size=12, allow_empty=False), + ), + ( "match_priority", + Tuple( + title = _("Match syslog priority"), + help = _("Define a range of syslog priorities this rule matches"), + orientation = "horizontal", + show_titles = False, + elements = [ + DropdownChoice(label = _("from:"), choices = mkeventd.syslog_priorities, default_value = 4), + DropdownChoice(label = _(" to:"), choices = mkeventd.syslog_priorities, default_value = 0), + ], + ), + ), + ( "match_facility", + DropdownChoice( + title = _("Match syslog facility"), + help = _("Make the rule match only if the event has a certain syslog facility. 
" + "Messages not having a facility are classified as user."), + choices = mkeventd.syslog_facilities, + ) + ), + ( "match_comment", + RegExpUnicode( + title = _("Match event comment"), + help = _("This is a regular expression for matching the event's comment."), + ) + ), + ] + ) + ] + ) + ) + ] + + section_contacts + + [ + # Notification + ( "notify_plugin", + vs_notification_methods(), + ), -# A text attribute that is stored in a Nagios custom macro -class NagiosTextAttribute(TextAttribute): - def __init__(self, name, nag_name, title, help = None, default_value="", mandatory = False, allow_empty=True): - TextAttribute.__init__(self, name, title, help, default_value, mandatory, allow_empty) - self.nag_name = nag_name + # ( "notify_method", + # Alternative( + # title = _("Parameters / Cancelling"), + # style = "dropdown", + # elements = [ + # ListOfStrings( + # title = _("Call the script with the following parameters"), + # valuespec = TextUnicode(size = 24), + # orientation = "horizontal", + # ), + # FixedValue( + # value = None, + # title = _("Cancel all previous notifications with this method"), + # totext = "", + # ), + # ] + # ) + # ), + + ( "bulk", + Dictionary( + title = _("Notification Bulking"), + help = _("Enabling the bulk notifications will collect several subsequent notifications " + "for the same contact into one single notification, which lists of all the " + "actual problems, e.g. in a single emails. This cuts down the number of notifications " + "in cases where many (related) problems occur within a short time."), + elements = [ + ( "interval", + Age( + title = _("Time horizon"), + label = _("Bulk up to"), + help = _("Notifications are kept back for bulking at most for this time."), + default_value = 60, + ) + ), + ( "count", + Integer( + title = _("Maximum bulk size"), + label = _("Bulk up to"), + unit = _("Notifications"), + help = _("At most that many Notifications are kept back for bulking. A value of " + "1 essentially turns of notification bulking."), + default_value = 1000, + minvalue = 1, + ), + ), + ( "groupby", + vs_notification_bulkby(), + ), + ( "groupby_custom", + ListOfStrings( + valuespec = ID(), + orientation = "horizontal", + title = _("Create separate notification bulks for different values of the following custom macros"), + help = _("If you enter the names of host/service-custom macros here then for each different " + "combination of values of those macros a separate bulk will be created. This can be used " + "in combination with the grouping by folder, host etc. Omit any leading underscore. " + "Note: If you are using " + "Nagios as a core you need to make sure that the values of the required macros are " + "present in the notification context. This is done in check_mk_templates.cfg. 
If you " + "macro is _FOO then you need to add the variables NOTIFY_HOST_FOO and " + "NOTIFY_SERVICE_FOO."), + )), + ], + columns = 1, + optional_keys = False, + ), + ), - def nagios_name(self): - return self.nag_name + ], + optional_keys = [ "match_folder", "match_hosttags", "match_hostgroups", "match_hosts", "match_exclude_hosts", + "match_services", "match_servicegroups", "match_contactgroups", "match_exclude_services", "match_plugin_output", + "match_timeperiod", "match_escalation", "match_escalation_throttle", + "match_sl", "match_host_event", "match_service_event", "match_ec", + "match_checktype", "bulk", "contact_users", "contact_groups", "contact_emails" ], + headers = [ + ( _("General Properties"), [ "description", "comment", "disabled", "allow_disable" ] ), + ( _("Notification Method"), [ "notify_plugin", "notify_method", "bulk" ] ),] + + contact_headers + + [ + ( _("Conditions"), [ "match_folder", "match_hosttags", "match_hostgroups", "match_hosts", "match_exclude_hosts", + "match_services", "match_servicegroups", "match_contactgroups", "match_exclude_services", "match_plugin_output", + "match_checktype", "match_timeperiod", + "match_escalation", "match_escalation_throttle", + "match_sl", "match_host_event", "match_service_event", "match_ec" ] ), + ], + render = "form", + form_narrow = True, + validate = validate_notification_rule, + ) - def to_nagios(self, value): - if value: - return value +def validate_notification_rule(rule, varprefix): + if "bulk" in rule and rule["notify_plugin"][1] == None: + raise MKUserError(varprefix + "_p_bulk_USE", + _("It does not make sense to add a bulk configuration for cancelling rules.")) + + if "bulk" in rule: + if rule["notify_plugin"][0]: + info = load_notification_scripts()[rule["notify_plugin"][0]] + if not info["bulk"]: + raise MKUserError(varprefix + "_p_notify_plugin", + _("The notification script %s does not allow bulking.") % info["title"]) + else: + raise MKUserError(varprefix + "_p_notify_plugin", + _("Legacy ASCII Emails do not support bulking. 
You can either disable notification " + "bulking or choose another notification plugin which allows bulking.")) + + +def render_notification_rules(rules, userid="", show_title=False, show_buttons=True, + analyse=False, start_nr=0, profilemode=False): + if not rules: + html.message(_("You have not created any rules yet.")) + + if rules: + if not show_title: + title = "" + elif profilemode: + title = _("Notification rules") + elif userid: + url = html.makeuri([("mode", "user_notifications"), ("user", userid)]) + html.plug() + html.icon_button(url, _("Edit this user's notifications"), "edit") + code = html.drain() + html.unplug() + title = code + _("Notification rules of user %s") % userid else: - return None - -# An attribute for selecting one item out of list using -# a drop down box (' % checkbox_name - checkbox_code += '' % checkbox_name - else: - onclick = "wato_fix_visibility(); wato_toggle_attribute(this, '%s');" % attrname - checkbox_code = '' % ( - checkbox_name, active and "CHECKED" or "", disabled and "DISABLED" or "", onclick) +class MultipleTimeRanges(ValueSpec): + def __init__(self, **kwargs): + ValueSpec.__init__(self, **kwargs) + self._num_columns = kwargs.get("num_columns", 3) + self._rangevs = TimeofdayRange() - forms.section(attr.title(), checkbox=checkbox_code, id="attr_" + attrname) - html.help(attr.help()) + def canonical_value(self): + return [ ((0,0), (24,0)), None, None ] - if len(values) == 1: - defvalue = values[0] + def render_input(self, varprefix, value): + for c in range(0, self._num_columns): + if c: + html.write("   ") + if c < len(value): + v = value[c] else: - defvalue = attr.default_value() + v = self._rangevs.canonical_value() + self._rangevs.render_input(varprefix + "_%d" % c, v) - if not new and not attr.editable(): - # In edit mode only display non editable values, don't show the - # input fields - html.write('') + def value_to_text(self, value): + parts = [] + for v in value: + parts.append(self._rangevs.value_to_text(v)) + return ", ".join(parts) - html.write('
    ' % (attrname)) + def from_html_vars(self, varprefix): + value = [] + for c in range(0, self._num_columns): + v = self._rangevs.from_html_vars(varprefix + "_%d" % c) + if v != None: + value.append(v) + return value - else: - # Now comes the input fields and the inherited / default values - # as two DIV elements, one of which is visible at one time. + def validate_value(self, value, varprefix): + for c, v in enumerate(value): + self._rangevs.validate_value(v, varprefix + "_%d" % c) + ValueSpec.custom_validate(self, value, varprefix) - # DIV with the input elements - html.write('
    ' - % (attrname, (not active) and "display: none" or "")) +# Check if timeperiod tpa excludes timeperiod tpb, or is tpb itself +def timeperiod_excludes(timeperiods, tpa_name, tpb_name): + if tpa_name == tpb_name: + return True - attr.render_input(defvalue) - html.write("
    ") + tpa = timeperiods[tpa_name] + for ex in tpa.get("exclude", []): + if ex == tpb_name: + return True + if timeperiod_excludes(timeperiods, ex, tpb_name): + return True + return False - html.write('
    ' - % (attrname, active and "display: none" or "")) +def validate_ical_file(value, varprefix): + filename, ty, content = value + if not filename.endswith('.ics'): + raise MKUserError(varprefix, _('The given file does not seem to be a valid iCalendar file. ' + 'It needs to have the file extension .ics.')) + + if not content.startswith('BEGIN:VCALENDAR'): + raise MKUserError(varprefix, _('The file does not seem to be a valid iCalendar file.')) + + if 'END:VCALENDAR' not in content: + raise MKUserError(varprefix, _('The file does not seem to be a valid iCalendar file.')) + +# Returns a dictionary in the format: +# { +# 'name' : '...', +# 'descr' : '...', +# 'events' : [ +# { +# 'name': '...', +# 'date': '...', +# }, +# ], +# } +# +# Relevant format specifications: +# http://tools.ietf.org/html/rfc2445 +# http://tools.ietf.org/html/rfc5545 +def parse_ical(ical_blob, horizon=10, times=(None, None, None)): + ical = {'raw_events': []} + + def get_params(key): + if ';' in key: + return dict([ p.split('=', 1) for p in key.split(';')[1:] ]) + return {} - # - # DIV with actual / inherited / default value - # + def parse_date(params, val): + # First normalize the date value to make it easier to parse later + if 'T' not in val and params.get('VALUE') == 'DATE': + val += 'T000000' # add 00:00:00 to date specification + + return list(time.strptime(val, '%Y%m%dT%H%M%S')) + + # First extract the relevant information from the file + in_event = False + event = {} + for l in ical_blob.split('\n'): + line = l.strip() + if not line: + continue + try: + key, val = line.split(':', 1) + except ValueError: + raise Exception('Failed to parse line: "%s"' % line) + + if key == 'X-WR-CALNAME': + ical['name'] = val + elif key == 'X-WR-CALDESC': + ical['descr'] = val + + elif line == 'BEGIN:VEVENT': + in_event = True + event = {} # create new event + + elif line == 'END:VEVENT': + # Finish the current event + ical['raw_events'].append(event) + in_event = False + + elif in_event: + if key.startswith('DTSTART'): + params = get_params(key) + event['start'] = parse_date(params, val) + + elif key.startswith('DTEND'): + params = get_params(key) + event['end'] = parse_date(params, val) + + elif key == 'RRULE': + event['recurrence'] = dict([ p.split('=', 1) for p in val.split(';') ]) + + elif key == 'SUMMARY': + event['name'] = val + + def next_occurrence(start, now, freq): + # convert struct_time to list to be able to modify it, + # then set it to the next occurrence + t = start[:] + + if freq == 'YEARLY': + t[0] = now[0]+1 # add 1 year + elif freq == 'MONTHLY': + if now[1] + 1 > 12: + t[0] = now[0]+1 + t[1] = now[1] + 1 - 12 + else: + t[0] = now[0] + t[1] = now[1] + 1 + else: + raise Exception('The frequency "%s" is currently not supported' % freq) + return t + + # Now resolve recurring events starting from 01.01 of the previous year + # Non-recurring events are simply copied + resolved = [] + now = list(time.strptime(str(time.localtime().tm_year-1), "%Y")) + last = now[:] + last[0] += horizon+1 # update year to horizon + for event in ical['raw_events']: + if 'recurrence' in event and event['start'] < now: + rule = event['recurrence'] + freq = rule['FREQ'] + interval = int(rule.get('INTERVAL', 1)) + cur = now + while cur < last: + cur = next_occurrence(event['start'], cur, freq) + resolved.append({ + 'name' : event['name'], + 'date' : time.strftime('%Y-%m-%d', cur), + }) + else: + resolved.append({ + 'name' : event['name'], + 'date' : time.strftime('%Y-%m-%d', event['start']) + }) + + ical['events'] = sorted(resolved, key=lambda ev: ev['date']) 
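+    # Illustrative example (values invented, not part of the original source): with + # horizon=2 and a single YEARLY event starting 2014-01-01, 'resolved' ends up roughly as + # [ {'name': 'New Year', 'date': '2015-01-01'}, + # {'name': 'New Year', 'date': '2016-01-01'} ] + # Sorting on the 'date' key keeps the events chronological, since '%Y-%m-%d' strings + # sort lexicographically by date.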
+ + return ical + +# Displays a dialog for uploading an ical file which will then +# be used to generate timeperiod exceptions etc. and then finally +# open the edit_timeperiod page to create a new timeperiod using +# this information +def mode_timeperiod_import_ical(phase): + if phase == "title": + return _("Import iCalendar File to create a Timeperiod") - # in bulk mode we show inheritance only if *all* hosts inherit - explanation = "" - if for_what == "bulk": - if num_haveit == 0: - explanation = " (" + inherited_from + ")" - value = inherited_value - elif not unique: - explanation = _("This value differs between the selected hosts.") - else: - value = values[0] + elif phase == "buttons": + html.context_button(_("All Timeperiods"), make_link([("mode", "timeperiods")]), "back") + return - elif for_what in [ "host", "folder" ]: - if not new and not attr.editable() and active: - value = values[0] - else: - explanation = " (" + inherited_from + ")" - value = inherited_value + vs_ical = Dictionary( + title = _('Import iCalendar File'), + render = "form", + optional_keys = None, + elements = [ + ('file', FileUpload( + title = _('iCalendar File'), + help = _("Select an iCalendar file (*.ics) from your PC"), + allow_empty = False, + custom_validate = validate_ical_file, + )), + ('horizon', Integer( + title = _('Time horizon for repeated events'), + help = _("When the iCalendar file contains definitions of repeating events, these repeating " + "events will be resolved to single events for the number of years you specify here."), + minvalue = 0, + maxvalue = 50, + default_value = 10, + unit = _('years'), + allow_empty = False, + )), + ('times', Optional( + MultipleTimeRanges( + default_value = [None, None, None], + ), + title = _('Use specific times'), + label = _('Use specific times instead of whole day'), + help = _("When you specify explicit time definitions here, these will be added to each " + "date which is added to the resulting time period. By default the whole day is " + "used."), + )), + ] + ) - if for_what != "search" and not (for_what == "bulk" and not unique): - tdclass, content = attr.paint(value, "") - if not content: - content = _("empty") - html.write("" + content + "") + ical = {} - html.write(explanation) - html.write("
    ") + if phase == "action": + if html.check_transaction(): + ical = vs_ical.from_html_vars("ical") + vs_ical.validate_value(ical, "ical") + filename, ty, content = ical['file'] - if len(topics) > 1: - if topic_is_volatile: - volatile_topics.append((topic or _("Basic settings")).encode('utf-8')) + try: + data = parse_ical(content, ical['horizon'], ical['times']) + except Exception, e: + if config.debug: + raise + raise MKUserError('ical_file', _('Failed to parse file: %s') % e) - def dump_json(obj): - return repr(obj).replace('None', 'null') + html.set_var('alias', data.get('descr', data.get('name', filename))) + + for day in [ "monday", "tuesday", "wednesday", "thursday", + "friday", "saturday", "sunday" ]: + html.set_var('%s_0_from' % day, '') + html.set_var('%s_0_until' % day, '') + + html.set_var('except_count', len(data['events'])) + for index, event in enumerate(data['events']): + index += 1 + html.set_var('except_%d_0' % index, event['date']) + html.set_var('except_indexof_%d' % index, index) + if ical['times']: + for n in range(3): + if ical['times'][n]: + html.set_var('except_%d_1_%d_from' % (index, n+1), ical['times'][n][0]) + html.set_var('except_%d_1_%d_until' % (index, n+1), ical['times'][n][1]) + return "edit_timeperiod" + return + html.write('
    %s
    ' % + _('This page can be used to generate a new timeperiod definition based ' + 'on the appointments of an iCalendar (*.ics) file. This import is normally used ' + 'to import events like holidays, therefore only single whole day appointments are ' + 'handled by this import.')) + + html.begin_form("import_ical", method="POST") + vs_ical.render_input("ical", ical) forms.end() - # Provide Javascript world with the tag dependency information - # of all attributes. - html.javascript("var inherited_tags = %s;\n"\ - "var wato_check_attributes = %s;\n"\ - "var wato_depends_on_tags = %s;\n"\ - "var wato_depends_on_roles = %s;\n"\ - "var volatile_topics = %s;\n"\ - "var user_roles = %s;\n"\ - "var hide_attributes = %s;\n"\ - "wato_fix_visibility();\n" % ( - dump_json(inherited_tags), - dump_json(list(set(dependency_mapping_tags.keys()+dependency_mapping_roles.keys()+hide_attributes))), - dump_json(dependency_mapping_tags), - dump_json(dependency_mapping_roles), - dump_json(volatile_topics), - dump_json(config.user_role_ids), - dump_json(hide_attributes))) + html.button("upload", _("Import")) + html.hidden_fields() + html.end_form() +def mode_edit_timeperiod(phase): + num_columns = 3 + timeperiods = load_timeperiods() + name = html.var("edit") # missing -> new timeperiod + new = name == None -# Check if at least one host in a folder (or its subfolders) -# has not set a certain attribute. This is needed for the validation -# of mandatory attributes. -def some_host_hasnt_set(folder, attrname): - # Check subfolders - for subfolder in folder[".folders"].values(): - # If the attribute is not set in the subfolder, we need - # to check all hosts and that folder. - if attrname not in subfolder["attributes"] \ - and some_host_hasnt_set(subfolder, attrname): - return True + # ValueSpec for the list of Exceptions + vs_ex = ListOf( + Tuple( + orientation = "horizontal", + show_titles = False, + elements = [ + ExceptionName(), + MultipleTimeRanges()] + ), + movable = False, + add_label = _("Add Exception")) - # Check hosts in this folder - load_hosts(folder) # make sure hosts are loaded - for host in folder[".hosts"].values(): - if attrname not in host: - return True + # ValueSpec for excluded Timeperiods. We offer the list of + # all other timeperiods - but only those that do not + # exclude the current timeperiod (in order to avoid cycles) + other_tps = [] + for tpname, tp in timeperiods.items(): + if not timeperiod_excludes(timeperiods, tpname, name): + other_tps.append((tpname, tp.get("alias") or tpname)) - return False + vs_excl = ListChoice(choices=other_tps) -# Compute effective (explicit and inherited) attributes -# for a host. 
This returns a dictionary with a value for -# each host attribute -def effective_attributes(host, folder): - if host: - chain = [ host ] - else: - chain = [ ] + # convert Check_MK representation of range to ValueSpec-representation + def convert_from_tod(tod): + # "00:30" -> (0, 30) + return tuple(map(int, tod.split(":"))) - while folder: - chain.append(folder.get("attributes", {})) - folder = folder.get(".parent") + def convert_from_range(range): + # ("00:30", "10:17") -> ((0,30),(10,17)) + return tuple(map(convert_from_tod, range)) - eff = {} - for a in chain[::-1]: - eff.update(a) + def convert_to_tod(value): + return "%02d:%02d" % value - # now add default values of attributes for all missing values - for attr, topic in host_attributes: - attrname = attr.name() - if attrname not in eff: - eff.setdefault(attrname, attr.default_value()) + def convert_to_range(value): + return tuple(map(convert_to_tod, value)) - return eff + def timeperiod_ranges(vp, keyname, new): + ranges = timeperiod.get(keyname, []) + value = [] + for range in ranges: + value.append(convert_from_range(range)) + if len(value) == 0 and new: + value.append(((0,0),(24,0))) + html.write("") + MultipleTimeRanges().render_input(vp, value) + html.write("") -#. -# .-Snapshots------------------------------------------------------------. -# | ____ _ _ | -# | / ___| _ __ __ _ _ __ ___| |__ ___ | |_ ___ | -# | \___ \| '_ \ / _` | '_ \/ __| '_ \ / _ \| __/ __| | -# | ___) | | | | (_| | |_) \__ \ | | | (_) | |_\__ \ | -# | |____/|_| |_|\__,_| .__/|___/_| |_|\___/ \__|___/ | -# | |_| | -# +----------------------------------------------------------------------+ -# | Mode for backup/restore/creation of snapshots | -# '----------------------------------------------------------------------' + def get_ranges(varprefix): + value = MultipleTimeRanges().from_html_vars(varprefix) + MultipleTimeRanges().validate_value(value, varprefix) + return map(convert_to_range, value) + + if phase == "title": + if new: + return _("Create new time period") + else: + return _("Edit time period") -def mode_snapshot(phase): - if phase == "title": - return _("Backup & Restore") elif phase == "buttons": - home_button() - changelog_button() - html.context_button(_("Create Snapshot"), - make_action_link([("mode", "snapshot"),("_create_snapshot","Yes")]), "snapshot") - html.context_button(_("Factory Reset"), - make_action_link([("mode", "snapshot"),("_factory_reset","Yes")]), "factoryreset") + html.context_button(_("All Timeperiods"), make_link([("mode", "timeperiods")]), "back") return - snapshots = [] - if os.path.exists(snapshot_dir): - for f in os.listdir(snapshot_dir): - snapshots.append(f) - snapshots.sort(reverse=True) + if new: + timeperiod = {} + else: + timeperiod = timeperiods.get(name, {}) - if phase == "action": - if html.has_var("_download_file"): - download_file = html.var("_download_file") + weekdays = [ + ( "monday", _("Monday") ), + ( "tuesday", _("Tuesday") ), + ( "wednesday", _("Wednesday") ), + ( "thursday", _("Thursday") ), + ( "friday", _("Friday") ), + ( "saturday", _("Saturday") ), + ( "sunday", _("Sunday") ), + ] - # Find the latest snapshot file - if download_file == 'latest': - if not snapshots: - return False - download_file = snapshots[-1] - elif download_file not in snapshots: - raise MKUserError(None, _("Invalid download file specified.")) + if phase == "action": + if html.check_transaction(): + alias = html.var_utf8("alias").strip() + if not alias: + raise MKUserError("alias", _("Please specify an alias name for your timeperiod.")) 
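+ # For orientation, an illustrative timeperiod entry as built below and saved via + # save_timeperiods() (example values invented, not taken from the original source): + # { "alias": "Work hours", + # "monday": [("08:00", "17:00")], # weekday time ranges + # "2015-12-24": [("08:00", "12:00")], # exception date + # "exclude": ["holidays"] } # other timeperiods excluded from this one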
- download_path = os.path.join(snapshot_dir, download_file) - if os.path.exists(download_path): - html.req.headers_out['Content-Disposition'] = 'Attachment; filename=' + download_file - html.req.headers_out['content_type'] = 'application/x-tar' - html.write(open(download_path).read()) - return False + unique, info = is_alias_used("timeperiods", name, alias) + if not unique: + raise MKUserError("alias", info) - # create snapshot - elif html.has_var("_create_snapshot"): - if html.check_transaction(): - filename = create_snapshot() - return None, _("Created snapshot %s.") % filename - else: - return None + timeperiod.clear() - # upload snapshot - elif html.has_var("_upload_file"): - if html.var("_upload_file") == "": - raise MKUserError(None, _("Please select a file for upload.")) - return None - if html.check_transaction(): - multitar.extract_from_buffer(html.var("_upload_file"), backup_paths) - log_pending(SYNCRESTART, None, "snapshot-restored", - _("Restored from uploaded file")) - return None, _("Successfully restored configuration.") - else: - return None + # extract time ranges of weekdays + for weekday, weekday_name in weekdays: + ranges = get_ranges(weekday) + if ranges: + timeperiod[weekday] = ranges + elif weekday in timeperiod: + del timeperiod[weekday] - # delete file - elif html.has_var("_delete_file"): - delete_file = html.var("_delete_file") + # extract ranges for custom days + exceptions = vs_ex.from_html_vars("except") + vs_ex.validate_value(exceptions, "except") + for exname, ranges in exceptions: + timeperiod[exname] = map(convert_to_range, ranges) - if delete_file not in snapshots: - raise MKUserError(None, _("Invalid file specified.")) + # extract excludes + excludes = vs_excl.from_html_vars("exclude") + vs_excl.validate_value(excludes, "exclude") + if excludes: + timeperiod["exclude"] = excludes - c = wato_confirm(_("Confirm deletion of snapshot"), - _("Are you sure you want to delete the snapshot
    %s?") % - htmllib.attrencode(delete_file) - ) - if c: - os.remove(os.path.join(snapshot_dir, delete_file)) - return None, _("Snapshot deleted.") - elif c == False: # not yet confirmed - return "" + if new: + name = html.var("name") + if len(name) == 0: + raise MKUserError("name", _("Please specify a name of the new timeperiod.")) + if not re.match("^[-a-z0-9A-Z_]*$", name): + raise MKUserError("name", _("Invalid timeperiod name. Only the characters a-z, A-Z, 0-9, _ and - are allowed.")) + if name in timeperiods: + raise MKUserError("name", _("This name is already being used by another timeperiod.")) + if name == "24X7": + raise MKUserError("name", _("The time period name 24X7 cannot be used. It is always autmatically defined.")) + timeperiods[name] = timeperiod + log_pending(SYNCRESTART, None, "edit-timeperiods", _("Created new time period %s" % name)) else: - return None # browser reload + log_pending(SYNCRESTART, None, "edit-timeperiods", _("Modified time period %s" % name)) + timeperiod["alias"] = alias + save_timeperiods(timeperiods) + return "timeperiods" + return - # restore snapshot - elif html.has_var("_restore_snapshot"): - snapshot_file = html.var("_restore_snapshot") + html.begin_form("timeperiod", method="POST") + forms.header(_("Timeperiod")) - if snapshot_file not in snapshots: - raise MKUserError(None, _("Invalid file specified.")) + # Name + forms.section(_("Internal name"), simple = not new) + if new: + html.text_input("name") + html.set_focus("name") + else: + html.write(name) - c = wato_confirm(_("Confirm restore snapshot"), - _("Are you sure you want to restore the snapshot
    %s ?") % - htmllib.attrencode(snapshot_file) - ) - if c: - multitar.extract_from_file(snapshot_dir + snapshot_file, backup_paths) - log_pending(SYNCRESTART, None, "snapshot-restored", - _("Restored snapshot %s") % htmllib.attrencode(snapshot_file)) - return None, _("Successfully restored snapshot.") - elif c == False: # not yet confirmed - return "" - else: - return None # browser reload + # Alias + if not new: + alias = timeperiods[name].get("alias", "") + else: + alias = "" - elif html.has_var("_factory_reset"): - c = wato_confirm(_("Confirm factory reset"), - _("If you proceed now, all hosts, folders, rules and other configurations " - "done with WATO will be deleted! Please consider making a snapshot before " - "you do this. Snapshots will not be deleted. Also the password of the currently " - "logged in user (%s) will be kept.
    " - "Do you really want to delete all or your configuration data?") % config.user_id) - if c: - factory_reset() - return None, _("Resetted WATO, wiped all configuration.") - elif c == False: # not yet confirmed - return "" - else: - return None # browser reload + forms.section(_("Alias")) + html.help(_("An alias or description of the timeperiod")) + html.text_input("alias", alias, size = 81) + if not new: + html.set_focus("alias") + # Week days + forms.section(_("Weekdays")) + html.help("For each weekday you can setup no, one or several " + "time ranges in the format 23:39, in which the time period " + "should be active.") + html.write("") - else: - return False + for weekday, weekday_alias in weekdays: + ranges = timeperiod.get(weekday) + html.write("" % weekday_alias) + timeperiod_ranges(weekday, weekday, new) + html.write("") + html.write("
    %s
    ") - else: - table.begin(_("Snapshots"), empty_text=_("There are no snapshots available.")) - for name in snapshots: - table.row() - # Buttons - table.cell(_("Actions"), css="buttons") - html.icon_button(make_action_link( - [("mode","snapshot"),("_restore_snapshot", name)]), _("Restore"), "restore") - html.icon_button(make_action_link( - [("mode","snapshot"),("_delete_file", name)]), _("Delete"), "delete") - # Snapshot name - table.cell(_("Filename"), '%s' % - (make_action_link([("mode","snapshot"),("_download_file", name)]), name)) - # Age and Size - st = os.stat(snapshot_dir + name) - age = time.time() - st.st_mtime - table.cell(_("Age"), html.age_text(age), css="number") - table.cell(_("Size"), "%d" % st.st_size, css="number"), - table.end() + # Exceptions + forms.section(_("Exceptions (from weekdays)")) + html.help(_("Here you can specify exceptional time ranges for certain " + "dates in the form YYYY-MM-DD which are used to define more " + "specific definitions to override the times configured for the matching " + "weekday.")) - html.write("
    " + _("Restore from uploaded file") + "
    ") - html.begin_form("upload_form", None, "POST") - html.upload_file("_upload_file") - html.button("upload_button", _("Restore from file"), "submit") - html.hidden_fields() - html.end_form() + exceptions = [] + for k in timeperiod: + if k not in [ w[0] for w in weekdays ] and k not in [ "alias", "exclude" ]: + exceptions.append((k, map(convert_from_range, timeperiod[k]))) + exceptions.sort() + vs_ex.render_input("except", exceptions) + # Excludes + if other_tps: + forms.section(_("Exclude")) + html.help(_('You can use other timeperiod definitions to exclude the times ' + 'defined in the other timeperiods from this current timeperiod.')) + vs_excl.render_input("exclude", timeperiod.get("exclude", [])) -def create_snapshot(): - make_nagios_directory(snapshot_dir) - snapshot_name = "wato-snapshot-%s.tar.gz" % \ - time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime(time.time())) - multitar.create(snapshot_dir + snapshot_name, backup_paths) + forms.end() + html.button("save", _("Save")) + html.hidden_fields() + html.end_form() - log_audit(None, "snapshot-created", _("Created snapshot %s") % snapshot_name) - # Maintenance, remove old snapshots - snapshots = [] - for f in os.listdir(snapshot_dir): - snapshots.append(f) - snapshots.sort(reverse=True) - while len(snapshots) > config.wato_max_snapshots: - log_audit(None, "snapshot-removed", _("Removed snapshot %s") % snapshots[-1]) - os.remove(snapshot_dir + snapshots.pop()) +class TimeperiodSelection(ElementSelection): + def __init__(self, **kwargs): + ElementSelection.__init__(self, **kwargs) - return snapshot_name + def get_elements(self): + timeperiods = load_timeperiods() + elements = dict([ ("24X7", _("Always")) ] + \ + [ (name, "%s - %s" % (name, tp["alias"])) for (name, tp) in timeperiods.items() ]) + return elements -def factory_reset(): - # Darn. What makes things complicated here is that we need to conserve htpasswd, - # at least the account of the currently logged in user. - users = userdb.load_users() - for id in users.keys(): - if id != config.user_id: - del users[id] + def default_value(self): + return "24x7" - to_delete = [ path for c,n,path - in backup_paths - if n != "auth.secret" ] + [ log_dir ] - for path in to_delete: - if os.path.isdir(path): - shutil.rmtree(path) - elif os.path.exists(path): - os.remove(path) +# Check if a timeperiod is currently in use and cannot be deleted +# Returns a list of occurrances. +# Possible usages: +# - 1. rules: service/host-notification/check-period +# - 2. user accounts (notification period) +# - 3. 
excluded by other timeperiods +def find_usages_of_timeperiod(tpname): - make_nagios_directory(multisite_dir) - make_nagios_directory(root_dir) + # Part 1: Rules + used_in = [] + for varname, ruleset in load_all_rulesets().items(): + rulespec = g_rulespecs[varname] + if isinstance(rulespec.get("valuespec"), TimeperiodSelection): + for folder, rule in ruleset: + value, tag_specs, host_list, item_list, rule_options = parse_rule(rulespec, rule) + if value == tpname: + used_in.append(("%s: %s" % (_("Ruleset"), g_rulespecs[varname]["title"]), + make_link([("mode", "edit_ruleset"), ("varname", varname)]))) + break + + # Part 2: Users + for userid, user in userdb.load_users().items(): + tp = user.get("notification_period") + if tp == tpname: + used_in.append(("%s: %s" % (_("User"), userid), + make_link([("mode", "edit_user"), ("edit", userid)]))) + + # Part 3: Other Timeperiods + for tpn, tp in load_timeperiods().items(): + if tpname in tp.get("exclude", []): + used_in.append(("%s: %s (%s)" % (_("Timeperiod"), tp.get("alias", tpn), + _("excluded")), + make_link([("mode", "edit_timeperiod"), ("edit", tpn)]))) - userdb.save_users(users) # make sure, omdadmin is present after this - log_pending(SYNCRESTART, None, "factory-reset", _("Complete reset to factory settings.")) + return used_in #. -# .-Value-Editor---------------------------------------------------------. -# | __ __ _ _____ _ _ _ | -# | \ \ / /_ _| |_ _ ___ | ____|__| (_) |_ ___ _ __ | -# | \ \ / / _` | | | | |/ _ \ | _| / _` | | __/ _ \| '__| | -# | \ V / (_| | | |_| | __/ | |__| (_| | | || (_) | | | -# | \_/ \__,_|_|\__,_|\___| |_____\__,_|_|\__\___/|_| | +# .--Multisite Connections-----------------------------------------------. +# | ____ _ _ | +# | / ___|(_) |_ ___ ___ | +# | \___ \| | __/ _ \/ __| | +# | ___) | | || __/\__ \ | +# | |____/|_|\__\___||___/ | # | | # +----------------------------------------------------------------------+ -# | The value editor is used in the configuration and rules module for | -# | editing single values (e.g. configuration parameter for main.mk or | -# | check parameters). | +# | Mode for managing sites. | # '----------------------------------------------------------------------' +# Sort given sites argument by local, followed by slaves +def sort_sites(sites): + def custom_sort(a,b): + return cmp(a[1].get("replication","peer"), b[1].get("replication","peer")) or \ + cmp(a[1].get("alias"), b[1].get("alias")) + sites.sort(cmp = custom_sort) -class CheckTypeSelection(ListChoice): - def __init__(self, **kwargs): - ListChoice.__init__(self, columns=3, **kwargs) +def mode_sites(phase): + if phase == "title": + return _("Distributed Monitoring") - def get_elements(self): - checks = check_mk_local_automation("get-check-information") - elements = [ (cn, "%s" % (c["title"], cn)) for (cn, c) in checks.items()] - elements.sort() - return elements + elif phase == "buttons": + global_buttons() + html.context_button(_("New connection"), make_link([("mode", "edit_site")]), "new") + return + sites = load_sites() -def edit_value(valuespec, value, title=""): - if title: - title = title + "
    " - help = valuespec.help() or "" - html.write('') - html.write('%s' % title) - html.help(help) - html.write("") + if phase == "action": + delid = html.var("_delete") + if delid and html.transaction_valid(): + # The last connection can always be deleted. In that case we + # fallb back to non-distributed-WATO and the site attribute + # will be removed. + test_sites = dict(sites.items()) + del test_sites[delid] + if is_distributed(test_sites): + # Make sure that site is not being used by hosts and folders + site_ids = set([]) + find_folder_sites(site_ids, g_root_folder, True) + if delid in site_ids: + raise MKUserError(None, + _("You cannot delete this connection. " + "It has folders/hosts assigned to it.")) - valuespec.render_input("ve", value) - html.write("") + c = wato_confirm(_("Confirm deletion of site %s" % delid), + _("Do you really want to delete the connection to the site %s?" % delid)) + if c: + del sites[delid] + save_sites(sites) + update_replication_status(delid, None) -def get_edited_value(valuespec): - value = valuespec.from_html_vars("ve") - valuespec.validate_value(value, "ve") - return value + # Due to the deletion the replication state can get clean. + if is_distributed() and global_replication_state() == "clean": + log_commit_pending() + log_pending(SYNCRESTART, None, "edit-sites", _("Deleted site %s" % (delid))) + return None + elif c == False: + return "" + else: + return None -#. -# .-Configuration--------------------------------------------------------. -# | ____ __ _ _ _ | -# | / ___|___ _ __ / _(_) __ _ _ _ _ __ __ _| |_(_) ___ _ __ | -# | | | / _ \| '_ \| |_| |/ _` | | | | '__/ _` | __| |/ _ \| '_ \ | -# | | |__| (_) | | | | _| | (_| | |_| | | | (_| | |_| | (_) | | | | | -# | \____\___/|_| |_|_| |_|\__, |\__,_|_| \__,_|\__|_|\___/|_| |_| | -# | |___/ | -# +----------------------------------------------------------------------+ -# | Main entry page for configuration of global variables, rules, groups,| -# | timeperiods, users, etc. | -# '----------------------------------------------------------------------' + logout_id = html.var("_logout") + if logout_id: + site = sites[logout_id] + c = wato_confirm(_("Confirm logout"), + _("Do you really want to log out of '%s'?") % site["alias"]) + if c: + if "secret" in site: + del site["secret"] + save_sites(sites) + log_audit(None, "edit-site", _("Logged out of remote site '%s'") % site["alias"]) + return None, _("Logged out.") + elif c == False: + return "" + else: + return None -def mode_main(phase): - if phase == "title": - return _("WATO - Check_MK's Web Administration Tool") + login_id = html.var("_login") + if login_id: + if html.var("_abort"): + return "sites" + if not html.check_transaction(): + return + site = sites[login_id] + error = None + # Fetch name/password of admin account + if html.has_var("_name"): + name = html.var("_name", "").strip() + passwd = html.var("_passwd", "").strip() + try: + secret = do_site_login(login_id, name, passwd) + site["secret"] = secret + save_sites(sites) + log_audit(None, "edit-site", _("Successfully logged into remote site '%s'") % site["alias"]) + return None, _("Successfully logged into remote site '%s'!" 
% site["alias"]) + except MKAutomationException, e: + error = _("Cannot connect to remote site: %s") % e + except MKUserError, e: + html.add_user_error(e.varname, e) + error = "%s" % e + except Exception, e: + if config.debug: + raise + html.add_user_error("_name", error) + error = str(e) - elif phase == "buttons": - changelog_button() - return - elif phase == "action": + wato_html_head(_("Login into site '%s'") % site["alias"]) + if error: + html.show_error(error) + + html.write('

    %s

    ' % (_("For the initial login into the slave site %s " + "we need once your administration login for the Multsite " + "GUI on that site. Your credentials will only be used for " + "the initial handshake and not be stored. If the login is " + "successful then both side will exchange a login secret " + "which is used for the further remote calls.") % site["alias"])) + html.begin_form("login", method="POST") + forms.header(_('Login credentials')) + forms.section(_('Adminstrator name:')) + html.text_input("_name") + html.set_focus("_name") + forms.section(_('Adminstrator password:')) + html.password_input("_passwd") + forms.end() + html.button("_do_login", _("Login")) + html.button("_abort", _("Abort")) + html.hidden_field("_login", login_id) + html.hidden_fields() + html.end_form() + return "" return - render_main_menu(modules) -def render_main_menu(some_modules, columns = 2): - html.write('") + # Disabled + if site.get("disabled", False) == True: + table.cell(_("Disabled"), "%s" % _("yes")) + else: + table.cell(_("Disabled"), _("no")) -#. -# .-Global-Settings------------------------------------------------------. -# | ____ _ _ _ __ __ | -# | / ___| | ___ | |__ __ _| | \ \ / /_ _ _ __ ___ | -# | | | _| |/ _ \| '_ \ / _` | | \ \ / / _` | '__/ __| | -# | | |_| | | (_) | |_) | (_| | | \ V / (_| | | \__ \ | -# | \____|_|\___/|_.__/ \__,_|_| \_/ \__,_|_| |___/ | -# | | -# +----------------------------------------------------------------------+ -# | Editor for global settings in main.mk | -# '----------------------------------------------------------------------' + # Timeout + if "timeout" in site: + table.cell(_("Timeout"), _("%d sec") % site["timeout"], css="number") + else: + table.cell(_("Timeout"), "") + + # Persist + if site.get("persist", False): + table.cell(_("Pers."), "%s" % _("yes")) + else: + table.cell(_("Pers."), _("no")) + + # Replication + if site.get("replication") == "slave": + repl = _("Slave") + else: + repl = "" + table.cell(_("Replication"), repl) + + # Login-Button for Replication + table.cell(_("Login")) + if repl: + if site.get("secret"): + logout_url = make_action_link([("mode", "sites"), ("_logout", id)]) + html.buttonlink(logout_url, _("Logout")) + else: + login_url = make_action_link([("mode", "sites"), ("_login", id)]) + html.buttonlink(login_url, _("Login")) + + table.end() + +def mode_edit_site_globals(phase): + sites = load_sites() + siteid = html.var("site") + site = sites[siteid] -def mode_globalvars(phase): if phase == "title": - return _("Global configuration settings for Check_MK") + return _("Edit site-specific global settings of %s" % siteid) elif phase == "buttons": - global_buttons() + html.context_button(_("All Sites"), make_link([("mode", "sites")]), "back") + html.context_button(_("Connection"), make_link([("mode", "edit_site"), ("edit", siteid)]), "sites") return - # Get default settings of all configuration variables of interest in the domain - # "check_mk". 
+def mode_edit_site_globals(phase):
+    sites = load_sites()
+    siteid = html.var("site")
+    site = sites[siteid]

-def mode_globalvars(phase):
     if phase == "title":
-        return _("Global configuration settings for Check_MK")
+        return _("Edit site-specific global settings of %s" % siteid)

     elif phase == "buttons":
-        global_buttons()
+        html.context_button(_("All Sites"), make_link([("mode", "sites")]), "back")
+        html.context_button(_("Connection"), make_link([("mode", "edit_site"), ("edit", siteid)]), "sites")
         return

-    # Get default settings of all configuration variables of interest in the domain
-    # "check_mk". (This also reflects the settings done in main.mk.)
+    # The site's default values are the current global settings
     check_mk_vars = [ varname for (varname, var) in g_configvars.items() if var[0] == "check_mk" ]
     default_values = check_mk_local_automation("get-configuration", [], check_mk_vars)
-    current_settings = load_configuration_settings()
+    default_values.update(load_configuration_settings())
+    current_settings = site.get("globals", {})

     if phase == "action":
         varname = html.var("_varname")
         action = html.var("_action")
         if varname:
-            domain, valuespec, need_restart, allow_reset = g_configvars[varname]
+            domain, valuespec, need_restart, allow_reset, in_global_settings = g_configvars[varname]
             def_value = default_values.get(varname, valuespec.default_value())

-            if action == "reset" and not isinstance(valuespec, Checkbox):
+            if action == "reset" and not is_a_checkbox(valuespec):
                 c = wato_confirm(
-                    _("Resetting configuration variable"),
-                    _("Do you really want to reset the configuration variable %s "
-                      "back to the default value of %s?") %
+                    _("Removing site-specific configuration variable"),
+                    _("Do you really want to remove the configuration variable %s "
+                      "from the site-specific configuration and use the global value "
+                      "of %s instead?") %
                     (varname, valuespec.value_to_text(def_value)))
             else:
                 if not html.check_transaction():
@@ -5404,5953 +10218,7374 @@
                     c = True # no confirmation for direct toggle
             if c:
-                # if action == "reset":
-                #     del current_settings[varname]
-                #     msg = _("Resetted configuration variable %s to its default.") % varname
-                # else:
                 if varname in current_settings:
                     current_settings[varname] = not current_settings[varname]
                 else:
                     current_settings[varname] = not def_value
-                msg = _("Changed Configuration variable %s to %s." % (varname,
+                msg = _("Changed site-specific configuration variable %s to %s." % (varname,
                     current_settings[varname] and "on" or "off"))
-                save_configuration_settings(current_settings)
-                pending_func = g_configvar_domains[domain].get("pending")
-                if pending_func:
-                    pending_func(msg)
-                else:
-                    log_pending(need_restart and SYNCRESTART or SYNC, None, "edit-configvar", msg)
+                site.setdefault("globals", {})[varname] = current_settings[varname]
+                save_sites(sites, activate=False)
+
+                changes = { "need_sync" : True }
+                if need_restart:
+                    changes["need_restart"] = True
+                update_replication_status(siteid, changes)
+                log_pending(AFFECTED, None, "edit-configvar", msg)
                 if action == "_reset":
-                    return "globalvars", msg
+                    return "edit_site_globals", msg
                 else:
-                    return "globalvars"
+                    return "edit_site_globals"
             elif c == False:
                 return ""
             else:
                 return None
         else:
             return
+        return

+    html.help(_("Here you can configure global settings that are applied just "
+                "on that remote site. Note: this only makes sense if the site "
+                "is a configuration slave."))
-    groupnames = g_configvar_groups.keys()
-    groupnames.sort()
-    html.write('
')
-    for groupname in groupnames:
-        forms.header(groupname, isopen=False)
-
-        for domain, varname, valuespec in g_configvar_groups[groupname]:
-            if domain == "check_mk" and varname not in default_values:
-                if config.debug:
-                    raise MKGeneralException("The configuration variable %s is unknown to "
-                                             "your local Check_MK installation" % varname)
-                else:
-                    continue
-
-            defaultvalue = default_values.get(varname, valuespec.default_value())

+    if site.get("replication") != "slave":
+        html.show_error(_("This site is not a replication slave. You cannot configure specific settings for it."))
+        return

-            edit_url = make_link([("mode", "edit_configvar"), ("varname", varname)])
-            title = '%s' % (edit_url, valuespec.title())

+    render_global_configuration_variables(default_values, current_settings, show_all=True)

-            if varname in current_settings:
-                to_text = valuespec.value_to_text(current_settings[varname])
-            else:
-                to_text = valuespec.value_to_text(defaultvalue)

+def create_site_globals_file(siteid, tmp_dir):
+    if not os.path.exists(tmp_dir):
+        make_nagios_directory(tmp_dir)
+    sites = load_sites()
+    site = sites[siteid]
+    config = site.get("globals", {})

-            # Is this a simple (single) value or not? change styling in these cases...
-            simple = True
-            if '\n' in to_text or '' in to_text:
-                simple = False
-            forms.section(title, simple=simple)

+    # Add global setting for disabling WATO right here. It is not
+    # available as a normal global option. That would be too dangerous.
+    # You could disable WATO on the master very easily that way...
+    # The default value is True - even for sites configured with an
+    # older version of Check_MK.
+    config["wato_enabled"] = not site.get("disable_wato", True)
+    file(tmp_dir + "/sitespecific.mk", "w").write("%r\n" % config)

-            toggle_url = make_action_link([("mode", "globalvars"),
-                                           ("_action", "toggle"), ("_varname", varname)])
-            if varname in current_settings:
-                if isinstance(valuespec, Checkbox):
-                    html.icon_button(toggle_url, _("Immediately toggle this setting"),
-                        "snapin_switch_" + (current_settings[varname] and "on" or "off"),
-                        cssclass="modified")
-                else:
-                    html.write('%s' % (edit_url, to_text))
-            else:
-                if isinstance(valuespec, Checkbox):
-                    html.icon_button(toggle_url, _("Immediately toggle this setting"),
-                        # "snapin_greyswitch_" + (defaultvalue and "on" or "off"))
-                        "snapin_switch_" + (defaultvalue and "on" or "off"))
-                else:
-                    html.write('%s' % (edit_url, to_text))
-    forms.end()
-    html.write('
')
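create_site_globals_file() above dumps the site's "globals" dictionary as a plain %r literal into sitespecific.mk inside the sync snapshot. A small sketch of that round trip (hypothetical file name; the real code additionally forces the wato_enabled key as shown above):

    # Sketch: %r writes a Python literal, eval() parses it back.
    def write_site_globals(path, settings):
        file(path, "w").write("%r\n" % settings)

    def read_site_globals(path):
        return eval(file(path).read())

    # write_site_globals("sitespecific.mk", {"wato_enabled": False})
    # read_site_globals("sitespecific.mk")["wato_enabled"]  # -> False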
+def mode_edit_site(phase):
+    sites = load_sites()
+    siteid = html.var("edit") # missing -> new site
+    cloneid = html.var("clone")
+    new = siteid == None
+    if cloneid:
+        site = sites[cloneid]
+    elif new:
+        site = {}
+    else:
+        site = sites.get(siteid, {})

-def mode_edit_configvar(phase):
     if phase == "title":
-        return _("Global configuration settings for Check_MK")
+        if new:
+            return _("Create new site connection")
+        else:
+            return _("Edit site connection %s" % siteid)

     elif phase == "buttons":
-        html.context_button(_("Abort"), make_link([("mode", "globalvars")]), "abort")
+        html.context_button(_("All Sites"), make_link([("mode", "sites")]), "back")
+        if not new and site.get("replication"):
+            html.context_button(_("Site-Globals"), make_link([("mode", "edit_site_globals"), ("site", siteid)]), "configuration")
         return

-    varname = html.var("varname")
-    domain, valuespec, need_restart, allow_reset = g_configvars[varname]
-    current_settings = load_configuration_settings()
-    is_on_default = varname not in current_settings

+    vs_tcp_port = Tuple(
+        title = _("TCP Port to connect to"),
+        orientation = "float",
+        elements = [
+            TextAscii(label = _("Host:"), allow_empty = False, size=15),
+            Integer(label = _("Port:"), minvalue=1, maxvalue=65535, default_value=6557),
+        ])
+    conn_choices = [
+        ( None, _("Connect to the local site") ),
+        ( "tcp", _("Connect via TCP"), vs_tcp_port),
+        ( "unix", _("Connect via UNIX socket"), TextAscii(
+            label = _("Path:"),
+            size = 40,
+            allow_empty = False)),
+    ]
+    if config.liveproxyd_enabled:
+        conn_choices[2:2] = [
+            ( "proxy", _("Use Livestatus Proxy-Daemon"),
+              Dictionary(
+                  optional_keys = False,
+                  columns = 1,
+                  elements = [
+                      ( "socket", vs_tcp_port ),
+                      ( "channels",
+                        Integer(
+                            title = _("Number of channels to keep open"),
+                            minvalue = 2,
+                            maxvalue = 50,
+                            default_value = 5)),
+                      ( "heartbeat",
+                        Tuple(
+                            title = _("Regular heartbeat"),
+                            orientation = "float",
+                            elements = [
+                                Integer(label = _("Rate:"), unit=_("/sec"), minvalue=1, default_value = 5),
+                                Float(label = _("Timeout:"), unit=_("sec"), minvalue=0.1, default_value = 2.0),
+                            ])),
+                      ( "channel_timeout",
+                        Float(
+                            title = _("Timeout waiting for a free channel"),
+                            minvalue = 0.1,
+                            default_value = 3,
+                            unit = _("sec"),
+                        )
+                      ),
+                      ( "query_timeout",
+                        Float(
+                            title = _("Total query timeout"),
+                            minvalue = 0.1,
+                            unit = _("sec"),
+                            default_value = 120,
+                        )
+                      ),
+                      ( "connect_retry",
+                        Float(
+                            title = _("Cooling period after failed connect/heartbeat"),
+                            minvalue = 0.1,
+                            unit = _("sec"),
+                            default_value = 4.0,
+                        )),
+                      ( "cache",
+                        Checkbox(title = _("Enable Caching"),
+                            label = _("Cache several non-status queries"),
+                            help = _("This option will enable the caching of several queries that "
This reduces the number of Livestatus " + "queries to sites and cuts down the response time of remote " + "sites with large latencies."), + default_value = True, + )), + ] + ) + ) + ] + + # ValueSpecs for the more complex input fields + vs_conn_method = CascadingDropdown( + html_separator = " ", + choices = conn_choices, + ) + if phase == "action": - if html.var("reset"): - if not isinstance(valuespec, Checkbox): - c = wato_confirm( - _("Resetting configuration variable"), - _("Do you really want to reset this configuration variable " - "back to its default value?")) - if c == False: - return "" - elif c == None: - return None + if not html.check_transaction(): + return "sites" - del current_settings[varname] - msg = _("Resetted configuration variable %s to its default.") % varname - else: - new_value = get_edited_value(valuespec) - current_settings[varname] = new_value - msg = _("Changed global configuration variable %s to %s.") \ - % (varname, valuespec.value_to_text(new_value)) - save_configuration_settings(current_settings) - if need_restart: - status = SYNCRESTART + if new: + id = html.var("id").strip() else: - status = SYNC + id = siteid - pending_func = g_configvar_domains[domain].get("pending") - if pending_func: - pending_func(msg) - else: - log_pending(status, None, "edit-configvar", msg) - return "globalvars" + if new and id in sites: + raise MKUserError("id", _("This id is already being used by another connection.")) + if not re.match("^[-a-z0-9A-Z_]+$", id): + raise MKUserError("id", _("The site id must consist only of letters, digit and the underscore.")) - if varname in current_settings: - value = current_settings[varname] - else: - check_mk_vars = check_mk_local_automation("get-configuration", [], [varname]) - value = check_mk_vars.get(varname, valuespec.default_value()) + # Save copy of old site for later + if not new: + old_site = sites[siteid] - html.begin_form("value_editor", method="POST") - forms.header(valuespec.title()) - if not config.wato_hide_varnames: - forms.section(_("Variable for %s.mk" % - { "check_mk" : "main" }.get(domain, domain))) - html.write("%s" % varname) + new_site = {} + sites[id] = new_site + alias = html.var_utf8("alias", "").strip() + if not alias: + raise MKUserError("alias", _("Please enter an alias name or description of this site.")) - forms.section(_("Current setting")) - valuespec.render_input("ve", value) - valuespec.set_focus("ve") - html.help(valuespec.help()) + new_site["alias"] = alias + url_prefix = html.var("url_prefix", "").strip() + if url_prefix and url_prefix[-1] != '/': + raise MKUserError("url_prefix", _("The URL prefix must end with a slash.")) + if url_prefix: + new_site["url_prefix"] = url_prefix + disabled = html.get_checkbox("disabled") + new_site["disabled"] = disabled - forms.section(_("Default setting")) - defvalue = valuespec.default_value() - if is_on_default: - html.write(_("This variable is at factory settings.")) - else: - curvalue = current_settings[varname] - if curvalue == defvalue: - html.write(_("Your setting and factory settings are identical.")) + # Connection + method = vs_conn_method.from_html_vars("method") + vs_conn_method.validate_value(method, "method") + if type(method) == tuple and method[0] in [ "unix", "tcp"]: + if method[0] == "unix": + new_site["socket"] = "unix:" + method[1] + else: + new_site["socket"] = "tcp:%s:%d" % method[1] + elif method: + new_site["socket"] = method + elif "socket" in new_site: + del new_site["socket"] + + # Timeout + timeout = html.var("timeout", "").strip() + if 
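    # Aside - a sketch of the two directions of the socket spec used here:
    # the form tuple is folded into "unix:<path>" / "tcp:<host>:<port>",
    # and mode_edit_site parses the very same strings back when rendering
    # the form further below.
    def encode_socket(method):
        kind, value = method
        if kind == "unix":
            return "unix:" + value            # e.g. "unix:/var/run/live"
        return "tcp:%s:%d" % value            # e.g. "tcp:10.0.0.2:6557"

    def decode_socket(spec):
        if spec.startswith("unix:"):
            return ("unix", spec[5:])
        parts = spec.split(":")[1:]
        return ("tcp", (parts[0], int(parts[1])))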
timeout != "": + try: + timeout = int(timeout) + except: + raise MKUserError("timeout", _("%s is not a valid integer number.") % timeout) + new_site["timeout"] = timeout + + # Persist + new_site["persist"] = html.get_checkbox("persist") + + + # Status host + sh_site = html.var("sh_site") + if sh_site: + if sh_site not in sites: + raise MKUserError("sh_site", _("The site of the status host does not exist.")) + if sh_site in [ siteid, id ]: + raise MKUserError("sh_site", _("You cannot use the site itself as site of the status host.")) + sh_host = html.var("sh_host") + if not sh_host: + raise MKUserError("sh_host", _("Please specify the name of the status host.")) + new_site["status_host"] = ( sh_site, sh_host ) else: - html.write(valuespec.value_to_text(defvalue)) + new_site["status_host"] = None - forms.end() - html.button("save", _("Save")) - if allow_reset and not is_on_default: - curvalue = current_settings[varname] - html.button("reset", curvalue == defvalue and _("Remove explicit setting") or _("Reset to default")) - html.hidden_fields() - html.end_form() + # Replication + repl = html.var("replication") + if repl == "none": + repl = None + if repl: + new_site["replication"] = repl -# domain is one of "check_mk", "multisite" or "nagios" -def register_configvar(group, varname, valuespec, domain="check_mk", need_restart=False, allow_reset=True): - g_configvar_groups.setdefault(group, []).append((domain, varname, valuespec)) - g_configvars[varname] = domain, valuespec, need_restart, allow_reset + multisiteurl = html.var("multisiteurl", "").strip() + if repl: + if not multisiteurl: + raise MKUserError("multisiteurl", + _("Please enter the Multisite URL of the slave site.")) + if not multisiteurl.endswith("/check_mk/"): + raise MKUserError("multisiteurl", + _("The Multisite URL must end with /check_mk/")) + if not multisiteurl.startswith("http://") and not multisiteurl.startswith("https://"): + raise MKUserError("multisiteurl", + _("The Multisites URL must begin with http:// or https://.")) + if "socket" not in new_site: + raise MKUserError("replication", + _("You cannot do replication with the local site.")) -g_configvar_domains = { - "check_mk" : { - "configdir" : root_dir, - }, - "multisite" : { - "configdir" : multisite_dir, - }, -} + # Save Multisite-URL even if replication is turned off. That way that + # setting is not lost if replication is turned off for a while. + new_site["multisiteurl"] = multisiteurl + + # Disabling of WATO + new_site["disable_wato"] = html.get_checkbox("disable_wato") + + # Handle the insecure replication flag + new_site["insecure"] = html.get_checkbox("insecure") + + # Allow direct user login + new_site["user_login"] = html.get_checkbox("user_login") + + # Secret is not checked here, just kept + if not new and "secret" in old_site: + new_site["secret"] = old_site["secret"] + + # Do not forget to add those settings (e.g. 
"globals") that + # are not edited with this dialog + if not new: + for key in old_site.keys(): + if key not in new_site: + new_site[key] = old_site[key] + + save_sites(sites) + + # Own site needs RESTART in any case + update_replication_status(our_site_id(), { "need_restart" : True }) + if new: + if not site_is_local(id): + update_replication_status(id, { "need_sync" : True, "need_restart" : True }) + log_pending(AFFECTED, None, "edit-sites", _("Created new connection to site %s" % id)) + else: + log_pending(AFFECTED, None, "edit-sites", _("Modified site connection %s" % id)) + # Replication mode has switched on/off => handle replication state + repstatus = load_replication_status() + if repl: # Repl is on + update_replication_status(id, { "need_sync" : True, "need_restart" : True }) + elif id in repstatus: # Repl switched off + update_replication_status(id, None) # Replication switched off + if is_distributed() and global_replication_state() == "clean": + log_commit_pending() + return "sites" -def register_configvar_domain(domain, configdir, pending = None): - g_configvar_domains[domain] = { - "configdir" : configdir, - } - if pending: - g_configvar_domains[domain]["pending"] = pending + html.begin_form("site") -# Persistenz: Speicherung der Werte -# - WATO speichert seine Variablen für main.mk in conf.d/wato/global.mk -# - Daten, die der User in main.mk einträgt, müssen WATO auch bekannt sein. -# Sie werden als Defaultwerte verwendet. -# - Daten, die der User in final.mk oder local.mk einträgt, werden von WATO -# völlig ignoriert. Der Admin kann hier Werte überschreiben, die man mit -# WATO dann nicht ändern kann. Und man sieht auch nicht, dass der Wert -# nicht änderbar ist. -# - WATO muss irgendwie von Check_MK herausbekommen, welche Defaultwerte -# Variablen haben bzw. welche Einstellungen diese Variablen nach main.mk -# haben. -# - WATO kann main.mk nicht selbst einlesen, weil dann der Kontext fehlt -# (Default-Werte der Variablen aus Check_MK und aus den Checks) -# - --> Wir machen eine automation, die alle Konfigurationsvariablen -# ausgibt. 
-def load_configuration_settings():
-    settings = {}
-    for domain, domain_info in g_configvar_domains.items():
-        load_configuration_vars(domain_info["configdir"] + "global.mk", settings)
-    return settings

+    # ID
+    forms.header(_("Basic settings"))
+    forms.section(_("Site ID"), simple = not new)
+    if new:
+        html.text_input("id", siteid or cloneid)
+        html.set_focus("id")
+    else:
+        html.write(siteid)

+    # Alias
+    forms.section(_("Alias"))
+    html.text_input("alias", site.get("alias", ""), size = 60)
+    if not new:
+        html.set_focus("alias")
+    html.help(_("An alias or description of the site"))

-def load_configuration_vars(filename, settings):
-    if not os.path.exists(filename):
-        return {}
-    try:
-        execfile(filename, settings, settings)
-        for varname in settings.keys():
-            if varname not in g_configvars:
-                del settings[varname]
-        return settings
-    except Exception, e:
-        if config.debug:
-            raise MKGeneralException(_("Cannot read configuration file %s: %s" %
-                                       (filename, e)))
-        return {}

+    forms.header(_("Livestatus settings"))
+    forms.section(_("Connection"))
+    method = site.get("socket", None)
+    if type(method) == str and method.startswith("unix:"):
+        method = ('unix', method[5:])
+    elif type(method) == str and method.startswith("tcp:"):
+        parts = method.split(":")[1:]
+        method = ('tcp', (parts[0], int(parts[1])))
+    vs_conn_method.render_input("method", method)

-def save_configuration_settings(vars):
-    per_domain = {}
-    for varname, (domain, valuespec, need_restart, allow_reset) in g_configvars.items():
-        if varname not in vars:
-            continue
-        per_domain.setdefault(domain, {})[varname] = vars[varname]

+    html.help( _("When connecting to a remote site, please make sure "
+                 "that Livestatus over TCP is activated there. You can use UNIX sockets "
+                 "to connect to foreign sites on localhost. Please make sure that this "
+                 "site has proper read and write permissions to the UNIX socket of the "
+                 "foreign site."))

-    for domain, domain_info in g_configvar_domains.items():
-        dir = domain_info["configdir"]
-        make_nagios_directory(dir)
-        save_configuration_vars(per_domain.get(domain, {}), dir + "global.mk")

+    # Timeout
+    forms.section(_("Connect Timeout"))
+    timeout = site.get("timeout", 10)
+    html.number_input("timeout", timeout, size=2)
+    html.write(_(" seconds"))
+    html.help(_("This sets the time that Multisite waits for a connection "
+                "to the site to be established before the site is considered to be unreachable. "
+                "If not set, the operating system defaults are being used and just one login "
+                "attempt is performed."))

-def save_configuration_vars(vars, filename):
-    out = create_user_file(filename, 'w')
-    out.write("# Written by WATO\n# encoding: utf-8\n\n")
-    for varname, value in vars.items():
-        out.write("%s = %s\n" % (varname, pprint.pformat(value)))

+    # Persistent connections
+    forms.section(_("Persistent Connection"), simple=True)
+    html.checkbox("persist", site.get("persist", False), label=_("Use persistent connections"))
+    html.help(_("If you enable persistent connections then Multisite will try to keep open "
+                "the connection to the remote sites. This brings a great speed up in high-latency "
+                "situations but locks a number of threads in the Livestatus module of the target site."))

-#.
-# .-Groups---------------------------------------------------------------.
-# | ____ |
-# | / ___|_ __ ___ _ _ _ __ ___ |
-# | | | _| '__/ _ \| | | | '_ \/ __| |
-# | | |_| | | | (_) | |_| | |_) \__ \ |
-# | \____|_| \___/ \__,_| .__/|___/ |
-# | |_| |
-# +----------------------------------------------------------------------+
-# | Mode for editing host-, service- and contact groups |
-# '----------------------------------------------------------------------'

+    # URL-Prefix
+    docu_url = "http://mathias-kettner.de/checkmk_multisite_modproxy.html"
+    forms.section(_("URL prefix"))
+    html.text_input("url_prefix", site.get("url_prefix", ""), size = 60)
+    html.help(_("The URL prefix will be prepended to links of addons like PNP4Nagios "
+                "or the classical Nagios GUI when a link to such applications points to a host or "
+                "service on that site. You can either use an absolute URL prefix like http://some.host/mysite/ "
+                "or a relative URL like /mysite/. When using relative prefixes you need a mod_proxy "
+                "configuration in your local system Apache that proxies such URLs to the according remote site. "
+                "Please refer to the online documentation for details. "
+                "The prefix should end with a slash. Omit the /pnp4nagios/ from the prefix.") % docu_url)

-def find_usages_of_group_in_rules(name, varnames):
-    used_in = []
-    rulesets = load_all_rulesets()
-    for varname in varnames:
-        ruleset = rulesets[varname]
-        rulespec = g_rulespecs[varname]
-        for folder, rule in ruleset:
-            value, tag_specs, host_list, item_list, rule_options = parse_rule(rulespec, rule)
-            if value == name:
-                used_in.append(("%s: %s" % (_("Ruleset"), g_rulespecs[varname]["title"]),
-                               make_link([("mode", "edit_ruleset"), ("varname", varname)])))
-    return used_in

+    # Status-Host
+    docu_url = "http://mathias-kettner.de/checkmk_multisite_statushost.html"
+    forms.section(_("Status host"))

-# Check if a group is currently in use and cannot be deleted
-# Returns a list of occurrences.
-# Possible usages:
-# - 1. rules: host to contactgroups, services to contactgroups
-# - 2. user memberships
-def find_usages_of_contact_group(name):
-    # Part 1: Rules
-    used_in = find_usages_of_group_in_rules(name, [ 'host_contactgroups', 'service_contactgroups' ])

+    sh = site.get("status_host")
+    if sh:
+        sh_site, sh_host = sh
+    else:
+        sh_site = ""
+        sh_host = ""
+    html.write(_("host: "))
+    html.text_input("sh_host", sh_host, size=10)
+    html.write(_(" on monitoring site: "))
+    html.sorted_select("sh_site",
+        [ ("", _("(no status host)")) ] + [ (sk, si.get("alias", sk)) for (sk, si) in sites.items() ], sh_site)
+    html.help( _("By specifying a status host for each non-local connection "
+                 "you prevent Multisite from running into timeouts when remote sites do not respond. "
+                 "You need to add the remote monitoring servers as hosts into your local monitoring "
+                 "site and use their host state as a reachability state of the remote site. Please "
+                 "refer to the online documentation for details.") % docu_url)

-    # Is the contactgroup assigned to a user?
- users = filter_hidden_users(userdb.load_users()) - entries = users.items() - entries.sort(cmp = lambda a, b: cmp(a[1].get("alias"), b[1].get("alias"))) - for userid, user in entries: - cgs = user.get("contactgroups", []) - if name in cgs: - used_in.append(('%s: %s' % (_('User'), user.get('alias')), - make_link([('mode', 'edit_user'), ('edit', userid)]))) + # Disabled + forms.section(_("Disable"), simple=True) + html.checkbox("disabled", site.get("disabled", False), label = _("Temporarily disable this connection")) + html.help( _("If you disable a connection, then no data of this site will be shown in the status GUI. " + "The replication is not affected by this, however.")) - global_config = load_configuration_settings() + # Replication + forms.header(_("Configuration Replication (Distributed WATO)")) + forms.section(_("Replication method")) + html.select("replication", + [ ("none", _("No replication with this site")), + ("slave", _("Slave: push configuration to this site")) + ], site.get("replication", "none")) + html.help( _("WATO replication allows you to manage several monitoring sites with a " + "logically centralized WATO. Slave sites receive their configuration " + "from master sites.

    Note: Slave sites " + "do not need any replication configuration. They will be remote-controlled " + "by the master sites.")) - # Used in default_user_profile? - domain, valuespec, need_restart, allow_reset = g_configvars['default_user_profile'] - configured = global_config.get('default_user_profile', {}) - default_value = valuespec.default_value() - if (configured and name in configured['contactgroups']) \ - or name in default_value['contactgroups']: - used_in.append(('%s' % (_('Default User Profile')), - make_link([('mode', 'edit_configvar'), ('varname', 'default_user_profile')]))) + forms.section(_("Multisite-URL of remote site")) + html.text_input("multisiteurl", site.get("multisiteurl", ""), size=60) + html.help( _("URL of the remote Check_MK including /check_mk/. " + "This URL is in many cases the same as the URL-Prefix but with check_mk/ " + "appended, but it must always be an absolute URL. Please note, that " + "that URL will be fetched by the Apache server of the local " + "site itself, whilst the URL-Prefix is used by your local Browser.")) - # Is the contactgroup used in mkeventd notify (if available)? - if 'mkeventd_notify_contactgroup' in g_configvars: - domain, valuespec, need_restart, allow_reset = g_configvars['mkeventd_notify_contactgroup'] - configured = global_config.get('mkeventd_notify_contactgroup') - default_value = valuespec.default_value() - if (configured and name == configured) \ - or name == default_value: - used_in.append(('%s' % (valuespec.title()), - make_link([('mode', 'edit_configvar'), ('varname', 'mkeventd_notify_contactgroup')]))) + forms.section(_("WATO"), simple=True) + html.checkbox("disable_wato", site.get("disable_wato", True), label = _('Disable configuration via WATO on this site')) + html.help( _('It is a good idea to disable access to WATO completely on the slave site. 
' + 'Otherwise a user who does not now about the replication could make local ' + 'changes that are overridden at the next configuration activation.')) - return used_in + forms.section(_("SSL"), simple=True) + html.checkbox("insecure", site.get("insecure", False), label = _('Ignore SSL certificate errors')) + html.help( _('This might be needed to make the synchronization accept problems with ' + 'SSL certificates when using an SSL secured connection.')) -def find_usages_of_host_group(name): - return find_usages_of_group_in_rules(name, [ 'host_groups' ]) + forms.section(_('Direct login to Web GUI allowed'), simple=True) + html.checkbox('user_login', site.get('user_login', True), + label = _('Users are allowed to directly login into the Web GUI of this site')) + html.help(_('When enabled, this site is marked for synchronisation every time a Web GUI ' + 'related option is changed in the master site.')) -def find_usages_of_service_group(name): - return find_usages_of_group_in_rules(name, [ 'service_groups' ]) + forms.end() + html.button("save", _("Save")) -def mode_groups(phase, what): - if what == "host": - what_name = _("host groups") - elif what == "service": - what_name = _("service groups") - elif what == "contact": - what_name = _("contact groups") + html.hidden_fields() + html.end_form() - if phase == "title": - return what_name.title() - elif phase == "buttons": - global_buttons() - html.context_button(_("New group"), make_link([("mode", "edit_%s_group" % what)]), "new") - if what == "contact": - html.context_button(_("Rules"), make_link([("mode", "rulesets"), - ("filled_in", "search"), ("search", "contact group")]), "rulesets") - else: - varname = what + "_groups" - html.context_button(_("Rules"), make_link([("mode", "edit_ruleset"), ("varname", varname)]), "rulesets") - return +def load_sites(): + try: + if not os.path.exists(sites_mk): + return {} - all_groups = userdb.load_group_information() - groups = all_groups.get(what, {}) + vars = { "sites" : {} } + execfile(sites_mk, vars, vars) - if phase == "action": - delname = html.var("_delete") + # Be compatible to old "disabled" value in socket attribute. + # Can be removed one day. + for site in vars['sites'].values(): + if site.get('socket') == 'disabled': + site['disabled'] = True + del site['socket'] - if what == 'contact': - usages = find_usages_of_contact_group(delname) - elif what == 'host': - usages = find_usages_of_host_group(delname) - elif what == 'service': - usages = find_usages_of_service_group(delname) - - if usages: - message = "%s
    %s:
      " % \ - (_("You cannot delete this %s group.") % what, - _("It is still in use by")) - for title, link in usages: - message += '
    • %s
    • \n' % (link, title) - message += "
    " - raise MKUserError(None, message) + return vars["sites"] - confirm_txt = _('Do you really want to delete the %s group "%s"?') % (what, delname) + except Exception, e: + if config.debug: + raise MKGeneralException(_("Cannot read configuration file %s: %s" % + (sites_mk, e))) + return {} - c = wato_confirm(_("Confirm deletion of group \"%s\"" % delname), confirm_txt) - if c: - del groups[delname] - save_group_information(all_groups) - log_pending(SYNCRESTART, None, "edit-%sgroups", _("Deleted %s group %s" % (what, delname))) - return None - elif c == False: - return "" - else: - return None - sorted = groups.items() - sorted.sort() - if len(sorted) == 0: - if what == "contact": - render_main_menu([ - ( "edit_contact_group", _("Create new contact group"), "new", - what == "contact" and "users" or "groups", - _("Contact groups are needed for assigning hosts and services to people (contacts)"))]) - else: - html.write("
    " + _("There are not defined any groups yet.") + "
    ") - return +def save_sites(sites, activate=True): + make_nagios_directory(multisite_dir) - # Show member of contact groups - if what == "contact": - users = filter_hidden_users(userdb.load_users()) - members = {} - for userid, user in users.items(): - cgs = user.get("contactgroups", []) - for cg in cgs: - members.setdefault(cg, []).append((userid, user.get('alias', userid))) + # Important: even write out sites if it's empty. The global 'sites' + # variable will otherwise survive in the Python interpreter of the + # Apache processes. + out = create_user_file(sites_mk, "w") + out.write("# Written by WATO\n# encoding: utf-8\n\n") + out.write("sites = \\\n%s\n" % pprint.pformat(sites)) - table.begin(what_name.title()) - for name, alias in sorted: - table.row() + # Do not activate when just the site's global settings have + # been edited + if activate: + config.load_config() # make new site configuration active + update_distributed_wato_file(sites) + declare_site_attribute() + load_all_folders() # make sure that .siteid is present + rewrite_config_files_below(g_root_folder) # fix site attributes + need_sidebar_reload() - table.cell(_("Actions"), css="buttons") - edit_url = make_link([("mode", "edit_%s_group" % what), ("edit", name)]) - delete_url = html.makeactionuri([("_delete", name)]) - clone_url = make_link([("mode", "edit_%s_group" % what), ("clone", name)]) - html.icon_button(edit_url, _("Properties"), "edit") - html.icon_button(clone_url, _("Create a copy of this group"), "clone") - html.icon_button(delete_url, _("Delete"), "delete") + if config.liveproxyd_enabled: + save_liveproxyd_config(sites) - table.cell(_("Name"), name) - table.cell(_("Alias"), alias) + create_nagvis_backends(sites) - if what == "contact": - table.cell(_("Members")) - html.write(", ".join( - [ '%s' % (make_link([("mode", "edit_user"), ("edit", userid)]), alias) - for userid, alias in members.get(name, [])])) + # Call the sites saved hook + call_hook_sites_saved(sites) - table.end() +def save_liveproxyd_config(sites): + path = defaults.default_config_dir + "/liveproxyd.mk" + out = create_user_file(path, "w") + out.write("# Written by WATO\n# encoding: utf-8\n\n") + conf = {} + for siteid, siteconf in sites.items(): + s = siteconf.get("socket") + if type(s) == tuple and s[0] == "proxy": + conf[siteid] = s[1] -def mode_edit_group(phase, what): - name = html.var("edit") # missing -> new group - new = name == None + out.write("sites = \\\n%s\n" % pprint.pformat(conf)) + try: + pidfile = defaults.livestatus_unix_socket + "proxyd.pid" + pid = int(file(pidfile).read().strip()) + os.kill(pid, 10) + except Exception, e: + html.show_error(_("Warning: cannot reload Livestatus Proxy-Daemon: %s" % e)) - if phase == "title": - if new: - if what == "host": - return _("Create new host group") - elif what == "service": - return _("Create new service group") - elif what == "contact": - return _("Create new contact group") - else: - if what == "host": - return _("Edit host group") - elif what == "service": - return _("Edit service group") - elif what == "contact": - return _("Edit contact group") +def create_nagvis_backends(sites): + if not defaults.omd_root: + return # skip when not in OMD environment + cfg = [ + '; MANAGED BY CHECK_MK WATO - Last Update: %s' % time.strftime('%Y-%m-%d %H:%M:%S'), + ] + for site_id, site in sites.items(): + if site == defaults.omd_site: + continue # skip local site, backend already added by omd + if 'socket' not in site: + continue # skip sites without configured sockets + + # Handle special data 
+def create_nagvis_backends(sites):
+    if not defaults.omd_root:
+        return # skip when not in OMD environment
+    cfg = [
+        '; MANAGED BY CHECK_MK WATO - Last Update: %s' % time.strftime('%Y-%m-%d %H:%M:%S'),
+    ]
+    for site_id, site in sites.items():
+        if site_id == defaults.omd_site:
+            continue # skip local site, backend already added by omd
+        if 'socket' not in site:
+            continue # skip sites without configured sockets
+
+        # Handle special data format of livestatus proxy config
+        if type(site['socket']) == tuple:
+            socket = 'tcp:%s:%d' % site['socket'][1]['socket']
+        else:
+            socket = site['socket']
+
+        cfg += [
+            '',
+            '[backend_%s]' % site_id,
+            'backendtype="mklivestatus"',
+            'socket="%s"' % socket,
+        ]

-    elif phase == "buttons":
-        html.context_button(_("All groups"), make_link([("mode", "%s_groups" % what)]), "back")
-        return

+        if site.get("status_host"):
+            cfg.append('statushost="%s"' % ':'.join(site['status_host']))

-    all_groups = userdb.load_group_information()
-    groups = all_groups.setdefault(what, {})

+    file('%s/etc/nagvis/conf.d/cmk_backends.ini.php' % defaults.omd_root, 'w').write('\n'.join(cfg))

-    if phase == "action":
-        if html.check_transaction():
-            alias = html.var_utf8("alias").strip()
-            if not alias:
-                raise MKUserError("alias", _("Please specify an alias name."))
-            for key, value in groups.items():
-                if alias == value and name != key:
-                    raise MKUserError("alias", _("This alias is already used in the group %s ." % key))

+# Makes sure that in distributed mode we only monitor
+# the hosts that are directly assigned to our (the local)
+# site.
+def update_distributed_wato_file(sites):
+    # Note: we cannot access config.sites here, since we
+    # are currently in the process of saving the new
+    # site configuration.
+    distributed = False
+    found_local = False
+    for siteid, site in sites.items():
+        if site.get("replication"):
+            distributed = True
+        if site_is_local(siteid):
+            found_local = True
+            create_distributed_wato_file(siteid, site.get("replication"))

-            if new:
-                name = html.var("name").strip()
-                if len(name) == 0:
-                    raise MKUserError("name", _("Please specify a name of the new group."))
-                if ' ' in name:
-                    raise MKUserError("name", _("Sorry, spaces are not allowed in group names."))
-                if not re.match("^[-a-z0-9A-Z_]*$", name):
-                    raise MKUserError("name", _("Invalid group name. Only the characters a-z, A-Z, 0-9, _ and - are allowed."))
-                if name in groups:
-                    raise MKUserError("name", _("Sorry, there is already a group with that name"))
-                groups[name] = alias
-                log_pending(SYNCRESTART, None, "edit-%sgroups" % what, _("Create new %s group %s" % (what, name)))
-            else:
-                groups[name] = alias
-                log_pending(SYNCRESTART, None, "edit-%sgroups" % what, _("Changed alias of %s group %s" % (what, name)))
-            save_group_information(all_groups)

+    # Remove the distributed wato file
+    # a) If there is no distributed WATO setup
+    # b) If the local site could not be found
+    if not distributed: # or not found_local:
+        delete_distributed_wato_file()

+#.
+# .--Replication---------------------------------------------------------.
+# | ____ _ _ _ _ |
+# | | _ \ ___ _ __ | (_) ___ __ _| |_(_) ___ _ __ |
+# | | |_) / _ \ '_ \| | |/ __/ _` | __| |/ _ \| '_ \ |
+# | | _ < __/ |_) | | | (_| (_| | |_| | (_) | | | | |
+# | |_| \_\___| .__/|_|_|\___\__,_|\__|_|\___/|_| |_| |
+# | |_| |
+# +----------------------------------------------------------------------+
+# | Functions dealing with the WATO replication feature. |
+# | Let's call this "Distributed WATO". More buzz-word like :-) |
+# '----------------------------------------------------------------------'

-    return what + "_groups"

+def do_site_login(site_id, name, password):
+    sites = load_sites()
+    site = sites[site_id]
+    if not name:
+        raise MKUserError("_name",
+            _("Please specify your administrator login on the remote site."))
+    if not password:
+        raise MKUserError("_passwd",
+            _("Please specify your password."))
+
+    # Trying basic auth AND form based auth to ensure the site login works.
+    # Adding _ajaxid makes the web service fail silently with an HTTP code and
+    # not output HTML code for an error screen.
+    url = site["multisiteurl"] + 'login.py'
+    post_data = html.urlencode_vars([
+        ('_login', '1'),
+        ('_username', name),
+        ('_password', password),
+        ('_origtarget', 'automation_login.py'),
+        ('_plain_error', '1'),
+    ])

-    html.begin_form("group")
-    forms.header(_("Properties"))
-    forms.section(_("Name"), simple = not new)
-    html.help(_("The name of the group is used as an internal key. It cannot be "
-                "changed later. 
It is also visible in the status GUI.")) - if new: - clone_group = html.var("clone") - html.text_input("name", clone_group or "") - html.set_focus("name") +def do_site_login(site_id, name, password): + sites = load_sites() + site = sites[site_id] + if not name: + raise MKUserError("_name", + _("Please specify your administrator login on the remote site.")) + if not password: + raise MKUserError("_passwd", + _("Please specify your password.")) + + # Trying basic auth AND form based auth to ensure the site login works. + # Adding _ajaxid makes the web service fail silently with an HTTP code and + # not output HTML code for an error screen. + url = site["multisiteurl"] + 'login.py' + post_data = html.urlencode_vars([ + ('_login', '1'), + ('_username', name), + ('_password', password), + ('_origtarget', 'automation_login.py'), + ('_plain_error', '1'), + ]) + response = get_url(url, site.get('insecure', False), name, password, post_data=post_data).strip() + if '' in response.lower(): + message = _("Authentication to web service failed.
    Message:
    %s") % \
+            html.strip_tags(html.strip_scripts(response))
+        if config.debug:
+            message += "
    " + _("Automation URL:") + " %s
    " % url
+        raise MKAutomationException(message)
+    elif not response:
+        raise MKAutomationException(_("Empty response from web service"))
     else:
-        clone_group = None
-        html.write(name)
-        html.set_focus("alias")
+        try:
+            return eval(response)
+        except:
+            raise MKAutomationException(response)

-    forms.section(_("Alias"))
-    html.help(_("An Alias or description of this group."))
-    alias = groups.get(name, "")
-    if not alias:
-        if clone_group:
-            alias = groups.get(clone_group, "")
    " % url + raise MKAutomationException(message) + elif not response: + raise MKAutomationException(_("Empty response from web service")) else: - clone_group = None - html.write(name) - html.set_focus("alias") + try: + return eval(response) + except: + raise MKAutomationException(response) - forms.section(_("Alias")) - html.help(_("An Alias or description of this group.")) - alias = groups.get(name, "") - if not alias: - if clone_group: - alias = groups.get(clone_group, "") +def upload_file(url, file_path, insecure): + return get_url(url, insecure, params = ' -F snapshot=@%s' % file_path) + +def get_url(url, insecure, user=None, password=None, params = '', post_data = None): + cred = '' + if user: + cred = ' -u "%s:%s"' % (user, password) + + insecure = insecure and ' --insecure' or '' + + # -s: silent + # -S: show errors + # -w '%{http_code}': add the http status code to the end of the output + # -L: follow redirects + # -b /dev/null: handle cookies, but do not persist them + command = 'curl -b /dev/null -L -w "\n%%{http_code}" -s -S%s%s%s "%s" 2>&1' % ( + insecure, cred, params, url) + tmp_file = None + if post_data != None: + # Put POST data on command line as long as it is not + # longer than 50 KB (remember: Linux has an upper limit + # of 132 KB for command line plus environment + if len(post_data) < 50000: + command += ' --data-binary "%s"' % post_data else: - alias = name - html.text_input("alias", alias) - forms.end() - html.button("save", _("Save")) - html.hidden_fields() - html.end_form() + import tempfile + tmp_file = tempfile.NamedTemporaryFile(dir = defaults.tmp_dir) + tmp_file.write(post_data) + tmp_file.flush() + command += ' --data-binary "@%s"' % tmp_file.name -def save_group_information(groups): - make_nagios_directory(root_dir) - out = create_user_file(root_dir + "groups.mk", "w") - out.write("# Written by WATO\n# encoding: utf-8\n\n") - for what in [ "host", "service", "contact" ]: - if what in groups and len(groups[what]) > 0: - out.write("if type(define_%sgroups) != dict:\n define_%sgroups = {}\n" % (what, what)) - out.write("define_%sgroups.update(%s)\n\n" % (what, pprint.pformat(groups[what]))) + response = os.popen(command).read().strip() + try: + status_code = int(response[-3:]) + response_body = response[:-3] + except: + status_code = None + response_body = response + if status_code == 401: + raise MKUserError("_passwd", _("Authentication failed. Invalid login/password.")) + elif status_code != 200: + raise MKUserError("_passwd", _("HTTP Error - %s: %s") % (status_code, response_body)) -class GroupSelection(ElementSelection): - def __init__(self, what, **kwargs): - ElementSelection.__init__(self, **kwargs) - self._what = what - # Allow to have "none" entry with the following title - self._no_selection = kwargs.get("no_selection") + return response_body - def get_elements(self): - all_groups = userdb.load_group_information() - this_group = all_groups.get(self._what, {}) - # replace the title with the key if the title is empty - elements = [ (k, t and t or k) for (k, t) in this_group.items() ] - if self._no_selection: - # Beware: ElementSelection currently can only handle string - # keys, so we cannot take 'None' as a value. 
-            elements.append(('', self._no_selection))
-        return dict(elements)

+def check_mk_remote_automation(siteid, command, args, indata):
+    site = config.site(siteid)
+    if "secret" not in site:
+        raise MKGeneralException(_("Cannot access site %s - you are not logged in.")
+           % site.get("alias", siteid))

+    # If the site is not up-to-date, synchronize it first.
+    repstatus = load_replication_status()
+    if repstatus.get(siteid, {}).get("need_sync"):
+        synchronize_site(config.site(siteid), False)
+
+    # Now do the actual remote command
+    response = do_remote_automation(
+        config.site(siteid), "checkmk-automation",
+        [
+            ("automation", command),         # The Check_MK automation command
+            ("arguments",  mk_repr(args)),   # The arguments for the command
+            ("indata",     mk_repr(indata)), # The input data
+        ])
+    return response

-class CheckTypeGroupSelection(ElementSelection):
-    def __init__(self, checkgroup, **kwargs):
-        ElementSelection.__init__(self, **kwargs)
-        self._checkgroup = checkgroup

+def do_remote_automation(site, command, vars):
+    base_url = site["multisiteurl"]
+    secret = site.get("secret")
+    if not secret:
+        raise MKAutomationException(_("You are not logged into the remote site."))
+
+    url = base_url + "automation.py?" + \
+        html.urlencode_vars([
+            ("command", command),
+            ("secret",  secret),
+            ("debug",   config.debug and '1' or '')
+        ])

-    def get_elements(self):
-        checks = check_mk_local_automation("get-check-information")
-        elements = dict([ (cn, "%s - %s" % (cn, c["title"])) for (cn, c) in checks.items()
-                     if c.get("group") == self._checkgroup ])
-        return elements

+    vars_encoded = html.urlencode_vars(vars)
+    response = get_url(url, site.get('insecure', False),
+                       post_data=vars_encoded)
+    if not response:
+        raise MKAutomationException("Empty output from remote site.")
+    try:
+        response = eval(response)
+    except:
+        # The remote site will send non-Python data in case of an
+        # error.
+        raise MKAutomationException("
    %s
    " % response)
+    return response
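do_remote_automation() above defines the wire contract of Distributed WATO: the request is urlencoded key/value pairs (including the shared secret), and the answer is the repr() of a Python object that is parsed back with eval(). The same contract in isolation (a sketch; trusting the peer's output is exactly what the secret handshake is for):

    import urllib

    # Sketch: encode a request and decode a response as the code above does.
    def encode_request(vars):
        return urllib.urlencode(vars)       # "command=...&secret=..."

    def decode_response(text):
        return eval(text)                   # the peer answers with repr(obj)

    # decode_response("{'result': True}") -> {'result': True}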
    " % response) + return response -class CheckTypeGroupSelection(ElementSelection): - def __init__(self, checkgroup, **kwargs): - ElementSelection.__init__(self, **kwargs) - self._checkgroup = checkgroup +# Determine, if we have any slaves to distribute +# configuration to. +def is_distributed(sites = None): + if sites == None: + sites = config.sites + for site in sites.values(): + if site.get("replication"): + return True + return False - def get_elements(self): - checks = check_mk_local_automation("get-check-information") - elements = dict([ (cn, "%s - %s" % (cn, c["title"])) for (cn, c) in checks.items() - if c.get("group") == self._checkgroup ]) - return elements +def declare_site_attribute(): + undeclare_host_attribute("site") + if is_distributed(): + declare_host_attribute(SiteAttribute(), show_in_table = True, show_in_folder = True) - def value_to_text(self, value): - return "%s" % value +def default_site(): + for id, site in config.sites.items(): + if not "socket" in site \ + or site["socket"] == "unix:" + defaults.livestatus_unix_socket: + return id + try: + return config.sites.keys()[0] + except: + return None +class SiteAttribute(Attribute): + def __init__(self): + # Default is is the local one, if one exists or + # no one if there is no local site + self._choices = [] + for id, site in config.sites.items(): + title = id + if site.get("alias"): + title += " - " + site["alias"] + self._choices.append((id, title)) -#. -# .-Timeperiods----------------------------------------------------------. -# | _____ _ _ _ | -# | |_ _(_)_ __ ___ ___ _ __ ___ _ __(_) ___ __| |___ | -# | | | | | '_ ` _ \ / _ \ '_ \ / _ \ '__| |/ _ \ / _` / __| | -# | | | | | | | | | | __/ |_) | __/ | | | (_) | (_| \__ \ | -# | |_| |_|_| |_| |_|\___| .__/ \___|_| |_|\___/ \__,_|___/ | -# | |_| | -# +----------------------------------------------------------------------+ -# | Modes for managing Nagios' timeperiod definitions. | -# '----------------------------------------------------------------------' + self._choices.sort(cmp=lambda a,b: cmp(a[1], b[1])) + self._choices_dict = dict(self._choices) + Attribute.__init__(self, "site", _("Monitored on site"), + _("Specify the site that should monitor this host."), + default_value = default_site()) -def mode_timeperiods(phase): - if phase == "title": - return _("Time Periods") + def paint(self, value, hostname): + return "", self._choices_dict.get(value, value) - elif phase == "buttons": - global_buttons() - html.context_button(_("New Timeperiod"), make_link([("mode", "edit_timeperiod")]), "new") - return + def render_input(self, value): + html.select("site", self._choices, value) - timeperiods = load_timeperiods() + def from_html_vars(self): + return html.var("site") - if phase == "action": - delname = html.var("_delete") - if html.transaction_valid(): - usages = find_usages_of_timeperiod(delname) - if usages: - message = "%s
    %s:
      " % \ - (_("You cannot delete this timeperiod."), - _("It is still in use by")) - for title, link in usages: - message += '
    • %s
    • \n' % (link, title) - message += "
    " - raise MKUserError(None, message) + def get_tag_list(self, value): + return [ "site:" + value ] - c = wato_confirm(_("Confirm deletion of time period %s") % delname, - _("Do you really want to delete the time period '%s'? I've checked it: " - "it is not being used by any rule or user profile right now.") % delname) - if c: - del timeperiods[delname] - save_timeperiods(timeperiods) - log_pending(SYNCRESTART, None, "edit-timeperiods", _("Deleted timeperiod %s") % delname) - return None - elif c == False: - return "" +# The replication status contains information about each +# site. It is a dictionary from the site id to a dict with +# the following keys: +# "need_sync" : 17, # number of non-synchronized changes +# "need_restart" : True, # True, if remote site needs a restart (cmk -R) +def load_replication_status(): + try: + return eval(file(repstatus_file).read()) + except: + return {} + +def save_replication_status(repstatus): + config.write_settings_file(repstatus_file, repstatus) + +# Updates one or more dict elements of a site in an +# atomic way. If vars is None, the sites status will +# be removed +def update_replication_status(site_id, vars, times = {}): + make_nagios_directory(var_dir) + fd = os.open(repstatus_file, os.O_RDWR | os.O_CREAT) + fcntl.flock(fd, fcntl.LOCK_EX) + repstatus = load_replication_status() + if vars == None: + if site_id in repstatus: + del repstatus[site_id] + else: + repstatus.setdefault(site_id, {}) + repstatus[site_id].update(vars) + old_times = repstatus[site_id].setdefault("times", {}) + for what, duration in times.items(): + if what not in old_times: + old_times[what] = duration else: - return None + old_times[what] = 0.8 * old_times[what] + 0.2 * duration + save_replication_status(repstatus) + os.close(fd) +def update_login_sites_replication_status(): + for siteid, site in config.sites.items(): + if site.get('user_login', True) and not site_is_local(siteid): + update_replication_status(siteid, {'need_sync': True}) - table.begin(_("Time Periods"), empty_text = _("There are no timeperiods defined yet.")) - names = timeperiods.keys() - names.sort() - for name in names: - table.row() +def global_replication_state(): + repstatus = load_replication_status() + some_dirty = False - timeperiod = timeperiods[name] - edit_url = make_link([("mode", "edit_timeperiod"), ("edit", name)]) - delete_url = html.makeactionuri([("_delete", name)]) + for site_id in config.sitenames(): + site = config.site(site_id) + if not site_is_local(site_id) and not site.get("replication"): + continue - table.cell(_("Actions"), css="buttons") - html.icon_button(edit_url, _("Properties"), "edit") - html.icon_button(delete_url, _("Delete"), "delete") + srs = repstatus.get(site_id, {}) + if srs.get("need_sync") or srs.get("need_restart"): + some_dirty = True - table.cell(_("Name"), name) - table.cell(_("Alias"), timeperiod.get("alias", "")) - table.end() + if some_dirty: + return "dirty" + else: + return "clean" +def find_host_sites(site_ids, folder, hostname): + if hostname in folder[".hosts"]: + host = folder[".hosts"][hostname] + if "site" in host and host["site"]: + site_ids.add(host["site"]) + elif folder[".siteid"]: + site_ids.add(folder[".siteid"]) +# Scan recursively for references to sites +# in folders and hosts +def find_folder_sites(site_ids, folder, include_folder = False): + if include_folder and folder[".siteid"]: + site_ids.add(folder[".siteid"]) + load_hosts(folder) + for hostname in folder[".hosts"]: + find_host_sites(site_ids, folder, hostname) + for subfolder in 
folder[".folders"].values(): + find_folder_sites(site_ids, subfolder, include_folder) -def load_timeperiods(): - filename = root_dir + "timeperiods.mk" - if not os.path.exists(filename): - return {} - try: - vars = { "timeperiods" : {} } - execfile(filename, vars, vars) - return vars["timeperiods"] +# This method is called when: +# a) moving a host from one folder to another (2 times) +# b) deleting a host +# c) deleting a folder +# d) changing a folder's attributes (2 times) +# e) changing the attributes of a host (2 times) +# f) saving check configuration of a single host +# g) doing bulk inventory for a host +# h) doing bulk edit on a host (2 times) +# i) doing bulk cleanup on a host (2 time) +# It scans for the sites affected by the hosts in a folder and its subfolders. +# Please note: The "site" attribute of the folder itself is not relevant +# at all. It's just there to be inherited to the hosts. What counts is +# only the attributes of the hosts. +def mark_affected_sites_dirty(folder, hostname=None, sync = True, restart = True): + if is_distributed(): + site_ids = set([]) + if hostname: + find_host_sites(site_ids, folder, hostname) + else: + find_folder_sites(site_ids, folder) + for site_id in site_ids: + changes = {} + if sync and not site_is_local(site_id): + changes["need_sync"] = True + if restart: + changes["need_restart"] = True + update_replication_status(site_id, changes) - except Exception, e: - if config.debug: - raise MKGeneralException(_("Cannot read configuration file %s: %s" % - (filename, e))) - return {} +# def mark_all_sites_dirty(sites): +# changes = { +# "need_sync" : True, +# "need_restart" : True, +# } +# for site_id, site in sites.items(): +# update_replication_status(site_id, changes) +def remove_sync_snapshot(siteid): + path = sync_snapshot_file(siteid) + if os.path.exists(path): + os.remove(path) -def save_timeperiods(timeperiods): - make_nagios_directory(root_dir) - out = create_user_file(root_dir + "timeperiods.mk", "w") - out.write("# Written by WATO\n# encoding: utf-8\n\n") - out.write("timeperiods.update(%s)\n" % pprint.pformat(timeperiods)) +def sync_snapshot_file(siteid): + return defaults.tmp_dir + "/sync-%s.tar.gz" % siteid -class ExceptionName(TextAscii): - def __init__(self, **kwargs): - kwargs["regex"] = "^[-a-z0-9A-Z /]*$" - kwargs["regex_error"] = _("This is not a valid Nagios timeperiod day specification.") - kwargs["allow_empty"] = False - TextAscii.__init__(self, **kwargs) +def create_sync_snapshot(siteid): + path = sync_snapshot_file(siteid) + if not os.path.exists(path): + tmp_path = "%s-%s" % (path, id(html)) + + # Add site-specific global settings. 
+def create_sync_snapshot(siteid):
+    path = sync_snapshot_file(siteid)
+    if not os.path.exists(path):
+        tmp_path = "%s-%s" % (path, id(html))
+
+        # Add site-specific global settings.
+        site_tmp_dir = defaults.tmp_dir + "/sync-%s-specific-%s" % (siteid, id(html))
+        create_site_globals_file(siteid, site_tmp_dir)
+
+        paths = replication_paths + [("dir", "sitespecific", site_tmp_dir)]
+        multitar.create(tmp_path, paths)
+        shutil.rmtree(site_tmp_dir)
+        os.rename(tmp_path, path)

+def synchronize_site(site, restart):
+    if site_is_local(site["id"]):
+        if restart:
+            start = time.time()
+            restart_site(site)
+            update_replication_status(site["id"],
+                { "need_restart" : False },
+                { "restart" : time.time() - start})
+
+        return True
+
+    create_sync_snapshot(site["id"])
+    try:
+        start = time.time()
+        result = push_snapshot_to_site(site, restart)
+        duration = time.time() - start
+        update_replication_status(site["id"], {},
+            { restart and "sync+restart" or "restart" : duration })
+        if result == True:
+            update_replication_status(site["id"], {
+                "need_sync" : False,
+                "result"    : _("Success"),
+            })
+            if restart:
+                update_replication_status(site["id"], { "need_restart": False })
+        else:
+            update_replication_status(site["id"], { "result" : result })
+        return result
+    except Exception, e:
+        update_replication_status(site["id"], { "result" : str(e) })
+        raise

+# Isolated restart without prior synchronization. Currently this
+# is only being called for the local site.
+def restart_site(site):
+    start = time.time()
+    check_mk_automation(site["id"], config.wato_activation_method)
+    duration = time.time() - start
+    update_replication_status(site["id"],
+        { "need_restart" : False }, { "restart" : duration })

+def push_snapshot_to_site(site, do_restart):
+    mode = site.get("replication", "slave")
+    url_base = site["multisiteurl"] + "automation.py?"
+    var_string = html.urlencode_vars([
+        ("command",  "push-snapshot"),
+        ("secret",   site["secret"]),
+        ("siteid",   site["id"]),         # This site must know its ID
+        ("mode",     mode),
+        ("restart",  do_restart and "yes" or "on"),
+        ("debug",    config.debug and "1" or ""),
+    ])
+    url = url_base + var_string
+    response_text = upload_file(url, sync_snapshot_file(site["id"]), site.get('insecure', False))
+    try:
+        return eval(response_text)
+    except:
+        raise MKAutomationException(_("Garbled automation response from site %s: '%s'") %
+            (site["id"], response_text))

+def push_user_profile_to_site(site, user_id, profile):
+    url = site["multisiteurl"] + "automation.py?" 
+ html.urlencode_vars([ + ("command", "push-profile"), + ("secret", site["secret"]), + ("siteid", site['id']), + ("debug", config.debug and "1" or ""), + ]) + content = html.urlencode_vars([ + ('user_id', user_id), + ('profile', mk_repr(profile)), + ]) - def value_to_text(self, value): - parts = [] - for v in value: - parts.append(self._rangevs.value_to_text(v)) - return ", ".join(parts) + response = get_url(url, site.get('insecure', False), post_data = content) + if not response: + raise MKAutomationException("Empty output from remote site.") - def from_html_vars(self, varprefix): - value = [] - for c in range(0, self._num_columns): - v = self._rangevs.from_html_vars(varprefix + "_%d" % c) - if v != None: - value.append(v) - return value + try: + response = mk_eval(response) + except: + # The remote site will send non-Python data in case of an error. + raise MKAutomationException('Invalid response: %s' % response) + return response - def validate_value(self, value, varprefix): - for c, v in enumerate(value): - self._rangevs.validate_value(v, varprefix + "_%d" % c) +def synchronize_profile(site, user_id): + users = userdb.load_users(lock = False) + if not user_id in users: + raise MKUserError(None, _('The requested user does not exist')) -# Check, if timeperiod tpa excludes or is tpb -def timeperiod_excludes(timeperiods, tpa_name, tpb_name): - if tpa_name == tpb_name: - return True + start = time.time() + result = push_user_profile_to_site(site, user_id, users[user_id]) + duration = time.time() - start + update_replication_status(site["id"], {}, {"profile-sync": duration}) + return result - tpa = timeperiods[tpa_name] - for ex in tpa.get("exclude", []): - if ex == tpb_name: - return True - if timeperiod_excludes(timeperiods, ex, tpb_name): - return True - return False +# AJAX handler for javascript triggered wato activation +def ajax_activation(): + try: + if is_distributed(): + raise MKUserError(None, _('Call not supported in distributed setups.')) -def mode_edit_timeperiod(phase): - num_columns = 3 - timeperiods = load_timeperiods() - name = html.var("edit") # missing -> new group - new = name == None + config.need_permission("wato.activate") - # ValueSpec for the list of Exceptions - vs_ex = ListOf( - Tuple( - orientation = "horizontal", - show_titles = False, - elements = [ - ExceptionName(), - MultipleTimeRanges()] - ), - movable = False, - add_label = _("Add Exception")) + # Initialise g_root_folder, load all folder information + prepare_folder_info() - # ValueSpec for excluded Timeperiods. We offer the list of - # all other timeperiods - but only those that do not - # exclude the current timeperiod (in order to avoid cycles) - other_tps = [] - for tpname, tp in timeperiods.items(): - if not new and not timeperiod_excludes(timeperiods, tpname, name): - other_tps.append((tpname, tp.get("alias") or name)) + # Activate changes for single site + activate_changes() - vs_excl = ListChoice(choices = other_tps) + log_commit_pending() # flush logfile with pending actions + log_audit(None, "activate-config", _("Configuration activated, monitoring server restarted")) - # convert Check_MK representation of range to ValueSpec-representation - def convert_from_tod(tod): - # "00:30" -> (0, 30) - return tuple(map(int, tod.split(":"))) + html.write('OK: ') + html.write('
    <div class=act_success>%s</div>
    ' % + _("Configuration successfully activated.")) + except Exception, e: + html.show_error(str(e)) - def convert_from_range(range): - # ("00:30", "10:17") -> ((0,30),(10,17)) - return tuple(map(convert_from_tod, range)) +# Try to do a rush-ahead-activation +def cmc_rush_ahead_activation(): + return + +def cmc_reload(): + log_audit(None, "activate-config", "Reloading Check_MK Micro Core on the fly") + html.live.command("[%d] RELOAD_CONFIG" % time.time()) - def convert_to_tod(value): - return "%02d:%02d" % value +# AJAX handler for asynchronous replication of user profiles (changed passwords) +def ajax_profile_repl(): + site_id = html.var("site") - def convert_to_range(value): - return tuple(map(convert_to_tod, value)) + status = html.site_status.get(site_id, {}).get("state", "unknown") + if status == "dead": + result = _('The site is marked as dead. Not trying to replicate.') - def timeperiod_ranges(vp, keyname, new): - ranges = timeperiod.get(keyname, []) - value = [] - for range in ranges: - value.append(convert_from_range(range)) - if len(value) == 0 and new: - value.append(((0,0),(24,0))) + else: + site = config.site(site_id) + try: + result = synchronize_profile(site, config.user_id) + except Exception, e: + result = str(e) - html.write("") - MultipleTimeRanges().render_input(vp, value) - html.write("") + if result == True: + answer = "0 %s" % _("Replication completed successfully."); + else: + answer = "1 %s" % (_("Error: %s") % result) + # Add pending entry to make sync possible later for admins + update_replication_status(site_id, {"need_sync": True}) + log_pending(AFFECTED, None, "edit-users", _('Password changed (sync failed: %s)') % result) - def get_ranges(varprefix): - value = MultipleTimeRanges().from_html_vars(varprefix) - MultipleTimeRanges().validate_value(value, varprefix) - return map(convert_to_range, value) + html.write(answer) - if phase == "title": - if new: - return _("Create new time period") - else: - return _("Edit time period") +# AJAX handler for asynchronous site replication +def ajax_replication(): + site_id = html.var("site") + repstatus = load_replication_status() + srs = repstatus.get(site_id, {}) + need_sync = srs.get("need_sync", False) + need_restart = srs.get("need_restart", False) - elif phase == "buttons": - html.context_button(_("All Timeperiods"), make_link([("mode", "timeperiods")]), "back") - return + # Initialise g_root_folder, load all folder information + prepare_folder_info() - if new: - timeperiod = {} + site = config.site(site_id) + try: + if need_sync: + result = synchronize_site(site, need_restart) + else: + restart_site(site) + result = True + except Exception, e: + result = str(e) + if result == True: + answer = "OK:" + _("Success"); + # Make sure that the pending changes are clean as soon as the + # last site has successfully been updated. + if is_distributed() and global_replication_state() == "clean": + log_commit_pending() else: - timeperiod = timeperiods.get(name, {}) - - weekdays = [ - ( "monday", _("Monday") ), - ( "tuesday", _("Tuesday") ), - ( "wednesday", _("Wednesday") ), - ( "thursday", _("Thursday") ), - ( "friday", _("Friday") ), - ( "saturday", _("Saturday") ), - ( "sunday", _("Sunday") ), - ] + answer = "
    <div class=error>%s: %s</div>
    " % (_("Error"), hilite_errors(result)) - if phase == "action": - if html.check_transaction(): - alias = html.var_utf8("alias").strip() - if not alias: - raise MKUserError("alias", _("Please specify an alias name for your timeperiod.")) + html.write(answer) - timeperiod.clear() +#. +# .--Automation-Webservice-----------------------------------------------. +# | _ _ _ _ | +# | / \ _ _| |_ ___ _ __ ___ __ _| |_(_) ___ _ __ | +# | / _ \| | | | __/ _ \| '_ ` _ \ / _` | __| |/ _ \| '_ \ | +# | / ___ \ |_| | || (_) | | | | | | (_| | |_| | (_) | | | | | +# | /_/ \_\__,_|\__\___/|_| |_| |_|\__,_|\__|_|\___/|_| |_| | +# | | +# +----------------------------------------------------------------------+ +# | These function implement a web service with that a master can call | +# | automation functions on slaves. | +# '----------------------------------------------------------------------' - # extract time ranges of weekdays - for weekday, weekday_name in weekdays: - ranges = get_ranges(weekday) - if ranges: - timeperiod[weekday] = ranges - elif weekday in timeperiod: - del timeperiod[weekday] +def page_automation_login(): + if not config.may("wato.automation"): + raise MKAuthException(_("This account has no permission for automation.")) + # When we are here, a remote (master) site has successfully logged in + # using the credentials of the administrator. The login is done be exchanging + # a login secret. If such a secret is not yet present it is created on + # the fly. + html.write(repr(get_login_secret(True))) - # extract ranges for custom days - exceptions = vs_ex.from_html_vars("except") - vs_ex.validate_value(exceptions, "except") - for exname, ranges in exceptions: - timeperiod[exname] = map(convert_to_range, ranges) +def get_login_secret(create_on_demand = False): + path = var_dir + "automation_secret.mk" + try: + return eval(file(path).read()) + except: + if not create_on_demand: + return None + secret = get_random_string(32) + write_settings_file(path, secret) + return secret - # extract excludes - excludes = vs_excl.from_html_vars("exclude") - vs_excl.validate_value(excludes, "exclude") - if excludes: - timeperiod["exclude"] = excludes +def site_is_local(siteid): + return config.site_is_local(siteid) - if new: - name = html.var("name") - if len(name) == 0: - raise MKUserError("name", _("Please specify a name of the new timeperiod.")) - if not re.match("^[-a-z0-9A-Z_]*$", name): - raise MKUserError("name", _("Invalid timeperiod name. Only the characters a-z, A-Z, 0-9, _ and - are allowed.")) - if name in timeperiods: - raise MKUserError("name", _("This name is already being used by another timeperiod.")) - if name == "24X7": - raise MKUserError("name", _("The time period name 24X7 cannot be used. It is always autmatically defined.")) - timeperiods[name] = timeperiod - log_pending(SYNCRESTART, None, "edit-timeperiods", _("Created new time period %s" % name)) - else: - log_pending(SYNCRESTART, None, "edit-timeperiods", _("Modified time period %s" % name)) - timeperiod["alias"] = alias - save_timeperiods(timeperiods) - return "timeperiods" - return +# Returns the ID of our site. This function only works in replication +# mode and looks for an entry connecting to the local socket. 
+def our_site_id(): + if not is_distributed(): + return None + for site_id in config.allsites(): + if site_is_local(site_id): + return site_id + return None - html.begin_form("timeperiod", method="POST") - forms.header(_("Timeperiod")) +automation_commands = {} - # Name - forms.section(_("Internal name"), simple = not new) - if new: - html.text_input("name") - html.set_focus("name") - else: - html.write(name) +def page_automation(): + secret = html.var("secret") + if not secret: + raise MKAuthException(_("Missing secret for automation command.")) + if secret != get_login_secret(): + raise MKAuthException(_("Invalid automation secret.")) - # Alias - if not new: - alias = timeperiods[name].get("alias", "") - else: - alias = "" + # To prevent mixups in written files we use the same lock here as for + # the normal WATO page processing. This might not be needed for some + # special automation requests, like inventory e.g., but to keep it simple, + # we request the lock in all cases. + lock_exclusive() - forms.section(_("Alias")) - html.help(_("An alias or description of the timeperiod")) - html.text_input("alias", alias, size = 81) - if not new: - html.set_focus("alias") + # Initialise g_root_folder, load all folder information + prepare_folder_info() - # Week days - forms.section(_("Weekdays")) - html.help("For each weekday you can setup no, one or several " - "time ranges in the format 23:39, in which the time period " - "should be active.") - html.write("") + command = html.var("command") + if command == "checkmk-automation": + cmk_command = html.var("automation") + args = mk_eval(html.var("arguments")) + indata = mk_eval(html.var("indata")) + result = check_mk_local_automation(cmk_command, args, indata) + html.write(repr(result)) - for weekday, weekday_alias in weekdays: - ranges = timeperiod.get(weekday) - html.write("" % weekday_alias) - timeperiod_ranges(weekday, weekday, new) - html.write("") - html.write("
    %s
    ") + elif command == "push-snapshot": + html.write(repr(automation_push_snapshot())) - # Exceptions - nagurl = "../nagios/docs/objectdefinitions.html#timeperiod" - forms.section(_("Exceptions")) - html.help(_("Here you can specify exceptional time ranges for certain " - "relative or absolute dates. Please consult the " - "Nagios documentation about " - "timeperiods for examples." % nagurl)) + elif command == "push-profile": + html.write(mk_repr(automation_push_profile())) - exceptions = [] - for k in timeperiod: - if k not in [ w[0] for w in weekdays ] and k not in [ "alias", "exclude" ]: - exceptions.append((k, map(convert_from_range, timeperiod[k]))) - exceptions.sort() - vs_ex.render_input("except", exceptions) + elif command in automation_commands: + html.write(repr(automation_commands[command]())) - # Excludes - if other_tps: - forms.section(_("Exclude")) - vs_excl.render_input("exclude", timeperiod.get("exclude", [])) + else: + raise MKGeneralException(_("Invalid automation command: %s.") % command) +def automation_push_profile(): + try: + site_id = html.var("siteid") + if not site_id: + raise MKGeneralException(_("Missing variable siteid")) - forms.end() - html.button("save", _("Save")) - html.hidden_fields() - html.end_form() + user_id = html.var("user_id") + if not user_id: + raise MKGeneralException(_("Missing variable user_id")) + our_id = our_site_id() -class TimeperiodSelection(ElementSelection): - def __init__(self, **kwargs): - ElementSelection.__init__(self, **kwargs) + # In peer mode, we have a replication configuration ourselves and + # we have a site ID our selves. Let's make sure that ID matches + # the ID our peer thinks we have. + if our_id != None and our_id != site_id: + raise MKGeneralException( + _("Site ID mismatch. Our ID is '%s', but you are saying we are '%s'.") % + (our_id, site_id)) - def get_elements(self): - timeperiods = load_timeperiods() - elements = dict([ ("24X7", _("Always")) ] + \ - [ (name, "%s - %s" % (name, tp["alias"])) for (name, tp) in timeperiods.items() ]) - return elements + profile = html.var("profile") + if not profile: + raise MKGeneralException(_('Invalid call: The profile is missing.')) + + users = userdb.load_users(lock = True) + profile = mk_eval(profile) + users[user_id] = profile + userdb.save_users(users) -# Check if a timeperiod is currently in use and cannot be deleted -# Returns a list of occurrances. -# Possible usages: -# - 1. rules: service/host-notification/check-period -# - 2. user accounts (notification period) -# - 3. 
excluded by other timeperiods -def find_usages_of_timeperiod(tpname): + return True + except Exception, e: + if config.debug: + return _("Internal automation error: %s\n%s") % (str(e), format_exception()) + else: + return _("Internal automation error: %s") % e - # Part 1: Rules - used_in = [] - for varname, ruleset in load_all_rulesets().items(): - rulespec = g_rulespecs[varname] - if isinstance(rulespec.get("valuespec"), TimeperiodSelection): - for folder, rule in ruleset: - value, tag_specs, host_list, item_list, rule_options = parse_rule(rulespec, rule) - if value == tpname: - used_in.append(("%s: %s" % (_("Ruleset"), g_rulespecs[varname]["title"]), - make_link([("mode", "edit_ruleset"), ("varname", varname)]))) - break +def automation_push_snapshot(): + try: + site_id = html.var("siteid") + if not site_id: + raise MKGeneralException(_("Missing variable siteid")) + mode = html.var("mode", "slave") - # Part 2: Users - for userid, user in userdb.load_users().items(): - tp = user.get("notification_period") - if tp == tpname: - used_in.append(("%s: %s" % (_("User"), userid), - make_link([("mode", "edit_user"), ("edit", userid)]))) + our_id = our_site_id() - # Part 3: Other Timeperiods - for tpn, tp in load_timeperiods().items(): - if tpname in tp.get("exclude", []): - used_in.append(("%s: %s (%s)" % (_("Timeperiod"), tp.get("alias", tpn), - _("excluded")), - make_link([("mode", "edit_timeperiod"), ("edit", tpn)]))) + if mode == "slave" and is_distributed(): + raise MKGeneralException(_("Configuration error. You treat us as " + "a slave, but we have an own distributed WATO configuration!")) - return used_in + # In peer mode, we have a replication configuration ourselves and + # we have a site ID our selves. Let's make sure that ID matches + # the ID our peer thinks we have. + if our_id != None and our_id != site_id: + raise MKGeneralException( + _("Site ID mismatch. Our ID is '%s', but you are saying we are '%s'.") % + (our_id, site_id)) + # Make sure there are no local changes we would loose! But only if we are + # distributed ourselves (meaning we are a peer). + if is_distributed(): + pending = parse_audit_log("pending") + if len(pending) > 0: + message = _("There are %d pending changes that would get lost. The most recent are: ") % len(pending) + message += ", ".join([e[-1] for e in pending[:10]]) + raise MKGeneralException(message) -#. -# .-Multisite Connections------------------------------------------------. -# | ____ _ _ | -# | / ___|(_) |_ ___ ___ | -# | \___ \| | __/ _ \/ __| | -# | ___) | | || __/\__ \ | -# | |____/|_|\__\___||___/ | -# | | -# +----------------------------------------------------------------------+ -# | Mode for managing sites. | -# '----------------------------------------------------------------------' + tarcontent = html.uploaded_file("snapshot") + if not tarcontent: + raise MKGeneralException(_('Invalid call: The snapshot is missing.')) + tarcontent = tarcontent[2] -# Sort given sites argument by peer/local, followed by slaves -def sort_sites(sites): - def custom_sort(a,b): - return cmp(a[1].get("replication","peer"), b[1].get("replication","peer")) or \ - -cmp(a[1].get("repl_priority",0), b[1].get("repl_priority",0)) or \ - cmp(a[1].get("alias"), b[1].get("alias")) - sites.sort(cmp = custom_sort) + multitar.extract_from_buffer(tarcontent, replication_paths) -def mode_sites(phase): - if phase == "title": - return _("Distributed Monitoring") + # We expect one file containing sitespecific global settings. 
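+ # (This file was packed on the master side by create_sync_snapshot(),
+ # see above.)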
+ # That is contained in the sub-tarball "sitespecific.tar" and + # just contains one file: "sitespecific.mk". The contains a repr() + # of all global settings, that should override the ones in global.mk + # in various directories. + try: + tmp_dir = defaults.tmp_dir + "/sitespecific-%s" % id(html) + if not os.path.exists(tmp_dir): + make_nagios_directory(tmp_dir) + multitar.extract_from_buffer(tarcontent, [ ("dir", "sitespecific", tmp_dir) ]) + site_globals = eval(file(tmp_dir + "/sitespecific.mk").read()) + current_settings = load_configuration_settings() + current_settings.update(site_globals) + save_configuration_settings(current_settings) + shutil.rmtree(tmp_dir) + except Exception, e: + html.log("Warning: cannot extract site-specific global settings: %s" % e) - elif phase == "buttons": - global_buttons() - html.context_button(_("New connection"), make_link([("mode", "edit_site")]), "new") - return + log_commit_pending() # pending changes are lost - sites = load_sites() + call_hook_snapshot_pushed() - if phase == "action": - delid = html.var("_delete") - if delid and html.transaction_valid(): - # The last connection can always be deleted. In that case we - # fallb back to non-distributed-WATO and the site attribute - # will be removed. - test_sites = dict(sites.items()) - del test_sites[delid] - if is_distributed(test_sites): - # Make sure that site is not being used by hosts and folders - site_ids = set([]) - find_folder_sites(site_ids, g_root_folder, True) - if delid in site_ids: - raise MKUserError(None, - _("You cannot delete this connection. " - "It has folders/hosts assigned to it.")) + # Create rule making this site only monitor our hosts + create_distributed_wato_file(site_id, mode) + log_audit(None, "replication", _("Synchronized with master (my site id is %s.)") % site_id) + if html.var("restart", "no") == "yes": + check_mk_local_automation(config.wato_activation_method) + return True + except Exception, e: + if config.debug: + return _("Internal automation error: %s\n%s") % (str(e), format_exception()) + else: + return _("Internal automation error: %s") % e - c = wato_confirm(_("Confirm deletion of site %s" % delid), - _("Do you really want to delete the connection to the site %s?" % delid)) - if c: - del sites[delid] - save_sites(sites) - update_replication_status(delid, None) +def create_distributed_wato_file(siteid, mode): + out = create_user_file(defaults.check_mk_configdir + "/distributed_wato.mk", "w") + out.write("# Written by WATO\n# encoding: utf-8\n\n") + out.write("# This file has been created by the master site\n" + "# push the configuration to us. It makes sure that\n" + "# we only monitor hosts that are assigned to our site.\n\n") + out.write("distributed_wato_site = '%s'\n" % siteid) + +def delete_distributed_wato_file(): + p = defaults.check_mk_configdir + "/distributed_wato.mk" + # We do not delete the file but empty it. That way + # we do not need write permissions to the conf.d + # directory! + if os.path.exists(p): + create_user_file(p, "w").write("") - # Due to the deletion the replication state can get clean. - if is_distributed() and global_replication_state() == "clean": - log_commit_pending() +def has_distributed_wato_file(): + return os.path.exists(defaults.check_mk_configdir + "/distributed_wato.mk") - log_pending(SYNCRESTART, None, "edit-sites", _("Deleted site %s" % (delid))) - return None - elif c == False: - return "" - else: - return None +#. +# .--Users/Contacts------------------------------------------------------. 
+# | _ _ ______ _ _ | +# || | | |___ ___ _ __ ___ / / ___|___ _ __ | |_ __ _ ___| |_ ___ | +# || | | / __|/ _ \ '__/ __| / / | / _ \| '_ \| __/ _` |/ __| __/ __| | +# || |_| \__ \ __/ | \__ \/ /| |__| (_) | | | | || (_| | (__| |_\__ \ | +# | \___/|___/\___|_| |___/_/ \____\___/|_| |_|\__\__,_|\___|\__|___/ | +# | | +# +----------------------------------------------------------------------+ +# | Mode for managing users and contacts. | +# '----------------------------------------------------------------------' - logout_id = html.var("_logout") - if logout_id: - site = sites[logout_id] - c = wato_confirm(_("Confirm logout"), - _("Do you really want to log out of '%s'?") % site["alias"]) - if c: - if "secret" in site: - del site["secret"] - save_sites(sites) - log_audit(None, "edit-site", _("Logged out of remote site '%s'") % site["alias"]) - return None, _("Logged out.") - elif c == False: - return "" - else: - return None +def service_levels(): + try: + return config.mkeventd_service_levels + except: + return [(0, "(no service level)")] - login_id = html.var("_login") - if login_id: - if html.var("_abort"): - return "sites" - if not html.check_transaction(): - return - site = sites[login_id] - error = None - # Fetch name/password of admin account - if html.has_var("_name"): - name = html.var("_name", "").strip() - passwd = html.var("_passwd", "").strip() +# Example header of a notification script: +#!/usr/bin/python +# HTML Emails with included graphs +# Bulk: yes +# Argument 1: Full system path to the pnp4nagios index.php for fetching the graphs. Usually auto configured in OMD. +# Argument 2: HTTP-URL-Prefix to open Multisite. When provided, several links are added to the mail. +# +# This script creates a nifty HTML email in multipart format with +# attached graphs and such neat stuff. Sweet! + +def load_notification_scripts_from(adir): + scripts = {} + if os.path.exists(adir): + for entry in os.listdir(adir): + path = adir + "/" + entry + if os.path.isfile(path) and os.access(path, os.X_OK): + info = { "title" : entry, "bulk" : False } try: - secret = do_site_login(login_id, name, passwd) - site["secret"] = secret - save_sites(sites) - log_audit(None, "edit-site", _("Successfully logged into remote site '%s'") % site["alias"]) - return None, _("Successfully logged into remote site '%s'!" % site["alias"]) - except MKAutomationException, e: - error = _("Cannot connect to remote site: %s") % e - except MKUserError, e: - html.add_user_error(e.varname, e.message) - error = e.message - except Exception, e: - if config.debug: - raise - html.add_user_error("_name", error) - error = str(e) + lines = file(path) + lines.next() + line = lines.next().strip() + if line.startswith("#") and "encoding:" in line: + line = lines.next().strip() + if line.startswith("#"): + info["title"] = line.lstrip("#").strip().split("#", 1)[0] + while True: + line = lines.next().strip() + if not line.startswith("#") or ":" not in line: + break + key, value = line[1:].strip().split(":", 1) + value = value.strip() + if key.lower() == "bulk": + info["bulk"] = (value == "yes") + except: + pass + scripts[entry] = info + return scripts - wato_html_head(_("Login into site '%s'") % site["alias"]) - if error: - html.show_error(error) - html.write("
    ") - html.write("
    <h2>%s</h2>
    " % _("Login credentials")) - html.write(_("For the initial login into the slave/peer site %s " - "we need once your administration login for the Multsite " - "GUI on that site. Your credentials will only be used for " - "the initial handshake and not be stored. If the login is " - "successful then both side will exchange a login secret " - "which is used for the further remote calls.") % site["alias"]) - html.begin_form("login") - html.write("") - html.write("" % _("Administrator login")) - html.write("") - html.write("
    %s") - html.write("
    %s" % _("Adminstrator name:")) - html.text_input("_name") - html.set_focus("_name") - html.write("
    %s" % _("Administrator password:")) - html.password_input("_passwd") - html.write("
    ") - html.write("
    ") - html.button("_do_login", _("Login")) - html.button("_abort", _("Abort")) - html.write("
    ") - html.hidden_field("_login", login_id) - html.hidden_fields() - html.end_form() - html.write("
    ") - return "" - return +def load_notification_scripts(): + scripts = {} + try: + not_dir = defaults.notifications_dir + except: + not_dir = defaults.share_dir + "/notifications" # for those with not up-to-date defaults - table.begin(_("Connections to local and remote sites"), - empty_text = _("You have not configured any local or remotes sites. Multisite will " - "implicitely add the data of the local monitoring site. If you add remotes " - "sites, please do not forget to add your local monitoring site also, if " - "you want to display its data.")) + scripts = load_notification_scripts_from(not_dir) + try: + local_dir = defaults.omd_root + "/local/share/check_mk/notifications" + scripts.update(load_notification_scripts_from(local_dir)) + except: + pass - entries = sites.items() - sort_sites(entries) - for id, site in entries: - table.row() - # Buttons - edit_url = make_link([("mode", "edit_site"), ("edit", id)]) - delete_url = html.makeactionuri([("_delete", id)]) - table.cell(_("Actions"), css="buttons") - html.icon_button(edit_url, _("Properties"), "edit") - html.icon_button(delete_url, _("Delete"), "delete") + return scripts - # Site-ID - table.cell(_("Site-ID"), id) +def notification_script_choices(): + scripts = load_notification_scripts() - # Alias - table.cell(_("Alias"), site.get("alias", "")) + choices = [ (name, info["title"].decode('utf-8')) for (name, info) in scripts.items() ] + choices.append((None, _("ASCII Email (legacy)"))) + choices.sort(cmp = lambda a,b: cmp(a[1], b[1])) + # Make choices localizable + choices = [ (k, _(v)) for k, v in choices ] + return choices - # Socket - socket = site.get("socket", _("local site")) - if socket == "disabled:": - socket = _("don't query status") - table.cell(_("Socket"), socket) - # Status host - if "status_host" in site: - sh_site, sh_host = site["status_host"] - table.cell(_("Status host"), "%s/%s" % (sh_site, sh_host)) - else: - table.cell(_("Status host")) +def notification_script_choices_with_parameters(): + choices = [] + for script_name, title in notification_script_choices(): + if script_name in g_notification_parameters: + vs = g_notification_parameters[script_name] + else: + vs = ListOfStrings( + title = _("Call with the following parameters:"), + valuespec = TextUnicode(size = 24), + orientation = "horizontal", + ) + choices.append((script_name, title, + Alternative( + style = "dropdown", + elements = [ + vs, + FixedValue(None, totext = _("previous notifications of this type are cancelled"), + title = _("Cancel previous notifications")), + ] + ) + )) + return choices - # Disabled - if site.get("disabled", False) == True: - table.cell(_("Disabled"), "%s" % _("yes")) - else: - table.cell(_("Disabled"), _("no")) - # Timeout - if "timeout" in site: - table.cell(_("Timeout"), _("%d sec") % site["timeout"], css="number") - else: - table.cell(_("Timeout"), "") - # Persist - if site.get("persist", False): - table.cell(_("Pers."), "%s" % _("yes")) - else: - table.cell(_("Pers."), _("no")) +def notification_script_title(name): + return dict(notification_script_choices()).get(name, name) - # Replication - if site.get("replication") == "slave": - repl = _("Slave") - elif site.get("replication") == "peer": - repl = _("Peer") - else: - repl = "" - table.cell(_("Replication"), repl) - # Replication Priority - table.cell(_("Prio"), (site.get("replication") != "slave" and - str(site.get("repl_priority", 0)) or ""), css="number") +def load_notification_table(): + # Make sure, that list is not trivially false + def 
validate_only_services(value, varprefix): + for s in value: + if s and s[0] != '!': + return + raise MKUserError(varprefix + "_0", _("The list of services will never match")) - # Login-Button for Replication - table.cell(_("Login")) - if repl: - if site.get("secret"): - logout_url = make_action_link([("mode", "sites"), ("_logout", id)]) - html.buttonlink(logout_url, _("Logout")) - else: - login_url = make_action_link([("mode", "sites"), ("_login", id)]) - html.buttonlink(login_url, _("Login")) + global vs_notification_method + vs_notification_method = \ + CascadingDropdown( + title = _("Notification Method"), + choices = [ + ( "email", _("Plain Text Email (using configured templates)") ), + ( "flexible", + _("Flexible Custom Notifications"), + ListOf( + Foldable( + Dictionary( + optional_keys = [ "service_blacklist", "only_hosts", "only_services", "escalation" , "match_sl"], + columns = 1, + headers = True, + elements = [ + ( "plugin", + DropdownChoice( + title = _("Notification Plugin"), + choices = notification_script_choices, + default_value = "mail", + ), + ), + ( "parameters", + ListOfStrings( + title = _("Plugin Arguments"), + help = _("You can specify arguments to the notification plugin here. " + "Please refer to the documentation about the plugin for what " + "parameters are allowed or required here."), + ) + ), + ( "disabled", + Checkbox( + title = _("Disabled"), + label = _("Currently disable this notification"), + default_value = False, + ) + ), + ( "timeperiod", + TimeperiodSelection( + title = _("Timeperiod"), + help = _("Do only notifiy alerts within this time period"), + ) + ), + ( "escalation", + Tuple( + title = _("Restrict to nth to mth notification (escalation)"), + orientation = "float", + elements = [ + Integer( + label = _("from"), + help = _("Let through notifications counting from this number"), + default_value = 1, + minvalue = 1, + maxvalue = 999999, + ), + Integer( + label = _("to"), + help = _("Let through notifications counting upto this number"), + default_value = 999999, + minvalue = 1, + maxvalue = 999999, + ), + ], + ), + ), + ( "match_sl", + Tuple( + title = _("Match service level"), + help = _("Host or Service must be in the following service level to get notification"), + orientation = "horizontal", + show_titles = False, + elements = [ + DropdownChoice(label = _("from:"), choices = service_levels, prefix_values = True), + DropdownChoice(label = _(" to:"), choices = service_levels, prefix_values = True), + ], + ), + ), + ( "host_events", + ListChoice( + title = _("Host Events"), + choices = [ + ( 'd', _("Host goes down")), + ( 'u', _("Host gets unreachble")), + ( 'r', _("Host goes up again")), + ( 'f', _("Start or end of flapping state")), + ( 's', _("Start or end of a scheduled downtime ")), + ( 'x', _("Acknowledgement of host problem")), + ], + default_value = [ 'd', 'u', 'r', 'f', 's', 'x' ], + ) + ), + ( "service_events", + ListChoice( + title = _("Service Events"), + choices = [ + ( 'w', _("Service goes into warning state")), + ( 'u', _("Service goes into unknown state")), + ( 'c', _("Service goes into critical state")), + ( 'r', _("Service recovers to OK")), + ( 'f', _("Start or end of flapping state")), + ( 's', _("Start or end of a scheduled downtime")), + ( 'x', _("Acknowledgement of service problem")), + ], + default_value = [ 'w', 'c', 'u', 'r', 'f', 's', 'x' ], + ) + ), + ( "only_hosts", + ListOfStrings( + title = _("Limit to the following hosts"), + help = _("Configure the hosts for this notification. 
Without prefix, only exact, case sensitive matches, " + "! for negation and ~ for regex matches."), + orientation = "horizontal", + valuespec = RegExp(size = 20), + ), + ), + ( "only_services", + ListOfStrings( + title = _("Limit to the following services"), + help = _("Configure regular expressions that match the beginning of the service names here. Prefix an " + "entry with ! in order to exclude that service."), + orientation = "horizontal", + valuespec = RegExp(size = 20), + validate = validate_only_services, + ), + ), + ( "service_blacklist", + ListOfStrings( + title = _("Blacklist the following services"), + help = _("Configure regular expressions that match the beginning of the service names here."), + orientation = "horizontal", + valuespec = RegExp(size = 20), + validate = validate_only_services, + ), + ), + ] + ), + title_function = lambda v: _("Notify by: ") + notification_script_title(v["plugin"]), + ), + title = _("Flexible Custom Notifications"), + add_label = _("Add notification"), + ), + ), + ] + ) - table.end() -def mode_edit_site(phase): - sites = load_sites() - siteid = html.var("edit", None) # missing -> new site - new = siteid == None +def mode_users(phase): if phase == "title": - if new: - return _("Create new site connection") - else: - return _("Edit site connection %s" % siteid) + return _("Users") elif phase == "buttons": - html.context_button(_("All Sites"), make_link([("mode", "sites")]), "back") + global_buttons() + html.context_button(_("New User"), make_link([("mode", "edit_user")]), "new") + html.context_button(_("Custom Attributes"), make_link([("mode", "user_attrs")]), "custom_attr") + if 'wato_users' not in config.userdb_automatic_sync: + html.context_button(_("Sync Users"), html.makeactionuri([("_sync", 1)]), "replicate") + if config.may("general.notify"): + html.context_button(_("Notify Users"), 'notify.py', "notification") + if userdb.connector_enabled('ldap'): + html.context_button(_("LDAP Settings"), make_link([("mode", "ldap_config")]), "ldap") return - if new: - site = {} - else: - site = sites.get(siteid, {}) - - # ValueSpecs for the more complex input fields - vs_conn_method = CascadingDropdown( - html_separator = " ", - choices = [ - ( None, _("Connect to the local site") ), - ( "tcp", _("Connect via TCP"), Tuple( - orientation = "float", - elements = [ - TextAscii(label = _("Host:"), allow_empty = False, size=15), - Integer(label = _("Port:"), minvalue=1, maxvalue=65535, default_value=6557), - ])), - ( "unix", _("Connect via UNIX socket"), TextAscii( - label = _("Path:"), - size = 30, - allow_empty = False)), - ( "disabled", _("Do not connect")), - ]) + # Execute all connectors synchronisations of users. This must be done before + # loading the users, because it might modify the users list. But don't execute + # it during actions, this should save some time. 
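+ # (When 'wato_users' is not contained in config.userdb_automatic_sync,
+ # this automatic call is skipped; the "Sync Users" context button above
+ # then triggers the same synchronization manually via the _sync action.)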
+ if phase != "action" and 'wato_users' in config.userdb_automatic_sync: + userdb.hook_sync(add_to_changelog = True) + roles = userdb.load_roles() + users = filter_hidden_users(userdb.load_users(lock = phase == 'action' and html.var('_delete'))) + timeperiods = load_timeperiods() + contact_groups = userdb.load_group_information().get("contact", {}) if phase == "action": - if not html.check_transaction(): - return "sites" - - if new: - id = html.var("id").strip() - else: - id = siteid - - if new and id in sites: - raise MKUserError("id", _("This id is already being used by another connection.")) - if not re.match("^[-a-z0-9A-Z_]+$", id): - raise MKUserError("id", _("The site id must consist only of letters, digit and the underscore.")) - - # Save copy of old site for later - if not new: - old_site = sites[siteid] - - new_site = {} - sites[id] = new_site - alias = html.var_utf8("alias", "").strip() - if not alias: - raise MKUserError("alias", _("Please enter an alias name or description of this site.")) - - new_site["alias"] = alias - url_prefix = html.var("url_prefix", "").strip() - if url_prefix and url_prefix[-1] != '/': - raise MKUserError("url_prefix", _("The URL prefix must end with a slash.")) - if url_prefix: - new_site["url_prefix"] = url_prefix - disabled = html.get_checkbox("disabled") - new_site["disabled"] = disabled + if html.var('_delete'): + delid = html.var("_delete") + if delid == config.user_id: + raise MKUserError(None, _("You cannot delete your own account!")) - # Connection - method = vs_conn_method.from_html_vars("method") - vs_conn_method.validate_value(method, "method") - if type(method) == tuple: - if method[0] == "unix": - new_site["socket"] = "unix:" + method[1] - else: - new_site["socket"] = "tcp:%s:%d" % method[1] - elif method: - new_site["socket"] = method - elif "socket" in new_site: - del new_site["socket"] + if delid not in users: + return None # The account does not exist (anymore), no deletion needed - # Timeout - timeout = html.var("timeout", "").strip() - if timeout != "": + c = wato_confirm(_("Confirm deletion of user %s" % delid), + _("Do you really want to delete the user %s?" % delid)) + if c: + del users[delid] + userdb.save_users(users) + log_pending(SYNCRESTART, None, "edit-users", _("Deleted user %s" % (delid))) + elif c == False: + return "" + elif html.var('_sync'): try: - timeout = int(timeout) - except: - raise MKUserError("timeout", _("%s is not a valid integer number.") % timeout) - new_site["timeout"] = timeout + if userdb.hook_sync(add_to_changelog = True, raise_exc = True): + return None, _('The user synchronization completed successfully.') + except Exception, e: + if config.debug: + import traceback + raise MKUserError(None, traceback.format_exc().replace('\n', '
    \n')) + else: + raise MKUserError(None, str(e)) - # Persist - new_site["persist"] = html.get_checkbox("persist") + return None + visible_custom_attrs = [ + (name, attr) + for name, attr + in userdb.get_user_attributes() + if attr.get('show_in_table', False) + ] - # Status host - sh_site = html.var("sh_site") - if sh_site: - if sh_site not in sites: - raise MKUserError("sh_site", _("The site of the status host does not exist.")) - if sh_site in [ siteid, id ]: - raise MKUserError("sh_site", _("You cannot use the site itself as site of the status host.")) - sh_host = html.var("sh_host") - if not sh_host: - raise MKUserError("sh_host", _("Please specify the name of the status host.")) - new_site["status_host"] = ( sh_site, sh_host ) + entries = users.items() + entries.sort(cmp = lambda a, b: cmp(a[1].get("alias", a[0]).lower(), b[1].get("alias", b[0]).lower())) - # Replication - repl = html.var("replication") - if repl == "none": - repl = None - if repl: - new_site["replication"] = repl + table.begin("users", None, empty_text = _("No users are defined yet.")) + online_threshold = time.time() - config.user_online_maxage + for id, user in entries: + table.row() - # Replication Priority - if not repl or repl != "slave": - try: - new_site["repl_priority"] = int(html.var("repl_priority", 0)) - except: - raise MKUserError("repl_priority", _("Replication Priority '%s' is not a valid number.") % html.var("repl_priority","")) + connector = userdb.get_connector(user.get('connector')) - multisiteurl = html.var("multisiteurl", "").strip() - if repl: - if not multisiteurl: - raise MKUserError("multisiteurl", - _("Please enter the Multisite URL of the slave/peer site.")) - if not multisiteurl.endswith("/check_mk/"): - raise MKUserError("multisiteurl", - _("The Multisite URL must end with /check_mk/")) - if not multisiteurl.startswith("http://") and not multisiteurl.startswith("https://"): - raise MKUserError("multisiteurl", - _("The Multisites URL must begin with http:// or https://.")) - if "socket" not in new_site: - raise MKUserError("replication", - _("You cannot do replication with the local site.")) + # Buttons + table.cell(_("Actions"), css="buttons") + if connector: # only show edit buttons when the connector is available and enabled + edit_url = make_link([("mode", "edit_user"), ("edit", id)]) + html.icon_button(edit_url, _("Properties"), "edit") - # Save Multisite-URL even if replication is turned off. That way that - # setting is not lost if replication is turned off for a while. 
- new_site["multisiteurl"] = multisiteurl + clone_url = make_link([("mode", "edit_user"), ("clone", id)]) + html.icon_button(clone_url, _("Create a copy of this user"), "clone") - # Handle the insecure replication flag - new_site["insecure"] = html.get_checkbox("insecure") + delete_url = make_action_link([("mode", "users"), ("_delete", id)]) + html.icon_button(delete_url, _("Delete"), "delete") - # Secret is not checked here, just kept - if not new and "secret" in old_site: - new_site["secret"] = old_site["secret"] + notifications_url = make_link([("mode", "user_notifications"), ("user", id)]) + if load_configuration_settings().get("enable_rulebased_notifications"): + html.icon_button(notifications_url, _("Custom notification table of this user"), "notifications") + # ID + table.cell(_("ID"), id) - save_sites(sites) + # Online/Offline + if config.save_user_access_times: + last_seen = user.get('last_seen', 0) + if last_seen >= online_threshold: + title = _('Online') + img_txt = 'on' + else: + title = _('Offline') + img_txt = 'off' + title += ' (%s %s)' % (fmt_date(last_seen), fmt_time(last_seen)) + table.cell(_("Act."), '' % (title, img_txt)) - # Own site needs RESTART in any case - update_replication_status(our_site_id(), { "need_restart" : True }) - if new: - if not site_is_local(id): - update_replication_status(id, { "need_sync" : True, "need_restart" : True }) - log_pending(AFFECTED, None, "edit-sites", _("Created new connection to site %s" % id)) + # Connector + if connector: + table.cell(_("Connector"), connector['short_title']) + locked_attributes = userdb.locked_attributes(user.get('connector')) else: - log_pending(AFFECTED, None, "edit-sites", _("Modified site connection %s" % id)) - # Replication mode has switched on/off => handle replication state - repstatus = load_replication_status() - if repl: # Repl is on - update_replication_status(id, { "need_sync" : True, "need_restart" : True }) - elif id in repstatus: # Repl switched off - update_replication_status(id, None) # Replication switched off - if is_distributed() and global_replication_state() == "clean": - log_commit_pending() - return "sites" + table.cell(_("Connector"), "%s (disabled)" % userdb.get_connector_id(user.get('connector')), css="error") + locked_attributes = [] - html.begin_form("site") + # Authentication + if "automation_secret" in user: + auth_method = _("Automation") + elif user.get("password") or 'password' in locked_attributes: + auth_method = _("Password") + else: + auth_method = "%s" % _("none") + table.cell(_("Authentication"), auth_method) + table.cell(_("State")) + locked = user.get("locked", False) + if user.get("locked", False): + html.icon(_('The login is currently locked'), 'user_locked') + if user.get("disable_notifications", False): + html.icon(_('Notifications are disabled'), 'notif_disabled') - # ID - forms.header(_("Basic settings")) - forms.section(_("Site ID"), simple = not new) - if new: - html.text_input("id", siteid) - html.set_focus("id") - else: - html.write(siteid) + # Full name / Alias + table.cell(_("Alias"), user.get("alias", "")) - # Alias - forms.section(_("Alias")) - html.text_input("alias", site.get("alias", ""), size = 60) - if not new: - html.set_focus("alias") - html.help(_("An alias or description of the site")) + # Email + table.cell(_("Email"), user.get("email", "")) + + # Roles + table.cell(_("Roles")) + if user.get("roles", []): + html.write(", ".join( + [ '%s' % (make_link([("mode", "edit_role"), ("edit", r)]), roles[r].get('alias')) for r in user["roles"]])) + # contact 
groups + table.cell(_("Contact groups")) + cgs = user.get("contactgroups", []) + if cgs: + html.write(", ".join( + [ '%s' % (make_link([("mode", "edit_contact_group"), ("edit", c)]), + c in contact_groups and contact_groups[c]['alias'] or c) for c in cgs])) + else: + html.write("" + _("none") + "") - forms.header(_("Livestatus settings")) - forms.section(_("Connection")) - method = site.get("socket", None) - if method and method.startswith("unix:"): - method = ('unix', method[5:]) - elif method and method.startswith("tcp:"): - method = ('tcp', tuple(method.split(":")[1:])) - vs_conn_method.render_input("method", method) + # notifications + if not load_configuration_settings().get("enable_rulebased_notifications"): + table.cell(_("Notifications")) + if not cgs: + html.write(_("not a contact")) + elif not user.get("notifications_enabled", True): + html.write(_("disabled")) + elif "" == user.get("host_notification_options", "") \ + and "" == user.get("service_notification_options", ""): + html.write(_("all events disabled")) + else: + tp = user.get("notification_period", "24X7") + if tp != "24X7" and tp not in timeperiods: + tp = tp + _(" (invalid)") + elif tp != "24X7": + url = make_link([("mode", "edit_timeperiod"), ("edit", tp)]) + tp = '%s' % (url, timeperiods[tp].get("alias", tp)) + else: + tp = _("Always") + html.write(tp) - html.help( _("When connecting to remote site please make sure " - "that Livestatus over TCP is activated there. You can use UNIX sockets " - "to connect to foreign sites on localhost. Please make sure that this " - "site has proper read and write permissions to the UNIX socket of the " - "foreign site.")) + # the visible custom attributes + for name, attr in visible_custom_attrs: + vs = attr['valuespec'] + table.cell(_u(vs.title())) + html.write(vs.value_to_text(user.get(name, vs.default_value()))) - # Timeout - forms.section(_("Connect Timeout")) - timeout = site.get("timeout", 10) - html.number_input("timeout", timeout, size=2) - html.write(_(" seconds")) - html.help(_("This sets the time that Multisite waits for a connection " - "to the site to be established before the site is considered to be unreachable. " - "If not set, the operating system defaults are begin used and just one login attempt is being. " - "performed.")) + table.end() - # Persistent connections - forms.section(_("Persistent Connection"), simple=True) - html.checkbox("persist", site.get("persist", False), label=_("Use persistent connections")) - html.help(_("If you enable persistent connections then Multisite will try to keep open " - "the connection to the remote sites. This brings a great speed up in high-latency " - "situations but locks a number of threads in the Livestatus module of the target site.")) + if not userdb.load_group_information().get("contact", {}): + url = "wato.py?mode=contact_groups" + html.write("
    " + + _("Note: you haven't defined any contact groups yet. If you " + "create some contact groups you can assign users to them und thus " + "make them monitoring contacts. Only monitoring contacts can receive " + "notifications.") % url + "
    ") - # URL-Prefix - docu_url = "http://mathias-kettner.de/checkmk_multisite_modproxy.html" - forms.section(_("URL prefix")) - html.text_input("url_prefix", site.get("url_prefix", ""), size = 60) - html.help(_("The URL prefix will be prepended to links of addons like PNP4Nagios " - "or the classical Nagios GUI when a link to such applications points to a host or " - "service on that site. You can either use an absolute URL prefix like http://some.host/mysite/ " - "or a relative URL like /mysite/. When using relative prefixes you needed a mod_proxy " - "configuration in your local system apache that proxies such URLs ot the according remote site. " - "Please refer to the online documentation for details. " - "The prefix should end with a slash. Omit the /pnp4nagios/ from the prefix.") % docu_url) - # Status-Host - docu_url = "http://mathias-kettner.de/checkmk_multisite_statushost.html" - forms.section(_("Status host")) - sh = site.get("status_host") - if sh: - sh_site, sh_host = sh +def mode_edit_user(phase): + # Check if rule based notifications are enabled (via WATO) + rulebased_notifications = load_configuration_settings().get("enable_rulebased_notifications") + + users = userdb.load_users(lock = phase == 'action') + userid = html.var("edit") # missing -> new user + cloneid = html.var("clone") # Only needed in 'new' mode + new = userid == None + if phase == "title": + if new: + return _("Create new user") + else: + return _("Edit user %s" % userid) + + elif phase == "buttons": + html.context_button(_("All Users"), make_link([("mode", "users")]), "back") + if rulebased_notifications and not new: + html.context_button(_("Notifications"), make_link([("mode", "user_notifications"), + ("user", userid)]), "notifications") + return + + if new: + if cloneid: + user = users.get(cloneid, userdb.new_user_template('htpasswd')) + else: + user = userdb.new_user_template('htpasswd') + pw_suffix = 'new' else: - sh_site = "" - sh_host = "" - html.write(_("host: ")) - html.text_input("sh_host", sh_host, size=10) - html.write(_(" on monitoring site: ")) - html.sorted_select("sh_site", - [ ("", _("(no status host)")) ] + [ (sk, si.get("alias", sk)) for (sk, si) in sites.items() ], sh_site) - html.help( _("By specifying a status host for each non-local connection " - "you prevent Multisite from running into timeouts when remote sites do not respond. " - "You need to add the remote monitoring servers as hosts into your local monitoring " - "site and use their host state as a reachability state of the remote site. Please " - "refer to the online documentation for details.") % docu_url) + user = users.get(userid, userdb.new_user_template('htpasswd')) + pw_suffix = userid - # Disabled - forms.section(_("Disable"), simple=True) - html.checkbox("disabled", site.get("disabled", False), label = _("Temporarily disable this connection")) - html.help( _("If you disable a connection, then no data of this site will be shown in the status GUI. " - "The replication is not affected by this, however.")) + # Returns true if an attribute is locked and should be read only. 
Is only + # checked when modifying an existing user + locked_attributes = userdb.locked_attributes(user.get('connector')) + def is_locked(attr): + return not new and attr in locked_attributes - # Replication - forms.header(_("Configuration Replication (Distributed WATO)")) - forms.section(_("Replication method")) - html.select("replication", - [ ("none", _("No replication with this site")), - ("peer", _("Peer: synchronize configuration with this site")), - ("slave", _("Slave: push configuration to this site")) - ], site.get("replication", "none")) - html.help( _("WATO replication allows you to manage several monitoring sites with a " - "logically centralized WATO. Slave sites receive their configuration " - "from master sites. Several master sites can build a peer-to-peer " - "replication pool for sake of redundancy.

    Note: Slave sites " - "do not need any replication configuration. They will be remote-controlled " - "by the master sites.")) + def custom_user_attributes(topic = None): + for name, attr in userdb.get_user_attributes(): + if topic is not None and topic != attr['topic']: + continue # skip attrs of other topics - forms.section(_("Peer replication priority")) - html.number_input("repl_priority", site.get("repl_priority", 0), size=2) - html.help(_("The replication priority is used to determine the master site " - "from the available peers and local sites. " - "The site with the highest number takes precedence.")) + vs = attr['valuespec'] + forms.section(_u(vs.title())) + if attr['user_editable'] and not is_locked(name): + vs.render_input("ua_" + name, user.get(name, vs.default_value())) + else: + html.write(vs.value_to_text(user.get(name, vs.default_value()))) + # Render hidden to have the values kept after saving + html.write('
    <div style="display: none">') + vs.render_input("ua_" + name, user.get(name, vs.default_value())) + html.write('</div>
    ') + html.help(_u(vs.help())) + # Load data that is referenced - in order to display dropdown + # boxes and to check for validity. + contact_groups = userdb.load_group_information().get("contact", {}) + timeperiods = load_timeperiods() + roles = userdb.load_roles() - forms.section(_("Multisite-URL of remote site")) - html.text_input("multisiteurl", site.get("multisiteurl", ""), size=60) - html.help( _("URL of the remote Check_MK including /check_mk/. " - "This URL is in many cases the same as the URL-Prefix but with check_mk/ " - "appended, but it must always be an absolute URL. Please note, that " - "that URL will be fetched by the Apache server of the local " - "site itself, whilst the URL-Prefix is used by your local Browser.")) + if phase == "action": + if not html.check_transaction(): + return "users" - forms.section(_("SSL"), simple=True) - html.checkbox("insecure", site.get("insecure", False), label = _('Ignore SSL certificate errors')) - html.help( _('This might be needed to make the synchronization accept problems with ' - 'SSL certificates when using an SSL secured connection.')) + id = html.var("userid").strip() + if new and id in users: + raise MKUserError("userid", _("This username is already being used by another user.")) + if not re.match("^[-a-z0-9A-Z_\.@]+$", id): + raise MKUserError("userid", _("The username must consist only of letters, digits, @, _ or colon.")) - forms.end() - html.button("save", _("Save")) + if new: + new_user = {} + users[id] = new_user + else: + new_user = users[id] - html.hidden_fields() - html.end_form() + # Full name + alias = html.var_utf8("alias").strip() + if not alias: + raise MKUserError("alias", + _("Please specify a full name or descriptive alias for the user.")) + new_user["alias"] = alias + # Locking + if id == config.user_id and html.get_checkbox("locked"): + raise MKUserError("locked", _("You cannot lock your own account!")) + new_user["locked"] = html.get_checkbox("locked") -def load_sites(): - try: - if not os.path.exists(sites_mk): - return {} + increase_serial = False + if users[id] != new_user["locked"] and new_user["locked"]: + increase_serial = True # when user is being locked now, increase the auth serial - vars = { "sites" : {} } - execfile(sites_mk, vars, vars) - return vars["sites"] + # Authentication: Password or Secret + auth_method = html.var("authmethod") + if auth_method == "secret": + secret = html.var("secret", "").strip() + if not secret or len(secret) < 10: + raise MKUserError('secret', _("Please specify a secret of at least 10 characters length.")) + new_user["automation_secret"] = secret + new_user["password"] = userdb.encrypt_password(secret) + increase_serial = True # password changed, reflect in auth serial - except Exception, e: - if config.debug: - raise MKGeneralException(_("Cannot read configuration file %s: %s" % - (filename, e))) - return {} + else: + password = html.var("password_" + pw_suffix, '').strip() + password2 = html.var("password2_" + pw_suffix, '').strip() + # Detect switch back from automation to password + if "automation_secret" in new_user: + del new_user["automation_secret"] + if "password" in new_user: + del new_user["password"] # which was the encrypted automation password! -def save_sites(sites): - make_nagios_directory(multisite_dir) - # Important: even write out sites if it's empty. The global 'sites' - # variable will otherwise survive in the Python interpreter of the - # Apache processes. 
- out = create_user_file(sites_mk, "w") - out.write("# Written by WATO\n# encoding: utf-8\n\n") - out.write("sites = \\\n%s\n" % pprint.pformat(sites)) - config.load_config() # make new site configuration active - update_distributed_wato_file(sites) - declare_site_attribute() - rewrite_config_files_below(g_root_folder) # fix site attributes - need_sidebar_reload() + # We compare both passwords only, if the user has supplied + # the repeation! We are so nice to our power users... + if password2 and password != password2: + raise MKUserError("password2", _("The both passwords do not match.")) - # Call the sites saved hook - call_hook_sites_saved(sites) + if password: + verify_password_policy(password) + new_user["password"] = userdb.encrypt_password(password) + new_user['last_pw_change'] = int(time.time()) + increase_serial = True # password changed, reflect in auth serial -# Makes sure, that in distributed mode we monitor only -# the hosts that are directly assigned to our (the local) -# site. -def update_distributed_wato_file(sites): - # Note: we cannot access config.sites here, since we - # are currently in the process of saving the new - # site configuration. - distributed = False - found_local = False - for siteid, site in sites.items(): - if site.get("replication"): - distributed = True - if site_is_local(siteid): - found_local = True - create_distributed_wato_file(siteid, site.get("replication")) + # PW change enforcement + new_user["enforce_pw_change"] = html.get_checkbox("enforce_pw_change") + if new_user["enforce_pw_change"]: + increase_serial = True # invalidate all existing user sessions, enforce relogon - # Remove the distributed wato file - # a) If there is no distributed WATO setup - # b) If the local site could not be gathered - if not distributed: # or not found_local: - delete_distributed_wato_file() -#. -# .-Replication----------------------------------------------------------. -# | ____ _ _ _ _ | -# | | _ \ ___ _ __ | (_) ___ __ _| |_(_) ___ _ __ | -# | | |_) / _ \ '_ \| | |/ __/ _` | __| |/ _ \| '_ \ | -# | | _ < __/ |_) | | | (_| (_| | |_| | (_) | | | | | -# | |_| \_\___| .__/|_|_|\___\__,_|\__|_|\___/|_| |_| | -# | |_| | -# +----------------------------------------------------------------------+ -# | Functions dealing with the WATO replication feature. | -# | Let's call this "Distributed WATO". More buzz-word like :-) | -# '----------------------------------------------------------------------' + # Increase serial (if needed) + if increase_serial: + new_user['serial'] = new_user.get('serial', 0) + 1 -def do_site_login(site_id, name, password): - sites = load_sites() - site = sites[site_id] - if not name: - raise MKUserError("_name", - _("Please specify your administrator login on the remote site.")) - if not password: - raise MKUserError("_passwd", - _("Please specify your password.")) + # Email address + email = html.var("email", '').strip() + regex_email = '^[-a-zäöüÄÖÜA-Z0-9_.+%]+@[-a-zäöüÄÖÜA-Z0-9]+(\.[-a-zäöüÄÖÜA-Z0-9]+)*$' + if email and not re.match(regex_email, email): + raise MKUserError("email", _("'%s' is not a valid email address." % email)) + new_user["email"] = email - # Trying basic auth AND form based auth to ensure the site login works. - # Adding _ajaxid makes the web service fail silently with an HTTP code and - # not output HTML code for an error screen. 
- url = site["multisiteurl"] + 'automation_login.py?_login=1' \ - '&_username=%s&_password=%s&_origtarget=automation_login.py&_plain_error=1' % \ - (name, password) - response = get_url(url, site.get('insecure', False), name, password).strip() - if '' in response.lower(): - message = _("Authentication to web service failed.
    Message:
    %s") % \ - htmllib.strip_tags(htmllib.strip_scripts(response)) - if config.debug: - message += "
    Automation URL: %s
    " % url - raise MKAutomationException(message) - elif not response: - raise MKAutomationException(_("Empty response from web service")) - else: - try: - return eval(response) - except: - raise MKAutomationException(response) + # Pager + pager = html.var("pager", '').strip() + new_user["pager"] = pager -def upload_file(url, file_path, insecure): - return get_url(url, insecure, params = ' -F snapshot=@%s' % file_path) + # Roles + new_user["roles"] = filter(lambda role: html.get_checkbox("role_" + role), + roles.keys()) -def get_url(url, insecure, user=None, password=None, params = '', post_data = None): - cred = '' - if user: - cred = ' -u "%s:%s"' % (user, password) + # Language configuration + set_lang = html.get_checkbox('_set_lang') + language = html.var('language') + if set_lang: + if language == '': + language = None + new_user['language'] = language + elif not set_lang and 'language' in new_user: + del new_user['language'] - insecure = insecure and ' --insecure' or '' + # Contact groups + cgs = [] + for c in contact_groups: + if html.get_checkbox("cg_" + c): + cgs.append(c) + new_user["contactgroups"] = cgs - # -s: silent - # -S: show errors - # -w '%{http_code}': add the http status code to the end of the output - command = 'curl -w "\n%%{http_code}" -s -S%s%s%s "%s" 2>&1' % ( - insecure, cred, params, url) - tmp_file = None - if post_data != None: - # Put POST data on command line as long as it is not - # longer than 50 KB (remember: Linux has an upper limit - # of 132 KB for command line plus environment - if len(post_data) < 50000: - command += ' --data-binary "%s"' % post_data + # Notification settings are only active if we do *not* have + # rule based notifications! + if not rulebased_notifications: + # Notifications + new_user["notifications_enabled"] = html.get_checkbox("notifications_enabled") + + # Check if user can receive notifications + if new_user["notifications_enabled"]: + if not new_user["email"]: + raise MKUserError("email", + _('You have enabled the notifications but missed to configure a ' + 'Email address. You need to configure your mail address in order ' + 'to be able to receive emails.')) + + if not new_user["contactgroups"]: + raise MKUserError("notifications_enabled", + _('You have enabled the notifications but missed to make the ' + 'user member of at least one contact group. You need to make ' + 'the user member of a contact group which has hosts assigned ' + 'in order to be able to receive emails.')) + + if not new_user["roles"]: + raise MKUserError("role_user", + _("Your user has no roles. 
Please assign at least one role.")) + + ntp = html.var("notification_period") + if ntp not in timeperiods: + ntp = "24X7" + new_user["notification_period"] = ntp + + for what, opts in [ ( "host", "durfs"), ("service", "wucrfs") ]: + new_user[what + "_notification_options"] = "".join( + [ opt for opt in opts if html.get_checkbox(what + "_" + opt) ]) + + value = vs_notification_method.from_html_vars("notification_method") + vs_notification_method.validate_value(value, "notification_method") + new_user["notification_method"] = value + + # Custom user attributes + for name, attr in userdb.get_user_attributes(): + value = attr['valuespec'].from_html_vars('ua_' + name) + attr['valuespec'].validate_value(value, "ua_" + name) + new_user[name] = value + + # Saving + userdb.save_users(users) + if new: + log_pending(SYNCRESTART, None, "edit-users", _("Create new user %s" % id)) else: - import tempfile - tmp_file = tempfile.NamedTemporaryFile(dir = defaults.tmp_dir) - tmp_file.write(post_data) - tmp_file.flush() - command += ' --data-binary "@%s"' % tmp_file.name + log_pending(SYNCRESTART, None, "edit-users", _("Modified user %s" % id)) + return "users" - response = os.popen(command).read().strip() - try: - status_code = int(response[-3:]) - response_body = response[:-3] - except: - status_code = None - response_body = response + # Let exceptions from loading notification scripts happen now + load_notification_scripts() - if status_code == 401: - raise MKUserError("_passwd", _("Authentication failed. Invalid login/password.")) - elif status_code != 200: - raise MKUserError("_passwd", _("HTTP Error - %s: %s") % (status_code, response_body)) + html.begin_form("user", method="POST") + forms.header(_("Identity")) - return response_body + # ID + forms.section(_("Username"), simple = not new) + if new: + html.text_input("userid", userid) + html.set_focus("userid") + else: + html.write(userid) + html.hidden_field("userid", userid) -def check_mk_remote_automation(siteid, command, args, indata): - site = config.site(siteid) - if "secret" not in site: - raise MKGeneralException(_("Cannot access site %s - you are not logged in.") - % site.get("alias", siteid)) - # If the site is not up-to-date, synchronize it first. - repstatus = load_replication_status() - if repstatus.get(siteid, {}).get("need_sync"): - synchronize_site(config.site(siteid), False) + def lockable_input(name, dflt): + if not is_locked(name): + html.text_input(name, user.get(name, dflt), size = 50) + else: + html.write(user.get(name, dflt)) + html.hidden_field(name, user.get(name, dflt)) - # Now do the actual remote command - response = do_remote_automation( - config.site(siteid), "checkmk-automation", - [ - ("automation", command), # The Check_MK automation command - ("arguments", mk_repr(args)), # The arguments for the command - ("indata", mk_repr(indata)), # The input data - ]) - return response + # Full name + forms.section(_("Full name")) + lockable_input('alias', userid) + html.help(_("Full name or alias of the user")) -def do_remote_automation(site, command, vars): - base_url = site["multisiteurl"] - secret = site.get("secret") - if not secret: - raise MKAutomationException(_("You are not logged into the remote site.")) + # Email address + forms.section(_("Email address")) + lockable_input('email', '') + html.help(_("The email address is optional and is needed " + "if the user is a monitoring contact and receives notifications " + "via Email.")) - url = base_url + "automation.py?" 
+ \
-          htmllib.urlencode_vars([
-              ("command", command),
-              ("secret",  secret),
-              ("debug",   config.debug and '1' or '')
-          ])
-    vars_encoded = htmllib.urlencode_vars(vars)
-    response = get_url(url, site.get('insecure', False),
-                       post_data=vars_encoded)
-    if not response:
-        raise MKAutomationException("Empty output from remote site.")
-    try:
-        response = eval(response)
-    except:
-        # The remote site will send non-Python data in case of an
-        # error.
-        raise MKAutomationException("<pre>%s</pre>
    " % response) - return response + forms.section(_("Pager address")) + lockable_input('pager', '') + html.help(_("The pager address is optional ")) + custom_user_attributes('ident') + forms.header(_("Security")) + forms.section(_("Authentication")) + is_automation = user.get("automation_secret", None) != None + html.radiobutton("authmethod", "password", not is_automation, + _("Normal user login with password")) + html.write("
      %s" % _("password:")) + if not is_locked('password'): + html.password_input("password_" + pw_suffix, autocomplete="off") + html.write("
      %s" % _("repeat:")) + html.password_input("password2_" + pw_suffix, autocomplete="off") + html.write(" (%s)" % _("optional")) + html.write("
      %s:" % _("Enforce change")) + # Only make password enforcement selection possible when user is allowed to change the PW + if new or config.user_may(userid, 'general.edit_profile') and config.user_may(userid, 'general.change_password'): + html.checkbox("enforce_pw_change", user.get("enforce_pw_change", False), + label=_("Change password at next login or access")) + else: + html.write(_("Not permitted to change the password. Change can not be enforced.")) + else: + html.write('%s' % _('The password can not be changed (It is locked by the user connector).')) + html.hidden_field('password', '') + html.hidden_field('password2', '') + html.write("
    ") + html.radiobutton("authmethod", "secret", is_automation, + _("Automation secret for machine accounts")) + html.write("
      ") + html.text_input("secret", user.get("automation_secret", ""), size=30, + id="automation_secret") + html.write(" ") + html.write("  ") + html.icon_button("javascript:wato_randomize_secret('automation_secret', 20);", + _("Create random secret"), "random") + html.write("") + html.write("
    ") -# Determine, if we have any slaves to distribute -# configuration to. -def is_distributed(sites = None): - if sites == None: - sites = config.sites - for site in sites.values(): - if site.get("replication"): - return True - return False + html.help(_("If you want the user to be able to login " + "then specify a password here. Users without a login make sense " + "if they are monitoring contacts that are just used for " + "notifications. The repetition of the password is optional. " + "
    For accounts used by automation processes (such as fetching " + "data from views for further procession), set the method to " + "secret. The secret will be stored in a local file. Processes " + "with read access to that file will be able to use Multisite as " + "a webservice without any further configuration.")) -def declare_site_attribute(): - undeclare_host_attribute("site") - if is_distributed(): - declare_host_attribute(SiteAttribute(), show_in_table = True, show_in_folder = True) + # Locking + forms.section(_("Disable password"), simple=True) + if not is_locked('locked'): + html.checkbox("locked", user.get("locked", False), label = _("disable the login to this account")) + else: + html.write(user.get("locked", False) and _('Login disabled') or _('Login possible')) + html.hidden_field('locked', user.get("locked", False) and '1' or '') + html.help(_("Disabling the password will prevent a user from logging in while " + "retaining the original password. Notifications are not affected " + "by this setting.")) -def default_site(): - for id, site in config.sites.items(): - if not "socket" in site \ - or site["socket"] == "unix:" + defaults.livestatus_unix_socket: - return id - try: - return config.sites.keys()[0] - except: - return None + # Roles + forms.section(_("Roles")) + entries = roles.items() + entries.sort(cmp = lambda a,b: cmp((a[1]["alias"],a[0]), (b[1]["alias"],b[0]))) + is_member_of_at_least_one = False + for role_id, role in entries: + if not is_locked('roles'): + html.checkbox("role_" + role_id, role_id in user.get("roles", [])) + url = make_link([("mode", "edit_role"), ("edit", role_id)]) + html.write("%s
    " % (url, role["alias"])) + else: + is_member = role_id in user.get("roles", []) + if is_member: + is_member_of_at_least_one = True -class SiteAttribute(Attribute): - def __init__(self): - # Default is is the local one, if one exists or - # no one if there is no local site - self._choices = [] - for id, site in config.sites.items(): - title = id - if site.get("alias"): - title += " - " + site["alias"] - self._choices.append((id, title)) + url = make_link([("mode", "edit_role"), ("edit", role_id)]) + html.write("%s
    " % (url, role["alias"])) - self._choices.sort(cmp=lambda a,b: cmp(a[1], b[1])) - self._choices_dict = dict(self._choices) - Attribute.__init__(self, "site", _("Monitored on site"), - _("Specify the site that should monitor this host."), - default_value = default_site()) + html.hidden_field("role_" + role_id, is_member and '1' or '') + if is_locked('roles') and not is_member_of_at_least_one: + html.write('%s' % _('No roles assigned.')) + custom_user_attributes('security') - def paint(self, value, hostname): - return "", self._choices_dict.get(value, value) + # Contact groups + forms.header(_("Contact Groups"), isopen=False) + forms.section() + url1 = make_link([("mode", "contact_groups")]) + url2 = make_link([("mode", "rulesets"), ("group", "grouping")]) + if len(contact_groups) == 0: + html.write(_("Please first create some contact groups") % + url1) + else: + entries = [ (group['alias'], c) for c, group in contact_groups.items() ] + entries.sort() + is_member_of_at_least_one = False + for alias, gid in entries: + if not alias: + alias = gid + if not is_locked('contactgroups'): + html.checkbox("cg_" + gid, gid in user.get("contactgroups", [])) + url = make_link([("mode", "edit_contact_group"), ("edit", gid)]) + html.write(" %s
    " % (url, alias)) + else: + is_member = gid in user.get("contactgroups", []) + if is_member: + is_member_of_at_least_one = True - def render_input(self, value): - html.select("site", self._choices, value) + url = make_link([("mode", "edit_contact_group"), ("edit", gid)]) + html.write("%s
    " % (url, alias)) - def from_html_vars(self): - return html.var("site") + html.hidden_field("cg_" + gid, is_member and '1' or '') - def get_tag_list(self, value): - return [ "site:" + value ] + if is_locked('contactgroups') and not is_member_of_at_least_one: + html.write('%s' % _('No contact groups assigned.')) -# The replication status contains information about each -# site. It is a dictionary from the site id to a dict with -# the following keys: -# "need_sync" : 17, # number of non-synchronized changes -# "need_restart" : True, # True, if remote site needs a restart (cmk -R) -def load_replication_status(): - try: - return eval(file(repstatus_file).read()) - except: - return {} + html.help(_("Contact groups are used to assign monitoring " + "objects to users. If you haven't defined any contact groups yet, " + "then first do so. Hosts and services can be " + "assigned to contact groups using rules.

    " + "If you do not put the user into any contact group " + "then no monitoring contact will be created for the user.") % (url1, url2)) -def save_replication_status(repstatus): - config.write_settings_file(repstatus_file, repstatus) + if not rulebased_notifications: + forms.header(_("Notifications"), isopen=False) -# Updates one or more dict elements of a site in an -# atomic way. If vars is None, the sites status will -# be removed -def update_replication_status(site_id, vars, times = {}): - make_nagios_directory(var_dir) - fd = os.open(repstatus_file, os.O_RDWR | os.O_CREAT) - fcntl.flock(fd, fcntl.LOCK_EX) - repstatus = load_replication_status() - if vars == None: - if site_id in repstatus: - del repstatus[site_id] - else: - repstatus.setdefault(site_id, {}) - repstatus[site_id].update(vars) - old_times = repstatus[site_id].setdefault("times", {}) - for what, duration in times.items(): - if what not in old_times: - old_times[what] = duration - else: - old_times[what] = 0.8 * old_times[what] + 0.2 * duration - save_replication_status(repstatus) - os.close(fd) + forms.section(_("Enabling"), simple=True) + html.checkbox("notifications_enabled", user.get("notifications_enabled", False), + label = _("enable notifications")) + html.help(_("Notifications are sent out " + "when the status of a host or service changes.")) + + # Notification period + forms.section(_("Notification time period")) + choices = [ ( "24X7", _("Always")) ] + \ + [ ( id, "%s" % (tp["alias"])) for (id, tp) in timeperiods.items() ] + html.sorted_select("notification_period", choices, user.get("notification_period")) + html.help(_("Only during this time period the " + "user will get notifications about host or service alerts.")) + + # Notification options + notification_option_names = { # defined here: _() must be executed always! + "host" : { + "d" : _("Host goes down"), + "u" : _("Host gets unreachble"), + "r" : _("Host goes up again"), + }, + "service" : { + "w" : _("Service goes into warning state"), + "u" : _("Service goes into unknown state"), + "c" : _("Service goes into critical state"), + "r" : _("Service recovers to OK"), + }, + "both" : { + "f" : _("Start or end of flapping state"), + "s" : _("Start or end of a scheduled downtime"), + } + } -def global_replication_state(): - repstatus = load_replication_status() - some_dirty = False + forms.section(_("Notification Options")) + for title, what, opts in [ ( _("Host events"), "host", "durfs"), + (_("Service events"), "service", "wucrfs") ]: + html.write("%s:
      " % title) + user_opts = user.get(what + "_notification_options", opts) + for opt in opts: + opt_name = notification_option_names[what].get(opt, + notification_option_names["both"].get(opt)) + html.checkbox(what + "_" + opt, opt in user_opts, label = opt_name) + html.write("
      ") + html.write("
    ") + html.help(_("Here you specify which types of alerts " + "will be notified to this contact. Note: these settings will only be saved " + "and used if the user is member of a contact group.")) + + forms.section(_("Notification Method")) + vs_notification_method.render_input("notification_method", user.get("notification_method")) + custom_user_attributes('notify') - for site_id in config.sitenames(): - site = config.site(site_id) - if not site_is_local(site_id) and not site.get("replication"): - continue + forms.header(_("Personal Settings"), isopen = False) + select_language(user) + custom_user_attributes('personal') - srs = repstatus.get(site_id, {}) - if srs.get("need_sync") or srs.get("need_restart"): - some_dirty = True + # TODO: Later we could add custom macros here, which + # then could be used for notifications. On the other hand, + # if we implement some check_mk --notify, we could directly + # access the data in the account with the need to store + # values in the monitoring core. We'll see what future brings. + forms.end() + html.button("save", _("Save")) + html.hidden_fields() + html.end_form() - if some_dirty: - return "dirty" +def filter_hidden_users(users): + if config.wato_hidden_users: + return dict([ (id, user) for id, user in users.items() if id not in config.wato_hidden_users ]) else: - return "clean" - -def find_host_sites(site_ids, folder, hostname): - host = folder[".hosts"][hostname] - if "site" in host and host["site"]: - site_ids.add(host["site"]) - elif folder[".siteid"]: - site_ids.add(folder[".siteid"]) + return users -# Scan recursively for references to sites -# in folders and hosts -def find_folder_sites(site_ids, folder, include_folder = False): - if include_folder and folder[".siteid"]: - site_ids.add(folder[".siteid"]) - load_hosts(folder) - for hostname in folder[".hosts"]: - find_host_sites(site_ids, folder, hostname) - for subfolder in folder[".folders"].values(): - find_folder_sites(site_ids, subfolder, include_folder) -# This method is called when: -# a) moving a host from one folder to another (2 times) -# b) deleting a host -# c) deleting a folder -# d) changing a folder's attributes (2 times) -# e) changing the attributes of a host (2 times) -# f) saving check configuration of a single host -# g) doing bulk inventory for a host -# h) doing bulk edit on a host (2 times) -# i) doing bulk cleanup on a host (2 time) -# It scans for the sites affected by the hosts in a folder and its subfolders. -# Please note: The "site" attribute of the folder itself is not relevant -# at all. It's just there to be inherited to the hosts. What counts is -# only the attributes of the hosts. 
-def mark_affected_sites_dirty(folder, hostname=None, sync = True, restart = True): - if is_distributed(): - site_ids = set([]) - if hostname: - find_host_sites(site_ids, folder, hostname) - else: - find_folder_sites(site_ids, folder) - for site_id in site_ids: - changes = {} - if sync and not site_is_local(site_id): - changes["need_sync"] = True - if restart: - changes["need_restart"] = True - update_replication_status(site_id, changes) +def generate_wato_users_elements_function(none_value, only_contacts = False): + def get_wato_users(nv): + users = filter_hidden_users(userdb.load_users()) + elements = [ (name, "%s - %s" % (name, us.get("alias", name))) + for (name, us) + in users.items() + if (not only_contacts or us.get("contactgroups")) ] + elements.sort() + if nv != None: + elements = [ (None, none_value) ] + elements + return elements + return lambda: get_wato_users(none_value) -# def mark_all_sites_dirty(sites): -# changes = { -# "need_sync" : True, -# "need_restart" : True, -# } -# for site_id, site in sites.items(): -# update_replication_status(site_id, changes) -def remove_sync_snapshot(): - if os.path.exists(sync_snapshot_file): - os.remove(sync_snapshot_file) - -def create_sync_snapshot(): - if not os.path.exists(sync_snapshot_file): - tmp_path = "%s-%s" % (sync_snapshot_file, id(html)) - multitar.create(tmp_path, replication_paths) - os.rename(tmp_path, sync_snapshot_file) +# Dropdown for choosing a multisite user +class UserSelection(DropdownChoice): + def __init__(self, **kwargs): + only_contacts = kwargs.get("only_contacts", False) + kwargs["choices"] = generate_wato_users_elements_function(kwargs.get("none"), only_contacts = only_contacts) + DropdownChoice.__init__(self, **kwargs) -def synchronize_site(site, restart): - if site_is_local(site["id"]): - if restart: - start = time.time() - restart_site(site) - update_replication_status(site["id"], - { "need_restart" : False }, - { "restart" : time.time() - start}) + def value_to_text(self, value): + text = DropdownChoice.value_to_text(self, value) + return text.split(" - ")[-1] - return True - create_sync_snapshot() - try: - start = time.time() - result = push_snapshot_to_site(site, restart) - duration = time.time() - start - update_replication_status(site["id"], {}, - { restart and "sync+restart" or "restart" : duration }) - if result == True: - update_replication_status(site["id"], { - "need_sync": False, - "result" : _("Success"), - }) - if restart: - update_replication_status(site["id"], { "need_restart": False }) - else: - update_replication_status(site["id"], { "result" : result }) - return result - except Exception, e: - update_replication_status(site["id"], { "result" : str(e) }) - raise +#. +# .--Roles---------------------------------------------------------------. +# | ____ _ | +# | | _ \ ___ | | ___ ___ | +# | | |_) / _ \| |/ _ \/ __| | +# | | _ < (_) | | __/\__ \ | +# | |_| \_\___/|_|\___||___/ | +# | | +# +----------------------------------------------------------------------+ +# | Mode for managing roles and permissions. | +# | In order to make getting started easier - Check_MK Multisite comes | +# | with three builtin-roles: admin, user and guest. These roles have | +# | predefined permissions. The builtin roles cannot be deleted. Users | +# | listed in admin_users in multisite.mk automatically get the role | +# | admin - even if no such user or contact has been configured yet. By | +# | that way an initial login - e.g. as omdamin - is possible. 
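A note on the role cloning implemented further below: a free ID for the copy is found by appending "x" to the source ID until no existing role carries the result. As a tiny sketch:

    def free_clone_id(cloneid, roles):
        # Mirrors the loop in mode_roles(): "user" -> "userx" -> "userxx" ...
        newid = cloneid
        while newid in roles:
            newid += "x"
        return newid

    # free_clone_id("user", {"user": {}, "userx": {}}) -> "userxx"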
The admin | +# | role cannot be removed from that user as long as he is listed in | +# | admin_users. Also the variables guest_users, users and default_user_ | +# | role still work. That way Multisite is fully operable without WATO | +# | and also backwards compatible. | +# | In WATO you can create further roles and also edit the permissions | +# | of the existing roles. Users can be assigned to builtin and custom | +# | roles. | +# | This modes manages the creation of custom roles and the permissions | +# | configuration of all roles. | +# '----------------------------------------------------------------------' +def mode_roles(phase): + if phase == "title": + return _("Roles & Permissions") -# Isolated restart without prior synchronization. Currently this -# is only being called for the local site. -def restart_site(site): - start = time.time() - check_mk_automation(site["id"], config.wato_activation_method) - duration = time.time() - start - update_replication_status(site["id"], - { "need_restart" : False }, { "restart" : duration }) + elif phase == "buttons": + global_buttons() + html.context_button(_("Matrix"), make_link([("mode", "role_matrix")]), "matrix") + return -def push_snapshot_to_site(site, do_restart): - mode = site.get("replication", "slave") - url_base = site["multisiteurl"] + "automation.py?" - var_string = htmllib.urlencode_vars([ - ("command", "push-snapshot"), - ("secret", site["secret"]), - ("siteid", site["id"]), # This site must know it's ID - ("mode", mode), - ("restart", do_restart and "yes" or "on"), - ("debug", config.debug and "1" or ""), - ]) - url = url_base + var_string - response_text = upload_file(url, sync_snapshot_file, site.get('insecure', False)) - try: - response = eval(response_text) - return response - except: - raise MKAutomationException(_("Garbled automation response from site %s: '%s'") % - (site["id"], response_text)) + roles = userdb.load_roles() + users = filter_hidden_users(userdb.load_users()) -# AJAX handler for javascript triggered wato activation -def ajax_activation(): - try: - if is_distributed(): - raise MKUserError(None, _('Call not supported in distributed setups.')) + if phase == "action": + if html.var("_delete"): + delid = html.var("_delete") + if html.transaction_valid() and roles[delid].get('builtin'): + raise MKUserError(None, _("You cannot delete the builtin roles!")) - config.need_permission("wato.activate") + c = wato_confirm(_("Confirm deletion of role %s" % delid), + _("Do you really want to delete the role %s?" 
% delid)) + if c: + rename_user_role(delid, None) # Remove from existing users + del roles[delid] + save_roles(roles) + update_login_sites_replication_status() + log_pending(AFFECTED, None, "edit-roles", _("Deleted role '%s'" % delid)) + elif c == False: + return "" + elif html.var("_clone"): + if html.check_transaction(): + cloneid = html.var("_clone") + cloned_role = roles[cloneid] + newid = cloneid + while newid in roles: + newid += "x" + new_role = {} + new_role.update(cloned_role) + if cloned_role.get("builtin"): + new_role["builtin"] = False + new_role["basedon"] = cloneid + roles[newid] = new_role + save_roles(roles) + update_login_sites_replication_status() + log_pending(AFFECTED, None, "edit-roles", _("Created new role '%s'" % newid)) + return - # Initialise g_root_folder, load all folder information - prepare_folder_info() + table.begin("roles") - # This is the single site activation mode - try: - start = time.time() - check_mk_local_automation(config.wato_activation_method) - duration = time.time() - start - update_replication_status(None, {}, { 'act': duration }) - except Exception: - if config.debug: - import traceback - raise MKUserError(None, "Error executing hooks: %s" % - traceback.format_exc().replace('\n', '
    ')) - else: - raise + # Show table of builtin and user defined roles + entries = roles.items() + entries.sort(cmp = lambda a,b: cmp((a[1]["alias"],a[0]), (b[1]["alias"],b[0]))) - log_commit_pending() # flush logfile with pending actions - log_audit(None, "activate-config", _("Configuration activated, monitoring server restarted")) + for id, role in entries: + table.row() - # html.message - html.write('OK: ') - html.write('
    %s
    ' % - _("Configuration successfully activated.")) - except Exception, e: - html.show_error(str(e)) + # Actions + table.cell(_("Actions"), css="buttons") + edit_url = make_link([("mode", "edit_role"), ("edit", id)]) + clone_url = html.makeactionuri([("_clone", id)]) + delete_url = html.makeactionuri([("_delete", id)]) + html.icon_button(edit_url, _("Properties"), "edit") + html.icon_button(clone_url, _("Clone"), "clone") + if not role.get("builtin"): + html.icon_button(delete_url, _("Delete this role"), "delete") -# AJAX handler for asynchronous site replication -def ajax_replication(): - site_id = html.var("site") - repstatus = load_replication_status() - srs = repstatus.get(site_id, {}) - need_sync = srs.get("need_sync", False) - need_restart = srs.get("need_restart", False) + # ID + table.cell(_("Name"), id) - # Initialise g_root_folder, load all folder information - prepare_folder_info() + # Alias + table.cell(_("Alias"), role["alias"]) - site = config.site(site_id) - try: - if need_sync: - result = synchronize_site(site, need_restart) - else: - restart_site(site) - result = True - except Exception, e: - result = str(e) - if result == True: - answer = "OK:" + _("Success"); - # Make sure that the pending changes are clean as soon as the - # last site has successfully been updated. - if is_distributed() and global_replication_state() == "clean": - log_commit_pending() - else: - answer = "
    %s: %s
    " % (_("Error"), hilite_errors(result)) + # Type + table.cell(_("Type"), role.get("builtin") and _("builtin") or _("custom")) - html.write(answer) + # Modifications + table.cell(_("Modifications"), "%s" % ( + _("That many permissions do not use the factory defaults."), len(role["permissions"]))) -def preferred_peer(): - local_site = None - best_peer = None - best_working_peer = None + # Users + table.cell(_("Users"), + ", ".join([ '%s' % (make_link([("mode", "edit_user"), ("edit", user_id)]), + user.get("alias", user_id)) + for (user_id, user) in users.items() if (id in user["roles"])])) - for site_id, site in config.allsites().items(): - if site.get("replication") == "slave": - continue # Ignore slave sites - if not site.get("replication") and not site_is_local(site_id): - continue # Ignore sites without distributed WATO + # Possibly we could also display the following information + # - number of set permissions (needs loading users) + # - number of users with this role + table.end() - # a) No peer found yet - # b) Replication priority of current site is greater than best peer - # c) On same priority -> use higher alphabetical order - if best_peer == None \ - or site.get("repl_priority",0) > best_peer.get("repl_priority",0) \ - or (site_id < best_peer["id"] and site.get("repl_priority",0) == best_peer.get("repl_priority",0)): - best_peer = site - if site_is_local(site_id): - best_working_peer = site - local_site = site - else: - ss = html.site_status.get(site_id, {}) - status = ss.get("state", "unknown") - if status == "online": - best_working_peer = site - if best_working_peer: # Good - if best_working_peer == local_site: - if best_peer != best_working_peer: - return False # Only better peer is broken - else: - return None # Means we are the blessed one - else: - return best_working_peer - return None # no peer, not even a local site... +def mode_edit_role(phase): + id = html.var("edit") -def do_peer_redirect(peer): - if is_distributed(): - current_mode = html.var("mode") or "main" - if peer: - rel_url = html.makeuri([]) - frameset_url = "index.py?" + htmllib.urlencode_vars([("start_url", rel_url)]) - url = peer["multisiteurl"] + frameset_url - - html.header(_("Access to standby system"), stylesheets = wato_styles) - if global_replication_state() != "clean": - html.show_error(_("You are currently accessing a standby " - "system while the primary system is available. " - "Furthermore you have local changes in the standby system " - "that are not replicated " - "to all sites. Please first replicate " - "your changes before switching to the primary system.") % - ("wato.py?mode=changelog", url)) - - if current_mode not in [ "sites", "edit_site", "changelog" ]: - html.show_error(_("You have accessed a site that is currently " - "in standby mode. The only accessible modules " - "are the site management " - "and the replication. " - "Please proceed on the currently active system " - "%s.") % - ("wato.py?mode=sites", "wato.py?mode=changelog", - url, peer["alias"])) - html.footer() - return True + if phase == "title": + return _("Edit user role %s") % id + elif phase == "buttons": + html.context_button(_("All Roles"), make_link([("mode", "roles")]), "back") + return -#. -# .-Automation-Webservice------------------------------------------------. 
-# | _ _ _ _ | -# | / \ _ _| |_ ___ _ __ ___ __ _| |_(_) ___ _ __ | -# | / _ \| | | | __/ _ \| '_ ` _ \ / _` | __| |/ _ \| '_ \ | -# | / ___ \ |_| | || (_) | | | | | | (_| | |_| | (_) | | | | | -# | /_/ \_\__,_|\__\___/|_| |_| |_|\__,_|\__|_|\___/|_| |_| | -# | | -# +----------------------------------------------------------------------+ -# | These function implement a web service with that a master can call | -# | automation functions on slaves and peers. | -# '----------------------------------------------------------------------' + # Make sure that all dynamic permissions are available (e.g. those for custom + # views) + config.load_dynamic_permissions() -def page_automation_login(): - if not config.may("wato.automation"): - raise MKAuthException(_("This account has no permission for automation.")) - # When we are here, a remote (master) site has successfully logged in - # using the credentials of the administrator. The login is done be exchanging - # a login secret. If such a secret is not yet present it is created on - # the fly. - html.write(repr(get_login_secret(True))) + roles = userdb.load_roles() + role = roles[id] -def get_login_secret(create_on_demand = False): - path = var_dir + "automation_secret.mk" - try: - return eval(file(path).read()) - except: - if not create_on_demand: - return None - # We should use /dev/random here for cryptographic safety. But - # that involves the great problem that the system might hang - # because of loss of entropy. So we hope /dev/urandom is enough. - # Furthermore we filter out non-printable characters. The byte - # 0x00 for example does not make it through HTTP and the URL. - secret = "" - urandom = file("/dev/urandom") - while len(secret) < 32: - c = urandom.read(1) - if ord(c) >= 48 and ord(c) <= 90: - secret += c - write_settings_file(path, secret) - return secret + if phase == "action": + alias = html.var_utf8("alias") -def site_is_local(siteid): - return config.site_is_local(siteid) + unique, info = is_alias_used("roles", id, alias) + if not unique: + raise MKUserError("alias", info) -# Returns the ID of our site. This function only works in replication -# mode and looks for an entry connecting to the local socket. -def our_site_id(): - if not is_distributed(): - return None - for site_id in config.allsites(): - if site_is_local(site_id): - return site_id - return None + new_id = html.var("id") + if len(new_id) == 0: + raise MKUserError("id", _("Please specify an ID for the new role.")) + if not re.match("^[-a-z0-9A-Z_]*$", new_id): + raise MKUserError("id", _("Invalid role ID. Only the characters a-z, A-Z, 0-9, _ and - are allowed.")) + if new_id != id: + if new_id in roles: + raise MKUserError("id", _("The ID is already used by another role")) -automation_commands = {} + role["alias"] = alias -def page_automation(): - secret = html.var("secret") - if not secret: - raise MKAuthException(_("Missing secret for automation command.")) - if secret != get_login_secret(): - raise MKAuthException(_("Invalid automation secret.")) + # based on + if not role.get("builtin"): + basedon = html.var("basedon") + if basedon not in config.builtin_role_ids: + raise MKUserError("basedon", _("Invalid valid for based on. 
Must be id of builtin rule.")) + role["basedon"] = basedon - # Initialise g_root_folder, load all folder information - prepare_folder_info() + # Permissions + permissions = {} + for perm in config.permissions_by_order: + pname = perm["name"] + value = html.var("perm_" + pname) + if value == "yes": + permissions[pname] = True + elif value == "no": + permissions[pname] = False + role["permissions"] = permissions - command = html.var("command") - if command == "checkmk-automation": - cmk_command = html.var("automation") - args = mk_eval(html.var("arguments")) - indata = mk_eval(html.var("indata")) - result = check_mk_local_automation(cmk_command, args, indata) - html.write(repr(result)) + if id != new_id: + roles[new_id] = role + del roles[id] + rename_user_role(id, new_id) - elif command == "push-snapshot": - html.write(repr(automation_push_snapshot())) + save_roles(roles) + update_login_sites_replication_status() + log_pending(AFFECTED, None, "edit-roles", _("Modified user role '%s'" % new_id)) + return "roles" - elif command in automation_commands: - html.write(repr(automation_commands[command]())) + html.begin_form("role", method="POST") + # ID + forms.header(_("Basic Properties")) + forms.section(_("Internal ID"), simple = "builtin" in role) + if role.get("builtin"): + html.write("%s (%s)" % (id, _("builtin role"))) + html.hidden_field("id", id) else: - raise MKGeneralException(_("Invalid automation command: %s.") % command) - -def automation_push_snapshot(): - try: - site_id = html.var("siteid") - if not site_id: - raise MKGeneralException(_("Missing variable siteid")) - mode = html.var("mode", "slave") - - our_id = our_site_id() - - if mode == "slave" and is_distributed(): - raise MKGeneralException(_("Configuration error. You treat us as " - "a slave, but we are a peer!")) - - elif mode == "peer" and not is_distributed(): - raise MKGeneralException(_("Configuration error. You treat us as " - "a peer, but we have no peer configuration!")) + html.text_input("id", id) + html.set_focus("id") - # In peer mode, we have a replication configuration ourselves and - # we have a site ID our selves. Let's make sure that ID matches - # the ID our peer thinks we have. - if our_id != None and our_id != site_id: - raise MKGeneralException( - _("Site ID mismatch. Our ID is '%s', but you are saying we are '%s'.") % - (our_id, site_id)) + # Alias + forms.section(_("Alias")) + html.help(_("An alias or description of the role")) + html.text_input("alias", role.get("alias", ""), size = 50) - # Make sure there are no local changes we would loose! But only if we are - # distributed ourselves (meaning we are a peer). - if is_distributed(): - pending = parse_audit_log("pending") - if len(pending) > 0: - message = _("There are %d pending changes that would get lost. The most recent are: ") % len(pending) - message += ", ".join([e[-1] for e in pending[:10]]) - raise MKGeneralException(message) + # Based on + if not role.get("builtin"): + forms.section(_("Based on role")) + html.help(_("Each user defined role is based on one of the builtin roles. " + "When created it will start with all permissions of that role. 
When due to a software " + "update or installation of an addons new permissions appear, the user role will get or " + "not get those new permissions based on the default settings of the builtin role it's " + "based on.")) + choices = [ (i, r["alias"]) for i, r in roles.items() if r.get("builtin") ] + html.sorted_select("basedon", choices, role.get("basedon", "user")) - tarcontent = html.var('snapshot') - if not tarcontent: - raise MKGeneralException(_('Invalid call: The snapshot is missing.')) - multitar.extract_from_buffer(tarcontent, replication_paths) - log_commit_pending() # pending changes are lost - call_hook_snapshot_pushed() + # Permissions + base_role_id = role.get("basedon", id) - # Create rule making this site only monitor our hosts - create_distributed_wato_file(site_id, mode) - log_audit(None, "replication", _("Synchronized with master (my site id is %s.)") % site_id) - if html.var("restart", "no") == "yes": - check_mk_local_automation(config.wato_activation_method) - return True - except Exception, e: - if config.debug: - return _("Internal automation error: %s\n%s") % (str(e), format_exception()) - else: - return _("Internal automation error: %s") % e + html.help( + _("When you leave the permissions at "default" then they get their " + "settings from the factory defaults (for builtin roles) or from the " + "factory default of their base role (for user define roles). Factory defaults " + "may change due to software updates. When choosing another base role, all " + "permissions that are on default will reflect the new base role.")) -def create_distributed_wato_file(siteid, mode): - out = create_user_file(defaults.check_mk_configdir + "/distributed_wato.mk", "w") - out.write("# Written by WATO\n# encoding: utf-8\n\n") - out.write("# This file has been created by the master site\n" - "# push the configuration to us. It makes sure that\n" - "# we only monitor hosts that are assigned to our site.\n\n") - out.write("distributed_wato_site = '%s'\n" % siteid) + # Loop all permission sections, but sorted plz + for section, (prio, section_title, do_sort) in sorted(config.permission_sections.iteritems(), + key = lambda x: x[1][0], reverse = True): + forms.header(section_title, False) -def delete_distributed_wato_file(): - p = defaults.check_mk_configdir + "/distributed_wato.mk" - # We do not delete the file but empty it. That way - # we do not need write permissions to the conf.d - # directory! - if os.path.exists(p): - create_user_file(p, "w").write("") + # Loop all permissions + permlist = config.permissions_by_order[:] + if do_sort: + permlist.sort(cmp = lambda a,b: cmp(a["title"], b["title"])) -#. -# .-Users/Contacts-------------------------------------------------------. -# | _ _ ______ _ _ | -# || | | |___ ___ _ __ ___ / / ___|___ _ __ | |_ __ _ ___| |_ ___ | -# || | | / __|/ _ \ '__/ __| / / | / _ \| '_ \| __/ _` |/ __| __/ __| | -# || |_| \__ \ __/ | \__ \/ /| |__| (_) | | | | || (_| | (__| |_\__ \ | -# | \___/|___/\___|_| |___/_/ \____\___/|_| |_|\__\__,_|\___|\__|___/ | -# | | -# +----------------------------------------------------------------------+ -# | Mode for managing users and contacts. 
| -# '----------------------------------------------------------------------' + for perm in permlist: + pname = perm["name"] + this_section = pname.split(".")[0] + if section != this_section: + continue # Skip permissions of other sections -def declare_user_attribute(name, vs, user_editable = True, permission = None): - userdb.user_attributes[name] = { - 'valuespec': vs, - 'user_editable': user_editable, - } - # Permission needed for editing this attribute - if permission: - userdb.user_attributes[name]["permission"] = permission + forms.section(perm["title"]) -def load_notification_scripts_from(adir): - scripts = {} - if os.path.exists(adir): - for entry in os.listdir(adir): - path = adir + "/" + entry - if os.path.isfile(path) and os.access(path, os.X_OK): - title = entry - try: - lines = file(path) - lines.next() - line = lines.next().strip() - if line.startswith("#"): - title = line.lstrip("#").strip() - except: - pass - scripts[entry] = title - return scripts + pvalue = role["permissions"].get(pname) + def_value = base_role_id in perm["defaults"] + choices = [ ( "yes", _("yes")), + ( "no", _("no")), + ( "default", _("default (%s)") % (def_value and _("yes") or _("no") )) ] + html.select("perm_" + pname, choices, { True: "yes", False: "no" }.get(pvalue, "default"), attrs={"style": "width: 130px;"} ) -def load_notification_scripts(): - scripts = {} - try: - not_dir = defaults.notifications_dir - except: - not_dir = defaults.share_dir + "/notifications" # for those with not up-to-date defaults + html.help(perm["description"]) - scripts = load_notification_scripts_from(not_dir) - try: - local_dir = defaults.omd_root + "/local/share/check_mk/notifications" - scripts.update(load_notification_scripts_from(local_dir)) - except: - pass - choices = scripts.items() - choices.append((None, _("Plain Text Email (using configured templates)"))) - choices.sort(cmp = lambda a,b: cmp(a[1], b[1])) - return choices + forms.end() + html.button("save", _("Save")) + html.hidden_fields() + html.end_form() -def notification_script_title(name): - return dict(load_notification_scripts()).get(name, name) +def make_unicode(s): + if type(s) != unicode: # assume utf-8 encoded bytestring + return s.decode("utf-8") + else: + return s +def save_roles(roles): + # Reflect the data in the roles dict kept in the config module Needed + # for instant changes in current page while saving modified roles. 
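save_roles() below persists the dict by writing a Python literal into roles.mk via pprint; such .mk files are read back by exec'ing them (compare load_sites() earlier in this patch). A sketch of the write side, with an illustrative helper name:

    import pprint

    def save_mk_dict(path, varname, data):
        # WATO-style persistence: emit a Python literal that a later
        # execfile()/exec() can consume.
        out = open(path, "w")
        out.write("# Written by WATO\n# encoding: utf-8\n\n")
        out.write("%s.update(\n%s)\n" % (varname, pprint.pformat(data)))
        out.close()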
+ # Otherwise the hooks would work with old data when using helper + # functions from the config module + config.roles.update(roles) -def load_notification_table(): - global vs_notification_method - vs_notification_method = \ - CascadingDropdown( - title = _("Notification Method"), - choices = [ - ( "email", _("Plain Text Email (using configured templates)") ), - ( "flexible", - _("Flexible Custom Notifications"), - ListOf( - Foldable( - Dictionary( - optional_keys = [ "only_hosts", "only_services", "escalation" ], - columns = 1, - headers = True, - elements = [ - ( "plugin", - DropdownChoice( - title = _("Notification Plugin"), - choices = load_notification_scripts, - ), - ), - ( "disabled", - Checkbox( - title = _("Disabled"), - label = _("Currently disable this notification"), - default_value = False, - ) - ), - ( "timeperiod", - TimeperiodSelection( - title = _("Timeperiod"), - help = _("Do only notifiy alerts within this time period"), - ) - ), - ( "escalation", - Tuple( - title = _("Restrict to nth to mth notification (escalation)"), - orientation = "float", - elements = [ - Integer( - label = _("from"), - help = _("Let through notifications counting from this number"), - default_value = 1, - minvalue = 1, - maxvalue = 999999, - ), - Integer( - label = _("to"), - help = _("Let through notifications counting upto this number"), - default_value = 999999, - minvalue = 1, - maxvalue = 999999, - ), - ], - ), - ), - ( "host_events", - ListChoice( - title = _("Host Events"), - choices = [ - ( 'd', _("Host goes down")), - ( 'u', _("Host gets unreachble")), - ( 'r', _("Host goes up again")), - ( 'f', _("Start or end of flapping state")), - ( 's', _("Start or end of a scheduled downtime ")), - ( 'x', _("Acknowledgement of host problem")), - ], - default_value = [ 'd', 'u' ], - ) - ), - ( "service_events", - ListChoice( - title = _("Service Events"), - choices = [ - ( 'w', _("Service goes into warning state")), - ( 'u', _("Service goes into unknown state")), - ( 'c', _("Service goes into critical state")), - ( 'r', _("Service recovers to OK")), - ( 'f', _("Start or end of flapping state")), - ( 's', _("Start or end of a scheduled downtime")), - ( 'x', _("Acknowledgement of service problem")), - ], - default_value = [ 'w', 'c', 'u' ], - ) - ), - ( "only_hosts", - ListOfStrings( - title = _("Limit to the following hosts"), - help = _("Configure the hosts for this notification. Only exact, case sensitive matches"), - orientation = "horizontal", - valuespec = RegExp(size = 20), - ), - ), - ( "only_services", - ListOfStrings( - title = _("Limit to the following services"), - help = _("Configure regular expressions that match the beginning of the service names here. Prefix an " - "entry with ! in order to exclude that service."), - orientation = "horizontal", - valuespec = RegExp(size = 20), - ), - ), - ( "parameters", - ListOfStrings( - title = _("Plugin Arguments"), - help = _("You can specify arguments to the notification plugin here. 
" - "Please refer to the documentation about the plugin for what " - "parameters are allowed or required here."), - ) - ), - ] - ), - title_function = lambda v: _("Notify by: ") + notification_script_title(v["plugin"]), - ), - title = _("Flexible Custom Notifications"), - add_label = _("Add notification"), - ), - ), - ] - ) + make_nagios_directory(multisite_dir) + filename = multisite_dir + "roles.mk" + out = create_user_file(filename, "w") + out.write("# Written by WATO\n# encoding: utf-8\n\n") + out.write("roles.update(\n%s)\n" % pprint.pformat(roles)) + + call_hook_roles_saved(roles) -def mode_users(phase): +# Adapt references in users. Builtin rules cannot +# be renamed and are not handled here. If new_id is None, +# the role is being deleted +def rename_user_role(id, new_id): + users = userdb.load_users(lock = True) + for user in users.values(): + if id in user["roles"]: + user["roles"].remove(id) + if new_id: + user["roles"].append(new_id) + userdb.save_users(users) + +def mode_role_matrix(phase): if phase == "title": - return _("Users & Contacts") + return _("Role & Permission Matrix") elif phase == "buttons": global_buttons() - html.context_button(_("New user"), make_link([("mode", "edit_user")]), "new") + html.context_button(_("Back"), make_link([("mode", "roles")]), "back") return - # Execute all connectors synchronisations of users. This must be done before - # loading the users, because it might modify the users list. But don't execute - # it during actions, this should save some time. - if phase != "action": - userdb.hook_sync(add_to_changelog = True) + elif phase == "action": + return + # Show table of builtin and user defined roles, sorted by alias roles = userdb.load_roles() - users = filter_hidden_users(userdb.load_users()) - timeperiods = load_timeperiods() - contact_groups = userdb.load_group_information().get("contact", {}) + role_list = roles.items() + role_list.sort(cmp = lambda a,b: cmp((a[1]["alias"],a[0]), (b[1]["alias"],b[0]))) - if phase == "action": - delid = html.var("_delete") - if delid == config.user_id: - raise MKUserError(None, _("You cannot delete your own account!")) + html.write("") + html.write("") + num_roles = 1 + for id, role in role_list: + html.write('' % role['alias']) + num_roles += 1 + html.write("\n") + + # Loop all permission sections, but sorted plz + odd = "even" + for section, (prio, section_title, do_sort) in sorted(config.permission_sections.iteritems(), + key = lambda x: x[1][0], reverse = True): + + html.write('') + html.write('' % (num_roles, section_title)) + html.write('') - if delid not in users: - return None # The account does not exist (anymore), no deletion needed + # Loop all permissions + permlist = config.permissions_by_order[:] + if do_sort: + permlist.sort(cmp = lambda a,b: cmp(a["title"], b["title"])) - c = wato_confirm(_("Confirm deletion of user %s" % delid), - _("Do you really want to delete the user %s?" 
% delid)) - if c: - del users[delid] - userdb.save_users(users) - log_pending(SYNCRESTART, None, "edit-users", _("Deleted user %s" % (delid))) - return None - elif c == False: - return "" - else: - return None + for perm in permlist: + pname = perm["name"] + this_section = pname.split(".")[0] + if section != this_section: + continue # Skip permissions of other sections - entries = users.items() - entries.sort(cmp = lambda a, b: cmp(a[1].get("alias", a[0]).lower(), b[1].get("alias", b[0]).lower())) + odd = odd == "odd" and "even" or "odd" - table.begin(_("Users & Contacts"), - empty_text = _("There are not defined any contacts/users yet.")) - for id, user in entries: - table.row() + html.write('' % odd) + html.write('' % perm["title"]) - connector = userdb.get_connector(user.get('connector')) + for id, role in role_list: + base_on_id = role.get('basedon', id) + pvalue = role["permissions"].get(pname) + if pvalue is None: + pvalue = base_on_id in perm["defaults"] - # Buttons - table.cell(_("Actions"), css="buttons") - if connector: # only show edit buttons when the connector is available and enabled - edit_url = make_link([("mode", "edit_user"), ("edit", id)]) - html.icon_button(edit_url, _("Properties"), "edit") + html.write('' % (pvalue and 'X' or '')) - clone_url = make_link([("mode", "edit_user"), ("clone", id)]) - html.icon_button(clone_url, _("Create a copy of this user"), "clone") + html.write('') - delete_url = html.makeactionuri([("_delete", id)]) - html.icon_button(delete_url, _("Delete"), "delete") + html.write("
    %s
    %s
    %s%s
    ") - # ID - table.cell(_("ID"), id) +#. +# .--Host-Tags-----------------------------------------------------------. +# | _ _ _ _____ | +# | | | | | ___ ___| |_ |_ _|_ _ __ _ ___ | +# | | |_| |/ _ \/ __| __| | |/ _` |/ _` / __| | +# | | _ | (_) \__ \ |_ | | (_| | (_| \__ \ | +# | |_| |_|\___/|___/\__| |_|\__,_|\__, |___/ | +# | |___/ | +# +----------------------------------------------------------------------+ +# | Manage the variable config.wato_host_tags -> The set of tags to be | +# | assigned to hosts and that is the basis of the rules. | +# '----------------------------------------------------------------------' - # Connector - if connector: - table.cell(_("Connector"), connector['short_title']) - locked_attributes = userdb.locked_attributes(user.get('connector')) - else: - table.cell(_("Connector"), "%s (disabled)" % userdb.get_connector_id(user.get('connector')), css="error") - locked_attributes = [] +def parse_hosttag_title(title): + if '/' in title: + return title.split('/', 1) + else: + return None, title + +def hosttag_topics(hosttags, auxtags): + names = set([]) + for entry in hosttags + auxtags: + topic, title = parse_hosttag_title(entry[1]) + if topic: + names.add((topic, topic)) + return list(names) + +def group_hosttags_by_topic(hosttags): + tags = {} + for entry in hosttags: + topic, title = parse_hosttag_title(entry[1]) + if not topic: + topic = _('Host tags') + tags.setdefault(topic, []) + tags[topic].append((entry[0], title) + entry[2:]) + return sorted(tags.items(), key = lambda x: x[0]) - # Authentication - if "automation_secret" in user: - auth_method = _("Automation") - elif user.get("password") or 'password' in locked_attributes: - auth_method = _("Password") - else: - auth_method = "%s" % _("none") - table.cell(_("Authentication"), auth_method) - # Locked - locked = user.get("locked", False) - table.cell(_("Locked"), (locked and ("" + _("yes") + "") or _("no"))) +def mode_hosttags(phase): + if phase == "title": + return _("Host tag groups") - # Full name / Alias - table.cell(_("Alias"), user.get("alias", "")) + elif phase == "buttons": + global_buttons() + html.context_button(_("New Tag group"), make_link([("mode", "edit_hosttag")]), "new") + html.context_button(_("New Aux tag"), make_link([("mode", "edit_auxtag")]), "new") + return - # Email - table.cell(_("Email"), user.get("email", "")) + hosttags, auxtags = load_hosttags() - # Roles - table.cell(_("Roles")) - if user.get("roles", []): - html.write(", ".join( - [ '%s' % (make_link([("mode", "edit_role"), ("edit", r)]), roles[r].get('alias')) for r in user["roles"]])) + if phase == "action": + # Deletion of tag groups + del_id = html.var("_delete") + if del_id: + for e in hosttags: + if e[0] == del_id: + # In case of tag group deletion, the operations is a pair of tag_id + # and list of choice-ids. 
+ operations = [ x[0] for x in e[2] ] - # contact groups - table.cell(_("Contact groups")) - cgs = user.get("contactgroups", []) - if cgs: - html.write(", ".join( - [ '%s' % (make_link([("mode", "edit_contact_group"), ("edit", c)]), - c in contact_groups and contact_groups[c] or c) for c in cgs])) - else: - html.write("" + _("none") + "") + message = rename_host_tags_after_confirmation(del_id, operations) + if message == True: # no confirmation yet + c = wato_confirm(_("Confirm deletion of the host " + "tag group '%s'") % del_id, + _("Do you really want to delete the " + "host tag group '%s'?") % del_id) + if c == False: + return "" + elif c == None: + return None - # notifications - table.cell(_("Notifications")) - if not cgs: - html.write(_("not a contact")) - elif not user.get("notifications_enabled", True): - html.write(_("disabled")) - elif "" == user.get("host_notification_options", "") \ - and "" == user.get("service_notification_options", ""): - html.write(_("all events disabled")) - else: - tp = user.get("notification_period", "24X7") - if tp != "24X7" and tp not in timeperiods: - tp = tp + _(" (invalid)") - elif tp != "24X7": - url = make_link([("mode", "edit_timeperiod"), ("edit", tp)]) - tp = '%s' % (url, timeperiods[tp].get("alias", tp)) - else: - tp = _("Always") - html.write(tp) + if message: + hosttags = [ e for e in hosttags if e[0] != del_id ] + save_hosttags(hosttags, auxtags) + rewrite_config_files_below(g_root_folder) # explicit host tags in all_hosts + log_pending(SYNCRESTART, None, "edit-hosttags", _("Removed host tag group %s (%s)") % (message, del_id)) + return "hosttags", message != True and message or None - table.end() + # Deletion of auxiliary tags + del_nr = html.var("_delaux") + if del_nr: + nr = int(del_nr) + del_id = auxtags[nr][0] - if not userdb.load_group_information().get("contact", {}): - url = "wato.py?mode=contact_groups" - html.write("
    " + - _("Note: you haven't defined any contact groups yet. If you " - "create some contact groups you can assign users to them und thus " - "make them monitoring contacts. Only monitoring contacts can receive " - "notifications.") % url + "
    ") + # Make sure that this aux tag is not begin used by any tag group + for entry in hosttags: + choices = entry[2] + for e in choices: + if len(e) > 2: + if del_id in e[2]: + raise MKUserError(None, _("You cannot delete this auxiliary tag. " + "It is being used in the tag group %s.") % entry[1]) + operations = { del_id : False } + message = rename_host_tags_after_confirmation(None, operations) + if message == True: # no confirmation yet + c = wato_confirm(_("Confirm deletion of the auxiliary " + "tag '%s'") % del_id, + _("Do you really want to delete the " + "auxiliary tag '%s'?") % del_id) + if c == False: + return "" + elif c == None: + return None + if message: + del auxtags[nr] + # Remove auxiliary tag from all host tags + for e in hosttags: + choices = e[2] + for choice in choices: + if len(choice) > 2: + if del_id in choice[2]: + choice[2].remove(del_id) -def mode_edit_user(phase): - users = userdb.load_users() - userid = html.var("edit") # missing -> new user - cloneid = html.var("clone") # Only needed in 'new' mode - new = userid == None - if phase == "title": - if new: - return _("Create new user") - else: - return _("Edit user %s" % userid) + save_hosttags(hosttags, auxtags) + rewrite_config_files_below(g_root_folder) # explicit host tags in all_hosts + log_pending(SYNCRESTART, None, "edit-hosttags", _("Removed auxiliary tag %s (%s)") % (message, del_id)) + return "hosttags", message != True and message or None - elif phase == "buttons": - html.context_button(_("All Users"), make_link([("mode", "users")]), "back") + move_nr = html.var("_move") + if move_nr != None: + if html.check_transaction(): + move_nr = int(move_nr) + if move_nr >= 0: + dir = 1 + else: + move_nr = -move_nr + dir = -1 + moved = hosttags[move_nr] + del hosttags[move_nr] + hosttags[move_nr+dir:move_nr+dir] = [moved] + save_hosttags(hosttags, auxtags) + config.wato_host_tags = hosttags + log_pending(SYNCRESTART, None, "edit-hosttags", _("Changed order of host tag groups")) return - if new: - if cloneid: - user = users.get(cloneid, userdb.new_user_template('htpasswd')) - else: - user = userdb.new_user_template('htpasswd') - pw_suffix = 'new' + if len(hosttags) + len(auxtags) == 0: + render_main_menu([ + ("edit_hosttag", _("Create new tag group"), "new", "hosttags", + _("Each host tag group will create one dropdown choice in the host configuration.")), + ("edit_auxtag", _("Create new auxiliary tag"), "new", "hosttags", + _("You can have these tags automatically added if certain primary tags are set.")), + ]) + else: - user = users.get(userid, userdb.new_user_template('htpasswd')) - pw_suffix = 'userid' + table.begin("hosttags", _("Host tag groups"), + help = (_("Host tags are the basis of Check_MK's rule based configuration. " + "If the first step you define arbitrary tag groups. A host " + "has assigned exactly one tag out of each group. These tags can " + "later be used for defining parameters for hosts and services, " + "such as disable notifications for all hosts with the tags " + "Network device and Test.")), + empty_text = _("You haven't defined any tag groups yet."), + searchable = False, sortable = False) - # Returns true if an attribute is locked and should be read only. 
Is only - # checked when modifying an existing user - locked_attributes = userdb.locked_attributes(user.get('connector')) - def is_locked(attr): - return not new and attr in locked_attributes + if hosttags: + for nr, entry in enumerate(hosttags): + tag_id, title, choices = entry[:3] # fourth: tag dependency information + topic, title = map(_u, parse_hosttag_title(title)) + table.row() + edit_url = make_link([("mode", "edit_hosttag"), ("edit", tag_id)]) + delete_url = make_action_link([("mode", "hosttags"), ("_delete", tag_id)]) + table.cell(_("Actions"), css="buttons") + if nr == 0: + html.empty_icon_button() + else: + html.icon_button(make_action_link([("mode", "hosttags"), ("_move", str(-nr))]), + _("Move this tag group one position up"), "up") + if nr == len(hosttags) - 1: + html.empty_icon_button() + else: + html.icon_button(make_action_link([("mode", "hosttags"), ("_move", str(nr))]), + _("Move this tag group one position down"), "down") + html.icon_button(edit_url, _("Edit this tag group"), "edit") + html.icon_button(delete_url, _("Delete this tag group"), "delete") - # Load data that is referenced - in order to display dropdown - # boxes and to check for validity. - contact_groups = userdb.load_group_information().get("contact", {}) - timeperiods = load_timeperiods() - roles = userdb.load_roles() + table.cell(_("ID"), tag_id) + table.cell(_("Title"), title) + table.cell(_("Topic"), topic or '') + table.cell(_("Type"), (len(choices) == 1 and _("Checkbox") or _("Dropdown"))) + table.cell(_("Choices"), str(len(choices))) + table.cell(_("Demonstration"), sortable=False) + html.begin_form("tag_%s" % tag_id) + host_attribute["tag_%s" % tag_id].render_input(None) + html.end_form() + table.end() + + table.begin("auxtags", _("Auxiliary tags"), + help = _("Auxiliary tags can be attached to other tags. That way " + "you can for example have all hosts with the tag cmk-agent " + "get also the tag tcp. 
This makes the configuration of " + "your hosts easier."), + empty_text = _("You haven't defined any auxiliary tags."), + searchable = False) + + if auxtags: + for nr, (tag_id, title) in enumerate(auxtags): + table.row() + topic, title = parse_hosttag_title(title) + edit_url = make_link([("mode", "edit_auxtag"), ("edit", nr)]) + delete_url = make_action_link([("mode", "hosttags"), ("_delaux", nr)]) + table.cell(_("Actions"), css="buttons") + html.icon_button(edit_url, _("Edit this auxiliary tag"), "edit") + html.icon_button(delete_url, _("Delete this auxiliary tag"), "delete") + table.cell(_("ID"), tag_id) + table.cell(_("Title"), _u(title)) + table.cell(_("Topic"), _u(topic) or '') + table.end() - if phase == "action": - if not html.check_transaction(): - return "users" - id = html.var("userid").strip() - if new and id in users: - raise MKUserError("userid", _("This username is already being used by another user.")) - if not re.match("^[-a-z0-9A-Z_\.@]+$", id): - raise MKUserError("userid", _("The username must consist only of letters, digits, @, _ or colon.")) +def mode_edit_auxtag(phase): + tag_nr = html.var("edit") + new = tag_nr == None + if not new: + tag_nr = int(tag_nr) + if phase == "title": if new: - new_user = {} - users[id] = new_user + return _("Create new auxiliary tag") else: - new_user = users[id] - - # Full name - alias = html.var_utf8("alias").strip() - if not alias: - raise MKUserError("alias", - _("Please specify a full name or descriptive alias for the user.")) - new_user["alias"] = alias - - # Locking - if id == config.user_id and html.get_checkbox("locked"): - raise MKUserError(_("You cannot lock your own account!")) - new_user["locked"] = html.get_checkbox("locked") - - increase_serial = False - if users[id] != new_user["locked"] and new_user["locked"]: - increase_serial = True # when user is being locked now, increase the auth serial + return _("Edit auxiliary tag") - # Authentication: Password or Secret - auth_method = html.var("authmethod") - if auth_method == "secret": - secret = html.var("secret", "").strip() - if not secret or len(secret) < 10: - raise MKUserError('secret', _("Please specify a secret of at least 10 characters length.")) - new_user["automation_secret"] = secret - new_user["password"] = userdb.encrypt_password(secret) - increase_serial = True # password changed, reflect in auth serial + elif phase == "buttons": + html.context_button(_("All Hosttags"), make_link([("mode", "hosttags")]), "back") + return - else: - password = html.var("password_" + pw_suffix, '').strip() - password2 = html.var("password2_" + pw_suffix, '').strip() + hosttags, auxtags = load_hosttags() - # Detect switch back from automation to password - if "automation_secret" in new_user: - del new_user["automation_secret"] - if "password" in new_user: - del new_user["password"] # which was the encrypted automation password! + vs_topic = OptionalDropdownChoice( + title = _("Topic") + "*", + choices = hosttag_topics(hosttags, auxtags), + explicit = TextUnicode(), + otherlabel = _("Create New Topic"), + default_value = None, + sorted = True + ) - # We compare both passwords only, if the user has supplied - # the repeation! We are so nice to our power users... 
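# Aside (assumed helper, not part of this patch): the "Topic" selected in the
# vs_topic dropdown above is persisted by prefixing it to the tag title,
# separated by the first "/". Assuming parse_hosttag_title() is implemented
# along these lines, the round trip looks like:
#
#     def parse_hosttag_title(title):
#         if '/' in title:
#             return title.split('/', 1)
#         return None, title
#
#     parse_hosttag_title("Networking/WAN")  ->  ("Networking", "WAN")
#     parse_hosttag_title("WAN")             ->  (None, "WAN")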
- if password2 and password != password2: - raise MKUserError("password2", _("The both passwords do not match.")) + if phase == "action": + if html.transaction_valid(): + html.check_transaction() # use up transaction id + if new: + tag_id = html.var("tag_id").strip() + if not tag_id: + raise MKUserError("tag_id", _("Please enter a tag ID")) + validate_tag_id(tag_id, "tag_id") + else: + tag_id = auxtags[tag_nr][0] - if password: - new_user["password"] = userdb.encrypt_password(password) - increase_serial = True # password changed, reflect in auth serial + title = html.var_utf8("title").strip() + if not title: + raise MKUserError("title", _("Please supply a title " + "for you auxiliary tag.")) - # Increase serial (if needed) - if increase_serial: - new_user['serial'] = new_user.get('serial', 0) + 1 + topic = forms.get_input(vs_topic, "topic") + if topic != '': + title = '%s/%s' % (topic, title) - # Email address - email = html.var("email", '').strip() - regex_email = '^[-a-zäöüÄÖÜA-Z0-9_.+%]+@[-a-zäöüÄÖÜA-Z0-9]+(\.[-a-zäöüÄÖÜA-Z0-9]+)*$' - if email and not re.match(regex_email, email): - raise MKUserError("email", _("'%s' is not a valid email address." % email)) - new_user["email"] = email + # Make sure that this ID is not used elsewhere + for entry in config.wato_host_tags: + tgid = entry[0] + tit = entry[1] + ch = entry[2] + for e in ch: + if e[0] == tag_id: + raise MKUserError("tag_id", + _("This tag id is already being used " + "in the host tag group %s") % tit) - # Pager - pager = html.var("pager", '').strip() - new_user["pager"] = pager + for nr, (id, name) in enumerate(auxtags): + if nr != tag_nr and id == tag_id: + raise MKUserError("tag_id", + _("This tag id does already exist in the list " + "of auxiliary tags.")) - # Roles - new_user["roles"] = filter(lambda role: html.get_checkbox("role_" + role), - roles.keys()) + if new: + auxtags.append((tag_id, title)) + else: + auxtags[tag_nr] = (tag_id, title) + save_hosttags(hosttags, auxtags) + return "hosttags" - # Language configuration - set_lang = html.var('_set_lang') - language = html.var('language') - if set_lang and language != config.default_language: - if language == '': - language = None - new_user['language'] = language - elif not set_lang and 'language' in new_user: - del new_user['language'] - # Contact groups - cgs = [] - for c in contact_groups: - if html.get_checkbox("cg_" + c): - cgs.append(c) - new_user["contactgroups"] = cgs + if new: + title = "" + tag_id = "" + topic = "" + else: + tag_id, title = auxtags[tag_nr] + topic, title = parse_hosttag_title(title) - # Notifications - new_user["notifications_enabled"] = html.get_checkbox("notifications_enabled") + html.begin_form("auxtag") + forms.header(_("Auxiliary Tag")) - # Check if user can receive notifications - if new_user["notifications_enabled"]: - if not new_user["email"]: - raise MKUserError("email", - _('You have enabled the notifications but missed to configure a ' - 'Email address. You need to configure your mail address in order ' - 'to be able to receive emails.')) - - if not new_user["contactgroups"]: - raise MKUserError("notifications_enabled", - _('You have enabled the notifications but missed to make the ' - 'user member of at least one contact group. You need to make ' - 'the user member of a contact group which has hosts assigned ' - 'in order to be able to receive emails.')) - - if not new_user["roles"]: - raise MKUserError("role_user", - _("Your user has no roles. 
Please assign at least one role.")) - - ntp = html.var("notification_period") - if ntp not in timeperiods: - ntp = "24X7" - new_user["notification_period"] = ntp - - for what, opts in [ ( "host", "durfs"), ("service", "wucrfs") ]: - new_user[what + "_notification_options"] = "".join( - [ opt for opt in opts if html.get_checkbox(what + "_" + opt) ]) - # FIXME: Validate notification commands. Do they really exist? - # FIXME(2): This is deprecated anyway. Remove in future. - new_user[what + "_notification_commands"] = html.var(what + "_notification_commands") - - value = vs_notification_method.from_html_vars("notification_method") - vs_notification_method.validate_value(value, "notification_method") - new_user["notification_method"] = value + # Tag ID + forms.section(_("Tag ID")) + if new: + html.text_input("tag_id", "") + html.set_focus("tag_id") + else: + html.write(tag_id) + html.help(_("The internal name of the tag. The special tags " + "snmp, tcp and ping can " + "be used here in order to specify the agent type.")) - # Custom attributes - for name, attr in userdb.get_user_attributes(): - value = attr['valuespec'].from_html_vars('ua_' + name) - attr['valuespec'].validate_value(value, "ua_" + name) - new_user[name] = value + # Title + forms.section(_("Title") + "*") + html.text_input("title", title, size = 30) + html.help(_("An alias or description of this auxiliary tag")) - # Saving - userdb.save_users(users) - if new: - log_pending(SYNCRESTART, None, "edit-users", _("Create new user %s" % id)) - else: - log_pending(SYNCRESTART, None, "edit-users", _("Modified user %s" % id)) - return "users" + # The (optional) topic + forms.section(_("Topic") + "*") + html.help(_("Different taggroups can be grouped in topics to make the visualization and " + "selections in the GUI more comfortable.")) + forms.input(vs_topic, "topic", topic) - # Let exceptions from loading notification scripts happen now - load_notification_scripts() + # Button and end + forms.end() + html.show_localization_hint() + html.button("save", _("Save")) + html.hidden_fields() + html.end_form() - html.begin_form("user") - forms.header(_("Identity")) +# Validate the syntactic form of a tag +def validate_tag_id(id, varname): + if not re.match("^[-a-z0-9A-Z_]*$", id): + raise MKUserError(varname, + _("Invalid tag ID. 
Only the characters a-z, A-Z, " + "0-9, _ and - are allowed.")) - # ID - forms.section(_("Username"), simple = not new) - if new: - html.text_input("userid", userid) - html.set_focus("userid") - else: - html.write(userid) - html.hidden_field("userid", userid) +def mode_edit_hosttag(phase): + tag_id = html.var("edit") + new = tag_id == None - def lockable_input(name, dflt): - if not is_locked(name): - html.text_input(name, user.get(name, dflt), size = 50) + if phase == "title": + if new: + return _("Create new tag group") else: - html.write(user.get(name, dflt)) - html.hidden_field(name, user.get(name, dflt)) - - # Full name - forms.section(_("Full name")) - lockable_input('alias', userid) - html.help(_("Full name or alias of the user")) + return _("Edit tag group") - # Email address - forms.section(_("Email address")) - lockable_input('email', '') - html.help(_("The email address is optional and is needed " - "if the user is a monitoring contact and receives notifications " - "via Email.")) + elif phase == "buttons": + html.context_button(_("All Hosttags"), make_link([("mode", "hosttags")]), "back") + return - forms.section(_("Pager address")) - lockable_input('pager', '') - html.help(_("The pager address is optional ")) + hosttags, auxtags = load_hosttags() + title = "" + choices = [] + topic = None + if not new: + for entry in hosttags: + id, tit, ch = entry[:3] + if id == tag_id: + topic, title = parse_hosttag_title(tit) + choices = ch + break - forms.header(_("Security")) - forms.section(_("Authentication")) - is_automation = user.get("automation_secret", None) != None - html.radiobutton("authmethod", "password", not is_automation, - _("Normal user login with password")) - html.write("
      %s" % _("password:")) - if not is_locked('password'): - html.password_input("password_" + pw_suffix, autocomplete="off") - html.write("
      %s" % _("repeat:")) - html.password_input("password2_" + pw_suffix, autocomplete="off") - html.write(" (%s)" % _("optional")) - else: - html.write('%s' % _('The password can not be changed (It is locked by the user connector).')) - html.hidden_field('password', '') - html.hidden_field('password2', '') - html.write("
    ") - html.radiobutton("authmethod", "secret", is_automation, - _("Automation secret for machine accounts")) - html.write("
      ") - html.text_input("secret", user.get("automation_secret", ""), size=30, - id="automation_secret") - html.write(" ") - html.write("  ") - html.icon_button("javascript:wato_randomize_secret('automation_secret', 20);", - _("Create random secret"), "random") - html.write("") - html.write("
    ") + vs_topic = OptionalDropdownChoice( + title = _("Topic"), + choices = hosttag_topics(hosttags, auxtags), + explicit = TextUnicode(), + otherlabel = _("Create New Topic"), + default_value = None, + sorted = True + ) - html.help(_("If you want the user to be able to login " - "then specify a password here. Users without a login make sense " - "if they are monitoring contacts that are just used for " - "notifications. The repetition of the password is optional. " - "
    For accounts used by automation processes (such as fetching " - "data from views for further procession), set the method to " - "secret. The secret will be stored in a local file. Processes " - "with read access to that file will be able to use Multisite as " - "a webservice without any further configuration.")) + vs_choices = ListOf( + Tuple( + elements = [ + TextAscii( + title = _("Tag ID"), + size = 16, + regex="^[-a-z0-9A-Z_]*$", + none_is_empty = True, + regex_error = _("Invalid tag ID. Only the characters a-z, A-Z, " + "0-9, _ and - are allowed.")), + TextUnicode( + title = _("Description") + "*", + allow_empty = False, + size = 40), - # Locking - forms.section(_("Disable password"), simple=True) - if not is_locked('locked'): - html.checkbox("locked", user.get("locked", False), label = _("disable the login to this account")) - else: - html.write(user.get("locked", False) and _('Login disabled') or _('Login possible')) - html.hidden_field('locked', user.get("locked", False) and '1' or '') - html.help(_("Disabling the password will prevent a user from logging in while " - "retaining the original password. Notifications are not affected " - "by this setting.")) + Foldable( + ListChoice( + title = _("Auxiliary tags"), + # help = _("These tags will implicitely added to a host if the " + # "user selects this entry in the tag group. Select multiple " + # "entries with the Ctrl key."), + choices = auxtags)), - # Roles - forms.section(_("Roles")) - entries = roles.items() - entries.sort(cmp = lambda a,b: cmp((a[1]["alias"],a[0]), (b[1]["alias"],b[0]))) - is_member_of_at_least_one = False - for role_id, role in entries: - if not is_locked('roles'): - html.checkbox("role_" + role_id, role_id in user.get("roles", [])) - url = make_link([("mode", "edit_role"), ("edit", role_id)]) - html.write("%s
    " % (url, role["alias"])) - else: - is_member = role_id in user.get("roles", []) - if is_member: - is_member_of_at_least_one = True + ], + show_titles = True, + orientation = "horizontal"), - url = make_link([("mode", "edit_role"), ("edit", role_id)]) - html.write("%s
    " % (url, role["alias"])) + add_label = _("Add tag choice"), + row_label = "@. Choice") - html.hidden_field("role_" + role_id, is_member and '1' or '') - if is_locked('roles') and not is_member_of_at_least_one: - html.write('%s' % _('No roles assigned.')) + if phase == "action": + if html.transaction_valid(): + if new: + html.check_transaction() # use up transaction id + tag_id = html.var("tag_id").strip() + validate_tag_id(tag_id, "tag_id") + if len(tag_id) == 0: + raise MKUserError("tag_id", _("Please specify an ID for your tag group.")) + if not re.match("^[-a-z0-9A-Z_]*$", tag_id): + raise MKUserError("tag_id", _("Invalid tag group ID. Only the characters a-z, A-Z, 0-9, _ and - are allowed.")) + for entry in config.wato_host_tags: + tgid = entry[0] + tit = entry[1] + if tgid == tag_id: + raise MKUserError("tag_id", _("The tag group ID %s is already used by the tag group '%s'.") % (tag_id, tit)) - # Contact groups - forms.header(_("Contact Groups"), isopen=False) - forms.section() - url1 = make_link([("mode", "contact_groups")]) - url2 = make_link([("mode", "rulesets"), ("group", "grouping")]) - if len(contact_groups) == 0: - html.write(_("Please first create some contact groups") % - url1) - else: - entries = [ (contact_groups[c], c) for c in contact_groups ] - entries.sort() - is_member_of_at_least_one = False - for alias, gid in entries: - if not alias: - alias = gid - if not is_locked('contactgroups'): - html.checkbox("cg_" + gid, gid in user.get("contactgroups", [])) - url = make_link([("mode", "edit_contact_group"), ("edit", gid)]) - html.write(" %s
    " % (url, alias)) - else: - is_member = gid in user.get("contactgroups", []) - if is_member: - is_member_of_at_least_one = True + title = html.var_utf8("title").strip() + if not title: + raise MKUserError("title", _("Please specify a title for your host tag group.")) - url = make_link([("mode", "edit_contact_group"), ("edit", gid)]) - html.write("%s
    " % (url, alias)) + topic = forms.get_input(vs_topic, "topic") + # always put at least "/" as prefix to the title, the title + # will then be split by the first "/' in future + title = '%s/%s' % (topic, title) - html.hidden_field("cg_" + gid, is_member and '1' or '') + new_choices = forms.get_input(vs_choices, "choices") + have_none_tag = False + for nr, (id, descr, aux) in enumerate(new_choices): + if id or descr: + if not id: + id = None + if have_none_tag: + raise MKUserError("choices_%d_id" % (nr+1), _("Only on tag may be empty.")) + have_none_tag = True + # Make sure tag ID is unique within this group + for (n, x) in enumerate(new_choices): + if n != nr and x[0] == id: + raise MKUserError("choices_id_%d" % (nr+1), _("Tags IDs must be unique. You've used %s twice.") % id) - if is_locked('contactgroups') and not is_member_of_at_least_one: - html.write('%s' % _('No contact groups assigned.')) + if id: + # Make sure this ID is not used elsewhere + for entry in config.wato_host_tags: + tgid = entry[0] + tit = entry[1] + ch = entry[2] + # Do not compare the taggroup with itselfs + if tgid != tag_id: + for e in ch: + # Check primary and secondary tags + if id == e[0] or len(e) > 2 and id in e[2]: + raise MKUserError("choices_id_%d" % (nr+1), + _("The tag ID '%s' is already being used by the choice " + "'%s' in the tag group '%s'.") % + ( id, e[1], tit )) - html.help(_("Contact groups are used to assign monitoring " - "objects to users. If you haven't defined any contact groups yet, " - "then first do so. Hosts and services can be " - "assigned to contact groups using rules.

    " - "If you do not put the user into any contact group " - "then no monitoring contact will be created for the user.") % (url1, url2)) + # Also check all defined aux tags even if they are not used anywhere + for tag, descr in auxtags: + if id == tag: + raise MKUserError("choices_id_%d" % (nr+1), + _("The tag ID '%s' is already being used as auxiliary tag.") % id) - forms.header(_("Notifications"), isopen=False) + if len(new_choices) == 0: + raise MKUserError("id_0", _("Please specify at least one tag.")) + if len(new_choices) == 1 and new_choices[0][0] == None: + raise MKUserError("id_0", _("Tags with only one choice must have an ID.")) - forms.section(_("Enabling"), simple=True) - html.checkbox("notifications_enabled", user.get("notifications_enabled", False), - label = _("enable notifications")) - html.help(_("Notifications are sent out " - "when the status of a host or service changes.")) - - # Notification period - forms.section(_("Notification time period")) - choices = [ ( "24X7", _("Always")) ] + \ - [ ( id, "%s" % (tp["alias"])) for (id, tp) in timeperiods.items() ] - html.sorted_select("notification_period", choices, user.get("notification_period")) - html.help(_("Only during this time period the " - "user will get notifications about host or service alerts.")) - - # Notification options - notification_option_names = { # defined here: _() must be executed always! - "host" : { - "d" : _("Host goes down"), - "u" : _("Host gets unreachble"), - "r" : _("Host goes up again"), - }, - "service" : { - "w" : _("Service goes into warning state"), - "u" : _("Service goes into unknown state"), - "c" : _("Service goes into critical state"), - "r" : _("Service recovers to OK"), - }, - "both" : { - "f" : _("Start or end of flapping state"), - "s" : _("Start or end of a scheduled downtime"), - } - } + if new: + taggroup = tag_id, title, new_choices + hosttags.append(taggroup) + save_hosttags(hosttags, auxtags) + # Make sure, that all tags are active (also manual ones from main.mk) + config.load_config() + declare_host_tag_attributes() + rewrite_config_files_below(g_root_folder) # explicit host tags in all_hosts + log_pending(SYNCRESTART, None, "edit-hosttags", _("Created new host tag group '%s'") % tag_id) + return "hosttags", _("Created new host tag group '%s'") % title + else: + new_hosttags = [] + for entry in hosttags: + if entry[0] == tag_id: + new_hosttags.append((tag_id, title, new_choices)) + else: + new_hosttags.append(entry) - forms.section(_("Notification Options")) - for title, what, opts in [ ( _("Host events"), "host", "durfs"), - (_("Service events"), "service", "wucrfs") ]: - html.write("%s:
      " % title) - user_opts = user.get(what + "_notification_options", opts) - for opt in opts: - opt_name = notification_option_names[what].get(opt, - notification_option_names["both"].get(opt)) - html.checkbox(what + "_" + opt, opt in user_opts, label = opt_name) - html.write("
      ") - html.write("
    ") - html.help(_("Here you specify which types of alerts " - "will be notified to this contact. Note: these settings will only be saved " - "and used if the user is member of a contact group.")) - - forms.section(_("Notification Method")) - vs_notification_method.render_input("notification_method", user.get("notification_method")) - - # Notification commands (deprecated) - forms.section(_("Notification Command for Hosts")) - html.text_input("host_notification_commands", user.get("host_notification_commands", "check-mk-notify")) - html.help(_("Use this Nagios command for sending host notifications.")) - forms.section(_("Notification Command for Services")) - html.text_input("service_notification_commands", user.get("service_notification_commands", "check-mk-notify")) - html.help(_("Use this Nagios command for sending service notifications.")) + # This is the major effort of WATO when it comes to + # host tags: renaming and deleting of tags that might be + # in use by folders, hosts and rules. First we create a + # kind of "patch" from the old to the new tags. The renaming + # of a tag is detected by comparing the titles. Addition + # of new tags is not a problem and need not be handled. + # Result of this is the dict 'operations': it's keys are + # current tag names, its values the corresponding new names + # or False in case of tag removals. + operations = {} - forms.header(_("Personal Settings"), isopen = False) - select_language(user.get('language', '')) - for name, attr in userdb.get_user_attributes(): - if attr['user_editable']: - if not attr.get("permission") or config.may(attr["permission"]): - vs = attr['valuespec'] - forms.section(vs.title()) - vs.render_input("ua_" + name, user.get(name, vs.default_value())) - html.help(vs.help()) + # Detect renaming + new_by_title = dict([e[:2] for e in new_choices]) + for entry in choices: + tag, tit = entry[:2] # optional third element: aux tags + if tit in new_by_title: + new_tag = new_by_title[tit] + if new_tag != tag: + operations[tag] = new_tag # might be None - # TODO: Later we could add custom macros here, which - # then could be used for notifications. On the other hand, - # if we implement some check_mk --notify, we could directly - # access the data in the account with the need to store - # values in the monitoring core. We'll see what future brings. 
- forms.end() - html.button("save", _("Save")) - html.hidden_fields() - html.end_form() + # Detect removal + for entry in choices: + tag, tit = entry[:2] # optional third element: aux tags + if tag != None \ + and tag not in [ e[0] for e in new_choices ] \ + and tag not in operations: + # remove explicit tag (hosts/folders) or remove it from tag specs (rules) + operations[tag] = False -def filter_hidden_users(users): - if config.wato_hidden_users: - return dict([ (id, user) for id, user in users.items() if id not in config.wato_hidden_users ]) - else: - return users + # Now check, if any folders, hosts or rules are affected + message = rename_host_tags_after_confirmation(tag_id, operations) + if message: + save_hosttags(new_hosttags, auxtags) + config.load_config() + declare_host_tag_attributes() + rewrite_config_files_below(g_root_folder) # explicit host tags in all_hosts + log_pending(SYNCRESTART, None, "edit-hosttags", _("Edited host tag group %s (%s)") % (message, tag_id)) + return "hosttags", message != True and message or None + return "hosttags" -# Dropdown for choosing a multisite user -class UserSelection(ElementSelection): - def __init__(self, **kwargs): - ElementSelection.__init__(self, **kwargs) - self._none = kwargs.get("none") - def get_elements(self): - users = filter_hidden_users(userdb.load_users()) - elements = dict([ (name, "%s - %s" % (name, us.get("alias", name))) for (name, us) in users.items() ]) - if self._none: - elements[None] = self._none - return elements - def value_to_text(self, value): - self.load_elements() - return self._elements.get(value, value).split(" - ")[-1] + html.begin_form("hosttaggroup", method = 'POST') + forms.header(_("Edit group") + (tag_id and " %s" % tag_id or "")) -#. -# .-Roles----------------------------------------------------------------. -# | ____ _ | -# | | _ \ ___ | | ___ ___ | -# | | |_) / _ \| |/ _ \/ __| | -# | | _ < (_) | | __/\__ \ | -# | |_| \_\___/|_|\___||___/ | -# | | -# +----------------------------------------------------------------------+ -# | Mode for managing roles and permissions. | -# | In order to make getting started easier - Check_MK Multisite comes | -# | with three builtin-roles: admin, user and guest. These roles have | -# | predefined permissions. The builtin roles cannot be deleted. Users | -# | listed in admin_users in multisite.mk automatically get the role | -# | admin - even if no such user or contact has been configured yet. By | -# | that way an initial login - e.g. as omdamin - is possible. The admin | -# | role cannot be removed from that user as long as he is listed in | -# | admin_users. Also the variables guest_users, users and default_user_ | -# | role still work. That way Multisite is fully operable without WATO | -# | and also backwards compatible. | -# | In WATO you can create further roles and also edit the permissions | -# | of the existing roles. Users can be assigned to builtin and custom | -# | roles. | -# | This modes manages the creation of custom roles and the permissions | -# | configuration of all roles. | -# '----------------------------------------------------------------------' + # Tag ID + forms.section(_("Internal ID")) + html.help(_("The internal ID of the tag group is used to store the tag's " + "value in the host properties. 
It cannot be changed later.")) + if new: + html.text_input("tag_id") + html.set_focus("tag_id") + else: + html.write(tag_id) -def mode_roles(phase): - if phase == "title": - return _("Roles & Permissions") + # Title + forms.section(_("Title") + "*") + html.help(_("An alias or description of this tag group")) + html.text_input("title", title, size = 30) - elif phase == "buttons": - global_buttons() - html.context_button(_("Matrix"), make_link([("mode", "role_matrix")]), "matrix") - return + # The (optional) topic + forms.section(_("Topic") + "*") + html.help(_("Different taggroups can be grouped in topics to make the visualization and " + "selections in the GUI more comfortable.")) + forms.input(vs_topic, "topic", topic) - roles = userdb.load_roles() - users = filter_hidden_users(userdb.load_users()) + # Choices + forms.section(_("Choices")) + html.help(_("The first choice of a tag group will be its default value. " + "If a tag group has only one choice, it will be displayed " + "as a checkbox and set or not set the only tag. If it has " + "more choices you may leave at most one tag id empty. A host " + "with that choice will not get any tag of this group.

    " + "The tag ID must contain only of letters, digits and " + "underscores.

    Renaming tags ID: if you want " + "to rename the ID of a tag, then please make sure that you do not " + "change its title at the same time! Otherwise WATO will not " + "be able to detect the renaming and cannot exchange the tags " + "in all folders, hosts and rules accordingly.")) + forms.input(vs_choices, "choices", choices) - if phase == "action": - if html.var("_delete"): - delid = html.var("_delete") - if html.transaction_valid() and roles[delid].get('builtin'): - raise MKUserError(None, _("You cannot delete the builtin roles!")) + # Button and end + forms.end() + html.show_localization_hint() - c = wato_confirm(_("Confirm deletion of role %s" % delid), - _("Do you really want to delete the role %s?" % delid)) - if c: - rename_user_role(delid, None) # Remove from existing users - del roles[delid] - save_roles(roles) - log_pending(False, None, "edit-roles", _("Deleted role '%s'" % delid)) - return None - elif c == False: - return "" - else: - return - elif html.var("_clone"): - if html.check_transaction(): - cloneid = html.var("_clone") - cloned_role = roles[cloneid] - newid = cloneid - while newid in roles: - newid += "x" - new_role = {} - new_role.update(cloned_role) - if cloned_role.get("builtin"): - new_role["builtin"] = False - new_role["basedon"] = cloneid - roles[newid] = new_role - save_roles(roles) - log_pending(False, None, "edit-roles", _("Created new role '%s'" % newid)) - return None - else: - return None + html.button("save", _("Save")) + html.hidden_fields() + html.end_form() - table.begin(_("Roles")) +def format_php(data, lvl = 1): + s = '' + if isinstance(data, tuple) or isinstance(data, list): + s += 'array(\n' + for item in data: + s += ' ' * lvl + format_php(item, lvl + 1) + ',\n' + s += ' ' * (lvl - 1) + ')' + elif isinstance(data, dict): + s += 'array(\n' + for key, val in data.iteritems(): + s += ' ' * lvl + format_php(key, lvl + 1) + ' => ' + format_php(val, lvl + 1) + ',\n' + s += ' ' * (lvl - 1) + ')' + elif isinstance(data, str): + s += '\'%s\'' % data.replace('\'', '\\\'') + elif isinstance(data, unicode): + s += '\'%s\'' % data.encode('utf-8').replace('\'', '\\\'') + elif isinstance(data, bool): + s += data and 'true' or 'false' + elif data is None: + s += 'null' + else: + s += str(data) + + return s + +# Creates a includable PHP file which provides some functions which +# can be used by the calling program, for example NagVis. It declares +# the following API: +# +# taggroup_title(group_id) +# Returns the title of a WATO tag group +# +# taggroup_choice(group_id, list_of_object_tags) +# Returns either +# false: When taggroup does not exist in current config +# null: When no choice can be found for the given taggroup +# array(tag, title): When a tag of the taggroup +# +# all_taggroup_choices(object_tags): +# Returns an array of elements which use the tag group id as key +# and have an assiciative array as value, where 'title' contains +# the tag group title and the value contains the value returned by +# taggroup_choice() for this tag group. +# +def export_hosttags(hosttags, auxtags): + path = php_api_dir + '/hosttags.php' + make_nagios_directory(php_api_dir) + + # need an extra lock file, since we move the auth.php.tmp file later + # to auth.php. This move is needed for not having loaded incomplete + # files into php. 
+ tempfile = path + '.tmp' + lockfile = path + '.state' + file(lockfile, 'a') + aquire_lock(lockfile) + + # Transform WATO internal data structures into easier usable ones + hosttags_dict = {} + for id, title, choices in hosttags: + tags = {} + for tag_id, tag_title, tag_auxtags in choices: + tags[tag_id] = tag_title, tag_auxtags + topic, title = parse_hosttag_title(title) + hosttags_dict[id] = topic, title, tags + auxtags_dict = dict(auxtags) + + # First write a temp file and then do a move to prevent syntax errors + # when reading half written files during creating that new file + file(tempfile, 'w').write(''' $group) { + $choices[$group_id] = array( + 'topic' => $group[0], + 'title' => $group[1], + 'value' => taggroup_choice($group_id, $object_tags), + ); + } + return $choices; +} - # Actions - table.cell(_("Actions"), css="buttons") - edit_url = make_link([("mode", "edit_role"), ("edit", id)]) - clone_url = html.makeactionuri([("_clone", id)]) - delete_url = html.makeactionuri([("_delete", id)]) - html.icon_button(edit_url, _("Properties"), "edit") - html.icon_button(clone_url, _("Clone"), "clone") - if not role.get("builtin"): - html.icon_button(delete_url, _("Delete this role"), "delete") +?> +''' % (format_php(hosttags_dict), format_php(auxtags_dict))) + # Now really replace the destination file + os.rename(tempfile, path) + release_lock(lockfile) + os.unlink(lockfile) + +# Current specification for hosttag entries: One tag definition is stored +# as tuple of at least three elements. The elements are used as follows: +# taggroup_id, group_title, list_of_choices, depends_on_tags, depends_on_roles, editable +def load_hosttags(): + filename = multisite_dir + "hosttags.mk" + if not os.path.exists(filename): + return [], [] + try: + vars = { + "wato_host_tags" : [], + "wato_aux_tags" : []} + execfile(filename, vars, vars) + # Convert manually crafted host tags tags WATO-style. This + # makes the migration easier + for taggroup in vars["wato_host_tags"]: + for nr, entry in enumerate(taggroup[2]): + if len(entry) <= 2: + taggroup[2][nr] = entry + ([],) + return vars["wato_host_tags"], vars["wato_aux_tags"] - # ID - table.cell(_("Name"), id) + except Exception, e: + if config.debug: + raise MKGeneralException(_("Cannot read configuration file %s: %s" % + (filename, e))) + return [], [] - # Alias - table.cell(_("Alias"), role["alias"]) +def save_hosttags(hosttags, auxtags): + make_nagios_directory(multisite_dir) + out = create_user_file(multisite_dir + "hosttags.mk", "w") + out.write("# Written by WATO\n# encoding: utf-8\n\n") + out.write("wato_host_tags += \\\n%s\n\n" % pprint.pformat(hosttags)) + out.write("wato_aux_tags += \\\n%s\n" % pprint.pformat(auxtags)) + export_hosttags(hosttags, auxtags) - # Type - table.cell(_("Type"), role.get("builtin") and _("builtin") or _("custom")) +# Handle renaming and deletion of host tags: find affected +# hosts, folders and rules. Remove or fix those rules according +# the the users' wishes. In case auf auxiliary tags the tag_id +# is None. In other cases it is the id of the tag group currently +# being edited. 
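# Aside (tag IDs below are made up): 'operations' arrives in two shapes.
# When a complete tag group is deleted, it is the plain list of that
# group's choice IDs:
#
#     rename_host_tags_after_confirmation("criticality", ["prod", "test"])
#
# When choices are renamed or removed - or an auxiliary tag is deleted
# (tag_id None) - it maps each old tag ID to its new ID, or to False for
# a removal:
#
#     rename_host_tags_after_confirmation("criticality", {"prod": "production", "test": False})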
+def rename_host_tags_after_confirmation(tag_id, operations): + mode = html.var("_repair") + if mode == "abort": + raise MKUserError("id_0", _("Aborting change.")) - # Modifications - table.cell(_("Modifications"), "%s" % ( - _("That many permissions do not use the factory defaults."), len(role["permissions"]))) + elif mode: + if tag_id and type(operations) == list: # make attribute unknown to system, important for save() operations + undeclare_host_tag_attribute(tag_id) + affected_folders, affected_hosts, affected_rulespecs = \ + change_host_tags_in_folders(tag_id, operations, mode, g_root_folder) + return _("Modified folders: %d, modified hosts: %d, modified rulesets: %d" % + (len(affected_folders), len(affected_hosts), len(affected_rulespecs))) - # Users - table.cell(_("Users"), - ", ".join([ '%s' % (make_link([("mode", "edit_user"), ("edit", user_id)]), - user.get("alias", user_id)) - for (user_id, user) in users.items() if (id in user["roles"])])) + message = "" + affected_folders, affected_hosts, affected_rulespecs = \ + change_host_tags_in_folders(tag_id, operations, "check", g_root_folder) + if affected_folders: + message += _("Affected folders with an explicit reference to this tag " + "group and that are affected by the change") + ":
      " + for folder in affected_folders: + message += '
    • %s
    • ' % ( + make_link_to([("mode", "editfolder")], folder), + folder["title"]) + message += "
    " - # Possibly we could also display the following information - # - number of set permissions (needs loading users) - # - number of users with this role - table.end() + if affected_hosts: + message += _("Hosts where this tag group is explicitely set " + "and that are effected by the change") + ":
    • " + for nr, host in enumerate(affected_hosts): + if nr > 20: + message += "... (%d more)" % (len(affected_hosts) - 20) + break + elif nr > 0: + message += ", " + message += '%s' % ( + make_link([("mode", "edithost"), ("host", host[".name"])]), + host[".name"]) + message += "
    " + if affected_rulespecs: + message += _("Rulesets that contain rules with references to the changed tags") + ":
      " + for rulespec in affected_rulespecs: + message += '
    • %s
    • ' % ( + make_link([("mode", "edit_ruleset"), ("varname", rulespec["varname"])]), + rulespec["title"]) + message += "
    " + if not message and type(operations) == tuple: # deletion of unused tag group + html.write("
    ") + html.begin_form("confirm") + html.write(_("Please confirm the deletion of the tag group.")) + html.button("_abort", _("Abort")) + html.button("_do_confirm", _("Proceed")) + html.hidden_fields(add_action_vars = True) + html.end_form() + html.write("
    ") + elif message: + if type(operations) == list: + wato_html_head(_("Confirm tag deletion")) + else: + wato_html_head(_("Confirm tag modifications")) + html.write("
    ") + html.write("

    " + _("Your modifications affect some objects") + "

    ") + html.write(message) + html.write("
    " + _("WATO can repair things for you. It can rename tags in folders, host and rules. " + "Removed tag groups will be removed from hosts and folders, removed tags will be " + "replaced with the default value for the tag group (for hosts and folders). What " + "rules concern, you have to decide how to proceed.")) + html.begin_form("confirm") -def mode_edit_role(phase): - id = html.var("edit") + # Check if operations contains removal + if type(operations) == list: + have_removal = True + else: + have_removal = False + for new_val in operations.values(): + if not new_val: + have_removal = True + break - if phase == "title": - return _("Edit user role %s" % id) + if len(affected_rulespecs) > 0 and have_removal: + html.write("
    " + _("Some tags that are used in rules have been removed by you. What " + "shall we do with that rules?") + "
      ") + html.radiobutton("_repair", "remove", True, _("Just remove the affected tags from the rules.")) + html.write("
      ") + html.radiobutton("_repair", "delete", False, _("Delete rules containing tags that have been removed, if tag is used in a positive sense. Just remove that tag if it's used negated.")) + else: + html.write("
        ") + html.radiobutton("_repair", "repair", True, _("Fix affected folders, hosts and rules.")) - elif phase == "buttons": - html.context_button(_("All Roles"), make_link([("mode", "roles")]), "back") - return + html.write("
        ") + html.radiobutton("_repair", "abort", False, _("Abort your modifications.")) + html.write("
      ") - roles = userdb.load_roles() - role = roles[id] + html.button("_do_confirm", _("Proceed"), "") + html.hidden_fields(add_action_vars = True) + html.end_form() + html.write("
    ") + return False - if phase == "action": - alias = html.var_utf8("alias") - new_id = html.var("id") - if len(new_id) == 0: - raise MKUserError("id", _("Please specify an ID for the new role.")) - if not re.match("^[-a-z0-9A-Z_]*$", new_id): - raise MKUserError("id", _("Invalid role ID. Only the characters a-z, A-Z, 0-9, _ and - are allowed.")) - if new_id != id: - if new_id in roles: - raise MKUserError("id", _("The ID is already used by another role")) + return True - role["alias"] = alias +# operation == None -> tag group is deleted completely +# tag_id == None -> Auxiliary tag has been deleted, no +# tag group affected +def change_host_tags_in_folders(tag_id, operations, mode, folder): + need_save = False + affected_folders = [] + affected_hosts = [] + affected_rulespecs = [] + if tag_id: + attrname = "tag_" + tag_id + attributes = folder["attributes"] + if attrname in attributes: # this folder has set the tag group in question + if type(operations) == list: # deletion of tag group + if attrname in attributes: + affected_folders.append(folder) + if mode != "check": + del attributes[attrname] + need_save = True + else: + current = attributes[attrname] + if current in operations: + affected_folders.append(folder) + if mode != "check": + new_tag = operations[current] + if new_tag == False: # tag choice has been removed -> fall back to default + del attributes[attrname] + else: + attributes[attrname] = new_tag + need_save = True + if need_save: + try: + save_folder(folder) + except MKAuthException, e: + # Ignore MKAuthExceptions of locked host.mk files + pass - # based on - if not role.get("builtin"): - basedon = html.var("basedon") - if basedon not in config.builtin_role_ids: - raise MKUserError("basedon", _("Invalid valid for based on. Must be id of builtin rule.")) - role["basedon"] = basedon + for subfolder in folder[".folders"].values(): + aff_folders, aff_hosts, aff_rulespecs = change_host_tags_in_folders(tag_id, operations, mode, subfolder) + affected_folders += aff_folders + affected_hosts += aff_hosts + affected_rulespecs += aff_rulespecs - # Permissions - permissions = {} - for perm in config.permissions_by_order: - pname = perm["name"] - value = html.var("perm_" + pname) - if value == "yes": - permissions[pname] = True - elif value == "no": - permissions[pname] = False - role["permissions"] = permissions + load_hosts(folder) + affected_hosts += change_host_tags_in_hosts(folder, tag_id, operations, mode, folder[".hosts"]) - if id != new_id: - roles[new_id] = role - del roles[id] - rename_user_role(id, new_id) + affected_rulespecs += change_host_tags_in_rules(folder, operations, mode) + return affected_folders, affected_hosts, affected_rulespecs - save_roles(roles) - log_pending(False, None, "edit-roles", _("Modified user role '%s'" % new_id)) - return "roles" +def change_host_tags_in_hosts(folder, tag_id, operations, mode, hostlist): + need_save = False + affected_hosts = [] + for hostname, host in hostlist.items(): + attrname = "tag_" + tag_id + if attrname in host: + if type(operations) == list: # delete complete tag group + affected_hosts.append(host) + if mode != "check": + del host[attrname] + need_save = True + else: + if host[attrname] in operations: + affected_hosts.append(host) + if mode != "check": + new_tag = operations[host[attrname]] + if new_tag == False: # tag choice has been removed -> fall back to default + del host[attrname] + else: + host[attrname] = new_tag + need_save = True + if need_save: + try: + save_hosts(folder) + except MKAuthException, e: + # 
Ignore MKAuthExceptions of locked host.mk files
+                    pass
+    return affected_hosts

-    html.begin_form("role", method="POST")

-    # ID
-    forms.header(_("Basic Properties"))
-    forms.section(_("Internal ID"), simple = "builtin" in role)
-    if role.get("builtin"):
-        html.write("%s (%s)" % (id, _("builtin role")))
-        html.hidden_field("id", id)
-    else:
-        html.text_input("id", id)
-        html.set_focus("id")

+# The function parses all rules in all rulesets and looks
+# for host tags that have been removed or renamed. If tags
+# are removed then, depending on the mode, the affected rules
+# are either deleted ("delete") or the vanished tags are
+# removed from the rule ("remove").
+def change_host_tags_in_rules(folder, operations, mode):
+    need_save = False
+    affected_rulespecs = []
+    all_rulesets = load_rulesets(folder)
+    for varname, ruleset in all_rulesets.items():
+        rulespec = g_rulespecs[varname]
+        rules_to_delete = set([])
+        for nr, rule in enumerate(ruleset):
+            modified = False
+            value, tag_specs, host_list, item_list, rule_options = parse_rule(rulespec, rule)

-    # Alias
-    forms.section(_("Alias"))
-    html.help(_("An alias or description of the role"))
-    html.text_input("alias", role.get("alias", ""), size = 50)

+            # Handle deletion of complete tag group
+            if type(operations) == list: # this is the list of tags to remove
+                for tag in operations:
+                    if tag != None and (tag in tag_specs or "!"+tag in tag_specs):
+                        if rulespec not in affected_rulespecs:
+                            affected_rulespecs.append(rulespec)
+                        if mode != "check":
+                            modified = True
+                            if tag in tag_specs and mode == "delete":
+                                rules_to_delete.add(nr)
+                            elif tag in tag_specs:
+                                tag_specs.remove(tag)
+                            elif "!"+tag in tag_specs:
+                                tag_specs.remove("!"+tag)

-    # Based on
-    if not role.get("builtin"):
-        forms.section(_("Based on role"))
-        html.help(_("Each user defined role is based on one of the builtin roles. "
-                    "When created it will start with all permissions of that role. When due to a software "
-                    "update or installation of an addons new permissions appear, the user role will get or "
-                    "not get those new permissions based on the default settings of the builtin role it's "
-                    "based on."))
-        choices = [ (i, r["alias"]) for i, r in roles.items() if r.get("builtin") ]
-        html.sorted_select("basedon", choices, role.get("basedon", "user"))

+            # Removal or renaming of single tag choices
+            else:
+                for old_tag, new_tag in operations.items():
+                    # The case that old_tag is None (an empty tag has got a name)
+                    # cannot be handled when it comes to rules. Rules do not support
+                    # such None-values.
+                    if not old_tag:
+                        continue

+                    if old_tag in tag_specs or ("!" + old_tag) in tag_specs:
+                        if rulespec not in affected_rulespecs:
+                            affected_rulespecs.append(rulespec)
+                        if mode != "check":
+                            modified = True
+                            if old_tag in tag_specs:
+                                tag_specs.remove(old_tag)
+                                if new_tag:
+                                    tag_specs.append(new_tag)
+                                elif mode == "delete":
+                                    rules_to_delete.add(nr)
+                            # negated tag has been renamed or removed
+                            if "!"+old_tag in tag_specs:
+                                tag_specs.remove("!"+old_tag)
+                                if new_tag:
+                                    tag_specs.append("!"+new_tag)
+                            # the case "delete" need not be handled here. Negated
+                            # tags can always be removed without changing the rule's
+                            # behaviour.
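# Aside (hypothetical tags): with operations = { "lan": "wan" } a rule
# condition like [ "lan", "!dmz" ] is rewritten to [ "wan", "!dmz" ]. With
# operations = { "lan": False } the positive tag is dropped from the
# condition in "remove" mode, while "delete" mode discards the whole rule;
# a negated "!lan" is simply dropped in either mode.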
+ if modified: + ruleset[nr] = construct_rule(rulespec, value, tag_specs, host_list, item_list, rule_options) + need_save = True - # Permissions - base_role_id = role.get("basedon", id) + rules_to_delete = list(rules_to_delete) + rules_to_delete.sort() + for nr in rules_to_delete[::-1]: + del ruleset[nr] - html.help( - _("When you leave the permissions at "default" then they get their " - "settings from the factory defaults (for builtin roles) or from the " - "factory default of their base role (for user define roles). Factory defaults " - "may change due to software updates. When choosing another base role, all " - "permissions that are on default will reflect the new base role.")) + if need_save: + save_rulesets(folder, all_rulesets) + affected_rulespecs.sort(cmp = lambda a, b: cmp(a["title"], b["title"])) + return affected_rulespecs - # Loop all permission sections, but sorted plz - for section, (prio, section_title) in sorted(config.permission_sections.iteritems(), - key = lambda x: x[1][0], reverse = True): - forms.header(section_title, False) - # Loop all permissions - for perm in config.permissions_by_order: - pname = perm["name"] - this_section = pname.split(".")[0] - if section != this_section: - continue # Skip permissions of other sections +#. +# .--Rule-Editor---------------------------------------------------------. +# | ____ _ _____ _ _ _ | +# | | _ \ _ _| | ___ | ____|__| (_) |_ ___ _ __ | +# | | |_) | | | | |/ _ \ | _| / _` | | __/ _ \| '__| | +# | | _ <| |_| | | __/ | |__| (_| | | || (_) | | | +# | |_| \_\\__,_|_|\___| |_____\__,_|_|\__\___/|_| | +# | | +# +----------------------------------------------------------------------+ +# | WATO's awesome rule editor: Lets the user edit rule based parameters | +# | from main.mk. | +# '----------------------------------------------------------------------' - forms.section(perm["title"]) +def mode_ruleeditor(phase): + only_host = html.var("host", "") + only_local = "" # html.var("local") - pvalue = role["permissions"].get(pname) - def_value = base_role_id in perm["defaults"] + if phase == "title": + if only_host: + return _("Rules effective on host ") + only_host + else: + return _("Rule-Based Configuration of Host & Service Parameters") - choices = [ ( "yes", _("yes")), - ( "no", _("no")), - ( "default", _("default (%s)") % (def_value and _("yes") or _("no") )) ] - html.select("perm_" + pname, choices, { True: "yes", False: "no" }.get(pvalue, "default"), attrs={"style": "width: 130px;"} ) + elif phase == "buttons": + global_buttons() + if only_host: + html.context_button(only_host, + make_link([("mode", "edithost"), ("host", only_host)]), "host") - html.help(perm["description"]) + html.context_button(_("Ineffective rules"), make_link([("mode", "ineffective_rules")]), "usedrulesets") + return - forms.end() - html.button("save", _("Save")) - html.hidden_fields() - html.end_form() + elif phase == "action": + return -def make_unicode(s): - if type(s) != unicode: # assume utf-8 encoded bytestring - return s.decode("utf-8") + if not only_host: + render_folder_path(keepvarnames = ["mode", "local"]) else: - return s - -def save_roles(roles): - # Reflect the data in the roles dict kept in the config module Needed - # for instant changes in current page while saving modified roles. - # Otherwise the hooks would work with old data when using helper - # functions from the config module - config.roles.update(roles) + html.write("

    %s: %s

    " % (_("Host"), only_host)) - make_nagios_directory(multisite_dir) - filename = multisite_dir + "roles.mk" - out = create_user_file(filename, "w") - out.write("# Written by WATO\n# encoding: utf-8\n\n") - out.write("roles.update(\n%s)\n" % pprint.pformat(roles)) + search_form(_("Search for rules: "), "rulesets") - call_hook_roles_saved(roles) + # Group names are separated with "/" into main group and optional subgroup. + # Do not loose carefully manually crafted order of groups! + groupnames = [] + for gn, rulesets in g_rulespec_groups: + main_group = gn.split('/')[0] + if main_group not in groupnames: + groupnames.append(main_group) + menu = [] + for groupname in groupnames + ["used"]: + url = make_link([("mode", "rulesets"), ("group", groupname), + ("host", only_host), ("local", only_local)]) + if groupname == "used": + title = _("Used Rulesets") + help = _("Show only modified rulesets
    (all rulesets with at least one rule)") + icon = "usedrulesets" + elif groupname == "static": # these have moved into their own WATO module + continue + else: + title, help = g_rulegroups.get(groupname, (groupname, "")) + icon = "rulesets" + help = help.split('\n')[0] # Take only first line as button text + menu.append((url, title, icon, "rulesets", help)) + render_main_menu(menu) +def search_form(title, mode=None): + html.begin_form("search") + html.write(title+' ') + html.text_input("search", size=32) + html.hidden_fields() + if mode: + html.hidden_field("mode", mode) + html.set_focus("search") + html.write(" ") + html.button("_do_seach", _("Search")) + html.end_form() + html.write('
    ') -# Adapt references in users. Builtin rules cannot -# be renamed and are not handled here. If new_id is None, -# the role is being deleted -def rename_user_role(id, new_id): - users = userdb.load_users() - for user in users.values(): - if id in user["roles"]: - user["roles"].remove(id) - if new_id: - user["roles"].append(new_id) - userdb.save_users(users) +def rule_is_ineffective(rule, rule_folder, rulespec, hosts): + value, tag_specs, host_list, item_list, rule_options = parse_rule(rulespec, rule) + found_match = False + for (hostname, hostvalues) in hosts.items(): + reason = rule_matches_host_and_item(rulespec, tag_specs, host_list, item_list, rule_folder, hostvalues[".folder"], hostname, NO_ITEM) + if reason == True: + found_match = True + break + return not found_match -def mode_role_matrix(phase): +def mode_ineffective_rules(phase): if phase == "title": - return _("Role & Permission Matrix") + return _("Ineffective rules") elif phase == "buttons": global_buttons() - html.context_button(_("Back"), make_link([("mode", "roles")]), "back") + html.context_button(_("All Rulesets"), make_link([("mode", "ruleeditor")]), "back") + if config.may("wato.hosts") or config.may("wato.seeall"): + html.context_button(_("Folder"), make_link([("mode", "folder")]), "folder") return elif phase == "action": return - # Show table of builtin and user defined roles, sorted by alias - roles = userdb.load_roles() - role_list = roles.items() - role_list.sort(cmp = lambda a,b: cmp((a[1]["alias"],a[0]), (b[1]["alias"],b[0]))) + # Select matching rule groups while keeping their configured order + all_rulesets = load_all_rulesets() + groupnames = [ gn for gn, rulesets in g_rulespec_groups ] - html.write("") - html.write("") - num_roles = 1 - for id, role in role_list: - html.write('' % role['alias']) - num_roles += 1 - html.write("\n") + html.write('
    ') - # Loop all permission sections, but sorted plz - odd = "even" - for section, (prio, section_title) in sorted(config.permission_sections.iteritems(), - key = lambda x: x[1][0], reverse = True): + all_hosts = load_all_hosts() + html.write("
    " + _("The following rules do not match to any of the existing hosts.") + "
    ") + have_ineffective = False - html.write('
    ') - html.write('' % (num_roles, section_title)) - html.write('') + for groupname in groupnames: + # Show information about a ruleset + # Sort rulesets according to their title + g_rulespec_group[groupname].sort(cmp = lambda a, b: cmp(a["title"], b["title"])) + for rulespec in g_rulespec_group[groupname]: + varname = rulespec["varname"] + valuespec = rulespec["valuespec"] - # Loop all permissions - for perm in config.permissions_by_order: - pname = perm["name"] - this_section = pname.split(".")[0] - if section != this_section: - continue # Skip permissions of other sections + # handle only_used + rules = all_rulesets.get(varname, []) + num_rules = len(rules) + if num_rules == 0: + continue - odd = odd == "odd" and "even" or "odd" + ineffective_rules = [] + current_rule_folder = None + for f, rule in rules: + if current_rule_folder == None or current_rule_folder != f: + current_rule_folder = f + rulenr = 0 + else: + rulenr = rulenr + 1 + if rule_is_ineffective(rule, f, rulespec, all_hosts): + ineffective_rules.append( (rulenr, (f,rule)) ) + if len(ineffective_rules) == 0: + continue + have_ineffective = True + titlename = g_rulegroups[groupname.split("/")[0]][0] + rulegroup, test = g_rulegroups.get(groupname, (groupname, "")) + html.write("
    ") + ruleset_url = make_link([("mode", "edit_ruleset"), ("varname", varname)]) + table.begin("ineffective_rules", title = _("%s (%s)") % (ruleset_url, rulespec["title"], titlename), css="ruleset") + for rel_rulenr, (f, rule) in ineffective_rules: + value, tag_specs, host_list, item_list, rule_options = parse_rule(rulespec, rule) + table.row() - html.write('
    ' % odd) - html.write('' % perm["title"]) + # Actions + table.cell("Actions", css="buttons") + edit_url = make_link([ + ("mode", "edit_rule"), + ("varname", varname), + ("rulenr", rel_rulenr), + ("rule_folder", f[".path"]) + ]) + html.icon_button(edit_url, _("Edit this rule"), "edit") - for id, role in role_list: - base_on_id = role.get('basedon', id) - pvalue = role["permissions"].get(pname) - if pvalue is None: - pvalue = base_on_id in perm["defaults"] + delete_url = make_action_link([ + ("mode", "edit_ruleset"), + ("varname", varname), + ("_action", "delete"), + ("_folder", f[".path"]), + ("_rulenr", rel_rulenr), + ("rule_folder", f[".path"]) + ]) + html.icon_button(delete_url, _("Delete this rule"), "delete") - html.write('' % (pvalue and 'X' or '')) + # Rule folder + table.cell(_("Rule folder")) + html.write(get_folder_aliaspath(f, show_main = False)) + + # Conditions + table.cell(_("Conditions"), css="condition") + render_conditions(rulespec, tag_specs, host_list, item_list, varname, f) + + # Value + table.cell(_("Value")) + if rulespec["valuespec"]: + try: + value_html = rulespec["valuespec"].value_to_text(value) + except: + try: + reason = "" + rulespec["valuespec"].validate_datatype(value, "") + except Exception, e: + reason = str(e) - html.write('') + value_html = '' \ + + _("The value of this rule is not valid. ") \ + + reason + else: + img = value and "yes" or "no" + title = value and _("This rule results in a positive outcome.") \ + or _("this rule results in a negative outcome.") + value_html = '' \ + % (title, img) + html.write(value_html) + + # Comment + table.cell(_("Comment")) + url = rule_options.get("docu_url") + if url: + html.icon_button(url, _("Context information about this rule"), "url", target="_blank") + html.write(" ") + html.write(html.attrencode(rule_options.get("comment", ""))) - html.write("
    %s
    %s
    %s%s
    ") + table.end() + html.write("
    ") -#. -# .-Host-Tags------------------------------------------------------------. -# | _ _ _ _____ | -# | | | | | ___ ___| |_ |_ _|_ _ __ _ ___ | -# | | |_| |/ _ \/ __| __| | |/ _` |/ _` / __| | -# | | _ | (_) \__ \ |_ | | (_| | (_| \__ \ | -# | |_| |_|\___/|___/\__| |_|\__,_|\__, |___/ | -# | |___/ | -# +----------------------------------------------------------------------+ -# | Manage the variable config.wato_host_tags -> The set of tags to be | -# | assigned to hosts and that is the basis of the rules. | -# '----------------------------------------------------------------------' + if not have_ineffective: + html.write("
    " + _("There are no ineffective rules.") + "
    ") + html.write('
    ') + return + +def mode_static_checks(phase): + return mode_rulesets(phase, "static") + + +def mode_rulesets(phase, group=None): + if not group: + group = html.var("group") # obligatory + + search = html.var_utf8("search") + if search != None: + search = search.strip().lower() + + if group == "used": + title = _("Used Rulesets") + help = _("Non-empty rulesets") + only_used = True + elif group == "static": + title = _("Manual Checks") + help = _("Here you can create explicit checks that are not being created by the automatic service discovery.") + only_used = False + elif search != None: + title = _("Rules matching") + ": " + html.attrencode(search) + help = _("All rules that contain '%s' in their name") % html.attrencode(search) + only_used = False + else: + title, help = g_rulegroups.get(group, (group, None)) + only_used = False + + only_host = html.var("host", "") + only_local = "" # html.var("local") -def mode_hosttags(phase): if phase == "title": - return _("Host tag groups") + if only_host: + return _("%s - %s") % (only_host, title) + else: + return title elif phase == "buttons": - global_buttons() - html.context_button(_("New Tag group"), make_link([("mode", "edit_hosttag")]), "new") - html.context_button(_("New Aux tag"), make_link([("mode", "edit_auxtag")]), "new") + if only_host: + home_button() + if group != "static": + html.context_button(_("All Rulesets"), make_link([("mode", "ruleeditor"), ("host", only_host)]), "back") + html.context_button(only_host, + make_link([("mode", "edithost"), ("host", only_host)]), "host") + else: + global_buttons() + if group != "static": + html.context_button(_("All Rulesets"), make_link([("mode", "ruleeditor")]), "back") + if config.may("wato.hosts") or config.may("wato.seeall"): + html.context_button(_("Folder"), make_link([("mode", "folder")]), "folder") return - hosttags, auxtags = load_hosttags() + elif phase == "action": + return - if phase == "action": - # Deletion of tag groups - del_id = html.var("_delete") - if del_id: - for e in hosttags: - if e[0] == del_id: - # In case of tag group deletion, the operations is a pair of tag_id - # and list of choice-ids. 
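# Note on mode_ineffective_rules() above: the body of rule_is_ineffective()
# is not part of this hunk. A minimal sketch of the idea, assuming all_hosts
# maps each host name to a (host_folder, host) pair and reusing parse_rule()
# and rule_matches_host_and_item() as shown later in this patch:
def rule_is_ineffective_sketch(rule, rule_folder, rulespec, all_hosts):
    value, tag_specs, host_list, item_list, rule_options = parse_rule(rulespec, rule)
    for hostname, (host_folder, host) in all_hosts.items():
        reason = rule_matches_host_and_item(rulespec, tag_specs, host_list,
                     item_list, rule_folder, host_folder, hostname, NO_ITEM)
        if reason == True:
            return False # at least one existing host matches -> effective
    return True # no existing host matches -> the rule is ineffective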
- operations = [ x[0] for x in e[2] ] + if not only_host: + render_folder_path(keepvarnames = ["mode", "local", "group"]) - message = rename_host_tags_after_confirmation(del_id, operations) - if message == True: # no confirmation yet - c = wato_confirm(_("Confirm deletion of the host " - "tag group '%s'") % del_id, - _("Do you really want to delete the " - "host tag group '%s'?") % del_id) - if c == False: - return "" - elif c == None: - return None + if search != None or group == 'static': + search_form(_("Search for rules: "), group != "static" and "rulesets") - if message: - hosttags = [ e for e in hosttags if e[0] != del_id ] - save_hosttags(hosttags, auxtags) - rewrite_config_files_below(g_root_folder) # explicit host tags in all_hosts - log_pending(SYNCRESTART, None, "edit-hosttags", _("Removed host tag group %s (%s)") % (message, del_id)) - return "hosttags", message != True and message or None + if help != None: + help = "".join(help.split("\n", 1)[1:]).strip() + if help: + html.help(help) - # Deletion of auxiliary tags - del_nr = html.var("_delaux") - if del_nr: - nr = int(del_nr) - del_id = auxtags[nr][0] + if only_local and not only_host: + all_rulesets = {} + rs = load_rulesets(g_folder) + for varname, rules in rs.items(): + all_rulesets.setdefault(varname, []) + all_rulesets[varname] += [ (g_folder, rule) for rule in rules ] + else: + all_rulesets = load_all_rulesets() + if only_used: + all_rulesets = dict([ r for r in all_rulesets.items() if len(r[1]) > 0 ]) - # Make sure that this aux tag is not begin used by any tag group - for entry in hosttags: - choices = entry[2] - for e in choices: - if len(e) > 2: - if del_id in e[2]: - raise MKUserError(None, _("You cannot delete this auxiliary tag. " - "It is being used in the tag group %s.") % entry[1]) - operations = { del_id : False } - message = rename_host_tags_after_confirmation(None, operations) - if message == True: # no confirmation yet - c = wato_confirm(_("Confirm deletion of the auxiliary " - "tag '%s'") % del_id, - _("Do you really want to delete the " - "auxiliary tag '%s'?") % del_id) - if c == False: - return "" - elif c == None: - return None + # Select matching rule groups while keeping their configured order + groupnames = [ gn for gn, rulesets in g_rulespec_groups + if only_used or search != None or gn == group or (group and gn.startswith(group + "/")) ] - if message: - del auxtags[nr] - # Remove auxiliary tag from all host tags - for e in hosttags: - choices = e[2] - for choice in choices: - if len(choice) > 2: - if del_id in choice[2]: - choice[2].remove(del_id) + # In case of search we need to sort the groups since main chapters would + # appear more than once otherwise. + if search != None: + groupnames.sort() - save_hosttags(hosttags, auxtags) - rewrite_config_files_below(g_root_folder) # explicit host tags in all_hosts - log_pending(SYNCRESTART, None, "edit-hosttags", _("Removed auxiliary tag %s (%s)") % (message, del_id)) - return "hosttags", message != True and message or None + html.write('
    ') - move_nr = html.var("_move") - if move_nr != None: - if html.check_transaction(): - move_nr = int(move_nr) - if move_nr >= 0: - dir = 1 - else: - move_nr = -move_nr - dir = -1 - moved = hosttags[move_nr] - del hosttags[move_nr] - hosttags[move_nr+dir:move_nr+dir] = [moved] - save_hosttags(hosttags, auxtags) - config.wato_host_tags = hosttags - log_pending(SYNCRESTART, None, "edit-hosttags", _("Changed order of host tag groups")) - return + # Loop over all ruleset groups + something_shown = False + title_shown = False + for groupname in groupnames: + # Show information about a ruleset + # Sort rulesets according to their title + g_rulespec_group[groupname].sort( + cmp = lambda a, b: cmp(a["title"], b["title"])) + for rulespec in g_rulespec_group[groupname]: + + varname = rulespec["varname"] + valuespec = rulespec["valuespec"] - if len(hosttags) + len(auxtags) == 0: - render_main_menu([ - ("edit_hosttag", _("Create new tag group"), "new", "hosttags", - _("Each host tag group will create one dropdown choice in the host configuration.")), - ("edit_auxtag", _("Create new auxiliary tag"), "new", "hosttags", - _("You can have these tags automatically added if certain primary tags are set.")), - ]) + # handle only_used + rules = all_rulesets.get(varname, []) + num_rules = len(rules) + if num_rules == 0 and (only_used or only_local): + continue - else: - table.begin(_("Host tag groups"), - help = (_("Host tags are the basis of Check_MK's rule based configuration. " - "If the first step you define arbitrary tag groups. A host " - "has assigned exactly one tag out of each group. These tags can " - "later be used for defining parameters for hosts and services, " - "such as disable notifications for all hosts with the tags " - "Network device and Test.")), - empty_text = _("You haven't defined any tag groups yet.")) + # handle search + if search != None \ + and not (rulespec["help"] and search in rulespec["help"].lower()) \ + and search not in rulespec["title"].lower() \ + and search not in varname: + continue - if hosttags: - for nr, entry in enumerate(hosttags): - tag_id, title, choices = entry[:3] # forth: dependency information - table.row() - edit_url = make_link([("mode", "edit_hosttag"), ("edit", tag_id)]) - delete_url = html.makeactionuri([("_delete", tag_id)]) - table.cell(_("Actions"), css="buttons") - if nr == 0: - html.empty_icon_button() - else: - html.icon_button(html.makeactionuri([("_move", str(-nr))]), - _("Move this tag group one position up"), "up") - if nr == len(hosttags) - 1: - html.empty_icon_button() - else: - html.icon_button(html.makeactionuri([("_move", str(nr))]), - _("Move this tag group one position down"), "down") - html.icon_button(edit_url, _("Edit this tag group"), "edit") - html.icon_button(delete_url, _("Delete this tag group"), "delete") + # Show static checks rules only in on dedicated page and vice versa + if group != 'static' and groupname.startswith("static/"): + continue + elif group == 'static' and not groupname.startswith("static/"): + continue - table.cell(_("ID"), tag_id) - table.cell(_("Title"), title) - table.cell(_("Type"), (len(choices) == 1 and _("Checkbox") or _("Dropdown"))) - table.cell(_("Choices"), str(len(choices))) - table.cell(_("Demonstration")) - html.begin_form("tag_%s" % tag_id) - host_attribute["tag_%s" % tag_id].render_input(None) - html.end_form() - table.end() + # Handle case where a host is specified + rulespec = g_rulespecs[varname] + this_host = False + if only_host: + num_local_rules = 0 + for f, rule in rules: + value, 
tag_specs, host_list, item_list, rule_options = parse_rule(rulespec, rule) + if only_host and only_host in host_list: + num_local_rules += 1 + else: + num_local_rules = len([ f for (f,r) in rules if f == g_folder ]) - table.begin(_("Auxiliary tags"), - help = _("Auxiliary tags can be attached to other tags. That way " - "you can for example have all hosts with the tag cmk-agent " - "get also the tag tcp. This makes the configuration of " - "your hosts easier."), - empty_text = _("You haven't defined any auxiliary tags.")) + if only_local and num_local_rules == 0: + continue - if auxtags: - table.row() - for nr, (tag_id, title) in enumerate(auxtags): - edit_url = make_link([("mode", "edit_auxtag"), ("edit", nr)]) - delete_url = html.makeactionuri([("_delaux", nr)]) - table.cell(_("Actions"), css="buttons") - html.icon_button(edit_url, _("Edit this auxiliary tag"), "edit") - html.icon_button(delete_url, _("Delete this auxiliary tag"), "delete") - table.cell(_("ID"), tag_id) - table.cell(_("Title"), title) - table.end() + if group != 'static' and (only_used or search != None): + titlename = g_rulegroups[groupname.split("/")[0]][0] + else: + if '/' in groupname: + titlename = groupname.split("/", 1)[1] + else: + titlename = title + if title_shown != titlename: + forms.header(titlename) + forms.container() + title_shown = titlename -def mode_edit_auxtag(phase): - tag_nr = html.var("edit") - new = tag_nr == None - if not new: - tag_nr = int(tag_nr) + something_shown = True - if phase == "title": - if new: - return _("Create new auxiliary tag") + float_cls = '' + if not config.wato_hide_help_in_lists: + if html.help_visible: + float_cls = ' nofloat' + else: + float_cls = ' float' + + url_vars = [("mode", "edit_ruleset"), ("varname", varname)] + if only_host: + url_vars.append(("host", only_host)) + view_url = make_link(url_vars) + html.write('
    ' % + (float_cls, html.strip_tags(rulespec["help"] or ''))) + html.write('%s' % + (num_rules and "nonzero" or "zero", view_url, rulespec["title"])) + html.write('%s
    ' % ("." * 100)) + html.write('
    %d
    ' % + (num_rules and "nonzero" or "zero", num_rules)) + if not config.wato_hide_help_in_lists and rulespec["help"]: + html.help(rulespec["help"]) + html.write('
    ') + + if something_shown: + forms.end() + else: + if only_host: + html.write("
    " + _("There are no rules with an exception for the host %s.") % only_host + "
    ") else: - return _("Edit auxiliary tag") + html.write("
    " + _("There are no rules defined in this folder.") + "
    ") - elif phase == "buttons": - html.context_button(_("All Hosttags"), make_link([("mode", "hosttags")]), "back") - return + html.write('
    ') - hosttags, auxtags = load_hosttags() +def create_new_rule_form(rulespec, hostname = None, item = None, varname = None): + html.begin_form("new_rule", add_transid = False) - if phase == "action": - if html.transaction_valid(): - html.check_transaction() # use up transaction id - if new: - tag_id = html.var("tag_id").strip() - if not tag_id: - raise MKUserError("tag_id", _("Please enter a tag ID")) - validate_tag_id(tag_id, "tag_id") - else: - tag_id = auxtags[tag_nr][0] + html.write('') + if hostname: + label = _("Host %s" % hostname) + ty = _('Host') + if item != NO_ITEM and rulespec["itemtype"]: + label += _(" and %s '%s'") % (rulespec["itemname"], item) + ty = rulespec["itemname"] - title = html.var_utf8("title").strip() - if not title: - raise MKUserError("title", _("Please supply a title " - "for you auxiliary tag.")) + html.write('\n') - # Make sure that this ID is not used elsewhere - for entry in config.wato_host_tags: - tgid = entry[0] - tit = entry[1] - ch = entry[2] - for e in ch: - if e[0] == tag_id: - raise MKUserError("tag_id", - _("This tag id is already being used " - "in the host tag group %s") % tit) + html.write('
    ') + html.button("_new_host_rule", _("Create %s specific rule for: ") % ty) + html.hidden_field("host", hostname) + html.hidden_field("item", mk_repr(item)) + html.write('') + html.write(label) + html.write('
    ') + html.button("_new_rule", _("Create rule in folder: ")) + html.write('') - for nr, (id, name) in enumerate(auxtags): - if nr != tag_nr and id == tag_id: - raise MKUserError("tag_id", - _("This tag id does already exist in the list " - "of auxiliary tags.")) + html.select("rule_folder", folder_selection(g_root_folder), html.var('folder')) + html.write('
    \n') + html.hidden_field("varname", varname) + html.hidden_field("mode", "new_rule") + html.hidden_field('folder', html.var('folder')) + html.end_form() - if new: - auxtags.append((tag_id, title)) - else: - auxtags[tag_nr] = (tag_id, title) - save_hosttags(hosttags, auxtags) - return "hosttags" +def mode_edit_ruleset(phase): + varname = html.var("varname") + item = None + if html.var("check_command"): + check_command = html.var("check_command") + checks = check_mk_local_automation("get-check-information") + if check_command.startswith("check_mk-"): + check_command = check_command[9:] + varname = "checkgroup_parameters:" + checks[check_command].get("group","") + descr_pattern = checks[check_command]["service_description"].replace("%s", "(.*)") + matcher = re.search(descr_pattern, html.var("service_description")) + if matcher: + try: + item = matcher.group(1) + except: + item = None + elif check_command.startswith("check_mk_active-"): + check_command = check_command[16:].split(" ")[0][:-1] + varname = "active_checks:" + check_command - if new: - title = "" - tag_id = "" - else: - tag_id, title = auxtags[tag_nr] + rulespec = g_rulespecs.get(varname) + hostname = html.var("host", "") + if not item: + if html.has_var("item"): + try: + item = mk_eval(html.var("item")) + except: + item = NO_ITEM + else: + item = NO_ITEM - html.begin_form("auxtag") - forms.header(_("Auxiliary Tag")) + if hostname: + hosts = load_hosts(g_folder) + host = hosts.get(hostname) + if not host: + hostname = None # host not found. Should not happen - # Tag ID - forms.section(_("Tag ID")) - if new: - html.text_input("tag_id", "") - html.set_focus("tag_id") - else: - html.write(tag_id) - html.help(_("The internal name of the tag. The special tags " - "snmp, tcp and ping can " - "be used here in order to specify the agent type.")) + if phase == "title": + if not rulespec: + text = html.var("service_description") or varname + return _("No available rule for service %s at host %s") % (text, hostname) + title = rulespec["title"] + if hostname: + title += _(" for host %s") % hostname + if html.has_var("item") and rulespec["itemtype"]: + title += _(" and %s '%s'") % (rulespec["itemname"], item) + return title - # Title - forms.section(_("Title")) - html.text_input("title", title, size = 30) - html.help(_("An alias or description of this auxiliary tag")) + elif phase == "buttons": + global_buttons() + if not rulespec: + html.context_button(_("All Rulesets"), make_link([("mode", "ruleeditor")]), "back") + else: + group = rulespec["group"].split("/")[0] + groupname = g_rulegroups[group][0] + html.context_button(groupname, + make_link([("mode", "rulesets"), ("group", group), ("host", hostname)]), "back") + html.context_button(_("Used Rulesets"), + make_link([("mode", "rulesets"), ("group", "used"), ("host", hostname)]), "usedrulesets") + if hostname: + html.context_button(_("Services"), + make_link([("mode", "inventory"), ("host", hostname)]), "services") + html.context_button(_("Parameters"), + make_link([("mode", "object_parameters"), ("host", hostname), ("service", item)]), "rulesets") + return - # Button and end - forms.end() - html.button("save", _("Save")) - html.hidden_fields() - html.end_form() + elif phase == "action": + if not rulespec: + return + # Folder for the rule actions is defined by _folder + rule_folder = g_folders[html.var("_folder", html.var("folder"))] + check_folder_permissions(rule_folder, "write", True) + rulesets = load_rulesets(rule_folder) + rules = rulesets.get(varname, []) -# Validate the syntactic 
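# Worked example for the check_command dissection above (hypothetical check
# and service): "check_mk-df" loses its "check_mk-" prefix, the group of the
# "df" check yields a varname such as "checkgroup_parameters:filesystem",
# and replacing the "%s" in the service description template with "(.*)"
# lets the item be extracted from the concrete service name:
import re
descr_pattern = "Filesystem %s".replace("%s", "(.*)")  # -> "Filesystem (.*)"
matcher = re.search(descr_pattern, "Filesystem /var")
if matcher:
    item = matcher.group(1)                            # -> "/var"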
form of a tag -def validate_tag_id(id, varname): - if not re.match("^[-a-z0-9A-Z_]*$", id): - raise MKUserError(varname, - _("Invalid tag ID. Only the characters a-z, A-Z, " - "0-9, _ and - are allowed.")) + rulenr = int(html.var("_rulenr")) # rule number relativ to folder + action = html.var("_action") + + if action == "delete": + c = wato_confirm(_("Confirm"), _("Delete rule number %d of folder '%s'?") + % (rulenr + 1, rule_folder["title"])) + if c: + del rules[rulenr] + save_rulesets(rule_folder, rulesets) + mark_affected_sites_dirty(rule_folder) + log_pending(AFFECTED, None, "edit-ruleset", + _("Deleted rule in ruleset '%s'") % rulespec["title"]) + return + elif c == False: # not yet confirmed + return "" + else: + return None # browser reload -def mode_edit_hosttag(phase): - tag_id = html.var("edit") - new = tag_id == None + elif action == "insert": + if not html.check_transaction(): + return None # browser reload + rules[rulenr:rulenr] = [rules[rulenr]] + save_rulesets(rule_folder, rulesets) + mark_affected_sites_dirty(rule_folder) + + log_pending(AFFECTED, None, "edit-ruleset", + _("Inserted new rule in ruleset %s") % rulespec["title"]) + return - if phase == "title": - if new: - return _("Create new tag group") else: - return _("Edit tag group") + if not html.check_transaction(): + return None # browser reload + rule = rules[rulenr] + del rules[rulenr] + if action == "up": + rules[rulenr-1:rulenr-1] = [ rule ] + elif action == "down": + rules[rulenr+1:rulenr+1] = [ rule ] + elif action == "top": + rules.insert(0, rule) + else: + rules.append(rule) + save_rulesets(rule_folder, rulesets) + mark_affected_sites_dirty(rule_folder) + log_pending(AFFECTED, None, "edit-ruleset", + _("Changed order of rules in ruleset %s") % rulespec["title"]) + return - elif phase == "buttons": - html.context_button(_("All Hosttags"), make_link([("mode", "hosttags")]), "back") + if not rulespec: + text = html.var("service_description") or varname + html.write("
    " + _("There are no rules availabe for %s.") % html.attrencode(text) + "
    ") return - hosttags, auxtags = load_hosttags() - title = "" - choices = [] - if not new: - for entry in hosttags: - id, tit, ch = entry[:3] - if id == tag_id: - title = tit - choices = ch - break - - vs_choices = ListOf( - Tuple( - elements = [ - TextAscii( - title = _("Tag ID"), - size = 16, - regex="^[-a-z0-9A-Z_]*$", - none_is_empty = True, - regex_error = _("Invalid tag ID. Only the characters a-z, A-Z, " - "0-9, _ and - are allowed.")), - TextUnicode( - title = _("Description"), - allow_empty = False, - size = 40), + if not hostname: + render_folder_path(keepvarnames = ["mode", "varname"]) - Foldable( - ListChoice( - title = _("Auxiliary tags"), - # help = _("These tags will implicitely added to a host if the " - # "user selects this entry in the tag group. Select multiple " - # "entries with the Ctrl key."), - choices = auxtags)), + # Titel ist schon Seitentitel + # html.write("

    " + rulespec["title"] + "

    ") + if not config.wato_hide_varnames: + display_varname = ':' in varname and '%s["%s"]' % tuple(varname.split(":")) or varname + html.write('
    %s
    ' % display_varname) - ], - show_titles = True, - orientation = "horizontal"), + html.help(rulespec["help"]) - add_label = _("Add tag choice"), - row_label = "@. Choice") + # Collect all rulesets + all_rulesets = load_all_rulesets() + ruleset = all_rulesets.get(varname) + if not ruleset: + html.write("
    " + _("There are no rules defined in this set.") + "
    ") - if phase == "action": - if html.transaction_valid(): - if new: - html.check_transaction() # use up transaction id - tag_id = html.var("tag_id").strip() - validate_tag_id(tag_id, "tag_id") - if len(tag_id) == 0: - raise MKUserError("tag_id", _("Please specify an ID for your tag group.")) - if not re.match("^[-a-z0-9A-Z_]*$", tag_id): - raise MKUserError("tag_id", _("Invalid tag group ID. Only the characters a-z, A-Z, 0-9, _ and - are allowed.")) - for entry in config.wato_host_tags: - tgid = entry[0] - tit = entry[1] - if tgid == tag_id: - raise MKUserError("tag_id", _("The tag group ID %s is already used by the tag group '%s'.") % (tag_id, tit)) + else: + alread_matched = False + match_keys = set([]) # in case if match = "dict" + last_folder = None - title = html.var_utf8("title").strip() - if not title: - raise MKUserError("title", _("Please specify a title for your host tag group.")) + skip_this_folder = False + for rulenr in range(0, len(ruleset)): + folder, rule = ruleset[rulenr] + if folder != last_folder: + skip_this_folder = False + if last_folder != None: + table.end() + first_in_group = True + alias_path = get_folder_aliaspath(folder, show_main = False) + last_folder = folder - new_choices = forms.get_input(vs_choices, "choices") - have_none_tag = False - for nr, (id, descr, aux) in enumerate(new_choices): - if id or descr: - if not id: - id = None - if have_none_tag: - raise MKUserError("choices_%d_id" % (nr+1), _("Only on tag may be empty.")) - have_none_tag = True - # Make sure tag ID is unique within this group - for (n, x) in enumerate(new_choices): - if n != nr and x[0] == id: - raise MKUserError("choices_id_%d" % (nr+1), _("Tags IDs must be unique. You've used %s twice.") % id) + if g_folder != g_root_folder and not folder_is_parent_of(folder, g_folder): + skip_this_folder = True + continue - if id: - # Make sure this ID is not used elsewhere - for entry in config.wato_host_tags: - tgid = entry[0] - tit = entry[1] - ch = entry[2] - # Do not compare the taggroup with itselfs - if tgid != tag_id: - for e in ch: - # Check primary and secondary tags - if id == e[0] or len(e) > 2 and id in e[2]: - raise MKUserError("choices_id_%d" % (nr+1), - _("The tag ID '%s' is already being used by the choice " - "'%s' in the tag group '%s'.") % - ( id, e[1], tit )) + table.begin("rules", title="%s %s" % (_("Rules in folder"), alias_path), + css="ruleset", searchable=False, sortable=False) + rel_rulenr = 0 + else: + if skip_this_folder: + continue - # Also check all defined aux tags even if they are not used anywhere - for tag, descr in auxtags: - if id == tag: - raise MKUserError("choices_id_%d" % (nr+1), - _("The tag ID '%s' is already being used as auxiliary tag.") % id) + first_in_group = False + rel_rulenr += 1 - if len(new_choices) == 0: - raise MKUserError("id_0", _("Please specify at least one tag.")) - if len(new_choices) == 1 and new_choices[0][0] == None: - raise MKUserError("id_0", _("Tags with only one choice must have an ID.")) + last_in_group = (rulenr == len(ruleset) - 1 or \ + ruleset[rulenr+1][0] != folder) - if new: - taggroup = tag_id, title, new_choices - hosttags.append(taggroup) - save_hosttags(hosttags, auxtags) - # Make sure, that all tags are active (also manual ones from main.mk) - config.load_config() - declare_host_tag_attributes() - rewrite_config_files_below(g_root_folder) # explicit host tags in all_hosts - log_pending(SYNCRESTART, None, "edit-hosttags", _("Created new host tag group '%s'") % tag_id) - return "hosttags", _("Created new host tag group 
'%s'") % title - else: - new_hosttags = [] - for entry in hosttags: - if entry[0] == tag_id: - new_hosttags.append((tag_id, title, new_choices)) - else: - new_hosttags.append(entry) + value, tag_specs, host_list, item_list, rule_options = parse_rule(rulespec, rule) + disabled = rule_options.get("disabled") + table.row(disabled and "disabled" or None) - # This is the major effort of WATO when it comes to - # host tags: renaming and deleting of tags that might be - # in use by folders, hosts and rules. First we create a - # kind of "patch" from the old to the new tags. The renaming - # of a tag is detected by comparing the titles. Addition - # of new tags is not a problem and need not be handled. - # Result of this is the dict 'operations': it's keys are - # current tag names, its values the corresponding new names - # or False in case of tag removals. - operations = {} - # Detect renaming - new_by_title = dict([e[:2] for e in new_choices]) - for entry in choices: - tag, tit = entry[:2] # optional third element: aux tags - if tit in new_by_title: - new_tag = new_by_title[tit] - if new_tag != tag: - operations[tag] = new_tag # might be None + # Rule matching + if hostname: + table.cell(_("Ma.")) + if disabled: + reason = _("This rule is disabled") + else: + reason = rule_matches_host_and_item( + rulespec, tag_specs, host_list, item_list, folder, g_folder, hostname, item) - # Detect removal - for entry in choices: - tag, tit = entry[:2] # optional third element: aux tags - if tag != None \ - and tag not in [ e[0] for e in new_choices ] \ - and tag not in operations: - # remove explicit tag (hosts/folders) or remove it from tag specs (rules) - operations[tag] = False + # Handle case where dict is constructed from rules + if reason == True and rulespec["match"] == "dict": + if len(value) == 0: + title = _("This rule matches, but does not define any parameters.") + img = 'imatch' + else: + new_keys = set(value.keys()) + if set_is_disjoint(match_keys, new_keys): + title = _("This rule matches and defines new parameters.") + img = 'match' + elif new_keys.issubset(match_keys): + title = _("This rule matches, but all of its parameters are overridden by previous rules.") + img = 'imatch' + else: + title = _("This rule matches, but some of its parameters are overridden by previous rules.") + img = 'pmatch' + match_keys.update(new_keys) - # Now check, if any folders, hosts or rules are affected - message = rename_host_tags_after_confirmation(tag_id, operations) - if message: - save_hosttags(new_hosttags, auxtags) - config.load_config() - declare_host_tag_attributes() - rewrite_config_files_below(g_root_folder) # explicit host tags in all_hosts - log_pending(SYNCRESTART, None, "edit-hosttags", _("Edited host tag group %s (%s)") % (message, tag_id)) - return "hosttags", message != True and message or None + elif reason == True and (not alread_matched or rulespec["match"] == "all"): + title = _("This rule matches for the host '%s'") % hostname + if rulespec["itemtype"]: + title += _(" and the %s '%s'.") % (rulespec["itemname"], item) + else: + title += "." 
+ img = 'match' + alread_matched = True + elif reason == True: + title = _("This rule matches, but is overridden by a previous rule.") + img = 'imatch' + alread_matched = True + else: + title = _("This rule does not match: %s") % reason + img = 'nmatch' + html.write(' ' % (title, img)) - return "hosttags" + # Disabling + table.cell("", css="buttons") + if disabled: + html.icon(_("This rule is currently disabled and will not be applied"), "disabled") + else: + html.empty_icon() + # Actions + table.cell(_("Order"), css="buttons rulebuttons") + if not first_in_group: + rule_button("top", _("Move this rule to the top of the list"), folder, rel_rulenr) + rule_button("up", _("Move this rule one position up"), folder, rel_rulenr) + else: + rule_button(None) + rule_button(None) + if not last_in_group: + rule_button("down", _("Move this rule one position down"), folder, rel_rulenr) + rule_button("bottom", _("Move this rule to the bottom of the list"), folder, rel_rulenr) + else: + rule_button(None) + rule_button(None) + table.cell(_("Actions"), css="buttons rulebuttons") + edit_url = make_link([ + ("mode", "edit_rule"), + ("varname", varname), + ("rulenr", rel_rulenr), + ("host", hostname), + ("item", mk_repr(item)), + ("rule_folder", folder[".path"])]) + html.icon_button(edit_url, _("Edit this rule"), "edit") + rule_button("insert", _("Insert a copy of this rule in current folder"), + folder, rel_rulenr) + rule_button("delete", _("Delete this rule"), folder, rel_rulenr) - html.begin_form("hosttaggroup") - forms.header(_("Edit group") + (tag_id and " %s" % tag_id or "")) - # Tag ID - forms.section(_("Internal ID")) - html.help(_("The internal ID of the tag group is used to store the tag's " - "value in the host properties. It cannot be changed later.")) - if new: - html.text_input("tag_id") - html.set_focus("tag_id") - else: - html.write(tag_id) + # Folder + # alias_path = get_folder_aliaspath(folder, show_main = False) + # classes = "" + # if first_in_group: + # classes += "first" + # if last_in_group: + # classes += " last" + # html.write('
    %s
    ' % (classes, alias_path)) - # Title - forms.section(_("Title")) - html.help(_("An alias or description of this tag group")) - html.text_input("title", title, size = 30) + # Conditions + table.cell(_("Conditions"), css="condition") + render_conditions(rulespec, tag_specs, host_list, item_list, varname, folder) - # Choices - forms.section(_("Choices")) - html.help(_("The first choice of a tag group will be its default value. " - "If a tag group has only one choice, it will be displayed " - "as a checkbox and set or not set the only tag. If it has " - "more choices you may leave at most one tag id empty. A host " - "with that choice will not get any tag of this group.

    " - "The tag ID must contain only of letters, digits and " - "underscores.

    Renaming tags ID: if you want " - "to rename the ID of a tag, then please make sure that you do not " - "change its title at the same time! Otherwise WATO will not " - "be able to detect the renaming and cannot exchange the tags " - "in all folders, hosts and rules accordingly.")) - forms.input(vs_choices, "choices", choices) + # Value + table.cell(_("Value")) + if rulespec["valuespec"]: + try: + value_html = rulespec["valuespec"].value_to_text(value) + except: + try: + reason = "" + rulespec["valuespec"].validate_datatype(value, "") + except Exception, e: + reason = str(e) - # Button and end - forms.end() - html.button("save", _("Save")) - html.hidden_fields() - html.end_form() + value_html = '' \ + + _("The value of this rule is not valid. ") \ + + reason + else: + img = value and "yes" or "no" + title = value and _("This rule results in a positive outcome.") \ + or _("this rule results in a negative outcome.") + value_html = '' \ + % (title, img) + html.write(value_html) + # Comment + table.cell(_("Comment")) + url = rule_options.get("docu_url") + if url: + html.icon_button(url, _("Context information about this rule"), "url", target="_blank") + html.write(" ") + html.write(html.attrencode(rule_options.get("comment", ""))) -def load_hosttags(): - filename = multisite_dir + "hosttags.mk" - if not os.path.exists(filename): - return [], [] - try: - vars = { - "wato_host_tags" : [], - "wato_aux_tags" : []} - execfile(filename, vars, vars) - # Convert manually crafted host tags tags WATO-style. This - # makes the migration easier - for taggroup in vars["wato_host_tags"]: - for nr, entry in enumerate(taggroup[2]): - if len(entry) <= 2: - taggroup[2][nr] = entry + ([],) - return vars["wato_host_tags"], vars["wato_aux_tags"] + table.end() - except Exception, e: - if config.debug: - raise MKGeneralException(_("Cannot read configuration file %s: %s" % - (filename, e))) - return [], [] + create_new_rule_form(rulespec, hostname, item, varname) -def save_hosttags(hosttags, auxtags): - make_nagios_directory(multisite_dir) - out = create_user_file(multisite_dir + "hosttags.mk", "w") - out.write("# Written by WATO\n# encoding: utf-8\n\n") - out.write("wato_host_tags += \\\n%s\n\n" % pprint.pformat(hosttags)) - out.write("wato_aux_tags += \\\n%s\n" % pprint.pformat(auxtags)) -# Handle renaming and deletion of host tags: find affected -# hosts, folders and rules. Remove or fix those rules according -# the the users' wishes. In case auf auxiliary tags the tag_id -# is None. In other cases it is the id of the tag group currently -# being edited. 
-def rename_host_tags_after_confirmation(tag_id, operations): - mode = html.var("_repair") - if mode == "abort": - raise MKUserError("id_0", _("Aborting change.")) +def folder_selection(folder, depth=0): + if depth: + title_prefix = "   " * depth + "` " + "- " * depth + else: + title_prefix = "" + sel = [ (folder[".path"], HTML(title_prefix + html.attrencode(folder["title"]))) ] - elif mode: - if tag_id and type(operations) == list: # make attribute unknown to system, important for save() operations - undeclare_host_tag_attribute(tag_id) - affected_folders, affected_hosts, affected_rulespecs = \ - change_host_tags_in_folders(tag_id, operations, mode, g_root_folder) - return _("Modified folders: %d, modified hosts: %d, modified rulesets: %d" % - (len(affected_folders), len(affected_hosts), len(affected_rulespecs))) + subfolders = sorted(folder[".folders"].values(), cmp = lambda x,y : cmp(x.get("title").lower(), y.get("title").lower())) + for subfolder in subfolders: + sel += folder_selection(subfolder, depth + 1) + return sel - message = "" - affected_folders, affected_hosts, affected_rulespecs = \ - change_host_tags_in_folders(tag_id, operations, "check", g_root_folder) - if affected_folders: - message += _("Affected folders with an explicit reference to this tag " - "group and that are affected by the change") + ":
      " - for folder in affected_folders: - message += '
    • %s
    • ' % ( - make_link_to([("mode", "editfolder")], folder), - folder["title"]) - message += "
    " +def create_rule(rulespec, hostname=None, item=NO_ITEM): + new_rule = [] + valuespec = rulespec["valuespec"] + if valuespec: + new_rule.append(valuespec.default_value()) + if hostname: + new_rule.append([hostname]) + else: + new_rule.append(ALL_HOSTS) # bottom: default to catch-all rule + if rulespec["itemtype"]: + if item != NO_ITEM: + new_rule.append(["%s$" % item]) + else: + new_rule.append([""]) + return tuple(new_rule) - if affected_hosts: - message += _("Hosts where this tag group is explicitely set " - "and that are effected by the change") + ":
    • " - for nr, host in enumerate(affected_hosts): - if nr > 20: - message += "... (%d more)" % (len(affected_hosts) - 20) - break - elif nr > 0: - message += ", " +def rule_button(action, help=None, folder=None, rulenr=0): + if action == None: + html.empty_icon_button() + else: + vars = [ + ("mode", html.var('mode', 'edit_ruleset')), + ("varname", html.var('varname')), + ("_folder", folder[".path"]), + ("_rulenr", str(rulenr)), + ("_action", action) + ] + if html.var("rule_folder"): + vars.append(("rule_folder", html.var("rule_folder"))) + if html.var("host"): + vars.append(("host", html.var("host"))) + if html.var("item"): + vars.append(("item", html.var("item"))) + url = make_action_link(vars) + html.icon_button(url, help, action) - message += '%s' % ( - make_link([("mode", "edithost"), ("host", host[".name"])]), - host[".name"]) - message += "
    " +def parse_rule(ruleset, orig_rule): + rule = orig_rule + try: + if type(rule[-1]) == dict: + rule_options = rule[-1] + rule = rule[:-1] + else: + rule_options = {} - if affected_rulespecs: - message += _("Rulesets that contain rules with references to the changed tags") + ":
      " - for rulespec in affected_rulespecs: - message += '
    • %s
    • ' % ( - make_link([("mode", "edit_ruleset"), ("varname", rulespec["varname"])]), - rulespec["title"]) - message += "
    " + # Extract value from front, if rule has a value + if ruleset["valuespec"]: + value = rule[0] + rule = rule[1:] + else: + if rule[0] == NEGATE: + value = False + rule = rule[1:] + else: + value = True - if not message and type(operations) == tuple: # deletion of unused tag group - html.write("
    ") - html.begin_form("confirm") - html.write(_("Please confirm the deletion of the tag group.")) - html.button("_abort", _("Abort")) - html.button("_do_confirm", _("Proceed")) - html.hidden_fields(add_action_vars = True) - html.end_form() - html.write("
    ") + # Extract liste of items from back, if rule has items + if ruleset["itemtype"]: + item_list = rule[-1] + rule = rule[:-1] + else: + item_list = None - elif message: - if type(operations) == list: - wato_html_head(_("Confirm tag deletion")) + # Rest is host list or tag list + host list + if len(rule) == 1: + tag_specs = [] + host_list = rule[0] else: - wato_html_head(_("Confirm tag modifications")) - html.write("
    ") - html.write("

    " + _("Your modifications affects some objects") + "

    ") - html.write(message) - html.write("
    " + _("WATO can repair things for you. It can rename tags in folders, host and rules. " - "Removed tag groups will be removed from hosts and folders, removed tags will be " - "replaced with the default value for the tag group (for hosts and folders). What " - "rules concern, you have to decide how to proceed.")) - html.begin_form("confirm") + tag_specs = rule[0] + host_list = rule[1] + + # Remove folder tag from tag list + tag_specs = filter(lambda t: not t.startswith("/"), tag_specs) + + return value, tag_specs, host_list, item_list, rule_options # (item_list currently not supported) - # Check if operations contains removal - if type(operations) == list: - have_removal = True + except Exception, e: + raise MKGeneralException(_("Invalid rule %s") % (orig_rule,)) + + +def rule_matches_host_and_item(rulespec, tag_specs, host_list, item_list, + rule_folder, host_folder, hostname, item): + reasons = [] + host = host_folder[".hosts"][hostname] + hostname_match = False + negate = False + regex_match = False + + for check_host in host_list: + if check_host == "@all" or hostname == check_host: + hostname_match = True + break else: - have_removal = False - for new_val in operations.values(): - if not new_val: - have_removal = True + if check_host[0] == '!': + check_host = check_host[1:] + negate = True + if check_host[0] == '~': + check_host = check_host[1:] + regex_match = True + + if not regex_match and hostname == check_host: + if negate: + break + hostname_match = True + break + elif regex_match and regex(check_host).match(hostname): + if negate: break + hostname_match = True + break - if len(affected_rulespecs) > 0 and have_removal: - html.write("
    " + _("Some tags that are used in rules have been removed by you. What " - "shall we do with that rules?") + "
      ") - html.radiobutton("_repair", "remove", True, _("Just remove the affected tags from the rules.")) - html.write("
      ") - html.radiobutton("_repair", "delete", False, _("Delete rules containing tags that have been removed, if tag is used in a positive sense. Just remove that tag if it's used negated.")) - else: - html.write("
        ") - html.radiobutton("_repair", "repair", True, _("Fix affected folders, hosts and rules.")) + # No Match until now, but negate, so thats a match + if negate: + hostname_match = True + break - html.write("
        ") - html.radiobutton("_repair", "abort", False, _("Abort your modifications.")) - html.write("
      ") + if not hostname_match: + reasons.append(_("The host name does not match.")) - html.button("_do_confirm", _("Proceed"), "") - html.hidden_fields(add_action_vars = True) - html.end_form() - html.write("
    ") - return False + for tag in tag_specs: + if tag[0] != '/' and tag[0] != '!' and tag not in host[".tags"]: + reasons.append(_("The host is missing the tag %s" % tag)) + elif tag[0] == '!' and tag[1:] in host[".tags"]: + reasons.append(_("The host has the tag %s" % tag)) - return True + if not is_indirect_parent_of(host_folder, rule_folder): + reasons.append(_("The rule does not apply to the folder of the host.")) -# operation == None -> tag group is deleted completely -# tag_id == None -> Auxiliary tag has been deleted, no -# tag group affected -def change_host_tags_in_folders(tag_id, operations, mode, folder): - need_save = False - affected_folders = [] - affected_hosts = [] - affected_rulespecs = [] - if tag_id: - attrname = "tag_" + tag_id - attributes = folder["attributes"] - if attrname in attributes: # this folder has set the tag group in question - if type(operations) == list: # deletion of tag group - if attrname in attributes: - affected_folders.append(folder) - if mode != "check": - del attributes[attrname] - need_save = True - else: - current = attributes[attrname] - if current in operations: - affected_folders.append(folder) - if mode != "check": - new_tag = operations[current] - if new_tag == False: # tag choice has been removed -> fall back to default - del attributes[attrname] - else: - attributes[attrname] = new_tag - need_save = True - if need_save: - try: - save_folder(folder) - except MKAuthException, e: - # Ignore MKAuthExceptions of locked host.mk files - pass + # Check items + if item != NO_ITEM and rulespec["itemtype"]: + item_matches = False + for i in item_list: + if re.match(i, str(item)): + item_matches = True + break + if not item_matches: + reasons.append(_("The %s %s does not match this rule.") % + (rulespec["itemname"], item)) - for subfolder in folder[".folders"].values(): - aff_folders, aff_hosts, aff_rulespecs = change_host_tags_in_folders(tag_id, operations, mode, subfolder) - affected_folders += aff_folders - affected_hosts += aff_hosts - affected_rulespecs += aff_rulespecs + if len(reasons) == 0: + return True + else: + return " ".join(reasons) - load_hosts(folder) - affected_hosts += change_host_tags_in_hosts(folder, tag_id, operations, mode, folder[".hosts"]) +def is_indirect_parent_of(pfolder, sfolder): + return pfolder == sfolder or \ + ('.parent' in pfolder and + is_indirect_parent_of(pfolder[".parent"], sfolder)) - affected_rulespecs += change_host_tags_in_rules(folder, operations, mode) - return affected_folders, affected_hosts, affected_rulespecs -def change_host_tags_in_hosts(folder, tag_id, operations, mode, hostlist): - need_save = False - affected_hosts = [] - for hostname, host in hostlist.items(): - attrname = "tag_" + tag_id - if attrname in host: - if type(operations) == list: # delete complete tag group - affected_hosts.append(host) - if mode != "check": - del host[attrname] - need_save = True - else: - if host[attrname] in operations: - affected_hosts.append(host) - if mode != "check": - new_tag = operations[host[attrname]] - if new_tag == False: # tag choice has been removed -> fall back to default - del host[attrname] - else: - host[attrname] = new_tag - need_save = True - if need_save: - try: - save_hosts(folder) - except MKAuthException, e: - # Ignore MKAuthExceptions of locked host.mk files - pass - return affected_hosts +def construct_rule(ruleset, value, tag_specs, host_list, item_list, rule_options): + if ruleset["valuespec"]: + rule = [ value ] + elif not value: + rule = [ NEGATE ] + else: + rule = [] + if tag_specs != 
[]: + rule.append(tag_specs) + rule.append(host_list) + if item_list != None: + rule.append(item_list) + # Append rule options, but only if they are not trivial. That way we + # keep as close as possible to the original Check_MK in rules.mk so that + # command line users will feel at home... + ro = {} + if rule_options.get("disabled"): + ro["disabled"] = True + if rule_options.get("comment"): + ro["comment"] = rule_options["comment"] + if rule_options.get("docu_url"): + ro["docu_url"] = rule_options["docu_url"] -# The function parses all rules in all rulesets and looks -# for host tags that have been removed or renamed. If tags -# are removed then the depending on the mode affected rules -# are either deleted ("delete") or the vanished tags are -# removed from the rule ("remove"). -def change_host_tags_in_rules(folder, operations, mode): - need_save = False - affected_rulespecs = [] - all_rulesets = load_rulesets(folder) - for varname, ruleset in all_rulesets.items(): - rulespec = g_rulespecs[varname] - rules_to_delete = set([]) - for nr, rule in enumerate(ruleset): - modified = False - value, tag_specs, host_list, item_list, rule_options = parse_rule(rulespec, rule) + # Preserve other keys that we do not know of + for k,v in rule_options.items(): + if k not in [ "disabled", "comment", "docu_url"]: + ro[k] = v + if ro: + rule.append(ro) - # Handle deletion of complete tag group - if type(operations) == list: # this list of tags to remove - for tag in operations: - if tag != None and (tag in tag_specs or "!"+tag in tag_specs): - modified = True - if rulespec not in affected_rulespecs: - affected_rulespecs.append(rulespec) - if tag in tag_specs and mode == "delete": - rules_to_delete.add(nr) - elif tag in tag_specs: - tag_specs.remove(tag) - elif "+"+tag in tag_specs: - tag_specs.remove("!"+tag) + return tuple(rule) - # Removal or renamal of single tag choices - else: - for old_tag, new_tag in operations.items(): - # The case that old_tag is None (an empty tag has got a name) - # cannot be handled when it comes to rules. Rules do not support - # such None-values. - if not old_tag: - continue +def render_conditions(ruleset, tagspecs, host_list, item_list, varname, folder): + html.write("
      ") - if old_tag in tag_specs or ("!" + old_tag) in tag_specs: - modified = True - if rulespec not in affected_rulespecs: - affected_rulespecs.append(rulespec) - if mode != "check": - if old_tag in tag_specs: - tag_specs.remove(old_tag) - if new_tag: - tag_specs.append(new_tag) - elif mode == "delete": - rules_to_delete.add(nr) - # negated tag has been renamed or removed - if "!"+old_tag in tag_specs: - tag_specs.remove("!"+old_tag) - if new_tag: - tag_specs.append("!"+new_tag) - # the case "delete" need not be handled here. Negated - # tags can always be removed without changing the rule's - # behaviour. - if modified: - ruleset[nr] = construct_rule(rulespec, value, tag_specs, host_list, item_list, rule_options) - need_save = True + # Host tags + for tagspec in tagspecs: + if tagspec[0] == '!': + negate = True + tag = tagspec[1:] + else: + negate = False + tag = tagspec - rules_to_delete = list(rules_to_delete) - rules_to_delete.sort() - for nr in rules_to_delete[::-1]: - del ruleset[nr] - if need_save: - save_rulesets(folder, all_rulesets) - affected_rulespecs.sort(cmp = lambda a, b: cmp(a["title"], b["title"])) - return affected_rulespecs + html.write('
<li class="condition">')
+        alias = config.tag_alias(tag)
+        group_alias = config.tag_group_title(tag)
+        if alias:
+            if group_alias:
+                html.write(_("Host") + ": " + group_alias + " " + _("is") + " ")
+                if negate:
+                    html.write("%s " % _("not"))
+            else:
+                if negate:
+                    html.write(_("Host does not have tag"))
+                else:
+                    html.write(_("Host has tag"))
+            html.write(" <b>" + alias + "</b>")
+        else:
+            if negate:
+                html.write(_("Host does not have the tag ") + "<tt>" + tag + "</tt>")
+            else:
+                html.write(_("Host has the tag ") + "<tt>" + tag + "</tt>")
+        html.write('</li>')
+
+    # Explicit list of hosts
+    if host_list != ALL_HOSTS:
+        condition = None
+        if host_list == []:
+            condition = _("This rule never applies due to an empty list of explicit hosts!")
+        elif host_list[-1] != ALL_HOSTS[0]:
+            tt_list = []
+            for h in host_list:
+                f = find_host(h)
+                if f:
+                    uri = html.makeuri([("mode", "edithost"), ("folder", f[".path"]), ("host", h)])
+                    host_spec = '<a href="%s">%s</a>' % (uri, h)
+                else:
+                    host_spec = h
+                tt_list.append("<tt><b>%s</b></tt>" % host_spec)
+            if len(host_list) == 1:
+                condition = _("Host name is %s") % tt_list[0]
+            else:
+                condition = _("Host name is ") + ", ".join(tt_list[:-1])
+                condition += _(" or ") + tt_list[-1]
+        elif host_list[0][0] == '!':
+            hosts = [ h[1:] for h in host_list[:-1] ]
+            condition = _("Host is not one of ") + ", ".join(hosts)
+        # other cases should not occur, e.g. list of explicit hosts
+        # plus ALL_HOSTS.
+        if condition:
+            html.write('
    • %s
    • ' % condition) + + # Item list + if ruleset["itemtype"] and item_list != ALL_SERVICES: + tt_list = [] + for t in item_list: + if t.endswith("$"): + tt_list.append("%s %s" % (_("is"), t[:-1])) + else: + tt_list.append("%s %s" % (_("begins with"), t)) + if ruleset["itemtype"] == "service": + condition = _("Service name ") + " or ".join(tt_list) + elif ruleset["itemtype"] == "item": + condition = ruleset["itemname"] + " " + " or ".join(tt_list) + html.write('
    • %s
    • ' % condition) -#. -# .-Rule-Editor----------------------------------------------------------. -# | ____ _ _____ _ _ _ | -# | | _ \ _ _| | ___ | ____|__| (_) |_ ___ _ __ | -# | | |_) | | | | |/ _ \ | _| / _` | | __/ _ \| '__| | -# | | _ <| |_| | | __/ | |__| (_| | | || (_) | | | -# | |_| \_\\__,_|_|\___| |_____\__,_|_|\__\___/|_| | -# | | -# +----------------------------------------------------------------------+ -# | WATO's awesome rule editor: Lets the user edit rule based parameters | -# | from main.mk. | -# '----------------------------------------------------------------------' + html.write("
    ") -def mode_ruleeditor(phase): - only_host = html.var("host", "") - only_local = "" # html.var("local") - if phase == "title": - if only_host: - return _("Rules effective on host ") + only_host - else: - return _("Rule-Based Configuration of Host & Service Parameters") +def ruleeditor_hover_code(varname, rulenr, mode, boolval, folder=None): + if boolval in [ True, False ]: + url = html.makeactionuri([("_rulenr", rulenr), ("_action", "toggle")]) + else: + url = make_link_to([("mode", mode), ("varname", varname), ("rulenr", rulenr)], folder or g_folder) + return \ + ' onmouseover="this.style.cursor=\'pointer\'; this.style.backgroundColor=\'#b7ced3\';" ' \ + ' onmouseout="this.style.cursor=\'auto\'; this.style.backgroundColor=\'#a7bec3\';" ' \ + ' onclick="location.href=\'%s\'"' % url - elif phase == "buttons": - global_buttons() - if only_host: - html.context_button(only_host, - make_link([("mode", "edithost"), ("host", only_host)]), "host") - return - elif phase == "action": - return +def get_rule_conditions(ruleset): + tag_list = get_tag_conditions() - if not only_host: - render_folder_path(keepvarnames = ["mode", "local"]) + # Host list + if not html.get_checkbox("explicit_hosts"): + host_list = ALL_HOSTS else: - html.write("

    %s: %s

    " % (_("Host"), only_host)) + negate = html.get_checkbox("negate_hosts") + nr = 0 + host_list = ListOfStrings().from_html_vars("hostlist") + if negate: + host_list = [ "!" + h for h in host_list ] + # append ALL_HOSTS to negated host lists + if len(host_list) > 0 and host_list[0][0] == '!': + host_list += ALL_HOSTS + elif len(host_list) == 0 and negate: + host_list = ALL_HOSTS # equivalent - # Group names are separated with "/" into main group and optional subgroup. - # Do not loose carefully manually crafted order of groups! - groupnames = [] - for gn, rulesets in g_rulespec_groups: - main_group = gn.split('/')[0] - if main_group not in groupnames: - groupnames.append(main_group) - menu = [] - for groupname in groupnames + ["used"]: - url = make_link([("mode", "rulesets"), ("group", groupname), - ("host", only_host), ("local", only_local)]) - if groupname == "used": - title = _("Used Rulesets") - help = _("Show only modified rulesets
    (all rulesets with at least one rule)") - icon = "usedrulesets" + # Item list + itemtype = ruleset["itemtype"] + if itemtype: + explicit = html.get_checkbox("explicit_services") + if not explicit: + item_list = [ "" ] else: - title, help = g_rulegroups.get(groupname, (groupname, "")) - icon = "rulesets" - help = help.split('\n')[0] # Take only first line as button text - menu.append((url, title, icon, "rulesets", help)) - render_main_menu(menu) - - html.write("
    ") - rule_search_form() + itemenum = ruleset["itemenum"] + if itemenum: + itemspec = ListChoice(choices = itemenum, columns = 3) + item_list = [ x+"$" for x in itemspec.from_html_vars("item") ] + else: + vs = ListOfStrings(valuespec = RegExpUnicode()) + item_list = vs.from_html_vars("itemlist") + vs.validate_value(item_list, "itemlist") + if len(item_list) == 0: + raise MKUserError("item_0", _("Please specify at least one %s or " + "this rule will never match.") % ruleset["itemname"]) + else: + item_list = None + return tag_list, host_list, item_list -def rule_search_form(): - html.begin_form("search") - html.write(_("Search for rules: ")) - html.text_input("search", size=32) - html.hidden_fields() - html.hidden_field("mode", "rulesets") - html.set_focus("search") - html.write(" ") - html.button("_do_seach", _("Search")) - html.end_form() - html.write("
    ") +def date_and_user(): + return time.strftime("%F", time.localtime()) + " " + config.user_id + ": " -def mode_rulesets(phase): - group = html.var("group") # obligatory - search = html.var("search") - if search != None: - search = search.strip().lower() - if group == "used": - title = _("Used Rulesets") - help = _("Non-empty rulesets") - only_used = True - elif search != None: - title = _("Rules matching ") + search - help = _("All rules that contain '%s' in their name") % search - only_used = False - else: - title, help = g_rulegroups.get(group, (group, None)) - only_used = False +def mode_edit_rule(phase, new = False): + # Due to localization this cannot be defined in the global context! + vs_rule_options = Dictionary( + title = _("Additional options"), + optional_keys = False, + render = "form", + elements = [ + ( "comment", + TextUnicode( + title = _("Comment"), + help = _("An optional comment that helps you document the purpose of " + "this rule"), + size = 80, + attrencode = True, + prefix_buttons = [ ("insertdate", date_and_user, _("Prefix date and your name to the comment")) ] + ) + ), + ( "docu_url", + TextAscii( + title = _("Documentation-URL"), + help = _("An optional URL pointing to documentation or any other page. This will be displayed " + "as an icon and open a new page when clicked. " + "You can use either global URLs (beginning with http://), absolute local urls " + "(beginning with /) or relative URLs (that are relative to check_mk/)."), + size = 80, + ), + ), + ( "disabled", + Checkbox( + title = _("Rule activation"), + help = _("Disabled rules are kept in the configuration but are not applied."), + label = _("do not apply this rule"), + ) + ), + ] + ) - only_host = html.var("host", "") - only_local = "" # html.var("local") + varname = html.var("varname") + rulespec = g_rulespecs[varname] + back_mode = html.var('back_mode', 'edit_ruleset') if phase == "title": - if only_host: - return _("%s - %s") % (only_host, title) - else: - return title + return _("%s rule %s") % (new and _("New") or _("Edit"), rulespec["title"]) elif phase == "buttons": - if only_host: - home_button() - html.context_button(_("All Rulesets"), make_link([("mode", "ruleeditor"), ("host", only_host)]), "back") - html.context_button(only_host, - make_link([("mode", "edithost"), ("host", only_host)]), "host") + if back_mode == 'edit_ruleset': + var_list = [("mode", "edit_ruleset"), ("varname", varname), ("host", html.var("host",""))] + if html.var("item"): + var_list.append( ("item", html.var("item")) ) + backurl = make_link(var_list) + else: + backurl = make_link([('mode', back_mode), ("host", html.var("host",""))]) + html.context_button(_("Abort"), backurl, "abort") + return + + folder = html.has_var("_new_host_rule") and g_folder or g_folders[html.var("rule_folder")] + rulesets = load_rulesets(folder) + rules = rulesets[varname] + + if new: + host = None + item = NO_ITEM + if html.has_var("_new_host_rule"): + host = html.var("host") + item = html.has_var("item") and mk_eval(html.var("item")) or NO_ITEM + try: + if item != NO_ITEM: + item = escape_regex_chars(item) + rule = create_rule(rulespec, host, item) + except Exception, e: + if phase != "action": + html.message(_("Cannot create rule: %s") % e) + return + rulenr = len(rules) + else: + rulenr = int(html.var("rulenr")) + rule = rules[rulenr] + + valuespec = rulespec.get("valuespec") + value, tag_specs, host_list, item_list, rule_options = parse_rule(rulespec, rule) + + if phase == "action": + if html.check_transaction(): + # Additional options 
+ rule_options = vs_rule_options.from_html_vars("options") + vs_rule_options.validate_value(rule_options, "options") + + # CONDITION + tag_specs, host_list, item_list = get_rule_conditions(rulespec) + new_rule_folder = g_folders[html.var("new_rule_folder")] + + # Check permissions on folders + if not new: + check_folder_permissions(folder, "write", True) + check_folder_permissions(new_rule_folder, "write", True) + + # VALUE + if valuespec: + value = get_edited_value(valuespec) + else: + value = html.var("value") == "yes" + rule = construct_rule(rulespec, value, tag_specs, host_list, item_list, rule_options) + if new_rule_folder == folder: + if new: + rules.append(rule) + else: + rules[rulenr] = rule + save_rulesets(folder, rulesets) + mark_affected_sites_dirty(folder) + + if new: + log_pending(AFFECTED, None, "edit-rule", _("Created new rule in ruleset %s in folder %s") % + (rulespec["title"], new_rule_folder["title"])) + else: + log_pending(AFFECTED, None, "edit-rule", _("Changed properties of rule %s in folder %s") % + (rulespec["title"], new_rule_folder["title"])) + else: # Move rule to new folder + if not new: + del rules[rulenr] + save_rulesets(folder, rulesets) + rulesets = load_rulesets(new_rule_folder) + rules = rulesets.setdefault(varname, []) + rules.append(rule) + save_rulesets(new_rule_folder, rulesets) + mark_affected_sites_dirty(folder) + mark_affected_sites_dirty(new_rule_folder) + log_pending(AFFECTED, None, "edit-rule", _("Changed properties of rule %s, moved rule from " + "folder %s to %s") % (rulespec["title"], folder["title"], + new_rule_folder["title"])) else: - global_buttons() - html.context_button(_("All Rulesets"), make_link([("mode", "ruleeditor")]), "back") - if config.may("wato.hosts") or config.may("wato.seeall"): - html.context_button(_("Folder"), make_link([("mode", "folder")]), "folder") - return - - elif phase == "action": - return - - if not only_host: - render_folder_path(keepvarnames = ["mode", "local", "group"]) + return back_mode - if search != None: - rule_search_form() + return (back_mode, + (new and _("Created new rule in ruleset '%s' in folder %s") + or _("Edited rule in ruleset '%s' in folder %s")) % + (rulespec["title"], new_rule_folder["title"])) - if help != None: - help = "".join(help.split("\n", 1)[1:]).strip() - if help: - html.help(help) + if rulespec.get("help"): + html.write("
    " + rulespec["help"] + "
    ") - if only_local and not only_host: - all_rulesets = {} - rs = load_rulesets(g_folder) - for varname, rules in rs.items(): - all_rulesets.setdefault(varname, []) - all_rulesets[varname] += [ (g_folder, rule) for rule in rules ] - else: - all_rulesets = load_all_rulesets() - if only_used: - all_rulesets = dict([ r for r in all_rulesets.items() if len(r[1]) > 0 ]) + html.begin_form("rule_editor", method="POST") - # Select matching rule groups while keeping their configured order - groupnames = [ gn for gn, rulesets in g_rulespec_groups - if only_used or search != None or gn == group or gn.startswith(group + "/") ] + # Conditions + forms.header(_("Conditions")) - # In case of search we need to sort the groups since main chapters would - # appear more than once otherwise. - if search != None: - groupnames.sort() + # Rule folder + forms.section(_("Folder")) + html.select("new_rule_folder", folder_selection(g_root_folder), folder[".path"]) + html.help(_("The rule is only applied to hosts directly in or below this folder.")) - something_shown = False - html.write('
') - # Loop over all ruleset groups - title_shown = False - for groupname in groupnames: - # Show information about a ruleset - # Sort rulesets according to their title - g_rulespec_group[groupname].sort( - cmp = lambda a, b: cmp(a["title"], b["title"])) - for rulespec in g_rulespec_group[groupname]: + # Host tags + forms.section(_("Host tags")) + render_condition_editor(tag_specs) + html.help(_("The rule will only be applied to hosts fulfilling all " + "of the host tag conditions listed here, even if they appear " + "in the list of explicit host names.")) - varname = rulespec["varname"] - valuespec = rulespec["valuespec"] + # Explicit hosts / ALL_HOSTS + forms.section(_("Explicit hosts")) + div_id = "div_all_hosts" - # handle only_used - rules = all_rulesets.get(varname, []) - num_rules = len(rules) - if num_rules == 0 and (only_used or only_local): - continue + checked = host_list != ALL_HOSTS + html.checkbox("explicit_hosts", checked, onclick="valuespec_toggle_option(this, %r)" % div_id, + label = _("Specify explicit host names")) + html.write('
    ' % ( + div_id, not checked and "none" or "")) + negate_hosts = len(host_list) > 0 and host_list[0].startswith("!") - # handle search - if search != None \ - and not (rulespec["help"] and search in rulespec["help"].lower()) \ - and search not in rulespec["title"].lower() \ - and search not in varname: - continue + explicit_hosts = [ h.strip("!") for h in host_list if h != ALL_HOSTS[0] ] + ListOfStrings( + orientation = "horizontal", + valuespec = TextAscii(size = 30)).render_input("hostlist", explicit_hosts) + html.checkbox("negate_hosts", negate_hosts, label = + _("Negate: make rule apply for all but the above hosts")) + html.write("
    ") + html.help(_("You can enter a number of explicit host names that rule should or should " + "not apply to here. Leave this option disabled if you want the rule to " + "apply for all hosts specified by the given tags.")) - # Handle case where a host is specified - rulespec = g_rulespecs[varname] - this_host = False - if only_host: - num_local_rules = 0 - for f, rule in rules: - value, tag_specs, host_list, item_list, rule_options = parse_rule(rulespec, rule) - if only_host and only_host in host_list: - num_local_rules += 1 + # Itemlist + itemtype = rulespec["itemtype"] + if itemtype: + if itemtype == "service": + forms.section(_("Services")) + html.help(_("Specify a list of service patterns this rule shall apply to. " + "The patterns must match the beginning of the service " + "in question. Adding a $ to the end forces an excact " + "match. Pattern use regular expressions. A .* will " + "match an arbitrary text.")) + elif itemtype == "checktype": + forms.section(_("Check types")) + elif itemtype == "item": + forms.section(rulespec["itemname"].title()) + if rulespec["itemhelp"]: + html.help(rulespec["itemhelp"]) else: - num_local_rules = len([ f for (f,r) in rules if f == g_folder ]) - - if only_local and num_local_rules == 0: - continue + html.help(_("You can make the rule apply only to certain services of the " + "specified hosts. Do this by specifying explicit items to " + "match here. Hint: make sure to enter the item only, " + "not the full Service description. " + "Note: the match is done on the beginning " + "of the item in question. Regular expressions are interpreted, " + "so appending a $ will force an exact match.")) + else: + raise MKGeneralException("Invalid item type '%s'" % itemtype) - if only_used or search != None: - titlename = g_rulegroups[groupname.split("/")[0]][0] + if itemtype: + checked = html.get_checkbox("explicit_services") + if checked == None: # read from rule itself + checked = len(item_list) == 0 or item_list[0] != "" + div_id = "item_list" + html.checkbox("explicit_services", checked, onclick="valuespec_toggle_option(this, %r)" % div_id, + label = _("Specify explicit values")) + html.write('
    ' % ( + div_id, not checked and "none" or "")) + itemenum = rulespec["itemenum"] + if itemenum: + value = [ x.rstrip("$") for x in item_list ] + itemspec = ListChoice(choices = itemenum, columns = 3) + itemspec.render_input("item", value) else: - if '/' in groupname: - titlename = groupname.split("/", 1)[1] - else: - titlename = title + ListOfStrings( + orientation = "horizontal", + valuespec = RegExpUnicode(size = 30)).render_input("itemlist", item_list) - if title_shown != titlename: - forms.header(titlename) - forms.container() - title_shown = titlename + html.write("

    ") + html.help(_("The entries here are regular expressions to match the beginning. " + "Add a $ for an exact match. An arbitrary substring is matched " + "with .*
Please note that on Windows systems any backslashes need to be escaped. " + "For example C:\\\\tmp\\\\message.log")) + html.write("
    ") - something_shown = True + # Value + if valuespec: + forms.header(valuespec.title() or _("Value")) + value = rule[0] + forms.section() + try: + valuespec.validate_datatype(value, "ve") + valuespec.render_input("ve", value) + except Exception, e: + if config.debug: + raise + else: + html.show_warning(_('Unable to read current options of this rule. Falling back to ' + 'default values. When saving this rule now, your previous settings ' + 'will be overwritten. Problem was: %s.') % e) - url_vars = [("mode", "edit_ruleset"), ("varname", varname)] - if only_host: - url_vars.append(("host", only_host)) - view_url = make_link(url_vars) + # In case of validation problems render the input with default values + valuespec.render_input("ve", valuespec.default_value()) - html.write('
    ') - html.write('%s' % - (num_rules and "nonzero" or "zero", view_url, rulespec["title"])) - html.write('%s
    ' % ("." * 100)) - html.write('
    %d
    ' % - (num_rules and "nonzero" or "zero", title, num_rules)) - html.write('
    ') + valuespec.set_focus("ve") + else: + forms.header(_("Positive / Negative")) + forms.section("") + for posneg, img in [ ("positive", "yes"), ("negative", "no")]: + val = img == "yes" + html.write(' ' % img) + html.radiobutton("value", img, value == val, _("Make the outcome of the ruleset %s
    ") % posneg) - if something_shown: - forms.end() + # Additonal rule options + vs_rule_options.render_input("options", rule_options) - else: - if only_host: - html.write("
    " + _("There are no rules with an exception for the host %s.") % only_host + "
    ") - else: - html.write("
    " + _("There are no rules defined in this folder.") + "
    ") + forms.end() + html.button("save", _("Save")) + html.hidden_fields() + vs_rule_options.set_focus("options") + html.end_form() - html.write('
    ') +# Render HTML input fields for editing a tag based condition +def render_condition_editor(tag_specs, varprefix=""): + if varprefix: + varprefix += "_" -def create_new_rule_form(rulespec, hostname = None, item = None): - html.begin_form("new_rule", add_transid = False) + if len(config.wato_aux_tags) + len(config.wato_host_tags) == 0: + html.write(_("You have not configured any host tags.")) + return - html.write('') - if hostname: - label = _("Host %s" % hostname) - ty = _('Host') - if item != NO_ITEM and rulespec["itemtype"]: - label += _(" and %s '%s'") % (rulespec["itemname"], item) - ty = rulespec["itemname"] + # Determine current (default) setting of tag by looking + # into tag_specs (e.g. [ "snmp", "!tcp", "test" ] ) + def current_tag_setting(choices): + default_tag = None + ignore = True + for t in tag_specs: + if t[0] == '!': + n = True + t = t[1:] + else: + n = False + if t in [ x[0] for x in choices ]: + default_tag = t + ignore = False + negate = n + if ignore: + deflt = "ignore" + elif negate: + deflt = "isnot" + else: + deflt = "is" + return default_tag, deflt - html.write('\n') + # Show dropdown with "is/isnot/ignore" and beginning + # of div that is switched visible by is/isnot + def tag_condition_dropdown(tagtype, deflt, id): + html.write("
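
# A small sketch of how the is/isnot/ignore dropdown states correspond to the
# tag_specs encoding that current_tag_setting() above decodes (assumption:
# illustrative helper, not part of WATO):
def tag_spec_state(tag_specs, tag_id):
    if tag_id in tag_specs:
        return "is"
    elif "!" + tag_id in tag_specs:
        return "isnot"
    else:
        return "ignore"

assert tag_spec_state([ "snmp", "!tcp", "test" ], "snmp") == "is"
assert tag_spec_state([ "snmp", "!tcp", "test" ], "tcp")  == "isnot"
assert tag_spec_state([ "snmp", "!tcp", "test" ], "lan")  == "ignore"
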
    ') - html.button("_new_host_rule", _("Create %s specific rule for: ") % ty) - html.hidden_field("host", hostname) - html.hidden_field("item", mk_repr(item)) - html.write('') - html.write(label) - html.write('
    ") + html.select(varprefix + tagtype + "_" + id, [ + ("ignore", _("ignore")), + ("is", _("is")), + ("isnot", _("isnot"))], deflt, + onchange="valuespec_toggle_dropdownn(this, '%stag_sel_%s');" % \ + (varprefix, id) + ) + html.write("") + if html.form_submitted(): + div_is_open = html.var(tagtype + "_" + id, "ignore") != "ignore" + else: + div_is_open = deflt != "ignore" + html.write('
    ' % ( + varprefix, id, not div_is_open and "display: none;" or "")) - html.write('
    ') - html.button("_new_rule", _("Create rule in folder: ")) - html.write('') - html.select("rule_folder", folder_selection(g_root_folder)) - html.write('
    \n') - html.hidden_field("varname", html.var("varname")) - html.hidden_field("mode", "new_rule") - html.end_form() + auxtags = group_hosttags_by_topic(config.wato_aux_tags) + hosttags = group_hosttags_by_topic(config.wato_host_tags) + all_topics = set([]) + for topic, taggroups in auxtags + hosttags: + all_topics.add(topic) + all_topics = list(all_topics) + all_topics.sort() + make_foldable = len(all_topics) > 1 + for topic in all_topics: + if make_foldable: + html.begin_foldable_container("topic", topic, True, "%s" % (_u(topic))) + html.write("") + + # Show main tags + for t, grouped_tags in hosttags: + if t == topic: + for entry in grouped_tags: + id, title, choices = entry[:3] + html.write("" % _u(title)) + default_tag, deflt = current_tag_setting(choices) + tag_condition_dropdown("tag", deflt, id) + if len(choices) == 1: + html.write(" " + _("set")) + else: + html.select(varprefix + "tagvalue_" + id, + [(t[0], _u(t[1])) for t in choices if t[0] != None], deflt=default_tag) + html.write("") + html.write("") + + # And auxiliary tags + for t, grouped_tags in auxtags: + if t == topic: + for id, title in grouped_tags: + html.write("" % _u(title)) + default_tag, deflt = current_tag_setting([(id, _u(title))]) + tag_condition_dropdown("auxtag", deflt, id) + html.write(" " + _("set")) + html.write("") + html.write("") + + html.write("
    %s:  
    %s:  
    ") + if make_foldable: + html.end_foldable_container() -def mode_edit_ruleset(phase): - varname = html.var("varname") - rulespec = g_rulespecs[varname] - hostname = html.var("host", "") - if html.has_var("item"): - item = mk_eval(html.var("item")) - else: - item = NO_ITEM - if hostname: - hosts = load_hosts(g_folder) - host = hosts.get(hostname) - if not host: - hostname = None # host not found. Should not happen +# Retrieve current tag condition settings from HTML variables +def get_tag_conditions(varprefix=""): + if varprefix: + varprefix += "_" + # Main tags + tag_list = [] + for entry in config.wato_host_tags: + id, title, tags = entry[:3] + mode = html.var(varprefix + "tag_" + id) + if len(tags) == 1: + tagvalue = tags[0][0] + else: + tagvalue = html.var(varprefix + "tagvalue_" + id) - if phase == "title": - title = rulespec["title"] - if hostname: - title += _(" for host %s") % hostname - if html.has_var("item") and rulespec["itemtype"]: - title += _(" and %s '%s'") % (rulespec["itemname"], item) - return title + if mode == "is": + tag_list.append(tagvalue) + elif mode == "isnot": + tag_list.append("!" + tagvalue) - elif phase == "buttons": - global_buttons() - group = rulespec["group"].split("/")[0] - groupname = g_rulegroups[group][0] - html.context_button(groupname, - make_link([("mode", "rulesets"), ("group", group), ("host", hostname)]), "back") - html.context_button(_("Used Rulesets"), - make_link([("mode", "rulesets"), ("group", "used"), ("host", hostname)]), "usedrulesets") - if hostname: - html.context_button(_("Services"), - make_link([("mode", "inventory"), ("host", hostname)]), "back") - return + # Auxiliary tags + for id, title in config.wato_aux_tags: + mode = html.var(varprefix + "auxtag_" + id) + if mode == "is": + tag_list.append(id) + elif mode == "isnot": + tag_list.append("!" 
+ id) - elif phase == "action": - # Folder for the rule actions is defined by _folder - rule_folder = g_folders[html.var("_folder", html.var("folder"))] - check_folder_permissions(rule_folder, "write", True) - rulesets = load_rulesets(rule_folder) - rules = rulesets.get(varname, []) + return tag_list - rulenr = int(html.var("_rulenr")) # rule number relative to folder - action = html.var("_action") - if action == "delete": - c = wato_confirm(_("Confirm"), _("Delete rule number %d of folder '%s'?") - % (rulenr + 1, rule_folder["title"])) - if c: - del rules[rulenr] - save_rulesets(rule_folder, rulesets) - mark_affected_sites_dirty(rule_folder) - log_pending(AFFECTED, None, "edit-ruleset", - _("Deleted rule in ruleset '%s'") % rulespec["title"]) - return - elif c == False: # not yet confirmed - return "" - else: - return None # browser reload +def save_rulesets(folder, rulesets): + make_nagios_directory(root_dir) + path = root_dir + '/' + folder['.path'] + '/' + "rules.mk" + out = create_user_file(path, "w") + out.write("# Written by WATO\n# encoding: utf-8\n\n") - elif action == "insert": - if not html.check_transaction(): - return None # browser reload - rules[rulenr:rulenr] = [rules[rulenr]] - save_rulesets(rule_folder, rulesets) - mark_affected_sites_dirty(rule_folder) + for varname, rulespec in g_rulespecs.items(): + ruleset = rulesets.get(varname) + if not ruleset: + continue # don't save empty rule sets - log_pending(AFFECTED, None, "edit-ruleset", - _("Inserted new rule in ruleset %s") % rulespec["title"]) - return + if ':' in varname: + dictname, subkey = varname.split(':') + varname = '%s[%r]' % (dictname, subkey) + out.write("\n%s.setdefault(%r, [])\n" % (dictname, subkey)) + else: + if rulespec["optional"]: + out.write("\nif %s == None:\n %s = []\n" % (varname, varname)) + + out.write("\n%s = [\n" % varname) + for rule in ruleset: + save_rule(out, folder, rulespec, rule) + out.write("] + %s\n\n" % varname) + +def save_rule(out, folder, rulespec, rule): + out.write(" ( ") + value, tag_specs, host_list, item_list, rule_options = parse_rule(rulespec, rule) + if rulespec["valuespec"]: + out.write(repr(value) + ", ") + elif not value: + out.write("NEGATE, ") + out.write("[") + for tag in tag_specs: + out.write(repr(tag)) + out.write(", ") + if folder != g_root_folder: + out.write("'/' + FOLDER_PATH + '/+'") + out.write("], ") + if len(host_list) > 0 and host_list[-1] == ALL_HOSTS[0]: + if len(host_list) > 1: + out.write(repr(host_list[:-1])) + out.write(" + ALL_HOSTS") else: - if not html.check_transaction(): - return None # browser reload - rule = rules[rulenr] - del rules[rulenr] - if action == "up": - rules[rulenr-1:rulenr-1] = [ rule ] - else: - rules[rulenr+1:rulenr+1] = [ rule ] - save_rulesets(rule_folder, rulesets) - mark_affected_sites_dirty(rule_folder) - log_pending(AFFECTED, None, "edit-ruleset", - _("Changed order of rules in ruleset %s") % rulespec["title"]) - return + out.write("ALL_HOSTS") + else: + out.write(repr(host_list)) - if not hostname: - render_folder_path(keepvarnames = ["mode", "varname"]) + if rulespec["itemtype"]: + out.write(", ") + if item_list == ALL_SERVICES: + out.write("ALL_SERVICES") + else: + out.write(repr(item_list)) - # Title is already the page title - html.write("
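
# What save_rulesets()/save_rule() above emit into a folder's rules.mk, as a
# hand-written example (assumption: illustrative values only):
#
#   # Written by WATO
#   # encoding: utf-8
#
#   ping_levels = [
#     ( {'rta': (1500.0, 3000.0)}, ['wan', '/' + FOLDER_PATH + '/+'], ALL_HOSTS ),
#   ] + ping_levels
#
# The trailing "] + ping_levels" concatenates this folder's rules with whatever
# was collected before; load_rulesets() below prepares an empty list for every
# known ruleset so this expression always evaluates.
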

    " + rulespec["title"] + "

    ") - if not config.wato_hide_varnames: - display_varname = ':' in varname and '%s["%s"]' % tuple(varname.split(":")) or varname - html.write('
    %s
    ' % display_varname) + if rule_options: + out.write(", %r" % rule_options) - html.help(rulespec["help"]) + out.write(" ),\n") - # Collect all rulesets - all_rulesets = load_all_rulesets() - ruleset = all_rulesets.get(varname) - if not ruleset: - html.write("
    " + _("There are no rules defined in this set.") + "
    ") - else: - alread_matched = False - match_keys = set([]) # in case if match = "dict" - last_folder = None - for rulenr in range(0, len(ruleset)): - folder, rule = ruleset[rulenr] - if folder != last_folder: - first_in_group = True - alias_path = get_folder_aliaspath(folder, show_main = False) - table.begin(title = "%s %s" % (_("Rules in folder"), alias_path), css="ruleset") - rel_rulenr = 0 - last_folder = folder - else: - first_in_group = False - rel_rulenr += 1 - last_in_group = (rulenr == len(ruleset) - 1 or \ - ruleset[rulenr+1][0] != folder) +def load_rulesets(folder): + # TODO: folder berücksichtigen + if folder[".path"]: + path = root_dir + folder[".path"] + "/" + "rules.mk" + else: + path = root_dir + "rules.mk" - value, tag_specs, host_list, item_list, rule_options = parse_rule(rulespec, rule) - disabled = rule_options.get("disabled") - table.row(disabled and "disabled" or None) + vars = { + "ALL_HOSTS" : ALL_HOSTS, + "ALL_SERVICES" : [ "" ], + "NEGATE" : NEGATE, + "FOLDER_PATH" : folder[".path"], + "FILE_PATH" : folder[".path"] + "/hosts.mk", + } + # Prepare empty rulesets so that rules.mk has something to + # append to + for varname, ruleset in g_rulespecs.items(): + if ':' in varname: + dictname, subkey = varname.split(":") + vars[dictname] = {} + else: + vars[varname] = [] - # Rule matching - if hostname: - table.cell(_("Ma.")) - if disabled: - reason = _("This rule is disabled") - else: - reason = rule_matches_host_and_item( - rulespec, tag_specs, host_list, item_list, folder, g_folder, hostname, item) + try: + execfile(path, vars, vars) + except IOError: + pass # Non existant files are ok... + except Exception, e: + if config.debug: + raise MKGeneralException(_("Cannot read configuration file %s: %s" % + (path, e))) + else: + html.log('load_rulesets: Problem while loading rulesets (%s - %s). ' + 'Continue with partly loaded rules...' % (path, e)) - # Handle case where dict is constructed from rules - if reason == True and rulespec["match"] == "dict": - if len(value) == 0: - title = _("This rule matches, but does not define any parameters.") - img = 'imatch' - else: - new_keys = set(value.keys()) - if set_is_disjoint(match_keys, new_keys): - title = _("This rule matches and defines new parameters.") - img = 'match' - elif new_keys.issubset(match_keys): - title = _("This rule matches, but all of its parameters are overridden by previous rules.") - img = 'imatch' - else: - title = _("This rule matches, but some of its parameters are overridden by previous rules.") - img = 'pmatch' - match_keys.update(new_keys) + # Extract only specified rule variables + rulevars = {} + for ruleset in g_rulespecs.values(): + varname = ruleset["varname"] + # handle extra_host_conf:max_check_attempts + if ':' in varname: + dictname, subkey = varname.split(":") + if dictname in vars: + dictionary = vars[dictname] + if subkey in dictionary: + rulevars[varname] = dictionary[subkey] + # If this ruleset is not defined in rules.mk use empty list. + if varname not in rulevars: + rulevars[varname] = [] - elif reason == True and (not alread_matched or rulespec["match"] == "all"): - title = _("This rule matches for the host '%s'") % hostname - if rulespec["itemtype"]: - title += _(" and the %s '%s'.") % (rulespec["itemname"], item) - else: - title += "." 
- img = 'match' - alread_matched = True - elif reason == True: - title = _("This rule matches, but is overridden by a previous rule.") - img = 'imatch' - alread_matched = True - else: - title = _("This rule does not match: %s") % reason - img = 'nmatch' - html.write(' ' % (title, img)) + else: + if varname in vars: + rulevars[varname] = vars[varname] + return rulevars - # Disabling - table.cell("", css="buttons") - if disabled: - html.icon(_("This rule is currently disabled and will not be applied"), "disabled") - else: - html.empty_icon() +# Load all rules of all folders into a dictionary that +# has the rules' varnames as keys and a list of (folder, rule) +# as values. +def load_rulesets_recursively(folder, all_rulesets): + for subfolder in folder[".folders"].values(): + load_rulesets_recursively(subfolder, all_rulesets) - # Actions - table.cell(_("Order"), css="buttons rulebuttons") - if not first_in_group: - rule_button("up", _("Move this rule one position up"), folder, rel_rulenr) - else: - rule_button(None) - if not last_in_group: - rule_button("down", _("Move this rule one position down"), folder, rel_rulenr) - else: - rule_button(None) + rs = load_rulesets(folder) + for varname, rules in rs.items(): + all_rulesets.setdefault(varname, []) + all_rulesets[varname] += [ (folder, rule) for rule in rules ] - table.cell(_("Actions"), css="buttons rulebuttons") - edit_url = make_link([ - ("mode", "edit_rule"), - ("varname", varname), - ("rulenr", rel_rulenr), - ("host", hostname), - ("item", mk_repr(item)), - ("rule_folder", folder[".path"])]) - html.icon_button(edit_url, _("Edit this rule"), "edit") - rule_button("insert", _("Insert a copy of this rule in current folder"), - folder, rel_rulenr) - rule_button("delete", _("Delete this rule"), folder, rel_rulenr) +def load_all_rulesets(): + all_rulesets = {} + load_rulesets_recursively(g_root_folder, all_rulesets) + return all_rulesets - # Folder - # alias_path = get_folder_aliaspath(folder, show_main = False) - # classes = "" - # if first_in_group: - # classes += "first" - # if last_in_group: - # classes += " last" - # html.write('
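
# Sketch of the "dictname:subkey" varname convention handled by
# save_rulesets() and load_rulesets() above (assumption: hand-written
# example): a ruleset registered as "extra_service_conf:check_interval"
# ends up in rules.mk as
#
#   extra_service_conf.setdefault('check_interval', [])
#
#   extra_service_conf['check_interval'] = [
#     ( 1440, [], ALL_HOSTS, [ "Check_MK HW/SW Inventory$" ] ),
#   ] + extra_service_conf['check_interval']
#
# and is read back via vars[dictname][subkey] when the rule variables are
# extracted.
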
    %s
    ' % (classes, alias_path)) +g_rulegroups = {} +def register_rulegroup(group, title, help): + g_rulegroups[group] = (title, help) - # Conditions - table.cell(_("Conditions"), css="condition") - render_conditions(rulespec, tag_specs, host_list, item_list, varname, folder) +g_rulespecs = {} +g_rulespec_group = {} # for conveniant lookup +g_rulespec_groups = [] # for keeping original order +NO_FACTORY_DEFAULT = [] # needed for unique ID +FACTORY_DEFAULT_UNUSED = [] # means this ruleset is not used if no rule is entered +def register_rule(group, varname, valuespec = None, title = None, + help = None, itemspec = None, itemtype = None, itemname = None, + itemhelp = None, itemenum = None, + match = "first", optional = False, factory_default = NO_FACTORY_DEFAULT): + if not itemname and itemtype == "service": + itemname = _("Service") - # Value - table.cell(_("Value")) + ruleset = { + "group" : group, + "varname" : varname, + "valuespec" : valuespec, + "itemspec" : itemspec, # original item spec, e.g. if validation is needed + "itemtype" : itemtype, # None, "service", "checktype" or "checkitem" + "itemname" : itemname, # e.g. "mount point" + "itemhelp" : itemhelp, # a description of the item, only rarely used + "itemenum" : itemenum, # possible fixed values for items + "match" : match, # used by WATO rule analyzer (green and grey balls) + "title" : title or valuespec.title(), + "help" : help or valuespec.help(), + "optional" : optional, # rule may be None (like only_hosts) + "factory_default" : factory_default, + } - if rulespec["valuespec"]: - try: - value_html = rulespec["valuespec"].value_to_text(value) - except: - try: - reason = "" - rulespec["valuespec"].validate_datatype(value, "") - except Exception, e: - reason = str(e) + # Register group + if group not in g_rulespec_group: + rulesets = [ ruleset ] + g_rulespec_groups.append((group, rulesets)) + g_rulespec_group[group] = rulesets + else: + # If a ruleset for this variable already exist, then we need to replace + # it. How can this happen? If a user puts his own copy of the definition + # into some file below local/. + for nr, rs in enumerate(g_rulespec_group[group]): + if rs["varname"] == varname: + del g_rulespec_group[group][nr] + break # There cannot be two duplicates! + g_rulespec_group[group].append(ruleset) - value_html = '' \ - + _("The value of this rule is not valid. ") \ - + reason - else: - img = value and "yes" or "no" - title = value and _("This rule results in a positive outcome.") \ - or _("this rule results in a negative outcome.") - value_html = '' \ - % (title, img) - html.write(value_html) + g_rulespecs[varname] = ruleset - # Comment - table.cell(_("Comment")) - url = rule_options.get("docu_url") - if url: - html.icon_button(url, _("Context information about this rule"), "url", target="_blank") - html.write(" ") - html.write(htmllib.attrencode(rule_options.get("comment", ""))) +# Special version of register_rule, dedicated to checks. This is not really +# modular here, but we cannot put this function into the plugins file because +# the order is not defined there. 
+def register_check_parameters(subgroup, checkgroup, title, valuespec, itemspec, matchtype, has_inventory=True, register_static_check=True): + # Register rule for discovered checks + if valuespec and has_inventory: # would be useless rule if check has no parameters + itemenum = None + if itemspec: + itemtype = "item" + itemname = itemspec.title() + itemhelp = itemspec.help() + if isinstance(itemspec, DropdownChoice) or isinstance(itemspec, OptionalDropdownChoice): + itemenum = itemspec._choices + else: + itemtype = None + itemname = None + itemhelp = None - table.end() + register_rule( + "checkparams/" + subgroup, + varname = "checkgroup_parameters:%s" % checkgroup, + title = title, + valuespec = valuespec, + itemspec = itemspec, + itemtype = itemtype, + itemname = itemname, + itemhelp = itemhelp, + itemenum = itemenum, + match = matchtype) - create_new_rule_form(rulespec, hostname, item) + if register_static_check: + # Register rule for static checks + elements = [ + CheckTypeGroupSelection( + checkgroup, + title = _("Checktype"), + help = _("Please choose the check plugin")) ] + if itemspec: + elements.append(itemspec) + else: + # In case of static checks without check-item, add the fixed + # valuespec to add "None" as second element in the tuple + elements.append(FixedValue( + None, + totext = '', + )) + if not valuespec: + valuespec =\ + FixedValue(None, + help = _("This check has no parameters."), + totext = "") + if not valuespec.title(): + valuespec._title = _("Parameters") + elements.append(valuespec) + register_rule( + "static/" + subgroup, + "static_checks:%s" % checkgroup, + title = title, + valuespec = Tuple( + title = valuespec.title(), + elements = elements, + ), + itemspec = itemspec, + match = "all") -def folder_selection(folder, depth=0): - if depth: - title_prefix = "   " * depth + "` " + "- " * depth - else: - title_prefix = "" - sel = [ (folder[".path"], title_prefix + folder["title"]) ] +# Registers notification parameters for a certain notification script, +# e.g. "mail" or "sms". This will create: +# - A WATO host rule +# - A parametrization of the not-script also in the RBN module +# Notification parameters are always expected to be of type Dictionary. +# The match type will be set to "dict". 
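
# A hedged usage sketch for register_check_parameters() above; the check
# group "my_temperature" and all parameter values are made up for
# illustration, only the call shape follows the signature:
register_check_parameters(
    "environment",                        # subgroup below "checkparams/"
    "my_temperature",                     # check group name (hypothetical)
    _("My temperature levels"),           # ruleset title
    Tuple(
        title = _("Levels"),
        elements = [
            Integer(title = _("Warning at"), unit = _(u"°C"), default_value = 26),
            Integer(title = _("Critical at"), unit = _(u"°C"), default_value = 30),
        ]),
    TextAscii(title = _("Sensor name")),  # item spec
    "first")                              # match type
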
+g_notification_parameters = {} +def register_notification_parameters(scriptname, valuespec): + + script_title = notification_script_title(scriptname) + title = _("Parameters for %s") % script_title + valuespec._title = _("Call with the following parameters:") - for subfolder in folder[".folders"].values(): - sel += folder_selection(subfolder, depth + 1) - return sel + register_rule( + "monconf/" + _("Notifications"), + "notification_parameters:" + scriptname, + valuespec, + title, + itemtype = None, + match = "dict" + ) + g_notification_parameters[scriptname] = valuespec -def create_rule(rulespec, hostname=None, item=NO_ITEM): - new_rule = [] - valuespec = rulespec["valuespec"] - if valuespec: - new_rule.append(valuespec.default_value()) - if hostname: - new_rule.append([hostname]) - else: - new_rule.append(ALL_HOSTS) # bottom: default to catch-all rule - if rulespec["itemtype"]: - if item != NO_ITEM: - new_rule.append(["%s$" % item]) - else: - new_rule.append([""]) - return tuple(new_rule) +# The following function looks like a value spec and in fact +# can be used like one (but take no parameters) +def PredictiveLevels(**args): + dif = args.get("default_difference", (2.0, 4.0)) + unitname = args.get("unit", "") + if unitname: + unitname += " " -def rule_button(action, help=None, folder=None, rulenr=0): - if action == None: - html.empty_icon_button() - else: - vars = [("_folder", folder[".path"]), - ("_rulenr", str(rulenr)), - ("_action", action)] - if html.var("host"): - vars.append(("host", html.var("host"))) - url = html.makeactionuri(vars) - html.icon_button(url, help, action) + return Dictionary( + title = _("Predictive Levels"), + optional_keys = [ "weight", "levels_upper", "levels_upper_min", "levels_lower", "levels_lower_max" ], + default_keys = [ "levels_upper" ], + columns = 1, + headers = "sup", + elements = [ + ( "period", + DropdownChoice( + title = _("Base prediction on"), + choices = [ + ( "wday", _("Day of the week (1-7, 1 is Monday)") ), + ( "day", _("Day of the month (1-31)") ), + ( "hour", _("Hour of the day (0-23)") ), + ( "minute", _("Minute of the hour (0-59)") ), + ] + )), + ( "horizon", + Integer( + title = _("Time horizon"), + unit = _("days"), + minvalue = 1, + default_value = 90, + )), + # ( "weight", + # Percentage( + # title = _("Raise weight of recent time"), + # label = _("by"), + # default_value = 0, + # )), + ( "levels_upper", + CascadingDropdown( + title = _("Dynamic levels - upper bound"), + choices = [ + ( "absolute", + _("Absolute difference from prediction"), + Tuple( + elements = [ + Float(title = _("Warning at"), + unit = unitname + _("above predicted value"), default_value = dif[0]), + Float(title = _("Critical at"), + unit = unitname + _("above predicted value"), default_value = dif[1]), + ] + )), + ( "relative", + _("Relative difference from prediction"), + Tuple( + elements = [ + Percentage(title = _("Warning at"), unit = _("% above predicted value"), default_value = 10), + Percentage(title = _("Critical at"), unit = _("% above predicted value"), default_value = 20), + ] + )), + ( "stdev", + _("In relation to standard deviation"), + Tuple( + elements = [ + Percentage(title = _("Warning at"), unit = _("times the standard deviation above the predicted value"), default_value = 2), + Percentage(title = _("Critical at"), unit = _("times the standard deviation above the predicted value"), default_value = 4), + ] + )), + ] + )), + ( "levels_upper_min", + Tuple( + title = _("Limit for upper bound dynamic levels"), + help = _("Regardless of how the 
dynamic levels for the upper bound are computed according to the prediction: " + "they will never be set below the following limits. This avoids false alarms " + "during times when the predicted levels would be very low."), + elements = [ + Float(title = _("Warning level is at least"), unit = unitname), + Float(title = _("Critical level is at least"), unit = unitname), + ] + )), + ( "levels_lower", + CascadingDropdown( + title = _("Dynamic levels - lower bound"), + choices = [ + ( "absolute", + _("Absolute difference from prediction"), + Tuple( + elements = [ + Float(title = _("Warning at"), + unit = unitname + _("below predicted value"), default_value = 2.0), + Float(title = _("Critical at"), + unit = unitname + _("below predicted value"), default_value = 4.0), + ] + )), + ( "relative", + _("Relative difference from prediction"), + Tuple( + elements = [ + Percentage(title = _("Warning at"), unit = _("% below predicted value"), default_value = 10), + Percentage(title = _("Critical at"), unit = _("% below predicted value"), default_value = 20), + ] + )), + ( "stdev", + _("In relation to standard deviation"), + Tuple( + elements = [ + Percentage(title = _("Warning at"), unit = _("times the standard deviation below the predicted value"), default_value = 2), + Percentage(title = _("Critical at"), unit = _("times the standard deviation below the predicted value"), default_value = 4), + ] + )), + ] + )), + ] + ) -def parse_rule(ruleset, orig_rule): - rule = orig_rule - try: - if type(rule[-1]) == dict: - rule_options = rule[-1] - rule = rule[:-1] - else: - rule_options = {} - # Extract value from front, if rule has a value - if ruleset["valuespec"]: - value = rule[0] - rule = rule[1:] - else: - if rule[0] == NEGATE: - value = False - rule = rule[1:] - else: - value = True +# To be used as ValueSpec for levels on numeric values, with +# prediction +def match_levels_alternative(v): + if type(v) == dict: + return 2 + elif type(v) == tuple and v != (None, None): + return 1 + else: + return 0 + +def Levels(**kwargs): + help = kwargs.get("help") + unit = kwargs.get("unit") + title = kwargs.get("title") + default_levels = kwargs.get("default_levels", (0.0, 0.0)) + default_difference = kwargs.get("default_difference", (0,0)) + if "default_value" in kwargs: + default_value = kwargs["default_value"] + else: + default_value = default_levels and default_levels or None + + return Alternative( + title = title, + help = help, + show_titles = False, + style = "dropdown", + elements = [ + FixedValue( + None, + title = _("No Levels"), + totext = _("Do not impose levels, always be OK"), + ), + Tuple( + title = _("Fixed Levels"), + elements = [ + Float(unit = unit, title = _("Warning at"), default_value = default_levels[0], allow_int = True), + Float(unit = unit, title = _("Critical at"), default_value = default_levels[1], allow_int = True), + ], + ), + PredictiveLevels( + default_difference = default_difference, + ), + ], + match = match_levels_alternative, + default_value = default_value, + ) - # Extract list of items from back, if rule has items - if ruleset["itemtype"]: - item_list = rule[-1] - rule = rule[:-1] - else: - item_list = None +def HostnameTranslation(**kwargs): + help = kwargs.get("help") + title = kwargs.get("title") + return Dictionary( + title = title, + help = help, + elements = [ + ( "case", + DropdownChoice( + title = _("Case translation"), + choices = [ + ( None, _("Do not convert case") ), + ( "upper", _("Convert hostnames to upper case") ), + ( "lower", _("Convert hostnames to lower case") ), + ]
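
# The three parameter shapes built by Levels() above and told apart by
# match_levels_alternative() (assumption: example values only):
assert match_levels_alternative(None) == 0            # "No Levels"
assert match_levels_alternative((80.0, 90.0)) == 1    # fixed warn/crit levels
assert match_levels_alternative({ "period"       : "wday",
                                  "horizon"      : 90,
                                  "levels_upper" : ("absolute", (2.0, 4.0)) }) == 2
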
+ )), + ( "drop_domain", + FixedValue( + True, + title = _("Convert FQHN"), + totext = _("Drop domain part (host123.foobar.dehost123)"), + )), + ( "regex", + Tuple( + title = _("Regular expression substitution"), + help = _("Please specify a regular expression in the first field. This expression should at " + "least contain one subexpression exclosed in brackets - for example vm_(.*)_prod. " + "In the second field you specify the translated host name and can refer to the first matched " + "group with \\1, the second with \\2 and so on, for example \\1.example.org"), + elements = [ + RegExpUnicode( + title = _("Regular expression"), + help = _("Must contain at least one subgroup (...)"), + mingroups = 0, + maxgroups = 9, + size = 30, + allow_empty = False, + ), + TextUnicode( + title = _("Replacement"), + help = _("Use \\1, \\2 etc. to replace matched subgroups"), + size = 30, + allow_empty = False, + ) + ] + )), + ( "mapping", + ListOf( + Tuple( + orientation = "horizontal", + elements = [ + TextUnicode( + title = _("Original hostname"), + size = 30, + allow_empty = False, + attrencode = True, + ), + TextUnicode( + title = _("Translated hostname"), + size = 30, + allow_empty = False, + attrencode = True, + ), + ], + ), + title = _("Explicit host name mapping"), + help = _("If case conversion and regular expression do not work for all cases then you can " + "specify explicity pairs of origin host name and translated host name here. This " + "mapping is being applied after the case conversion and after a regular " + "expression conversion (if that matches)."), + add_label = _("Add new mapping"), + movable = False, + )), + ]) - # Rest is host list or tag list + host list - if len(rule) == 1: - tag_specs = [] - host_list = rule[0] - else: - tag_specs = rule[0] - host_list = rule[1] +#. +# .--User Profile--------------------------------------------------------. +# | _ _ ____ __ _ _ | +# | | | | |___ ___ _ __ | _ \ _ __ ___ / _(_) | ___ | +# | | | | / __|/ _ \ '__| | |_) | '__/ _ \| |_| | |/ _ \ | +# | | |_| \__ \ __/ | | __/| | | (_) | _| | | __/ | +# | \___/|___/\___|_| |_| |_| \___/|_| |_|_|\___| | +# | | +# +----------------------------------------------------------------------+ +# | A user can change several aspects of it's own profile | +# '----------------------------------------------------------------------' - # Remove folder tag from tag list - tag_specs = filter(lambda t: not t.startswith("/"), tag_specs) +def verify_password_policy(password): + policy = config.password_policy + min_len = config.password_policy.get('min_length') + if min_len and len(password) < min_len: + raise MKUserError('password', _('The given password is too short. It must have at least %d characters.') % min_len) + + num_groups = config.password_policy.get('num_groups') + if num_groups: + groups = {} + for c in password: + if c in "abcdefghijklmnopqrstuvwxyz": + groups['lcase'] = 1 + elif c in "ABCDEFGHIJKLMNOPQRSTUVWXYZ": + groups['ucase'] = 1 + elif c in "0123456789": + groups['numbers'] = 1 + else: + groups['special'] = 1 + + if sum(groups.values()) < num_groups: + raise MKUserError('password', _('The password does not use enough character groups. 
You need to ' + 'set a password which uses at least %d of them.') % num_groups) - return value, tag_specs, host_list, item_list, rule_options # (item_list currently not supported) - except Exception, e: - raise MKGeneralException(_("Invalid rule %s") % (orig_rule,)) +def select_language(user): + languages = [ l for l in get_languages() if not config.hide_language(l[0]) ] + if languages: + active = 'language' in user + forms.section(_("Language"), checkbox = ('_set_lang', active, 'language')) + default_label = _('Default: %s') % (get_language_alias(config.default_language) or _('English')) + html.write('
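
# A standalone re-statement of the character group counting in
# verify_password_policy() above (assumption: simplified sketch):
import string

def count_char_groups(password):
    groups = set()
    for c in password:
        if c in string.ascii_lowercase:
            groups.add('lcase')
        elif c in string.ascii_uppercase:
            groups.add('ucase')
        elif c in string.digits:
            groups.add('numbers')
        else:
            groups.add('special')
    return len(groups)

assert count_char_groups("hunter2") == 2     # lcase + numbers
assert count_char_groups("Hunter2!") == 4    # all four groups
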
    %s
    ' % + ((active) and "display: none" or "", default_label)) + html.write('
    ' % ((not active) and "display: none" or "")) + html.select("language", languages, user.get('language') or '') + html.write("
    ") + html.help(_('Configure the default language ' + 'to be used by the user in the user interface here. If you do not check ' + 'the checkbox, then the system default will be used.

' + 'Note: currently Multisite is internationalized ' + 'but comes without any actual localisations (translations). If you want to ' + 'create your own translation, you can find documentation <a href="%(url)s">online</a>.') % + { "url" : "http://mathias-kettner.de/checkmk_multisite_i18n.html"} ) +def user_profile_async_replication_page(): + html.header(_('Replicate new User Profile'), + javascripts = ['wato'], + stylesheets = ['check_mk', 'pages', 'wato', 'status']) + html.begin_context_buttons() + html.context_button(_('User Profile'), 'user_profile.py', 'back') + html.end_context_buttons() -def rule_matches_host_and_item(rulespec, tag_specs, host_list, item_list, - rule_folder, host_folder, hostname, item): - reasons = [] - host = host_folder[".hosts"][hostname] - if not ( - (hostname in host_list) - or - (("!"+hostname) not in host_list - and len(host_list) > 0 - and host_list[-1] == ALL_HOSTS[0])): - reasons.append(_("The host name does not match.")) + user_profile_async_replication_dialog() - tags_match = True - for tag in tag_specs: - if tag[0] != '/' and tag[0] != '!' and tag not in host[".tags"]: - reasons.append(_("The host is missing the tag %s" % tag)) - elif tag[0] == '!' and tag[1:] in host[".tags"]: - reasons.append(_("The host has the tag %s" % tag)) + html.footer() - if not is_indirect_parent_of(host_folder, rule_folder): - reasons.append(_("The rule does not apply to the folder of the host.")) - # Check items - if item != NO_ITEM and rulespec["itemtype"]: - item_matches = False - for i in item_list: - if re.match(i, str(item)): - item_matches = True - break - if not item_matches: - reasons.append(_("The %s %s does not match this rule.") % - (rulespec["itemname"], item)) +def user_profile_async_replication_dialog(): + sites = [(name, config.site(name)) for name in config.sitenames() ] + sort_sites(sites) + repstatus = load_replication_status() + html.message(_('In order to make your changes available on all remote sites, your user profile needs ' + 'to be replicated to the remote sites. This is done on this page now. Each site ' + 'is represented by a single image which is first shown gray and then fills ' + 'to green during synchronisation.')) + + html.write('

    %s

    ' % _('Replication States')) + html.write('
    ') + num_replsites = 0 + for site_id, site in sites: + is_local = site_is_local(site_id) + + if is_local or (not is_local and not site.get("replication")): + continue # Skip non replication slaves + + if site.get("disabled"): + ss = {} + status = "disabled" + else: + ss = html.site_status.get(site_id, {}) + status = ss.get("state", "unknown") -def is_indirect_parent_of(pfolder, sfolder): - return pfolder == sfolder or \ - ('.parent' in pfolder and - is_indirect_parent_of(pfolder[".parent"], sfolder)) + srs = repstatus.get(site_id, {}) + + if not "secret" in site: + status_txt = _('Not logged in.') + start_sync = False + icon = 'repl_locked' + else: + status_txt = _('Waiting for replication to start') + start_sync = True + icon = 'repl_pending' + + html.write('
    ' % (html.attrencode(site_id))) + html.icon(status_txt, icon) + if start_sync: + estimated_duration = srs.get("times", {}).get("profile-sync", 2.0) + html.javascript('wato_do_profile_replication(\'%s\', %d, \'%s\');' % + (site_id, int(estimated_duration * 1000.0), _('Replication in progress'))) + num_replsites += 1 + html.write('%s' % site.get('alias', site_id)) + html.write('
    ') + html.javascript('var g_num_replsites = %d;\n' % num_replsites) -def construct_rule(ruleset, value, tag_specs, host_list, item_list, rule_options): - if ruleset["valuespec"]: - rule = [ value ] - elif not value: - rule = [ NEGATE ] - else: - rule = [] - if tag_specs != []: - rule.append(tag_specs) - rule.append(host_list) - if item_list != None: - rule.append(item_list) + html.write('
    ') - # Append rule options, but only if they are not trivial. That way we - # keep as close as possible to the original Check_MK in rules.mk so that - # command line users will feel at home... - ro = {} - if rule_options.get("disabled"): - ro["disabled"] = True - if rule_options.get("comment"): - ro["comment"] = rule_options["comment"] - if rule_options.get("docu_url"): - ro["docu_url"] = rule_options["docu_url"] - # Preserve other keys that we do not know of - for k,v in rule_options.items(): - if k not in [ "disabled", "comment", "docu_url"]: - ro[k] = v - if ro: - rule.append(ro) +def page_user_profile(change_pw=False): + start_async_replication = False - return tuple(rule) + if not config.user_id: + raise MKUserError(None, _('Not logged in.')) + if not config.may('general.edit_profile') and not config.may('general.change_password'): + raise MKAuthException(_("You are not allowed to edit your user profile.")) -def tag_alias(tag): - for entry in config.wato_host_tags: - id, title, tags = entry[:3] - for t in tags: - if t[0] == tag: - return t[1] - for id, alias in config.wato_aux_tags: - if id == tag: - return alias + if not config.wato_enabled: + raise MKAuthException(_('User profiles can not be edited (WATO is disabled).')) -def render_conditions(ruleset, tagspecs, host_list, item_list, varname, folder): - html.write("
      ") + success = None + if html.has_var('_save') and html.check_transaction(): + users = userdb.load_users(lock = True) - # Host tags - for tagspec in tagspecs: - if tagspec[0] == '!': - negate = True - tag = tagspec[1:] - else: - negate = False - tag = tagspec + try: + # Profile edit (user options like language etc.) + if config.may('general.edit_profile'): + if not change_pw: + set_lang = html.get_checkbox('_set_lang') + language = html.var('language') + # Set the users language if requested + if set_lang: + if language == '': + language = None + # Set custom language + users[config.user_id]['language'] = language + config.user['language'] = language + else: + # Remove the customized language + if 'language' in users[config.user_id]: + del users[config.user_id]['language'] + if 'language' in config.user: + del config.user['language'] + + # load the new language + load_language(config.get_language()) + load_all_plugins() + + user = users.get(config.user_id) + if config.may('general.edit_notifications') and user.get("notifications_enabled"): + value = forms.get_input(vs_notification_method, "notification_method") + users[config.user_id]["notification_method"] = value + + # Custom attributes + if config.may('general.edit_user_attributes'): + for name, attr in userdb.get_user_attributes(): + if attr['user_editable']: + if not attr.get("permission") or config.may(attr["permission"]): + vs = attr['valuespec'] + value = vs.from_html_vars('ua_' + name) + vs.validate_value(value, "ua_" + name) + users[config.user_id][name] = value - html.write('
    • ') - alias = tag_alias(tag) - if alias: - if negate: - html.write(_("Host is not of type ")) - else: - html.write(_("Host is of type ")) - html.write("" + alias + "") - else: - if negate: - html.write(_("Host has not the tag ") + "" + tag + "") - else: - html.write(_("Host has the tag ") + "" + tag + "") - html.write('
• ') + # Change the password if requested + if config.may('general.change_password'): + cur_password = html.var('cur_password') + password = html.var('password') + password2 = html.var('password2', '') + + if change_pw: + # Force change pw mode + if not cur_password: + raise MKUserError("cur_password", _("You need to provide your current password.")) + if not password: + raise MKUserError("password", _("You need to change your password.")) + if cur_password == password: + raise MKUserError("password", _("The new password must differ from your current one.")) + + if cur_password and password: + if userdb.hook_login(config.user_id, cur_password) in [ None, False ]: + raise MKUserError("cur_password", _("Your old password is wrong.")) + if password2 and password != password2: + raise MKUserError("password2", _("The two new passwords do not match.")) - # Explicit list of hosts - if host_list != ALL_HOSTS: - condition = None - if host_list == []: - condition = _("This rule does never apply due to an empty list of explicit hosts!") - elif host_list[-1] != ALL_HOSTS[0]: - tt_list = [] - for h in host_list: - f = find_host(h) - if f: - uri = html.makeuri([("mode", "edithost"), ("folder", f[".path"]), ("host", h)]) - host_spec = '%s' % (uri, h) - else: - host_spec = h - tt_list.append("%s" % host_spec) - if len(host_list) == 1: - condition = _("Host name is %s") % tt_list[0] - else: - condition = _("Host name is ") + ", ".join(tt_list[:-1]) - condition += _(" or ") + tt_list[-1] - elif host_list[0][0] == '!': - hosts = [ h[1:] for h in host_list[:-1] ] - condition = _("Host is not one of ") + ", ".join(hosts) - # other cases should not occur, e.g. list of explicit hosts - # plus ALL_HOSTS. - if condition: - html.write('
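
# Sketch of the serial mechanism used further below (assumption: simplified):
# the auth cookie records the serial it was issued with, so bumping the
# stored serial after a password change invalidates every previously issued
# cookie, while set_auth_cookie() immediately re-issues a fresh one for the
# current session.
def cookie_still_valid(cookie_serial, stored_serial):
    return cookie_serial == stored_serial

assert cookie_still_valid(2, 2)        # freshly issued cookie
assert not cookie_still_valid(1, 2)    # cookie issued before the change
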
    • %s
    • ' % condition) + verify_password_policy(password) + users[config.user_id]['password'] = userdb.encrypt_password(password) + users[config.user_id]['last_pw_change'] = int(time.time()) - # Item list - if ruleset["itemtype"] and item_list != ALL_SERVICES: - tt_list = [] - for t in item_list: - if t.endswith("$"): - tt_list.append("%s %s" % (_("is"), t[:-1])) - else: - tt_list.append("%s %s" % (_("begins with"), t)) + if change_pw: + # Has been changed, remove enforcement flag + del users[config.user_id]['enforce_pw_change'] - if ruleset["itemtype"] == "service": - condition = _("Service name ") + " or ".join(tt_list) - elif ruleset["itemtype"] == "item": - condition = ruleset["itemname"] + " " + " or ".join(tt_list) - html.write('
    • %s
    • ' % condition) + # Increase serial to invalidate old cookies + if 'serial' not in users[config.user_id]: + users[config.user_id]['serial'] = 1 + else: + users[config.user_id]['serial'] += 1 - html.write("
    ") + # Set the new cookie to prevent logout for the current user + login.set_auth_cookie(config.user_id, users[config.user_id]['serial']) + # Now, if in distributed environment, set the trigger for pushing the new + # auth information to the slave sites asynchronous + if is_distributed(): + start_async_replication = True -def ruleeditor_hover_code(varname, rulenr, mode, boolval, folder=None): - if boolval in [ True, False ]: - url = html.makeactionuri([("_rulenr", rulenr), ("_action", "toggle")]) + userdb.save_users(users) + success = True + except MKUserError, e: + html.add_user_error(e.varname, e) else: - url = make_link_to([("mode", mode), ("varname", varname), ("rulenr", rulenr)], folder or g_folder) - return \ - ' onmouseover="this.style.cursor=\'pointer\'; this.style.backgroundColor=\'#b7ced3\';" ' \ - ' onmouseout="this.style.cursor=\'auto\'; this.style.backgroundColor=\'#a7bec3\';" ' \ - ' onclick="location.href=\'%s\'"' % url + users = userdb.load_users() + # When in distributed setup, display the replication dialog instead of the normal + # profile edit dialog after changing the password. + if start_async_replication: + user_profile_async_replication_page() + return + if change_pw: + title = _("Change Password") + else: + title = _("Edit User Profile") -def get_rule_conditions(ruleset): - tag_list = get_tag_conditions() + html.header(title, javascripts = ['wato'], stylesheets = ['check_mk', 'pages', 'wato', 'status']) - # Host list - if not html.get_checkbox("explicit_hosts"): - host_list = ALL_HOSTS - else: - negate = html.get_checkbox("negate_hosts") - nr = 0 - host_list = ListOfStrings().from_html_vars("hostlist") - if negate: - host_list = [ "!" + h for h in host_list ] - # append ALL_HOSTS to negated host lists - if len(host_list) > 0 and host_list[0][0] == '!': - host_list += ALL_HOSTS - elif len(host_list) == 0 and negate: - host_list = ALL_HOSTS # equivalent + # Rule based notifications: The user currently cannot simply call the according + # WATO module due to WATO permission issues. So we cannot show this button + # right now. + if not change_pw: + rulebased_notifications = load_configuration_settings().get("enable_rulebased_notifications") + if rulebased_notifications and config.may('general.edit_notifications'): + html.begin_context_buttons() + url = "wato.py?mode=user_notifications_p" + html.context_button(_("Notifications"), url, "notifications") + html.end_context_buttons() + else: + reason = html.var('reason') + if reason == 'expired': + html.write('

    %s

    ' % _('Your password is too old, you need to choose a new password.')) + else: + html.write('

    %s

    ' % _('You are required to change your password before proceeding.')) - # Item list - itemtype = ruleset["itemtype"] - if itemtype: - explicit = html.get_checkbox("explicit_services") - if not explicit: - item_list = [ "" ] + if success: + html.reload_sidebar() + if change_pw: + html.message(_("Your password has been changed.")) + html.http_redirect(html.var('_origtarget', 'index.py')) else: - itemenum = ruleset["itemenum"] - if itemenum: - itemspec = ListChoice(choices = itemenum, columns = 3) - item_list = [ x+"$" for x in itemspec.from_html_vars("item") ] - else: - item_list = ListOfStrings().from_html_vars("itemlist") + html.message(_("Successfully updated user profile.")) - if len(item_list) == 0: - raise MKUserError("item_0", _("Please specify at least one %s or " - "this rule will never match.") % ruleset["itemname"]) - else: - item_list = None + if html.has_user_errors(): + html.show_user_errors() - return tag_list, host_list, item_list + user = users.get(config.user_id) + if user == None: + html.show_warning(_("Sorry, your user account does not exist.")) + html.footer() + return + # Returns true if an attribute is locked and should be read only. Is only + # checked when modifying an existing user + locked_attributes = userdb.locked_attributes(user.get('connector')) + def is_locked(attr): + return attr in locked_attributes + html.begin_form("profile", method="POST") + html.write('
    ') + forms.header(_("Personal Settings")) -def mode_edit_rule(phase, new = False): - # Due to localization this cannot be defined in the global context! - vs_rule_options = Dictionary( - title = _("Additional options"), - optional_keys = False, - render = "form", - elements = [ - ( "comment", - TextUnicode( - title = _("Comment"), - help = _("An optional comment that helps you documenting the purpose of " - "this rule"), - size = 80, - attrencode = True, - ) - ), - ( "docu_url", - TextAscii( - title = _("Documentation-URL"), - help = _("An optional URL pointing to documentation or any other page. This will be displayed " - "as an icon and open a new page when clicked. " - "You can use either global URLs (beginning with http://), absolute local urls " - "(beginning with /) or relative URLs (that are relative to check_mk/)."), - size = 80, - ), - ), - ( "disabled", - Checkbox( - title = _("Rule activation"), - help = _("Disabled rules are kept in the configuration but are not applied."), - label = _("do not apply this rule"), - ) - ), - ] - ) + if not change_pw: + forms.section(_("Name"), simple=True) + html.write(user.get("alias", config.user_id)) + + if config.may('general.change_password') and not is_locked('password'): + forms.section(_("Current Password")) + html.password_input('cur_password', autocomplete = "off") + + forms.section(_("New Password")) + html.password_input('password', autocomplete = "off") + + forms.section(_("New Password Confirmation")) + html.password_input('password2', autocomplete = "off") + + if not change_pw and config.may('general.edit_profile'): + select_language(user) + + # Let the user configure how he wants to be notified + if not rulebased_notifications \ + and config.may('general.edit_notifications') \ + and user.get("notifications_enabled"): + forms.section(_("Notifications")) + html.help(_("Here you can configure how you want to be notified about host and service problems and " + "other monitoring events.")) + vs_notification_method.render_input("notification_method", user.get("notification_method")) + + if config.may('general.edit_user_attributes'): + for name, attr in userdb.get_user_attributes(): + if attr['user_editable']: + vs = attr['valuespec'] + forms.section(_u(vs.title())) + value = user.get(name, vs.default_value()) + if not attr.get("permission") or config.may(attr["permission"]): + vs.render_input("ua_" + name, value) + html.help(_u(vs.help())) + else: + html.write(vs.value_to_text(value)) + + # Save button + forms.end() + html.button("_save", _("Save")) + html.write('
    ') + html.hidden_fields() + html.end_form() + html.footer() + +#. +# .--Sampleconfig--------------------------------------------------------. +# | ____ _ __ _ | +# | / ___| __ _ _ __ ___ _ __ | | ___ ___ ___ _ __ / _(_) __ _ | +# | \___ \ / _` | '_ ` _ \| '_ \| |/ _ \/ __/ _ \| '_ \| |_| |/ _` | | +# | ___) | (_| | | | | | | |_) | | __/ (_| (_) | | | | _| | (_| | | +# | |____/ \__,_|_| |_| |_| .__/|_|\___|\___\___/|_| |_|_| |_|\__, | | +# | |_| |___/ | +# +----------------------------------------------------------------------+ +# | Functions for creating an example configuration | +# '----------------------------------------------------------------------' + +# Create a very basic sample configuration, but only if none of the +# files that we will create already exists. That is e.g. the case +# after an update from an older version where no sample config had +# been created. +def create_sample_config(): + if os.path.exists(multisite_dir + "hosttags.mk") \ + or os.path.exists(root_dir + "rules.mk") \ + or os.path.exists(root_dir + "groups.mk") \ + or os.path.exists(root_dir + "notifications.mk") \ + or os.path.exists(root_dir + "global.mk"): + return - varname = html.var("varname") - rulespec = g_rulespecs[varname] + # Global configuration settings + save_configuration_settings({ + "use_new_descriptions_for": [ + "df", + "df_netapp", + "df_netapp32", + "esx_vsphere_datastores", + "hr_fs", + "vms_diskstat.df", + "zfsget", + "ps", + "ps.perf", + "wmic_process", + "services", + "logwatch", + "cmk-inventory", + "hyperv_vms", + ], + "inventory_check_interval": 120, + "enable_rulebased_notifications": True, + }) - if phase == "title": - return _("%s rule %s") % (new and _("New") or _("Edit"), rulespec["title"]) - elif phase == "buttons": - var_list = [("mode", "edit_ruleset"), ("varname", varname), ("host", html.var("host",""))] - if html.var("item"): - var_list.append( ("item", html.var("item")) ) - html.context_button(_("Abort"), make_link(var_list), "abort") - return + # A contact group where everyone is member of + groups = { + "contact" : { 'all' : {'alias': u'Everybody'} }, + } + save_group_information(groups) - folder = html.has_var("_new_host_rule") and g_folder or g_folders[html.var("rule_folder")] - rulesets = load_rulesets(folder) - rules = rulesets[varname] + # Basic setting of host tags + wato_host_tags = \ + [('agent', + u'Agent type', + [('cmk-agent', u'Check_MK Agent (Server)', ['tcp']), + ('snmp-only', u'SNMP (Networking device, Appliance)', ['snmp']), + ('snmp-v1', u'Legacy SNMP device (using V1)', ['snmp']), + ('snmp-tcp', u'Dual: Check_MK Agent + SNMP', ['snmp', 'tcp']), + ('ping', u'No Agent', [])]), + ('criticality', + u'Criticality', + [('prod', u'Productive system', []), + ('critical', u'Business critical', []), + ('test', u'Test system', []), + ('offline', u'Do not monitor this host', [])]), + ('networking', + u'Networking Segment', + [('lan', u'Local network (low latency)', []), + ('wan', u'WAN (high latency)', []), + ('dmz', u'DMZ (low latency, secure access)', [])])] - if new: - host = None - item = NO_ITEM - if html.has_var("_new_host_rule"): - host = html.var("host") - item = html.has_var("item") and mk_eval(html.var("item")) or NO_ITEM - try: - rule = create_rule(rulespec, host, item) - except Exception, e: - if phase != "action": - html.message(_("Cannot create rule: %s") % e) - return - rulenr = len(rules) - else: - rulenr = int(html.var("rulenr")) - rule = rules[rulenr] + wato_aux_tags = \ + [('snmp', u'monitor via SNMP'), + ('tcp', u'monitor via Check_MK 
Agent')] - valuespec = rulespec.get("valuespec") - value, tag_specs, host_list, item_list, rule_options = parse_rule(rulespec, rule) + save_hosttags(wato_host_tags, wato_aux_tags) - if phase == "action": - if html.check_transaction(): - # Additional options - rule_options = vs_rule_options.from_html_vars("options") - vs_rule_options.validate_value(rule_options, "options") + # Rules that match the upper host tag definition + rulesets = { + # Make the tag 'offline' remove hosts from the monitoring + 'only_hosts': [ + (['!offline'], ['@all'], + {'comment': u'Do not monitor hosts with the tag "offline"'})], - # CONDITION - tag_specs, host_list, item_list = get_rule_conditions(rulespec) - new_rule_folder = g_folders[html.var("new_rule_folder")] + # Rule for WAN hosts with adapted PING levels + 'ping_levels': [ + ({'loss': (80.0, 100.0), + 'packets': 6, + 'rta': (1500.0, 3000.0), + 'timeout': 20}, ['wan'], ['@all'], + {'comment': u'Allow longer round trip times when pinging WAN hosts'})], - # Check permissions on folders - check_folder_permissions(folder, "write", True) - check_folder_permissions(new_rule_folder, "write", True) + # All hosts should use SNMP v2c if not specially tagged + 'bulkwalk_hosts': [ + (['snmp', '!snmp-v1'], ['@all'], {'comment': u'Hosts with the tag "snmp-v1" must not use bulkwalk'})], - # VALUE - if valuespec: - value = get_edited_value(valuespec) - else: - value = html.var("value") == "yes" - rule = construct_rule(rulespec, value, tag_specs, host_list, item_list, rule_options) - if new_rule_folder == folder: - if new: - rules.append(rule) - else: - rules[rulenr] = rule - save_rulesets(folder, rulesets) - mark_affected_sites_dirty(folder) + # Put all hosts and the contact group 'all' + 'host_contactgroups': [ + ('all', [], ALL_HOSTS, {'comment': u'Put all hosts into the contact group "all"'} ), + ], - if new: - log_pending(AFFECTED, None, "edit-rule", _("Created new rule in ruleset %s in folder %s") % - (rulespec["title"], new_rule_folder["title"])) - else: - log_pending(AFFECTED, None, "edit-rule", _("Changed properties of rule %s in folder %s") % - (rulespec["title"], new_rule_folder["title"])) - else: # Move rule to new folder - if not new: - del rules[rulenr] - save_rulesets(folder, rulesets) - rulesets = load_rulesets(new_rule_folder) - rules = rulesets.setdefault(varname, []) - rules.append(rule) - save_rulesets(new_rule_folder, rulesets) - mark_affected_sites_dirty(folder) - mark_affected_sites_dirty(new_rule_folder) - log_pending(AFFECTED, None, "edit-rule", _("Changed properties of rule %s, moved rule from " - "folder %s to %s") % (rulespec["title"], folder["title"], - new_rule_folder["title"])) - else: - return "edit_ruleset" + # Interval for HW/SW-Inventory check + 'extra_service_conf:check_interval': [ + ( 1440, [], ALL_HOSTS, [ "Check_MK HW/SW Inventory$" ], {'comment': u'Restrict HW/SW-Inventory to once a day'} ), + ], + } - return ("edit_ruleset", _("%s rule in ruleset '%s' in folder %s") % - (new and _("Created new") or _("Edited"), rulespec["title"], new_rule_folder["title"])) + save_rulesets(g_root_folder, rulesets) - if rulespec.get("help"): - html.write("
    " + rulespec["help"] + "
    ") + notification_rules = [{ + 'allow_disable' : True, + 'contact_all' : False, + 'contact_all_with_email' : False, + 'contact_object' : True, + 'description' : 'Notify all contacts of a host/service via HTML email', + 'disabled' : False, + 'notify_plugin' : ('mail', {}), + }] + save_notification_rules(notification_rules) - html.begin_form("rule_editor", method="POST") - # Conditions - forms.header(_("Conditions")) + # Make sure the host tag attributes are immediately declared! + config.wato_host_tags = wato_host_tags + config.wato_aux_tags = wato_aux_tags - # Rule folder - forms.section(_("Folder")) - html.select("new_rule_folder", folder_selection(g_root_folder), folder[".path"]) - html.help(_("The rule is only applied to hosts directly in or below this folder.")) + # Global settings + use_new_descriptions_for = [ "df", "ps" ] - # Host tags - forms.section(_("Host tags")) - render_condition_editor(tag_specs) - html.help(_("The rule will only be applied to hosts fullfulling all of " - "of the host tag conditions listed here, even if they appear " - "in the list of explicit host names.")) + # Initial baking of agents (when bakery is available) + if 'bake_agents' in globals(): + try: + bake_agents() + except: + pass # silently ignore building errors here - # Explicit hosts / ALL_HOSTS - forms.section(_("Explicit hosts")) - div_id = "div_all_hosts" +#. +# .--Pattern Editor------------------------------------------------------. +# | ____ _ _ _____ _ _ _ | +# | | _ \ __ _| |_| |_ ___ _ __ _ __ | ____|__| (_) |_ ___ _ __ | +# | | |_) / _` | __| __/ _ \ '__| '_ \ | _| / _` | | __/ _ \| '__| | +# | | __/ (_| | |_| || __/ | | | | | | |__| (_| | | || (_) | | | +# | |_| \__,_|\__|\__\___|_| |_| |_| |_____\__,_|_|\__\___/|_| | +# | | +# +----------------------------------------------------------------------+ +# | | +# '----------------------------------------------------------------------' - checked = host_list != ALL_HOSTS - html.checkbox("explicit_hosts", checked, onclick="valuespec_toggle_option(this, %r)" % div_id, - label = _("Specify explicit host names")) - html.write('
    ' % ( - div_id, not checked and "none" or "")) - negate_hosts = len(host_list) > 0 and host_list[0].startswith("!") +def mode_pattern_editor(phase): + import logwatch - explicit_hosts = [ h.strip("!") for h in host_list if h != ALL_HOSTS[0] ] - ListOfStrings( - orientation = "horizontal", - valuespec = TextAscii(size = 30)).render_input("hostlist", explicit_hosts) + # 1. Variablen auslesen + hostname = html.var('host', '') + item = html.var('file', '') + match_txt = html.var('match', '') + master_url = html.var('master_url', '') - html.checkbox("negate_hosts", negate_hosts, label = - _("Negate: make rule apply for all but the above hosts")) - html.write("
    ") - html.help(_("You can enter a number of explicit host names that rule should or should " - "not apply to here. Leave this option disabled if you want the rule to " - "apply for all hosts specified by the given tags.")) + hosts = load_hosts(g_folder) + host = hosts.get(hostname) - # Itemlist - itemtype = rulespec["itemtype"] - if itemtype: - if itemtype == "service": - forms.section(_("Services")) - html.help(_("Specify a list of service patterns this rule shall apply to. " - "The patterns must match the beginning of the service " - "in question. Adding a $ to the end forces an excact " - "match. Pattern use regular expressions. A .* will " - "match an arbitrary text.")) - elif itemtype == "checktype": - forms.section(_("Check types")) - elif itemtype == "item": - forms.section(rulespec["itemname"].title()) - if rulespec["itemhelp"]: - html.help(rulespec["itemhelp"]) - else: - html.help(_("You can make the rule apply only on certain services of the " - "specified hosts. Do this by specifying explicit items to mach " - "here. Note: the match is done on the beginning " - "of the item in question. Regular expressions are interpreted, " - "so appending a $ will force an exact match.")) + if phase == "title": + if not hostname and not item: + return _("Logfile Pattern Analyzer") + elif not hostname: + return _("Logfile Patterns of Logfile %s on all Hosts") % (item) + elif not item: + return _("Logfile Patterns of Host %s") % (hostname) else: - raise MKGeneralException("Invalid item type '%s'" % itemtype) + return _("Logfile Patterns of Logfile %s on Host %s") % (item, hostname) - if itemtype: - checked = html.get_checkbox("explicit_services") - if checked == None: # read from rule itself - checked = len(item_list) == 0 or item_list[0] != "" - div_id = "item_list" - html.checkbox("explicit_services", checked, onclick="valuespec_toggle_option(this, %r)" % div_id, - label = _("Specify explicit values")) - html.write('
    ' % ( - div_id, not checked and "none" or "")) - itemenum = rulespec["itemenum"] - if itemenum: - value = [ x.rstrip("$") for x in item_list ] - itemspec = ListChoice(choices = itemenum, columns = 3) - itemspec.render_input("item", value) + elif phase == "buttons": + html.context_button(_("Main Menu"), make_link([("mode", "main")]), "home") + if host: + if item: + title = _("Show Logfile") else: - ListOfStrings( - orientation = "horizontal", - valuespec = TextAscii(size = 30)).render_input("itemlist", item_list) + title = _("Host Logfiles") - html.help(_("The entries here are regular expressions to match the beginning. " - "Add a $ for an exact match. An arbitrary substring is matched " - "with .*
Please note that on Windows systems any backslashes need to be escaped. " - "For example C:\\\\tmp\\\\message.log")) - html.write("
    ") + master_url = '' + if config.is_multisite(): + master_url = '&master_url=' + defaults.url_prefix + 'check_mk/' + html.context_button(title, "logwatch.py?host=%s&file=%s%s" % + (html.urlencode(hostname), html.urlencode(item), master_url), 'logwatch') - # Value - forms.header(_("Value")) - if valuespec: - value = rule[0] - forms.section() - try: - valuespec.validate_datatype(value, "ve") - valuespec.render_input("ve", value) - except Exception, e: - if config.debug: - raise - else: - html.show_warning(_('Unable to read current options of this rule. Falling back to ' - 'default values. When saving this rule now, your previous settings ' - 'will be overwritten. Problem was: %s.') % e) + html.context_button(_('Edit Logfile Rules'), make_link([ + ('mode', 'edit_ruleset'), + ('varname', 'logwatch_rules') + ]), + 'edit' + ) - # In case of validation problems render the input with default values - valuespec.render_input("ve", valuespec.default_value()) + return - valuespec.set_focus("ve") - else: - forms.section("") - for posneg, img in [ ("positive", "yes"), ("negative", "no")]: - val = img == "yes" - html.write(' ' % img) - html.radiobutton("value", img, value == val, _("Make the outcome of the ruleset %s
    ") % posneg) + if phase == "action": + return - # Addiitonal rule options - vs_rule_options.render_input("options", rule_options) + html.help(_('On this page you can test the defined logfile patterns against a custom text, ' + 'for example a line from a logfile. Using this dialog it is possible to analyze ' + 'and debug your whole set of logfile patterns.')) + # Render the tryout form + html.begin_form('try') + forms.header(_('Try Pattern Match')) + forms.section(_('Hostname')) + html.text_input('host') + forms.section(_('Logfile')) + html.text_input('file') + forms.section(_('Text to match')) + html.help(_('You can insert some text (e.g. a line of the logfile) to test the patterns defined ' + 'for this logfile. All patterns for this logfile are listed below. Matching patterns ' + 'will be highlighted after clicking the "Try out" button.') + ) + html.text_input('match', cssclass = 'match', size=100) forms.end() - html.button("save", _("Save")) + html.button('_try', _('Try out')) + html.del_var('folder') # Never hand over the folder here html.hidden_fields() - vs_rule_options.set_focus("options") html.end_form() -# Render HTML input fields for editing a tag based condition -def render_condition_editor(tag_specs): - if len(config.wato_aux_tags) + len(config.wato_host_tags) == 0: - html.write(_("You have not configured any host tags.")) + # Bail out if the given hostname does not exist + if hostname and not host: + html.add_user_error('host', _('The given host does not exist.')) + html.show_user_errors() return - # Determine current (default) setting of tag by looking - # into tag_specs (e.g. [ "snmp", "!tcp", "test" ] ) - def current_tag_setting(choices): - default_tag = None - ignore = True - for t in tag_specs: - if t[0] == '!': - n = True - t = t[1:] - else: - n = False - if t in [ x[0] for x in choices ]: - default_tag = t - ignore = False - negate = n - if ignore: - deflt = "ignore" - elif negate: - deflt = "isnot" + varname = 'logwatch_rules' + rulespec = g_rulespecs[varname] + all_rulesets = load_all_rulesets() + ruleset = all_rulesets.get(varname) + + html.write('
<h3>%s</h3>
    ' % _('Logfile Patterns')) + if not ruleset: + html.write( + "
    " + + _('There are no logfile patterns defined. You may create ' + 'logfile patterns using the Rule Editor.') % make_link([ + ('mode', 'edit_ruleset'), + ('varname', 'logwatch_rules') + ]) + + "
    " + ) + + # Loop all rules for this ruleset + already_matched = False + last_folder = None + for rulenr in range(0, len(ruleset)): + folder, rule = ruleset[rulenr] + if folder != last_folder: + rel_rulenr = 0 + last_folder = folder else: - deflt = "is" - return default_tag, deflt + rel_rulenr += 1 + last_in_group = rulenr == len(ruleset) - 1 or ruleset[rulenr+1][0] != folder + pattern_list, tag_specs, host_list, item_list, rule_options = parse_rule(rulespec, rule) - # Show dropdown with "is/isnot/ignore" and beginning - # of div that is switched visible by is/isnot - def tag_condition_dropdown(tagtype, deflt, id): - html.write("") - html.select(tagtype + "_" + id, [ - ("ignore", _("ignore")), - ("is", _("is")), - ("isnot", _("isnot"))], deflt, - onchange="valuespec_toggle_dropdownn(this, 'tag_sel_%s');" % id) - html.write("") - if html.form_submitted(): - div_is_open = html.var(tagtype + "_" + id, "ignore") != "ignore" + # Check if this rule applies to the given host/service + if hostname: + # If hostname (and maybe filename) try match it + reason = rule_matches_host_and_item( + rulespec, tag_specs, host_list, item_list, folder, g_folder, hostname, item) + elif item: + # If only a filename is given + reason = False + for i in item_list: + if re.match(i, str(item)): + reason = True + break else: - div_is_open = deflt != "ignore" - html.write('
    ' % ( - id, not div_is_open and "display: none;" or "")) + # If no host/file given match all rules + reason = True - # Show main tags - html.write("") - if len(config.wato_host_tags): - for entry in config.wato_host_tags: - id, title, choices = entry[:3] - html.write("" % title) - default_tag, deflt = current_tag_setting(choices) - tag_condition_dropdown("tag", deflt, id) - html.select("tagvalue_" + id, - [t[0:2] for t in choices if t[0] != None], deflt=default_tag) - html.write("") - html.write("") + match_img = '' + if reason == True: + # Applies to the given host/service + reason_class = 'reason' + # match_title/match_img are set below per pattern + else: + # not matching + reason_class = 'noreason' + match_img = 'nmatch' + match_title = reason - # And auxiliary tags - if len(config.wato_aux_tags): - for id, title in config.wato_aux_tags: - html.write("" % title) - default_tag, deflt = current_tag_setting([(id, title)]) - tag_condition_dropdown("auxtag", deflt, id) - html.write(" " + _("set")) - html.write("") - html.write("") + html.begin_foldable_container("rule", str(rulenr), True, "Rule #%d" % (rulenr + 1), indent = False) + html.write('
    %s:  
    %s:  
    ') + html.write('') + html.write('') + html.write('') + html.write('') + html.write('') + html.write('\n') + # Each rule can hold no, one or several patterns. Loop them all here + odd = "odd" + for state, pattern, comment in pattern_list: + match_class = '' + disp_match_txt = '' + if reason == True: + matched = re.search(pattern, match_txt) + if matched: - html.write("
<tr><th>' + _('Match') + '</th><th>' + _('State') + '</th><th>' + _('Pattern') + '</th><th>' + _('Comment') + '</th><th>' + _('Matched line') + '</th></tr>
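The pattern loop around this table is first-match-wins: the first pattern that re.search() finds in the tested line decides its state; later matching patterns are shown but flagged as having matched too late. A condensed sketch of that logic, assuming the same (state, pattern, comment) triples:

    import re

    def classify_line(pattern_list, match_txt):
        # The first matching pattern decides the state; the rest are
        # informational only.
        results = []
        already_matched = False
        for state, pattern, comment in pattern_list:
            if re.search(pattern, match_txt):
                results.append((state, pattern,
                                already_matched and 'match' or 'match first'))
                already_matched = True
            else:
                results.append((state, pattern, 'no match'))
        return results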
    ") + # Prepare highlighted search txt + match_start = matched.start() + match_end = matched.end() + disp_match_txt = match_txt[:match_start] \ + + '' + match_txt[match_start:match_end] + '' \ + + match_txt[match_end:] + if already_matched == False: + # First match + match_class = 'match first' + match_img = 'match' + match_title = _('This logfile pattern matches first and will be used for ' + 'defining the state of the given line.') + already_matched = True + else: + # subsequent match + match_class = 'match' + match_img = 'imatch' + match_title = _('This logfile pattern matches but another matched first.') + else: + match_img = 'nmatch' + match_title = _('This logfile pattern does not match the given string.') -# Retrieve current tag condition settings from HTML variables -def get_tag_conditions(): - # Main tags - tag_list = [] - for entry in config.wato_host_tags: - id, title, tags = entry[:3] - mode = html.var("tag_" + id) - tagvalue = html.var("tagvalue_" + id) - if mode == "is": - tag_list.append(tagvalue) - elif mode == "isnot": - tag_list.append("!" + tagvalue) + html.write('' % (odd, reason_class)) + html.write('' % \ + (match_title, match_img)) - # Auxiliary tags - for id, title in config.wato_aux_tags: - mode = html.var("auxtag_" + id) - if mode == "is": - tag_list.append(id) - elif mode == "isnot": - tag_list.append("!" + id) + cls = '' + if match_class == 'match first': + cls = ' class="svcstate state%d"' % logwatch.level_state(state) + html.write('%s' % (cls, logwatch.level_name(state))) + html.write('%s' % pattern) + html.write('%s' % comment) + html.write('%s' % disp_match_txt) + html.write('\n') - return tag_list + odd = odd == "odd" and "even" or "odd" + html.write('' % odd) + edit_url = make_link([ + ("mode", "edit_rule"), + ("varname", varname), + ("rulenr", rel_rulenr), + ("host", hostname), + ("item", mk_repr(item)), + ("rule_folder", folder[".path"])]) + html.icon_button(edit_url, _("Edit this rule"), "edit") + html.write('\n') -def save_rulesets(folder, rulesets): - make_nagios_directory(root_dir) - path = root_dir + '/' + folder['.path'] + '/' + "rules.mk" - out = create_user_file(path, "w") - out.write("# Written by WATO\n# encoding: utf-8\n\n") + html.write('\n') + html.end_foldable_container() - for varname, rulespec in g_rulespecs.items(): - ruleset = rulesets.get(varname) - if not ruleset: - continue # don't save empty rule sets +#. +# .--BI Rules------------------------------------------------------------. 
+# | ____ ___ ____ _ | +# | | __ )_ _| | _ \ _ _| | ___ ___ | +# | | _ \| | | |_) | | | | |/ _ \/ __| | +# | | |_) | | | _ <| |_| | | __/\__ \ | +# | |____/___| |_| \_\\__,_|_|\___||___/ | +# | | +# +----------------------------------------------------------------------+ +# | Editor for the Rules of BI | +# '----------------------------------------------------------------------' +def mode_bi_rules(phase): + if phase == "title": + return _("BI - Business Intelligence") - if ':' in varname: - dictname, subkey = varname.split(':') - varname = '%s[%r]' % (dictname, subkey) - out.write("\n%s.setdefault(%r, [])\n" % (dictname, subkey)) - else: - if rulespec["optional"]: - out.write("\nif %s == None:\n %s = []\n" % (varname, varname)) + aggregations, aggregation_rules = load_bi_rules() - out.write("\n%s = [\n" % varname) - for rule in ruleset: - save_rule(out, folder, rulespec, rule) - out.write("] + %s\n\n" % varname) + if phase == "buttons": + html.context_button(_("Main Menu"), make_link([("mode", "main")]), "home") + if aggregation_rules: + html.context_button(_("New Aggregation"), + make_link([("mode", "bi_edit_aggregation")]), "new") + html.context_button(_("New Rule"), + make_link([("mode", "bi_edit_rule")]), "new") + return -def save_rule(out, folder, rulespec, rule): - out.write(" ( ") - value, tag_specs, host_list, item_list, rule_options = parse_rule(rulespec, rule) - if rulespec["valuespec"]: - out.write(repr(value) + ", ") - elif not value: - out.write("NEGATE, ") + if phase == "action": + if html.var("_del_rule"): + ruleid = html.var("_del_rule") + c = wato_confirm(_("Confirm rule deletion"), + _("Do you really want to delete the rule with " + "the id %s?") % ruleid) + if c: + del aggregation_rules[ruleid] + log_pending(SYNC, None, "bi-delete-rule", _("Deleted BI rule with id %s") % ruleid) + save_bi_rules(aggregations, aggregation_rules) + elif c == False: # not yet confirmed + return "" + else: + return None # browser reload + elif html.var("_del_aggr"): + nr = int(html.var("_del_aggr")) + c = wato_confirm(_("Confirm aggregation deletion"), + _("Do you really want to delete the aggregation number %s?") % (nr+1)) + if c: + del aggregations[nr] + log_pending(SYNC, None, "bi-delete-aggregation", _("Deleted BI aggregation number %d") % (nr+1)) + save_bi_rules(aggregations, aggregation_rules) + elif c == False: # not yet confirmed + return "" + else: + return None # browser reload + + return + + if not aggregations and not aggregation_rules: + menu_items = [ + ("bi_edit_rule", _("Create aggregation rule"), "new", "bi_rules", + _("Rules are the nodes in BI aggregations. 
" + "Each aggregation has one rule as its root.")) + ] + render_main_menu(menu_items) + return - out.write("[") - for tag in tag_specs: - out.write(repr(tag)) - out.write(", ") - if folder != g_root_folder: - out.write("'/' + FOLDER_PATH + '/+'") - out.write("], ") - if len(host_list) > 0 and host_list[-1] == ALL_HOSTS[0]: - if len(host_list) > 1: - out.write(repr(host_list[:-1])) - out.write(" + ALL_HOSTS") - else: - out.write("ALL_HOSTS") - else: - out.write(repr(host_list)) - if rulespec["itemtype"]: - out.write(", ") - if item_list == ALL_SERVICES: - out.write("ALL_SERVICES") - else: - out.write(repr(item_list)) + table.begin("bi_aggr", _("Aggregations")) + for nr, aggregation in enumerate(aggregations): + table.row() + table.cell(_("Actions"), css="buttons") + edit_url = make_link([("mode", "bi_edit_aggregation"), ("id", nr)]) + html.icon_button(edit_url, _("Edit this aggregation"), "edit") + delete_url = make_action_link([("mode", "bi_rules"), ("_del_aggr", nr)]) + html.icon_button(delete_url, _("Delete this aggregation"), "delete") + table.cell(_("Nr."), nr+1, css="number") + table.cell("", css="buttons") + if aggregation["disabled"]: + html.icon(_("This aggregation is currently disabled."), "disabled") + if aggregation["single_host"]: + html.icon(_("This aggregation covers only data from a single host."), "host") + table.cell(_("Groups"), ", ".join(aggregation["groups"])) + ruleid, description = bi_called_rule(aggregation["node"]) + edit_url = make_link([("mode", "bi_edit_rule"), ("id", ruleid)]) + table.cell(_("Rule"), '%s' % (edit_url, ruleid)) + table.cell(_("Note"), description) - if rule_options: - out.write(", %r" % rule_options) + table.end() - out.write(" ),\n") + rules = aggregation_rules.items() + # Sort rules according to nesting level, and then to id + rules_refs = [ (ruleid, rule, count_bi_rule_references(aggregations, aggregation_rules, ruleid)) + for (ruleid, rule) in rules ] + rules_refs.sort(cmp = lambda a,b: cmp(a[2][2], b[2][2]) or cmp(a[1]["title"], b[1]["title"])) + table.begin("bi_rules", _("Rules")) + for ruleid, rule, (aggr_refs, rule_refs, level) in rules_refs: + table.row() + table.cell(_("Actions"), css="buttons") + edit_url = make_link([("mode", "bi_edit_rule"), ("id", ruleid)]) + html.icon_button(edit_url, _("Edit this rule"), "edit") + if rule_refs == 0: + tree_url = make_link([("mode", "bi_rule_tree"), ("id", ruleid)]) + html.icon_button(tree_url, _("This is a top-level rule. 
Show rule tree"), "aggr") + refs = aggr_refs + rule_refs + if refs == 0: + delete_url = make_action_link([("mode", "bi_rules"), ("_del_rule", ruleid)]) + html.icon_button(delete_url, _("Delete this rule"), "delete") + table.cell(_("Lvl"), level, css="number") + table.cell(_("ID"), '%s' % (edit_url, ruleid)) + table.cell(_("Parameters"), " ".join(rule["params"])) + table.cell(_("Title"), rule["title"]) + table.cell(_("Aggregation"), "/".join([rule["aggregation"][0]] + map(str, rule["aggregation"][1]))) + table.cell(_("Nodes"), len(rule["nodes"]), css="number") + table.cell(_("Usages"), refs, css="number") + table.cell(_("Comment"), rule.get("comment", "")) + table.end() +def mode_bi_rule_tree(phase): + ruleid = html.var("id") + if phase == "title": + return _("BI - Rule Tree of") + " " + ruleid -def load_rulesets(folder): - # TODO: folder berücksichtigen - path = root_dir + "/" + folder[".path"] + "/" + "rules.mk" - vars = { - "ALL_HOSTS" : ALL_HOSTS, - "ALL_SERVICES" : [ "" ], - "NEGATE" : NEGATE, - "FOLDER_PATH" : folder[".path"], - "FILE_PATH" : folder[".path"] + "/hosts.mk", - } - # Prepare empty rulesets so that rules.mk has something to - # append to + aggregations, aggregation_rules = load_bi_rules() - for varname, ruleset in g_rulespecs.items(): - if ':' in varname: - dictname, subkey = varname.split(":") - vars[dictname] = {} - else: - vars[varname] = [] + if phase == "buttons": + html.context_button(_("Main Menu"), make_link([("mode", "main")]), "home") + html.context_button(_("Back"), make_link([("mode", "bi_rules")]), "back") + return - try: - execfile(path, vars, vars) - except: - pass + if phase == "action": + return - # Extract only specified rule variables - rulevars = {} - for ruleset in g_rulespecs.values(): - varname = ruleset["varname"] - # handle extra_host_conf:max_check_attempts - if ':' in varname: - dictname, subkey = varname.split(":") - if dictname in vars: - dictionary = vars[dictname] - if subkey in dictionary: - rulevars[varname] = dictionary[subkey] - # If this ruleset is not defined in rules.mk use empty list. - if varname not in rulevars: - rulevars[varname] = [] + aggr_refs, rule_refs, level = count_bi_rule_references(aggregations, aggregation_rules, ruleid) + if rule_refs == 0: + render_rule_tree(aggregation_rules, ruleid) + +def render_rule_tree(aggregation_rules, ruleid): + rule = aggregation_rules[ruleid] + html.write('
    ') + edit_url = make_link([("mode", "bi_edit_rule"), ("id", ruleid)]) + html.write('' % edit_url) + html.icon(rule.get("comment", rule["title"]), "aggr") + html.write(" " + ruleid + "") + html.write('
    ') + for node in rule["nodes"]: + r = bi_called_rule(node) + if r: + subnode_id = r[0] + html.write('
    ') + html.write('
    ') + render_rule_tree(aggregation_rules, subnode_id) + html.write('
    ') + html.write('
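render_rule_tree() recurses over the rule's nodes, using bi_called_rule() (defined below) to find which sub-rule a node invokes. Stripped of the HTML output, the traversal is just a depth-first walk; a sketch:

    def walk_rule_tree(aggregation_rules, ruleid, depth=0):
        # Depth-first dump of the call tree rooted at ruleid. Assumes the
        # rules are acyclic, which mode_bi_edit_rule enforces on save.
        print("  " * depth + ruleid)
        for node in aggregation_rules[ruleid]["nodes"]:
            r = bi_called_rule(node)   # -> (subrule_id, info) or None
            if r:
                walk_rule_tree(aggregation_rules, r[0], depth + 1)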
    ') - else: - if varname in vars: - rulevars[varname] = vars[varname] - return rulevars -# Load all rules of all folders into a dictionary that -# has the rules' varnames as keys and a list of (folder, rule) -# as values. -def load_rulesets_recursively(folder, all_rulesets): - for subfolder in folder[".folders"].values(): - load_rulesets_recursively(subfolder, all_rulesets) - rs = load_rulesets(folder) - for varname, rules in rs.items(): - all_rulesets.setdefault(varname, []) - all_rulesets[varname] += [ (folder, rule) for rule in rules ] +def bi_called_rule(node): + if node[0] == "call": + if node[1][1]: + args = _("with arguments: %s") % ", ".join(node[1][1]) + else: + args = _("without arguments") + return node[1][0], _("Explicit call ") + args + elif node[0] == "foreach_host": + subnode = node[1][-1] + if subnode[0] == 'call': + if node[1][0] == 'host': + info = _("Called for all hosts...") + elif node[1][0] == 'child': + info = _("Called for each child of...") + else: + info = _("Called for each parent of...") + return subnode[1][0], info + elif node[0] == "foreach_service": + subnode = node[1][-1] + if subnode[0] == 'call': + return subnode[1][0], _("Called for each service...") + +def count_bi_rule_references(aggregations, aggregation_rules, ruleid): + aggr_refs = 0 + for aggregation in aggregations: + called_rule_id, info = bi_called_rule(aggregation["node"]) + if called_rule_id == ruleid: + aggr_refs += 1 + + level = 0 + rule_refs = 0 + for rid, rule in aggregation_rules.items(): + l = bi_rule_uses_rule(aggregation_rules, rule, ruleid) + level = max(l, level) + if l == 1: + rule_refs += 1 + + return aggr_refs, rule_refs, level + +# Checks if the rule 'rule' uses either directly +# or indirectly the rule with the id 'ruleid'. In +# case of success, returns the nesting level +def bi_rule_uses_rule(aggregation_rules, rule, ruleid, level=0): + for node in rule["nodes"]: + r = bi_called_rule(node) + if r: + ru_id, info = r + if ru_id == ruleid: # Rule is directly being used + return level + 1 + # Check if lower rules use it + else: + l = bi_rule_uses_rule(aggregation_rules, aggregation_rules[ru_id], ruleid, level + 1) + if l: + return l + return False -def load_all_rulesets(): - all_rulesets = {} - load_rulesets_recursively(g_root_folder, all_rulesets) - return all_rulesets +# ValueSpec for editing a tag-condition +class HostTagCondition(ValueSpec): + def __init__(self, **kwargs): + ValueSpec.__init__(self, **kwargs) -g_rulegroups = {} -def register_rulegroup(group, title, help): - g_rulegroups[group] = (title, help) + def render_input(self, varprefix, value): + render_condition_editor(value, varprefix) -g_rulespecs = {} -g_rulespec_group = {} # for conveniant lookup -g_rulespec_groups = [] # for keeping original order -def register_rule(group, varname, valuespec = None, title = None, - help = None, itemtype = None, itemname = None, - itemhelp = None, itemenum = None, - match = "first", optional = False): - ruleset = { - "group" : group, - "varname" : varname, - "valuespec" : valuespec, - "itemtype" : itemtype, # None, "service", "checktype" or "checkitem" - "itemname" : itemname, # e.g. 
"mount point" - "itemhelp" : itemhelp, # a description of the item, only rarely used - "itemenum" : itemenum, # possible fixed values for items - "match" : match, - "title" : title or valuespec.title(), - "help" : help or valuespec.help(), - "optional" : optional, # rule may be None (like only_hosts) - } + def from_html_vars(self, varprefix): + return get_tag_conditions(varprefix=varprefix) - # Register group - if group not in g_rulespec_group: - rulesets = [ ruleset ] - g_rulespec_groups.append((group, rulesets)) - g_rulespec_group[group] = rulesets - else: - g_rulespec_group[group].append(ruleset) + def canonical_value(self): + return [] - g_rulespecs[varname] = ruleset + def value_to_text(self, value): + return "|".join(value) -# Special version of register_rule, dedicated to checks. This is not really -# modular here, but we cannot put this function into the plugins file because -# the order is not defined there. -def register_check_parameters(subgroup, checkgroup, title, valuespec, itemspec, matchtype, has_inventory=True): - # Register rule for inventorized checks - if valuespec and has_inventory: # would be useless rule if check has no parameters - itemenum = None - if itemspec: - itemtype = "item" - itemname = itemspec.title() - itemhelp = itemspec.help() - if isinstance(itemspec, DropdownChoice): - itemenum = itemspec._choices - else: - itemtype = None - itemname = None - itemhelp = None + def validate_datatype(self, value, varprefix): + if type(value) != list: + raise MKUserError(varprefix, _("The list of host tags must be a list, but " + "is %r") % type(value)) + for x in value: + if type(x) != str: + raise MKUserError(varprefix, _("The list of host tags must only contain strings " + "but also contains %r") % x) - register_rule( - "checkparams/" + subgroup, - varname = "checkgroup_parameters:%s" % checkgroup, - title = title, - valuespec = valuespec, - itemtype = itemtype, itemname = itemname, - itemhelp = itemhelp, - itemenum = itemenum, - match = matchtype) + def validate_value(self, value, varprefix): + pass - # Register rule for static checks - elements = [ - CheckTypeGroupSelection( - checkgroup, - title = _("Checktype"), - help = _("Please choose the check plugin")) ] - if itemspec: - elements.append(itemspec) - else: - # In case of static checks without check-item, add the fixed - # valuespec to add "None" as second element in the tuple - elements.append(FixedValue( - None, - totext = '', - )) - if not valuespec: - valuespec =\ - FixedValue(None, - help = _("This check has no parameters."), - totext = "") - if not valuespec.title(): - valuespec._title = _("Parameters") - elements.append(valuespec) - register_rule( - "static/" + subgroup, - "static_checks:%s" % checkgroup, - title = title, - valuespec = Tuple( - title = valuespec.title(), - elements = elements, - ), - match = "all") -# -# User profile edit page -# The user can edit the own profile -# -def select_language(user_language): - languages = [ l for l in get_languages() if not config.hide_language(l[0]) ] - inactive = user_language != '' +# We need to replace the BI constants internally with something +# that we can replace back after writing the BI-Rules out +# with pprint.pformat +bi_constants = { + 'ALL_HOSTS' : 'ALL_HOSTS-f41e728b-0bce-40dc-82ea-51091d034fc3', + 'HOST_STATE' : 'HOST_STATE-f41e728b-0bce-40dc-82ea-51091d034fc3', + 'HIDDEN' : 'HIDDEN-f41e728b-0bce-40dc-82ea-51091d034fc3', + 'FOREACH_HOST' : 'FOREACH_HOST-f41e728b-0bce-40dc-82ea-51091d034fc3', + 'FOREACH_CHILD' : 
'FOREACH_CHILD-f41e728b-0bce-40dc-82ea-51091d034fc3', + 'FOREACH_PARENT' : 'FOREACH_PARENT-f41e728b-0bce-40dc-82ea-51091d034fc3', + 'FOREACH_SERVICE' : 'FOREACH_SERVICE-f41e728b-0bce-40dc-82ea-51091d034fc3', + 'REMAINING' : 'REMAINING-f41e728b-0bce-40dc-82ea-51091d034fc3', + 'DISABLED' : 'DISABLED-f41e728b-0bce-40dc-82ea-51091d034fc3', + 'HARD_STATES' : 'HARD_STATES-f41e728b-0bce-40dc-82ea-51091d034fc3', +} - if languages: - forms.section(_("Language"), - checkbox = ('_set_lang', inactive, 'language')) - # html.checkbox('_set_lang', inactive, onclick = 'wato_toggle_attribute(this, \'language\')') - # html.write(" ") - default_label = _('Default: %s') % (get_language_alias(config.default_language) or _('English')) - html.write('
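The UUID-suffixed strings above are sentinels: while the rules are being edited, BI constants such as HOST_STATE are represented by unique strings so that they survive pprint.pformat(); save_bi_rules() below swaps them back into bare constant names before writing bi.mk, and load_bi_rules() executes the file with those names bound again. A toy round-trip:

    import pprint

    sentinel = bi_constants['HOST_STATE']
    node = ('srv4711', sentinel)           # in-memory representation
    text = pprint.pformat(node)
    text = text.replace("'%s'" % sentinel, 'HOST_STATE')
    # text == "('srv4711', HOST_STATE)" - valid bi.mk source, because the
    # file is later executed with HOST_STATE defined.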
    %s
    ' % - (inactive and "display: none" or "", default_label)) - html.write('
    ' % ((not inactive) and "display: none" or "")) - html.select("language", languages, user_language) - html.write("
    ") - html.help(_('Configure the default language ' - 'to be used by the user in the user interface here. If you do not check ' - 'the checkbox, then the system default will be used.
<br><br>
    ' - 'Note: currently Multisite is internationalized ' - 'but comes without any actual localisations (translations). If you want to ' - 'create you own translation, you find documentation online.') % - { "url" : "http://mathias-kettner.de/checkmk_multisite_i18n.html"} ) +# returns aggregations, aggregation_rules +def load_bi_rules(): + filename = multisite_dir + "bi.mk" + try: + vars = { "aggregation_rules" : {}, + "aggregations" : [], + "host_aggregations" : [], + } + vars.update(bi_constants) + if os.path.exists(filename): + execfile(filename, vars, vars) + else: + exec(bi_example, vars, vars) -def page_user_profile(): - if not config.user_id: - raise MKUserError(None, _('Not logged in.')) + # Convert rules from old-style tuples to new-style dicts + rules = {} + for ruleid, rule in vars["aggregation_rules"].items(): + rules[ruleid] = convert_rule_from_bi(rule, ruleid) + aggregations = [] + for aggregation in vars["aggregations"]: + aggregations.append(convert_aggregation_from_bi(aggregation, single_host = False)) + for aggregation in vars["host_aggregations"]: + aggregations.append(convert_aggregation_from_bi(aggregation, single_host = True)) + return aggregations, rules - if not config.may('general.edit_profile') and not config.may('general.change_password'): - raise MKAuthException(_("You are not allowed to edit your user profile.")) + except Exception, e: + if config.debug: + raise - success = None - if html.has_var('_save') and html.check_transaction(): - try: - users = userdb.load_users() + raise MKGeneralException(_("Cannot read configuration file %s: %s" % + (filename, e))) - # Profile edit (user options like language etc.) - if config.may('general.edit_profile'): - set_lang = html.var('_set_lang') - language = html.var('language') - # Set the users language if requested - if set_lang and language != config.get_language(): - if language == '': - language = None - # Set custom language - users[config.user_id]['language'] = language - config.user['language'] = language - - else: - # Remove the customized language - if 'language' in users[config.user_id]: - del users[config.user_id]['language'] - if 'language' in config.user: - del config.user['language'] - - # load the new language - load_language(config.get_language()) - load_all_plugins() - - user = users.get(config.user_id) - if config.may('general.edit_notifications') and user.get("notifications_enabled"): - value = forms.get_input(vs_notification_method, "notification_method") - users[config.user_id]["notification_method"] = value - - # Custom attributes - if config.may('general.edit_user_attributes'): - for name, attr in userdb.get_user_attributes(): - if attr['user_editable']: - if not attr.get("permission") or config.may(attr["permission"]): - vs = attr['valuespec'] - value = vs.from_html_vars('ua_' + name) - vs.validate_value(value, "ua_" + name) - users[config.user_id][name] = value +def save_bi_rules(aggregations, aggregation_rules): + def replace_constants(s): + for name, uuid in bi_constants.items(): + while True: + n = s.replace("'%s'" % uuid, name) + if n != s: + s = n + else: + break + return s[0] + '\n ' + s[1:-1] + '\n' + s[-1] - # Change the password if requested - password = False - if config.may('general.change_password'): - password = html.var('password') - password2 = html.var('password2', '') - if password: - if password2 and password != password2: - raise MKUserError("password2", _("The both passwords do not match.")) + make_nagios_directory(multisite_dir) + out = create_user_file(multisite_dir + "bi.mk", 
"w") + out.write("# Written by WATO\n# encoding: utf-8\n\n") + for ruleid, rule in aggregation_rules.items(): + rule = convert_rule_to_bi(rule) + out.write('aggregation_rules["%s"] = %s\n\n' % + ( ruleid, replace_constants(pprint.pformat(rule, width=50)))) + out.write('\n') + for aggregation in aggregations: + if aggregation["single_host"]: + out.write("host_aggregations.append(\n") + else: + out.write("aggregations.append(\n") + out.write(replace_constants(pprint.pformat(convert_aggregation_to_bi(aggregation)))) + out.write(")\n") + + # Make sure that BI aggregates are replicated to all other sites that allow + # direct user login + update_login_sites_replication_status() + +def rename_host_in_bi(oldname, newname): + renamed = 0 + aggregations, rules = load_bi_rules() + for aggregation in aggregations: + renamed += rename_host_in_bi_aggregation(aggregation, oldname, newname) + for rule in rules.values(): + renamed += rename_host_in_bi_rule(rule, oldname, newname) + if renamed: + save_bi_rules(aggregations, rules) + return renamed + +def rename_host_in_bi_aggregation(aggregation, oldname, newname): + node = aggregation["node"] + if node[0] == 'call': + if rename_host_in_list(aggregation["node"][1][1], oldname, newname): + return 1 + return 0 + +def rename_host_in_bi_rule(rule, oldname, newname): + renamed = 0 + nodes = rule["nodes"] + for nr, node in enumerate(nodes): + if node[0] in [ "host", "service", "remaining" ]: + if node[1][0] == oldname: + nodes[nr] = (node[0], ( newname, ) + node[1][1:]) + renamed = 1 + elif node[0] == "call": + if rename_host_in_list(node[1][1], oldname, newname): + renamed = 1 + return renamed + + +bi_aggregation_functions = {} + +# Make some conversions so that the format of the +# valuespecs is matched +def convert_rule_from_bi(rule, ruleid): + if type(rule) == tuple: + rule = { + "title" : rule[0], + "params" : rule[1], + "aggregation" : rule[2], + "nodes" : rule[3], + } + crule = {} + crule.update(rule) + crule["nodes"] = map(convert_node_from_bi, rule["nodes"]) + parts = rule["aggregation"].split("!") + crule["aggregation"] = (parts[0], tuple(map(tryint, parts[1:]))) + crule["id"] = ruleid + return crule + +def convert_rule_to_bi(rule): + brule = {} + brule.update(rule) + if "id" in brule: + del brule["id"] + brule["nodes"] = map(convert_node_to_bi, rule["nodes"]) + brule["aggregation"] = "!".join( + [ rule["aggregation"][0] ] + map(str, rule["aggregation"][1])) + return brule + +# Convert node-Tuple into format used by CascadingDropdown +def convert_node_from_bi(node): + if len(node) == 2: + if type(node[1]) == list: + return ("call", node) + elif node[1] == bi_constants['HOST_STATE']: + return ("host", (node[0],)) + elif node[1] == bi_constants['REMAINING']: + return ("remaining", (node[0],)) + else: + return ("service", node) + + else: # FOREACH_... 
+ + if type(node[1]) == list: + tags = node[1] + node = node[0:1] + node[2:] + else: + tags = [] + spec = node[1] + if spec == bi_constants['ALL_HOSTS']: + spec = None + if node[0] == bi_constants['FOREACH_SERVICE']: + service = node[2] + subnode = convert_node_from_bi(node[3:]) + return ("foreach_service", (tags, spec, service, subnode)) + else: + subnode = convert_node_from_bi(node[2:]) + if node[0] == bi_constants['FOREACH_HOST']: + what = "host" + elif node[0] == bi_constants['FOREACH_CHILD']: + what = "child" + elif node[0] == bi_constants['FOREACH_PARENT']: + what = "parent" + return ("foreach_host", (what, tags, spec, subnode)) + + +def convert_node_to_bi(node): + if node[0] == "call": + return node[1] + elif node[0] == "host": + return (node[1][0], bi_constants['HOST_STATE']) + elif node[0] == "remaining": + return (node[1][0], bi_constants['REMAINING']) + elif node[0] == "service": + return node[1] + elif node[0] == "foreach_host": + what = node[1][0] + tags = node[1][1] + if node[1][2]: + spec = node[1][2] + else: + spec = bi_constants['ALL_HOSTS'] + return (bi_constants["FOREACH_" + what.upper()], tags, spec) + convert_node_to_bi(node[1][3]) + elif node[0] == "foreach_service": + tags = node[1][0] + if node[1][1]: + spec = node[1][1] + else: + spec = bi_constants['ALL_HOSTS'] + service = node[1][2] + return (bi_constants["FOREACH_SERVICE"], tags, spec, service) + convert_node_to_bi(node[1][3]) + +def convert_aggregation_from_bi(aggr, single_host): + if aggr[0] == bi_constants["DISABLED"]: + disabled = True + aggr = aggr[1:] + else: + disabled = False + + if aggr[0] == bi_constants["HARD_STATES"]: + hard_states = True + aggr = aggr[1:] + else: + hard_states = False + + if type(aggr[0]) != list: + groups = [aggr[0]] + else: + groups = aggr[0] + node = convert_node_from_bi(aggr[1:]) + return { + "disabled" : disabled, + "hard_states" : hard_states, + "groups" : groups, + "node" : node, + "single_host" : single_host, + } - users[config.user_id]['password'] = userdb.encrypt_password(password) +def convert_aggregation_to_bi(aggr): + if len(aggr["groups"]) == 1: + conv = (aggr["groups"][0],) + else: + conv = (aggr["groups"],) + node = convert_node_to_bi(aggr["node"]) + convaggr = conv + node + if aggr["hard_states"]: + convaggr = (bi_constants["HARD_STATES"],) + convaggr + if aggr["disabled"]: + convaggr = (bi_constants["DISABLED"],) + convaggr + return convaggr + + +def validate_bi_rule_call(value, varprefix): + rule_id, arguments = value + aggregations, aggregation_rules = load_bi_rules() + rule_params = aggregation_rules[rule_id]['params'] + + if len(arguments) != len(rule_params): + raise MKUserError(varprefix+"_1_0", _("The rule you selected needs %d argument(s) (%s), " + "but you configured %d arguments.") % + (len(rule_params), ', '.join(rule_params), len(arguments))) + + +# Not in global context, so that l10n will happen again +def declare_bi_valuespecs(aggregation_rules): + global vs_aggregation, aggregation_choices, vs_bi_node + + rule_choices = [ + (key, key + " - " + rule["title"]) + for (key, rule) + in aggregation_rules.items() ] - # Increase serial to invalidate old cookies - if 'serial' not in users[config.user_id]: - users[config.user_id]['serial'] = 1 - else: - users[config.user_id]['serial'] += 1 - userdb.save_users(users) - success = True + vs_call_rule = Tuple( + elements = [ + DropdownChoice( + title = _("Rule:"), + choices = rule_choices, + sorted = True, + ), + ListOfStrings( + orientation = "horizontal", + size = 12, + title = _("Arguments:"), + ), + ], + 
validate = validate_bi_rule_call, + ) - if password: - html.javascript( - "if(top) top.location.reload(); " - "else document.location.reload();") - else: - html.reload_sidebar() - except MKUserError, e: - html.add_user_error(e.varname, e.message) + host_re_help = _("Either an exact host name or a regular expression exactly matching the host " + "name. Example: srv.*p will match srv4711p but not xsrv4711p2. ") + vs_host_re = TextUnicode( + title = _("Host:"), + help = host_re_help, + allow_empty = False, + ) - html.header(_("Edit user profile"), - javascripts = ['wato'], - stylesheets = ['check_mk', 'pages', 'wato', 'status']) + # Configuration of leaf nodes + vs_bi_node_simplechoices = [ + ( "host", _("State of a host"), + Tuple( + help = _("Will create child nodes representing the state of hosts (usually the " + "host check is done via ping)."), + elements = [ vs_host_re, ] + ) + ), + ( "service", _("State of a service"), + Tuple( + help = _("Will create child nodes representing the state of services."), + elements = [ + vs_host_re, + TextUnicode( + title = _("Service:"), + help = _("A regular expression matching the beginning of a service description. You can " + "use a trailing $ in order to define an exact match. For each " + "matching service on the specified hosts one child node will be created. "), + ), + ] + ), + ), + ( "remaining", _("State of remaining services"), + Tuple( + help = _("Create a child node for each service on the specified hosts that is not " + "contained in any other node of the aggregation."), + elements = [ vs_host_re ], + ) + ), + ] - if success: - html.message(_("Successfully updated user profile.")) + # Configuration of explicit rule call + vs_bi_node_call_choices = [ + ( "call", _("Call a Rule"), vs_call_rule ), + ] - if html.has_user_errors(): - html.show_user_errors() + # Configuration of FOREACH_...-type nodes + def foreach_choices(subnode_choices): + return [ + ( "foreach_host", _("Create nodes based on a host search"), + Tuple( + elements = [ + DropdownChoice( + title = _("Refer to:"), + choices = [ + ( 'host', _("The found hosts themselves") ), + ( 'child', _("The found hosts' childs") ), + ( 'parent', _("The found hosts' parents") ), + ], + help = _('When selecting The found hosts\' childs, the conditions ' + '(tags and host name) are used to match a host, but you will get one ' + 'node created for each child of the matched host. The ' + 'place holder $1$ contains the name of the found child.
<br><br>
    ' + 'When selecting The found hosts\' parents, the conditions ' + '(tags and host name) are used to match a host, but you will get one ' + 'node created for each of the parent hosts of the matched host. ' + 'The place holder $1$ contains the name of the child host ' + 'and $2$ the name of the parent host.'), + ), + HostTagCondition( + title = _("Host Tags:") + ), + OptionalDropdownChoice( + title = _("Host Name:"), + choices = [ + ( None, _("All Hosts")), + ], + explicit = TextAscii(size = 60), + otherlabel = _("Regex for host name"), + default_value = None, + ), + CascadingDropdown( + title = _("Nodes to create:"), + help = _("When calling a rule you can use the place holder $1$ " + "in the rule arguments. It will be replaced by the actual host " + "names found by the search - one host name for each rule call."), + choices = subnode_choices, + ), + ] + ) + ), + ( "foreach_service", _("Create nodes based on a service search"), + Tuple( + elements = [ + HostTagCondition( + title = _("Host Tags:") + ), + OptionalDropdownChoice( + title = _("Host Name:"), + choices = [ + ( None, _("All Hosts")), + ], + explicit = TextAscii(size = 60), + otherlabel = _("Regex for host name"), + default_value = None, + ), + TextAscii( + title = _("Service Regex:"), + help = _("Subexpressions enclosed in ( and ) will be available " + "as arguments $2$, $3$, etc."), + size = 80, + ), + CascadingDropdown( + title = _("Nodes to create:"), + help = _("When calling a rule you can use the place holder $1$ " + "in the rule arguments. It will be replaced by the actual host " + "names found by the search - one host name for each rule call. If you " + "have regular expression subgroups in the service pattern, then " + "the place holders $2$ will represent the first group match, " + "$3 the second, and so on..."), + choices = subnode_choices, + ), + ] + ) + ) + ] - users = userdb.load_users() - user = users.get(config.user_id) - if user == None: - html.show_warning(_("Sorry, your user account does not exist.")) - html.footer() - return + vs_bi_node = CascadingDropdown( + choices = vs_bi_node_simplechoices + vs_bi_node_call_choices \ + + foreach_choices(vs_bi_node_simplechoices + vs_bi_node_call_choices) + ) - # Returns true if an attribute is locked and should be read only. Is only - # checked when modifying an existing user - locked_attributes = userdb.locked_attributes(user.get('connector')) - def is_locked(attr): - return attr in locked_attributes + aggregation_choices = [] + for aid, ainfo in bi_aggregation_functions.items(): + aggregation_choices.append(( + aid, + ainfo["title"], + ainfo["valuespec"], + )) - html.begin_form("profile", method="POST") - html.write('
    ') + vs_aggregation = Dictionary( + title = _("Aggregation Properties"), + optional_keys = False, + render = "form", + elements = [ + ( "groups", + ListOfStrings( + title = _("Aggregation Groups"), + help = _("List of groups in which to show this aggregation. Usually " + "each aggregation is only in one group. Group names are arbitrary " + "texts. At least one group is mandatory."), + valuespec = TextUnicode(), + ), + ), + ( "node", + CascadingDropdown( + title = _("Rule to call"), + choices = vs_bi_node_call_choices + foreach_choices(vs_bi_node_call_choices) + ) + ), + ( "disabled", + Checkbox( + title = _("Disabled"), + label = _("Currently disable this aggregation"), + ) + ), + ( "hard_states", + Checkbox( + title = _("Use Hard States"), + label = _("Base state computation on hard states"), + help = _("Hard states can only differ from soft states if at least one host or service " + "of the BI aggregate has more than 1 maximum check attempt. For example if you " + "set the maximum check attempts of a service to 3 and the service is CRIT " + "just since one check then it's soft state is CRIT, but its hard state is still OK."), + ) + ), + ( "single_host", + Checkbox( + title = _("Optimization"), + label = _("The aggregation covers data from only one host and its parents."), + help = _("If you have a large number of aggregations that cover only one host and " + "maybe its parents (such as Check_MK cluster hosts), " + "then please enable this optimization. It reduces the time for the " + "computation. Do not enable this for aggregations that contain " + "data of more than one host!"), + ), + ), + ] + ) - forms.header(_("Personal Settings")) - forms.section(_("Name"), simple=True) - html.write(user.get("alias", config.user_id)) - if config.may('general.change_password') and not is_locked('password'): - forms.section(_("Password")) - html.password_input('password', autocomplete = "off") - forms.section(_("Password confirmation")) - html.password_input('password2', autocomplete = "off") +def mode_bi_edit_aggregation(phase): + nr = int(html.var("id", "-1")) # In case of Aggregations: index in list + new = nr == -1 + + if phase == "title": + if new: + return _("BI - Create New Aggregation") + else: + return _("BI - Edit Aggregation") + + elif phase == "buttons": + html.context_button(_("Abort"), make_link([("mode", "bi_rules")]), "abort") + return + + aggregations, aggregation_rules = load_bi_rules() + declare_bi_valuespecs(aggregation_rules) - if config.may('general.edit_profile'): - select_language(config.get_language('')) - # Let the user configure how he wants to be notified - if config.may('general.edit_notifications') and user.get("notifications_enabled"): - forms.section(_("Notifications")) - html.help(_("Here you can configure how you want to be notified about host and service problems and " - "other monitoring events.")) - vs_notification_method.render_input("notification_method", user.get("notification_method")) - # forms.input(vs_notification_method, "notification_method", user.get("notification_method")) + if phase == "action": + if html.check_transaction(): + new_aggr = vs_aggregation.from_html_vars('aggr') + vs_aggregation.validate_value(new_aggr, 'aggr') + if len(new_aggr["groups"]) == 0: + raise MKUserError('rule_p_groups_0', _("Please define at least one aggregation group")) + if new: + aggregations.append(new_aggr) + log_pending(SYNC, None, "bi-new-aggregation", _("Created new BI aggregation %d") % (len(aggregations))) + else: + aggregations[nr] = new_aggr + 
log_pending(SYNC, None, "bi-new-aggregation", _("Modified BI aggregation %d") % (nr + 1)) + save_bi_rules(aggregations, aggregation_rules) + return "bi_rules" - if config.may('general.edit_user_attributes'): - for name, attr in userdb.get_user_attributes(): - vs = attr['valuespec'] - forms.section(vs.title()) - vs.render_input("ua_" + name, user.get(name, vs.default_value())) - html.help(vs.help()) + if new: + value = { "groups" : [ _("Main") ] } + else: + value = aggregations[nr] - # Save button + html.begin_form("biaggr", method = "POST") + vs_aggregation.render_input("aggr", value) forms.end() - html.button("_save", _("Save")) - html.write('
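vs_aggregation edits aggregations as plain dicts; convert_aggregation_to_bi() flattens such a dict back into the tuple form stored in bi.mk. A hedged example (the rule id is illustrative):

    aggr = {
        'groups'      : [u'Main'],
        'node'        : ('call', ('some_rule', [])),
        'disabled'    : False,
        'hard_states' : False,
        'single_host' : False,
    }
    # convert_aggregation_to_bi(aggr) == (u'Main', 'some_rule', [])
    # With 'hard_states': True the tuple gains a leading HARD_STATES
    # sentinel, and with 'disabled': True additionally a DISABLED one.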
    ') html.hidden_fields() + html.button("_save", new and _("Create") or _("Save"), "submit") + html.set_focus("rule_p_groups_0") html.end_form() - html.footer() -#. -# .--Sampleconfig--------------------------------------------------------. -# | ____ _ __ _ | -# | / ___| __ _ _ __ ___ _ __ | | ___ ___ ___ _ __ / _(_) __ _ | -# | \___ \ / _` | '_ ` _ \| '_ \| |/ _ \/ __/ _ \| '_ \| |_| |/ _` | | -# | ___) | (_| | | | | | | |_) | | __/ (_| (_) | | | | _| | (_| | | -# | |____/ \__,_|_| |_| |_| .__/|_|\___|\___\___/|_| |_|_| |_|\__, | | -# | |_| |___/ | -# +----------------------------------------------------------------------+ -# | Functions for creating an example configuration | -# '----------------------------------------------------------------------' +def mode_bi_edit_rule(phase): + ruleid = html.var("id") # In case of Aggregations: index in list + new = not ruleid -# Create a very basic sample configuration, but only if none of the -# files that we will create already exists. That is e.g. the case -# after an update from an older version where no sample config had -# been created. -def create_sample_config(): - if os.path.exists(multisite_dir + "hosttags.mk") \ - or os.path.exists(root_dir + "rules.mk") \ - or os.path.exists(root_dir + "groups.mk"): - return + if phase == "title": + if new: + return _("BI - Create New Rule") + else: + return _("BI - Edit Rule") + " " + html.attrencode(ruleid) - # A contact group where everyone is member of - groups = { - "contact" : { 'all' : u'Everybody' }, - } - save_group_information(groups) - # Basic setting of host tags - wato_host_tags = \ - [('agent', - u'Agent type', - [('cmk-agent', u'Check_MK Agent (Server)', ['tcp']), - ('snmp-only', u'SNMP (Networking device, Appliance)', ['snmp']), - ('snmp-v1', u'Legacy SNMP device (using V1)', ['snmp']), - ('snmp-tcp', u'Dual: Check_MK Agent + SNMP', ['snmp', 'tcp']), - ('ping', u'Only PING this device', [])]), - ('criticality', - u'Criticality', - [('prod', u'Productive system', []), - ('critical', u'Business critical', []), - ('test', u'Test system', []), - ('offline', u'Do not monitor this host', [])]), - ('networking', - u'Networking Segment', - [('lan', u'Local network (low latency)', []), - ('wan', u'WAN (high latency)', []), - ('dmz', u'DMZ (low latency, secure access)', [])])] + elif phase == "buttons": + html.context_button(_("Abort"), make_link([("mode", "bi_rules")]), "abort") + return - wato_aux_tags = \ - [('snmp', u'monitor via SNMP'), - ('tcp', u'monitor via Check_MK Agent')] + aggregations, aggregation_rules = load_bi_rules() + declare_bi_valuespecs(aggregation_rules) - save_hosttags(wato_host_tags, wato_aux_tags) + elements = [ + ( "title", + TextUnicode( + title = _("Rule Title"), + help = _("The title of the BI nodes which are created from this rule. This will be " + "displayed as the name of the node in the BI view. For " + "top level nodes this title must be unique. You can insert " + "rule parameters like $FOO$ or $BAR$ here."), + allow_empty = False, + size = 64, + ), + ), - # Rules that match the upper host tag definition - rulesets = { - # Make the tag 'offline' remove hosts from the monitoring - 'only_hosts': [ - (['!offline'], ['@all'], - {'comment': u'Do not monitor hosts with the tag "offline"'})], + ( "comment", + TextUnicode( + title = _("Comment"), + help = _("An arbitrary comment of this rule for you."), + size = 64, + ), + ), + ( "params", + ListOfStrings( + title = _("Parameters"), + help = _("Parameters are used in order to make rules more flexible. 
They must " + "be named like variables in programming languages. For example you can " + "make your rule have the two parameters HOST and INST. " + "When calling the rule - from an aggergation or a higher level rule - " + "you can then specify two arbitrary values for these parameters. In the " + "title of the rule as well as the host and service names, you can insert the " + "actual value of the parameters by $HOST$ and $INST$ " + "(enclosed in dollar signs)."), + orientation = "horizontal", + valuespec = TextAscii( + size = 12, + regex = '[A-Za-z_][A-Za-z0-9_]*', + regex_error = _("Parameters must contain only A-Z, a-z, 0-9 and _ " + "and must not begin with a digit."), + ) + ) + ), + ( "aggregation", + CascadingDropdown( + title = _("Aggregation Function"), + help = _("The aggregation function decides how the status of a node " + "is constructed from the states of the child nodes."), + html_separator = "", + choices = aggregation_choices, + ) + ), + ( "nodes", + ListOf( + vs_bi_node, + add_label = _("Add child node generator"), + title = _("Nodes that are aggregated by this rule"), + ), + ), + ] - # Rule for WAN hosts with adapted PING levels - 'ping_levels': [ - ({'loss': (80.0, 100.0), - 'packets': 6, - 'rta': (1500.0, 3000.0), - 'timeout': 20}, ['wan'], ['@all'], - {'comment': u'Allow longer round trip times when pinging WAN hosts'})], - # All hosts should use SNMP v2c if not specially tagged - 'bulkwalk_hosts': [ - (['!snmp-v1'], ['@all'], {'comment': u'Hosts with the tag "snmp-v1" must not use bulkwalk'})], + if new: + elements = [ + ( "id", + TextAscii( + title = _("Unique Rule ID"), + help = _("The ID of the rule must be a unique text. It will be used as an internal key " + "when rules refer to each other. The rule IDs will not be visible in the status " + "GUI. They are just used within the configuration."), + allow_empty = False, + size = 12, + ), + )] + elements - # Put all hosts and the contact group 'all' - 'host_contactgroups': [ - ('all', [], ALL_HOSTS, {'comment': u'Put all hosts into the contact group "all"'} ), + vs_rule = Dictionary( + title = _("Rule Properties"), + optional_keys = False, + render = "form", + elements = elements, + headers = [ + ( _("General Properties"), [ "id", "title", "comment", "params" ]), + ( _("Aggregation Function"), [ "aggregation" ], ), + ( _("Child Node Generation"), [ "nodes" ] ), ] - } + ) - save_rulesets(g_root_folder, rulesets) - # Make sure the host tag attributes are immediately declared! - config.wato_host_tags = wato_host_tags - config.wato_aux_tags = wato_aux_tags + if phase == "action": + if html.check_transaction(): + new_rule = vs_rule.from_html_vars('rule') + vs_rule.validate_value(new_rule, 'rule') + if new: + ruleid = new_rule["id"] + + if new and ruleid in aggregation_rules: + raise MKUserError('rule_p_id', + _("There is already a rule with the id %s" % ruleid)) + if not new_rule["nodes"]: + raise MKUserError(None, + _("Please add at least one child node. Empty rules are useless.")) + + if new: + del new_rule["id"] + aggregation_rules[ruleid] = new_rule + log_pending(SYNC, None, "bi-new-rule", _("Create new BI rule %s") % ruleid) + else: + aggregation_rules[ruleid].update(new_rule) + new_rule["id"] = ruleid + if bi_rule_uses_rule(aggregation_rules, new_rule, new_rule["id"]): + raise MKUserError(None, _("There is a cycle in your rules. 
This rule calls itself - " + "either directly or indirectly.")) + log_pending(SYNC, None, "bi-edit-rule", _("Modified BI rule %s") % ruleid) + + save_bi_rules(aggregations, aggregation_rules) + return "bi_rules" + + if new: + value = {} + else: + value = aggregation_rules[ruleid] + + html.begin_form("birule", method="POST") + vs_rule.render_input("rule", value) + forms.end() + html.hidden_fields() + html.button("_save", new and _("Create") or _("Save"), "submit") + if new: + html.set_focus("rule_p_id") + else: + html.set_focus("rule_p_title") + html.end_form() #. -# .--Pattern Editor------------------------------------------------------. -# | ____ _ _ _____ _ _ _ | -# | | _ \ __ _| |_| |_ ___ _ __ _ __ | ____|__| (_) |_ ___ _ __ | -# | | |_) / _` | __| __/ _ \ '__| '_ \ | _| / _` | | __/ _ \| '__| | -# | | __/ (_| | |_| || __/ | | | | | | |__| (_| | | || (_) | | | -# | |_| \__,_|\__|\__\___|_| |_| |_| |_____\__,_|_|\__\___/|_| | +# .--Custom-Attributes---------------------------------------------------. +# | ____ _ _ _ _ | +# | / ___| _ ___| |_ ___ _ __ ___ / \ | |_| |_ _ __ ___ | +# | | | | | | / __| __/ _ \| '_ ` _ \ _____ / _ \| __| __| '__/ __| | +# | | |__| |_| \__ \ || (_) | | | | | |_____/ ___ \ |_| |_| | \__ \_ | +# | \____\__,_|___/\__\___/|_| |_| |_| /_/ \_\__|\__|_| |___(_) | # | | # +----------------------------------------------------------------------+ -# | | +# | Mange custom attributes of users (in future hosts etc.) | # '----------------------------------------------------------------------' -def mode_pattern_editor(phase): - import logwatch +custom_attr_types = [ + ('TextAscii', _('Simple Text')), +] - # 1. Variablen auslesen - hostname = html.var('host', '') - item = html.var('file', '') - match_txt = html.var('match', '') - master_url = html.var('master_url', '') +def save_custom_attrs(attrs): + make_nagios_directory(multisite_dir) + out = create_user_file(multisite_dir + "custom_attrs.mk", "w") + out.write("# Written by WATO\n# encoding: utf-8\n\n") + for what in [ "user" ]: + if what in attrs and len(attrs[what]) > 0: + out.write("if type(wato_%s_attrs) != list:\n wato_%s_attrs = []\n" % (what, what)) + out.write("wato_%s_attrs += %s\n\n" % (what, pprint.pformat(attrs[what]))) - hosts = load_hosts(g_folder) - host = hosts.get(hostname) +def mode_edit_custom_attr(phase, what): + name = html.var("edit") # missing -> new group + new = name == None if phase == "title": - if not hostname and not item: - return _("Logfile Pattern Analyzer") - elif not hostname: - return _("Logfile Patterns of Logfile %s on all Hosts") % (item) - elif not item: - return _("Logfile Patterns of Host %s") % (hostname) + if new: + if what == "user": + return _("Create User Attribute") else: - return _("Logfile Patterns of Logfile %s on Host %s") % (item, hostname) + if what == "user": + return _("Edit User Attribute") elif phase == "buttons": - html.context_button(_("Main Menu"), make_link([("mode", "main")]), "home") - if host: - if item: - title = _("Show Logfile") - else: - title = _("Host Logfiles") - - master_url = '' - if config.is_multisite(): - master_url = '&master_url=' + defaults.url_prefix + 'check_mk/' - html.context_button(title, "logwatch.py?host=%s&file=%s%s" % - (htmllib.urlencode(hostname), htmllib.urlencode(item), master_url), 'logwatch') + html.context_button(_("User Attributes"), make_link([("mode", "%s_attrs" % what)]), "back") + return - html.context_button(_('Edit Logfile Rules'), make_link([ - ('mode', 'edit_ruleset'), - ('varname', 'logwatch_rules') - ]), - 'edit' - ) + 
all_attrs = userdb.load_custom_attrs() + attrs = all_attrs.setdefault(what, []) - return + if not new: + attr = [ a for a in attrs if a['name'] == name ] + if not attr: + raise MKUserError(_('The attribute does not exist.')) + else: + attr = attr[0] + else: + attr = {} if phase == "action": - return + if html.check_transaction(): + title = html.var_utf8("title").strip() + if not title: + raise MKUserError("title", _("Please specify a title.")) + for this_attr in attrs: + if title == this_attr['title'] and name != this_attr['name']: + raise MKUserError("alias", _("This alias is already used by the attribute %s.") % this_attr['name']) + + topic = html.var('topic', '').strip() + help = html.var_utf8('help').strip() + user_editable = html.get_checkbox('user_editable') + show_in_table = html.get_checkbox('show_in_table') + add_custom_macro = html.get_checkbox('add_custom_macro') - html.help(_('On this page you can test the defined logfile patterns against a custom text, ' - 'for example a line from a logfile. Using this dialog it is possible to analyze ' - 'and debug your whole set of logfile patterns.')) + if new: + name = html.var("name", '').strip() + if not name: + raise MKUserError("name", _("Please specify a name for the new attribute.")) + if ' ' in name: + raise MKUserError("name", _("Sorry, spaces are not allowed in attribute names.")) + if not re.match("^[-a-z0-9A-Z_]*$", name): + raise MKUserError("name", _("Invalid attribute name. Only the characters a-z, A-Z, 0-9, _ and - are allowed.")) + if [ a for a in attrs if a['name'] == name ]: + raise MKUserError("name", _("Sorry, there is already an attribute with that name.")) + + ty = html.var('type', '').strip() + if ty not in [ t[0] for t in custom_attr_types ]: + raise MKUserError('type', _('The choosen attribute type is invalid.')) + + attr = { + 'name' : name, + 'type' : ty, + } + attrs.append(attr) - # Render the tryout form - html.begin_form('try') - forms.header(_('Try Pattern Match')) - forms.section(_('Hostname')) - html.text_input('host') - forms.section(_('Logfile')) - html.text_input('file') - forms.section(_('Text to match')) - html.help(_('You can insert some text (e.g. a line of the logfile) to test the patterns defined ' - 'for this logfile. All patterns for this logfile are listed below. Matching patterns ' - 'will be highlighted after clicking the "Try out" button.') - ) - html.text_input('match', cssclass = 'match', size=100) - forms.end() - html.button('_try', _('Try out')) - html.del_var('folder') # Never hand over the folder here - html.hidden_fields() - html.end_form() + log_pending(SYNCRESTART, None, "edit-%sattr" % what, _("Create new %s attribute %s") % (what, name)) + else: + log_pending(SYNCRESTART, None, "edit-%sattr" % what, _("Changed title of %s attribute %s") % (what, name)) + attr.update({ + 'title' : title, + 'topic' : topic, + 'help' : help, + 'user_editable' : user_editable, + 'show_in_table' : show_in_table, + 'add_custom_macro' : add_custom_macro, + }) - # Bail out if the given hostname does not exist - if hostname and not host: - html.add_user_error('host', _('The given host does not exist or is not managed by WATO.')) - html.show_user_errors() - return + save_custom_attrs(all_attrs) - varname = 'logwatch_rules' - rulespec = g_rulespecs[varname] - all_rulesets = load_all_rulesets() - ruleset = all_rulesets.get(varname) + return what + "_attrs" - html.write('
<h3>%s</h3>' % _('Logfile Patterns'))
-    if not ruleset:
-        html.write(
-            "<div class=info>"
-            + _('There are no logfile patterns defined. You may create '
-                'logfile patterns using the <a href="%s">Rule Editor</a>.') % make_link([
-                ('mode', 'edit_ruleset'),
-                ('varname', 'logwatch_rules')])
-            + "</div>
    " - ) + html.begin_form("attr") + forms.header(_("Properties")) + forms.section(_("Name"), simple = not new) + html.help(_("The name of the attribute is used as an internal key. It cannot be " + "changed later.")) + if new: + html.text_input("name", attr.get('name')) + html.set_focus("name") + else: + html.write(name) + html.set_focus("title") - # Loop all rules for this ruleset - already_matched = False - last_folder = None - for rulenr in range(0, len(ruleset)): - folder, rule = ruleset[rulenr] - if folder != last_folder: - rel_rulenr = 0 - last_folder = folder - else: - rel_rulenr += 1 - last_in_group = rulenr == len(ruleset) - 1 or ruleset[rulenr+1][0] != folder - pattern_list, tag_specs, host_list, item_list, rule_options = parse_rule(rulespec, rule) + forms.section(_("Title") + "*") + html.help(_("The title is used to label this attribute.")) + html.text_input("title", attr.get('title')) + + forms.section(_('Topic')) + html.help(_('The attribute is added to this section in the edit dialog.')) + html.select('topic', [ + ('ident', _('Identity')), + ('security', _('Security')), + ('notify', _('Notifications')), + ('personal', _('Personal Settings')), + ], attr.get('topic', 'personal')) + + forms.section(_('Help Text') + "*") + html.help(_('You might want to add some helpful description for the attribute.')) + html.text_area('help', attr.get('help', '')) - # Check if this rule applies to the given host/service - if hostname: - # If hostname (and maybe filename) try match it - reason = rule_matches_host_and_item( - rulespec, tag_specs, host_list, item_list, folder, g_folder, hostname, item) - elif item: - # If only a filename is given - reason = False - for i in item_list: - if re.match(i, str(item)): - reason = True - break - else: - # If no host/file given match all rules - reason = True + forms.section(_('Data type')) + html.help(_('The type of information to be stored in this attribute.')) + if new: + html.select('type', custom_attr_types, attr.get('type')) + else: + html.write(dict(custom_attr_types)[attr.get('type')]) - match_img = '' - if reason == True: - # Applies to the given host/service - reason_class = 'reason' - # match_title/match_img are set below per pattern - else: - # not matching - reason_class = 'noreason' - match_img = 'nmatch' - match_title = reason + forms.section(_('Editable by Users')) + html.help(_('It is possible to let users edit their custom attributes.')) + html.checkbox('user_editable', attr.get('user_editable', True)) + + forms.section(_('Show in Table')) + html.help(_('This attribute is only visibile on the detail pages by default, but ' + 'you can also make it visible in the overview tables.')) + html.checkbox('show_in_table', attr.get('show_in_table', False)) + + forms.section(_('Add as Custom Macro')) + html.help(_('The attribute can be added to the contact definiton in order ' + 'to use it for notifications.')) + html.checkbox('add_custom_macro', attr.get('add_custom_macro', False)) - html.begin_foldable_container("rule", str(rulenr), True, "Rule #%d" % (rulenr + 1), indent = False) - html.write('') - html.write('') - html.write('') - html.write('') - html.write('') - html.write('') - html.write('\n') + forms.end() + html.show_localization_hint() + html.button("save", _("Save")) + html.hidden_fields() + html.end_form() - # Each rule can hold no, one or several patterns. 
Loop them all here - odd = "odd" - for state, pattern, comment in pattern_list: - match_class = '' - disp_match_txt = '' - if reason == True: - matched = re.search(pattern, match_txt) - if matched: +def mode_custom_attrs(phase, what): + if what == "user": + title = _("Custom User Attributes") - # Prepare highlighted search txt - match_start = matched.start() - match_end = matched.end() - disp_match_txt = match_txt[:match_start] \ - + '' + match_txt[match_start:match_end] + '' \ - + match_txt[match_end:] + if phase == "title": + return title - if already_matched == False: - # First match - match_class = 'match first' - match_img = 'match' - match_title = _('This logfile pattern matches first and will be used for ' - 'defining the state of the given line.') - already_matched = True - else: - # subsequent match - match_class = 'match' - match_img = 'imatch' - match_title = _('This logfile pattern matches but another matched first.') - else: - match_img = 'nmatch' - match_title = _('This logfile pattern does not match the given string.') + elif phase == "buttons": + global_buttons() + html.context_button(_("Users"), make_link([("mode", "users")]), "back") + html.context_button(_("New Attribute"), make_link([("mode", "edit_%s_attr" % what)]), "new") + return - html.write('' % (odd, reason_class)) - html.write('' % (cls, logwatch.level_name(state))) - html.write('' % pattern) - html.write('' % comment) - html.write('' % disp_match_txt) - html.write('\n') + if phase == "action": + if html.var('_delete'): + delname = html.var("_delete") - odd = odd == "odd" and "even" or "odd" + # FIXME: Find usages and warn + #if usages: + # message = "%s
<br><br>%s:<ul>" % \
+            #              (_("You cannot delete this %s attribute.") % what,
+            #               _("It is still in use by"))
+            #    for title, link in usages:
+            #        message += '<li><a href="%s">%s</a></li>\n' % (link, title)
+            #    message += "</ul>
    " + # raise MKUserError(None, message) - html.write('\n') + confirm_txt = _('Do you really want to delete the custom attribute "%s"?') % (delname) - html.write('
    ' + _('Match') + '' + _('State') + '' + _('Pattern') + '' + _('Comment') + '' + _('Matched line') + '
    ' % \ - (match_title, match_img)) + all_attrs = userdb.load_custom_attrs() + attrs = all_attrs.get(what, {}) - cls = '' - if match_class == 'match first': - cls = ' class="svcstate state%d"' % logwatch.level_state(state) - html.write('%s%s%s%s
    ' % odd) - edit_url = make_link([ - ("mode", "edit_rule"), - ("varname", varname), - ("rulenr", rel_rulenr), - ("host", hostname), - ("item", mk_repr(item)), - ("rule_folder", folder[".path"])]) - html.icon_button(edit_url, _("Edit this rule"), "edit") - html.write('
    \n') - html.end_foldable_container() + c = wato_confirm(_("Confirm deletion of attribute \"%s\"" % delname), confirm_txt) + if c: + for index, attr in enumerate(attrs): + if attr['name'] == delname: + attrs.pop(index) + save_custom_attrs(all_attrs) + log_pending(SYNCRESTART, None, "edit-%sattrs" % what, _("Deleted attribute %s" % (delname))) + elif c == False: + return "" + + return None + + if not attrs: + html.write("
    " + _("No custom attributes are defined yet.") + "
    ") + return + + table.begin(what + "attrs") + for attr in sorted(attrs, key = lambda x: x['title']): + table.row() + + table.cell(_("Actions"), css="buttons") + edit_url = make_link([("mode", "edit_%s_attr" % what), ("edit", attr['name'])]) + delete_url = html.makeactionuri([("_delete", attr['name'])]) + html.icon_button(edit_url, _("Properties"), "edit") + html.icon_button(delete_url, _("Delete"), "delete") + + table.cell(_("Name"), attr['name']) + table.cell(_("Title"), attr['title']) + table.cell(_("Type"), attr['type']) + table.end() -# .-Hooks-&-API----------------------------------------------------------. +#. +# .--Hooks-&-API---------------------------------------------------------. # | _ _ _ ___ _ ____ ___ | # | | | | | ___ ___ | | _____ ( _ ) / \ | _ \_ _| | # | | |_| |/ _ \ / _ \| |/ / __| / _ \/\ / _ \ | |_) | | | @@ -11367,162 +17602,552 @@ # Inform plugins about changes of hosts. the_thing can be: # a folder, a file or a host +def register_hook(name, func): + hooks.register(name, func) -class API: - def register_hook(self, name, func): - hooks.register(name, func) +def num_pending_changes(): + return len(parse_audit_log("pending")) - def get_all_users(self): - return userdb.load_users() +def get_folder_tree(): + load_all_folders() + num_hosts_in(g_root_folder) # sets ".total_hosts" + return g_root_folder + +# Find a folder by its path. Raise an exception if it does +# not exist. +def get_folder(path): + prepare_folder_info() - # Get a (flat) dictionary containing all hosts with their *effective* - # attributes (containing all inherited and default values where appropriate) - # of the given folder. If folder is None, returns all hosts from the root folder - # Folder must be returned by get_folder() - def get_all_hosts(self, folder=None): - if not folder: - self.prepare_folder_info() - return collect_hosts(folder or g_root_folder) + folder = g_folders.get(path) + if folder: + load_hosts(folder) + return folder + else: + raise MKGeneralException("No WATO folder %s." % path) - # Find a folder by its path. Raise an exception if it does - # not exist. - def get_folder(self, path): - self.prepare_folder_info() - - folder = g_folders.get(path) - if folder: - load_hosts(folder) - return folder - else: - raise MKGeneralException("No WATO folder %s." % path) - - # Get the number of hosts recursive from the given folder. Folder must be returned by get_folder() - def num_hosts_in_folder(self, folder): - return num_hosts_in(folder, True) +# Return the title of a folder - which is given as a string path +def get_folder_title(path): + load_all_folders() # TODO: use in-memory-cache + folder = g_folders.get(path) + if folder: + return folder["title"] + else: + return path - # Get all effective data of a host. Folder must be returned by get_folder() - def get_host(self, folder, hostname): - host = folder[".hosts"][hostname] - eff = effective_attributes(host, folder) - eff["name"] = hostname - return eff +# Return a list with all the titles of the paths' +# components, e.g. 
"muc/north" -> [ "Main Directory", "Munich", "North" ] +def get_folder_title_path(path, with_links=False): + # In order to speed this up, we work with a per HTML-request cache + cache_name = "wato_folder_titles" + (with_links and "_linked" or "") + cache = html.get_cached(cache_name) + if cache == None: + load_all_folders() + cache = {} + html.set_cache(cache_name, cache) + if path not in cache: + cache[path] = folder_title_path(path, with_links) + return cache[path] + +def sort_by_title(folders): + def folder_cmp(f1, f2): + return cmp(f1["title"].lower(), f2["title"].lower()) + folders.sort(cmp = folder_cmp) + return folders - # Clean the attributes of the given host and returns the resulting host attributes - # host must be returned by get_host() / get_all_hosts() - def clean_host_attributes(self, host, attr): - folder = g_folders.get(host["path"]) - load_hosts(folder) - for entry in attr: - try: - del folder[".hosts"][host["name"]][entry] - except: - continue +def get_all_hosts(folder=None): + if not folder: + prepare_folder_info() + return collect_hosts(folder or g_root_folder) - save_folder_and_hosts(folder) - return folder[".hosts"][host["name"]] +def get_host(folder, hostname): + host = folder[".hosts"][hostname] + eff = effective_attributes(host, folder) + eff["name"] = hostname + return eff - # Update the attributes of the given host and returns the resulting host attributes - # host must be returned by get_host() / get_all_hosts() - def update_host_attributes(self, host, attr): - folder = g_folders.get(host["path"]) - load_hosts(folder) - folder[".hosts"][host["name"]].update(attr) - save_folder_and_hosts(folder) - return folder[".hosts"][host["name"]] +# Create an URL to a certain WATO folder. +def link_to_path(path): + return "wato.py?mode=folder&folder=" + html.urlencode(path) + +# Create an URL to the edit-properties of a host. +def link_to_host(hostname): + return "wato.py?" + html.urlencode_vars( + [("mode", "edithost"), ("host", hostname)]) + + +#. +# .--API Helpers---------------------------------------------------------. +# | _ ____ ___ _ _ _____ _ ____ _____ ____ ____ | +# | / \ | _ \_ _| | | | | ____| | | _ \| ____| _ \/ ___| | +# | / _ \ | |_) | | | |_| | _| | | | |_) | _| | |_) \___ \ | +# | / ___ \| __/| | | _ | |___| |___| __/| |___| _ < ___) | | +# | /_/ \_\_| |___| |_| |_|_____|_____|_| |_____|_| \_\____/ | +# | | +# +----------------------------------------------------------------------+ +# | These functions are used by the Web-API and by WATO as well | +# '----------------------------------------------------------------------' + +# This is the single site activation mode +def activate_changes(): + try: + start = time.time() + check_mk_local_automation(config.wato_activation_method) + duration = time.time() - start + update_replication_status(None, {}, { 'act': duration }) + except Exception: + if config.debug: + import traceback + raise MKUserError(None, "Error executing hooks: %s" % + traceback.format_exc().replace('\n', '
    ')) + else: + raise + +# Checks if the given host_tags are all in known host tag groups and have a valid value +def check_host_tags(host_tags): + for key, value in host_tags.items(): + for group_entry in configured_host_tags: + if group_entry[0] == key: + for value_entry in group_entry[2]: + if value_entry[0] == value: + break + else: + raise MKUserError(None, _("Unknown host tag %s") % html.attrencode(value)) + break + else: + raise MKUserError(None, _("Unknown host tag group %s") % html.attrencode(key)) + +# Create wato folders up to the given path if they don't exists +def create_wato_folders(path): + path_tokens = path.split("/") + current_folder = g_root_folder + for i in range(0, len(path_tokens)): + check_path = "/".join(path_tokens[:i+1]) + if check_path in g_folders: + current_folder = g_folders[check_path] + else: + check_folder_permissions(current_folder, "write") + current_folder = create_wato_folder(current_folder, path_tokens[i], path_tokens[i]) + +# Creates and returns an empty wato folder with the given title +# Write permissions are NOT checked! +def create_wato_folder(parent, name, title, attributes={}): + if parent and parent[".path"]: + newpath = parent[".path"] + "/" + name + else: + newpath = name + + new_folder = { + ".name" : name, + ".path" : newpath, + "title" : title or name, + "attributes" : attributes, + ".folders" : {}, + ".hosts" : {}, + "num_hosts" : 0, + ".lock" : False, + ".parent" : parent, + } + + save_folder(new_folder) + new_folder = reload_folder(new_folder) + + call_hook_folder_created(new_folder) + + # Note: sites are not marked as dirty. + # The creation of a folder without hosts has not effect on the + # monitoring. + log_pending(AFFECTED, new_folder, "new-folder", _("Created new folder %s") % title) + + return new_folder + + +# new_hosts: {"hostA": {attr}, "hostB": {attr}} +def add_hosts_to_folder(folder, new_hosts): + load_hosts(folder) + folder[".hosts"].update(new_hosts) + folder["num_hosts"] = len(folder[".hosts"]) + + for hostname in new_hosts.keys(): + log_pending(AFFECTED, hostname, "create-host",_("Created new host %s.") % hostname) + + save_folder_and_hosts(folder) + + reload_hosts(folder) + mark_affected_sites_dirty(folder, hostname) + call_hook_hosts_changed(folder) + + +# hosts: {"hostname": {"set": {attr}, "unset": [attr]}} +def update_hosts_in_folder(folder, hosts): + updated_hosts = {} + + for hostname, attributes in hosts.items(): + cleaned_attr = dict([ + (k, v) for + (k, v) in + attributes.get("set", {}).iteritems() + if (not k.startswith('.') or k == ".nodes") ]) + # unset keys + for key in attributes.get("unset", []): + if key in cleaned_attr: + del cleaned_attr[key] + + updated_hosts[hostname] = cleaned_attr + + # The site attribute might change. In that case also + # the old site of the host must be marked dirty. + mark_affected_sites_dirty(folder, hostname) + + load_hosts(folder) + folder[".hosts"].update(updated_hosts) + + for hostname in updated_hosts.keys(): + mark_affected_sites_dirty(folder, hostname) + log_pending(AFFECTED, hostname, "edit-host", _("edited properties of host [%s]") % hostname) + + save_folder_and_hosts(folder) + reload_hosts(folder) + call_hook_hosts_changed(folder) + + +# hosts: ["hostA", "hostB", "hostC"] +def delete_hosts_in_folder(folder, hosts): + if folder.get(".lock_hosts"): + raise MKUserError(None, _("Cannot delete host. 
Hosts in this folder are locked")) + + for hostname in hosts: + del folder[".hosts"][hostname] + folder["num_hosts"] -= 1 + mark_affected_sites_dirty(folder, hostname) + log_pending(AFFECTED, hostname, "delete-host", _("Deleted host %s") % hostname) + + save_folder_and_hosts(folder) + call_hook_hosts_changed(folder) + + +#. +# .--WEB API-------------------------------------------------------------. +# | __ _______ ____ _ ____ ___ | +# | \ \ / / ____| __ ) / \ | _ \_ _| | +# | \ \ /\ / /| _| | _ \ / _ \ | |_) | | | +# | \ V V / | |___| |_) | / ___ \| __/| | | +# | \_/\_/ |_____|____/ /_/ \_\_| |___| | +# | | +# +----------------------------------------------------------------------+ +# | | +# '----------------------------------------------------------------------' + +class API: + __all_hosts = None + __prepared_folder_info = False + + def __prepare_folder_info(self, force = False): + if not self.__prepared_folder_info or force: + prepare_folder_info() + self.__prepared_folder_info = True + + def __get_all_hosts(self, force = False): + if not self.__all_hosts or force: + self.__all_hosts = load_all_hosts() + return self.__all_hosts + + def __validate_host_parameters(self, host_foldername, hostname, attributes, all_hosts, create_folders, validate): + if "hostname" in validate: + check_new_hostname(None, hostname) + + if "foldername" in validate: + if not os.path.exists(host_foldername) and not create_folders: + raise MKUserError(None, _("Folder does not exist and no permission to create folders")) + + if host_foldername != "": + host_folder_tokens = host_foldername.split("/") + for dir_token in host_folder_tokens: + check_wato_foldername(None, dir_token, just_name = True) + + if "host_exists" in validate: + if hostname in all_hosts: + raise MKUserError(None, _("Hostname %s already exists") % html.attrencode(hostname)) + + if "host_missing" in validate: + if hostname not in all_hosts: + raise MKUserError(None, _("Hostname %s does not exist") % html.attrencode(hostname)) + + + # Returns the closest parent of an upcoming folder + def get_closest_parent(): + if host_foldername in g_folders: + return g_folders[host_foldername] + + host_folder_tokens = host_foldername.split("/") + for i in range(len(host_folder_tokens), -1, -1): + check_path = "/".join(host_folder_tokens[:i]) + if check_path in g_folders: + return g_folders[check_path] + + def check_folder_lock(check_folder): + # Check if folder or host file is locked + if check_folder == host_foldername: # Target folder exists + if check_folder.get(".lock_hosts"): + raise MKAuthException(_("You are not allowed to modify hosts in this folder. The host configuration in the folder " + "is locked, because it has been created by an external application.")) + else: + if check_folder.get(".lock_subfolders"): + raise MKAuthException(_("Not allowed to create subfolders in this folder. The Folder has been " + "created by an external application and is locked.")) + + if "permissions_create" in validate: + # Find the closest parent folder. 
If we can write there, we can also write in our new folder + check_folder = get_closest_parent() + check_new_host_permissions(check_folder, attributes, hostname) + check_folder_lock(check_folder) + + if "permissions_edit" in validate: + check_folder = all_hosts[hostname][".folder"] + check_edit_host_permissions(check_folder, attributes, hostname) + check_folder_lock(check_folder) + + if "permissions_read" in validate: + check_folder = all_hosts[hostname][".folder"] + check_host_permissions(hostname, folder = check_folder) + + if "tags" in validate: + check_host_tags(dict((key[4:], value) for key, value in attributes.items() if key.startswith("tag_") and value != False)) + + if "site" in validate: + if attributes.get("site"): + if attributes.get("site") not in config.allsites().keys(): + raise MKUserError(None, _("Unknown site %s") % html.attrencode(attributes.get("site"))) + + return True + + def __get_valid_api_host_attributes(self, attributes): + result = {} + + host_attribute_names = map(lambda (x, y): x.name(), host_attributes) + ["inventory_failed", ".nodes"] + + for key, value in attributes.items(): + if key in host_attribute_names: + result[key] = value - # Rewrite the WATO configuration files - def rewrite_configuration(self): - self.prepare_folder_info() - rewrite_config_files_below(g_root_folder) - - # Return displayable information about host (call with result of get_host()) - def get_host_painted(self, host): - result = [] - for attr, topic in host_attributes: - attrname = attr.name() - if attrname in host: - tdclass, content = attr.paint(host[attrname], host["name"]) - result.append((attr.title(), content)) return result - # Get information about the folder and directory tree. - # This is useful for components that display hosts in - # the tree (e.g. the status GUI). - def get_folder_tree(self): - load_all_folders() - num_hosts_in(g_root_folder) # sets ".total_hosts" - return g_root_folder + def lock_wato(self): + lock_exclusive() - # sort list of folders or files by their title - def sort_by_title(self, folders): - def folder_cmp(f1, f2): - return cmp(f1["title"].lower(), f2["title"].lower()) - folders.sort(cmp = folder_cmp) - return folders - - # Create an URL to a certain WATO folder. - def link_to_path(self, path): - return "wato.py?mode=folder&folder=" + htmllib.urlencode(path) - - # Create an URL to the edit-properties of a host. - def link_to_host(self, hostname): - return "wato.py?" + htmllib.urlencode_vars( - [("mode", "edithost"), ("host", hostname)]) - - # Same, but links to services of that host - def link_to_host_inventory(self, hostname): - return "wato.py?" + htmllib.urlencode_vars( - [("mode", "inventory"), ("host", hostname)]) - - # Return the title of a folder - which is given as a string path - def get_folder_title(self, path): - load_all_folders() # TODO: use in-memory-cache - folder = g_folders.get(path) - if folder: - return folder["title"] - else: - return path - - # Return a list with all the titles of the paths' - # components, e.g. "muc/north" -> [ "Main Directory", "Munich", "North" ] - def get_folder_title_path(self, path, withlinks=False): - load_all_folders() # TODO: speed up! - return folder_title_path(path, withlinks) - - # Returns the number of not activated changes. 
- def num_pending_changes(self): - return len(parse_audit_log("pending")) - - # BELOW ARE PRIVATE HELPER FUNCTIONS - def prepare_folder_info(self): - # Initialize attributes and load all folders - declare_host_tag_attributes() - declare_site_attribute() - load_all_folders() + def validate_host_parameters(self, host_foldername, hostname, host_attr, validate = [], create_folders = True): + self.__prepare_folder_info() + all_hosts = self.__get_all_hosts() + + if host_foldername: + host_foldername = host_foldername.strip("/") + else: + if hostname in all_hosts: + host_foldername = all_hosts[hostname][".folder"][".path"] + attributes = self.__get_valid_api_host_attributes(host_attr) + self.__validate_host_parameters(host_foldername, hostname, attributes, all_hosts, create_folders, validate) + + # hosts: [ { "attributes": {attr}, "hostname": "hostA", "folder": "folder1" }, .. ] + def add_hosts(self, hosts, create_folders = True, validate_hosts = True): + self.__prepare_folder_info() + all_hosts = self.__get_all_hosts() + + # Sort hosts into folders + target_folders = {} + for host_data in hosts: + host_foldername = host_data["folder"] + hostname = host_data["hostname"] + host_attr = host_data["attributes"] + + # Tidy up foldername + host_foldername = host_foldername.strip("/") + attributes = self.__get_valid_api_host_attributes(host_attr) + if validate_hosts: + self.__validate_host_parameters(host_foldername, hostname, host_attr, all_hosts, create_folders, + ["hostname", "foldername", "host_exists", "tags", "site", "permissions_create"]) + target_folders.setdefault(host_foldername, {})[hostname] = attributes + + for target_foldername, new_hosts in target_folders.items(): + # Create target folder(s) if required... + create_wato_folders(target_foldername) + + folder = g_folders[target_foldername] + add_hosts_to_folder(folder, new_hosts) + + # As long as some hooks are able to invalidate the + # entire g_folders variable we need to enforce a reload + self.__prepare_folder_info(force = True) + self.__get_all_hosts(force = True) +# +# for host_foldername, new_hosts in target_folders.items(): +# for hostname in new_hosts.keys(): +# all_hosts[hostname] = g_folders[host_foldername][".hosts"][hostname] + + + # hosts: [ { "attributes": {attr}, "unset_attributes": {attr}, "hostname": "hostA"}, .. 
] + def edit_hosts(self, hosts, validate_hosts = True): + self.__prepare_folder_info() + all_hosts = self.__get_all_hosts() + + target_folders = {} + for host_data in hosts: + hostname = host_data["hostname"] + host_attr = host_data.get("attributes", {}) + host_unset_attr = host_data.get("unset_attributes", []) + + attributes = self.__get_valid_api_host_attributes(host_attr) + if validate_hosts: + self.__validate_host_parameters(None, hostname, attributes, all_hosts, True, + ["host_missing", "tags", "site", "permissions_edit"]) + host_foldername = all_hosts[hostname][".folder"][".path"] + new_attr = dict([(k, v) for (k, v) in all_hosts[hostname].iteritems() \ + if (not k.startswith('.'))]) + new_attr.update(attributes) + + target_folders.setdefault(host_foldername, {})[hostname] = {"set": new_attr, + "unset": host_unset_attr} + + for target_foldername, update_hosts in target_folders.items(): + update_hosts_in_folder(g_folders[target_foldername], update_hosts) + + # As long as some hooks are able to invalidate the + # entire g_folders variable we need to enforce a reload + self.__prepare_folder_info(force = True) + self.__get_all_hosts(force = True) +# +# for host_foldername, update_hosts in target_folders.items(): +# for hostname in update_hosts.keys(): +# all_hosts[hostname] = g_folders[host_foldername][".hosts"][hostname] + + + def get_host(self, hostname, effective_attr = False): + self.__prepare_folder_info() + all_hosts = self.__get_all_hosts() + + self.__validate_host_parameters(None, hostname, {}, all_hosts, True, ["host_missing", "permissions_read"]) + the_host = all_hosts[hostname] + if effective_attr: + the_host = effective_attributes(the_host, the_host[".folder"]) - def _cleanup_directory(self, thing): - # drop 'parent' entry, recursively - def drop_internal(folder): - new_folder = {} - new_folder.update(folder) - if ".parent" in new_folder: - del new_folder[".parent"] - if ".folders" in new_folder: - new_folder[".folders"] = drop_internal_dict(new_folder[".folders"]) - return new_folder - - def drop_internal_dict(self, folderdict): - new_dict = {} - for name, thing in folderdict.items(): - new_dict[name] = drop_internal(thing) - return new_dict + cleaned_host = dict([(k, v) for (k, v) in the_host.iteritems() if not k.startswith('.') ]) - return drop_internal(thing) + return { "attributes": cleaned_host, "path": the_host[".folder"][".path"], "hostname": hostname } -api = API() + # hosts: [ "hostA", "hostB", "hostC" ] + def delete_hosts(self, hosts, validate_hosts = True): + self.__prepare_folder_info() + all_hosts = self.__get_all_hosts() + target_folders = {} + for hostname in hosts: + if validate_hosts: + self.__validate_host_parameters(None, hostname, {}, all_hosts, True, ["host_missing", "permissions_edit"]) + host_foldername = all_hosts[hostname][".folder"][".path"] + target_folders.setdefault(host_foldername, []) + target_folders[host_foldername].append(hostname) + + for target_foldername, hosts in target_folders.items(): + folder = g_folders[target_foldername] + delete_hosts_in_folder(folder, hosts) + + # As long as some hooks are able to invalidate the + # entire g_folders variable we need to enforce a reload + self.__prepare_folder_info(force = True) + self.__get_all_hosts(force = True) + + def discover_services(self, hostname, mode = "new"): + self.__prepare_folder_info() + all_hosts = self.__get_all_hosts() + + host = all_hosts[hostname] + folder = host[".folder"] + + config.need_permission("wato.services") + self.__validate_host_parameters(None, hostname, {}, 
all_hosts, True, ["host_missing"]) + check_host_permissions(hostname, folder=folder) + + ### Start inventory + counts, failed_hosts = check_mk_automation(host[".siteid"], "inventory", [ "@scan", mode ] + [hostname]) + if failed_hosts: + if not host.get("inventory_failed") and not folder.get(".lock_hosts"): + host["inventory_failed"] = True + save_hosts(folder) + raise MKUserError(None, _("Failed to inventorize %s: %s") % (hostname, failed_hosts[hostname])) + + if host.get("inventory_failed") and not folder.get(".lock_hosts"): + del host["inventory_failed"] + save_hosts(folder) + + msg = _("Service discovery successful. Added %d, Removed %d, Kept %d, New Count %d") % \ + tuple(counts[hostname]) + + mark_affected_sites_dirty(folder, hostname, sync=False, restart=True) + log_pending(AFFECTED, hostname, "api-inventory", msg) + + return msg + + def activate_changes(self, sites, mode = "dirty", allow_foreign_changes = False): + self.__prepare_folder_info() + + config.need_permission("wato.activate") + + if foreign_changes(): + if not config.may("wato.activateforeign"): + raise MKAuthException(_("You are not allowed to activate changes of other users.")) + if not allow_foreign_changes: + raise MKAuthException(_("There are changes from other users and foreign changes "\ + "are not allowed in this API call.")) + + if mode == "specific": + for site in sites: + if site not in config.allsites().keys(): + raise MKUserError(None, _("Unknown site %s") % html.attrencode(site)) + + + + ### Start activate changes + repstatus = load_replication_status() + errors = [] + if is_distributed(): + for site in config.allsites().values(): + if mode == "all" or (mode == "dirty" and repstatus.get(site["id"],{}).get("need_restart")) or\ + (sites and site["id"] in sites): + try: + synchronize_site(site, True) + except Exception, e: + errors.append("%s: %s" % (site["id"], e)) + + if not site_is_local(site["id"]): + remove_sync_snapshot(site["id"]) + else: # Single site + if mode == "all" or (mode == "dirty" and log_exists("pending")): + try: + activate_changes() + except Exception, e: + errors.append("Exception: %s" % e) + + if not errors: + log_commit_pending() + else: + raise MKUserError(None, ", ".join(errors)) + + def get_all_hosts(self, effective_attr = False): + self.__prepare_folder_info() + all_hosts = self.__get_all_hosts() + return_hosts = {} + + for hostname in all_hosts.keys(): + self.__validate_host_parameters(None, hostname, {}, all_hosts, True, ["host_missing", "permissions_read"]) + + the_host = all_hosts[hostname] + if effective_attr: + the_host = effective_attributes(the_host, the_host[".folder"]) + cleaned_host = dict([(k, v) for (k, v) in the_host.iteritems() if not k.startswith('.') ]) + + return_hosts[hostname] = { "attributes": cleaned_host, "path": the_host[".folder"][".path"], "hostname": hostname } + + return return_hosts # internal helper functions for API def collect_hosts(folder): @@ -11612,7 +18237,7 @@ try: hk(eff) except MKUserError, e: - errors.append(e.message) + errors.append("%s" % e) return errors else: return [] @@ -11636,14 +18261,14 @@ try: hk(eff, all_hosts) except MKUserError, e: - errors.append(e.message) + errors.append("%s" % e) hosts_errors[name] = errors return hosts_errors else: return {} #. -# .-Helpers--------------------------------------------------------------. +# .--Helpers-------------------------------------------------------------. 
# | _ _ _ | # | | | | | ___| |_ __ ___ _ __ ___ | # | | |_| |/ _ \ | '_ \ / _ \ '__/ __| | @@ -11657,10 +18282,19 @@ import base64 def mk_eval(s): - return pickle.loads(base64.b64decode(s)) + try: + if literal_eval and not config.wato_legacy_eval: + return literal_eval(base64.b64decode(s)) + else: + return pickle.loads(base64.b64decode(s)) + except: + raise MKGeneralException(_('Unable to parse provided data: %s') % html.attrencode(repr(s))) def mk_repr(s): - return base64.b64encode(pickle.dumps(s)) + if literal_eval and not config.wato_legacy_eval: + return base64.b64encode(repr(s)) + else: + return base64.b64encode(pickle.dumps(s)) # Returns true when at least one folder is defined in WATO def have_folders(): @@ -11683,16 +18317,25 @@ def host_status_button(hostname, viewname): html.context_button(_("Status"), - "view.py?" + htmllib.urlencode_vars([ + "view.py?" + html.urlencode_vars([ ("view_name", viewname), ("filename", g_folder[".path"] + "/hosts.mk"), ("host", hostname), ("site", "")]), "status") # TODO: support for distributed WATO +def service_status_button(hostname, servicedesc): + html.context_button(_("Status"), + "view.py?" + html.urlencode_vars([ + ("view_name", "service"), + ("host", hostname), + ("service", servicedesc), + ]), + "status") # TODO: support for distributed WATO + def folder_status_button(viewname = "allhosts"): html.context_button(_("Status"), - "view.py?" + htmllib.urlencode_vars([ + "view.py?" + html.urlencode_vars([ ("view_name", viewname), ("wato_folder", g_folder[".path"])]), "status") # TODO: support for distributed WATO @@ -11710,7 +18353,7 @@ def changelog_button(): pending = parse_audit_log("pending") if len(pending) > 0: - buttontext = "%d " % len(pending) + _("Changes") + "" + buttontext = "%d " % len(pending) + _("Changes") hot = True icon = "wato_changes" else: @@ -11774,7 +18417,7 @@ return html.makeuri_contextless(vars) def make_action_link(vars): - return make_link(vars + [("_transid", html.fresh_transid())]) + return make_link(vars + [("_transid", html.get_transid())]) # Show confirmation dialog, send HTML-header if dialog is shown. 
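(A minimal standalone sketch, not part of the patch: it shows the round-trip behind
the reworked mk_repr/mk_eval in the hunk above, assuming Python 2.6+ for
ast.literal_eval. The *_sketch names and the sample value are invented for
illustration. Serializing with repr() and parsing with literal_eval only ever
yields plain Python literals, so untrusted input can no longer trigger arbitrary
object construction the way pickle.loads could.)

    import base64
    from ast import literal_eval

    def mk_repr_sketch(value):
        # repr() of plain literals (dicts, lists, strings, numbers) is again a literal
        return base64.b64encode(repr(value))

    def mk_eval_sketch(blob):
        # literal_eval parses literals only and raises on anything executable
        return literal_eval(base64.b64decode(blob))

    assert mk_eval_sketch(mk_repr_sketch({'host': 'srv01', 'port': 161})) == \
           {'host': 'srv01', 'port': 161}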
@@ -11867,8 +18510,71 @@ return config.may("wato.use") and \ (config.may("wato.seeall") or config.may("wato.hosts")) +def is_alias_used(my_what, my_name, my_alias): + # Host / Service / Contact groups + all_groups = userdb.load_group_information() + for what, groups in all_groups.items(): + for gid, group in groups.items(): + if group['alias'] == my_alias and (my_what != what or my_name != gid): + return False, _("This alias is already used in the %s group %s.") % (what, gid) + + # Timeperiods + timeperiods = load_timeperiods() + for key, value in timeperiods.items(): + if value.get("alias") == my_alias and (my_what != "timeperiods" or my_name != key): + return False, _("This alias is already used in timeperiod %s.") % key + + # Roles + roles = userdb.load_roles() + for key, value in roles.items(): + if value.get("alias") == my_alias and (my_what != "roles" or my_name != key): + return False, _("This alias is already used in the role %s.") % key + + return True, None + +# Checks if a valuespec is a Checkbox +def is_a_checkbox(vs): + if isinstance(vs, Checkbox): + return True + elif isinstance(vs, Transform): + return is_a_checkbox(vs._valuespec) + else: + return False + +def site_neutral_path(path): + if path.startswith('/omd'): + parts = path.split('/') + parts[3] = '<siteid>' + return '/'.join(parts) + else: + return path + +syslog_facilities = [ + (0, "kern"), + (1, "user"), + (2, "mail"), + (3, "daemon"), + (4, "auth"), + (5, "syslog"), + (6, "lpr"), + (7, "news"), + (8, "uucp"), + (9, "cron"), + (10, "authpriv"), + (11, "ftp"), + (16, "local0"), + (17, "local1"), + (18, "local2"), + (19, "local3"), + (20, "local4"), + (21, "local5"), + (22, "local6"), + (23, "local7"), +] + + #. -# .-Plugins--------------------------------------------------------------. +# .--Plugins-------------------------------------------------------------. # | ____ _ _ | # | | _ \| |_ _ __ _(_)_ __ ___ | # | | |_) | | | | |/ _` | | '_ \/ __| | @@ -11879,17 +18585,26 @@ # | Prepare plugin-datastructures and load WATO plugins | # '----------------------------------------------------------------------' +# permissions = None -> every user can use this mode, permissions +# are checked by the mode itself. Otherwise the user needs at +# least wato.use and - if he makes actions - wato.edit. Plus wato.* +# for each permission in the list. 
modes = { + # ident, permissions, handler function "main" : ([], mode_main), "folder" : (["hosts"], mode_folder), "newfolder" : (["hosts", "manage_folders"], lambda phase: mode_editfolder(phase, True)), "editfolder" : (["hosts" ], lambda phase: mode_editfolder(phase, False)), "newhost" : (["hosts", "manage_hosts"], lambda phase: mode_edithost(phase, True, False)), "newcluster" : (["hosts", "manage_hosts"], lambda phase: mode_edithost(phase, True, True)), + "rename_host" : (["hosts", "manage_hosts"], lambda phase: mode_rename_host(phase)), + "bulk_import" : (["hosts", "manage_hosts"], lambda phase: mode_bulk_import(phase)), "edithost" : (["hosts"], lambda phase: mode_edithost(phase, False, None)), "parentscan" : (["hosts"], mode_parentscan), "firstinventory" : (["hosts", "services"], lambda phase: mode_inventory(phase, True)), "inventory" : (["hosts"], lambda phase: mode_inventory(phase, False)), + "diag_host" : (["hosts", "diag_host"], mode_diag_host), + "object_parameters" : (["hosts", "rulesets"], mode_object_parameters), "search" : (["hosts"], mode_search), "search_results" : (["hosts"], mode_search_results), "bulkinventory" : (["hosts", "services"], mode_bulk_inventory), @@ -11900,9 +18615,13 @@ "auditlog" : (["auditlog"], mode_auditlog), "snapshot" : (["snapshots"], mode_snapshot), "globalvars" : (["global"], mode_globalvars), + "snapshot_detail" : (["snapshots"], mode_snapshot_detail), "edit_configvar" : (["global"], mode_edit_configvar), + "ldap_config" : (["global"], mode_ldap_config), "ruleeditor" : (["rulesets"], mode_ruleeditor), + "static_checks" : (["rulesets"], mode_static_checks), "rulesets" : (["rulesets"], mode_rulesets), + "ineffective_rules" : (["rulesets"], mode_ineffective_rules), "edit_ruleset" : (["rulesets"], mode_edit_ruleset), "new_rule" : (["rulesets"], lambda phase: mode_edit_rule(phase, True)), "edit_rule" : (["rulesets"], lambda phase: mode_edit_rule(phase, False)), @@ -11912,34 +18631,49 @@ "edit_host_group" : (["groups"], lambda phase: mode_edit_group(phase, "host")), "edit_service_group" : (["groups"], lambda phase: mode_edit_group(phase, "service")), "edit_contact_group" : (["users"], lambda phase: mode_edit_group(phase, "contact")), + "notifications" : (["notifications"], mode_notifications), + "notification_rule" : (["notifications"], lambda phase: mode_notification_rule(phase, False)), + "user_notifications" : (["users"], lambda phase: mode_user_notifications(phase, False)), + "notification_rule_p": (None, lambda phase: mode_notification_rule(phase, True)), # for personal settings + "user_notifications_p":(None, lambda phase: mode_user_notifications(phase, True)), # for personal settings "timeperiods" : (["timeperiods"], mode_timeperiods), "edit_timeperiod" : (["timeperiods"], mode_edit_timeperiod), + "import_ical" : (["timeperiods"], mode_timeperiod_import_ical), "sites" : (["sites"], mode_sites), "edit_site" : (["sites"], mode_edit_site), + "edit_site_globals" : (["sites"], mode_edit_site_globals), "users" : (["users"], mode_users), "edit_user" : (["users"], mode_edit_user), + "user_attrs" : (["users"], lambda phase: mode_custom_attrs(phase, "user")), + "edit_user_attr" : (["users"], lambda phase: mode_edit_custom_attr(phase, "user")), "roles" : (["users"], mode_roles), "role_matrix" : (["users"], mode_role_matrix), "edit_role" : (["users"], mode_edit_role), "hosttags" : (["hosttags"], mode_hosttags), "edit_hosttag" : (["hosttags"], mode_edit_hosttag), "edit_auxtag" : (["hosttags"], mode_edit_auxtag), - "pattern_editor" : (["pattern_editor"], 
mode_pattern_editor) + "pattern_editor" : (["pattern_editor"], mode_pattern_editor), + "bi_rules" : (["bi_rules"], mode_bi_rules), + "bi_rule_tree" : (["bi_rules"], mode_bi_rule_tree), + "bi_edit_rule" : (["bi_rules"], mode_bi_edit_rule), + "bi_edit_aggregation": (["bi_rules"], mode_bi_edit_aggregation), } loaded_with_language = False def load_plugins(): - global extra_buttons + global g_git_messages + g_git_messages = [] + global loaded_with_language if loaded_with_language == current_language: return # Reset global vars - global extra_buttons, configured_host_tags, host_attributes + global extra_buttons, configured_host_tags, host_attributes, modules extra_buttons = [] configured_host_tags = None host_attributes = [] - userdb.reset_user_attributes() + modules = [] load_notification_table() @@ -12021,7 +18755,7 @@ [ "admin", ]) config.declare_permission("wato.auditlog", - _("Audit log"), + _("Audit Log"), _("Access to the historic audit log. A user with write " "access can delete the audit log. " "The currently pending changes can be seen by all users " @@ -12063,6 +18797,14 @@ "Modify existing hosts."), [ "admin", "user" ]) + config.declare_permission("wato.diag_host", + _("Host Diagnostic"), + _("Check whether or not the host is reachable, test the different methods " + "a host can be accessed, for example via agent, SNMPv1, SNMPv2 to find out " + "the correct monitoring configuration for that host."), + [ "admin", "user" ]) + + config.declare_permission("wato.clone_hosts", _("Clone hosts"), _("Clone existing hosts to create new ones from the existing one." @@ -12076,6 +18818,12 @@ "hosts and thus simulate larger environments."), [ ]) + config.declare_permission("wato.update_dns_cache", + _("Update DNS Cache"), + _("Updating the DNS cache is neccessary in order to reflect IP address " + "changes in hosts that are configured without an explicit address."), + [ "admin", "user" ]) + config.declare_permission("wato.services", _("Manage services"), _("Do inventory and service configuration on existing hosts."), @@ -12138,12 +18886,17 @@ config.declare_permission("wato.automation", _("Site remote automation"), _("This permission is needed for a remote administration of the site " - "as a distributed WATO slave or peer."), + "as a distributed WATO slave."), [ "admin", ]) config.declare_permission("wato.users", _("User management"), - _("This permission is needed for the modules Users & Contacts, Roles and Contact Groups"), + _("This permission is needed for the modules Users, Roles and Contact Groups"), + [ "admin", ]) + + config.declare_permission("wato.notifications", + _("Notification configuration"), + _("This permission is needed for the new rule based notification configuration via the WATO module Notifications."), [ "admin", ]) config.declare_permission("wato.snapshots", @@ -12159,6 +18912,11 @@ _("Access to the module for analyzing and validating logfile patterns."), [ "admin", "user" ]) + config.declare_permission("wato.bi_rules", + _("Business Intelligence Rules"), + _("Edit the rules for the BI aggregations."), + [ "admin" ]) + load_web_plugins("wato", globals()) diff -Nru check-mk-1.2.2p3/htdocs/webapi.py check-mk-1.2.6p12/htdocs/webapi.py --- check-mk-1.2.2p3/htdocs/webapi.py 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/htdocs/webapi.py 2015-09-21 10:59:54.000000000 +0000 @@ -0,0 +1,119 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ 
___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. + +from lib import * +from wato import API +import config + +# Python 2.3 does not have 'set' in normal namespace. +# But it can be imported from 'sets' +try: + set() +except NameError: + from sets import Set as set + +api_actions = {} +loaded_with_language = False + +def load_plugins(): + global loaded_with_language + if loaded_with_language == current_language: + return + + load_web_plugins("webapi", globals()) + + # This must be set after plugin loading to make broken plugins raise + # exceptions all the time and not only the first time (when the plugins + # are loaded). + loaded_with_language = current_language + + config.declare_permission("wato.api_allowed", _("Access to Web-API"), + _("This permissions specifies if the role "\ + "is able to use Web-API functions. It is only available "\ + "for automation users."), + config.builtin_role_ids) + +g_api = None + +def page_api(): + global g_api + + try: + if not config.user.get("automation_secret"): + raise MKAuthException("The WATO API is only available for automation users") + + config.need_permission("wato.use") + config.need_permission("wato.api_allowed") + + action = html.var('action') + if action not in api_actions: + raise MKUserError(None, "Unknown API action %s" % html.attrencode(action)) + + # Create API instance + g_api = API() + + # Prepare request_object + # Most of the time the request is given as json + # However, the plugin may have an own mechanism to interpret the request + request_object = {} + if html.var("request"): + if api_actions[action].get("dont_eval_request"): + request_object = html.var("request") + else: + eval_function = None + request = html.var("request") + + try: + import json + eval_function = json.loads + except ImportError: + eval_function = literal_eval + # modify request so it can be read by literal_eval... 
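(A worked illustration, not part of the patch, of the fallback named in the
comment above; the replacement loop itself follows right below. The payload is
hypothetical, and Python 2.6+ is assumed so ast.literal_eval is available when
the json module is not: the three substitutions turn JSON keywords into Python
literals that literal_eval can parse.)

    from ast import literal_eval

    request = '{"hostname": "srv01", "create_folders": true, "attributes": null}'
    for old, new in [ (": null", ": None"), (": true", ": True"), (": false", ": False") ]:
        request = request.replace(old, new)
    assert literal_eval(request) == {'hostname': 'srv01',
                                     'create_folders': True,
                                     'attributes': None}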
+ for old, new in [ (": null", ": None"), + (": true", ": True"), + (": false", ": False"), ]: + request = request.replace(old, new) + request_object = eval_function(request) + else: + request_object = {} + + if api_actions[action].get("locking", True): + g_api.lock_wato() + + + action_response = api_actions[action]["handler"](request_object) + response = { "result_code": 0, "result": action_response } + except Exception, e: + response = { "result_code": 1, "result": str(e) } + + output_format = html.var("output_format", "json") + if output_format == "json": + # TODO: implement json alternative for python < 2.5 + import json + html.write(json.dumps(response)) + else: + html.write(repr(response)) + diff -Nru check-mk-1.2.2p3/htdocs/weblib.py check-mk-1.2.6p12/htdocs/weblib.py --- check-mk-1.2.2p3/htdocs/weblib.py 2013-11-05 09:23:08.000000000 +0000 +++ check-mk-1.2.6p12/htdocs/weblib.py 2015-06-24 09:48:38.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. @@ -26,58 +26,20 @@ import config import lib - -# .--Treestates----------------------------------------------------------. -# | _____ _ _ | -# | |_ _| __ ___ ___ ___| |_ __ _| |_ ___ ___ | -# | | || '__/ _ \/ _ \/ __| __/ _` | __/ _ \/ __| | -# | | || | | __/ __/\__ \ || (_| | || __/\__ \ | -# | |_||_| \___|\___||___/\__\__,_|\__\___||___/ | -# | | -# +----------------------------------------------------------------------+ -# | Saves and loads the current states of foldertrees for the user | -# +----------------------------------------------------------------------+ - -treestates = {} -treestates_for_id = None - -def load_tree_states(): - global treestates - global treestates_for_id - if html.id is not treestates_for_id: - treestates = config.load_user_file("treestates", {}) - treestates_for_id = html.id - -def save_tree_states(): - config.save_user_file("treestates", treestates) - -def get_tree_states(tree): - load_tree_states() - return treestates.get(tree, {}) - -def set_tree_state(tree, key, val): - load_tree_states() - - if tree not in treestates: - treestates[tree] = {} - - treestates[tree][key] = val - -def set_tree_states(tree, val): - load_tree_states() - treestates[tree] = val +import re def ajax_tree_openclose(): - load_tree_states() + html.load_tree_states() tree = html.var("tree") - name = html.var("name") + name = html.var_utf8("name") if not tree or not name: MKUserError('tree or name parameter missing') - set_tree_state(tree, name, html.var("state")) - save_tree_states() + html.set_tree_state(tree, name, html.var("state")) + html.save_tree_states() + html.write('OK') # Write out something to make debugging easier # .--Row Selector--------------------------------------------------------. # | ____ ____ _ _ | @@ -116,16 +78,25 @@ # Generates a selection id or uses the given one def selection_id(): if not html.has_var('selection'): - sel_id = file('/proc/sys/kernel/random/uuid').read().strip() - html.add_var('selection', sel_id) - return html.var('selection') + sel_id = lib.gen_id() + html.set_var('selection', sel_id) + return sel_id + else: + sel_id = html.var('selection') + # Avoid illegal file access by introducing .. 
or / + if not re.match("^[-0-9a-zA-Z]+$", sel_id): + new_id = lib.gen_id() + html.set_var('selection', new_id) + return new_id + else: + return sel_id def get_rowselection(ident): vo = config.load_user_file("rowselection/%s" % selection_id(), {}) return vo.get(ident, []) def set_rowselection(ident, rows, action): - vo = config.load_user_file("rowselection/%s" % selection_id(), {}) + vo = config.load_user_file("rowselection/%s" % selection_id(), {}, True) if action == 'set': vo[ident] = rows @@ -142,7 +113,7 @@ if not os.path.exists(config.user_confdir + '/rowselection'): make_nagios_directory(config.user_confdir + '/rowselection') - config.save_user_file("rowselection/%s" % selection_id(), vo) + config.save_user_file("rowselection/%s" % selection_id(), vo, True) def ajax_set_rowselection(): ident = html.var('id') diff -Nru check-mk-1.2.2p3/hwg_temp check-mk-1.2.6p12/hwg_temp --- check-mk-1.2.2p3/hwg_temp 2013-11-05 09:23:07.000000000 +0000 +++ check-mk-1.2.6p12/hwg_temp 2015-09-21 10:59:54.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. @@ -24,10 +24,16 @@ # to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, # Boston, MA 02110-1301 USA. -hwg_temp_defaultlevels = (23, 25) + +hwg_temp_defaultlevels = (30, 35) + +def inventory_hwg_temp(info): + return [ (line[0], "hwg_temp_defaultlevels") + for line in info + if int(line[2]) != 0 and line[4] in ["1", "2", "3"] ] + def check_hwg_temp(item, params, info): - warn, crit = params status_text = { "0" : "Invalid", "1" : "Normal", @@ -35,56 +41,61 @@ "3" : "Out Of Range High", "4" : "Alarm Low", "5" : "Alarm High", - } + } + unit_text = { "0" : "unknown", "1" : "°C", "2" : "°F", "3" : "°K", "4" : "%", - } - for line in info: - if line[0] == item: - descr, status, current, current_m, unit = line[1:] - current_state = savefloat(current_m)/10 - - state = 0 - if current_state >= crit: - state = 2 - elif current_state >= warn: - state = 1 - elif status in (4, 2): - state = 1 - elif status == 3: - state = 2 - - perfdata = [ ("temp", current, warn, crit, 0 ) ] - unit = unit_text.get(unit) - status = status_text.get(status, "UNKOWN") - return(state, nagios_state_names[state] + " - %s at %s%s (warn/crit at %s%s/%s%s), Status is %s" % \ - (descr, current, unit, warn, unit, crit, unit, status), perfdata) + } + + # Nomenclature in this check: sensorstatus is what the device sends, state is what the check returns. 
+ for index, descr, sensorstatus, current, unit in info: + if index == item: + tempval = float(current) + if unit == "2": + temp = fahrenheit_to_celsius(tempval) + elif unit == "3": + temp = tempval - 273.15 + elif unit == "4": + return + else: + temp = tempval + + state, infotext, perfdata = check_temperature(temp, params) + if descr: + infotext += " (%s)" % descr + yield state, infotext, perfdata + + if sensorstatus != '1': + if sensorstatus in ['2', '3', '4', '5']: + state = 2 + else: + state = 3 + + yield state, "Status is %s" % status_text.get(sensorstatus, "UNKNOWN") - return (3, "UNKNOWN - Sensor %s not found in SNMP data %s" % (item) ) check_info['hwg_temp'] = { - "check_function" : check_hwg_temp, - "inventory_function" : lambda info: [ (line[0], hwg_temp_defaultlevels ) for line in info if int(line[2]) != 0] , - "service_description" : "Temperature %s", - "has_perfdata" : True, + "check_function" : check_hwg_temp, + "inventory_function" : inventory_hwg_temp, + "service_description" : "Temperature %s", + "has_perfdata" : True, "snmp_info" : (".1.3.6.1.4.1.21796.4.1.3", [# sensors index (1-2) - "1.1", - # sensor name string - "1.2", - # unit state: 0=Invalid, 1=Normal, 2=OutOfRangeLo, 3=OutOfRangeHi, 4=AlarmLo, 5=AlarmHi - "1.3", - # current value string - "1.4", - # current value*10 integer - "1.5", - # sensor unit integer 0=unknown, 1=°C, 2=°F, 3=°K, 4=% - "1.7", + "1.1", + # sensor name string + "1.2", + # unit state: 0=Invalid, 1=Normal, 2=OutOfRangeLo, 3=OutOfRangeHi, 4=AlarmLo, 5=AlarmHi + "1.3", + # current value string + "1.4", + # sensor unit integer 0=unknown, 1=°C, 2=°F, 3=°K, 4=% + "1.7", ]), - "snmp_scan_function" : lambda oid: "hwg" in oid(".1.3.6.1.2.1.1.1.0").lower(), - "group" : "hw_temperature", + "snmp_scan_function" : lambda oid: "hwg" in oid(".1.3.6.1.2.1.1.1.0").lower(), + "group" : "room_temperature", + "includes" : [ "temperature.include" ], } diff -Nru check-mk-1.2.2p3/hyperv_vms check-mk-1.2.6p12/hyperv_vms --- check-mk-1.2.2p3/hyperv_vms 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/hyperv_vms 2015-06-24 09:48:37.000000000 +0000 @@ -0,0 +1,125 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. 
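The reworked check_hwg_temp above reduces every sensor reading to degrees Celsius before handing it to check_temperature from temperature.include. A minimal standalone sketch of that normalization; fahrenheit_to_celsius is inlined here as an assumption about what the shared include provides:

    def fahrenheit_to_celsius(tempval):
        # assumed to match the helper shipped in temperature.include
        return (tempval - 32.0) * 5.0 / 9.0

    def normalize_to_celsius(current, unit_code):
        # unit_code as sent by the device: 1 = deg C, 2 = deg F, 3 = K, 4 = %
        tempval = float(current)
        if unit_code == "2":
            return fahrenheit_to_celsius(tempval)
        elif unit_code == "3":
            return tempval - 273.15
        elif unit_code == "4":
            return None   # humidity sensor: nothing to convert
        return tempval

    assert 21 < normalize_to_celsius("71", "2") < 22   # 71 deg F is about 21.7 deg C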
+ +# Example output from agent: +# <<<hyperv_vms>>> +# DMZ-DC1 Running 4.21:44:58 Operating normally +# DMZ-DC2 Running 4.21:44:47 Operating normally + +# Another example, here with a snapshot with spaces in the name: +# <<<hyperv_vms>>> +# windows-hyperv2-z4058044 Running 21:33:08 Operating normally +# windows-hyperv2-z4058044_snap (23.05.2014 - 09:29:29) Running 18:20:34 Operating normally +# windows-hyperv2-z4065002 Running 11:04:50 Operating normally +# windows-hyperv2-z4065084 Running 1.10:42:33 Operating normally +# windows-hyperv2-z4133235 Running 1.03:52:18 Operating normally + +# A broken version of the agent produced this: +# <<<hyperv_vms>>> +# z4058044 Running 21:19:14 Operating normally +# z4058044_snap (2... Running 18:06:39 Operating normally +# z4065002 Running 10:50:55 Operating normally +# z4065084 Running 1.10:28:39 Operating normally +# z4133235 Running 1.03:38:23 Operating normally + +# A version with a plugin that uses tab as separator and quotes the strings: +# <<<hyperv_vms>>> +# "Name" "State" "Uptime" "Status" +# "z4058013" "Running" "06:05:16" "Operating normally" +# "z4058020" "Running" "01:01:57" "Operating normally" +# "z4058021" "Running" "01:02:11" "Operating normally" +# "z4065012" "Running" "01:02:04" "Operating normally" +# "z4065013" "Running" "07:47:27" "Operating normally" +# "z4065020" "Running" "01:02:09" "Operating normally" +# "z4065025" "Running" "01:02:05" "Operating normally" +# "z4133199" "Running" "00:57:23" "Operating normally" + +# result: +# { +# "windows-hyperv2-z4058044_snap (23.05.2014 - 09:29:29)" : { +# "state" : "Running", +# "uptime" : "1.10:42:33", +# "state_msg" : "Operating normally", +# } +# } + +def parse_hyperv_vms(info): + parsed = {} + for line in info: + # Remove quotes + line = [ x.strip('"') for x in line ] + if line[1].endswith("..."): # broken output + vm_name = line[0] + line = line[2:] + elif line[1].startswith("("): + idx = 2 + while idx < len(line): + if line[idx].endswith(")"): + vm_name = " ".join(line[:idx+1]) + break + idx += 1 + line = line[idx+1:] + else: + vm_name = line[0] + line = line[1:] + + if ':' in line[1]: # skip the header line + parsed[vm_name] = { + "state" : line[0], + "uptime" : line[1], + "state_msg" : " ".join(line[2:]), + } + return parsed + + +def inventory_hyperv_vms(parsed): + return [ (vm_name, {'state': vm["state"] }) + for (vm_name, vm) + in parsed.items() ] + + +def check_hyperv_vms(item, params, parsed): + if item in parsed: + vm = parsed[item] + if vm["state"] == params['state']: + state = 0 + message = "State is %s (%s)" % (vm["state"], vm["state_msg"]) + else: + message = "State has changed from %s to %s (%s)" % ( + params['state'],\ + vm["state"], + vm["state_msg"]) + state = 2 + return state, message + + +check_info["hyperv_vms"] = { + "parse_function" : parse_hyperv_vms, + "check_function" : check_hyperv_vms, + "inventory_function" : inventory_hyperv_vms, + "service_description" : "VM %s", +} + diff -Nru check-mk-1.2.2p3/ibm_imm_health check-mk-1.2.6p12/ibm_imm_health --- check-mk-1.2.2p3/ibm_imm_health 2013-11-05 09:23:07.000000000 +0000 +++ check-mk-1.2.6p12/ibm_imm_health 2015-09-21 10:59:54.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK.
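To make the snapshot-name reassembly in parse_hyperv_vms concrete, here is a quick sanity check against rows shaped like the example output above (hand-split on whitespace, the way the agent section arrives):

    info = [
        ["windows-hyperv2-z4058044", "Running", "21:33:08", "Operating", "normally"],
        ["windows-hyperv2-z4058044_snap", "(23.05.2014", "-", "09:29:29)",
         "Running", "18:20:34", "Operating", "normally"],
    ]
    parsed = parse_hyperv_vms(info)
    assert parsed["windows-hyperv2-z4058044_snap (23.05.2014 - 09:29:29)"] == {
        "state"     : "Running",
        "uptime"    : "18:20:34",
        "state_msg" : "Operating normally",
    }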
@@ -24,7 +24,12 @@ # to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, # Boston, MA 02110-1301 USA. -# Author: Michael Nieporte - Based on rsa_health by Mathias Kettner +def ibm_imm_health_scan(oid): + if oid('.1.3.6.1.2.1.1.1.0').lower().endswith(" mips") or \ + oid('.1.3.6.1.2.1.1.1.0').lower().endswith(" sh4a"): + return True + else: + return False def inventory_ibm_imm_health(info): if len(info) > 0: @@ -42,18 +47,22 @@ state = info[0][0] if state == '255': - return (0, "OK - no problem found") + return (0, "no problem found") elif state in ['0']: - return (2, "CRIT - " + infotext + " - manual log clearing needed to recover state" ) + return (2, infotext + " - manual log clearing needed to recover state" ) elif state in ['2']: - return (2, "CRIT - " + infotext) + return (2, infotext) elif state == ['4']: - return (1, "WARN - " + infotext) + return (1, infotext) else: - return (3, "UNKNOWN - " + infotext) - -check_info['ibm_imm_health'] = ( check_ibm_imm_health, "System health", 0, inventory_ibm_imm_health ) -snmp_info['ibm_imm_health'] = ( ".1.3.6.1.4.1.2.3.51.3.1", ["4"] ) + return (3, infotext) -snmp_scan_functions['ibm_imm_health'] = lambda oid: \ - oid('.1.3.6.1.2.1.1.1.0').lower().endswith(" mips") +check_info["ibm_imm_health"] = { + 'check_function': check_ibm_imm_health, + 'inventory_function': inventory_ibm_imm_health, + 'service_description': 'System health', + 'snmp_info': ('.1.3.6.1.4.1.2.3.51.3.1', ['4']), + 'snmp_scan_function': + lambda oid: oid('.1.3.6.1.2.1.1.1.0').lower().endswith(" mips") or \ + oid('.1.3.6.1.2.1.1.1.0').lower().endswith(" sh4a"), +} diff -Nru check-mk-1.2.2p3/ibm_rsa_health check-mk-1.2.6p12/ibm_rsa_health --- check-mk-1.2.2p3/ibm_rsa_health 2013-11-05 09:23:07.000000000 +0000 +++ check-mk-1.2.6p12/ibm_rsa_health 2015-06-24 09:48:37.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. 
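The sysDescr test for the IMM now appears twice above, once as ibm_imm_health_scan and once as the inline lambda in check_info; both reduce to the same predicate. Isolated below, with a stub standing in for the SNMP getter that Check_MK passes as oid (the helper name here is illustrative):

    def imm_scan(oid):
        sys_descr = oid(".1.3.6.1.2.1.1.1.0").lower()
        return sys_descr.endswith(" mips") or sys_descr.endswith(" sh4a")

    # stubbed lookup instead of a live SNMP walk
    assert imm_scan(lambda _oid: "IBM Integrated Management Module MIPS")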
@@ -52,15 +52,20 @@ state = info[0][0] if state == '255': - return (0, "OK - no problem found") + return (0, "no problem found") elif state in ['0', '2']: - return (2, "CRIT - " + infotext) + return (2, infotext) elif state == '4': - return (1, "WARN - " + infotext) + return (1, infotext) else: - return (3, "UNKNOWN - " + infotext) + return (3, infotext) -check_info['ibm_rsa_health'] = ( check_ibm_rsa_health, "System health", 0, inventory_ibm_rsa_health) -snmp_info['ibm_rsa_health'] = ( ".1.3.6.1.4.1.2.3.51.1.2", ["7"] ) -snmp_scan_functions['ibm_rsa_health'] = \ - lambda oid: "Remote Supervisor Adapter" in oid(".1.3.6.1.2.1.1.1.0") + +check_info["ibm_rsa_health"] = { + 'check_function': check_ibm_rsa_health, + 'inventory_function': inventory_ibm_rsa_health, + 'service_description': 'System health', + 'snmp_info': ('.1.3.6.1.4.1.2.3.51.1.2', ['7']), + 'snmp_scan_function': \ + lambda oid: "Remote Supervisor Adapter" in oid(".1.3.6.1.2.1.1.1.0"), +} diff -Nru check-mk-1.2.2p3/ibm_svc_array check-mk-1.2.6p12/ibm_svc_array --- check-mk-1.2.2p3/ibm_svc_array 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/ibm_svc_array 2015-09-21 10:59:54.000000000 +0000 @@ -0,0 +1,67 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. 
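A note on the declaration change that repeats throughout this diff: in the legacy tuple form being removed, the field order is, as far as the old 1.2.2 convention goes, (check_function, service_description, has_perfdata, inventory_function), so the bare 0 was the has_perfdata flag rather than a default parameter. The dict form names each field instead:

    # legacy 1.2.2 spelling (removed above); field order per the old convention:
    #   check_info['ibm_rsa_health'] = (check_ibm_rsa_health,      # check_function
    #                                   "System health",           # service_description
    #                                   0,                         # has_perfdata flag
    #                                   inventory_ibm_rsa_health)  # inventory_function
    # the dict spelling used for ibm_rsa_health above is equivalent, minus the flag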
+ +# Example output from agent: +# <<>> +# 27:SSD_mdisk27:online:1:POOL_0_V7000_RZ:372.1GB:online:raid1:1:256:generic_ssd +# 28:SSD_mdisk28:online:2:POOL_1_V7000_BRZ:372.1GB:online:raid1:1:256:generic_ssd +# 29:SSD_mdisk0:online:1:POOL_0_V7000_RZ:372.1GB:online:raid1:1:256:generic_ssd +# 30:SSD_mdisk1:online:2:POOL_1_V7000_BRZ:372.1GB:online:raid1:1:256:generic_ssd + + +def inventory_ibm_svc_array(info): + for line in info: + if len(line) in (11, 12): + yield line[0], None + +def check_ibm_svc_array(item, _no_params, info): + for line in info: + if len(line) in (11, 12) and line[0] == item: + raid_status = line[6] + raid_level = line[7] + tier = line[10] + + # Check raid_status + message = "Status: %s" % raid_status + if raid_status == "online": + status = 0 + elif raid_status in ("offline", "degraded"): + status = 2 + message += "(!!)" + else: + status = 1 + message += "(!)" + + # add information + message += ", RAID Level: %s, Tier: %s" % (raid_level, tier) + + return status, message + +check_info["ibm_svc_array"] = { + "check_function" : check_ibm_svc_array, + "inventory_function" : inventory_ibm_svc_array, + "service_description" : "RAID Array %s", +} diff -Nru check-mk-1.2.2p3/ibm_svc_enclosure check-mk-1.2.6p12/ibm_svc_enclosure --- check-mk-1.2.2p3/ibm_svc_enclosure 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/ibm_svc_enclosure 2015-06-24 09:48:37.000000000 +0000 @@ -0,0 +1,101 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. 
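check_ibm_svc_array folds the device-reported RAID status into a Nagios state and tags the summary with the conventional (!) / (!!) markers for WARN / CRIT. The mapping in isolation (helper name is illustrative, not part of the shipped check):

    def rate_raid_status(raid_status):
        # returns (nagios_state, marker appended to the summary text)
        if raid_status == "online":
            return 0, ""
        elif raid_status in ("offline", "degraded"):
            return 2, "(!!)"
        else:
            return 1, "(!)"

    assert rate_raid_status("degraded") == (2, "(!!)")
    assert rate_raid_status("initting") == (1, "(!)")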
+ +# Example output from agent: +# <<>> +# 1:online:control:yes:0:io_grp0:2072-24C:7804037:2:2:2:2:24 +# 2:online:expansion:yes:0:io_grp0:2072-24E:7804306:2:2:2:2:24 +# 3:online:expansion:yes:0:io_grp0:2072-24E:7804326:2:2:2:2:24 +# 4:online:expansion:yes:0:io_grp0:2072-24E:7804352:2:2:2:2:24 + +# After a firmware upgrade the output looked like this: +# 1:online:control:yes:0:io_grp0:2072-24C:7804037:2:2:2:2:24:0:0 +# 2:online:expansion:yes:0:io_grp0:2072-24E:7804306:2:2:2:2:24:0:0 +# 3:online:expansion:yes:0:io_grp0:2072-24E:7804326:2:2:2:2:24:0:0 +# 4:online:expansion:yes:0:io_grp0:2072-24E:7804352:2:2:2:2:24:0:0 + +# The names of the columns are: +# id:status:type:managed:IO_group_id:IO_group_name:product_MTM:serial_number:total_canisters:online_canisters:total_PSUs:online_PSUs:drive_slots:total_fan_modules:online_fan_modules + + +def inventory_ibm_svc_enclosure(info): + inventory = [] + for line in info: + enclosure_id = line[0] + inventory.append( (enclosure_id, None) ) + return inventory + +def check_ibm_svc_enclosure(item, _no_params, info): + for line in info: + if line[0] == item: + if len(line) < 15: # old format + line = line + ["0", "0"] # do not modify line! + + enclosure_id, enclosure_status, enclosure_type, managed, IO_group_id, \ + IO_group_name, product_MTM, serial_number, total_canisters, online_canisters, \ + total_PSUs, online_PSUs, drive_slots, total_fan_modules, online_fan_modules = line + + # Check status + message = "Enclosure %s is %s" % (enclosure_id, enclosure_status) + if enclosure_status == "online": + status = 0 + else: + status = 2 + message += "(!!)" + + # Check canisters + if online_canisters == total_canisters: + message += ", all %s canisters are online" % total_canisters + else: + status = 2 + message += ", only %s of %s canisters are online(!!)" % (online_canisters, total_canisters) + + # Check PSUs + if online_PSUs == total_PSUs: + message += ", all %s PSUs are online" % total_PSUs + else: + status = 2 + message += ", only %s of %s PSUs are online(!!)" % (online_PSUs, total_PSUs) + + # Check FANs (only new firmware) + if online_fan_modules == total_fan_modules: + if total_fan_modules != "0": + message += ", all %s fan modules are online" % total_fan_modules + else: + status = 2 + message += ", only %s of %s fan modules are online(!!)" % (online_fan_modules, total_fan_modules) + + return status, message + + return 3, "Enclosure %s not found in agent output" % item + +check_info["ibm_svc_enclosure"] = { + "check_function" : check_ibm_svc_enclosure, + "inventory_function" : inventory_ibm_svc_enclosure, + "service_description" : "Enclosure %s", + "has_perfdata" : False, +} + diff -Nru check-mk-1.2.2p3/ibm_svc_enclosurestats check-mk-1.2.6p12/ibm_svc_enclosurestats --- check-mk-1.2.2p3/ibm_svc_enclosurestats 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/ibm_svc_enclosurestats 2015-06-24 09:48:37.000000000 +0000 @@ -0,0 +1,109 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. 
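check_ibm_svc_enclosure reconciles the 13-column pre-upgrade rows with the 15-column post-upgrade rows by appending two zero counts for the missing fan-module fields before unpacking. The normalization on its own, using a sample row from above:

    line = "2:online:expansion:yes:0:io_grp0:2072-24E:7804306:2:2:2:2:24".split(":")
    if len(line) < 15:
        line = line + ["0", "0"]   # old firmware: fan-module columns absent
    assert len(line) == 15 and line[-2:] == ["0", "0"]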
+# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. + +# Example output from agent: +# <<>> +# 1:power_w:207:218:140410113051 +# 1:temp_c:22:22:140410113246 +# 1:temp_f:71:71:140410113246 +# 2:power_w:126:128:140410113056 +# 2:temp_c:21:21:140410113246 +# 2:temp_f:69:69:140410113246 +# 3:power_w:123:126:140410113041 +# 3:temp_c:22:22:140410113246 +# 3:temp_f:71:71:140410113246 +# 4:power_w:133:138:140410112821 +# 4:temp_c:22:23:140410112836 +# 4:temp_f:71:73:140410112836 + + +# .--temperature---------------------------------------------------------. +# | _ _ | +# | | |_ ___ _ __ ___ _ __ ___ _ __ __ _| |_ _ _ _ __ ___ | +# | | __/ _ \ '_ ` _ \| '_ \ / _ \ '__/ _` | __| | | | '__/ _ \ | +# | | || __/ | | | | | |_) | __/ | | (_| | |_| |_| | | | __/ | +# | \__\___|_| |_| |_| .__/ \___|_| \__,_|\__|\__,_|_| \___| | +# | |_| | +# '----------------------------------------------------------------------' + +ibm_svc_enclosurestats_temperature_default_levels = (35, 40) + +def inventory_ibm_svc_enclosurestats_temp(info): + for enclosure_id, stat_name, stat_current, stat_peak, stat_peak_time in info: + if stat_name == "temp_c": + yield enclosure_id, "ibm_svc_enclosurestats_temperature_default_levels" + +def check_ibm_svc_enclosurestats_temp(item, params, info): + for enclosure_id, stat_name, stat_current, stat_peak, stat_peak_time in info: + if enclosure_id == item and stat_name == "temp_c": + return check_temperature(int(stat_current), params) + + +check_info["ibm_svc_enclosurestats.temp"] = { + "check_function" : check_ibm_svc_enclosurestats_temp, + "inventory_function" : inventory_ibm_svc_enclosurestats_temp, + "service_description" : "Temperature Enclosure %s", + "has_perfdata" : True, + "group" : "room_temperature", + "includes" : [ "temperature.include" ], +} + +#. +# .--power---------------------------------------------------------------. 
+# | | +# | _ __ _____ _____ _ __ | +# | | '_ \ / _ \ \ /\ / / _ \ '__| | +# | | |_) | (_) \ V V / __/ | | +# | | .__/ \___/ \_/\_/ \___|_| | +# | |_| | +# '----------------------------------------------------------------------' + +def inventory_ibm_svc_enclosurestats_power(info): + inventory = [] + for enclosure_id, stat_name, stat_current, stat_peak, stat_peak_time in info: + if stat_name == "power_w": + inventory.append( (enclosure_id, None) ) + return inventory + +def check_ibm_svc_enclosurestats_power(item, _no_params, info): + perfdata = [] + + for enclosure_id, stat_name, stat_current, stat_peak, stat_peak_time in info: + if enclosure_id == item and stat_name == "power_w": + stat_current = int(stat_current) + perfdata = [ ('power', str(stat_current)+"Watt") ] + return 0, "Enclosure %s Power Consumption is %s Watt" % (enclosure_id, stat_current), perfdata + + return 3, "Power for enclosure %s not found in agent output" % item + +check_info["ibm_svc_enclosurestats.power"] = { + "check_function" : check_ibm_svc_enclosurestats_power, + "inventory_function" : inventory_ibm_svc_enclosurestats_power, + "service_description" : "Power Enclosure %s", + "has_perfdata" : True, +} + +#. diff -Nru check-mk-1.2.2p3/ibm_svc_enclosurestats.power check-mk-1.2.6p12/ibm_svc_enclosurestats.power --- check-mk-1.2.2p3/ibm_svc_enclosurestats.power 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/ibm_svc_enclosurestats.power 2015-06-24 09:48:36.000000000 +0000 @@ -0,0 +1,24 @@ +title: IBM SVC / Storwize V3700 / V7000: Power Consumption of Enclosures +agents: ibm_svc +catalog: hw/storagehw/ibm +license: GPL +distribution: check_mk +description: + Displays the power consumption of enclosures of an IBM SVC / Storwize V3700 / V7000 + device in watts. + + The check is informational only and always returns {OK}. + + Please note: You need the Special Agent agent_ibmsvc to retrieve the monitoring + data from the device. Your monitoring user must be able to SSH to the device + with SSH key authentication. Please exchange SSH keys. The Special Agent itself + can be configured by WATO. + +item: + The ID of the enclosure. + +inventory: + Creates one check per enclosure. + +perfdata: + One value: The power consumption in watts. diff -Nru check-mk-1.2.2p3/ibm_svc_enclosurestats.temp check-mk-1.2.6p12/ibm_svc_enclosurestats.temp --- check-mk-1.2.2p3/ibm_svc_enclosurestats.temp 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/ibm_svc_enclosurestats.temp 2015-06-24 09:48:36.000000000 +0000 @@ -0,0 +1,46 @@ +title: IBM SVC / Storwize V3700 / V7000: Temperature of Enclosures +agents: ibm_svc +catalog: hw/storagehw/ibm +license: GPL +distribution: check_mk +description: + Checks the temperature in enclosures of an IBM SVC / Storwize V3700 / V7000 device. + + The check returns {WARN} or {CRIT} if the temperature in degrees Celsius is higher + than the given levels and {OK} otherwise. + + Please note: You need the Special Agent agent_ibmsvc to retrieve the monitoring + data from the device. Your monitoring user must be able to SSH to the device + with SSH key authentication. Please exchange SSH keys. The Special Agent itself + can be configured by WATO. + +item: + The ID of the enclosure. + +inventory: + Creates one check per enclosure. + +perfdata: + One value: The temperature in degrees Celsius, together with its levels for + warn and crit.
+ +examples: + # set default levels to 30 and 45 degrees Celsius: + ibm_svc_enclosurestats_temperature_default_levels = (30, 45) + + # Check temperature of enclosure 1 on an IBM SVC called my-svc with default levels + checks += [ + ("my-svc", "ibm_svc_enclosurestats.temp", '1', ibm_svc_enclosurestats_temperature_default_levels) + ] + + # or use individual levels for warn and crit + checks += [ + ("my-svc", "ibm_svc_enclosurestats.temp", '1', (40, 50)) + ] + +[parameters] +parameters (int, int): temperature levels in degrees Celsius for {WARN} and {CRIT} + +[configuration] +ibm_svc_enclosurestats_temperature_default_levels (int, int): The standard levels + for {WARN} and {CRIT}, preset to (35, 40) diff -Nru check-mk-1.2.2p3/ibm_svc_eventlog check-mk-1.2.6p12/ibm_svc_eventlog --- check-mk-1.2.2p3/ibm_svc_eventlog 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/ibm_svc_eventlog 2015-06-24 09:48:37.000000000 +0000 @@ -0,0 +1,61 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# tails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA.
+ +# Example output from agent: +# <<>> +# 588:120404112526:mdiskgrp:6:md07_sas10k::alert:no:989001::Managed Disk Group space warning +# 589:120404112851:mdiskgrp:7:md08_nlsas7k_1t::alert:no:989001::Managed Disk Group space warning +# 590:120404112931:mdiskgrp:8:md09_nlsas7k_1t::alert:no:989001::Managed Disk Group space warning +# 591:120404113001:mdiskgrp:9:md10_nlsas7k_1t::alert:no:989001::Managed Disk Group space warning +# 592:120404113026:mdiskgrp:10:md11_nlsas7k_1t::alert:no:989001::Managed Disk Group space warning +# 593:120404113111:mdiskgrp:11:md12_nlsas7k_1t::alert:no:989001::Managed Disk Group space warning +# 1690:130801070656:drive:59:::alert:no:981020::Managed Disk error count warning threshold met +# 2058:131030112416:drive:42:::alert:no:981020::Managed Disk error count warning threshold met + +def inventory_ibm_svc_eventlog(info): + return [ (None, None) ] + +def check_ibm_svc_eventlog(item, _no_params, info): + messagecount = 0 + last_err = "" + + for sequence_number, last_timestamp, object_type, object_id, object_name, copy_id, status, fixed, event_id, error_code, description in info: + messagecount += 1 + last_err = description + + if messagecount > 0: + return 1, "%d messages not expired and not yet fixed found in event log, last was: %s" % \ + (messagecount, last_err) + + return 0, "No messages not expired and not yet fixed found in event log" + +check_info["ibm_svc_eventlog"] = { + "check_function" : check_ibm_svc_eventlog, + "inventory_function" : inventory_ibm_svc_eventlog, + "service_description" : "Eventlog", + "has_perfdata" : False, +} + diff -Nru check-mk-1.2.2p3/ibm_svc_host check-mk-1.2.6p12/ibm_svc_host --- check-mk-1.2.2p3/ibm_svc_host 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/ibm_svc_host 2015-09-21 10:59:54.000000000 +0000 @@ -0,0 +1,92 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. 
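Each event-log row is colon-separated into exactly the eleven fields that check_ibm_svc_eventlog unpacks; empty fields (here copy_id and error_code) survive as empty strings. Splitting one sample row shows the alignment:

    row = ("588:120404112526:mdiskgrp:6:md07_sas10k::alert:no:989001::"
           "Managed Disk Group space warning").split(":")
    assert len(row) == 11                 # matches the eleven-name unpacking above
    assert row[5] == "" and row[9] == ""  # empty copy_id and error_code
    assert row[-1] == "Managed Disk Group space warning"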
+ +# Example output from agent: +# <<>> +# 0:h_esx01:2:4:degraded +# 1:host206:2:2:online +# 2:host105:2:2:online +# 3:host106:2:2:online + +factory_settings['ibm_svc_host_default_levels'] = { + 'always_ok' : False +} + +def inventory_ibm_svc_host(info): + return [(None, None)] + + +def check_ibm_svc_host_state(state, aw_ok): + if aw_ok: + return 0 + else: + return state + +def check_ibm_svc_host(item, params, info): + degraded = 0 + offline = 0 + active = 0 + inactive = 0 + other = 0 + status = 0 + for line in info: + if line[4] == 'degraded': + degraded += 1 + elif line[4] == 'offline': + offline += 1 + elif line[4] == 'active' or line[4] == 'online': + active += 1 + elif line[4] == 'inactive': + inactive += 1 + else: + other +=1 + + perfdata = [ ("active", active), + ("inactive", inactive), + ("degraded", degraded), + ("offline", offline), + ("other", other), + ] + yield 0, "%s hosts active, %s inactive" % (active, inactive), perfdata + + aw_ok = params['always_ok'] # Needed in function check_ibm_svc_host_state + if degraded > 0: + yield check_ibm_svc_host_state(1, aw_ok), "%s degraded" % degraded + if offline > 0: + yield check_ibm_svc_host_state(2, aw_ok), "%s offline" % offline + if other > 0: + yield check_ibm_svc_host_state(1, aw_ok), "%s in an unidentified state(!)" % other + + +check_info["ibm_svc_host"] = { + "check_function" : check_ibm_svc_host, + "inventory_function" : inventory_ibm_svc_host, + "service_description" : "Hosts", + "has_perfdata" : True, + "default_levels_variable": "ibm_svc_host_default_levels", + "group" : "ibm_svc_host", +} + diff -Nru check-mk-1.2.2p3/ibm_svc_license check-mk-1.2.6p12/ibm_svc_license --- check-mk-1.2.2p3/ibm_svc_license 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/ibm_svc_license 2015-09-21 10:59:54.000000000 +0000 @@ -0,0 +1,85 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. 
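The always_ok switch (exposed through the ibm_svc_host rule group) suppresses alerting while the degraded/offline counts stay visible in the service output; check_ibm_svc_host_state is the entire mechanism. Its effect under an assumed parameter dict:

    params = {"always_ok": True}    # shape assumed from factory_settings above
    assert check_ibm_svc_host_state(2, params["always_ok"]) == 0   # muted CRIT
    assert check_ibm_svc_host_state(2, False) == 2                 # normal CRIT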
+ +# Example output from agent: +# <<>> +# used_flash:0.00 +# used_remote:0.00 +# used_virtualization:192.94 +# license_flash:0 +# license_remote:0 +# license_virtualization:412 +# license_physical_disks:0 +# license_physical_flash:off +# license_physical_remote:off +# used_compression_capacity:0.00 +# license_compression_capacity:0 +# license_compression_enclosures:0 + + +def parse_ibm_svc_license(info): + licenses = {} + for line in info: + if line[0].startswith("license_"): + license = line[0].replace("license_", "") + if not license in licenses.keys(): + licenses[license] = [0.0, 0.0] + if line[1] == "off": + licenses[license][0] = 0.0 + else: + licenses[license][0] = float(line[1]) + if line[0].startswith("used_"): + license = line[0].replace("used_", "") + if not license in licenses.keys(): + licenses[license] = [0.0, 0.0] + licenses[license][1] = float(line[1]) + return licenses + +def inventory_ibm_svc_license(info): + inventory = [] + licenses = parse_ibm_svc_license(info) + for license in licenses.keys(): + inventory.append( (license, None) ) + return inventory + +def check_ibm_svc_license(item, _no_params, info): + licenses = parse_ibm_svc_license(info) + licensed, used = licenses[item] + perfdata = [ ("licensed", licensed), ("used", used) ] + + if used > licensed: + status = 2 + else: + status = 0 + return status, "%s %s licensed, %s used" % (item, licensed, used), perfdata + +check_info["ibm_svc_license"] = { + "check_function" : check_ibm_svc_license, + "inventory_function" : inventory_ibm_svc_license, + "service_description" : "License %s", + "has_perfdata" : True, +} + diff -Nru check-mk-1.2.2p3/ibm_svc_mdisk check-mk-1.2.6p12/ibm_svc_mdisk --- check-mk-1.2.2p3/ibm_svc_mdisk 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/ibm_svc_mdisk 2015-09-21 10:59:54.000000000 +0000 @@ -0,0 +1,74 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. 
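parse_ibm_svc_license pairs every license_&lt;feature&gt; row with its used_&lt;feature&gt; counterpart into a dict of [licensed, used] floats, with "off" counting as 0.0 licensed. On two rows from the example output:

    info = [["license_virtualization", "412"],
            ["used_virtualization", "192.94"]]
    assert parse_ibm_svc_license(info) == {"virtualization": [412.0, 192.94]}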
+ +# Example output from agent: +# <<>> +# 0:stp5_300G_01-01:online:managed:16:stp5_300G_01:1.1TB:0000000000000000:BLUBB5:600a0b80006e1dbc0000f6f9513026a000000000000000000000000000000000:generic_hdd +# 1:Quorum_BLUBB3:online:managed:0:Quorum_2:1.0GB:0000000000000000:BLUBB3:600a0b8000293eb800001f264c3e8a1f00000000000000000000000000000000:generic_hdd +# 2:stp6_300G_01-01:online:managed:15:stp6_300G_01:1.1TB:0000000000000000:BLUBB6:600a0b80006e8e3c00000f1651302b8800000000000000000000000000000000:generic_hdd +# 3:Quorum_blubb5:online:managed:18:Quorum_0:1.0GB:0000000000000001:BLUBB5:600a0b80006e1dcc0000f6905130225800000000000000000000000000000000:generic_hdd +# 4:Quorum_blubb6:online:managed:17:Quorum_1:1.0GB:0000000000000001:BLUBB6:600a0b80006e1d5e00000dcb5130228700000000000000000000000000000000:generic_hdd +# 5:stp5_300G_01-02:online:managed:16:stp5_300G_01:1.1TB:0000000000000002:BLUBB5:600a0b80006e1dbc0000f6fc51304bfc00000000000000000000000000000000:generic_hdd +# 6:stp6_300G_01-02:online:managed:15:stp6_300G_01:1.1TB:0000000000000002:BLUBB6:600a0b80006e8e3c00000f1951304f9a00000000000000000000000000000000:generic_hdd +# 7:stp5_300G_01-03:online:managed:16:stp5_300G_01:1.1TB:0000000000000003:BLUBB5:600a0b80006e1dcc0000f76951305bc000000000000000000000000000000000:generic_hdd +# 8:stp6_300G_01-03:online:managed:15:stp6_300G_01:1.1TB:0000000000000003:BLUBB6:600a0b80006e1d5e00000e9a51305a3200000000000000000000000000000000:generic_hdd +# 9:stp5_300G_01-04:online:managed:16:stp5_300G_01:1.1TB:0000000000000004:BLUBB5:600a0b80006e1dbc0000f7d051341cc000000000000000000000000000000000:generic_hdd + + +def inventory_ibm_svc_mdisk(info): + for line in info: + if len(line) in (11, 12): + yield line[1], None + +def check_ibm_svc_mdisk(item, _no_params, info): + for line in info: + if len(line) in (11, 12) and line[1] == item: + mdisk_status = line[2] + mdisk_mode = line[3] + + message = "Status: %s" % mdisk_status + if mdisk_status == "online": + status = 0 + elif mdisk_status in ("offline", "excluded"): + status = 2 + message += "(!!)" + else: + status = 1 + message += "(!)" + + message += ", Mode: %s" % mdisk_mode + if mdisk_mode not in ( "managed", "array" ): + status = max(status, 1) + message += "(!)" + + return status, message + +check_info["ibm_svc_mdisk"] = { + "check_function" : check_ibm_svc_mdisk, + "inventory_function" : inventory_ibm_svc_mdisk, + "service_description" : "MDisk %s", +} + diff -Nru check-mk-1.2.2p3/ibm_svc_mdiskgrp check-mk-1.2.6p12/ibm_svc_mdiskgrp --- check-mk-1.2.2p3/ibm_svc_mdiskgrp 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/ibm_svc_mdiskgrp 2015-09-21 10:59:54.000000000 +0000 @@ -0,0 +1,92 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. 
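check_ibm_svc_mdisk merges two independent verdicts, the disk status and the mode, and keeps the worse one via max(), so a WARN for an unmanaged mode can never mask a CRIT for an offline disk. The aggregation idiom reduced to a sketch (helper name is ours):

    def mdisk_state(mdisk_status, mdisk_mode):
        if mdisk_status == "online":
            status = 0
        elif mdisk_status in ("offline", "excluded"):
            status = 2
        else:
            status = 1
        if mdisk_mode not in ("managed", "array"):
            status = max(status, 1)   # raise to WARN at most, never lower
        return status

    assert mdisk_state("offline", "image") == 2
    assert mdisk_state("online", "image") == 1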
check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. + +# Example output from agent: +# <<>> +# 0:Quorum_2:online:1:0:704.00MB:64:704.00MB:0.00MB:0.00MB:0.00MB:0:0:auto:inactive:no:0.00MB:0.00MB:0.00MB +# 1:stp5_450G_03:online:18:6:29.43TB:256:21.68TB:8.78TB:7.73TB:7.75TB:29:80:auto:inactive:no:0.00MB:0.00MB:0.00MB +# 4:stp5_450G_02:online:15:14:24.53TB:256:277.00GB:24.26TB:24.26TB:24.26TB:98:80:auto:inactive:no:0.00MB:0.00MB:0.00MB +# 9:stp6_450G_03:online:18:6:29.43TB:256:21.68TB:8.78TB:7.73TB:7.75TB:29:80:auto:inactive:no:0.00MB:0.00MB:0.00MB +# 10:stp6_450G_02:online:15:14:24.53TB:256:277.00GB:24.26TB:24.26TB:24.26TB:98:80:auto:inactive:no:0.00MB:0.00MB:0.00MB +# 15:stp6_300G_01:online:15:23:16.34TB:256:472.50GB:15.88TB:15.88TB:15.88TB:97:80:auto:inactive:no:0.00MB:0.00MB:0.00MB +# 16:stp5_300G_01:online:15:23:16.34TB:256:472.50GB:15.88TB:15.88TB:15.88TB:97:80:auto:inactive:no:0.00MB:0.00MB:0.00MB +# 17:Quorum_1:online:1:0:512.00MB:256:512.00MB:0.00MB:0.00MB:0.00MB:0:80:auto:inactive:no:0.00MB:0.00MB:0.00MB +# 18:Quorum_0:online:1:0:512.00MB:256:512.00MB:0.00MB:0.00MB:0.00MB:0:80:auto:inactive:no:0.00MB:0.00MB:0.00MB +# 21:stp5_450G_01:online:12:31:19.62TB:256:320.00GB:19.31TB:19.31TB:19.31TB:98:0:auto:inactive:no:0.00MB:0.00MB:0.00MB +# 22:stp6_450G_01:online:12:31:19.62TB:256:320.00GB:19.31TB:19.31TB:19.31TB:98:0:auto:inactive:no:0.00MB:0.00MB:0.00MB +# 23:stp5_600G_01:online:3:2:6.54TB:256:512.00MB:6.54TB:6.54TB:6.54TB:99:80:auto:inactive:no:0.00MB:0.00MB:0.00MB +# 24:stp6_600G_01:online:3:2:6.54TB:256:512.00MB:6.54TB:6.54TB:6.54TB:99:80:auto:inactive:no:0.00MB:0.00MB:0.00MB + +def ibm_svc_mdiskgrp_to_mb(size): + if size.endswith("MB"): + size = float(size.replace("MB", "")) + elif size.endswith("GB"): + size = float(size.replace("GB", "")) * 1024 + elif size.endswith("TB"): + size = float(size.replace("TB", "")) * 1024 * 1024 + elif size.endswith("PB"): + size = float(size.replace("PB", "")) * 1024 * 1024 * 1024 + elif size.endswith("EB"): + size = float(size.replace("EB", "")) * 1024 * 1024 * 1024 * 1024 + else: + size = float(size) + return size + +def inventory_ibm_svc_mdiskgrp(info): + for line in info: + if len(line) > 8: + yield line[1], {} + +def check_ibm_svc_mdiskgrp(item, params, info): + for line in info: + if len(line) > 8 and item == line[1]: + mgrp_status = line[2] + + if mgrp_status != "online": + return 2, "Status: %s" % mgrp_status + + fslist = [] + capacity = line[5] + free_capacity = line[7] + size_mb = ibm_svc_mdiskgrp_to_mb(capacity) + avail_mb = ibm_svc_mdiskgrp_to_mb(free_capacity) + fslist.append((item, size_mb, avail_mb)) + status, message, perfdata = df_check_filesystem_list(item, params, fslist) + message += ", Status: %s" % mgrp_status + + return status, message, perfdata + + +check_info["ibm_svc_mdiskgrp"] = { + "check_function" : check_ibm_svc_mdiskgrp, + "inventory_function" : inventory_ibm_svc_mdiskgrp, + "service_description" : "MDiskGrp %s", + "has_perfdata" : True, + "group" : "filesystem", + "includes" : [ "df.include" ], + "default_levels_variable" : "filesystem_default_levels", +} + diff -Nru check-mk-1.2.2p3/ibm_svc_node 
check-mk-1.2.6p12/ibm_svc_node --- check-mk-1.2.2p3/ibm_svc_node 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/ibm_svc_node 2015-06-24 09:48:37.000000000 +0000 @@ -0,0 +1,78 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. + +# Example output from agent: +# Put here the example output from your TCP-Based agent. If the +# check is SNMP-Based, then remove this section + +def inventory_ibm_svc_node(info): + io_groups = {} + inventory = [] + for node_id, node_name, ups_serial, wwnn, node_status, io_group_id, \ + io_group_name, config_node, ups_unique_id, hardware, iscsi_name, \ + iscsi_alias, panel_name, enclosure_id, canister_id, \ + enclosure_serial_number, additional in info: + io_groups[io_group_name] = 1 + for io_group_name in io_groups.keys(): + inventory.append( (io_group_name, None) ) + return inventory + +def check_ibm_svc_node(item, _no_params, info): + message = "" + status = 0 + online_nodes = 0 + nodes_of_iogroup = 0 + + for node_id, node_name, ups_serial, wwnn, node_status, io_group_id, \ + io_group_name, config_node, ups_unique_id, hardware, iscsi_name, \ + iscsi_alias, panel_name, enclosure_id, canister_id, \ + enclosure_serial_number, additional in info: + if io_group_name == item: + if message != "": + message += ", " + message += "Node %s is %s" % (node_name, node_status) + nodes_of_iogroup += 1 + if node_status == "online": + online_nodes += 1 + + if nodes_of_iogroup == 0: + return 3, "IO Group %s not found in agent output" % item + + if nodes_of_iogroup == online_nodes: + status = 0 + elif online_nodes == 0: + status = 2 + else: + status = 1 + + return status, message + +check_info["ibm_svc_node"] = { + "check_function" : check_ibm_svc_node, + "inventory_function" : inventory_ibm_svc_node, + "service_description" : "IO Group %s", +} + diff -Nru check-mk-1.2.2p3/ibm_svc_nodestats check-mk-1.2.6p12/ibm_svc_nodestats --- check-mk-1.2.2p3/ibm_svc_nodestats 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/ibm_svc_nodestats 2015-06-24 09:48:37.000000000 +0000 @@ -0,0 +1,309 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . 
\ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. + +# Note: This file is almost identical with ibm_svc_nodestats. We should +# create an include file for sharing common code! + +# Example output from agent: +# <<>> +# 1:BLUBBSVC01:compression_cpu_pc:0:0:140325134931 +# 1:BLUBBSVC01:cpu_pc:1:3:140325134526 +# 1:BLUBBSVC01:fc_mb:35:530:140325134526 +# 1:BLUBBSVC01:fc_io:5985:11194:140325134751 +# 1:BLUBBSVC01:sas_mb:0:0:140325134931 +# 1:BLUBBSVC01:sas_io:0:0:140325134931 +# 1:BLUBBSVC01:iscsi_mb:0:0:140325134931 +# 1:BLUBBSVC01:iscsi_io:0:0:140325134931 +# 1:BLUBBSVC01:write_cache_pc:0:0:140325134931 +# 1:BLUBBSVC01:total_cache_pc:70:77:140325134716 +# 1:BLUBBSVC01:vdisk_mb:1:246:140325134526 +# 1:BLUBBSVC01:vdisk_io:130:1219:140325134501 +# 1:BLUBBSVC01:vdisk_ms:0:4:140325134531 +# 1:BLUBBSVC01:mdisk_mb:17:274:140325134526 +# 1:BLUBBSVC01:mdisk_io:880:1969:140325134526 +# 1:BLUBBSVC01:mdisk_ms:1:5:140325134811 +# 1:BLUBBSVC01:drive_mb:0:0:140325134931 +# 1:BLUBBSVC01:drive_io:0:0:140325134931 +# 1:BLUBBSVC01:drive_ms:0:0:140325134931 +# 1:BLUBBSVC01:vdisk_r_mb:0:244:140325134526 +# 1:BLUBBSVC01:vdisk_r_io:19:1022:140325134501 +# 1:BLUBBSVC01:vdisk_r_ms:2:8:140325134756 +# 1:BLUBBSVC01:vdisk_w_mb:0:2:140325134701 +# 1:BLUBBSVC01:vdisk_w_io:110:210:140325134901 +# 1:BLUBBSVC01:vdisk_w_ms:0:0:140325134931 +# 1:BLUBBSVC01:mdisk_r_mb:1:265:140325134526 +# 1:BLUBBSVC01:mdisk_r_io:15:1081:140325134526 +# 1:BLUBBSVC01:mdisk_r_ms:5:23:140325134616 +# 1:BLUBBSVC01:mdisk_w_mb:16:132:140325134751 +# 1:BLUBBSVC01:mdisk_w_io:865:1662:140325134736 +# 1:BLUBBSVC01:mdisk_w_ms:1:5:140325134811 +# 1:BLUBBSVC01:drive_r_mb:0:0:140325134931 +# 1:BLUBBSVC01:drive_r_io:0:0:140325134931 +# 1:BLUBBSVC01:drive_r_ms:0:0:140325134931 +# 1:BLUBBSVC01:drive_w_mb:0:0:140325134931 +# 1:BLUBBSVC01:drive_w_io:0:0:140325134931 +# 1:BLUBBSVC01:drive_w_ms:0:0:140325134931 +# 5:BLUBBSVC02:compression_cpu_pc:0:0:140325134930 +# 5:BLUBBSVC02:cpu_pc:1:2:140325134905 +# 5:BLUBBSVC02:fc_mb:141:293:140325134755 +# 5:BLUBBSVC02:fc_io:7469:12230:140325134750 +# 5:BLUBBSVC02:sas_mb:0:0:140325134930 +# 5:BLUBBSVC02:sas_io:0:0:140325134930 +# [...] 
+ +# parses agent output into a structure like: +# {'Drives BLUBBSVC01': {'r_mb': 0, 'w_mb': 0, 'r_io': 0, 'w_io': 0, 'r_ms': 0, 'w_ms': 0}, +# 'Drives BLUBBSVC02': {'r_mb': 0, 'w_mb': 0, 'r_io': 0, 'w_io': 0, 'r_ms': 0, 'w_ms': 0}, +# 'Drives BLUBBSVC03': {'r_mb': 0, 'w_mb': 0, 'r_io': 0, 'w_io': 0, 'r_ms': 0, 'w_ms': 0}, +# 'Drives BLUBBSVC04': {'r_mb': 0, 'w_mb': 0, 'r_io': 0, 'w_io': 0, 'r_ms': 0, 'w_ms': 0}, +# 'MDisks BLUBBSVC01': {'r_mb': 1, 'w_mb': 16, 'r_io': 15, 'w_io': 865, 'r_ms': 5, 'w_ms': 1}, +# 'MDisks BLUBBSVC02': {'r_mb': 3, 'w_mb': 6, 'r_io': 245, 'w_io': 361, 'r_ms': 6, 'w_ms': 0}, +# 'MDisks BLUBBSVC03': {'r_mb': 28, 'w_mb': 5, 'r_io': 1194, 'w_io': 901, 'r_ms': 3, 'w_ms': 0}, +# 'MDisks BLUBBSVC04': {'r_mb': 0, 'w_mb': 162, 'r_io': 0, 'w_io': 1414, 'r_ms': 7, 'w_ms': 0}, +# 'VDisks BLUBBSVC01': {'r_mb': 0, 'w_mb': 0, 'r_io': 19, 'w_io': 110, 'r_ms': 2, 'w_ms': 0}, +# 'VDisks BLUBBSVC02': {'r_mb': 101, 'w_mb': 13, 'r_io': 1105, 'w_io': 789, 'r_ms': 1, 'w_ms': 0}, +# 'VDisks BLUBBSVC03': {'r_mb': 12, 'w_mb': 80, 'r_io': 1345, 'w_io': 1442, 'r_ms': 1, 'w_ms': 0}, +# 'VDisks BLUBBSVC04': {'r_mb': 0, 'w_mb': 0, 'r_io': 0, 'w_io': 0, 'r_ms': 7, 'w_ms': 0}} + +def ibm_svc_nodestats_parse(info): + parsed = {} + for node_id, node_name, stat_name, stat_current, stat_peak, stat_peak_time in info: + if stat_name in ("vdisk_r_mb", "vdisk_w_mb", "vdisk_r_io", "vdisk_w_io", "vdisk_r_ms", "vdisk_w_ms"): + if "VDisks %s" % node_name not in parsed.keys(): + parsed["VDisks %s" % node_name] = {} + stat_name = stat_name.replace("vdisk_", "") + parsed["VDisks %s" % node_name][stat_name] = int(stat_current) + if stat_name in ("mdisk_r_mb", "mdisk_w_mb", "mdisk_r_io", "mdisk_w_io", "mdisk_r_ms", "mdisk_w_ms"): + if "MDisks %s" % node_name not in parsed.keys(): + parsed["MDisks %s" % node_name] = {} + stat_name = stat_name.replace("mdisk_", "") + parsed["MDisks %s" % node_name][stat_name] = int(stat_current) + if stat_name in ("drive_r_mb", "drive_w_mb", "drive_r_io", "drive_w_io", "drive_r_ms", "drive_w_ms"): + if "Drives %s" % node_name not in parsed.keys(): + parsed["Drives %s" % node_name] = {} + stat_name = stat_name.replace("drive_", "") + parsed["Drives %s" % node_name][stat_name] = int(stat_current) + return parsed + +# .--disk IO-------------------------------------------------------------. +# | _ _ _ ___ ___ | +# | __| (_)___| | __ |_ _/ _ \ | +# | / _` | / __| |/ / | | | | | | +# | | (_| | \__ \ < | | |_| | | +# | \__,_|_|___/_|\_\ |___\___/ | +# | | +# '----------------------------------------------------------------------' + +def inventory_ibm_svc_nodestats_diskio(info): + inventory = [] + parsed = ibm_svc_nodestats_parse(info) + for key in parsed.keys(): + inventory.append( (key, None) ) + return inventory + +def check_ibm_svc_nodestats_diskio(item, _no_params, info): + parsed = ibm_svc_nodestats_parse(info) + + if item not in parsed.keys(): + return 3, "%s not found in agent output" % item + + read_bytes = parsed[item]['r_mb'] * 1024 * 1024 + write_bytes = parsed[item]['w_mb'] * 1024 * 1024 + perfdata = [ ("read", read_bytes), ("write", write_bytes) ] + + return 0, "%s/s read, %s/s write" % \ + (get_bytes_human_readable(read_bytes), get_bytes_human_readable(write_bytes)), \ + perfdata + +check_info["ibm_svc_nodestats.diskio"] = { + "check_function" : check_ibm_svc_nodestats_diskio, + "inventory_function" : inventory_ibm_svc_nodestats_diskio, + "service_description" : "Disk IO %s", + "has_perfdata" : True, +} + +#. 
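ibm_svc_nodestats_parse buckets the counters under "VDisks &lt;node&gt;", "MDisks &lt;node&gt;" and "Drives &lt;node&gt;" keys and strips the vdisk_/mdisk_/drive_ prefixes, as the structure comment above shows. Round-tripping two rows from the example output:

    info = [["1", "BLUBBSVC01", "vdisk_r_io", "19", "1022", "140325134501"],
            ["1", "BLUBBSVC01", "vdisk_w_io", "110", "210", "140325134901"]]
    assert ibm_svc_nodestats_parse(info) == \
        {"VDisks BLUBBSVC01": {"r_io": 19, "w_io": 110}}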
+# .--iops----------------------------------------------------------------. +# | _ | +# | (_) ___ _ __ ___ | +# | | |/ _ \| '_ \/ __| | +# | | | (_) | |_) \__ \ | +# | |_|\___/| .__/|___/ | +# | |_| | +# '----------------------------------------------------------------------' + +def inventory_ibm_svc_nodestats_iops(info): + inventory = [] + parsed = ibm_svc_nodestats_parse(info) + for key in parsed.keys(): + inventory.append( (key, None) ) + return inventory + +def check_ibm_svc_nodestats_iops(item, _no_params, info): + parsed = ibm_svc_nodestats_parse(info) + + if item not in parsed.keys(): + return 3, "%s not found in agent output" % item + + read_iops = parsed[item]['r_io'] + write_iops = parsed[item]['w_io'] + perfdata = [ ("read", read_iops), ("write", write_iops) ] + + return 0, "%s IO/s read, %s IO/s write" % (read_iops, write_iops), perfdata + +check_info["ibm_svc_nodestats.iops"] = { + "check_function" : check_ibm_svc_nodestats_iops, + "inventory_function" : inventory_ibm_svc_nodestats_iops, + "service_description" : "Disk IOPS %s", + "has_perfdata" : True, +} + +#. +# .--disk latency--------------------------------------------------------. +# | _ _ _ _ _ | +# | __| (_)___| | __ | | __ _| |_ ___ _ __ ___ _ _ | +# | / _` | / __| |/ / | |/ _` | __/ _ \ '_ \ / __| | | | | +# | | (_| | \__ \ < | | (_| | || __/ | | | (__| |_| | | +# | \__,_|_|___/_|\_\ |_|\__,_|\__\___|_| |_|\___|\__, | | +# | |___/ | +# '----------------------------------------------------------------------' + +def inventory_ibm_svc_nodestats_disk_latency(info): + inventory = [] + parsed = ibm_svc_nodestats_parse(info) + for key in parsed.keys(): + inventory.append( (key, None) ) + return inventory + +def check_ibm_svc_nodestats_disk_latency(item, _no_params, info): + parsed = ibm_svc_nodestats_parse(info) + + if item not in parsed.keys(): + return 3, "%s not found in agent output" % item + + read_latency = parsed[item]['r_ms'] + write_latency = parsed[item]['w_ms'] + perfdata = [ ("read_latency", read_latency), ("write_latency", write_latency) ] + + return 0, "Latency is %s ms for read, %s ms for write" % (read_latency, write_latency), \ + perfdata + +check_info["ibm_svc_nodestats.disk_latency"] = { + "check_function" : check_ibm_svc_nodestats_disk_latency, + "inventory_function" : inventory_ibm_svc_nodestats_disk_latency, + "service_description" : "Disk Latency %s", + "has_perfdata" : True, +} + +#. +# .--cpu-----------------------------------------------------------------. 
+# | | +# | ___ _ __ _ _ | +# | / __| '_ \| | | | | +# | | (__| |_) | |_| | | +# | \___| .__/ \__,_| | +# | |_| | +# | | +# '----------------------------------------------------------------------' + +ibm_svc_cpu_default_levels = ( 90.0, 95.0 ) + +def inventory_ibm_svc_cpu(info): + inventory = [] + for node_id, node_name, stat_name, stat_current, stat_peak, stat_peak_time in info: + if stat_name == "cpu_pc": + inventory.append( (node_name, "ibm_svc_cpu_default_levels") ) + return inventory + +def check_ibm_svc_cpu(item, params, info): + for node_id, node_name, stat_name, stat_current, stat_peak, stat_peak_time in info: + if node_name == item and stat_name == "cpu_pc": + return check_cpu_util(int(stat_current), params) + + return 3, "value cpu_pc not found in agent output for node %s" % item + +check_info["ibm_svc_nodestats.cpu_util"] = { + "check_function" : check_ibm_svc_cpu, + "inventory_function" : inventory_ibm_svc_cpu, + "service_description" : "CPU utilization %s", + "has_perfdata" : True, + "group" : "cpu_utilization_multiitem", + "includes" : [ "cpu_util.include" ], +} +#. +# .--cache---------------------------------------------------------------. +# | _ | +# | ___ __ _ ___| |__ ___ | +# | / __/ _` |/ __| '_ \ / _ \ | +# | | (_| (_| | (__| | | | __/ | +# | \___\__,_|\___|_| |_|\___| | +# | | +# '----------------------------------------------------------------------' + +# parses agent output into a structure like: +# {'BLUBBSVC01': {'total_cache_pc': 70, 'write_cache_pc': 0}, +# 'BLUBBSVC02': {'total_cache_pc': 75, 'write_cache_pc': 0}, +# 'BLUBBSVC03': {'total_cache_pc': 65, 'write_cache_pc': 0}, +# 'BLUBBSVC04': {'total_cache_pc': 71, 'write_cache_pc': 0}} + +def ibm_svc_cache_parse(info): + parsed = {} + for node_id, node_name, stat_name, stat_current, stat_peak, stat_peak_time in info: + if stat_name in ("write_cache_pc", "total_cache_pc"): + if node_name not in parsed.keys(): + parsed[node_name] = {} + parsed[node_name][stat_name] = int(stat_current) + return parsed + +def inventory_ibm_svc_cache(info): + inventory = [] + parsed = ibm_svc_cache_parse(info) + for key in parsed.keys(): + inventory.append( (key, None) ) + return inventory + +def check_ibm_svc_cache(item, _no_params, info): + parsed = ibm_svc_cache_parse(info) + + if item not in parsed.keys(): + return 3, "%s not found in agent output" % item + + write_cache_pc = parsed[item]["write_cache_pc"] + total_cache_pc = parsed[item]["total_cache_pc"] + perfdata = [ ("write_cache_pc", write_cache_pc, None, None, 0, 100), + ("total_cache_pc", total_cache_pc, None, None, 0, 100) ] + + return 0, "Write cache usage is %d %%, total cache usage is %d %%" % \ + (write_cache_pc, total_cache_pc), perfdata + +check_info["ibm_svc_nodestats.cache"] = { + "check_function" : check_ibm_svc_cache, + "inventory_function" : inventory_ibm_svc_cache, + "service_description" : "Cache %s", + "has_perfdata" : True, +} + +#. diff -Nru check-mk-1.2.2p3/ibm_svc_nodestats.cache check-mk-1.2.6p12/ibm_svc_nodestats.cache --- check-mk-1.2.2p3/ibm_svc_nodestats.cache 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/ibm_svc_nodestats.cache 2015-06-24 09:48:36.000000000 +0000 @@ -0,0 +1,24 @@ +title: IBM SVC / Storwize V3700 / V7000: Cache Usage per Node +agents: ibm_svc +catalog: hw/storagehw/ibm +license: GPL +distribution: check_mk +description: + Reports the Write Cache Usage and Total Cache Usage for each node of an + IBM SVC / Storwize V3700 / V7000 device in percent. + + The check does report only and returns always {OK}. 
+
+ Please note: You need the Special Agent agent_ibmsvc to retrieve the monitoring
+ data from the device. Your monitoring user must be able to SSH to the device
+ with SSH key authentication, so make sure the SSH keys have been exchanged.
+ The Special Agent itself can be configured via WATO.
+
+item:
+ The name of the node.
+
+inventory:
+ Creates one check per node.
+
+perfdata:
+ Two values: The write cache usage and the total cache usage in percent.
diff -Nru check-mk-1.2.2p3/ibm_svc_nodestats.cpu_util check-mk-1.2.6p12/ibm_svc_nodestats.cpu_util
--- check-mk-1.2.2p3/ibm_svc_nodestats.cpu_util 1970-01-01 00:00:00.000000000 +0000
+++ check-mk-1.2.6p12/ibm_svc_nodestats.cpu_util 2015-06-24 09:48:36.000000000 +0000
@@ -0,0 +1,47 @@
+title: IBM SVC / Storwize V3700 / V7000: CPU Utilization per Node
+agents: ibm_svc
+catalog: hw/storagehw/ibm
+license: GPL
+distribution: check_mk
+description:
+ Reports the CPU Utilization for each node of an IBM SVC / Storwize V3700 / V7000
+ device.
+
+ The check returns {WARN} or {CRIT} if the utilization in percent is higher
+ than the given levels and {OK} otherwise.
+
+ Please note: You need the Special Agent agent_ibmsvc to retrieve the monitoring
+ data from the device. Your monitoring user must be able to SSH to the device
+ with SSH key authentication, so make sure the SSH keys have been exchanged.
+ The Special Agent itself can be configured via WATO.
+
+item:
+ The name of the node.
+
+inventory:
+ Creates one check per node.
+
+perfdata:
+ One value: The utilization in percent, together with its levels for
+ warn and crit.
+
+examples:
+ # set default levels to 70 and 80 percent:
+ ibm_svc_cpu_default_levels = (70.0, 80.0)
+
+ # Check CPU Utilization of Node SVC01 on an IBM SVC called my-svc with default levels
+ checks += [
+     ("my-svc", "ibm_svc_nodestats.cpu_util", 'SVC01', ibm_svc_cpu_default_levels)
+ ]
+
+ # or use individual levels for warn and crit
+ checks += [
+     ("my-svc", "ibm_svc_nodestats.cpu_util", 'SVC01', (75.0, 85.0))
+ ]
+
+[parameters]
+parameters (float, float): levels of CPU utilization for {WARN} and {CRIT} in percent
+
+[configuration]
+ibm_svc_cpu_default_levels (float, float): The standard levels for {WARN} and
+ {CRIT}, preset to (90.0, 95.0)
diff -Nru check-mk-1.2.2p3/ibm_svc_nodestats.diskio check-mk-1.2.6p12/ibm_svc_nodestats.diskio
--- check-mk-1.2.2p3/ibm_svc_nodestats.diskio 1970-01-01 00:00:00.000000000 +0000
+++ check-mk-1.2.6p12/ibm_svc_nodestats.diskio 2015-06-24 09:48:36.000000000 +0000
@@ -0,0 +1,24 @@
+title: IBM SVC / Storwize V3700 / V7000: Disk Throughput for Drives/MDisks/VDisks per Node
+agents: ibm_svc
+catalog: hw/storagehw/ibm
+license: GPL
+distribution: check_mk
+description:
+ Reports the disk throughput for Drives, MDisks and VDisks for each node
+ of an IBM SVC / Storwize V3700 / V7000 device in bytes per second.
+
+ This check is informational only and always returns {OK}.
+
+ Please note: You need the Special Agent agent_ibmsvc to retrieve the monitoring
+ data from the device. Your monitoring user must be able to SSH to the device
+ with SSH key authentication, so make sure the SSH keys have been exchanged.
+ The Special Agent itself can be configured via WATO.
+
+item:
+ "Drives", "MDisks" or "VDisks" plus the name of the node.
+
+inventory:
+ Creates one check for Drives, one for MDisks and one for VDisks per node.
+
+perfdata:
+ Two values: Throughput read and throughput write in Bytes/sec.
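The nodestats manpages above all describe the same mechanics: ibm_svc_nodestats_parse() keys the per-node values by items that combine the subsystem and the node name, and the throughput subcheck scales the agent's r_mb/w_mb values (MB/s) to bytes/s for output and perfdata. A minimal, self-contained Python sketch of that arithmetic; the function name and the sample data are invented for illustration, and the shipped check additionally formats the values human-readably:

    def nodestats_diskio_summary(parsed, item):
        # 'parsed' mimics the structure built by ibm_svc_nodestats_parse(),
        # e.g. {"VDisks BLUBBSVC01": {"r_mb": 113, "w_mb": 93}} (sample data)
        if item not in parsed:
            return 3, "%s not found in agent output" % item
        read_bytes  = parsed[item]["r_mb"] * 1024 * 1024    # MB/s -> bytes/s
        write_bytes = parsed[item]["w_mb"] * 1024 * 1024
        perfdata = [("read", read_bytes), ("write", write_bytes)]
        return 0, "%d B/s read, %d B/s write" % (read_bytes, write_bytes), perfdata

    print(nodestats_diskio_summary(
        {"VDisks BLUBBSVC01": {"r_mb": 113, "w_mb": 93}}, "VDisks BLUBBSVC01"))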
diff -Nru check-mk-1.2.2p3/ibm_svc_nodestats.disk_latency check-mk-1.2.6p12/ibm_svc_nodestats.disk_latency
--- check-mk-1.2.2p3/ibm_svc_nodestats.disk_latency 1970-01-01 00:00:00.000000000 +0000
+++ check-mk-1.2.6p12/ibm_svc_nodestats.disk_latency 2015-06-24 09:48:36.000000000 +0000
@@ -0,0 +1,24 @@
+title: IBM SVC / Storwize V3700 / V7000: Latency for Drives/MDisks/VDisks per Node
+agents: ibm_svc
+catalog: hw/storagehw/ibm
+license: GPL
+distribution: check_mk
+description:
+ Reports the Latency for Read and Write (in ms) for Drives, MDisks and VDisks
+ for each node of an IBM SVC / Storwize V3700 / V7000 device.
+
+ This check is informational only and always returns {OK}.
+
+ Please note: You need the Special Agent agent_ibmsvc to retrieve the monitoring
+ data from the device. Your monitoring user must be able to SSH to the device
+ with SSH key authentication, so make sure the SSH keys have been exchanged.
+ The Special Agent itself can be configured via WATO.
+
+item:
+ "Drives", "MDisks" or "VDisks" plus the name of the node.
+
+inventory:
+ Creates one check for Drives, one for MDisks and one for VDisks per node.
+
+perfdata:
+ Two values: Latency for read and write in ms.
diff -Nru check-mk-1.2.2p3/ibm_svc_nodestats.iops check-mk-1.2.6p12/ibm_svc_nodestats.iops
--- check-mk-1.2.2p3/ibm_svc_nodestats.iops 1970-01-01 00:00:00.000000000 +0000
+++ check-mk-1.2.6p12/ibm_svc_nodestats.iops 2015-06-24 09:48:36.000000000 +0000
@@ -0,0 +1,24 @@
+title: IBM SVC / Storwize V3700 / V7000: IO operations/sec for Drives/MDisks/VDisks per Node
+agents: ibm_svc
+catalog: hw/storagehw/ibm
+license: GPL
+distribution: check_mk
+description:
+ Reports the IO operations per second for Drives, MDisks and VDisks for each node
+ of an IBM SVC / Storwize V3700 / V7000 device.
+
+ This check is informational only and always returns {OK}.
+
+ Please note: You need the Special Agent agent_ibmsvc to retrieve the monitoring
+ data from the device. Your monitoring user must be able to SSH to the device
+ with SSH key authentication, so make sure the SSH keys have been exchanged.
+ The Special Agent itself can be configured via WATO.
+
+item:
+ "Drives", "MDisks" or "VDisks" plus the name of the node.
+
+inventory:
+ Creates one check for Drives, one for MDisks and one for VDisks per node.
+
+perfdata:
+ Two values: IO operations per second for read and write.
diff -Nru check-mk-1.2.2p3/ibm_svc_portfc check-mk-1.2.6p12/ibm_svc_portfc
--- check-mk-1.2.2p3/ibm_svc_portfc 1970-01-01 00:00:00.000000000 +0000
+++ check-mk-1.2.6p12/ibm_svc_portfc 2015-09-21 10:59:54.000000000 +0000
@@ -0,0 +1,77 @@
+#!/usr/bin/python
+# -*- encoding: utf-8; py-indent-offset: 4 -*-
+# +------------------------------------------------------------------+
+# |             ____ _               _        __  __ _  __           |
+# |            / ___| |__   ___  ___| | __   |  \/  | |/ /           |
+# |           | |   | '_ \ / _ \/ __| |/ /   | |\/| | ' /            |
+# |           | |___| | | |  __/ (__|   <    | |  | | . \            |
+# |            \____|_| |_|\___|\___|_|\_\___|_|  |_|_|\_\           |
+# |                                                                  |
+# | Copyright Mathias Kettner 2014             mk@mathias-kettner.de |
+# +------------------------------------------------------------------+
+#
+# This file is part of Check_MK.
+# The official homepage is at http://mathias-kettner.de/check_mk.
+#
+# check_mk is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by
+# the Free Software Foundation in version 2. check_mk is distributed
+# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with-
+# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE. See the GNU General Public License for more de-
+# tails. You should have received a copy of the GNU General Public
+# License along with GNU Make; see the file COPYING. If not, write
+# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
+# Boston, MA 02110-1301 USA.
+
+# Output may have 11 fields:
+# id:fc_io_port_id:port_id:type:port_speed:node_id:node_name:WWPN:nportid:status:attachment
+# Example output from agent:
+# <<<ibm_svc_portfc:sep(58)>>>
+# 0:1:1:fc:8Gb:1:node1:5005076803042126:030400:active:switch
+# 1:2:2:fc:8Gb:1:node1:5005076803082126:040400:active:switch
+# 2:3:3:fc:N/A:1:node1:50050768030C2126:000000:inactive_unconfigured:none
+# 3:4:4:fc:N/A:1:node1:5005076803102126:000000:inactive_unconfigured:none
+# 8:1:1:fc:8Gb:2:node2:5005076803042127:030500:active:switch
+# 9:2:2:fc:8Gb:2:node2:5005076803082127:040500:active:switch
+# 10:3:3:fc:N/A:2:node2:50050768030C2127:000000:inactive_unconfigured:none
+# 11:4:4:fc:N/A:2:node2:5005076803102127:000000:inactive_unconfigured:none
+#
+# Output may have 12 fields:
+# id:fc_io_port_id:port_id:type:port_speed:node_id:node_name:WWPN:nportid:status:attachment:cluster_use
+# Example output from agent:
+# <<<ibm_svc_portfc:sep(58)>>>
+# 0:1:1:fc:8Gb:1:node1:5005076803042126:030400:active:switch:local_partner
+# 1:2:2:fc:8Gb:1:node1:5005076803082126:040400:active:switch:local_partner
+# 2:3:3:fc:N/A:1:node1:50050768030C2126:000000:inactive_unconfigured:none:local_partner
+# 3:4:4:fc:N/A:1:node1:5005076803102126:000000:inactive_unconfigured:none:local_partner
+# 8:1:1:fc:8Gb:2:node2:5005076803042127:030500:active:switch:local_partner
+# 9:2:2:fc:8Gb:2:node2:5005076803082127:040500:active:switch:local_partner
+# 10:3:3:fc:N/A:2:node2:50050768030C2127:000000:inactive_unconfigured:none:local_partner
+# 11:4:4:fc:N/A:2:node2:5005076803102127:000000:inactive_unconfigured:none:local_partner
+
+
+def inventory_ibm_svc_portfc(info):
+    for line in info:
+        if len(line) in (11, 12, 14) and line[9] == "active":
+            yield line[0], None
+
+def check_ibm_svc_portfc(item, _no_params, info):
+    for line in info:
+        if len(line) in (11, 12, 14):
+            if line[0] == item:
+                fc_port_status = line[9]
+
+                if fc_port_status == "active":
+                    state = 0
+                else:
+                    state = 2
+
+                return state, fc_port_status
+
+check_info["ibm_svc_portfc"] = {
+    "check_function"        : check_ibm_svc_portfc,
+    "inventory_function"    : inventory_ibm_svc_portfc,
+    "service_description"   : "FC Port %s",
+}
+
diff -Nru check-mk-1.2.2p3/ibm_svc_system check-mk-1.2.6p12/ibm_svc_system
--- check-mk-1.2.2p3/ibm_svc_system 1970-01-01 00:00:00.000000000 +0000
+++ check-mk-1.2.6p12/ibm_svc_system 2015-09-21 10:59:54.000000000 +0000
@@ -0,0 +1,116 @@
+#!/usr/bin/python
+# -*- encoding: utf-8; py-indent-offset: 4 -*-
+# +------------------------------------------------------------------+
+# |             ____ _               _        __  __ _  __           |
+# |            / ___| |__   ___  ___| | __   |  \/  | |/ /           |
+# |           | |   | '_ \ / _ \/ __| |/ /   | |\/| | ' /            |
+# |           | |___| | | |  __/ (__|   <    | |  | | . \            |
+# |            \____|_| |_|\___|\___|_|\_\___|_|  |_|_|\_\           |
+# |                                                                  |
+# | Copyright Mathias Kettner 2014             mk@mathias-kettner.de |
+# +------------------------------------------------------------------+
+#
+# This file is part of Check_MK.
+# The official homepage is at http://mathias-kettner.de/check_mk.
+#
+# check_mk is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by
+# the Free Software Foundation in version 2. check_mk is distributed
+# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with-
+# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE. See the GNU General Public License for more de-
+# tails. You should have received a copy of the GNU General Public
+# License along with GNU Make; see the file COPYING. If not, write
+# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
+# Boston, MA 02110-1301 USA.
+
+# Example output from agent:
+# <<<ibm_svc_system:sep(58)>>>
+# id:0000020060C16104
+# name:BLUBB_NAME
+# location:local
+# partnership:
+# bandwidth:
+# total_mdisk_capacity:192.9TB
+# space_in_mdisk_grps:192.9TB
+# space_allocated_to_vdisks:147.48TB
+# total_free_space:45.5TB
+# total_vdiskcopy_capacity:149.54TB
+# total_used_capacity:147.44TB
+# total_overallocation:77
+# total_vdisk_capacity:74.77TB
+# total_allocated_extent_capacity:147.49TB
+# statistics_status:on
+# statistics_frequency:5
+# cluster_locale:en_US
+# time_zone:384 Europe/Paris
+# code_level:6.4.1.4 (build 75.3.1303080000)
+# console_IP:x.x.x.x:443
+# id_alias:0000020060C16104
+# gm_link_tolerance:300
+# gm_inter_cluster_delay_simulation:0
+# gm_intra_cluster_delay_simulation:0
+# gm_max_host_delay:5
+# email_reply:master@desaster
+# email_contact:Wichtiger Admin
+# email_contact_primary:+49 30 555555555
+# email_contact_alternate:
+# email_contact_location:blubb
+# email_contact2:
+# email_contact2_primary:
+# email_contact2_alternate:
+# email_state:running
+# inventory_mail_interval:7
+# cluster_ntp_IP_address:x.x.x.x
+# cluster_isns_IP_address:
+# iscsi_auth_method:none
+# iscsi_chap_secret:
+# auth_service_configured:no
+# auth_service_enabled:no
+# auth_service_url:
+# auth_service_user_name:
+# auth_service_pwd_set:no
+# auth_service_cert_set:no
+# auth_service_type:tip
+# relationship_bandwidth_limit:25
+# tier:generic_ssd
+# tier_capacity:0.00MB
+# tier_free_capacity:0.00MB
+# tier:generic_hdd
+# tier_capacity:192.94TB
+# tier_free_capacity:45.46TB
+# has_nas_key:no
+# layer:replication
+# rc_buffer_size:48
+# compression_active:no
+# compression_virtual_capacity:0.00MB
+# compression_compressed_capacity:0.00MB
+# compression_uncompressed_capacity:0.00MB
+# cache_prefetch:on
+# email_organization:Acme Inc
+# email_machine_address:
+# email_machine_city:Berlin
+# email_machine_state:XX
+# email_machine_zip:
+# email_machine_country:DE
+# total_drive_raw_capacity:0
+
+def inventory_ibm_svc_system(info):
+    return [(None, None)]
+
+def check_ibm_svc_system(item, _no_params, info):
+    message = ""
+    for line in info:
+        if line[0] in ("name", "location", "code_level", "email_contact_location"):
+            if message != "":
+                message += ", "
+            message += "%s: %s" % (line[0], line[1])
+    return 0, message
+
+check_info["ibm_svc_system"] = {
+    "check_function"        : check_ibm_svc_system,
+    "inventory_function"    : inventory_ibm_svc_system,
+    "service_description"   : "IBM SVC Info",
+    "has_perfdata"          : False,
+}
+
diff -Nru check-mk-1.2.2p3/ibm_svc_systemstats check-mk-1.2.6p12/ibm_svc_systemstats
--- check-mk-1.2.2p3/ibm_svc_systemstats 1970-01-01 00:00:00.000000000 +0000
+++ check-mk-1.2.6p12/ibm_svc_systemstats 2015-09-21 10:59:54.000000000 +0000
@@ -0,0 +1,293 @@
+#!/usr/bin/python
+# -*- encoding: utf-8; py-indent-offset: 4 -*-
+# +------------------------------------------------------------------+
+# |             ____ _               _        __  __ _  __           |
+# |            / ___| |__   ___  ___| | __   |  \/  | |/ /           |
+# |           | |   | '_ \ / _ \/ __| |/ /   | |\/| | ' /            |
+# |           | |___| | | |  __/ (__|   <    | |  | | . \            |
+# |            \____|_| |_|\___|\___|_|\_\___|_|  |_|_|\_\           |
+# |                                                                  |
+# | Copyright Mathias Kettner 2014             mk@mathias-kettner.de |
+# +------------------------------------------------------------------+
+#
+# This file is part of Check_MK.
+# The official homepage is at http://mathias-kettner.de/check_mk.
+#
+# check_mk is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by
+# the Free Software Foundation in version 2. check_mk is distributed
+# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with-
+# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE. See the GNU General Public License for more de-
+# tails. You should have received a copy of the GNU General Public
+# License along with GNU Make; see the file COPYING. If not, write
+# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
+# Boston, MA 02110-1301 USA.
+
+# Note: This file is almost identical with ibm_svc_nodestats. We should
+# create an include file for sharing common code!
+
+# Example output from agent:
+# <<<ibm_svc_systemstats:sep(58)>>>
+# compression_cpu_pc:0:0:140325134929
+# cpu_pc:2:2:140325134929
+# fc_mb:640:1482:140325134754
+# fc_io:46718:56258:140325134749
+# sas_mb:0:0:140325134929
+# sas_io:0:0:140325134929
+# iscsi_mb:0:0:140325134929
+# iscsi_io:0:0:140325134929
+# write_cache_pc:0:1:140325134819
+# total_cache_pc:70:75:140325134704
+# vdisk_mb:207:533:140325134754
+# vdisk_io:4827:5966:140325134819
+# vdisk_ms:1:2:140325134759
+# mdisk_mb:222:651:140325134754
+# mdisk_io:4995:6741:140325134754
+# mdisk_ms:1:3:140325134809
+# drive_mb:0:0:140325134929
+# drive_io:0:0:140325134929
+# drive_ms:0:0:140325134929
+# vdisk_r_mb:113:428:140325134524
+# vdisk_r_io:2470:3672:140325134819
+# vdisk_r_ms:1:4:140325134759
+# vdisk_w_mb:93:143:140325134704
+# vdisk_w_io:2359:3595:140325134859
+# vdisk_w_ms:0:2:140325134704
+# mdisk_r_mb:32:362:140325134754
+# mdisk_r_io:1452:2825:140325134754
+# mdisk_r_ms:4:7:140325134649
+# mdisk_w_mb:189:291:140325134749
+# mdisk_w_io:3542:4465:140325134714
+# mdisk_w_ms:0:2:140325134819
+# drive_r_mb:0:0:140325134929
+# drive_r_io:0:0:140325134929
+# drive_r_ms:0:0:140325134929
+# drive_w_mb:0:0:140325134929
+# drive_w_io:0:0:140325134929
+# drive_w_ms:0:0:140325134929
+
+# parses agent output into a structure like:
+# {'Drives': {'r_mb': 0, 'w_mb': 0, 'r_io': 0, 'w_io': 0, 'r_ms': 0, 'w_ms': 0},
+#  'MDisks': {'r_mb': 32, 'w_mb': 189, 'r_io': 1452, 'w_io': 3542, 'r_ms': 4, 'w_ms': 0},
+#  'VDisks': {'r_mb': 113, 'w_mb': 93, 'r_io': 2470, 'w_io': 2359, 'r_ms': 1, 'w_ms': 0}}
+
+def ibm_svc_systemstats_parse(info):
+    parsed = {}
+    for stat_name, stat_current, stat_peak, stat_peak_time in info:
+        if stat_name in ("vdisk_r_mb", "vdisk_w_mb", "vdisk_r_io", "vdisk_w_io", "vdisk_r_ms", "vdisk_w_ms"):
+            if "VDisks" not in parsed.keys():
+                parsed["VDisks"] = {}
+            stat_name = stat_name.replace("vdisk_", "")
+            parsed["VDisks"][stat_name] = int(stat_current)
+        if stat_name in ("mdisk_r_mb", "mdisk_w_mb", "mdisk_r_io", "mdisk_w_io", "mdisk_r_ms", "mdisk_w_ms"):
+            if "MDisks" not in parsed.keys():
+                parsed["MDisks"] = {}
+            stat_name = stat_name.replace("mdisk_", "")
+            parsed["MDisks"][stat_name] = int(stat_current)
+        if stat_name in ("drive_r_mb", "drive_w_mb", "drive_r_io", "drive_w_io", "drive_r_ms", "drive_w_ms"):
+            if "Drives" not in parsed.keys():
+                parsed["Drives"] = {}
+            stat_name = stat_name.replace("drive_", "")
+            parsed["Drives"][stat_name] = int(stat_current)
+    return parsed
+
+# 
.--disk IO-------------------------------------------------------------. +# | _ _ _ ___ ___ | +# | __| (_)___| | __ |_ _/ _ \ | +# | / _` | / __| |/ / | | | | | | +# | | (_| | \__ \ < | | |_| | | +# | \__,_|_|___/_|\_\ |___\___/ | +# | | +# '----------------------------------------------------------------------' + +def inventory_ibm_svc_systemstats_diskio(info): + inventory = [] + parsed = ibm_svc_systemstats_parse(info) + for key in parsed.keys(): + inventory.append( (key, None) ) + return inventory + +def check_ibm_svc_systemstats_diskio(item, _no_params, info): + parsed = ibm_svc_systemstats_parse(info) + + if item not in parsed.keys(): + return 3, "%s not found in agent output" % item + + read_bytes = parsed[item]['r_mb'] * 1024 * 1024 + write_bytes = parsed[item]['w_mb'] * 1024 * 1024 + perfdata = [ ("read", read_bytes), ("write", write_bytes) ] + + return 0, "%s/s read, %s/s write" % \ + (get_bytes_human_readable(read_bytes), get_bytes_human_readable(write_bytes)), \ + perfdata + +check_info["ibm_svc_systemstats.diskio"] = { + "check_function" : check_ibm_svc_systemstats_diskio, + "inventory_function" : inventory_ibm_svc_systemstats_diskio, + "service_description" : "IBM SVC Throughput %s Total", + "has_perfdata" : True, +} + +#. +# .--iops----------------------------------------------------------------. +# | _ | +# | (_) ___ _ __ ___ | +# | | |/ _ \| '_ \/ __| | +# | | | (_) | |_) \__ \ | +# | |_|\___/| .__/|___/ | +# | |_| | +# '----------------------------------------------------------------------' + +def inventory_ibm_svc_systemstats_iops(info): + inventory = [] + parsed = ibm_svc_systemstats_parse(info) + for key in parsed.keys(): + inventory.append( (key, None) ) + return inventory + +def check_ibm_svc_systemstats_iops(item, _no_params, info): + parsed = ibm_svc_systemstats_parse(info) + + if item not in parsed.keys(): + return 3, "%s not found in agent output" % item + + read_iops = parsed[item]['r_io'] + write_iops = parsed[item]['w_io'] + perfdata = [ ("read", read_iops), ("write", write_iops) ] + + return 0, "%s IO/s read, %s IO/s write" % (read_iops, write_iops), perfdata + +check_info["ibm_svc_systemstats.iops"] = { + "check_function" : check_ibm_svc_systemstats_iops, + "inventory_function" : inventory_ibm_svc_systemstats_iops, + "service_description" : "IBM SVC IOPS %s Total", + "has_perfdata" : True, +} + +#. +# .--disk latency--------------------------------------------------------. 
+# | _ _ _ _ _ | +# | __| (_)___| | __ | | __ _| |_ ___ _ __ ___ _ _ | +# | / _` | / __| |/ / | |/ _` | __/ _ \ '_ \ / __| | | | | +# | | (_| | \__ \ < | | (_| | || __/ | | | (__| |_| | | +# | \__,_|_|___/_|\_\ |_|\__,_|\__\___|_| |_|\___|\__, | | +# | |___/ | +# '----------------------------------------------------------------------' + +def inventory_ibm_svc_systemstats_disk_latency(info): + inventory = [] + parsed = ibm_svc_systemstats_parse(info) + for key in parsed.keys(): + inventory.append( (key, {}) ) + return inventory + +def check_ibm_svc_systemstats_disk_latency(item, params, info): + parsed = ibm_svc_systemstats_parse(info) + + + if item not in parsed.keys(): + yield 3, "%s not found in agent output" % item + return + + if not params: + params = {} # Convert from previous None + + for what, latency in [ + ('read', parsed[item]['r_ms'] ), + ('write', parsed[item]['w_ms'] )]: + + state, text, extraperf = check_levels(latency, what + "_latency", params.get(what), unit = "ms") + yield state, ("Latency is %.1f ms for %s" % (latency, what)) + text, \ + [ ( what + "_latency", latency) ] + extraperf + + +check_info["ibm_svc_systemstats.disk_latency"] = { + "check_function" : check_ibm_svc_systemstats_disk_latency, + "inventory_function" : inventory_ibm_svc_systemstats_disk_latency, + "service_description" : "IBM SVC Latency %s Total", + "has_perfdata" : True, + "group" : "ibm_svc_total_latency", +} + +#. +# .--cpu-----------------------------------------------------------------. +# | | +# | ___ _ __ _ _ | +# | / __| '_ \| | | | | +# | | (__| |_) | |_| | | +# | \___| .__/ \__,_| | +# | |_| | +# | | +# '----------------------------------------------------------------------' + +ibm_svc_cpu_default_levels = ( 90.0, 95.0 ) + +def inventory_ibm_svc_systemstats_cpu(info): + inventory = [] + for stat_name, stat_current, stat_peak, stat_peak_time in info: + if stat_name == "cpu_pc": + inventory.append( (None, "ibm_svc_cpu_default_levels") ) + return inventory + +def check_ibm_svc_systemstats_cpu(item, params, info): + for stat_name, stat_current, stat_peak, stat_peak_time in info: + if stat_name == "cpu_pc": + return check_cpu_util(int(stat_current), params) + + return 3, "value cpu_pc not found in agent output for node %s" % item + +check_info["ibm_svc_systemstats.cpu_util"] = { + "check_function" : check_ibm_svc_systemstats_cpu, + "inventory_function" : inventory_ibm_svc_systemstats_cpu, + "service_description" : "CPU utilization Total", + "has_perfdata" : True, + "group" : "cpu_utilization", + "includes" : [ "cpu_util.include" ], +} +#. +# .--cache---------------------------------------------------------------. 
+# |                                 _                                     |
+# |                   ___ __ _  ___| |__   ___                            |
+# |                  / __/ _` |/ __| '_ \ / _ \                           |
+# |                 | (_| (_| | (__| | | |  __/                           |
+# |                  \___\__,_|\___|_| |_|\___|                           |
+# |                                                                       |
+# '----------------------------------------------------------------------'
+
+def inventory_ibm_svc_systemstats_cache(info):
+    inventory = []
+    for stat_name, stat_current, stat_peak, stat_peak_time in info:
+        if stat_name == "total_cache_pc":
+            inventory.append( (None, None) )
+    return inventory
+
+def check_ibm_svc_systemstats_cache(item, _no_params, info):
+    write_cache_pc = None
+    total_cache_pc = None
+
+    for stat_name, stat_current, stat_peak, stat_peak_time in info:
+        if stat_name == "total_cache_pc":
+            total_cache_pc = int(stat_current)
+        if stat_name == "write_cache_pc":
+            write_cache_pc = int(stat_current)
+
+    if total_cache_pc == None:
+        return 3, "value total_cache_pc not found in agent output"
+    if write_cache_pc == None:
+        return 3, "value write_cache_pc not found in agent output"
+
+    perfdata = [ ("write_cache_pc", write_cache_pc, None, None, 0, 100),
+                 ("total_cache_pc", total_cache_pc, None, None, 0, 100) ]
+
+    return 0, "Write cache usage is %d %%, total cache usage is %d %%" % \
+              (write_cache_pc, total_cache_pc), perfdata
+
+check_info["ibm_svc_systemstats.cache"] = {
+    "check_function"        : check_ibm_svc_systemstats_cache,
+    "inventory_function"    : inventory_ibm_svc_systemstats_cache,
+    "service_description"   : "IBM SVC Cache Total",
+    "has_perfdata"          : True,
+}
+
+#.
diff -Nru check-mk-1.2.2p3/ibm_svc_systemstats.cache check-mk-1.2.6p12/ibm_svc_systemstats.cache
--- check-mk-1.2.2p3/ibm_svc_systemstats.cache 1970-01-01 00:00:00.000000000 +0000
+++ check-mk-1.2.6p12/ibm_svc_systemstats.cache 2015-06-24 09:48:36.000000000 +0000
@@ -0,0 +1,21 @@
+title: IBM SVC / V7000: Cache Usage in Total
+agents: ibm_svc
+catalog: hw/storagehw/ibm
+license: GPL
+distribution: check_mk
+description:
+ Reports the Write Cache Usage and Total Cache Usage for an
+ IBM SVC / V7000 device in Total in percent.
+
+ This check is informational only and always returns {OK}.
+
+ Please note: You need the Special Agent agent_ibmsvc to retrieve the monitoring
+ data from the device. Your monitoring user must be able to SSH to the device
+ with SSH key authentication, so make sure the SSH keys have been exchanged.
+ The Special Agent itself can be configured via WATO.
+
+inventory:
+ Creates one check per IBM SVC / V7000 device.
+
+perfdata:
+ Two values: The write cache usage and the total cache usage in percent.
diff -Nru check-mk-1.2.2p3/ibm_svc_systemstats.cpu_util check-mk-1.2.6p12/ibm_svc_systemstats.cpu_util
--- check-mk-1.2.2p3/ibm_svc_systemstats.cpu_util 1970-01-01 00:00:00.000000000 +0000
+++ check-mk-1.2.6p12/ibm_svc_systemstats.cpu_util 2015-06-24 09:48:36.000000000 +0000
@@ -0,0 +1,43 @@
+title: IBM SVC / V7000: CPU Utilization in Total
+agents: ibm_svc
+catalog: hw/storagehw/ibm
+license: GPL
+distribution: check_mk
+description:
+ Reports the CPU Utilization of an IBM SVC / V7000 device in total.
+
+ The check returns {WARN} or {CRIT} if the utilization in percent is higher
+ than the given levels and {OK} otherwise.
+
+ Please note: You need the Special Agent agent_ibmsvc to retrieve the monitoring
+ data from the device. Your monitoring user must be able to SSH to the device
+ with SSH key authentication, so make sure the SSH keys have been exchanged.
+ The Special Agent itself can be configured via WATO.
+
+inventory:
+ Creates one check per IBM SVC / V7000 device.
+
+perfdata:
+ One value: The utilization in percent, together with its levels for
+ warn and crit.
+
+examples:
+ # set default levels to 70 and 80 percent:
+ ibm_svc_cpu_default_levels = (70.0, 80.0)
+
+ # Check CPU Utilization on an IBM SVC called my-svc with default levels
+ checks += [
+     ("my-svc", "ibm_svc_systemstats.cpu_util", None, ibm_svc_cpu_default_levels)
+ ]
+
+ # or use individual levels for warn and crit
+ checks += [
+     ("my-svc", "ibm_svc_systemstats.cpu_util", None, (75.0, 85.0))
+ ]
+
+[parameters]
+parameters (float, float): levels of CPU utilization for {WARN} and {CRIT} in percent
+
+[configuration]
+ibm_svc_cpu_default_levels (float, float): The standard levels for {WARN} and
+ {CRIT}, preset to (90.0, 95.0)
diff -Nru check-mk-1.2.2p3/ibm_svc_systemstats.diskio check-mk-1.2.6p12/ibm_svc_systemstats.diskio
--- check-mk-1.2.2p3/ibm_svc_systemstats.diskio 1970-01-01 00:00:00.000000000 +0000
+++ check-mk-1.2.6p12/ibm_svc_systemstats.diskio 2015-06-24 09:48:36.000000000 +0000
@@ -0,0 +1,24 @@
+title: IBM SVC / V7000: Disk Throughput for Drives/MDisks/VDisks in Total
+agents: ibm_svc
+catalog: hw/storagehw/ibm
+license: GPL
+distribution: check_mk
+description:
+ Reports the disk throughput for Drives, MDisks and VDisks of an IBM
+ SVC / V7000 device in total in bytes per second.
+
+ This check is informational only and always returns {OK}.
+
+ Please note: You need the Special Agent agent_ibmsvc to retrieve the monitoring
+ data from the device. Your monitoring user must be able to SSH to the device
+ with SSH key authentication, so make sure the SSH keys have been exchanged.
+ The Special Agent itself can be configured via WATO.
+
+item:
+ "Drives", "MDisks" or "VDisks"
+
+inventory:
+ Creates one check for Drives, one for MDisks and one for VDisks.
+
+perfdata:
+ Two values: Throughput read and throughput write in Bytes/sec.
diff -Nru check-mk-1.2.2p3/ibm_svc_systemstats.disk_latency check-mk-1.2.6p12/ibm_svc_systemstats.disk_latency
--- check-mk-1.2.2p3/ibm_svc_systemstats.disk_latency 1970-01-01 00:00:00.000000000 +0000
+++ check-mk-1.2.6p12/ibm_svc_systemstats.disk_latency 2015-06-24 09:48:36.000000000 +0000
@@ -0,0 +1,24 @@
+title: IBM SVC / V7000: Latency for Drives/MDisks/VDisks in Total
+agents: ibm_svc
+catalog: hw/storagehw/ibm
+license: GPL
+distribution: check_mk
+description:
+ Reports the Latency (in ms) for Drives, MDisks and VDisks for an
+ IBM SVC / V7000 device in total.
+
+ This check is informational only and always returns {OK}.
+
+ Please note: You need the Special Agent agent_ibmsvc to retrieve the monitoring
+ data from the device. Your monitoring user must be able to SSH to the device
+ with SSH key authentication, so make sure the SSH keys have been exchanged.
+ The Special Agent itself can be configured via WATO.
+
+item:
+ "Drives", "MDisks" or "VDisks".
+
+inventory:
+ Creates one check for Drives, one for MDisks and one for VDisks.
+
+perfdata:
+ Two values: Latency for read and write in ms.
diff -Nru check-mk-1.2.2p3/ibm_svc_systemstats.iops check-mk-1.2.6p12/ibm_svc_systemstats.iops
--- check-mk-1.2.2p3/ibm_svc_systemstats.iops 1970-01-01 00:00:00.000000000 +0000
+++ check-mk-1.2.6p12/ibm_svc_systemstats.iops 2015-06-24 09:48:36.000000000 +0000
@@ -0,0 +1,24 @@
+title: IBM SVC / V7000: IO operations/sec for Drives/MDisks/VDisks in Total
+agents: ibm_svc
+catalog: hw/storagehw/ibm
+license: GPL
+distribution: check_mk
+description:
+ Reports the IO operations per second for Drives, MDisks and VDisks for an
+ IBM SVC / V7000 device in total.
+
+ This check is informational only and always returns {OK}.
+
+ Please note: You need the Special Agent agent_ibmsvc to retrieve the monitoring
+ data from the device. Your monitoring user must be able to SSH to the device
+ with SSH key authentication, so make sure the SSH keys have been exchanged.
+ The Special Agent itself can be configured via WATO.
+
+item:
+ "Drives", "MDisks" or "VDisks".
+
+inventory:
+ Creates one check for Drives, one for MDisks and one for VDisks.
+
+perfdata:
+ Two values: IO operations per second for read and write.
diff -Nru check-mk-1.2.2p3/ibm_xraid_pdisks check-mk-1.2.6p12/ibm_xraid_pdisks
--- check-mk-1.2.2p3/ibm_xraid_pdisks 2013-11-05 09:23:07.000000000 +0000
+++ check-mk-1.2.6p12/ibm_xraid_pdisks 2015-06-24 09:48:37.000000000 +0000
@@ -7,7 +7,7 @@
 # |           | |___| | | |  __/ (__|   <    | |  | | . \            |
 # |            \____|_| |_|\___|\___|_|\_\___|_|  |_|_|\_\           |
 # |                                                                  |
-# | Copyright Mathias Kettner 2013             mk@mathias-kettner.de |
+# | Copyright Mathias Kettner 2014             mk@mathias-kettner.de |
 # +------------------------------------------------------------------+
 #
 # This file is part of Check_MK.
@@ -55,13 +55,13 @@
         if disk_path == item:
             slot_label, disk_id, disk_type, disk_state, slot_desc = disk_entry
             if disk_state == "3":
-                return (0, "OK - Disk is active" + " [%s]" % slot_desc)
+                return (0, "Disk is active" + " [%s]" % slot_desc)
             elif disk_state == "4":
-                return (1, "WARN - Disk is rebuilding" + " [%s]" % slot_desc)
+                return (1, "Disk is rebuilding" + " [%s]" % slot_desc)
             elif disk_state == "5":
-                return (2, "WARN - Disk is dead" + " [%s]" % slot_desc)
+                return (2, "Disk is dead" + " [%s]" % slot_desc)
 
-    return (2, "CRIT - disk is missing") # + " [%s]" % data[item][4])
+    return (2, "disk is missing") # + " [%s]" % data[item][4])
 
 check_info["ibm_xraid_pdisks"] = {
diff -Nru check-mk-1.2.2p3/if check-mk-1.2.6p12/if
--- check-mk-1.2.2p3/if 2013-11-05 09:23:07.000000000 +0000
+++ check-mk-1.2.6p12/if 2015-09-18 13:28:27.000000000 +0000
@@ -7,7 +7,7 @@
 # |           | |___| | | |  __/ (__|   <    | |  | | . \            |
 # |            \____|_| |_|\___|\___|_|\_\___|_|  |_|_|\_\           |
 # |                                                                  |
-# | Copyright Mathias Kettner 2013             mk@mathias-kettner.de |
+# | Copyright Mathias Kettner 2014             mk@mathias-kettner.de |
 # +------------------------------------------------------------------+
 #
 # This file is part of Check_MK.
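A side note on the ibm_xraid_pdisks hunk above: the "OK -"/"WARN -"/"CRIT -" prefixes were removed because this Check_MK generation prepends the state marker to the output text itself, so checks return only the bare status text. The same state logic can also be written as a lookup table; this is a sketch only, and IBM_XRAID_STATE_MAP is an invented name, not part of the shipped check:

    IBM_XRAID_STATE_MAP = {
        "3": (0, "Disk is active"),       # numeric codes as in the hunk above
        "4": (1, "Disk is rebuilding"),
        "5": (2, "Disk is dead"),
    }

    def xraid_disk_status(disk_state, slot_desc):
        state, text = IBM_XRAID_STATE_MAP.get(disk_state, (2, "disk is missing"))
        return state, "%s [%s]" % (text, slot_desc)

    print(xraid_disk_status("4", "Slot 3"))   # -> (1, 'Disk is rebuilding [Slot 3]')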
@@ -33,37 +33,42 @@ def if_convert_to_if64(info): newinfo = [] for line in info: - newinfo.append(line[0:8] + ['0'] + line[8:12] + ['0'] + line[12:16] + line[1:2] + line[16:17]) + if saveint(line[0]) > 0: #Fix for switches with empty indexes + newinfo.append(line[0:8] + ['0'] + line[8:12] + ['0'] + line[12:16] + line[1:2] + line[16:17]) + # Fix encoding of interface description + newinfo[-1][1] = snmp_decode_string(newinfo[-1][1]) return newinfo -check_info['if'] = (check_if, "Interface %s", 1, inventory_if) check_includes['if'] = [ "if.include" ] -checkgroup_of['if'] = "if" -check_default_levels['if'] = "if_default_levels" - -snmp_info['if'] = \ - ( ".1.3.6.1.2.1.2.2.1", [ - 1, # ifIndex 0 - 2, # ifDescr 1 - 3, # ifType 2 - 5, # ifSpeed 3 - 8, # ifOperStatus 4 - 10, # ifInOctets 5 - 11, # ifInUcastPkts 6 - 12, # ifInNUcastPkts 7 - 13, # ifInDiscards 8 - 14, # ifInErrors 9 - 16, # ifOutOctets 10 - 17, # ifOutUcastPkts 11 - 18, # ifOutNUcastPkts 12 - 19, # ifOutDiscards 13 - 20, # ifOutErrors 14 - 21, # ifOutQLen 15 - 6, # ifPhysAddress 16 - ] ) - -# check if number of network interfaces (IF-MIB::ifNumber.0) is at least 2 -snmp_scan_functions['if'] = \ - lambda oid: saveint(oid(".1.3.6.1.2.1.2.1.0")) >= 2 and \ - (if64_disabled(g_hostname) or not oid('.1.3.6.1.2.1.31.1.1.1.6.*')) - # use if64 if possible and not disabled +check_info["if"] = { + 'check_function': check_if, + 'inventory_function': inventory_if, + 'service_description': 'Interface %s', + 'has_perfdata': True, + 'snmp_info': ( ".1.3.6.1.2.1.2.2.1", [ + 1, # ifIndex 0 + 2, # ifDescr 1 + 3, # ifType 2 + 5, # ifSpeed 3 + 8, # ifOperStatus 4 + 10, # ifInOctets 5 + 11, # ifInUcastPkts 6 + 12, # ifInNUcastPkts 7 + 13, # ifInDiscards 8 + 14, # ifInErrors 9 + 16, # ifOutOctets 10 + 17, # ifOutUcastPkts 11 + 18, # ifOutNUcastPkts 12 + 19, # ifOutDiscards 13 + 20, # ifOutErrors 14 + 21, # ifOutQLen 15 + 6, # ifPhysAddress 16 + ]), + # check if number of network interfaces (IF-MIB::ifNumber.0) is at least 2 + # use if64 if possible and not disabled + 'snmp_scan_function': \ + lambda oid: saveint(oid(".1.3.6.1.2.1.2.1.0")) >= 2 and \ + (if64_disabled(g_hostname) or not oid('.1.3.6.1.2.1.31.1.1.1.6.*')), + 'group': 'if', + 'default_levels_variable': 'if_default_levels', +} diff -Nru check-mk-1.2.2p3/if64 check-mk-1.2.6p12/if64 --- check-mk-1.2.2p3/if64 2013-11-05 09:23:07.000000000 +0000 +++ check-mk-1.2.6p12/if64 2015-09-21 10:59:54.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. 
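The if hunk above shows the conversion pattern this release applies to every check: the legacy 4-tuple registration plus the side tables (checkgroup_of, snmp_info, snmp_scan_functions, check_default_levels) collapse into a single check_info dictionary. A sketch of the mechanical part of that mapping; convert_legacy_check_info() is an illustrative helper, not a Check_MK API:

    def convert_legacy_check_info(entry):
        # old tuple order: (check function, service description, perfdata flag,
        # inventory function), e.g. (check_if, "Interface %s", 1, inventory_if)
        check_function, service_description, has_perfdata, inventory_function = entry
        return {
            "check_function"      : check_function,
            "inventory_function"  : inventory_function,
            "service_description" : service_description,
            "has_perfdata"        : bool(has_perfdata),
        }

    new_style = convert_legacy_check_info((None, "Interface %s", 1, None))
    assert new_style["has_perfdata"] is True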
@@ -26,48 +26,74 @@ def fix_if64_highspeed(info): for line in info: - if type(line[3]) == str: # not yet converted - line[3] = saveint(line[3]) * 1000000 + if type(line[4]) == str: # not yet converted + line[4] = saveint(line[4]) * 1000000 + def inventory_if64(info): + # Fix for switches like DLINK, with empty lines in MIB + info = [ x for x in info if saveint(x[1]) > 0 ] fix_if64_highspeed(info) - return inventory_if_common(info) + return inventory_if_common(info, has_nodeinfo = True) def check_if64(item, params, info): fix_if64_highspeed(info) - return check_if_common(item, params, info) + return check_if_common(item, params, info, has_nodeinfo = True) + + +def if64_scan_function(oid): + ignored_strings = [ + "LANCOM", + "ELSA", + "T-Systems", + "Brocade VDX Switch", + ] + for string in ignored_strings: + if string in oid(".1.3.6.1.2.1.1.1.0"): + return False + + if ".4.1.11863." in oid(".1.3.6.1.2.1.1.2.0"): + return False + + if if64_disabled(g_hostname): + return False + + if oid(".1.3.6.1.2.1.31.1.1.1.6.*") != None: + return True + return False + + -check_info['if64'] = (check_if64, "Interface %s", 1, inventory_if64) -check_includes['if64'] = [ "if.include" ] -checkgroup_of['if64'] = "if" -check_default_levels['if64'] = "if_default_levels" - -snmp_info['if64'] = \ - ( ".1.3.6.1.2.1", [ - "2.2.1.1", # ifIndex 0 - "2.2.1.2", # ifDescr 1 - "2.2.1.3", # ifType 2 - "31.1.1.1.15", # ifHighSpeed .. 1000 means 1GBit - "2.2.1.8", # ifOperStatus 4 - "31.1.1.1.6", # ifHCInOctets 5 - "31.1.1.1.7", # ifHCInUcastPkts 6 - "31.1.1.1.8", # ifHCInMulticastPkts 7 - "31.1.1.1.9", # ifHCInBroadcastPkts 8 - "2.2.1.13", # ifInDiscards 9 - "2.2.1.14", # ifInErrors 10 - "31.1.1.1.10", # ifHCOutOctets 11 - "31.1.1.1.11", # ifHCOutUcastPkts 12 - "31.1.1.1.12", # ifHCOutMulticastPkts 13 - "31.1.1.1.13", # ifHCOutBroadcastPkts 14 - "2.2.1.19", # ifOutDiscards 15 - "2.2.1.20", # ifOutErrors 16 - "2.2.1.21", # ifOutQLen 17 - "31.1.1.1.18", # ifAlias 18 - "2.2.1.6", # ifPhysAddress 19 - ] ) - -# check if number of network interfaces (IF-MIB::ifNumber.0) is at least 2 -snmp_scan_functions['if64'] = \ - lambda oid: "LANCOM" not in oid("1.3.6.1.2.1.1.1.0") and \ - (not if64_disabled(g_hostname)) and \ - oid(".1.3.6.1.2.1.31.1.1.1.6.*") != None +check_info["if64"] = { + 'check_function': check_if64, + 'inventory_function': inventory_if64, + 'service_description': 'Interface %s', + 'has_perfdata': True, + 'includes': [ 'if.include' ], + 'snmp_info': ( ".1.3.6.1.2.1", [ + "2.2.1.1", # ifIndex 0 + "2.2.1.2", # ifDescr 1 + "2.2.1.3", # ifType 2 + "31.1.1.1.15", # ifHighSpeed .. 
1000 means 1Gbit + "2.2.1.8", # ifOperStatus 4 + "31.1.1.1.6", # ifHCInOctets 5 + "31.1.1.1.7", # ifHCInUcastPkts 6 + "31.1.1.1.8", # ifHCInMulticastPkts 7 + "31.1.1.1.9", # ifHCInBroadcastPkts 8 + "2.2.1.13", # ifInDiscards 9 + "2.2.1.14", # ifInErrors 10 + "31.1.1.1.10", # ifHCOutOctets 11 + "31.1.1.1.11", # ifHCOutUcastPkts 12 + "31.1.1.1.12", # ifHCOutMulticastPkts 13 + "31.1.1.1.13", # ifHCOutBroadcastPkts 14 + "2.2.1.19", # ifOutDiscards 15 + "2.2.1.20", # ifOutErrors 16 + "2.2.1.21", # ifOutQLen 17 + "31.1.1.1.18", # ifAlias 18 + "2.2.1.6", # ifPhysAddress 19 + ]), + 'snmp_scan_function' : if64_scan_function, + 'group' : 'if', + 'node_info' : True, + 'default_levels_variable' : 'if_default_levels', +} diff -Nru check-mk-1.2.2p3/if64_tplink check-mk-1.2.6p12/if64_tplink --- check-mk-1.2.2p3/if64_tplink 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/if64_tplink 2015-09-21 10:59:54.000000000 +0000 @@ -0,0 +1,76 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. + +def fix_tplink_highspeed(info): + for line in info: + if type(line[3]) == str: # not yet converted + line[3] = saveint(line[3]) * 1000000 + +def inventory_tplink(info): + fix_tplink_highspeed(info) + return inventory_if_common(info) + +def check_tplink(item, params, info): + fix_tplink_highspeed(info) + return check_if_common(item, params, info) + + +# check if number of network interfaces (IF-MIB::ifNumber.0) is at least 2 + +check_info["if64_tplink"] = { + 'check_function': check_tplink, + 'inventory_function': inventory_tplink, + 'service_description': 'Interface %s', + 'has_perfdata': True, + 'includes': [ 'if.include' ], + 'snmp_info': ( ".1.3.6.1", [ + "2.1.2.2.1.1", # ifIndex 0 + "2.1.2.2.1.2", # ifDescr 1 + "2.1.2.2.1.3", # ifType 2 + "2.1.31.1.1.1.15", # ifHighSpeed .. 
1000 means 1Gbit + "2.1.2.2.1.8", # ifOperStatus 4 + "2.1.31.1.1.1.6", # ifHCInOctets 5 + "2.1.31.1.1.1.7", # ifHCInUcastPkts 6 + "2.1.31.1.1.1.8", # ifHCInMulticastPkts 7 + "2.1.31.1.1.1.9", # ifHCInBroadcastPkts 8 + "2.1.2.2.1.13", # ifInDiscards 9 + "2.1.2.2.1.14", # ifInErrors 10 + "2.1.31.1.1.1.10", # ifHCOutOctets 11 + "2.1.31.1.1.1.11", # ifHCOutUcastPkts 12 + "2.1.31.1.1.1.12", # ifHCOutMulticastPkts 13 + "2.1.31.1.1.1.13", # ifHCOutBroadcastPkts 14 + "2.1.2.2.1.19", # ifOutDiscards 15 + "2.1.2.2.1.20", # ifOutErrors 16 + "2.1.2.2.1.21", # ifOutQLen 17 + "4.1.11863.1.1.3.2.1.1.1.1.2", # special for TP Link + "2.1.2.2.1.6", # ifPhysAddress 19 + ]), + 'snmp_scan_function': \ + lambda oid: ".4.1.11863." in oid(".1.3.6.1.2.1.1.2.0") and \ + oid(".1.3.6.1.2.1.31.1.1.1.6.*") != None, + 'group': 'if', + 'default_levels_variable': 'if_default_levels', +} diff -Nru check-mk-1.2.2p3/if_brocade check-mk-1.2.6p12/if_brocade --- check-mk-1.2.2p3/if_brocade 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/if_brocade 2015-09-21 10:59:54.000000000 +0000 @@ -0,0 +1,102 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. 
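fix_tplink_highspeed() above, like fix_if64_highspeed() in if64, normalises the ifHighSpeed column in place: SNMP delivers it in MBit/s as a string, and the check scales it to bit/s exactly once, using the string-type test as the "not yet converted" marker. A standalone sketch of that rule, with isinstance() and int() standing in for Check_MK's saveint():

    def fix_highspeed_column(info, column=3):
        for line in info:
            if isinstance(line[column], str):                     # not yet converted
                line[column] = int(line[column] or 0) * 1000000   # MBit/s -> bit/s

    row = ["1", "eth0", "6", "1000", "1"]
    fix_highspeed_column([row])
    assert row[3] == 1000000000     # ifHighSpeed 1000 means 1 GBit/s
    fix_highspeed_column([row])     # idempotent: a second call changes nothing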
+ +def fix_if_64_highspeed(info): + for line in info: + if type(line[3]) == str: # not yet converted + line[3] = saveint(line[3]) * 1000000 + +def if_brocade_convert(info): + iftab, ssidtab = info + ssid_dict = dict(ssidtab) + new_info = [] + for line in iftab: + if line[20].startswith("Point-2-Point"): + continue + else: + ifName = line[1] + + new_line = line[:-1] + if line[20].startswith("Logical Network"): + new_line[1] += " Logical" + ssid = ssid_dict.get(ifName, "") + if ssid: + new_line[1] += " " + ssid + new_line[1] = new_line[1].strip() + new_info.append(new_line) + return new_info + +def inventory_if_brocade(info): + info = if_brocade_convert(info) + fix_if_64_highspeed(info) + return inventory_if_common(info) + +def check_if_brocade(item, params, info): + info = if_brocade_convert(info) + fix_if_64_highspeed(info) + return check_if_common(item, params, info) + +check_includes['if_brocade'] = [ "if.include" ] +check_info["if_brocade"] = { + 'check_function': check_if_brocade, + 'inventory_function': inventory_if_brocade, + 'service_description': 'Interface %s', + 'has_perfdata': True, + 'snmp_info': [ + ( ".1.3.6.1.2.1", [ + "2.2.1.1", # ifIndex 0 + "31.1.1.1.1", # ifName (brocade has no useful information if Descr) + "2.2.1.3", # ifType 2 + "31.1.1.1.15", # ifHighSpeed .. 1000 means 1Gbit + "2.2.1.8", # ifOperStatus 4 + "31.1.1.1.6", # ifHCInOctets 5 + "31.1.1.1.7", # ifHCInUcastPkts 6 + "31.1.1.1.8", # ifHCInMulticastPkts 7 + "31.1.1.1.9", # ifHCInBroadcastPkts 8 + "2.2.1.13", # ifInDiscards 9 + "2.2.1.14", # ifInErrors 10 + "31.1.1.1.10", # ifHCOutOctets 11 + "31.1.1.1.11", # ifHCOutUcastPkts 12 + "31.1.1.1.12", # ifHCOutMulticastPkts 13 + "31.1.1.1.13", # ifHCOutBroadcastPkts 14 + "2.2.1.19", # ifOutDiscards 15 + "2.2.1.20", # ifOutErrors 16 + "2.2.1.21", # ifOutQLen 17 + "31.1.1.1.18", # ifAlias 18 + "2.2.1.6", # ifPhysAddress 19 + # Used in order to ignore some logical NICs + "2.2.1.2", # ifDescr 20 + ]), + ( ".1.3.6.1.4.1.2356.11.1.3.56.1", [1, 3] ) # brocade: SSID 21 + ], + # check if number of network interfaces (IF-MIB::ifNumber.0) is at least 2 + 'snmp_scan_function': \ + lambda oid: "Brocade VDX Switch" in oid(".1.3.6.1.2.1.1.1.0") and \ + (not if64_disabled(g_hostname)) and \ + oid(".1.3.6.1.2.1.31.1.1.1.6.*") != None, + 'group': 'if', + 'default_levels_variable': 'if_default_levels', +} diff -Nru check-mk-1.2.2p3/if.include check-mk-1.2.6p12/if.include --- check-mk-1.2.2p3/if.include 2013-11-05 09:23:07.000000000 +0000 +++ check-mk-1.2.6p12/if.include 2015-09-21 10:59:54.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. 
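if_brocade_convert() above merges two SNMP tables: interfaces are identified by ifName, "Point-2-Point" pseudo interfaces are dropped, and interfaces whose ifDescr starts with "Logical Network" get the SSID from the second table appended to their name. A reduced sketch of the same merge with two-column rows and invented sample data (the real table carries the full if64 column layout):

    def merge_logical_ssids(iftab, ssidtab):
        ssid_dict = dict(ssidtab)                 # ifName -> SSID
        new_names = []
        for name, descr in iftab:
            if descr.startswith("Point-2-Point"):
                continue                          # skipped entirely
            if descr.startswith("Logical Network"):
                name = ("%s Logical %s" % (name, ssid_dict.get(name, ""))).strip()
            new_names.append(name)
        return new_names

    print(merge_logical_ssids(
        [("wlan1", "Logical Network"), ("p2p1", "Point-2-Point Link")],
        [("wlan1", "GUEST")]))
    # -> ['wlan1 Logical GUEST']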
@@ -26,7 +26,7 @@ # Functions and definitions used by if and if64 -if_inventory_porttypes = [ '6', '32', '62', '117' ] +if_inventory_porttypes = [ '6', '32', '62', '117', '127', '128', '129', '180', '181', '182', '205','229' ] if_inventory_portstates = [ '1' ] if_inventory_uses_description = False if_inventory_uses_alias = False @@ -82,20 +82,47 @@ '7': 'lowerLayerDown' } return names.get(st, st) +def if_extract_node(line, has_nodeinfo): + if has_nodeinfo: + return line[0], line[1:] + else: + return None, line + +def if_item_matches(item, ifIndex, ifAlias, ifDescr): + return item.lstrip("0") == ifIndex \ + or (item == "0" * len(item) and saveint(ifIndex) == 0) \ + or item == ifAlias \ + or item == ifDescr \ + or item == "%s %s" % (ifAlias, ifIndex) \ + or item == "%s %s" % (ifDescr, ifIndex) # Pads port numbers with zeroes, so that items # nicely sort alphabetically -def if_pad_with_zeroes(info, ifIndex): +def if_pad_with_zeroes(info, ifIndex, has_nodeinfo): + if has_nodeinfo: + index = 1 + else: + index = 0 if if_inventory_pad_portnumbers: - max_index = max([int(line[0]) for line in info]) + def get_index(line): + if type(line[index]) == tuple: + return line[index][1] + else: + return line[index] + + max_index = max([int(get_index(line)) for line in info]) digits = len(str(max_index)) return ("%0"+str(digits)+"d") % int(ifIndex) else: return ifIndex -def inventory_if_common(info): - if len(info) == 0 or len(info[0]) != 20: +def inventory_if_common(info, has_nodeinfo = False): + if has_nodeinfo: + length = 21 + else: + length = 20 + if len(info) == 0 or len(info[0]) != length: return [] settings = host_extra_conf_merged(g_hostname, inventory_if_rules) @@ -103,6 +130,18 @@ uses_alias = settings.get('use_alias', if_inventory_uses_alias) porttypes = settings.get('porttypes', if_inventory_porttypes) portstates = settings.get('portstates', if_inventory_portstates) + match_alias = settings.get('match_alias') + + def alias_matches(alias): + if match_alias == None: + return True + for regex in match_alias: + if get_regex(regex).match(alias): + return True + return False + + # Allow main check to set no port type (e.g. hitachi_hnas_fc_if) + porttypes.append("") pre_inventory = [] pre_inventory_groups = [] @@ -110,46 +149,73 @@ for line in host_extra_conf(g_hostname, if_groups): for entry in line: - if str(entry["iftype"]) in porttypes: - group_patterns[entry["name"]] = entry + group_patterns[entry["name"]] = entry seen_items = set([]) duplicate = set([]) have_groups = {} - # Determine whether single, grouped or both - for ifIndex, ifDescr, ifType, ifSpeed, ifOperStatus, ifInOctets, \ + for line in info: + node, line = if_extract_node(line, has_nodeinfo) + ifIndex, ifDescr, ifType, ifSpeed, ifOperStatus, ifInOctets, \ inucast, inmcast, inbcast, ifInDiscards, ifInErrors, ifOutOctets, \ outucast, outmcast, outbcast, ifOutDiscards, ifOutErrors, \ - ifOutQLen, ifAlias, ifPhysAddress in info: + ifOutQLen, ifAlias, ifPhysAddress = line + + ifGroup = None + if type(ifIndex) == tuple: + ifGroup, ifIndex = ifIndex + ifDescr = cleanup_if_strings(ifDescr) ifAlias = cleanup_if_strings(ifAlias) + # Fix bug in brocade switches + if ifIndex == '': + continue + + # Fix bug in TP Link switches + ifSpeed = saveint(ifSpeed) + if ifSpeed > 9000000 * 100 * 1000: + ifSpeed /= 1000000 + # compute item now - also for unmonitored ports - in order to see if it is unique. 
if uses_description and ifDescr: item = ifDescr elif uses_alias and ifAlias: item = ifAlias else: - item = if_pad_with_zeroes(info, ifIndex) + item = if_pad_with_zeroes(info, ifIndex, has_nodeinfo) # Prepare grouped interfaces is_only_in_group = False for group_name, pattern in group_patterns.items(): - if pattern["iftype"] == saveint(ifType): - have_groups.setdefault(group_name, {"interfaces": []})["iftype"] = pattern["iftype"] - have_groups[group_name]["interfaces"].append((saveint(ifSpeed), ifOperStatus)) + # Interfaces can be grouped by item and iftype + match_item = not pattern.get("include_items") or (item in pattern["include_items"]) + match_type = not pattern.get("iftype") or (pattern["iftype"] == saveint(ifType)) + if match_item and match_type: + have_groups.setdefault(group_name, {"interfaces": [], + "iftype": pattern.get("iftype"), + "include_items": pattern.get("include_items")}) + have_groups[group_name]["interfaces"].append((saveint(ifSpeed), ifOperStatus, ifType)) if pattern.get("single"): is_only_in_group = True + # The agent output already set this interface to grouped + if ifGroup: + have_groups.setdefault(ifGroup, {"interfaces": [], + "iftype": ifType, + "include_items": []}) + have_groups[ifGroup]["interfaces"].append((saveint(ifSpeed), ifOperStatus, ifType)) + is_only_in_group = True + # Prepare single interfaces if not is_only_in_group: if item in seen_items: # duplicate duplicate.add(item) seen_items.add(item) - if ifType in porttypes and ifOperStatus in portstates: + if ifType in porttypes and ifOperStatus in portstates and alias_matches(ifAlias): params = {} if if_inventory_monitor_state: params["state"] = [ifOperStatus] @@ -167,15 +233,19 @@ one_up = "1" in [pair[1] for pair in values["interfaces"]] group_operStatus = one_up and "1" or "2" - ifType = group_patterns[group_name]["iftype"] - if str(ifType) in porttypes and group_operStatus in portstates: - params = { "iftype": ifType, "aggregate": True } + if group_operStatus in portstates: + params = { "iftype": values["iftype"], + "include_items": values["include_items"], + "aggregate": True } + if if_inventory_monitor_state: - params["state"] = [ group_operStatus ] + params["state"] = [group_operStatus] if ifSpeed != "" and if_inventory_monitor_speed: params["speed"] = total_speed - pre_inventory.append( (group_name, "%r" % params, int(ifIndex)) ) + + # Note: the group interface index is always set to 1 + pre_inventory.append((group_name, "%r" % params, 1)) inventory = [] # Check for duplicate items (e.g. 
when using Alias as item and the alias is not unique) @@ -188,14 +258,40 @@ return inventory -def check_if_common(item, params, info): +def check_if_common(item, params, info, has_nodeinfo = False): # If this item is in an ifgroup create a pseudo interface and pass its data to the common instance # This is done by simply adding the additional group_info data to the already existing info table if params.get("aggregate"): matching_interfaces = [] + node_offset = has_nodeinfo and 1 or 0 + def get_interface_item(line): + ifIndex = line[ 0 + node_offset] + if type(ifIndex) == tuple: + ifGroup, ifIndex = ifIndex + + ifDescr = line[ 1 + node_offset] + ifAlias = line[18 + node_offset] + if if_inventory_uses_description and ifDescr: + if_item = ifDescr + elif if_inventory_uses_alias and ifAlias: + if_item = ifAlias + else: + if_item = if_pad_with_zeroes(info, ifIndex, has_nodeinfo) + return if_item + for element in info: - if params["iftype"] == saveint(element[2]): + ifIndex = element[0 + node_offset] + ifGroup = None + if type(ifIndex) == tuple: + ifGroup, ifIndex = ifIndex + + if ifGroup and ifGroup == item: matching_interfaces.append(element) + else: + match_item = not params.get("include_items") or (get_interface_item(element) in params["include_items"]) + match_type = not params.get("iftype") or (params["iftype"] == saveint(element[2 + node_offset])) + if match_item and match_type: + matching_interfaces.append(element) # Accumulate info over matching_interfaces wrapped = False @@ -207,17 +303,18 @@ "outbcast" : 0, "ifOutDiscards" : 0, "ifOutErrors" : 0, "ifOutQLen" : 0 } - for ifIndex, ifDescr, ifType, ifSpeed, ifOperStatus, ifInOctets, \ + for element in matching_interfaces: + node, line = if_extract_node(element, has_nodeinfo) + ifIndex, ifDescr, ifType, ifSpeed, ifOperStatus, ifInOctets, \ inucast, inmcast, inbcast, ifInDiscards, ifInErrors, ifOutOctets, \ outucast, outmcast, outbcast, ifOutDiscards, ifOutErrors, \ - ifOutQLen, ifAlias, ifPhysAddress in matching_interfaces: - if if_inventory_uses_description and ifDescr: - group_item = ifDescr - elif if_inventory_uses_alias and ifAlias: - group_item = ifAlias - else: - group_item = if_pad_with_zeroes(info, ifIndex) + ifOutQLen, ifAlias, ifPhysAddress = line + + ifGroup = None + if type(ifIndex) == tuple: + ifGroup, ifIndex = ifIndex + if_item = get_interface_item(element) perfdata = [] # Only these values are packed into counters @@ -238,7 +335,7 @@ ( "outdisc", ifOutDiscards), ( "outerr", ifOutErrors) ]: try: - get_counter("if.%s.%s.%s" % (name, item, group_item), this_time, saveint(counter)) + get_rate("if.%s.%s.%s" % (name, item, if_item), this_time, saveint(counter), onwrap=RAISE) except MKCounterWrapped: wrapped = True # continue, other counters might wrap as well @@ -265,6 +362,12 @@ one_up = "1" in [element[4] for element in matching_interfaces] group_operStatus = one_up and "1" or "2" + alias_info = [] + if params.get("iftype"): + alias_info.append("iftype %s" % params["iftype"]) + if params.get("include_items"): + alias_info.append("grouped items") + group_entry = [ "ifgroup%s" % item, # ifIndex item, # ifDescr @@ -285,25 +388,28 @@ group_info["ifOutDiscards"],# ifOutDiscards group_info["ifOutErrors"], # ifOutErrors group_info["ifOutQLen"], # ifOutQLen - "iftype %s" % params["iftype"], # ifAlias + " and ".join(alias_info), "", # ifPhysAddress ] - info.append(group_entry) + # If applicable, signal the check_if_common_single if the counter of the # given interface has wrapped. 
Actually a wrap of the if group itself is unlikely, # however any counter wrap of one of its members causes the accumulation being invalid - return check_if_common_single(item, params, info, wrapped) + return check_if_common_single(item, params, [group_entry], wrapped, has_nodeinfo=has_nodeinfo) - return check_if_common_single(item, params,info) + return check_if_common_single(item, params, info, has_nodeinfo=has_nodeinfo) -def check_if_common_single(item, params, info, force_counter_wrap = False): +def check_if_common_single(item, params, info, force_counter_wrap = False, has_nodeinfo = False): # Params now must be a dict. Some keys might # be set to None targetspeed = params.get("speed") + assumed_speed_in = params.get("assumed_speed_in") + assumed_speed_out = params.get("assumed_speed_out") targetstate = params.get("state") average = params.get("average") unit = params.get("unit") in ["Bit", "bit"] and "Bit" or "B" unit_multiplier = unit == "Bit" and 8.0 or 1.0 + cluster_items = {} # error checking might be turned off if params["errors"]: @@ -313,25 +419,31 @@ # Traffic checking might be turned off if "traffic" in params: - bw_warn, bw_crit = params["traffic"] + bw_warn_param, bw_crit_param = params["traffic"] else: - bw_warn, bw_crit = None, None + bw_warn_param, bw_crit_param = None, None + # Traffic minimum checking might be turned off + if "traffic_minimum" in params: + bw_warn_min_param, bw_crit_min_param = params["traffic_minimum"] + else: + bw_warn_min_param, bw_crit_min_param = None, None - for ifIndex, ifDescr, ifType, ifSpeed, ifOperStatus, ifInOctets, \ + for line in info: + node, line = if_extract_node(line,has_nodeinfo) + ifIndex, ifDescr, ifType, ifSpeed, ifOperStatus, ifInOctets, \ inucast, inmcast, inbcast, ifInDiscards, ifInErrors, ifOutOctets, \ outucast, outmcast, outbcast, ifOutDiscards, ifOutErrors, \ - ifOutQLen, ifAlias, ifPhysAddress in info: + ifOutQLen, ifAlias, ifPhysAddress = line + + ifGroup = None + if type(ifIndex) == tuple: + ifGroup, ifIndex = ifIndex + ifDescr = cleanup_if_strings(ifDescr) ifAlias = cleanup_if_strings(ifAlias) - if item.lstrip("0") == ifIndex \ - or (item == "0" * len(item) and saveint(ifIndex) == 0) \ - or item == ifAlias \ - or item == ifDescr \ - or item == "%s %s" % (ifAlias, ifIndex) \ - or item == "%s %s" % (ifDescr, ifIndex): - + if if_item_matches(item, ifIndex, ifAlias, ifDescr): # Display port number or alias in infotext if that is not part # of the service description anyway if item.lstrip("0") == ifIndex \ @@ -347,6 +459,9 @@ else: infotext = "[%s] " % ifIndex + if node != None: + infotext = "%son %s: " % ( infotext, node ) + state = 0 operstatus = if_statename(str(ifOperStatus)) @@ -361,14 +476,16 @@ # prepare reference speed for computing relative bandwidth usage speed = saveint(ifSpeed) - if speed: + if speed > 9 * 1000 * 1000 * 1000 * 1000: + speed /= (1000 * 1000) ref_speed = speed / 8.0 elif targetspeed: ref_speed = targetspeed / 8.0 else: ref_speed = None + if ifPhysAddress: mac = ":".join(["%02s" % hex(ord(m))[2:] for m in ifPhysAddress]).replace(' ', '0') infotext += 'MAC: %s, ' % mac @@ -385,29 +502,85 @@ else: infotext += "speed unknown" + + bw_warn, bw_crit = bw_warn_param, bw_crit_param + bw_warn_min, bw_crit_min = bw_warn_min_param, bw_crit_min_param + # If the measurement unit is set to bit and the bw levels + # are of type int convert these 'bit' entries to byte + # still reported as bytes to stay compatible with older rrd data + if unit == "Bit": + if type(bw_warn_param) == int: + bw_warn = bw_warn / 8 + if 
type(bw_crit_param) == int: + bw_crit = bw_crit / 8 + if type(bw_warn_min_param) == int: + bw_warn_min = bw_warn_min / 8 + if type(bw_crit_min_param) == int: + bw_crit_min = bw_crit_min / 8 + # convert percentages to absolute values if levels are float # this is only possible if a reference speed is available. if ref_speed: - if bw_warn != None and type(bw_warn) == float: - bw_warn = bw_warn / 100.0 * ref_speed * unit_multiplier - if bw_crit != None and type(bw_crit) == float: - bw_crit = bw_crit / 100.0 * ref_speed * unit_multiplier - + if type(bw_warn_param) == float: + bw_warn = bw_warn_param / 100.0 * ref_speed # bytes + if type(bw_crit_param) == float: + bw_crit = bw_crit_param / 100.0 * ref_speed # bytes + if type(bw_warn_min_param) == float: + bw_warn_min = bw_warn_min_param / 100.0 * ref_speed # bytes + if type(bw_crit_min_param) == float: + bw_crit_min = bw_crit_min_param / 100.0 * ref_speed # bytes # Ignore percentual levels if no reference speed is available else: - if bw_warn != None and type(bw_warn) == float: + if type(bw_warn_param) == float: bw_warn = None - - if bw_crit != None and type(bw_crit) == float: + if type(bw_crit_param) == float: bw_crit = None - - # Even if the measurement unit is set to "bit" the WARN and CRIT levels are - # still reported as bytes to stay compatible with older rrd data - if unit == "Bit": - if bw_crit and bw_crit != None: - bw_crit = bw_crit / 8 - if bw_warn and bw_warn != None: - bw_warn = bw_warn / 8 + if type(bw_warn_min_param) == float: + bw_warn_min = None + if type(bw_crit_min_param) == float: + bw_crit_min = None + + + bw_warn_in, bw_crit_in, bw_warn_out, bw_crit_out = [bw_warn, bw_crit] * 2 + bw_warn_min_in, bw_crit_min_in, bw_warn_min_out, bw_crit_min_out = [bw_warn_min, bw_crit_min] * 2 + for speed_in in [ assumed_speed_in, assumed_speed_out ]: + if not speed_in: + continue + if type(bw_warn_param) == float: + bw_warn_in = bw_warn_param / 100.0 * speed_in / 8 + if type(bw_crit_param) == float: + bw_crit_in = bw_crit_param / 100.0 * speed_in / 8 + if type(bw_warn_min_param) == float: + bw_warn_min_in = bw_warn_min_param / 100.0 * speed_in / 8 + if type(bw_crit_min_param) == float: + bw_crit_min_in = bw_crit_min_param / 100.0 * speed_in / 8 + break + + for speed_out in [ assumed_speed_out, assumed_speed_in ]: + if not speed_out: + continue + if type(bw_warn_param) == float: + bw_warn_out = bw_warn_param / 100.0 * speed_out / 8 + if type(bw_crit_param) == float: + bw_crit_out = bw_crit_param / 100.0 * speed_out / 8 + if type(bw_warn_min_param) == float: + bw_warn_min_out = bw_warn_min_param / 100.0 * speed_out / 8 + if type(bw_crit_min_param) == float: + bw_crit_min_out = bw_crit_min_param / 100.0 * speed_out / 8 + break + + + # Speed in bytes + speed_b_in = assumed_speed_in and assumed_speed_in / 8 or ref_speed + speed_b_out = assumed_speed_out and assumed_speed_out / 8 or ref_speed + + # When the interface is reported as down, there is no need to try to handle + # the performance counters. Most devices do reset the counter values to zero, + # but we spotted devices that report error packets even for down interfaces. + # To deal with it, we simply skip over all performance counter checks for down + # interfaces.
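+ # Note that the early return below skips the get_rate() calls entirely,
+ # so counter state is simply left untouched while the port is down.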
+ if operstatus == "down": + return state, infotext # Performance counters this_time = time.time() @@ -415,22 +588,27 @@ wrapped = False perfdata = [] for name, counter, warn, crit, mmin, mmax in [ - ( "in", ifInOctets, bw_warn, bw_crit, 0, ref_speed), + ( "in", ifInOctets, bw_warn_in, bw_crit_in, 0, speed_b_in), ( "inucast", inucast, None, None, None, None), ( "innucast", saveint(inmcast) + saveint(inbcast), None, None, None, None), ( "indisc", ifInDiscards, None, None, None, None), ( "inerr", ifInErrors, err_warn, err_crit, None, None), - ( "out", ifOutOctets, bw_warn, bw_crit, 0, ref_speed), + ( "out", ifOutOctets, bw_warn_out, bw_crit_out, 0, speed_b_out), ( "outucast", outucast, None, None, None, None), ( "outnucast", saveint(outmcast) + saveint(outbcast), None, None, None, None), ( "outdisc", ifOutDiscards, None, None, None, None), ( "outerr", ifOutErrors, err_warn, err_crit, None, None) ]: try: - timedif, rate = get_counter("if.%s.%s" % (name, item), this_time, saveint(counter)) - if force_counter_wrap: - raise MKCounterWrapped("if.%s.%s" % (name, item), "Forced counter wrap") + if node == None: + rate = get_rate("if.%s.%s" % (name, item), this_time, saveint(counter), onwrap=RAISE) + if force_counter_wrap: + raise MKCounterWrapped("Forced counter wrap") + else: # clustered check needs one counter per variable, item AND NODE + rate = get_rate("if.%s.%s.%s" % (node, name, item), this_time, saveint(counter), onwrap=RAISE) + if force_counter_wrap: + raise MKCounterWrapped("Forced counter wrap") rates.append(rate) perfdata.append( (name, rate, warn, crit, mmin, mmax) ) except MKCounterWrapped: @@ -442,35 +620,51 @@ # If there is a threshold on the bandwidth, we cannot proceed # further (the check would be flapping to green on a wrap) if bw_crit != None: - raise MKCounterWrapped("", "Counter wrap, skipping checks this time") + raise MKCounterWrapped("Counter wrap, skipping checks this time") perfdata = [] else: perfdata.append(("outqlen", saveint(ifOutQLen),"","", unit == "Bit" and "0.0" or "0")) - get_human_readable = lambda traffic: get_bytes_human_readable(traffic * unit_multiplier, 1024, True, unit) - for what, errorrate, okrate, traffic in \ - [ ("in", rates[4], rates[1] + rates[2], rates[0]), - ("out", rates[9], rates[6] + rates[7], rates[5]) ]: - - infotext += ", %s: %s/s" % (what, get_human_readable(traffic)) - - if ref_speed: - perc_used = 100.0 * traffic / ref_speed - infotext += "(%.1f%%)" % perc_used + def format_value(value): + if unit == "Bit": + value = value * 8 + return get_nic_speed_human_readable(value) + else: + return "%s/s" % get_bytes_human_readable(value) + + for what, errorrate, okrate, traffic, speed, bw_warn, bw_crit, bw_warn_min, bw_crit_min in \ + [ ("in", rates[4], rates[1] + rates[2], rates[0], speed_in, bw_warn_in, bw_crit_in, bw_warn_min_in, bw_crit_min_in), + ("out", rates[9], rates[6] + rates[7], rates[5], speed_out, bw_warn_out, bw_crit_out, bw_warn_min_out, bw_crit_min_out) ]: + + infotext += ", %s: %s" % (what, format_value(traffic)) + + if speed: + perc_used = 100.0 * traffic / (speed / 8) + + assumed_info = "" + if assumed_speed_in or assumed_speed_out: + assumed_info = "/" + format_value(speed / 8) + infotext += "(%.1f%%%s)" % (perc_used, assumed_info) # handle computation of average if average: - timedif, traffic_avg = get_average("if.%s.%s.avg" % (what, item), this_time, traffic, average) - infotext += ", %dmin avg: %s/s" % (average, get_human_readable(traffic_avg)) - perfdata.append( ("%s_avg_%d" % (what, average), traffic_avg, bw_warn, bw_crit, 
0, ref_speed) ) + traffic_avg = get_average("if.%s.%s.avg" % (what, item), this_time, traffic, average) + infotext += ", %dmin avg: %s" % (average, format_value(traffic_avg)) + perfdata.append( ("%s_avg_%d" % (what, average), traffic_avg, bw_warn, bw_crit, 0, speed) ) traffic = traffic_avg # apply levels to average traffic # Check bandwidth thresholds if bw_crit != None and traffic >= bw_crit: state = 2 - infotext += ' (!!) >= ' + get_human_readable(bw_crit / unit_multiplier) + "/s" + infotext += ' (!!) >= ' + format_value(bw_crit) elif bw_warn != None and traffic >= bw_warn: state = max(state, 1) - infotext += ' (!) >= ' + get_human_readable(bw_warn / unit_multiplier) + "/s" + infotext += ' (!) >= ' + format_value(bw_warn) + if bw_crit_min != None and traffic < bw_crit_min: + state = 2 + infotext += ' (!!) < ' + format_value(bw_crit_min) + elif bw_warn_min != None and traffic < bw_warn_min: + state = max(state, 1) + infotext += ' (!) < ' + format_value(bw_warn_min) pacrate = okrate + errorrate if pacrate > 0.0: # any packets transmitted? @@ -479,16 +673,41 @@ if errperc > 0: infotext += ", %s-errors: %.2f%%" % (what, errperc) - if err_crit != None and errperc >= err_crit: + if err_crit != None and errperc >= err_crit: state = 2 infotext += "(!!) >= " + str(err_crit) elif err_warn != None and errperc >= err_warn: state = max(state, 1) infotext += "(!) >= " + str(err_warn) - return (state, "%s - %s" % (nagios_state_names[state], infotext), perfdata) + if node: + cluster_items[node] = ( state, infotext, perfdata ) + else: + return (state, infotext, perfdata) + + # If the system is a cluster, we have more than one line per item (one + # per node); results are collected in cluster_items. We choose the node + # with the highest outgoing traffic, since in a cluster environment this + # is likely the master node. + if cluster_items: + maxval = 0 + chosen_node = None + for node, result in cluster_items.items(): + state, infotext, perfdata = result + for entry in perfdata: + name, value = entry[:2] + if name == "out": + maxval = max(maxval, value) + if maxval == value: + chosen_node = node + # If every node had a counter wrap for + # 'out', we use the last node from the list as source + if not chosen_node: + chosen_node = node + return cluster_items[chosen_node] - return (3, "UNKNOWN - no such interface") + return (3, "no such interface") diff -Nru check-mk-1.2.2p3/if_lancom check-mk-1.2.6p12/if_lancom --- check-mk-1.2.2p3/if_lancom 2013-11-05 09:23:07.000000000 +0000 +++ check-mk-1.2.6p12/if_lancom 2015-09-21 10:59:54.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK.
@@ -34,7 +34,7 @@ ssid_dict = dict(ssidtab) new_info = [] for line in iftab: - if line[20].startswith("Point-2-Point"): + if line[20].startswith("P2P") or line[20].startswith("Point-2-Point"): continue else: ifName = line[1] @@ -59,41 +59,46 @@ fix_if_64_highspeed(info) return check_if_common(item, params, info) -check_info['if_lancom'] = (check_if_lancom, "Interface %s", 1, inventory_if_lancom) check_includes['if_lancom'] = [ "if.include" ] -checkgroup_of['if_lancom'] = "if" -check_default_levels['if_lancom'] = "if_default_levels" - -snmp_info['if_lancom'] = [ \ - ( ".1.3.6.1.2.1", [ - "2.2.1.1", # ifIndex 0 - "31.1.1.1.1", # ifName (LANCOM has no useful information if Descr) - "2.2.1.3", # ifType 2 - "31.1.1.1.15", # ifHighSpeed .. 1000 means 1GBit - "2.2.1.8", # ifOperStatus 4 - "31.1.1.1.6", # ifHCInOctets 5 - "31.1.1.1.7", # ifHCInUcastPkts 6 - "31.1.1.1.8", # ifHCInMulticastPkts 7 - "31.1.1.1.9", # ifHCInBroadcastPkts 8 - "2.2.1.13", # ifInDiscards 9 - "2.2.1.14", # ifInErrors 10 - "31.1.1.1.10", # ifHCOutOctets 11 - "31.1.1.1.11", # ifHCOutUcastPkts 12 - "31.1.1.1.12", # ifHCOutMulticastPkts 13 - "31.1.1.1.13", # ifHCOutBroadcastPkts 14 - "2.2.1.19", # ifOutDiscards 15 - "2.2.1.20", # ifOutErrors 16 - "2.2.1.21", # ifOutQLen 17 - "31.1.1.1.18", # ifAlias 18 - "2.2.1.6", # ifPhysAddress 19 - # Used in order to ignore some logical NICs - "2.2.1.2", # ifDescr 20 - ]), - ( ".1.3.6.1.4.1.2356.11.1.3.56.1", [1, 3] ) # LANCOM: SSID 21 - ] - -# check if number of network interfaces (IF-MIB::ifNumber.0) is at least 2 -snmp_scan_functions['if_lancom'] = \ - lambda oid: "LANCOM" in oid("1.3.6.1.2.1.1.1.0") and \ - (not if64_disabled(g_hostname)) and \ - oid(".1.3.6.1.2.1.31.1.1.1.6.*") != None +check_info["if_lancom"] = { + 'check_function': check_if_lancom, + 'inventory_function': inventory_if_lancom, + 'service_description': 'Interface %s', + 'has_perfdata': True, + 'snmp_info': [ + ( ".1.3.6.1.2.1", [ + "2.2.1.1", # ifIndex 0 + "31.1.1.1.1", # ifName (LANCOM has no useful information in ifDescr) + "2.2.1.3", # ifType 2 + "31.1.1.1.15", # ifHighSpeed .. 1000 means 1Gbit + "2.2.1.8", # ifOperStatus 4 + "31.1.1.1.6", # ifHCInOctets 5 + "31.1.1.1.7", # ifHCInUcastPkts 6 + "31.1.1.1.8", # ifHCInMulticastPkts 7 + "31.1.1.1.9", # ifHCInBroadcastPkts 8 + "2.2.1.13", # ifInDiscards 9 + "2.2.1.14", # ifInErrors 10 + "31.1.1.1.10", # ifHCOutOctets 11 + "31.1.1.1.11", # ifHCOutUcastPkts 12 + "31.1.1.1.12", # ifHCOutMulticastPkts 13 + "31.1.1.1.13", # ifHCOutBroadcastPkts 14 + "2.2.1.19", # ifOutDiscards 15 + "2.2.1.20", # ifOutErrors 16 + "2.2.1.21", # ifOutQLen 17 + "31.1.1.1.18", # ifAlias 18 + "2.2.1.6", # ifPhysAddress 19 + # Used in order to ignore some logical NICs + "2.2.1.2", # ifDescr 20 + ]), + ( ".1.3.6.1.4.1.2356.11.1.3.56.1", [1, 3] ) # LANCOM: SSID 21 + ], + # check if number of network interfaces (IF-MIB::ifNumber.0) is at least 2 + 'snmp_scan_function': \ + lambda oid: ("LANCOM" in oid(".1.3.6.1.2.1.1.1.0") or \ + "ELSA" in oid(".1.3.6.1.2.1.1.1.0") or \ + "T-Systems" in oid(".1.3.6.1.2.1.1.1.0")) and \ + (not if64_disabled(g_hostname)) and \ + oid(".1.3.6.1.2.1.31.1.1.1.6.*") != None, + 'group': 'if', + 'default_levels_variable': 'if_default_levels', +} diff -Nru check-mk-1.2.2p3/ifoperstatus check-mk-1.2.6p12/ifoperstatus --- check-mk-1.2.2p3/ifoperstatus 2013-11-05 09:23:07.000000000 +0000 +++ check-mk-1.2.6p12/ifoperstatus 2015-06-24 09:48:37.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . 
\ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. @@ -48,16 +48,21 @@ if not isinstance(targetstate, list): targetstate = ifoperstatus_statename(targetstate) if operstatus == targetstate or (isinstance(targetstate, list) and operstatus in targetstate): - return (0, "OK - status is %s" % operstatus) + return (0, "status is %s" % operstatus) elif operstatus == "up": - return (1, "WARN - port used, but should not be") + return (1, "port used, but should not be") else: - return (2, "CRIT - status is %s" % operstatus) + return (2, "status is %s" % operstatus) - return (3, "UNKNOWN - interface %s missing" % item) + return (3, "interface %s missing" % item) -check_info['ifoperstatus'] = (check_ifoperstatus, "Interface %s", 0, inventory_ifoperstatus) -snmp_info['ifoperstatus'] = ( ".1.3.6.1.2.1.2.2.1", [ 2, 3, 8 ] ) # Never inventorize automatically. let if/if64 be the default -snmp_scan_functions['ifoperstatus'] = lambda oid: False + +check_info["ifoperstatus"] = { + 'check_function': check_ifoperstatus, + 'inventory_function': inventory_ifoperstatus, + 'service_description': 'Interface %s', + 'snmp_info': ('.1.3.6.1.2.1.2.2.1', [2, 3, 8]), + 'snmp_scan_function': lambda oid: False, +} diff -Nru check-mk-1.2.2p3/imm_health check-mk-1.2.6p12/imm_health --- check-mk-1.2.2p3/imm_health 2013-11-05 09:23:07.000000000 +0000 +++ check-mk-1.2.6p12/imm_health 1970-01-01 00:00:00.000000000 +0000 @@ -1,11 +0,0 @@ -title: Check system health via IBM Integrated Management Module (IMM) -agents: snmp -author: Michael Nieporte based on rsa_health by Mathias Kettner -license: GPL -distribution: check_mk -description: - This check monitors the overall system health of a server - via a IBM Integrated Management Module (IMM). - -inventory: - The inventory will find one check per IMM board. diff -Nru check-mk-1.2.2p3/innovaphone_channels check-mk-1.2.6p12/innovaphone_channels --- check-mk-1.2.2p3/innovaphone_channels 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/innovaphone_channels 2015-06-24 09:48:37.000000000 +0000 @@ -0,0 +1,54 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. 
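+# Worked example (illustrative numbers, not from a real device): with
+# idle=5 and total=20 channels the check below computes 75% used;
+# check_innovaphone then compares that against the default levels
+# (75.0, 80.0) and reports WARN.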
+ +innovaphone_channels_default_levels = (75.0, 80.0) + +def inventory_innovaphone_channels(info): + return [ (x[0], 'innovaphone_channels_default_levels') for x in info \ + if x[1] == "Up" and x[2] == "Up" ] + +def check_innovaphone_channels(item, params, info): + for line in info: + if line[0] == item: + link, physical = line[1:3] + if link != 'Up' or physical != 'Up': + return 2, "Link: %s, Physical: %s" % (link, physical) + idle, total = map(float, line[3:]) + perc_free = (idle / total) * 100 + perc_used = 100 - perc_free + message = "(used: %.0f, free: %.0f, total: %.0f)" % (total - idle, idle, total) + return check_innovaphone(params, [[None, perc_used]], '%', message ) + return 3, "No Channel information found" + +check_info["innovaphone_channels"] = { + "check_function" : check_innovaphone_channels, + "inventory_function" : inventory_innovaphone_channels, + "service_description" : "Channel %s", + "has_perfdata" : True, + "group" : "hw_single_channelserature", + "include" : ["innovaphone.include"] +} + diff -Nru check-mk-1.2.2p3/innovaphone_cpu check-mk-1.2.6p12/innovaphone_cpu --- check-mk-1.2.2p3/innovaphone_cpu 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/innovaphone_cpu 2015-06-24 09:48:37.000000000 +0000 @@ -0,0 +1,46 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA.
+ +innovaphone_cpu_default_levels = ( 90.0, 95.0 ) + +def inventory_innovaphone_cpu(info): + return [ ( None, "innovaphone_cpu_default_levels" )] + +def check_innovaphone_cpu(_no_item, params, info): + usage = saveint(info[0][1]) + return check_cpu_util(usage, params) + + +check_info["innovaphone_cpu"] = { + "check_function" : check_innovaphone_cpu, + "inventory_function" : inventory_innovaphone_cpu, + "service_description" : "CPU utilization", + "has_perfdata" : True, + "group" : "cpu_utilization", + "includes" : [ "cpu_util.include" ], + +} + diff -Nru check-mk-1.2.2p3/innovaphone.include check-mk-1.2.6p12/innovaphone.include --- check-mk-1.2.2p3/innovaphone.include 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/innovaphone.include 2015-06-24 09:48:37.000000000 +0000 @@ -0,0 +1,38 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. + +def check_innovaphone(params, info, unit="%", msg=False): + warn, crit = params + current = int(info[0][1]) + message = "Current: %d%s" % (current, unit) + if msg: + message += " " + msg + perf = [ ("usage", current, warn, crit, 0, 100) ] + if current >= crit: + return 2, message, perf + if current >= warn: + return 1, message, perf + return 0, message, perf diff -Nru check-mk-1.2.2p3/innovaphone_licenses check-mk-1.2.6p12/innovaphone_licenses --- check-mk-1.2.2p3/innovaphone_licenses 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/innovaphone_licenses 2015-06-24 09:48:37.000000000 +0000 @@ -0,0 +1,51 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. 
check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. + +innovaphone_licenses_default_levels = ( 90.0, 95.0 ) + +def inventory_innovaphone_licenses(info): + return [ ( None, 'innovaphone_licenses_default_levels') ] + +def check_innovaphone_licenses(_no_item, params, info): + total, used = map(savefloat, info[0]) + perc_used = (used / total) * 100 + warn, crit = params + message = "Used %.0f/%.0f Licenses (%.0f%%)" % (used, total, perc_used) + levels = " Warning/Critical at (%s/%s)" % (warn, crit) + perf = [ ('licenses', used, None, None, 0, total) ] + if perc_used > crit: + return 2, message + levels, perf + if perc_used > warn: + return 1, message + levels, perf + return 0, message, perf + +check_info["innovaphone_licenses"] = { + "check_function" : check_innovaphone_licenses, + "inventory_function" : inventory_innovaphone_licenses, + "service_description" : "Licenses", + "has_perfdata" : True, +} + diff -Nru check-mk-1.2.2p3/innovaphone_mem check-mk-1.2.6p12/innovaphone_mem --- check-mk-1.2.2p3/innovaphone_mem 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/innovaphone_mem 2015-06-24 09:48:37.000000000 +0000 @@ -0,0 +1,43 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA.
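+# innovaphone_mem below delegates to the shared helper from
+# innovaphone.include further up. Rough usage sketch (illustrative values):
+# check_innovaphone((60.0, 70.0), [[None, 65]]) returns
+# (1, "Current: 65%", [("usage", 65, 60.0, 70.0, 0, 100)]).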
+ +innovaphone_mem_default_levels = ( 60.0, 70.0 ) + +def inventory_innovaphone_mem(info): + return [ (None, "innovaphone_mem_default_levels") ] + +def check_innovaphone_mem(_no_item, params, info): + return check_innovaphone(params, info) + +check_info["innovaphone_mem"] = { + "check_function" : check_innovaphone_mem, + "inventory_function" : inventory_innovaphone_mem, + "service_description" : "Memory used", + "has_perfdata" : True, + "group" : "innovaphone_mem", + "include" : ["innovaphone.include"], +} + diff -Nru check-mk-1.2.2p3/innovaphone_priports_l1 check-mk-1.2.6p12/innovaphone_priports_l1 --- check-mk-1.2.2p3/innovaphone_priports_l1 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/innovaphone_priports_l1 2015-06-24 09:48:37.000000000 +0000 @@ -0,0 +1,75 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. + +def inventory_innovaphone_priports_l1(info): + inventory = [] + for line in info: + if line[1] != '1': + inventory.append((line[0], {'err_slip_count' : saveint(line[3])} )) + return inventory + +def check_innovaphone_priports_l1(item, params, info): + states = { + 1 : "Down", + 2 : "UP", + } + + for line in info: + if line[0] == item: + state = 0 + messages = [] + l1state, l1sigloss, l1slip = map(saveint, line[1:]) + state_label = "" + if l1state == 1: + state = 2 + messages.append("Current State is %s" % states[l1state] ) + else: + messages.append("State is UP") + + siglos_per_sec = get_rate("innovaphone_priports_l1." 
+ item, time.time(), l1sigloss ) + if siglos_per_sec > 0: + state = 2 + messages.append("Signal Loss is %.2f/sec(!!)" % siglos_per_sec) + + if l1slip > params.get('err_slip_count', 0): + state = 2 + messages.append("Slip Error Count at " + str(l1slip) ) + return state, ", ".join(messages) + return 3, "Output not found" + +check_info["innovaphone_priports_l1"] = { + "check_function" : check_innovaphone_priports_l1, + "inventory_function" : inventory_innovaphone_priports_l1, + "service_description" : "Port L1 %s", + "has_perfdata" : False, + "snmp_info" : ( ".1.3.6.1.4.1.6666.1.2.1", [ 1, #l1PriLabel + 2, #l1PriState + 5, #l1PriErrSigLoss + 9, #l1PriErrSlip + ] ), + "snmp_scan_function" : lambda oid: oid('.1.3.6.1.2.1.1.2.0') == ".1.3.6.1.4.1.6666", +} + diff -Nru check-mk-1.2.2p3/innovaphone_priports_l2 check-mk-1.2.6p12/innovaphone_priports_l2 --- check-mk-1.2.2p3/innovaphone_priports_l2 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/innovaphone_priports_l2 2015-06-24 09:48:37.000000000 +0000 @@ -0,0 +1,74 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. 
+ +def inventory_innovaphone_priports_l2(info): + inventory = [] + for line in info: + if line[1] != '1': + inventory.append((line[0], {'mode' : saveint(line[2])} )) + return inventory + +def check_innovaphone_priports_l2(item, params, info): + modes = { + 1 : "TE", + 2 : "NT", + } + + states = { + 1 : "Down", + 2 : "UP", + } + + for line in info: + if line[0] == item: + state = 0 + l2state, l2mode = map(saveint, line[1:]) + state_label = "" + if l2state == 1: + state = 2 + state_label = "(!!)" + + mode_label = "" + if l2mode != params['mode']: + state = 2 + mode_label = "(!!)" + + return state, "State: %s%s, Mode: %s%s" % \ + ( states[l2state], state_label, modes[l2mode], mode_label ) + return 3, "Output not found" + +check_info["innovaphone_priports_l2"] = { + "check_function" : check_innovaphone_priports_l2, + "inventory_function" : inventory_innovaphone_priports_l2, + "service_description" : "Port L2 %s", + "has_perfdata" : False, + "snmp_info" : ( ".1.3.6.1.4.1.6666.1.1.1", [ 1, #l2Label + 2, #l2State + 3 #l2Mode + ] ), + "snmp_scan_function" : lambda oid: oid('.1.3.6.1.2.1.1.2.0') == ".1.3.6.1.4.1.6666", +} + diff -Nru check-mk-1.2.2p3/innovaphone_temp check-mk-1.2.6p12/innovaphone_temp --- check-mk-1.2.2p3/innovaphone_temp 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/innovaphone_temp 2015-06-24 09:48:37.000000000 +0000 @@ -0,0 +1,43 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. 
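+# The check below hands the raw reading to check_temperature() from
+# temperature.include; levels are (warn, crit) in degrees C. Sketch
+# (result format approximate): check_temperature(46, (45, 50)) -> WARN,
+# check_temperature(51, (45, 50)) -> CRIT.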
+ +innovaphone_temp_default_levels = (45, 50) + +def inventory_innovaphone_temp(info): + return [ (None, "innovaphone_temp_default_levels") ] + +def check_innovaphone_temp(_no_item, params, info): + return check_temperature(int(info[0][1]), params) + +check_info["innovaphone_temp"] = { + "check_function" : check_innovaphone_temp, + "inventory_function" : inventory_innovaphone_temp, + "service_description" : "Temperature", + "has_perfdata" : True, + "group" : "hw_single_temperature", + "include" : [ "temperature.include" ], +} + diff -Nru check-mk-1.2.2p3/inventory.py check-mk-1.2.6p12/inventory.py --- check-mk-1.2.2p3/inventory.py 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/inventory.py 2015-09-21 10:59:54.000000000 +0000 @@ -0,0 +1,286 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. + +import gzip + +inventory_output_dir = var_dir + "/inventory" +inventory_pprint_output = True + +# .--Plugins-------------------------------------------------------------. +# | ____ _ _ | +# | | _ \| |_ _ __ _(_)_ __ ___ | +# | | |_) | | | | |/ _` | | '_ \/ __| | +# | | __/| | |_| | (_| | | | | \__ \ | +# | |_| |_|\__,_|\__, |_|_| |_|___/ | +# | |___/ | +# +----------------------------------------------------------------------+ +# | Code for reading the inventory plugins, help functions that are | +# | called by the plugins. +# '----------------------------------------------------------------------' + +# Plugins register here +inv_info = {} # Inventory plugins +inv_export = {} # Inventory export hooks + +# Read all inventory plugins right now +filelist = glob.glob(inventory_dir + "/*") +filelist.sort() + +# read local checks *after* shipped ones! 
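+# Net effect of the reordering below: shipped includes, then local includes,
+# then shipped plugins, then local plugins.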
+if local_inventory_dir: + local_files = glob.glob(local_inventory_dir + "/*") + local_files.sort() + filelist += local_files + + +# read include files always first, but still in the sorted +# order with local ones last (possibly overriding variables) +filelist = [ f for f in filelist if f.endswith(".include") ] + \ + [ f for f in filelist if not f.endswith(".include") ] + +for f in filelist: + if not f.endswith("~"): # ignore emacs-like backup files + try: + execfile(f) + except Exception, e: + sys.stderr.write("Error in inventory plugin file %s: %s\n" % (f, e)) + if opt_debug: + raise + sys.exit(5) + + +# Function for accessing the inventory tree of the current host +# Example: path = "software.packages:17." +# The path must end with : or . +# -> software is a dict +# -> packages is a list +def inv_tree(path): + global g_inv_tree + + node = g_inv_tree + current_what = "." + current_path = "" + + while path: + parts = re.split("[:.]", path) + name = parts[0] + what = path[len(name)] + path = path[1 + len(name):] + current_path += what + name + + if current_what == '.': # node is a dict + if name not in node: + if what == '.': + node[name] = {} + else: + node[name] = [] + node = node[name] + + else: # node is a list + try: + index = int(name) + except: + raise MKGeneralException("Cannot convert index %s of path %s into int" % (name, current_path)) + + if type(node) != list: + raise MKGeneralException("Path %s is expected to be of type list, but is dict" % current_path) + + if index < 0 or index >= len(node): + raise MKGeneralException("Index %d does not exist in list node %s" % (index, current_path)) + node = node[index] + + current_what = what + + return node + + +# Removes empty nodes from a (sub)-tree. Returns +# True if the tree itself is empty +def inv_cleanup_tree(tree): + + if type(tree) == dict: + for key, value in tree.items(): + if inv_cleanup_tree(value): + del tree[key] + return not tree + + elif type(tree) == list: + to_delete = [] + for nr, entry in enumerate(tree): + if inv_cleanup_tree(entry): + to_delete.append(nr) + for nr in to_delete[::-1]: + del tree[nr] + return not tree + + else: + return False # cannot clean non-container nodes + +#. + .--Inventory-----------------------------------------------------------. +# | ___ _ | +# | |_ _|_ ____ _____ _ __ | |_ ___ _ __ _ _ | +# | | || '_ \ \ / / _ \ '_ \| __/ _ \| '__| | | | | +# | | || | | \ V / __/ | | | || (_) | | | |_| | | +# | |___|_| |_|\_/ \___|_| |_|\__\___/|_| \__, | | +# | |___/ | +# +----------------------------------------------------------------------+ +# | Code for doing the actual inventory | +# '----------------------------------------------------------------------' + + +def do_inv(hostnames): + + if not os.path.exists(inventory_output_dir): + os.makedirs(inventory_output_dir) + + # No hosts specified: do all hosts and force caching + if hostnames == None: + hostnames = all_active_hosts() + global opt_use_cachefile + opt_use_cachefile = True + + errors = [] + for hostname in hostnames: + try: + try: + ipaddress = lookup_ipaddress(hostname) + except: + raise MKGeneralException("Cannot resolve hostname '%s'." % hostname) + + if opt_verbose: + sys.stdout.write("Doing HW/SW-Inventory for %s..."
% hostname) + sys.stdout.flush() + + do_inv_for(hostname, ipaddress) + run_inv_export_hooks(hostname, g_inv_tree) + if opt_verbose: + sys.stdout.write("OK\n") + except Exception, e: + if opt_debug: + raise + if opt_verbose: + sys.stdout.write("Failed: %s\n" % e) + else: + errors.append("Failed to inventorize %s: %s" % (hostname, e)) + + if errors: + raise MKGeneralException("\n".join(errors)) + + +def do_inv_check(hostname): + try: + do_inv([hostname]) + num_entries = count_nodes(g_inv_tree) + if not num_entries: + sys.stdout.write("WARN - Found no data\n") + sys.exit(1) + else: + sys.stdout.write("OK - found %d entries\n" % num_entries) + sys.exit(0) + except Exception, e: + if opt_debug: + raise + sys.stdout.write("WARN - Inventory failed: %s\n" % e) + sys.exit(1) + + +def count_nodes(tree): + if type(tree) == dict: + return len(tree) + sum([count_nodes(v) for v in tree.values()]) + elif type(tree) == list: + return len(tree) + sum([count_nodes(v) for v in tree]) + elif tree == None: + return 0 + else: + return 1 + +def do_inv_for(hostname, ipaddress): + global g_inv_tree + g_inv_tree = {} + + for secname, plugin in inv_info.items(): + try: + info = get_realhost_info(hostname, ipaddress, secname, 999999999999, ignore_check_interval = True) + except Exception, e: + if str(e): + raise # Otherwise simply ignore missing agent section + continue + + if not info: # section not present (None or []) + # Note: this also excludes existing sections without info.. + continue + + if opt_verbose: + sys.stdout.write(tty_green + tty_bold + secname + " " + tty_normal) + sys.stdout.flush() + + plugin["inv_function"](info) + + # Remove empty paths + inv_cleanup_tree(g_inv_tree) + + if inventory_pprint_output: + import pprint + r = pprint.pformat(g_inv_tree) + else: + r = repr(g_inv_tree) + + path = inventory_output_dir + "/" + hostname + if g_inv_tree: + file(path, "w").write(r + "\n") + gzip.open(path + ".gz", "w").write(r + "\n") + + else: + if os.path.exists(path): # Remove empty inventory files. Important for host inventory icon + os.remove(path) + if os.path.exists(path + ".gz"): + os.remove(path + ".gz") + + # Inform Livestatus about the latest inventory update + file(inventory_output_dir + "/.last", "w") + + if opt_verbose: + sys.stdout.write("..%s%s%d%s entries" % (tty_bold, tty_yellow, count_nodes(g_inv_tree), tty_normal)) + sys.stdout.flush() + +def run_inv_export_hooks(hostname, tree): + for hookname, ruleset in inv_exports.items(): + entries = host_extra_conf(hostname, ruleset) + if entries: + if opt_verbose: + sys.stdout.write(", running %s%s%s%s..." % (tty_blue, tty_bold, hookname, tty_normal)) + sys.stdout.flush() + params = entries[0] + try: + inv_export[hookname]["export_function"](hostname, params, tree) + except Exception, e: + if opt_debug: + raise + raise MKGeneralException("Failed to execute export hook %s: %s" % ( + hookname, e)) + + Binary files /tmp/tmpz1J_py/xquijR6Ryw/check-mk-1.2.2p3/inventory.tar.gz and /tmp/tmpz1J_py/GXS8RRKTxu/check-mk-1.2.6p12/inventory.tar.gz differ diff -Nru check-mk-1.2.2p3/ipmi check-mk-1.2.6p12/ipmi --- check-mk-1.2.2p3/ipmi 2013-11-05 09:23:07.000000000 +0000 +++ check-mk-1.2.6p12/ipmi 2015-09-21 10:59:54.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . 
\ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. @@ -127,14 +127,14 @@ if name == item: perfdata = [ (name, val + unit) ] # TODO: add warn and crit levels if status == 'ok': - return (0, "OK - %s is %s %s" % (name, val, unit), perfdata) + return (0, "%s is %s %s" % (name, val, unit), perfdata) elif status == 'nc': - return (1, "WARN - %s is %s %s" % (name, val, unit), perfdata) + return (1, "%s is %s %s" % (name, val, unit), perfdata) else: - return (2, "CRIT - %s is %s %s" % (name, val, unit), perfdata) - return (3, 'UNKNOWN - item %s not found' % item) + return (2, "%s is %s %s" % (name, val, unit), perfdata) + return (3, 'item %s not found' % item) except: - return (3, "UNKNOWN - invalid or incomplete output from agent") + return (3, "invalid or incomplete output from agent") def check_ipmi_summarized(info): worst_status = 0 @@ -143,36 +143,34 @@ count = 0 ambient_count = 0 ambient_sum = 0.0 - try: - for name, val, unit, status, unrec_low, crit_low, \ - warn_low, warn_high, crit_high, unrec_high in info: - # Skip datasets which have no valid data (zero value, no unit and state nc) - if val == '0.000' and unit == 'unspecified' and status == 'nc': - continue - - if ipmi_ignore_entry(name, status): - continue - - text = "%s is %s" % (name, val) - if unit != 'unspecified': - text += ' %s' % unit - count += 1 - if status == 'nc': - worst_status = max(worst_status, 1) - warn_texts.append(text) - elif status == 'nr' and ipmi_ignore_nr: + + for name, val, unit, status, unrec_low, crit_low, \ + warn_low, warn_high, crit_high, unrec_high in info: + # Skip datasets which have no valid data (zero value, no unit and state nc) + if val == '0.000' and unit == 'unspecified' and status == 'nc': + continue + + if ipmi_ignore_entry(name, status): + continue + + text = "%s is %s" % (name, val) + if unit != 'unspecified': + text += ' %s' % unit + count += 1 + if status == 'nc': + worst_status = max(worst_status, 1) + warn_texts.append(text) + elif status == 'nr' and ipmi_ignore_nr: + pass + elif status != 'ok': + worst_status = 2 + crit_texts.append(text) + if "amb" in name or "Ambient" in name: + try: + ambient_count += 1 + ambient_sum += float(val) + except: pass - elif status != 'ok': - worst_status = 2 - crit_texts.append(text) - if "amb" in name or "Ambient" in name: - try: - ambient_count += 1 - ambient_sum += float(val) - except: - pass - except: - return (3, "UNKNOWN - invalid or incomplete output from agent") if ambient_count > 0: @@ -180,7 +178,6 @@ else: perfdata = [] - statname = { 0: "OK", 1:"WARN", 2:"CRIT" }[worst_status] if worst_status == 0: infotext = [ "%d sensors OK" % count ] else: @@ -189,12 +186,18 @@ infotext.append("CRIT are: %s" % ", ".join(crit_texts)) if len(warn_texts) > 0: infotext.append("WARN are: %s" % ", ".join(warn_texts)) - return (worst_status, "%s - %s" % (statname, ' - '.join(infotext)), perfdata) + return (worst_status, ' - '.join(infotext), perfdata) -check_info['ipmi'] = (check_ipmi, "IPMI Sensor %s", 1, inventory_ipmi) # Make sure, configuration variables needed during check time are present # in precompiled code check_config_variables.append("ipmi_ignore_nr") check_config_variables.append("ipmi_ignored_sensors") + +check_info["ipmi"] = { + 'check_function': check_ipmi, + 'inventory_function': inventory_ipmi, + 'service_description': 
'IPMI Sensor %s', + 'has_perfdata': True, +} diff -Nru check-mk-1.2.2p3/ipmi_sensors check-mk-1.2.6p12/ipmi_sensors --- check-mk-1.2.2p3/ipmi_sensors 2013-11-05 09:23:07.000000000 +0000 +++ check-mk-1.2.6p12/ipmi_sensors 2015-06-24 09:48:37.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. @@ -116,11 +116,17 @@ or status == "battery_presence_detected" \ or status == "Drive_Presence" \ or status.startswith("Fully_Redundant"): - return (0, "OK - " + infotext, perfdata) + return (0, infotext, perfdata) else: - return (2, "CRIT - " + infotext, perfdata) + return (2, infotext, perfdata) - return (3, "UNKNOWN - item %s not found" % item) + return (3, "item %s not found" % item) -check_info['ipmi_sensors'] = (check_freeipmi, "IPMI Sensor %s", 1, inventory_freeipmi) + +check_info["ipmi_sensors"] = { + 'check_function': check_freeipmi, + 'inventory_function': inventory_freeipmi, + 'service_description': 'IPMI Sensor %s', + 'has_perfdata': True, +} diff -Nru check-mk-1.2.2p3/ironport_misc check-mk-1.2.6p12/ironport_misc --- check-mk-1.2.2p3/ironport_misc 2013-11-05 09:23:07.000000000 +0000 +++ check-mk-1.2.6p12/ironport_misc 2015-06-24 09:48:37.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. @@ -26,12 +26,12 @@ def check_ironport_misc(item, params, info): - return (3, "UNKNOWN - Sorry. Check not implemented in this version.") + return (3, "Sorry. Check not implemented in this version.") -check_info['ironport_misc'] = ( - check_ironport_misc, - "%s", - 0, - no_inventory_possible) -checkgroup_of['ironport_misc'] = "obsolete" + +check_info["ironport_misc"] = { + 'check_function': check_ironport_misc, + 'service_description': '%s', + 'group': 'obsolete', +} diff -Nru check-mk-1.2.2p3/j4p_performance check-mk-1.2.6p12/j4p_performance --- check-mk-1.2.2p3/j4p_performance 2013-11-05 09:23:07.000000000 +0000 +++ check-mk-1.2.6p12/j4p_performance 2015-09-21 10:59:54.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. @@ -24,6 +24,9 @@ # to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, # Boston, MA 02110-1301 USA. +# WARNING: These checks are deprecated and will be removed soon. 
+# Please use jolokia_* instead + # MB warn, crit j4p_performance_mem_default_levels = (1000, 2000) # Number of threads warn, crit @@ -114,7 +117,7 @@ warn, crit = params parsed = j4p_performance_parse(info) if item not in parsed: - return (3, "UNKNOWN - data not found in agent output") + return (3, "data not found in agent output") d = parsed[item] mb = 1024 * 1024.0 heap = saveint(d["HeapMemoryUsage"]) / mb @@ -124,22 +127,31 @@ ("nonheap", non_heap, warn, crit) ] infotext = "%.0f MB total (%.0f MB heap, %.0f MB non-heap), levels at %.0f/%.0f" % (total, heap, non_heap, warn, crit) if total >= crit: - return (2, "CRIT - " + infotext, perfdata) + return (2, infotext, perfdata) elif total >= warn: - return (1, "WARN - " + infotext, perfdata) + return (1, infotext, perfdata) else: - return (0, "OK - " + infotext, perfdata) + return (0, infotext, perfdata) + +check_info["j4p_performance.mem"] = {'check_function': check_j4p_performance_mem, + 'default_levels_variable': None, + 'group': 'j4p_performance.mem', + 'has_perfdata': True, + 'inventory_function': lambda i: inventory_j4p_performance(i, "mem"), + 'node_info': False, + 'service_description': 'JMX %s Memory', + 'snmp_info': None, + 'snmp_scan_function': None} def check_j4p_performance_threads(item, params, info): warn, crit = params parsed = j4p_performance_parse(info) if item not in parsed: - return (3, "UNKNOWN - data not found in agent output") + return (3, "data not found in agent output") d = parsed[item] this_time = time.time() - wrapped = False perfdata = [] output = [] status = 0 @@ -153,25 +165,30 @@ status = 1 # Calculate the thread increase rate - try: - timedif, rate = get_counter("j4p_performance.threads.%s" % item, this_time, val) - output.append('ThreadRate: %0.2f' % rate) - perfdata.append(('ThreadRate', rate)) - except MKCounterWrapped: - wrapped = True + rate = get_rate("j4p_performance.threads.%s" % item, this_time, val) + output.append('ThreadRate: %0.2f' % rate) + perfdata.append(('ThreadRate', rate)) perfdata.append((key, val)) output.append('%s: %d' % (key, val)) - # Only process the perfdata when no wrap occured - if wrapped: - return (status, '%s - %s' % (nagios_state_names[status], ', '.join(output))) - else: - return (status, '%s - %s' % (nagios_state_names[status], ', '.join(output)), perfdata) + + return (status, ', '.join(output), perfdata) + +check_info["j4p_performance.threads"] = {'check_function': check_j4p_performance_threads, + 'default_levels_variable': None, + 'group': 'j4p_performance.threads', + 'has_perfdata': True, + 'inventory_function': lambda i: inventory_j4p_performance(i, "threads"), + 'node_info': False, + 'service_description': 'JMX %s Threads', + 'snmp_info': None, + 'snmp_scan_function': None} + def check_j4p_performance_uptime(item, _unused, info): parsed = j4p_performance_parse(info) if item not in parsed: - return (3, "UNKNOWN - data not found in agent output") + return (3, "data not found in agent output") uptime = saveint(parsed[item]['Uptime']) / 1000 seconds = uptime % 60 @@ -181,25 +198,46 @@ days = rem / 1440 now = int(time.time()) since = time.strftime("%c", time.localtime(now - uptime)) - return (0, "OK - up since %s (%dd %02d:%02d:%02d)" % (since, days, hours, minutes, seconds), [ ("uptime", uptime) ]) + return (0, "up since %s (%dd %02d:%02d:%02d)" % (since, days, hours, minutes, seconds), [ ("uptime", uptime) ]) + +check_info["j4p_performance.uptime"] = {'check_function': check_j4p_performance_uptime, + 'default_levels_variable': None, + 'group': 'j4p_performance.uptime', + 
'has_perfdata': True, + 'inventory_function': lambda i: inventory_j4p_performance(i, "uptime"), + 'node_info': False, + 'service_description': 'JMX %s Uptime', + 'snmp_info': None, + 'snmp_scan_function': None} def check_j4p_performance_app_state(item, _unused, info): app = j4p_performance_app(info, item.split()) if not app or not 'Running' in app: - return (3, "UNKNOWN - data not found in agent output") + return (3, "data not found in agent output") if app['Running'] == '1': - return (0, 'OK - application is running') + return (0, 'application is running') else: - return (2, 'CRIT - application is not running (Running: %s)') + return (2, 'application is not running (Running: %s)' % app['Running']) + + +check_info["j4p_performance.app_state"] = {'check_function': check_j4p_performance_app_state, + 'default_levels_variable': None, + 'group': 'j4p_performance.app_state', + 'has_perfdata': False, + 'inventory_function': lambda i: inventory_j4p_performance_apps(i, "app_state"), + 'node_info': False, + 'service_description': 'JMX %s State', + 'snmp_info': None, + 'snmp_scan_function': None} def check_j4p_performance_app_sess(item, params, info): lo_crit, lo_warn, hi_warn, hi_crit = params app = j4p_performance_app(info, item.split()) if not app or not 'Sessions' in app: - return (3, "UNKNOWN - data not found in agent output") + return (3, "data not found in agent output") sess = saveint(app['Sessions']) status = 0 @@ -217,15 +255,25 @@ status = 1 status_txt = ' (Above or equal %d)' % lo_crit - return (status, '%s - %d Sessions%s' % (nagios_state_names[status], sess, status_txt), + return (status, '%d Sessions%s' % (sess, status_txt), [('sessions', sess, hi_warn, hi_crit)]) +check_info["j4p_performance.app_sess"] = {'check_function': check_j4p_performance_app_sess, + 'default_levels_variable': None, + 'group': 'j4p_performance.app_sess', + 'has_perfdata': True, + 'inventory_function': lambda i: inventory_j4p_performance_apps(i, "app_sess"), + 'node_info': False, + 'service_description': 'JMX %s Sessions', + 'snmp_info': None, + 'snmp_scan_function': None} + def check_j4p_performance_serv_req(item, params, info): lo_crit, lo_warn, hi_warn, hi_crit = params serv = j4p_performance_serv(info, item.split()) if not serv or not 'Requests' in serv: - return (3, "UNKNOWN - data not found in agent output") + return (3, "data not found in agent output") req = saveint(serv['Requests']) status = 0 @@ -245,27 +293,18 @@ output = ['Requests: %d%s' % (req, status_txt)] perfdata = [('Requests', req, hi_warn, hi_crit)] - wrapped = False this_time = time.time() - try: - timedif, rate = get_counter("j4p_performance.serv_req.%s" % item, this_time, req) - output.append('RequestRate: %0.2f' % rate) - perfdata.append(('RequestRate', rate)) - except MKCounterWrapped: - wrapped = True - - if wrapped: - return (status, '%s - %s' % (nagios_state_names[status], ', '.join(output))) - else: - return (status, '%s - %s' % (nagios_state_names[status], ', '.join(output)), perfdata) - - -# General JVM checks -check_info["j4p_performance.mem"] = ( check_j4p_performance_mem, "JMX %s Memory", 1, lambda i: inventory_j4p_performance(i, "mem")) -check_info["j4p_performance.threads"] = ( check_j4p_performance_threads, "JMX %s Threads", 1, lambda i: inventory_j4p_performance(i, "threads")) -check_info["j4p_performance.uptime"] = ( check_j4p_performance_uptime, "JMX %s Uptime", 1, lambda i: inventory_j4p_performance(i, "uptime")) -# App specific checks -check_info["j4p_performance.app_state"] = ( check_j4p_performance_app_state, "JMX %s State", 0, lambda
i: inventory_j4p_performance_apps(i, "app_state")) -check_info["j4p_performance.app_sess"] = ( check_j4p_performance_app_sess, "JMX %s Sessions", 1, lambda i: inventory_j4p_performance_apps(i, "app_sess")) -# Servlet specific checks -check_info["j4p_performance.serv_req"] = ( check_j4p_performance_serv_req, "JMX %s Requests", 1, lambda i: inventory_j4p_performance_serv(i, "serv_req")) + rate = get_rate("j4p_performance.serv_req.%s" % item, this_time, req) + output.append('RequestRate: %0.2f' % rate) + perfdata.append(('RequestRate', rate)) + return (status, ', '.join(output), perfdata) + +check_info["j4p_performance.serv_req"] = {'check_function': check_j4p_performance_serv_req, + 'default_levels_variable': None, + 'group': 'j4p_performance.serv_req', + 'has_perfdata': True, + 'inventory_function': lambda i: inventory_j4p_performance_serv(i, "serv_req"), + 'node_info': False, + 'service_description': 'JMX %s Requests', + 'snmp_info': None, + 'snmp_scan_function': None} diff -Nru check-mk-1.2.2p3/j4p_performance.app_sess check-mk-1.2.6p12/j4p_performance.app_sess --- check-mk-1.2.2p3/j4p_performance.app_sess 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/j4p_performance.app_sess 2015-06-24 09:48:36.000000000 +0000 @@ -0,0 +1,7 @@ +title: J4P Performance: Sessions - DEPRECATED +agents: linux +catalog: app/java +license: GPL +distribution: check_mk +description: + Do not use this plugin. It will be removed soon. Use jolokia_metrics instead. diff -Nru check-mk-1.2.2p3/j4p_performance.app_state check-mk-1.2.6p12/j4p_performance.app_state --- check-mk-1.2.2p3/j4p_performance.app_state 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/j4p_performance.app_state 2015-06-24 09:48:36.000000000 +0000 @@ -0,0 +1,7 @@ +title: J4P Performance: Application status - DEPRECATED +agents: linux +catalog: app/java +license: GPL +distribution: check_mk +description: + Do not use this plugin. It will be removed soon. Use jolokia_metrics instead. diff -Nru check-mk-1.2.2p3/j4p_performance.mem check-mk-1.2.6p12/j4p_performance.mem --- check-mk-1.2.2p3/j4p_performance.mem 2013-11-05 09:23:07.000000000 +0000 +++ check-mk-1.2.6p12/j4p_performance.mem 2015-06-24 09:48:36.000000000 +0000 @@ -1,9 +1,11 @@ -title: Check memory usage of Java application server via JMX and j4p.war +title: Memory usage of Java application server via JMX and j4p.war - DEPRECATED agents: linux -author: Mathias Kettner +catalog: app/java license: GPL distribution: check_mk description: + Do not use this plugin. It will be removed soon. Use jolokia_metrics instead. + This checks uses the JSON HTTP service of j4p (from jmx4perl) to monitor the current memory usage of a Java application server like Tomcat or JBoss. You can define levels against the total memory diff -Nru check-mk-1.2.2p3/j4p_performance.serv_req check-mk-1.2.6p12/j4p_performance.serv_req --- check-mk-1.2.2p3/j4p_performance.serv_req 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/j4p_performance.serv_req 2015-06-24 09:48:36.000000000 +0000 @@ -0,0 +1,7 @@ +title: J4P Performance: Requests - DEPRECATED +agents: linux +catalog: app/java +license: GPL +distribution: check_mk +description: + Do not use this plugin. It will be removed soon. Use jolokia_metrics instead. 
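The j4p_performance hunks above all apply the same counter-API migration: the old get_counter() returned a (timedif, rate) pair and callers had to catch MKCounterWrapped themselves, while the new get_rate() returns the rate directly. A minimal before/after sketch, assuming the 1.2.6 counter API exactly as it is used in these hunks (onwrap=RAISE, seen in the if.include hunk further up, restores the raising behaviour):

    # old (1.2.2): tuple return, explicit wrap handling
    try:
        timedif, rate = get_counter("j4p_performance.serv_req.%s" % item, this_time, req)
        perfdata.append(('RequestRate', rate))
    except MKCounterWrapped:
        wrapped = True

    # new (1.2.6): the rate is returned directly and a wrap no longer
    # needs a local handler
    rate = get_rate("j4p_performance.serv_req.%s" % item, this_time, req)
    perfdata.append(('RequestRate', rate))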
diff -Nru check-mk-1.2.2p3/j4p_performance.threads check-mk-1.2.6p12/j4p_performance.threads --- check-mk-1.2.2p3/j4p_performance.threads 2013-11-05 09:23:07.000000000 +0000 +++ check-mk-1.2.6p12/j4p_performance.threads 2015-06-24 09:48:36.000000000 +0000 @@ -1,9 +1,11 @@ -title: Check threads of Java application server via JMX and j4p.war +title: Threads of Java application server via JMX and j4p.war - DEPRECATED agents: linux -author: Lars Michelsen +catalog: app/java license: GPL distribution: check_mk description: + Do not use this plugin. It will be removed soon. Use jolokia_metrics instead. + This check uses the JSON HTTP service of j4p (from jmx4perl) to monitor the number of threads created by a Java application server like Tomcat or JBoss. You can define levels against the number of threads diff -Nru check-mk-1.2.2p3/j4p_performance.uptime check-mk-1.2.6p12/j4p_performance.uptime --- check-mk-1.2.2p3/j4p_performance.uptime 2013-11-05 09:23:07.000000000 +0000 +++ check-mk-1.2.6p12/j4p_performance.uptime 2015-06-24 09:48:36.000000000 +0000 @@ -1,9 +1,11 @@ -title: Check uptime of Java application server via JMX and j4p.war +title: Uptime of Java application server via JMX and j4p.war - DEPRECATED agents: linux -author: Lars Michelsen +catalog: app/java license: GPL distribution: check_mk description: + Do not use this plugin. It will be removed soon. Use jolokia_metrics instead. + This check uses the JSON HTTP service of j4p (from jmx4perl) to monitor the uptime of a Java application server like Tomcat or JBoss. diff -Nru check-mk-1.2.2p3/jar_signature check-mk-1.2.6p12/jar_signature --- check-mk-1.2.2p3/jar_signature 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/jar_signature 2015-06-24 09:48:37.000000000 +0000 @@ -0,0 +1,86 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# tails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA.
+ +import datetime, time + +def inventory_jar_signature(info): + inventory = [] + for line in info: + if line[0].startswith("[[["): + f = line[0][3:-3] + inventory.append((f, {})) + return inventory + +def check_jar_signature(item, params, info): + in_block = False + details = [] + in_cert = False + cert = [] + for line in info: + line = (" ".join(line)).strip() + if line == "[[[%s]]]" % item: + in_block = True + elif in_block and line.startswith("[[["): + break + elif in_block and line.startswith("X.509"): + in_cert = True + cert = [line] + elif in_block and in_cert and line.startswith("[") and not line.startswith("[entry was signed on"): + in_cert = False + cert.append(line) + details.append(cert) + + if not details: + return (2, "No certificate found") + + cert_dn, cert_valid = details[0] + + # [certificate is valid from 3/26/12 11:26 AM to 3/26/17 11:36 AM] + # [certificate will expire on 7/4/13 4:13 PM] + if "will expire on " in cert_valid: + to = cert_valid.split("will expire on ", 1)[1][:-1] + else: + to = cert_valid.split("to ", 1)[1][:-1] + to_dt = datetime.datetime(*time.strptime(to, '%m/%d/%y %I:%M %p')[:6]) + + warn, crit = 60, 30 + + state = 0 + status_txt = "" + if to_dt < datetime.datetime.now() + datetime.timedelta(days = crit): + state = 2 + status_txt = " (less than %d days)" % crit + elif to_dt < datetime.datetime.now() + datetime.timedelta(days = warn): + state = 1 + status_txt = " (less than %d days)" % warn + + return state, "Certificate expires on %s%s (%s)" % (to, status_txt, cert_dn) + +check_info['jar_signature'] = { + "service_description" : "Jar-Signature %s", + "check_function" : check_jar_signature, + "inventory_function" : inventory_jar_signature, +} Binary files /tmp/tmpz1J_py/xquijR6Ryw/check-mk-1.2.2p3/jasperreports/classes/livestatus/LivestatusDatasource.class and /tmp/tmpz1J_py/GXS8RRKTxu/check-mk-1.2.6p12/jasperreports/classes/livestatus/LivestatusDatasource.class differ Binary files /tmp/tmpz1J_py/xquijR6Ryw/check-mk-1.2.2p3/jasperreports/classes/livestatus/LivestatusFieldsProvider.class and /tmp/tmpz1J_py/GXS8RRKTxu/check-mk-1.2.6p12/jasperreports/classes/livestatus/LivestatusFieldsProvider.class differ Binary files /tmp/tmpz1J_py/xquijR6Ryw/check-mk-1.2.2p3/jasperreports/classes/livestatus/LivestatusQueryExecuter.class and /tmp/tmpz1J_py/GXS8RRKTxu/check-mk-1.2.6p12/jasperreports/classes/livestatus/LivestatusQueryExecuter.class differ Binary files /tmp/tmpz1J_py/xquijR6Ryw/check-mk-1.2.2p3/jasperreports/classes/livestatus/LivestatusQueryExecuterFactory.class and /tmp/tmpz1J_py/GXS8RRKTxu/check-mk-1.2.6p12/jasperreports/classes/livestatus/LivestatusQueryExecuterFactory.class differ Binary files /tmp/tmpz1J_py/xquijR6Ryw/check-mk-1.2.2p3/jasperreports/livestatus.jar and /tmp/tmpz1J_py/GXS8RRKTxu/check-mk-1.2.6p12/jasperreports/livestatus.jar differ diff -Nru check-mk-1.2.2p3/jasperreports/Makefile check-mk-1.2.6p12/jasperreports/Makefile --- check-mk-1.2.2p3/jasperreports/Makefile 2013-11-05 09:23:08.000000000 +0000 +++ check-mk-1.2.6p12/jasperreports/Makefile 2015-06-24 09:48:37.000000000 +0000 @@ -5,7 +5,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. 
diff -Nru check-mk-1.2.2p3/jasperreports/src/LivestatusDatasource.java check-mk-1.2.6p12/jasperreports/src/LivestatusDatasource.java --- check-mk-1.2.2p3/jasperreports/src/LivestatusDatasource.java 2013-11-05 09:22:37.000000000 +0000 +++ check-mk-1.2.6p12/jasperreports/src/LivestatusDatasource.java 2013-11-05 09:58:00.000000000 +0000 @@ -60,7 +60,7 @@ if( column < 0 || column >= data.get(0).size() ) { throw new JRException("Unknown Field:" + fieldname); } - + String value = (data.get(m_nIdx)).get(column); // Cast string value according to type if( map_fieldtypes.containsKey(fieldname) ){ diff -Nru check-mk-1.2.2p3/jasperreports/src/LivestatusFieldsProvider.java check-mk-1.2.6p12/jasperreports/src/LivestatusFieldsProvider.java --- check-mk-1.2.2p3/jasperreports/src/LivestatusFieldsProvider.java 2013-11-05 09:22:37.000000000 +0000 +++ check-mk-1.2.6p12/jasperreports/src/LivestatusFieldsProvider.java 2013-11-05 09:58:00.000000000 +0000 @@ -1,14 +1,12 @@ package livestatus; import java.util.ArrayList; -import java.util.Iterator; import java.util.Map; import java.util.Vector; import net.sf.jasperreports.engine.JRDataset; import net.sf.jasperreports.engine.JRException; import net.sf.jasperreports.engine.JRField; -import net.sf.jasperreports.engine.JRValueParameter; import net.sf.jasperreports.engine.design.JRDesignField; import com.jaspersoft.ireport.designer.FieldsProvider; diff -Nru check-mk-1.2.2p3/jasperreports/src/LivestatusQueryExecuter.java check-mk-1.2.6p12/jasperreports/src/LivestatusQueryExecuter.java --- check-mk-1.2.2p3/jasperreports/src/LivestatusQueryExecuter.java 2013-11-05 09:22:37.000000000 +0000 +++ check-mk-1.2.6p12/jasperreports/src/LivestatusQueryExecuter.java 2013-11-05 09:58:00.000000000 +0000 @@ -40,17 +40,25 @@ public static void main(String[] args){ try { //new LivestatusQueryExecuter("localhost 6561\nGET services\nColumns: host_name check_command").createDatasource(); - String query = "localhost 6561\n"+ - "GET statehist\n" + - "Columns: host_name service_description\n"+ - "Filter: time >= 1344195720\n"+ - "Filter: time <= 1344195776\n"+ - "Filter: host_name = localhost\n"+ - "Stats: sum duration_ok\n"+ - "Stats: sum duration_warning\n"+ - "Stats: sum duration_critical"; - +// String query = "localhost 6557\n"+ +// "GET statehist\n" + +// "Columns: host_name service_description\n"+ +// "Filter: time >= 1344195720\n"+ +// "Filter: time <= 1344195776\n"+ +// "Filter: host_name = localhost\n"+ +// "Stats: sum duration_ok\n"+ +// "Stats: sum duration_warning\n"+ +// "Stats: sum duration_critical"; + String query = "localhost 6557\n"+ + "GET statehist\n" + + "Columns: service_description time state duration duration_part log_output\n" + +// "Filter: service_description ~ /fshome" + + "Filter: time >= 1351724400"; + + JRDataSource sourci = new LivestatusQueryExecuter(query, null).createDatasource(); + + } catch (JRException e) { // TODO Auto-generated catch block e.printStackTrace(); @@ -61,12 +69,13 @@ public LivestatusQueryExecuter(String query, Map parameters) { this.jasper_query = query; this.parameters = new HashMap(); - for ( Object key : parameters.keySet() ) { - if ( parameters.get(key) == null ) { - continue; + if (parameters != null) + for ( Object key : parameters.keySet() ) { + if ( parameters.get(key) == null ) { + continue; + } + this.parameters.put(key.toString(), parameters.get(key).toString()); } - this.parameters.put(key.toString(), parameters.get(key).toString()); - } // LivestatusQueryExecuterFactory.logFile(this.jasper_query); // for (String key : 
this.parameters.keySet()) @@ -161,7 +170,6 @@ // setup socket and in/output streams setupSocket(); - // query the column types and their descriptions String table_name = livestatus_query.split("\n")[0].split(" ")[1]; String desc_query = String.format("GET columns\nFilter: table = %s\nColumnHeaders: off\nResponseHeader: fixed16\nOutputFormat: csv\nSeparators: 10 1 44 124\nKeepAlive: on\n\n", table_name); @@ -219,7 +227,7 @@ if( line == null ) break; ArrayList tmp_array = new ArrayList(); - for( String field : line.split("\001") ){ + for( String field : line.split("\001",-1) ){ tmp_array.add(field); } livestatus_data.add(tmp_array); diff -Nru check-mk-1.2.2p3/job check-mk-1.2.6p12/job --- check-mk-1.2.2p3/job 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/job 2015-09-21 10:59:54.000000000 +0000 @@ -0,0 +1,176 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. + +#<<>> +#==> asd ASD <== +#start_time 1389355839 +#exit_code 0 +#real_time 0:00.00 +#user_time 0.00 +#system_time 0.00 +#reads 0 +#writes 0 +#max_res_kbytes 1968 +#avg_mem_kbytes 0 +# +# +#==> test <== +#start_time 1389352839 +#exit_code 0 +#real_time 0:00.00 +#user_time 0.00 +#system_time 0.00 +#reads 0 +#writes 0 +#max_res_kbytes 1984 +#avg_mem_kbytes 0 + +factory_settings["job_default_levels"] = { + "age": ( 0, 0 ) # disabled as default +} + +def inventory_job(info): + inventory = [] + for line in info: + if line[0] == '==>': + item = ' '.join(line[1:-1]) + if not item.endswith('.running'): + inventory.append( (item, {} ) ) + return inventory + +def job_parse_real_time(s): + parts = s.split(':') + min_sec, hour_sec = 0, 0 + if len(parts) == 3: + hour_sec = int(parts[0]) * 60 * 60 + if len(parts) >= 2: + min_sec = int(parts[-2]) * 60 + return float(parts[-1]) + min_sec + hour_sec + +def job_parse(item, info): + data = {} + prefix = None + for line in info: + if line[0] == '==>': + + if ' '.join(line[1:-1]) == item: + prefix = '' + + elif ' '.join(line[1:-1]) == item + '.running': + # There might be a second section per job, the contents of the + # .running file which exists during execution of the job. + # We use the start_time from this file. 
+ prefix = 'running_' + + elif 'start_time' in data.keys() and 'running_start_time' in data.keys(): + break # both sections completed => we are done here + + else: + prefix = None + + elif len(line) == 2 and prefix is not None: + key, val = line + # Convert several keys/values + if key == 'real_time': + val = job_parse_real_time(val) + elif key in [ 'user_time', 'system_time' ]: + val = float(val) + elif key in [ 'exit_code', 'invol_context_switches', 'vol_context_switches', 'start_time' ]: + val = int(val) + elif key in [ 'max_res_kbytes', 'avg_mem_kbytes' ]: + key = key.replace('kbytes', 'bytes') + val = int(val) * 1000 + data[prefix+key] = val + + return data + +def check_job(item, params, info): + warn, crit = params.get('age') + job = job_parse(item, info) + if not job: + return 3, 'Got no information for this job' + + def process_start_time(value, state, warn, crit): + display_value = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(value) ) + job_age = time.time() - value + if crit > 0 and job_age >= crit: + state = max(state, 2) + display_value += "(!!) (more than %s ago)" % get_age_human_readable(crit) + elif warn > 0 and job_age >= warn: + state = max(state, 1) + display_value += "(!!) (more than %s ago)" % get_age_human_readable(warn) + return state, display_value + + state = 0 + output = [] + perfdata = [] + + if 'running_start_time' in job: + output.append('Currently running') + state, display_value = process_start_time(job['running_start_time'], state, warn, crit) + output.append('(Started: %s)' % display_value) + return state, ' '.join(output) + + txt = 'Exit-Code: %d' % job['exit_code'] + if job['exit_code'] != 0: + state = max(state, 2) + txt += ' (!!)' + output.append(txt) + + for key, title, value in [ + ('start_time', 'Started', job['start_time']), + ('real_time', 'Real-Time', job['real_time']), + ('user_time', 'User-Time', job['user_time']), + ('system_time', 'System-Time', job['system_time']), + ('reads', 'Filesystem Reads', job['reads']), + ('writes', 'Filesystem Writes', job['writes']), + ('max_res_bytes', 'Max. Memory', job['max_res_bytes']), + ('avg_mem_bytes', 'Avg. Memory', job['avg_mem_bytes']), + ('vol_context_switches', 'Vol. Context Switches', job['vol_context_switches']), + ('invol_context_switches', 'Invol. Context Switches', job['invol_context_switches']), + ]: + if key in [ 'max_res_bytes', 'avg_mem_bytes' ]: + display_value = get_bytes_human_readable(value, 1000) + elif key in [ 'real_time', 'user_time', 'system_time' ]: + display_value = get_age_human_readable(value) + elif key == 'start_time': + state, display_value = process_start_time(value, state, warn, crit) + else: + display_value = value + + output.append('%s: %s' % (title, display_value)) + perfdata.append((key, value)) + + return state, ', '.join(output), perfdata + +check_info["job"] = { + 'check_function' : check_job, + 'inventory_function' : inventory_job, + 'service_description' : 'Job %s', + 'default_levels_variable' : 'job_default_levels', + 'group' : 'job', + 'has_perfdata' : True, +} diff -Nru check-mk-1.2.2p3/jolokia.cfg check-mk-1.2.6p12/jolokia.cfg --- check-mk-1.2.2p3/jolokia.cfg 2013-10-12 17:49:41.000000000 +0000 +++ check-mk-1.2.6p12/jolokia.cfg 1970-01-01 00:00:00.000000000 +0000 @@ -1,50 +0,0 @@ -# +------------------------------------------------------------------+ -# | ____ _ _ __ __ _ __ | -# | / ___| |__ ___ ___| | __ | \/ | |/ / | -# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | -# | | |___| | | | __/ (__| < | | | | . 
\ | -# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | -# | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | -# +------------------------------------------------------------------+ -# -# This file is part of Check_MK. -# The official homepage is at http://mathias-kettner.de/check_mk. -# -# check_mk is free software; you can redistribute it and/or modify it -# under the terms of the GNU General Public License as published by -# the Free Software Foundation in version 2. check_mk is distributed -# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- -# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A -# PARTICULAR PURPOSE. See the GNU General Public License for more de- -# ails. You should have received a copy of the GNU General Public -# License along with GNU Make; see the file COPYING. If not, write -# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, -# Boston, MA 02110-1301 USA. - -# Default settings or settings for only one -# instance: - -# Settings for authentication -# server = "127.0.0.1" -# user = "monitoring" -# password = None -# mode = "digest" -suburi = "jolokia" -instance = None - -# Configuration for multiple instances. Not-specified -# values will be taken from the upper settings -# instances = [ -# { -# "port" : 8080, -# "instance" : "FOO", -# }, -# { -# "server" : "10.1.88.5", -# "port" : 8081, -# "instance" : "BAR", -# "user" : "harri", -# "password" : "hirsch", -# } -# ] diff -Nru check-mk-1.2.2p3/jolokia_info check-mk-1.2.6p12/jolokia_info --- check-mk-1.2.2p3/jolokia_info 2013-11-05 09:23:07.000000000 +0000 +++ check-mk-1.2.6p12/jolokia_info 2015-06-24 09:48:37.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. @@ -30,11 +30,14 @@ def check_jolokia_info(item, _no_params, info): for line in info: if line[0] == item: + if line[1] == 'ERROR' or len(line) < 4: + return (2, "Jolokia not running") + product, version, jolokia_version = line[1:] - return (0, "OK - %s %s (Jolokia version %s)" % ( + return (0, "%s %s (Jolokia version %s)" % ( product.title(), version, jolokia_version)) - return (3, "UNKNOWN - No data from agent, server might be down") + return (3, "No data from agent, server might be down") check_info["jolokia_info"] = { "service_description" : "JVM %s", diff -Nru check-mk-1.2.2p3/jolokia_metrics check-mk-1.2.6p12/jolokia_metrics --- check-mk-1.2.2p3/jolokia_metrics 2013-11-05 09:23:07.000000000 +0000 +++ check-mk-1.2.6p12/jolokia_metrics 2015-09-21 10:59:54.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. 
@@ -38,14 +38,40 @@ jolokia_metrics_queue_default_levels = ( 20, 50 ) +# Garbage collections count and time per minute +jolokia_metrics_gc_default_levels = { + 'CollectionTime': (None, None), + 'CollectionCount': (None, None) +} + +# Tomcat ThreadPools Count/Busy in relation to max value +jolokia_metrics_tp_default_levels = { + 'currentThreadsBusy': (80, 90), + 'currentThreadsCount': (None, None) +} def jolokia_metrics_parse(info): parsed = {} for inst, var, value in info: - app, servlet = None, None + app, servlet, gc, tp = None, None, None, None if ',' in inst: parts = inst.split(',') - if len(parts) == 3: + if "GarbageCollector" in inst: + part1 = inst.split(',java.lang:name=') + inst = part1[0] + part2 = part1[1].split(',type=') + gc = part2[0] + elif "ThreadPool" in inst: + if "Catalina" in inst: + part1 = inst.split(',Catalina:name=') + elif "Standalone" in inst: + part1 = inst.split(',Standalone:name=') + else: + continue + inst = part1[0] + part2 = part1[1].split(',type=') + tp = part2[0].replace('"','') + elif len(parts) == 3: inst, app, servlet = parts else: inst, app = parts @@ -61,6 +87,14 @@ parsed[inst].setdefault('apps', {}) parsed[inst]['apps'].setdefault(app, {}) parsed[inst]['apps'][app][var] = value + elif gc: + parsed[inst].setdefault('gc', {}) + parsed[inst]['gc'].setdefault(gc, {}) + parsed[inst]['gc'][gc][var] = value + elif tp: + parsed[inst].setdefault('tp', {}) + parsed[inst]['tp'].setdefault(tp, {}) + parsed[inst]['tp'][tp][var] = value else: parsed[inst][var] = value return parsed @@ -74,7 +108,6 @@ return None return parsed[inst]['apps'][app] - # This bisects info from BEA and passes on to jolokia_metrics_app def jolokia_metrics_serv(info, (inst, app, serv)): app = jolokia_metrics_app(info, (inst, app)) @@ -82,6 +115,19 @@ return None return app['servlets'][serv] +def jolokia_metrics_gc(info, (inst, typ, gc)): + parsed = jolokia_metrics_parse(info) + if not inst in parsed \ + or not gc in parsed[inst].get('gc', {}): + return None + return parsed[inst]['gc'][gc] + +def jolokia_metrics_tp(info, (inst, typ, tp)): + parsed = jolokia_metrics_parse(info) + if not inst in parsed \ + or not tp in parsed[inst].get('tp', {}): + return None + return parsed[inst]['tp'][tp] def inventory_jolokia_metrics(info, what): parsed = jolokia_metrics_parse(info) @@ -92,7 +138,14 @@ elif what == 'threads': levels = 'jolokia_metrics_threads_default_levels' - return [ (k, levels) for k in parsed ] + for instance, data in parsed.items(): + if what == 'uptime' and "Uptime" not in data: + continue + if what == 'mem' and ("HeapMemoryUsage" not in data or "NonHeapMemoryUsage" not in data + or "HeapMemoryMax" not in data or "NonHeapMemoryMax" not in data): + # don't add memory check if we don't have the necessary data + continue + yield instance, levels def inventory_jolokia_metrics_apps(info, what): @@ -157,13 +210,33 @@ inv.append(('%s %s %s' % (inst, app, serv), levels)) return inv +def inventory_jolokia_metrics_gc(info, what): + inv = [] + parsed = jolokia_metrics_parse(info) + levels = None + if what == 'gc': + needed_key = ["CollectionCount", "CollectionTime"] + for inst, vals in parsed.iteritems(): + for gc, val in vals.get('gc', {}).iteritems(): + inv.append(("%s GC %s" % (inst ,gc) , levels)) + return inv + +def inventory_jolokia_metrics_tp(info, what): + inv = [] + parsed = jolokia_metrics_parse(info) + levels = 'jolokia_metrics_tp_default_levels' + if what == 'tp': + needed_key = ["currentThreadsBusy", "currentThreadCount", "maxThreads"] + for inst, vals in parsed.iteritems(): + 
for tp, val in vals.get('tp', {}).iteritems(): + inv.append(("%s ThreadPool %s" % (inst ,tp) , levels)) + return inv def check_jolokia_metrics_mem(item, params, info): parsed = jolokia_metrics_parse(info) if item not in parsed: return (3, "data not found in agent output") - # convert old parameter version ( warn, crit ) # represented levels of total heap if type(params) == tuple: @@ -172,11 +245,17 @@ # rename totalheap to total # this block can be removed in the future (today 22.02.13) if "totalheap" in params: + params = params.copy() params["total"] = params["totalheap"] del params["totalheap"] d = parsed[item] mb = 1024 * 1024.0 + + if "HeapMemoryUsage" not in d or "NonHeapMemoryUsage" not in d\ + or "HeapMemoryMax" not in d or "NonHeapMemoryMax" not in d: + return 3, "data in agent output incomplete" + heap = saveint(d["HeapMemoryUsage"]) / mb heapmax = saveint(d.get("HeapMemoryMax",-1)) / mb nonheap = saveint(d["NonHeapMemoryUsage"]) / mb @@ -185,56 +264,57 @@ if heapmax > 0 and nonheapmax > 0: totalmax = heapmax + nonheapmax else: - totalmax = "" - if heapmax < 0: - heapmax = "" - if nonheapmax < 0: - nonheapmax = "" + totalmax = 0 + heapmax = max(0, heapmax) + nonheapmax = max(0, nonheapmax) - state_sign = { 0: "", 1: "(!)", 2: "(!!)" } worst_state = 0 - perfdata = [] - info_list = [] + perfdata = [] + info_list = [] - for (what, value, value_max) in [ - ("heap", heap, heapmax), + for what, value, value_max in [ + ("heap", heap, heapmax), ("nonheap", nonheap, nonheapmax), - ("total", total, totalmax), + ("total", total, totalmax), ]: param_state = 0 level_info = "" used_info = "" if params.get(what): - warn_level = "" - crit_level = "" + warn_level = 0 + crit_level = 0 if type(params[what][0]) == int: + if value_max: + warn_level = value_max - params[what][0] + crit_level = value_max - params[what][1] + if what != "total": - if value_max: - warn_level = value_max - params[what][0] - crit_level = value_max - params[what][1] perfdata.append((what, value, warn_level, crit_level, "", value_max)) + if not value_max: param_state = 0 elif value >= crit_level: param_state = 2 - level_info = "%s(crit at %sMB free)" % (state_sign[2], params[what][1]) + level_info = "%s(crit at %sMB free)" % (state_markers[2], params[what][1]) elif value >= warn_level: param_state = 1 - level_info = "%s(warn at %sMB free)" % (state_sign[1], params[what][0]) + level_info = "%s(warn at %sMB free)" % (state_markers[1], params[what][0]) else: + if value_max: + warn_level = value_max * params[what][0] / 100.0 + crit_level = value_max * params[what][1] / 100.0 + if what != "total": - if value_max: - warn_level = value_max * params[what][0] / 100.0 - crit_level = value_max * params[what][1] / 100.0 perfdata.append((what, value, warn_level, crit_level, "", value_max)) + if not value_max: param_state = 0 elif value >= crit_level: param_state = 2 - level_info = "%s(crit at %s%%)" % (state_sign[2], params[what][1]) + level_info = "%s(crit at %s%%)" % (state_markers[2], params[what][1]) elif value >= warn_level: param_state = 1 - level_info = "%s(warn at %s%%)" % (state_sign[1], params[what][0]) + level_info = "%s(warn at %s%%)" % (state_markers[1], params[what][0]) else: if what != "total": perfdata.append((what, value, "", "", "", value_max)) @@ -259,7 +339,10 @@ output = [] status = 0 for key in [ 'ThreadCount', 'DeamonThreadCount', 'PeakThreadCount', 'TotalStartedThreadCount' ]: - val = saveint(d[key]) + if key not in d: + continue # The keys might be optional (saw jboss only sending ThreadCount) + + val = 
int(d[key]) if key == 'ThreadCount': # Thread count might lead to a warn/crit state if val >= crit: @@ -269,7 +352,7 @@ # Calculate the thread increase rate try: - timedif, rate = get_counter("jolokia_metrics.threads.%s" % item, this_time, val) + rate = get_rate("jolokia_metrics.threads.%s" % item, this_time, val) output.append('ThreadRate: %0.2f' % rate) perfdata.append(('ThreadRate', rate)) except MKCounterWrapped: @@ -283,27 +366,17 @@ else: return (status, ', '.join(output), perfdata) -def check_jolokia_metrics_uptime(item, _unused, info): +def check_jolokia_metrics_uptime(item, params, info): parsed = jolokia_metrics_parse(info) - if item not in parsed: - return (3, "data not found in agent output") - uptime = saveint(parsed[item]['Uptime']) / 1000 - - seconds = uptime % 60 - rem = uptime / 60 - minutes = rem % 60 - hours = (rem % 1440) / 60 - days = rem / 1440 - now = int(time.time()) - since = time.strftime("%c", time.localtime(now - uptime)) - return (0, "up since %s (%dd %02d:%02d:%02d)" % (since, days, hours, minutes, seconds), [ ("uptime", uptime) ]) + if item in parsed: + uptime = int(parsed[item]['Uptime']) / 1000 + return check_uptime_seconds(params, uptime) def check_jolokia_metrics_app_state(item, _unused, info): app_state=3 app = jolokia_metrics_app(info, item.split()) - #print app # FIXME: this could be nicer. if app and "Running" in app: if app['Running'] == '1': @@ -352,10 +425,10 @@ status_txt = ' (Below or equal %d)' % lo_warn elif hi_crit is not None and sess >= hi_crit: status = 2 - status_txt = ' (Above or equal %d)' % lo_warn + status_txt = ' (Above or equal %d)' % hi_crit elif hi_warn is not None and sess >= hi_warn: status = 1 - status_txt = ' (Above or equal %d)' % lo_crit + status_txt = ' (Above or equal %d)' % hi_warn if maxActive and maxActive > 0: status_txt += " (max active sessions: %d)" % maxActive @@ -381,17 +454,17 @@ status_txt = ' (Below or equal %d)' % lo_warn elif hi_crit is not None and req >= hi_crit: status = 2 - status_txt = ' (Above or equal %d)' % lo_warn + status_txt = ' (Above or equal %d)' % hi_crit elif hi_warn is not None and req >= hi_warn: status = 1 - status_txt = ' (Above or equal %d)' % lo_crit + status_txt = ' (Above or equal %d)' % hi_warn output = ['Requests: %d%s' % (req, status_txt)] perfdata = [('Requests', req, hi_warn, hi_crit)] wrapped = False this_time = time.time() try: - timedif, rate = get_counter("jolokia_metrics.serv_req.%s" % item, this_time, req) + rate = get_rate("jolokia_metrics.serv_req.%s" % item, this_time, req) output.append('RequestRate: %0.2f' % rate) perfdata.append(('RequestRate', rate)) except MKCounterWrapped: @@ -432,7 +505,7 @@ for nk in [ "CompletedRequestCount", "requestCount" ]: if nk in app: requests = int(app[nk]) - timedif, rate = get_counter("j4p.bea.requests.%s" % item, time.time(), requests) + rate = get_rate("j4p.bea.requests.%s" % item, time.time(), requests) return (0, "%.2f requests/sec" % rate, [("rate", rate)]) return (3, "data not found in agent output") @@ -441,7 +514,7 @@ def check_jolokia_metrics_bea_threads(item, _no_params, info): app = jolokia_metrics_app(info, item.split()) if not app: - return (3, "application not found") + return (3, "data not found in agent output") perfdata = [] infos = [] @@ -456,7 +529,119 @@ return (0, ", ".join(infos), perfdata) +def check_jolokia_metrics_gc(item, params, info): + gc = jolokia_metrics_gc(info, item.split()) + if gc == None: + return + + if params == None: + params = {} + + + crate = get_rate("jvm.gc.count.%s" % (item), \ + time.time(), 
int(gc['CollectionCount'])) + crate = crate * 60.0 + + ctext = '' + status = 0 + cwarn, ccrit = params.get('CollectionCount', (None, None)) + if cwarn != None and ccrit != None: + if crate >= int(ccrit): + status = 2 + ctext = " (Level %s) " % ccrit + elif crate >= int(cwarn): + status = 1 + ctext = " (Level %s) " % cwarn + + yield status, "%.2f GC Count/minute%s" % (crate, ctext), \ + [('CollectionCount', crate, cwarn, ccrit)] + + if 'CollectionTime' in gc: + twarn, tcrit = params.get('CollectionTime', (None, None)) + trate = get_rate("jvm.gc.time.%s" % (item), \ + time.time(), int(gc['CollectionTime'])) + trate = trate * 60.0 + + ttext = '' + status = 0 + if twarn != None and tcrit != None: + if trate >= int(tcrit): + status = 2 + ttext = "(Level %s) " % tcrit + elif trate >= int(twarn): + status = 1 + ttext = "(Level %s) " % twarn + + yield status, "%.2f GC ms/minute%s" % (trate, ttext), \ + [('CollectionTime', trate, twarn, tcrit)] + + +def check_jolokia_metrics_tp(item, params, info): + tp = jolokia_metrics_tp(info, item.split()) + if tp == None: + return(3,"data not found in agent output") + + if params != None: + if 'currentThreadsBusy' in params: + bwarn, bcrit = params['currentThreadsBusy'] + if bwarn: + bwarn = (int(tp["maxThreads"]) * bwarn) / 100 + if bcrit: + bcrit = (int(tp["maxThreads"]) * bcrit) / 100 + else: + bwarn, bcrit = (None, None) + + if 'currentThreadCount' in params: + cwarn, ccrit = params['currentThreadCount'] + if cwarn: + cwarn = (int(tp["maxThreads"]) * cwarn) / 100 + if ccrit: + ccrit = (int(tp["maxThreads"]) * ccrit) / 100 + else: + cwarn, ccrit = (None, None) + + status = 0 + perf = [] + btext = "" + ctext = "" + + busy = int(tp["currentThreadsBusy"]) + count = int(tp["currentThreadCount"]) + max = int(tp["maxThreads"]) + + if params != None: + if cwarn != None and ccrit != None: + if count >= ccrit: + status = 2 + ctext = "((!!) Level %s) " % ccrit + elif count >= cwarn: + if status != 2: + status = 1 + ctext = "((!) Level %s) " % cwarn + + perf.append(('currentThreadCount', count, cwarn, ccrit, 0, max)) + else: + perf.append(('currentThreadCount', count, "", "", 0, max)) + + if bwarn != None and bcrit != None: + if busy >= bcrit: + status = 2 + btext = "((!!) Level %s) " % bcrit + elif busy >= bwarn: + if status != 2: + status = 1 + btext = "((!) 
Level %s) " % bwarn + + perf.append(('currentThreadsBusy', busy, bwarn, bcrit, 0, max)) + else: + perf.append(('currentThreadsBusy', busy, "", "", 0, max)) + else: + perf = [('currentThreadCount', count, "", "", 0, max), ('currentThreadsBusy', busy, "", "", 0, max)] + + output = "%s currentThreadCount %sand %s currentThreadsBusy %s (maxThreads: %s)" % (count, \ + ctext, busy, btext, max) + return (status, output, perf) # General JVM checks check_info["jolokia_metrics.mem"] = { @@ -472,6 +657,7 @@ "service_description" : "JVM %s Threads", "check_function" : check_jolokia_metrics_threads, "inventory_function" : lambda i: inventory_jolokia_metrics(i, "threads"), + "group" : "jvm_threads", "has_perfdata" : True, } @@ -479,6 +665,24 @@ "service_description" : "JVM %s Uptime", "check_function" : check_jolokia_metrics_uptime, "inventory_function" : lambda i: inventory_jolokia_metrics(i, "uptime"), + "group" : "jvm_uptime", + 'includes' : [ 'uptime.include' ], + "has_perfdata" : True, +} + +check_info["jolokia_metrics.gc"] = { + "service_description" : "JVM %s", + "check_function" : check_jolokia_metrics_gc, + "inventory_function" : lambda i: inventory_jolokia_metrics_gc(i, "gc"), + "group" : "jvm_gc", + "has_perfdata" : True, +} + +check_info["jolokia_metrics.tp"] = { + "service_description" : "JVM %s", + "check_function" : check_jolokia_metrics_tp, + "inventory_function" : lambda i: inventory_jolokia_metrics_tp(i, "tp"), + "group" : "jvm_tp", "has_perfdata" : True, } @@ -496,6 +700,7 @@ "service_description" : "JVM %s Sessions", "check_function" : check_jolokia_metrics_app_sess, "inventory_function" : lambda i: inventory_jolokia_metrics_apps(i, "app_sess"), + "group" : "jvm_sessions", "has_perfdata" : True, } @@ -503,6 +708,7 @@ "service_description" : "JVM %s Requests", "check_function" : check_jolokia_metrics_bea_requests, "inventory_function" : lambda i: inventory_jolokia_metrics_apps(i, "requests"), + "group" : "jvm_requests", "has_perfdata" : True, } @@ -511,6 +717,7 @@ "service_description" : "JVM %s Requests", "check_function" : check_jolokia_metrics_serv_req, "inventory_function" : lambda i: inventory_jolokia_metrics_serv(i, "serv_req"), + "group" : "jvm_requests", "has_perfdata" : True, } @@ -519,6 +726,7 @@ "service_description" : "JVM %s Queue", "check_function" : check_jolokia_metrics_bea_queue, "inventory_function" : lambda i: inventory_jolokia_metrics_apps(i, "queue"), + "group" : "jvm_queue", "has_perfdata" : True, } @@ -526,6 +734,7 @@ "service_description" : "JVM %s Requests", "check_function" : check_jolokia_metrics_bea_requests, "inventory_function" : lambda i: inventory_jolokia_metrics_apps(i, "bea_requests"), + "group" : "jvm_requests", "has_perfdata" : True, } @@ -533,6 +742,7 @@ "service_description" : "JVM %s Threads", "check_function" : check_jolokia_metrics_bea_threads, "inventory_function" : lambda i: inventory_jolokia_metrics_apps(i, "threads"), + "group" : "jvm_threads", "has_perfdata" : True, } @@ -540,6 +750,7 @@ "service_description" : "JVM %s Sessions", "check_function" : check_jolokia_metrics_app_sess, "inventory_function" : lambda i: inventory_jolokia_metrics_apps(i, "bea_app_sess"), + "group" : "jvm_sessions", "has_perfdata" : True, } diff -Nru check-mk-1.2.2p3/jolokia_metrics.app_sess check-mk-1.2.6p12/jolokia_metrics.app_sess --- check-mk-1.2.2p3/jolokia_metrics.app_sess 2013-11-05 09:23:07.000000000 +0000 +++ check-mk-1.2.6p12/jolokia_metrics.app_sess 2015-06-24 09:48:36.000000000 +0000 @@ -1,6 +1,6 @@ title: Number of JVM sessions agents: linux -author: 
Mathias Kettner +catalog: app/java license: GPL distribution: check_mk description: diff -Nru check-mk-1.2.2p3/jolokia_metrics.app_state check-mk-1.2.6p12/jolokia_metrics.app_state --- check-mk-1.2.2p3/jolokia_metrics.app_state 2013-11-05 09:23:07.000000000 +0000 +++ check-mk-1.2.6p12/jolokia_metrics.app_state 2015-06-24 09:48:36.000000000 +0000 @@ -1,6 +1,6 @@ title: State of JVM applications agents: linux -author: Mathias Kettner +catalog: app/java license: GPL distribution: check_mk description: diff -Nru check-mk-1.2.2p3/jolokia_metrics.bea_queue check-mk-1.2.6p12/jolokia_metrics.bea_queue --- check-mk-1.2.2p3/jolokia_metrics.bea_queue 2013-11-05 09:23:07.000000000 +0000 +++ check-mk-1.2.6p12/jolokia_metrics.bea_queue 2015-06-24 09:48:36.000000000 +0000 @@ -1,6 +1,6 @@ -title: Length of queue in BEA +title: BEA Weblogic: Length of queue agents: linux -author: Mathias Kettner +catalog: app/java license: GPL distribution: check_mk description: diff -Nru check-mk-1.2.2p3/jolokia_metrics.bea_requests check-mk-1.2.6p12/jolokia_metrics.bea_requests --- check-mk-1.2.2p3/jolokia_metrics.bea_requests 2013-11-05 09:23:07.000000000 +0000 +++ check-mk-1.2.6p12/jolokia_metrics.bea_requests 2015-06-24 09:48:36.000000000 +0000 @@ -1,6 +1,6 @@ -title: Requests to JVM sessions on BEA Weblogic +title: BEA Weblogic: Requests to JVM sessions agents: linux -author: Mathias Kettner +catalog: app/java license: GPL distribution: check_mk description: diff -Nru check-mk-1.2.2p3/jolokia_metrics.bea_sess check-mk-1.2.6p12/jolokia_metrics.bea_sess --- check-mk-1.2.2p3/jolokia_metrics.bea_sess 2013-11-05 09:23:07.000000000 +0000 +++ check-mk-1.2.6p12/jolokia_metrics.bea_sess 2015-06-24 09:48:36.000000000 +0000 @@ -1,6 +1,6 @@ -title: Number of JVM sessions on BEA Weblogic +title: BEA Weblogic: Number of JVM sessions agents: linux -author: Mathias Kettner +catalog: app/java license: GPL distribution: check_mk description: diff -Nru check-mk-1.2.2p3/jolokia_metrics.bea_threads check-mk-1.2.6p12/jolokia_metrics.bea_threads --- check-mk-1.2.2p3/jolokia_metrics.bea_threads 2013-11-05 09:23:07.000000000 +0000 +++ check-mk-1.2.6p12/jolokia_metrics.bea_threads 2015-06-24 09:48:36.000000000 +0000 @@ -1,6 +1,6 @@ -title: Number of JVM threads +title: BEA Weblogic: Number of JVM threads agents: linux -author: Mathias Kettner +catalog: app/java license: GPL distribution: check_mk description: diff -Nru check-mk-1.2.2p3/jolokia_metrics.gc check-mk-1.2.6p12/jolokia_metrics.gc --- check-mk-1.2.2p3/jolokia_metrics.gc 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/jolokia_metrics.gc 2015-06-24 09:48:36.000000000 +0000 @@ -0,0 +1,46 @@ +title: Java virtual machine garbage collection +agents: linux +catalog: app/java +license: GPL +distribution: check_mk +description: + This check monitors the number of garbage collection runs and the time + spent on garbage collection. It helps to recognize any deviation from the + regular behaviour of the garbage collection. Such deviations could be more + garbage collection runs per minute or an increase in the time the garbage + collection is using per minute. + + Since every Tomcat instance has a different behaviour, there are no default + values set, to avoid false alarms. + + To use this plugin, Jolokia and the agent plugin need to be installed + on the monitored server. + +item: + The name of the application server instance and name of the garbage collection + as sent by the agent.
+ +perfdata: + The following values are returned in the perfdata: + Garbage collection count per minute + Garbage collection time per minute + +inventory: + One check is created per garbage collector sent by the Jolokia agent. + +[parameters] +Collection count + warn (int): a WARN state is triggered by too many collections per minute + crit (int): a CRITICAL state is triggered by too many collections per minute +Collection time + warn (int): a WARN state is triggered by spending too much time on + collections per minute + crit (int): a CRITICAL state is triggered by spending too much time on + collections per minute + +[configuration] +jolokia_metrics_gc_default_levels = { + 'CollectionTime': (None, None), + 'CollectionCount': (None, None) + } + By default no levels are set because each virtual machine requires its own + individual parameters. diff -Nru check-mk-1.2.2p3/jolokia_metrics.mem check-mk-1.2.6p12/jolokia_metrics.mem --- check-mk-1.2.2p3/jolokia_metrics.mem 2013-11-05 09:23:07.000000000 +0000 +++ check-mk-1.2.6p12/jolokia_metrics.mem 2015-06-24 09:48:36.000000000 +0000 @@ -1,6 +1,6 @@ title: Memory usage of a JVM agents: linux -author: Mathias Kettner +catalog: app/java license: GPL distribution: check_mk description: diff -Nru check-mk-1.2.2p3/jolokia_metrics.requests check-mk-1.2.6p12/jolokia_metrics.requests --- check-mk-1.2.2p3/jolokia_metrics.requests 2013-11-05 09:23:07.000000000 +0000 +++ check-mk-1.2.6p12/jolokia_metrics.requests 2015-06-24 09:48:36.000000000 +0000 @@ -1,6 +1,6 @@ -title: Requests to JVM instances +title: BEA Weblogic: Requests to JVM instances agents: linux -author: Mathias Kettner +catalog: app/java license: GPL distribution: check_mk description: diff -Nru check-mk-1.2.2p3/jolokia_metrics.serv_req check-mk-1.2.6p12/jolokia_metrics.serv_req --- check-mk-1.2.2p3/jolokia_metrics.serv_req 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/jolokia_metrics.serv_req 2015-06-24 09:48:36.000000000 +0000 @@ -0,0 +1,51 @@ +title: Number of Requests +agents: linux +catalog: app/java +license: GPL +distribution: check_mk +description: + This check allows monitoring of the number of requests on web application servers. + + To use this check, Jolokia and the {mk_jolokia} agent plugin need to be installed + on the monitored server. + + The check returns {WARN}/{CRIT} if the number of requests is outside the given + ranges. + +item: + The name of the application server instance as configured by the agent. + +examples: + # Number of requests low crit, low warn, high warn, high crit + jolokia_metrics_serv_req_default_levels = (-1, -1, 5000, 6000) + + checks += [ + # set warning level to 10,000 requests and crit to 12,000 + # for instance ExampleServer on machine appserver1 + ( "appserver1", "jolokia_metrics.serv_req", "ExampleServer", (-1, -1, 10000, 12000) ), + ] + +perfdata: + One value: The number of requests. + +inventory: + One check is created for each Jolokia instance sent by the agent. + +[parameters] +lo_crit(int): Return {CRIT} if the number of requests is lower than or equal to + this value. + +lo_warn(int): Return {WARN} if the number of requests is lower than or equal to + this value. + +hi_warn(int): Return {WARN} if the number of requests is higher than or equal to + this value. + +hi_crit(int): Return {CRIT} if the number of requests is higher than or equal to + this value. + +[configuration] +jolokia_metrics_serv_req_default_levels(int, int, int, int): + The default levels to be used for inventorized checks for + low crit, low warn, high warn, high crit.
+ They are preset to {(-1, -1, 5000, 6000)} diff -Nru check-mk-1.2.2p3/jolokia_metrics.threads check-mk-1.2.6p12/jolokia_metrics.threads --- check-mk-1.2.2p3/jolokia_metrics.threads 2013-11-05 09:23:07.000000000 +0000 +++ check-mk-1.2.6p12/jolokia_metrics.threads 2015-06-24 09:48:36.000000000 +0000 @@ -1,6 +1,6 @@ title: Number of JVM threads agents: linux -author: Mathias Kettner +catalog: app/java license: GPL distribution: check_mk description: diff -Nru check-mk-1.2.2p3/jolokia_metrics.tp check-mk-1.2.6p12/jolokia_metrics.tp --- check-mk-1.2.2p3/jolokia_metrics.tp 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/jolokia_metrics.tp 2015-06-24 09:48:36.000000000 +0000 @@ -0,0 +1,39 @@ +title: Tomcat number of threads in thread pool +agents: linux +license: GPL +catalog: app/java +distribution: check_mk +description: + This check monitors the number of threads in a Tomcat thread pool. + It can alert if the thread count exceeds a configured percentage of + maxThreads, or if more than a configured percentage of maxThreads is busy. + By default only levels for currentThreadsBusy are configured. + + To use this plugin, Jolokia and the agent plugin need to be installed + on the monitored server. + +item: + The name of the application server instance as configured by the agent. + +perfdata: + The following values are returned in the perfdata: + currentThreadsBusy + currentThreadsCount + +inventory: + One check is created per thread pool sent by the Jolokia agent. + +[parameters] +warn (int): a WARN state is triggered when count/busy threads reach this percentage of maxThreads +crit (int): a CRIT state is triggered when count/busy threads reach this percentage of maxThreads + +[configuration] +jolokia_metrics_tp_default_levels { 'currentThreadsBusy' : (int, int), + 'currentThreadsCount' : (int, int) }: by default only currentThreadsBusy (80, 90) + is configured. + +examples: + jolokia_metrics_tp_default_levels = { + 'currentThreadsBusy' : (80, 90), + 'currentThreadsCount' : (None, None) + } diff -Nru check-mk-1.2.2p3/jolokia_metrics.uptime check-mk-1.2.6p12/jolokia_metrics.uptime --- check-mk-1.2.2p3/jolokia_metrics.uptime 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/jolokia_metrics.uptime 2015-06-24 09:48:36.000000000 +0000 @@ -0,0 +1,28 @@ +title: Uptime of JVM +agents: linux +catalog: app/java +license: GPL +distribution: check_mk +description: + This check outputs the uptime of the Java virtual machine. + This check can go WARN or CRIT when the uptime is below or above certain + configurable levels. + + To use this plugin, Jolokia and the agent plugin need to be installed + on the monitored server. + +perfdata: + The uptime in seconds. + +inventory: + One check is created for each application running inside a Jolokia + instance sent by the agent. + +[parameters] +parameters (dict): A dictionary with the following optional keys: + + {"min"}: Pair of integers of warn and crit: the minimum required uptime + in seconds. + + {"max"}: Pair of integers of warn and crit: the maximum allowed uptime + in seconds.
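The thread-pool levels documented above are percentages of maxThreads; check_jolokia_metrics_tp converts them into absolute thread counts before comparing. A minimal sketch of that conversion and comparison (the function name and sample numbers are assumptions for illustration; the check itself additionally builds perfdata and message text):

def tp_state(busy, max_threads, levels):
    # Map a busy-thread count to a state, given (warn, crit) percentages of maxThreads.
    warn_pct, crit_pct = levels
    if warn_pct is None or crit_pct is None:
        return 0  # levels disabled, as for currentThreadsCount by default
    warn = max_threads * warn_pct // 100
    crit = max_threads * crit_pct // 100
    if busy >= crit:
        return 2
    if busy >= warn:
        return 1
    return 0

# With maxThreads = 200 and the default (80, 90): warn at 160, crit at 180
print(tp_state(150, 200, (80, 90)))  # 0 (OK)
print(tp_state(165, 200, (80, 90)))  # 1 (WARN)
print(tp_state(185, 200, (80, 90)))  # 2 (CRIT)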
diff -Nru check-mk-1.2.2p3/juniper_bgp_state check-mk-1.2.6p12/juniper_bgp_state --- check-mk-1.2.2p3/juniper_bgp_state 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/juniper_bgp_state 2015-06-24 09:48:37.000000000 +0000 @@ -0,0 +1,82 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. + +def juniper_bgp_state_create_item(oid_end): + return re.sub("6\.1\.[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+\.1\.", "", oid_end) + +def inventory_juniper_bgp_state(info): + inventory = [] + for oid_end, bgp_state, bgp_operational_state in info: + inventory.append( (juniper_bgp_state_create_item(oid_end), None) ) + return inventory + +def check_juniper_bgp_state(item, _no_params, info): + bgp_state_map = [ "undefined", # 0 does not exist + "idle", # 1 + "connect", # 2 + "active", # 3 + "opensent", # 4 + "openconfirm", # 5 + "established"] # 6 + bgp_operational_state_map = [ "undefined", # 0 does not exist + "halted", # 1 + "running"] # 2 + status = 0 + for oid_end, bgp_state, bgp_operational_state in info: + peering_partner_ip = juniper_bgp_state_create_item(oid_end) + bgp_state = int(bgp_state) + bgp_operational_state = int(bgp_operational_state) + + if peering_partner_ip == item: + operational_state_error_string = "" + state_error_string = "" + + if bgp_operational_state != 2: + status = 1 + operational_state_error_string = "(!)" + elif bgp_state != 6: + status = 2 + state_error_string = "(!!)" + + return status, "Status with peer %s is %s%s, operational status: %s%s" \ + % (peering_partner_ip, \ + bgp_state_map[bgp_state], state_error_string, \ + bgp_operational_state_map[bgp_operational_state], \ + operational_state_error_string) + + return 3, "Peering partner %s not configured" % item + +check_info["juniper_bgp_state"] = { + "check_function" : check_juniper_bgp_state, + "inventory_function" : inventory_juniper_bgp_state, + "service_description" : "BGP Status Peer %s", + "snmp_info" : ('.1.3.6.1.4.1.2636.5.1.1.2.1.1.1', [ OID_END, # take peering partner IP from this + 2, # jnxBgpM2PeerState + 3 ]), # jnxBgpM2PeerStatus + # (like operational status) + "snmp_scan_function" : lambda oid: oid(".1.3.6.1.2.1.1.2.0").startswith(".1.3.6.1.4.1.2636.1.1.1.2"), + "has_perfdata" : False, +} diff -Nru check-mk-1.2.2p3/juniper_cpu check-mk-1.2.6p12/juniper_cpu --- check-mk-1.2.2p3/juniper_cpu 1970-01-01 00:00:00.000000000 +0000 +++ 
check-mk-1.2.6p12/juniper_cpu 2015-09-21 10:59:54.000000000 +0000 @@ -0,0 +1,82 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. + +juniper_cpu_default_levels = ( 80.0, 90.0 ) + +def inventory_juniper_cpu(info): + return [ (x[0], "juniper_cpu_default_levels" ) for x in info if x[0].startswith("Routing Engine") ] + +def check_juniper_cpu(item, params, info): + for line in info: + if line[0] == item: + util1, util5, util15 = map(int,line[1:]) + warn, crit = params + label1, label5, label15 = "", "", "" + state = 0 + if util1 >= crit: + state = 2 + label1 = "(!!)" + elif util1 >= warn: + state = 1 + label1 = "(!)" + if util5 >= crit: + state = 2 + label5 = "(!!)" + elif util5 >= warn: + state = max(state,1) + label5 = "(!)" + if util15 >= crit: + state = 2 + label15 = "(!!)" + elif util15 >= warn: + state = max(state,1) + label15 = "(!)" + + perf = [( "util1", util1, warn, crit ), + ( "util5", util5, warn, crit ), + ( "util15", util15, warn, crit )] + + + message = "%d%% 1min%s, %d%% 5min%s, %d%% 15min%s" % \ + ( util1, label1, util5, label5, util15, label15 ) + return state, message, perf + return 3, "Information not found in output" + +check_info["juniper_cpu"] = { + "check_function" : check_juniper_cpu, + "inventory_function" : inventory_juniper_cpu, + "service_description" : "CPU utilization %s", + "has_perfdata" : True, + "snmp_scan_function" : lambda oid: oid(".1.3.6.1.2.1.1.2.0").startswith(".1.3.6.1.4.1.2636.1.1.1.2"), + "snmp_info" : (".1.3.6.1.4.1.2636.3.1.13.1",[ + 5, #jnxOperatingDescr + 20, #jnxOperating1MinLoadAvg + 21, #jnxOperating5MinLoadAvg + 22, #jnxOperating15MinLoadAvg + ]) + + +} diff -Nru check-mk-1.2.2p3/juniper_mem.include check-mk-1.2.6p12/juniper_mem.include --- check-mk-1.2.2p3/juniper_mem.include 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/juniper_mem.include 2015-09-21 10:59:54.000000000 +0000 @@ -0,0 +1,50 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . 
\ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. + +juniper_mem_default_levels = ( 80.0, 90.0 ) + +def inventory_juniper_mem(info): + return [ ( None, "juniper_mem_default_levels" ) ] + +def check_juniper_mem(_no_item, params, info): + usage_kb, mem_size_kb = map(int, info[0]) # Kilobyte + mem_size = mem_size_kb * 1024 + usage = usage_kb * 1024 + usage_perc = (float(usage_kb) / mem_size_kb) * 100 + + warn, crit = params + warn_kb = (mem_size_kb / 100.0) * warn + crit_kb = (mem_size_kb / 100.0) * crit + perf = [( "usage", usage, warn_kb * 1024, crit_kb * 1024, 0, mem_size )] + message = "Used: %s/%s (%.0f%%)" % \ + ( get_bytes_human_readable(usage), get_bytes_human_readable(mem_size), usage_perc ) + levels = " (warn/crit at %.0f%%/%0.f%%)" % ( warn, crit ) + if usage_perc >= crit: + return 2, message + levels, perf + elif usage_perc >= warn: + return 1, message + levels, perf + return 0, message, perf + diff -Nru check-mk-1.2.2p3/juniper_screenos_cpu check-mk-1.2.6p12/juniper_screenos_cpu --- check-mk-1.2.2p3/juniper_screenos_cpu 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/juniper_screenos_cpu 2015-06-24 09:48:37.000000000 +0000 @@ -0,0 +1,62 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. 
+ +juniper_screenos_cpu_default_levels = ( 80.0, 90.0 ) + +def inventory_juniper_screenos_cpu(info): + return [ (None , "juniper_screenos_cpu_default_levels" ) ] + +def check_juniper_screenos_cpu(_no_item, params, info): + util1, util15 = map(float, info[0]) + warn, crit = params + label15 = "" + state = 0 + if util15 >= crit: + state = 2 + label15 = "(!!)" + elif util15 >= warn: + state = max(state,1) + label15 = "(!)" + + perf = [( "util1", util1, warn, crit ), + ( "util15", util15, warn, crit )] + + message = "%d%% 1min, %d%% 15min%s (warn/crit at %d%%/%d%%)" % \ + ( util1, util15, label15, warn, crit ) + return state, message, perf + +check_info["juniper_screenos_cpu"] = { + "check_function" : check_juniper_screenos_cpu, + "inventory_function" : inventory_juniper_screenos_cpu, + "group" : "cpu_utilization", + "service_description" : "CPU utilization", + "has_perfdata" : True, + "snmp_scan_function" : lambda oid: oid(".1.3.6.1.2.1.1.2.0").startswith(".1.3.6.1.4.1.3224.1"), + "snmp_info" : (".1.3.6.1.4.1.3224.16.1",[ + 2, # jnxOperating1MinLoadAvg + 4, # jnxOperating15MinLoadAvg + ]) +} diff -Nru check-mk-1.2.2p3/juniper_screenos_fan check-mk-1.2.6p12/juniper_screenos_fan --- check-mk-1.2.2p3/juniper_screenos_fan 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/juniper_screenos_fan 2015-06-24 09:48:37.000000000 +0000 @@ -0,0 +1,53 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. + + +def inventory_juniper_screenos_fan(info): + # SNMP outputs "Fan 1". 
Our item is just '1' + return [ (line[0].split()[-1], None) for line in info ] + +def check_juniper_screenos_fan(item, params, info): + for fan_id, fan_status in info: + if fan_id.split()[-1] == item: + if fan_status == "1": + return (0, "status is good") + elif fan_status == "2": + return (2, "status is failed") + else: + return (2, "Unknown fan status %s" % fan_status ) + return (3, "Sensor not found in SNMP data") + +check_info["juniper_screenos_fan"] = { + "check_function" : check_juniper_screenos_fan, + "inventory_function" : inventory_juniper_screenos_fan, + "service_description" : "FAN %s", + "has_perfdata" : False, + "snmp_scan_function" : lambda oid: oid(".1.3.6.1.2.1.1.2.0").startswith(".1.3.6.1.4.1.3224.1"), + "snmp_info" : (".1.3.6.1.4.1.3224.21.2.1",[ + 3, # Fan Description + 2, # Fan Status 1 Good, 2 Error + ]) +} diff -Nru check-mk-1.2.2p3/juniper_screenos_mem check-mk-1.2.6p12/juniper_screenos_mem --- check-mk-1.2.2p3/juniper_screenos_mem 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/juniper_screenos_mem 2015-09-21 10:59:54.000000000 +0000 @@ -0,0 +1,46 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. 
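The fan check above maps SNMP status 1 to OK and 2 to CRIT, with any other value also treated as CRIT. A standalone sketch of that mapping (status values invented):

def fan_status_state(fan_status):
    # Mirrors check_juniper_screenos_fan: "1" = good, "2" = failed
    if fan_status == "1":
        return (0, "status is good")
    elif fan_status == "2":
        return (2, "status is failed")
    return (2, "Unknown fan status %s" % fan_status)

print(fan_status_state("2"))  # (2, 'status is failed')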
+ +def parse_juniper_screenos_mem(info): + new_info = [] + for used, free in info: + new_info.append([int(used)/1024, (int(used)+int(free))/1024]) + return new_info + +check_info["juniper_screenos_mem"] = { + "parse_function" : parse_juniper_screenos_mem, + "check_function" : check_juniper_mem, + "inventory_function" : inventory_juniper_mem, + "group" : "juniper_mem", + "service_description" : "Memory used", + "has_perfdata" : True, + "snmp_scan_function" : lambda oid: oid(".1.3.6.1.2.1.1.2.0").startswith(".1.3.6.1.4.1.3224.1"), + "snmp_info" : (".1.3.6.1.4.1.3224.16.2",[ + 1.0, # Memory bytes used + 2.0, # Memory bytes free + ]), + "includes": [ "juniper_mem.include" ], +} diff -Nru check-mk-1.2.2p3/juniper_screenos_temp check-mk-1.2.6p12/juniper_screenos_temp --- check-mk-1.2.2p3/juniper_screenos_temp 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/juniper_screenos_temp 2015-06-24 09:48:37.000000000 +0000 @@ -0,0 +1,55 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. 
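parse_juniper_screenos_mem above reshapes the raw byte counters into the [used_kb, total_kb] rows that the shared check_juniper_mem function expects; under the Python 2 runtime used here, / 1024 is integer division. A sketch with an invented sample row:

def parse_screenos_mem(info):
    new_info = []
    for used, free in info:
        # bytes -> kilobytes; total memory = used + free
        new_info.append([int(used) / 1024, (int(used) + int(free)) / 1024])
    return new_info

print(parse_screenos_mem([["1048576", "3145728"]]))  # [[1024, 4096]]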
+ +juniper_screenos_temp_default_levels = ( 70, 80 ) + +def inventory_juniper_screenos_temp(info): + for name, temp in info: + if name.endswith("Temperature"): + name = name.rsplit(None, 1)[0] + yield (name, "juniper_screenos_temp_default_levels") + +def check_juniper_screenos_temp(item, params, info): + for name, temp in info: + if name.endswith("Temperature"): + name = name.rsplit(None, 1)[0] + if name == item: + return check_temperature(int(temp), params) + + +check_info["juniper_screenos_temp"] = { + "check_function" : check_juniper_screenos_temp, + "inventory_function" : inventory_juniper_screenos_temp, + "group" : "hw_temperature", + "service_description" : "Temperature %s", + "has_perfdata" : True, + "snmp_scan_function" : lambda oid: oid(".1.3.6.1.2.1.1.2.0").startswith(".1.3.6.1.4.1.3224.1"), + "snmp_info" : (".1.3.6.1.4.1.3224.21.4.1",[ + 4, # Name + 3, # Temperatur + ]), + "includes" : [ "temperature.include" ], +} diff -Nru check-mk-1.2.2p3/juniper_screenos_vpn check-mk-1.2.6p12/juniper_screenos_vpn --- check-mk-1.2.2p3/juniper_screenos_vpn 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/juniper_screenos_vpn 2015-06-24 09:48:37.000000000 +0000 @@ -0,0 +1,52 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. 
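The temperature check above derives the service item by cutting a trailing "Temperature" word from the sensor name, in both inventory and check, so the two stay in sync. A sketch (sensor name invented):

def temp_item(name):
    # As in inventory_juniper_screenos_temp: "Local Temperature" -> "Local"
    if name.endswith("Temperature"):
        return name.rsplit(None, 1)[0]
    return name

print(temp_item("Local Temperature"))  # 'Local'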
+ + +def inventory_juniper_screenos_vpn(info): + return [ (line[0], None) for line in info ] + +def check_juniper_screenos_vpn(item, params, info): + for vpn_id, vpn_status in info: + if vpn_id == item: + if vpn_status == "1": + return (0, "VPN Status %s is active" % vpn_id) + elif vpn_status == "0": + return (2, "VPN Status %s inactive" % vpn_id) + else: + return (1, "Unknown vpn status %s" % vpn_status) + return (2, "VPN name not found in SNMP data") + +check_info["juniper_screenos_vpn"] = { + "check_function" : check_juniper_screenos_vpn, + "inventory_function" : inventory_juniper_screenos_vpn, + "service_description" : "VPN %s", + "has_perfdata" : False, + "snmp_scan_function" : lambda oid: oid(".1.3.6.1.2.1.1.2.0").startswith(".1.3.6.1.4.1.3224.1"), + "snmp_info" : (".1.3.6.1.4.1.3224.4.1.1.1",[ + 4, # VPN Name + 23, # VPN Ike 2 Status + ]) +} diff -Nru check-mk-1.2.2p3/juniper_trpz_aps check-mk-1.2.6p12/juniper_trpz_aps --- check-mk-1.2.2p3/juniper_trpz_aps 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/juniper_trpz_aps 2015-09-21 10:59:54.000000000 +0000 @@ -0,0 +1,45 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. + +def inventory_juniper_trpz_aps(info): + return [ (None, None) ] + +def check_juniper_trpz_aps(_no_item, _no_params, info): + aps = info[0][0] + message = "%s Access Points online" % aps + return 0, message + +check_info["juniper_trpz_aps"] = { + "check_function" : check_juniper_trpz_aps, + "inventory_function" : inventory_juniper_trpz_aps, + "service_description" : "Access Points", + "snmp_scan_function" : lambda oid: oid(".1.3.6.1.2.1.1.2.0").startswith(".1.3.6.1.4.1.14525.3.1.6"), + "snmp_info" : (".1.3.6.1.4.1.14525.4.5.1.1",[ + 1, #number of Access Points + ]) + + +} diff -Nru check-mk-1.2.2p3/juniper_trpz_cpu_util check-mk-1.2.6p12/juniper_trpz_cpu_util --- check-mk-1.2.2p3/juniper_trpz_cpu_util 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/juniper_trpz_cpu_util 2015-06-24 09:48:37.000000000 +0000 @@ -0,0 +1,75 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . 
\ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. + +juniper_trpz_cpu_util_default_levels = ( 80.0, 90.0 ) + +def inventory_juniper_trpz_cpu_util(info): + return [ ( None, "juniper_trpz_cpu_util_default_levels" ) ] + +def check_juniper_trpz_cpu_util(_no_item, params, info): + utilc, util1, util5 = map(savefloat, info[0]) + + warn, crit = params + label1, label5 = "", "" + state = 0 + + if util1 >= crit: + state = 2 + label1 = "(!!)" + elif util1 >= warn: + state = 1 + label1 = "(!)" + + if util5 >= crit: + state = 2 + label5 = "(!!)" + elif util5 >= warn: + state = max(state, 1) + label5 = "(!)" + + perf = [( "util1", util1, warn, crit ), + ( "util5", util5, warn, crit ), + ( "utilc", utilc )] + + message = "%d%% current, %d%% 1min%s, %d%% 5min%s" % \ + ( utilc, util1, label1, util5, label5 ) + + return state, message, perf + +check_info["juniper_trpz_cpu_util"] = { + "check_function" : check_juniper_trpz_cpu_util, + "inventory_function" : inventory_juniper_trpz_cpu_util, + "group" : "cpu_utilization", + "service_description" : "CPU utilization", + "has_perfdata" : True, + "snmp_scan_function" : lambda oid: oid(".1.3.6.1.2.1.1.2.0").startswith(".1.3.6.1.4.1.14525.3.1.6"), + # see: http://www.observium.org/svn/observer/trunk/mibs/trapeze/trpz-system-mib.my + "snmp_info" : (".1.3.6.1.4.1.14525.4.8.1.1.11", [ + 1, # trpzSysCpuInstantLoad + 2, # trpzSysCpuLastMinuteLoad + 3, # trpzSysCpuLast5MinutesLoad + ]) +} diff -Nru check-mk-1.2.2p3/juniper_trpz_flash check-mk-1.2.6p12/juniper_trpz_flash --- check-mk-1.2.2p3/juniper_trpz_flash 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/juniper_trpz_flash 2015-06-24 09:48:37.000000000 +0000 @@ -0,0 +1,68 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. 
check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# tails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. + +juniper_trpz_flash_default_levels = ( 90.0 , 95.0 ) + +def inventory_juniper_trpz_flash(info): + return [ ( None, "juniper_trpz_flash_default_levels") ] + +def check_juniper_trpz_flash(_no_item, params, info): + warn, crit = params + used, total = map(savefloat, info[0]) + message = "Used: %s of %s " % ( get_bytes_human_readable(used) , get_bytes_human_readable(total)) + perc_used = (used / total ) * 100 + if type(crit) == float: + a_warn = (warn / 100) * total + a_crit = (crit / 100) * total + perf = [ ("used", used, a_warn, a_crit, 0, total ) ] + levels = "Levels Warn/Crit are (%.2f%%, %.2f%%)" % ( warn, crit) + if perc_used > crit: + return 2, message + levels, perf + if perc_used > warn: + return 1, message + levels, perf + else: + perf = [ ("used", used, warn, crit, 0, total ) ] + levels = "Levels Warn/Crit are (%s, %s)" % \ + ( get_bytes_human_readable(warn), get_bytes_human_readable(crit)) + if used > crit: + return 2, message + levels, perf + if used > warn: + return 1, message + levels, perf + return 0, message, perf + +check_info["juniper_trpz_flash"] = { + "check_function" : check_juniper_trpz_flash, + "inventory_function" : inventory_juniper_trpz_flash, + "service_description" : "Flash", + "has_perfdata" : True, + "snmp_info" : ( ".1.3.6.1.4.1.14525.4.8.1.1", [ + 3, #Flash used + 4, #Flash_total + ] ), + "snmp_scan_function" : lambda oid: oid('.1.3.6.1.2.1.1.2.0') == ".1.3.6.1.4.1.14525.3.1.6", + "group" : "general_flash_usage", +} + diff -Nru check-mk-1.2.2p3/juniper_trpz_info check-mk-1.2.6p12/juniper_trpz_info --- check-mk-1.2.2p3/juniper_trpz_info 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/juniper_trpz_info 2015-06-24 09:48:37.000000000 +0000 @@ -0,0 +1,47 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2.
+ + +def inventory_juniper_trpz_info(info): + return [ (None, None) ] + +def check_juniper_trpz_info(_no_item, _no_params, info): + serial, version = info[0] + message = "S/N: %s, FW Version: %s" % ( serial, version ) + return 0, message + +check_info["juniper_trpz_info"] = { + "check_function" : check_juniper_trpz_info, + "inventory_function" : inventory_juniper_trpz_info, + "service_description" : "Info", + "snmp_scan_function" : lambda oid: oid(".1.3.6.1.2.1.1.2.0").startswith(".1.3.6.1.4.1.14525.3.1.6"), + "snmp_info" : (".1.3.6.1.4.1.14525.4.2.1",[ + 1, #serial number + 4, #firmware verson + ]) + + +} diff -Nru check-mk-1.2.2p3/juniper_trpz_mem check-mk-1.2.6p12/juniper_trpz_mem --- check-mk-1.2.2p3/juniper_trpz_mem 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/juniper_trpz_mem 2015-09-21 10:59:54.000000000 +0000 @@ -0,0 +1,39 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. + +check_info["juniper_trpz_mem"] = { + 'check_function': check_juniper_mem, + 'inventory_function': inventory_juniper_mem, + 'service_description': 'Memory used', + 'has_perfdata': True, + "group": "juniper_mem", + "snmp_scan_function": lambda oid: oid(".1.3.6.1.2.1.1.2.0").startswith(".1.3.6.1.4.1.14525.3.1.6"), + "snmp_info": (".1.3.6.1.4.1.14525.4.8.1.1", [ + "12.1", # trpzSysCpuMemoryInstantUsage + "6", # trpzSysCpuMemorySize + ]), + "includes": [ "juniper_mem.include" ], +} diff -Nru check-mk-1.2.2p3/juniper_trpz_power check-mk-1.2.6p12/juniper_trpz_power --- check-mk-1.2.2p3/juniper_trpz_power 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/juniper_trpz_power 2015-06-24 09:48:37.000000000 +0000 @@ -0,0 +1,59 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. 
+# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. + + +def inventory_juniper_trpz_power(info): + return [ ( line[0], None ) for line in info ] + +def check_juniper_trpz_power(item, _no_params, info): + states = { + 1 : 'other', + 2 : 'unknown', + 3 : 'ac-failed', + 4 : 'dc-failed', + 5 : 'ac-ok-dc-ok', + } + for line in info: + if line[0] == item: + state = saveint(line[1]) + message = "Current state: %s" % states[state] + if state in [ 2, 3, 4 ]: + return 2, message + if state == 1: + return 1, message + return 0, message + +check_info["juniper_trpz_power"] = { + "check_function" : check_juniper_trpz_power, + "inventory_function" : inventory_juniper_trpz_power, + "service_description" : "PSU %s", + "snmp_scan_function" : lambda oid: oid(".1.3.6.1.2.1.1.2.0").startswith(".1.3.6.1.4.1.14525.3.1.6"), + "snmp_info" : (".1.3.6.1.4.1.14525.4.8.1.1.13.1.2.1",[ + 3, #name + 2, #state + ]) +} + diff -Nru check-mk-1.2.2p3/kaspersky_av_quarantine check-mk-1.2.6p12/kaspersky_av_quarantine --- check-mk-1.2.2p3/kaspersky_av_quarantine 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/kaspersky_av_quarantine 2015-06-24 09:48:37.000000000 +0000 @@ -0,0 +1,52 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. 
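juniper_trpz_power, added above, maps the numeric PSU state to a monitoring state: 5 (ac-ok-dc-ok) is OK, 1 (other) is WARN, 2-4 are CRIT. A standalone sketch (int stands in for Check_MK's saveint):

PSU_STATES = {1: 'other', 2: 'unknown', 3: 'ac-failed',
              4: 'dc-failed', 5: 'ac-ok-dc-ok'}

def psu_status(raw):
    state = int(raw)
    message = "Current state: %s" % PSU_STATES[state]
    if state in [2, 3, 4]:
        return 2, message
    if state == 1:
        return 1, message
    return 0, message

print(psu_status("5"))  # (0, 'Current state: ac-ok-dc-ok')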
+ +# Example output from agent: +# <<>> +# Quarantine/backup statistics: +# Objects: 0 +# Size: 0 +# Last added: unknown + +def inventory_kaspersky_av_quarantine(info): + return [( None, None )] + +def check_kaspersky_av_quarantine(item, _no_params, info): + # Reformat info when some lines have more than 2 elements + info = dict([ [l[0], ' '.join(l[1:])] for l in info ]) + objects = int(info['Objects']) + perf = [ ( 'objects', objects ) ] + if objects > 0: + return 2, "%d Objects in Quarantine, Last added: %s" % (objects, info['Last added'].strip()), perf + return 0, "No objects in Quarantine", perf + +check_info["kaspersky_av_quarantine"] = { + "check_function" : check_kaspersky_av_quarantine, + "inventory_function" : inventory_kaspersky_av_quarantine, + "service_description" : "AV Quarantine", + "has_perfdata" : True, +} + diff -Nru check-mk-1.2.2p3/kaspersky_av_tasks check-mk-1.2.6p12/kaspersky_av_tasks --- check-mk-1.2.2p3/kaspersky_av_tasks 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/kaspersky_av_tasks 2015-06-24 09:48:37.000000000 +0000 @@ -0,0 +1,82 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. 
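check_kaspersky_av_quarantine above folds the key/value rows into a dict first; this sketch assumes the agent section is split so that each row starts with the full key (e.g. a ':' separator), since the check later looks up 'Objects' and 'Last added' verbatim:

rows = [["Objects", " 0"], ["Size", " 0"], ["Last added", " unknown"]]  # shaped like the example output
data = dict([[r[0], ' '.join(r[1:])] for r in rows])
objects = int(data['Objects'])
state = 2 if objects > 0 else 0
print((state, data['Last added'].strip()))  # (0, 'unknown')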
+ +# Example output from agent: +# Number of tasks: 15 +# Name: System:EventManager +# Id: 1 +# Runtime ID: 1314160393 +# Class: EventManager +# State: Started +# Name: System:AVS +# Id: 2 +# Runtime ID: 1314160398 +# Class: AVS +# State: Started +# Name: System:Quarantine +# Id: 3 +# Runtime ID: 1314160399 +# Class: Quarantine +# State: Started +# Name: System:Statistics +# Id: 4 +# Runtime ID: 1314160396 +# Class: Statistics +# State: Started +# + +def inventory_kaspersky_av_tasks(info): + inventory = [] + jobs = [ + 'Real-time protection', + 'System:EventManager' + ] + for line in [ x for x in info if x[0].startswith("Name")]: + job = " ".join(line[1:]) + if job in jobs: + inventory.append( (job, None )) + return inventory + +def check_kaspersky_av_tasks(item, _no_params, info): + found = False + for line in info: + if found: + if line[0].startswith('State'): + state = 0 + if line[1] != "Started": + state = 2 + return state, "Current state is " + line[1] + if line[0].startswith('Name') and " ".join(line[1:]) == item: + found = True + return 3, "Task not found in agent output" + +check_info["kaspersky_av_tasks"] = { + "check_function" : check_kaspersky_av_tasks, + "inventory_function" : inventory_kaspersky_av_tasks, + "service_description" : "AV Task %s", + "has_perfdata" : False, +} + diff -Nru check-mk-1.2.2p3/kaspersky_av_updates check-mk-1.2.6p12/kaspersky_av_updates --- check-mk-1.2.2p3/kaspersky_av_updates 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/kaspersky_av_updates 2015-06-24 09:48:37.000000000 +0000 @@ -0,0 +1,58 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. 
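check_kaspersky_av_tasks above walks the flat output: once the Name row matching the item is seen, the next State row decides the result. A sketch with rows as they would look after whitespace splitting of the example output:

rows = [["Name:", "System:EventManager"], ["Id:", "1"],
        ["Runtime", "ID:", "1314160393"], ["State:", "Started"]]

def task_status(item, rows):
    found = False
    for line in rows:
        if found and line[0].startswith('State'):
            return (0 if line[1] == "Started" else 2), "Current state is " + line[1]
        if line[0].startswith('Name') and " ".join(line[1:]) == item:
            found = True
    return 3, "Task not found in agent output"

print(task_status("System:EventManager", rows))  # (0, 'Current state is Started')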
+ +# Example output from agent: +#Current AV databases date: 2014-05-27 03:54:00 +#Last AV databases update date: 2014-05-27 09:00:40 +#Current AV databases state: UpToDate +#Current AV databases records: 8015301 +#Update attempts: 48616 +#Successful updates: 9791 +#Update manual stops: 0 +#Updates failed: 3333 + +def inventory_kaspersky_av_updates(info): + return [( None, None ) ] + +def check_kaspersky_av_updates(item, _no_params, info): + info = dict([ ( x[0], ":".join(x[1:]).strip()) for x in info ]) + state = 0 + message = "Database State: " + info['Current AV databases state'] + if info['Current AV databases state'] != 'UpToDate': + state = 2 + message += "(!!)" + message += ", Database Date: %s, Last Update: %s" % ( info['Current AV databases date'], + info['Last AV databases update date'] ) + + return state, message + +check_info["kaspersky_av_updates"] = { + "check_function" : check_kaspersky_av_updates, + "inventory_function" : inventory_kaspersky_av_updates, + "service_description" : "AV Update Status", + "has_perfdata" : False, +} + diff -Nru check-mk-1.2.2p3/kemp_loadmaster_ha check-mk-1.2.6p12/kemp_loadmaster_ha --- check-mk-1.2.2p3/kemp_loadmaster_ha 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/kemp_loadmaster_ha 2015-09-21 10:59:54.000000000 +0000 @@ -0,0 +1,46 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. 
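check_kaspersky_av_updates above dict-ifies the rows in the same way, but re-joins each value with ':' because the timestamps themselves contain colons. A sketch (assuming the section is split on ':'):

rows = [["Current AV databases state", " UpToDate"],
        ["Current AV databases date", " 2014-05-27 03", "54", "00"]]
data = dict([(r[0], ":".join(r[1:]).strip()) for r in rows])
print(data['Current AV databases date'])          # '2014-05-27 03:54:00'
print(data['Current AV databases state'] == 'UpToDate')  # True -> state 0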
+ + +def inventory_kemp_loadmaster_ha(info): + return [ ( None, None ) ] + +def check_kemp_loadmaster_ha(_no_item, _no_params, info): + states = ( 'none', 'Master', 'Standby', 'Passive' ) + state = states[saveint(info[0][0])] + firmware = info[0][1] + return 0, "Device is: %s (Firmware: %s)" % ( state, firmware ) + + +check_info["kemp_loadmaster_ha"] = { + "check_function" : check_kemp_loadmaster_ha, + "inventory_function" : inventory_kemp_loadmaster_ha, + "service_description" : "HA State", + "has_perfdata" : False, + "snmp_scan_function" : lambda oid: oid(".1.3.6.1.2.1.1.2.0") == ".1.3.6.1.4.1.12196.250.10" or oid(".1.3.6.1.2.1.1.2.0") == ".1.3.6.1.4.1.2021.250.10", + "snmp_info" : ( ".1.3.6.1.4.1.12196.13.0", [ 9, 10 ] ), +} + diff -Nru check-mk-1.2.2p3/kemp_loadmaster_realserver check-mk-1.2.6p12/kemp_loadmaster_realserver --- check-mk-1.2.2p3/kemp_loadmaster_realserver 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/kemp_loadmaster_realserver 2015-09-21 10:59:54.000000000 +0000 @@ -0,0 +1,61 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. 
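kemp_loadmaster_ha above is informational only: it indexes a 4-tuple with the device's HA state and always returns OK. A sketch (sample row invented; int stands in for saveint):

HA_STATES = ('none', 'Master', 'Standby', 'Passive')
raw_state, firmware = "1", "7.1-24b"  # invented sample row
print("Device is: %s (Firmware: %s)" % (HA_STATES[int(raw_state)], firmware))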
+ + +def inventory_kemp_loadmaster_realserver(info): + return [ ( x[0], None ) for x in info ] + +def check_kemp_loadmaster_realserver(item, _no_params, info): + states = { 1 : 'reachable', + 2 : 'stale', + 3 : 'delay', + 4 : 'probe', + 5 : 'invalid', + 6 : 'unknown', + 7 : 'incomplete', + 20 : 'not in use'} + + for line in info: + if line[0] == item: + state = int(line[1]) + message = "State: %s" % (states[state]) + if state == 1: + return 0, message + if state == 6: + return 1, message + if state in [ 2, 3, 4, 5, 7, 20 ]: + return 3, message + return 3, "Service not found" + +check_info["kemp_loadmaster_realserver"] = { + "check_function" : check_kemp_loadmaster_realserver, + "inventory_function" : inventory_kemp_loadmaster_realserver, + "service_description" : "Server %s", + "has_perfdata" : False, + "snmp_scan_function" : lambda oid: oid(".1.3.6.1.2.1.1.2.0") == ".1.3.6.1.4.1.12196.250.10", + "snmp_info" : ( ".1.3.6.1.2.1.4.35.1.7.7.1.4", [ OID_END, ''] ), +} + diff -Nru check-mk-1.2.2p3/kemp_loadmaster_services check-mk-1.2.6p12/kemp_loadmaster_services --- check-mk-1.2.2p3/kemp_loadmaster_services 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/kemp_loadmaster_services 2015-06-24 09:48:37.000000000 +0000 @@ -0,0 +1,68 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# tails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA.
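In the real-server check above only ARP state 1 (reachable) maps to OK; 6 (unknown) gives WARN, and every other listed state is reported UNKNOWN. The same decision, table-driven:

RS_STATES = {1: 'reachable', 2: 'stale', 3: 'delay', 4: 'probe',
             5: 'invalid', 6: 'unknown', 7: 'incomplete', 20: 'not in use'}

def rs_status(raw):
    state = int(raw)
    message = "State: %s" % RS_STATES[state]
    if state == 1:
        return 0, message
    if state == 6:
        return 1, message
    return 3, message

print(rs_status("2"))  # (3, 'State: stale')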
+ + +kemp_loadmaster_service_default_levels = ( 1500, 2000 ) + +def inventory_kemp_loadmaster_services(info): + return [ ( x[0], kemp_loadmaster_service_default_levels ) for x in info ] + +def check_kemp_loadmaster_services(item, _no_params, info): + service_states = ( 'in Service', + 'out of Service', + 'failed', + 'disabled', + 'sorry', + 'redirect', + 'errormsg') + for line in info: + if line[0] == item: + service_state, conns = map(saveint, line[1:]) + message = "State: %s" % (service_states[service_state-1]) + perfdata = [ ('conns', conns) ] + state = 3 + if service_state in [ 1, 6 ]: + message += ", Active Connections: %s" % conns + state = 0 + if service_state == 5: + state = 1 + if service_state in [ 2, 3, 7 ]: + state = 2 + return (state, message, perfdata) + return 3, "Service not found" + +check_info["kemp_loadmaster_services"] = { + "check_function" : check_kemp_loadmaster_services, + "inventory_function" : inventory_kemp_loadmaster_services, + "service_description" : "Service %s", + "has_perfdata" : True, + "snmp_scan_function" : lambda oid: oid(".1.3.6.1.2.1.1.2.0") == ".1.3.6.1.4.1.12196.250.10" or oid(".1.3.6.1.2.1.1.2.0") == ".1.3.6.1.4.1.2021.250.10", + "snmp_info" : ( ".1.3.6.1.4.1.12196.13.1.1", [ + 13, # name + 14, # state + 21, # conns + ] ), +} diff -Nru check-mk-1.2.2p3/kentix_humidity check-mk-1.2.6p12/kentix_humidity --- check-mk-1.2.2p3/kentix_humidity 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/kentix_humidity 2015-06-24 09:48:37.000000000 +0000 @@ -0,0 +1,59 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. 
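kemp_loadmaster_services above translates the 1-based state code through a tuple and attaches the connection count; note that states 1 (in Service) and 6 (redirect) both count as OK. A sketch of that mapping:

SVC_STATES = ('in Service', 'out of Service', 'failed', 'disabled',
              'sorry', 'redirect', 'errormsg')

def svc_status(service_state, conns):
    message = "State: %s" % SVC_STATES[service_state - 1]
    if service_state in [1, 6]:
        return 0, message + ", Active Connections: %s" % conns
    if service_state == 5:
        return 1, message
    if service_state in [2, 3, 7]:
        return 2, message
    return 3, message

print(svc_status(1, 42))  # (0, 'State: in Service, Active Connections: 42')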
+ +def inventory_kentix_humidity(info): + if info: + return [ (None, None ) ] + +def check_kentix_humidity(_no_item, _no_params, info): + value, lower_warn, upper_warn, alarm, text = info[0] + temp = float(value)/10 + perfdata = [ ("temp", temp, lower_warn+":"+upper_warn ) ] + upper_warn = float(upper_warn) + lower_warn = float(lower_warn) + infotext = "%.1f%% (min/max at %.1f%%/%.1f%%)" % (temp, lower_warn, upper_warn) + if temp >= upper_warn or temp <= lower_warn: + state = 1 + infotext = "%s: %s" % (text, infotext) + else: + state = 0 + return state, infotext, perfdata + +check_info["kentix_humidity"] = { + 'check_function': check_kentix_humidity, + 'inventory_function': inventory_kentix_humidity, + 'service_description': 'Humidity', + 'has_perfdata': True, + 'snmp_info': ('.1.3.6.1.4.1.37954.3.1.2', + ['1', # humidityValue + '2', # humidityMin + '3', # humidityMax + '4', # humidityAlarm, not used + '5', # humidityAlarmtext + ] + ), + "snmp_scan_function" : lambda oid: oid(".1.3.6.1.2.1.1.2.0").startswith(".1.3.6.1.4.1.332.11.6"), +} diff -Nru check-mk-1.2.2p3/kentix_temp check-mk-1.2.6p12/kentix_temp --- check-mk-1.2.2p3/kentix_temp 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/kentix_temp 2015-09-21 10:59:54.000000000 +0000 @@ -0,0 +1,59 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. 
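kentix_humidity above scales the raw reading by 10 and treats the device-provided min/max as a symmetric pair of warning levels (there is no CRIT state). A sketch of that band check (sample values invented):

def humidity_status(value, lower_warn, upper_warn, text):
    hum = float(value) / 10
    lower, upper = float(lower_warn), float(upper_warn)
    infotext = "%.1f%% (min/max at %.1f%%/%.1f%%)" % (hum, lower, upper)
    if hum >= upper or hum <= lower:
        return 1, "%s: %s" % (text, infotext)
    return 0, infotext

print(humidity_status("450", "300", "700", "Rack 1"))
# (0, '45.0% (min/max at 30.0%/70.0%)')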
+ +def inventory_kentix_temp(info): + if info: + return [ (None, None ) ] + +def check_kentix_temp(_no_item, _no_params, info): + value, lower_warn, upper_warn, alarm, text = info[0] + temp = float(value)/10 + perfdata = [ ("temp", temp, lower_warn+":"+upper_warn ) ] + upper_warn = float(upper_warn) + lower_warn = float(lower_warn) + infotext = "%.1f C (min/max at %.1f/%.1f C)" % (temp, lower_warn, upper_warn) + if temp >= upper_warn or temp <= lower_warn: + state = 1 + infotext = "%s: %s" % (text, infotext) + else: + state = 0 + return state, infotext, perfdata + +check_info["kentix_temp"] = { + 'check_function': check_kentix_temp, + 'inventory_function': inventory_kentix_temp, + 'service_description': 'Temperature', + 'has_perfdata': True, + 'snmp_info': ('.1.3.6.1.4.1.37954.3.1.1', + ['1', # temperatureValue + '2', # temperatureMin + '3', # temperatureMax + '4', # temperatureAlarm, not used here + '5', # temperatureAlarmtext + ] + ), + "snmp_scan_function" : lambda oid: oid(".1.3.6.1.2.1.1.2.0").startswith(".1.3.6.1.4.1.332.11.6"), +} diff -Nru check-mk-1.2.2p3/kernel check-mk-1.2.6p12/kernel --- check-mk-1.2.2p3/kernel 2013-11-05 09:23:08.000000000 +0000 +++ check-mk-1.2.6p12/kernel 2015-09-21 10:59:54.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. @@ -24,6 +24,17 @@ # to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, # Boston, MA 02110-1301 USA. +# .--kernel--Counters----------------------------------------------------. +# | ____ _ | +# | / ___|___ _ _ _ __ | |_ ___ _ __ ___ | +# | | | / _ \| | | | '_ \| __/ _ \ '__/ __| | +# | | |__| (_) | |_| | | | | || __/ | \__ \ | +# | \____\___/ \__,_|_| |_|\__\___|_| |___/ | +# | | +# +----------------------------------------------------------------------+ +# | Check page faults, context switches and process creations | +# '----------------------------------------------------------------------' + # Inventory creates three checks per default: inventory_kernel_counters = [ "pgmajfault", "ctxt", "processes" ] kernel_default_levels = None @@ -52,40 +63,53 @@ for line in info[1:] if line[0] == item or kernel_counter_names.get(line[0], line[0]) == item ] if len(hits) == 0: - return (3, "UNKNOWN - item '%s' not found in agent output" % item) + return (3, "item '%s' not found in agent output" % item) elif len(hits) > 1: - return (3, "UNKNOWN - item '%s' not unique (found %d times)" % (item, len(hits))) + return (3, "item '%s' not unique (found %d times)" % (item, len(hits))) counter = hits[0][0] this_val = int(hits[0][1]) - timedif, per_sec = get_counter("kernel." + item, this_time, this_val) - infotext = " - %.0f/s in last %d secs" % (per_sec, timedif) - - if params == None: - return (0, "OK" + infotext, [ (counter, per_sec) ]) + per_sec = get_rate("kernel." 
+ counter, this_time, this_val) - warn, crit = params - perfdata = [ (counter, per_sec, warn, crit) ] - if warn == None and crit != None: - infotext += " (critical at %.0f/s)" % crit - elif warn != None and crit == None: - infotext += " (warning at %.0f/s)" % warn - elif warn != None: - infotext += " (warn/crit at %.0f/%.0f per sec)" % (warn, crit) - - if crit != None and per_sec >= crit: - return (2, "CRIT" + infotext, perfdata) - elif warn != None and per_sec >= warn: - return (1, "WARN" + infotext, perfdata) + if type(params) == tuple: + warn, crit = params else: - return (0, "OK" + infotext, perfdata) + warn, crit = None, None + perfdata = [ (counter, per_sec, warn, crit) ] + state, text, extraperf = check_levels(per_sec, counter, params) + perfdata += extraperf + infotext = "%.0f/s" % per_sec + if text: + infotext += ", " + text + return state, infotext, perfdata + + +check_info["kernel"] = { + 'check_function': check_kernel, + 'inventory_function': inventory_kernel, + 'service_description': 'Kernel %s', + 'has_perfdata': True, + 'group': 'vm_counter', +} + +#. +# .--kernel.util--CPU Utilization----------------------------------------. +# | _ _ _ _ _ _ _ _ | +# | | | | | |_(_) (_)______ _| |_(_) ___ _ __ | +# | | | | | __| | | |_ / _` | __| |/ _ \| '_ \ | +# | | |_| | |_| | | |/ / (_| | |_| | (_) | | | | | +# | \___/ \__|_|_|_/___\__,_|\__|_|\___/|_| |_| | +# | | +# +----------------------------------------------------------------------+ +# | Check system/user/io-wait | +# '----------------------------------------------------------------------' kernel_util_default_levels = None def inventory_cpu_utilization(info): for x in info: if len(x) > 0 and x[0] == 'cpu': - return [(None, "kernel_util_default_levels")] + return [(None, {})] # Columns of cpu usage /proc/stat: # - cpuX: number of CPU or only 'cpu' for aggregation @@ -97,12 +121,17 @@ # - irq: servicing interrupts # - softirq: servicing softirqs # - steal: involuntary wait -def check_cpu_utilization(item, params, info): - global g_counters +def kernel_check_cpu_utilization(item, params, info): + + # Convert old style tuple-parameter to new dict + if type(params) != dict: + params = { "iowait": params } + # Look for entry beginning with "cpu" f = [ l for l in info if l[0] == "cpu" ] if len(f) != 1: - return (3, "UNKNOWN - invalid output from plugin") + return 3, "More than one line with CPU info found. This check is not cluster-enabled." + line = f[0] if len(line) < 8: line = line + ['0', '0', '0', '0'] # needed for Linux 2.4 @@ -111,51 +140,15 @@ # 'cpu' user nice system idle wait hw-int sw-int (steal ...) # convert number to int values = [ int(x) for x in line[1:8] ] - this_time = int(time.time()) - diff_values = [] - n = 0 - for v in values: - n += 1 - countername = "cpu.util.%d" % n - last_time, last_val = g_counters.get(countername, (0, 0)) - diff_values.append(v - last_val) - g_counters[countername] = (this_time, v) - - sum_jiffies = sum(diff_values[0:7]) # do not account for steal! 
- if sum_jiffies == 0: - return (0, "OK - too short interval") - user = diff_values[0] + diff_values[1] # add user + nice - system = diff_values[2] - wait = diff_values[4] - user_perc = 100.0 * float(user) / float(sum_jiffies) - system_perc = 100.0 * float(system) / float(sum_jiffies) - wait_perc = 100.0 * float(wait) / float(sum_jiffies) - perfdata = [ - ( "user", "%.3f" % user_perc ), - ( "system", "%.3f" % system_perc ), - ( "wait", "%.3f" % wait_perc ) ] - - infotext = " - user: %2.1f%%, system: %2.1f%%, wait: %2.1f%%" % (user_perc, system_perc, wait_perc) - - # You may set a warning/critical level on the io wait - # percentage. This can be done by setting params to - # a pair of (warn, crit) - result = 0 - try: - warn, crit = params - if wait_perc >= crit: - result = 2 - infotext += "(!!)" - elif wait_perc >= warn: - result = 1 - infotext += "(!)" - except: - pass - - return (result, nagios_state_names[result] + infotext, perfdata) + return check_cpu_util_unix(values, params) -check_info['kernel.util'] = (check_cpu_utilization, "CPU utilization", 1, inventory_cpu_utilization) -checkgroup_of['kernel.util'] = "cpu_iowait" +check_info["kernel.util"] = { + 'check_function': kernel_check_cpu_utilization, + 'inventory_function': inventory_cpu_utilization, + 'service_description': 'CPU utilization', + 'has_perfdata': True, + 'default_levels_variable': 'kernel_util_default_levels', + 'group': 'cpu_iowait', + 'includes': ['cpu_util.include'], +} -check_info['kernel'] = (check_kernel, "Kernel %s", 1, inventory_kernel) -checkgroup_of['kernel'] = "vm_counter" diff -Nru check-mk-1.2.2p3/kernel.util check-mk-1.2.6p12/kernel.util --- check-mk-1.2.2p3/kernel.util 2013-11-05 09:23:07.000000000 +0000 +++ check-mk-1.2.6p12/kernel.util 2015-06-24 09:48:36.000000000 +0000 @@ -1,13 +1,12 @@ -title: Check CPU usage (utilisation) +title: CPU utilization agents: linux -author: Mathias Kettner +catalog: os/kernel license: GPL distribution: check_mk description: This check measures the CPU utilization during the last check - cycle. A level can be set only on {wait} (disk wait). If you - want to set levels on your CPU usage you better use the check - {cpu.loads}. + cycle. Levels can be set on the total utilization and also + specifically on the disk wait (IO wait). perfdata: Three variables: The percentage of time spent in {user}, {system} and {wait} @@ -21,10 +20,14 @@ preset to {None} thus imposing no levels and making the check always OK. [parameters] -warning (int): The percentage of {wait} that triggers a warning level. -critical (int): The percentage of {wait} that triggers a critical level - The parameters may also be set to {None}, which makes the check alwas {OK}. +dictionary: A dictionary for two keys, both of which are optional: + + {"iowait"} a pair of float values with percentages for warning and critical + for the disk wait + + {"util"} a pair of float values with percentages for warning and critical + for the total utilization [configuration] -kernel_util_default_levels (int, int): Default levels used by inventorized - checks. This is present to {None}, which disables the levels. +kernel_util_default_levels (dictionary): Default levels used by inventorized + checks. The default is not to impose any levels. 
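The kernel rework above swaps the hand-rolled get_counter/g_counters bookkeeping for get_rate plus check_levels, and kernel.util now reuses the shared cpu_util.include logic. A simplified, self-contained stand-in for the rate computation (Check_MK persists the previous sample across invocations; a dict emulates that here):

_last_sample = {}

def rate(name, this_time, this_val):
    # value delta / time delta since the previous sample of this counter
    last_time, last_val = _last_sample.get(name, (this_time, this_val))
    _last_sample[name] = (this_time, this_val)
    if this_time == last_time:
        return 0.0
    return (this_val - last_val) / float(this_time - last_time)

rate("kernel.ctxt", 0, 1000)
print(rate("kernel.ctxt", 60, 7000))  # 100.0 context switches per second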
diff -Nru check-mk-1.2.2p3/knuerr_rms_humidity check-mk-1.2.6p12/knuerr_rms_humidity --- check-mk-1.2.2p3/knuerr_rms_humidity 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/knuerr_rms_humidity 2015-06-24 09:48:37.000000000 +0000 @@ -0,0 +1,57 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. + +knuerr_rms_humidity_default_levels = ( 30, 40, 70, 75 ) + +def inventory_knuerr_rms_humidity(info): + return [ ( None, 'knuerr_rms_humidity_default_levels') ] + +def check_knuerr_rms_humidity(_no_item, params, info): + name, reading = info[0] + reading = float(reading) / 10 + crit_low, warn_low, warn_high, crit_high = params + infotext = "Humidity is: %.1f %% on Sensor %s" % ( reading, name ) + levels = ' (Warn/Crit at %s/%s or below %s/%s)' % ( warn_high, crit_high, warn_low, crit_low ) + perfdata = [ ('humidity', reading, crit_low, warn_low, warn_high, crit_high ) ] + state = 0 + if reading >= crit_high or reading <= crit_low: + state = 2 + infotext += levels + elif reading >= warn_high or reading <= warn_low: + state = 1 + infotext += levels + return state, infotext, perfdata + +check_info["knuerr_rms_humidity"] = { + "check_function" : check_knuerr_rms_humidity, + "inventory_function" : inventory_knuerr_rms_humidity, + "service_description" : "Humidity", + "has_perfdata" : True, + "snmp_scan_function" : lambda oid: oid(".1.3.6.1.2.1.1.2.0") == ".1.3.6.1.4.1.3711.15.1", + "snmp_info" : ( ".1.3.6.1.4.1.3711.15.1.1.1.2" , [ 2 , 4 ]), + "group" : "single_humidity" +} + diff -Nru check-mk-1.2.2p3/knuerr_rms_temp check-mk-1.2.6p12/knuerr_rms_temp --- check-mk-1.2.2p3/knuerr_rms_temp 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/knuerr_rms_temp 2015-06-24 09:48:37.000000000 +0000 @@ -0,0 +1,51 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. 
+# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. + +knuerr_rms_temp_default_levels = (30, 35) + + +def inventory_knuerr_rms_temp(info): + return [ (None, 'knuerr_rms_temp_default_levels') ] + + +def check_knuerr_rms_temp(_no_item, params, info): + name, rawtemp = info[0] + status, infotext, perfdata = check_temperature(float(rawtemp) / 10, params) + infotext += " (%s)" % name + return status, infotext, perfdata + + +check_info["knuerr_rms_temp"] = { + "check_function" : check_knuerr_rms_temp, + "inventory_function" : inventory_knuerr_rms_temp, + "service_description" : "Temperature", + "has_perfdata" : True, + "snmp_scan_function" : lambda oid: oid(".1.3.6.1.2.1.1.2.0") == ".1.3.6.1.4.1.3711.15.1", + "snmp_info" : ( ".1.3.6.1.4.1.3711.15.1.1.1.1", [2, 4]), + "group" : "hw_single_temperature", + "includes" : [ "temperature.include" ], +} + diff -Nru check-mk-1.2.2p3/knuerr_sensors check-mk-1.2.6p12/knuerr_sensors --- check-mk-1.2.2p3/knuerr_sensors 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/knuerr_sensors 2015-09-21 10:59:54.000000000 +0000 @@ -0,0 +1,46 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. 
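knuerr_rms_humidity, added further up, takes a four-value parameter tuple (crit_low, warn_low, warn_high, crit_high) and alarms when the reading leaves the inner (WARN) or outer (CRIT) band. The band logic in isolation:

def band_status(reading, params):
    crit_low, warn_low, warn_high, crit_high = params
    if reading >= crit_high or reading <= crit_low:
        return 2
    elif reading >= warn_high or reading <= warn_low:
        return 1
    return 0

print(band_status(72.0, (30, 40, 70, 75)))  # 1: above warn_high, below crit_high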
+ +def inventory_knuerr_sensors(info): + for sensor, state in info: + yield sensor, None + +def check_knuerr_sensors(item, _no_params, info): + for sensor, state in info: + if sensor == item: + if state != '0': + return 2, "Sensor triggered" + return 0, "Sensor not triggered" + return 3, "Sensor not longer found" + +check_info["knuerr_sensors"] = { + "check_function" : check_knuerr_sensors, + "inventory_function" : inventory_knuerr_sensors, + "service_description" : "Sensor %s", + "snmp_scan_function" : lambda oid: oid(".1.3.6.1.2.1.1.2.0") == ".1.3.6.1.4.1.3711.15.1", + "snmp_info" : ( ".1.3.6.1.4.1.3711.15.1.1.2", [ 1, 5 ]) +} + diff -Nru check-mk-1.2.2p3/lgp_info check-mk-1.2.6p12/lgp_info --- check-mk-1.2.2p3/lgp_info 2013-11-05 09:23:08.000000000 +0000 +++ check-mk-1.2.6p12/lgp_info 2015-06-24 09:48:37.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. @@ -56,30 +56,20 @@ return (0, 'Model: %s, Firmware: %s, S/N: %s\n%s' % tuple(agent_info + [ device_output ])) -check_info['lgp_info'] = (check_lgp_info, 'Liebert Info', 0, inventory_lgp_info) - -snmp_info['lgp_info'] = [ - ('.1.3.6.1.4.1.476.1.42.2.1', [ - '2.0', # LIEBERT-GP-AGENT-MIB::lgpAgentIdentModel.0 - '3.0', # LIEBERT-GP-AGENT-MIB::lgpAgentIdentFirmwareVersion.0 - '4.0', # LIEBERT-GP-AGENT-MIB::lgpAgentIdentSerialNumber.0 - ]), - # Devices registered on this manager - ('.1.3.6.1.4.1.476.1.42.2.4.2.1', [ - '2', # LIEBERT-GP-AGENT-MIB::lgpAgentDeviceId.1 - '3', # LIEBERT-GP-AGENT-MIB::lgpAgentDeviceManufacturer.1 - '6', # LIEBERT-GP-AGENT-MIB::lgpAgentDeviceUnitNumber.1 - ]) -] - -snmp_scan_functions['lgp_info'] = lambda oid: oid('.1.3.6.1.2.1.1.2.0') == \ - '.1.3.6.1.4.1.476.1.42' - -#check_info["lgp_info"] = { -# "check_function" : check_lgp_info, -# "inventory_function" : inventory_lgp_info, -# "service_description" : "Liebert Info", -# "has_perfdata" : False, -# "snmp_scan_function" : lambda oid: oid(".1.3.6.1.2.1.1.2.0") == \ -# ".1.3.6.1.4.1.476.1.42" -#} +check_info["lgp_info"] = { + 'check_function': check_lgp_info, + 'inventory_function': inventory_lgp_info, + 'service_description': 'Liebert Info', + 'snmp_info': [('.1.3.6.1.4.1.476.1.42.2.1', [ + '2.0', # LIEBERT-GP-AGENT-MIB::lgpAgentIdentModel.0 + '3.0', # LIEBERT-GP-AGENT-MIB::lgpAgentIdentFirmwareVersion.0 + '4.0', # LIEBERT-GP-AGENT-MIB::lgpAgentIdentSerialNumber.0 + ]), + ('.1.3.6.1.4.1.476.1.42.2.4.2.1', [ + '2', # LIEBERT-GP-AGENT-MIB::lgpAgentDeviceId.1 + '3', # LIEBERT-GP-AGENT-MIB::lgpAgentDeviceManufacturer.1 + '6', # LIEBERT-GP-AGENT-MIB::lgpAgentDeviceUnitNumber.1 + ])], + 'snmp_scan_function': lambda oid: oid('.1.3.6.1.2.1.1.2.0') == \ + '.1.3.6.1.4.1.476.1.42', +} diff -Nru check-mk-1.2.2p3/lgp_pdu_aux check-mk-1.2.6p12/lgp_pdu_aux --- check-mk-1.2.2p3/lgp_pdu_aux 2013-11-05 09:23:08.000000000 +0000 +++ check-mk-1.2.6p12/lgp_pdu_aux 2015-06-24 09:48:37.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. 
@@ -69,7 +69,7 @@ 'closed', ] -lgp_pdu_aux_fields = { +_lgp_pdu_aux_fields = { # Index, Type, Factor, ID '10': (str, None, 'Type'), '15': (str, None, 'SystemLabel'), @@ -96,8 +96,8 @@ if not id in new_info: new_info[id] = {'TypeIndex': id.split('.')[-1]} # Skip not handled rows - if type in lgp_pdu_aux_fields: - ty, factor, key = lgp_pdu_aux_fields[type] + if type in _lgp_pdu_aux_fields: + ty, factor, key = _lgp_pdu_aux_fields[type] if key == 'Type': value = lgp_pdu_aux_types.get(value, 'UNHANDLED') @@ -116,13 +116,13 @@ for id, pdu in info.iteritems(): # Using SystemLabel as index. But it is not uniq in all cases. # Adding the Type-Index to prevent problems - inv.append((pdu['Type'] + '-' + pdu['TypeIndex'], None)) + inv.append((pdu['Type'] + '-' + pdu['SystemLabel'] + '-' + pdu['TypeIndex'], None)) return inv def check_lgp_pdu_aux(item, params, info): info = lgp_pdu_aux_fmt(info) for id, pdu in info.iteritems(): - if item == pdu['Type'] + '-' + pdu['TypeIndex']: + if item == pdu['Type'] + '-' + pdu['SystemLabel'] + '-' + pdu['TypeIndex']: state = 0 output = [] perfdata = [] @@ -171,9 +171,9 @@ else: output.append('Door is %s' % lgp_pdu_aux_states[pdu['DoorState']]) - return (state, '%s - %s' % (nagios_state_names[state], ', '.join(output)), perfdata) + return (state, ', '.join(output), perfdata) - return (3, 'UNKNOWN - Could not find given PDU.') + return (3, 'Could not find given PDU.') check_info['lgp_pdu_aux'] = (check_lgp_pdu_aux, 'Liebert PDU AUX %s', 1, inventory_lgp_pdu_aux) diff -Nru check-mk-1.2.2p3/lgp_pdu_info check-mk-1.2.6p12/lgp_pdu_info --- check-mk-1.2.2p3/lgp_pdu_info 2013-11-05 09:23:08.000000000 +0000 +++ check-mk-1.2.6p12/lgp_pdu_info 2015-06-24 09:48:37.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. 
@@ -48,24 +48,17 @@ return (3, 'Device can not be found in SNMP output.') -check_info['lgp_pdu_info'] = (check_lgp_pdu_info, 'Liebert PDU Info %s', 0, inventory_lgp_pdu_info) - -snmp_info['lgp_pdu_info'] = ('.1.3.6.1.4.1.476.1.42.3.8.20.1', [ - 5, # LIEBERT-GP-PDU-MIB::lgpPduEntryId - 10, # LIEBERT-GP-PDU-MIB::lgpPduEntryUsrLabel - 15, # LIEBERT-GP-PDU-MIB::lgpPduEntrySysAssignLabel - 45, # LIEBERT-GP-PDU-MIB::lgpPduEntrySerialNumber - 50, # LIEBERT-GP-PDU-MIB::lgpPduEntryRbCount -]) - -snmp_scan_functions['lgp_pdu_info'] = lambda oid: oid('.1.3.6.1.2.1.1.2.0') == \ - '.1.3.6.1.4.1.476.1.42' - -#check_info["lgp_pdu_info"] = { -# "check_function" : check_lgp_pdu_info, -# "inventory_function" : inventory_lgp_pdu_info, -# "service_description" : "Liebert Info", -# "has_perfdata" : False, -# "snmp_scan_function" : lambda oid: oid(".1.3.6.1.2.1.1.2.0") == \ -# ".1.3.6.1.4.1.476.1.42" -#} +check_info["lgp_pdu_info"] = { + 'check_function': check_lgp_pdu_info, + 'inventory_function': inventory_lgp_pdu_info, + 'service_description': 'Liebert PDU Info %s', + 'snmp_info': ('.1.3.6.1.4.1.476.1.42.3.8.20.1', [ + 5, # LIEBERT-GP-PDU-MIB::lgpPduEntryId + 10, # LIEBERT-GP-PDU-MIB::lgpPduEntryUsrLabel + 15, # LIEBERT-GP-PDU-MIB::lgpPduEntrySysAssignLabel + 45, # LIEBERT-GP-PDU-MIB::lgpPduEntrySerialNumber + 50, # LIEBERT-GP-PDU-MIB::lgpPduEntryRbCount + ]), + 'snmp_scan_function': lambda oid: oid('.1.3.6.1.2.1.1.2.0') == \ + '.1.3.6.1.4.1.476.1.42', +} diff -Nru check-mk-1.2.2p3/lib/check_mkevents check-mk-1.2.6p12/lib/check_mkevents --- check-mk-1.2.2p3/lib/check_mkevents 2013-11-05 09:22:41.000000000 +0000 +++ check-mk-1.2.6p12/lib/check_mkevents 1970-01-01 00:00:00.000000000 +0000 @@ -1,136 +0,0 @@ -#!/usr/bin/python - -import os, socket, sys - -if os.getenv("OMD_ROOT"): - socket_path = os.getenv("OMD_ROOT") + "/tmp/run/mkeventd/status" -else: - socket_path = None - -def query(query, remote_host): - global socket_path - try: - if remote_host and ':' in remote_host: - parts = remote_host.split(":") - host = parts[0] - if len(parts) == 2: - port = int(parts[1]) - else: - port = 6558 - sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) - sock.settimeout(10) - sock.connect((host, port)) - else: - sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) - sock.settimeout(3) - if remote_host and remote_host[0] == '/': - socket_path = remote_host - elif remote_host: - sys.stderr.write("Invalid socket specification '%s'\n" % remote_host) - sys.exit(3) - if not socket_path: - sys.stderr.write("You running out of OMD. 
Please specify a socket with -H.\n") - sys.exit(3) - sock.connect(socket_path) - - sock.send(query) - - response_text = "" - while True: - chunk = sock.recv(8192) - response_text += chunk - if not chunk: - break - - return eval(response_text) - except SyntaxError, e: - sys.stdout.write("UNKNOWN - Invalid answer from event daemon\n%s\nQuery was:\n%s\n" \ - % (e, query)) - sys.exit(3) - - except Exception, e: - if remote_host and ':' in remote_host: - via = "TCP %s" % (remote_host) - else: - via = "UNIX socket %s" % socket_path - sys.stdout.write("UNKNOWN - Cannot connect to event daemon via %s: %s\n" % (via, e)) - sys.exit(3) - -try: - remote_host = None - try: - del sys.argv[sys.argv.index('-a')] - opt_ignore_acknowledged = True - except: - opt_ignore_acknowledged = False - - if sys.argv[1] == '-H': - remote_host = sys.argv[2] - del sys.argv[1:3] - host_name = sys.argv[1] - if len(sys.argv) > 2: - application = sys.argv[2] - else: - application = None -except: - sys.stdout.write("""Usage: check_mkevents [-H SOCKET] [-a] HOST [APPLICATION] - - -a do not take into account acknowledged events. - -H SOCKET how to connect to the status socket of the Event Console. - Use either HOST:PORT for TCP (e.g. 10.10.0.139:4478) or - a path to a file for a local UNIX socket (e.g. - /var/run/nagios/rw/mkeventd/status). -""") - sys.exit(3) - -q = "GET events\n" \ - "Filter: event_host =~ %s\n" % host_name - -if application: - q += "Filter: event_application ~~ %s\n" % application - -q += "Filter: event_phase in open ack\n" - -response = query(q, remote_host) -headers = response[0] -worst_state = 0 -worst_row = None -count = 0 -unhandled = 0 -for line in response[1:]: - count += 1 - row = dict(zip(headers, line)) - p = row["event_phase"] - if p == 'open' or not opt_ignore_acknowledged: - s = row["event_state"] - if s == 3: - if worst_state < 2: - worst_state = 3 - worst_row = row - elif s >= worst_state: - worst_state = s - worst_row = row - if p == 'open': - unhandled += 1 - -nagios_state_names = { - 0 : "OK", - 1 : "WARN", - 2 : "CRIT", - 3 : "UNKNOWN", -} - -if count == 0 and application: - sys.stdout.write("OK - no events for %s on host %s\n" % (application, host_name)) -elif count == 0: - sys.stdout.write("OK - no events for %s\n" % host_name) -else: - sys.stdout.write(nagios_state_names[worst_state] + \ - " - %d events (%d unacknowledged)" % (count, unhandled)) - if worst_row: - sys.stdout.write(", worst state is %s (Last line: %s)" % \ - (nagios_state_names[worst_state], worst_row['event_text'].encode('utf-8'))) - sys.stdout.write("\n") - -sys.exit(worst_state) - diff -Nru check-mk-1.2.2p3/lib/check_mkevents.cc check-mk-1.2.6p12/lib/check_mkevents.cc --- check-mk-1.2.2p3/lib/check_mkevents.cc 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/lib/check_mkevents.cc 2014-12-11 10:15:03.000000000 +0000 @@ -0,0 +1,322 @@ +// +------------------------------------------------------------------+ +// | ____ _ _ __ __ _ __ | +// | / ___| |__ ___ ___| | __ | \/ | |/ / | +// | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +// | | |___| | | | __/ (__| < | | | | . \ | +// | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +// | | +// | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +// +------------------------------------------------------------------+ +// +// This file is part of Check_MK. +// The official homepage is at http://mathias-kettner.de/check_mk. 
+// +// check_mk is free software; you can redistribute it and/or modify it +// under the terms of the GNU General Public License as published by +// the Free Software Foundation in version 2. check_mk is distributed +// in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +// out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +// PARTICULAR PURPOSE. See the GNU General Public License for more de- +// ails. You should have received a copy of the GNU General Public +// License along with GNU Make; see the file COPYING. If not, write +// to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +// Boston, MA 02110-1301 USA. + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +using namespace std; + +#ifndef AF_LOCAL +#define AF_LOCAL AF_UNIX +#endif +#ifndef PF_LOCAL +#define PF_LOCAL PF_UNIX +#endif + +void usage() +{ + printf("Usage: check_mkevents [-s SOCKETPATH] [-H REMOTE:PORT] [-a] HOST [APPLICATION]"); + printf("\n -a do not take into account acknowledged events.\n"); + printf(" HOST may be a hostname, and IP address or hostname/IP-address.\n"); +} + + +string prepare_hostname_regex(const char *s) +{ + const char *scan = s; + string result = ""; + while (*scan) { + if (strchr("[](){}^$.*+?|\\", *scan)) { + result += "\\"; + result += *scan; + } + else if (*scan == '/') + result += "|"; + else + result += *scan; + scan ++; + } + return result; +} + + +int main(int argc, char** argv) +{ + // Parse arguments + char *host = NULL; + char *remote_host = NULL; + char *remote_hostaddress = NULL; + char remote_hostipaddress[64]; + int remote_port = 6558; + char *application = NULL; + bool ignore_acknowledged = false; + char unixsocket_path[1024]; + unixsocket_path[0] = 0; + + int argc_count = argc; + for (int i = 1; i < argc ; i++) { + if (!strcmp("-H", argv[i]) && i < argc + 1) { + remote_host = argv[i+1]; + i++; + argc_count -= 2; + } + else if (!strcmp("-s", argv[i]) && i < argc + 1) { + strcpy(unixsocket_path, argv[i+1]); + i++; + argc_count -= 2; + } + else if (!strcmp("-a", argv[i])) { + ignore_acknowledged = true; + argc_count--; + } + else if (argc_count > 2) { + host = argv[i]; + application = argv[i+1]; + break; + } + else if (argc_count > 1) { + host = argv[i]; + break; + } + } + + if (!host) { + usage(); + exit(3); + } + + // Get omd environment + if (!unixsocket_path[0] && !remote_host) { + char *omd_path = getenv("OMD_ROOT"); + if (omd_path) + snprintf(unixsocket_path, sizeof(unixsocket_path), "%s/tmp/run/mkeventd/status", omd_path); + else { + printf("UNKNOWN - OMD_ROOT is not set, no socket path is defined.\n"); + exit(3); + } + } + + if (remote_host) { + struct hostent *he; + struct in_addr **addr_list; + + remote_hostaddress = strtok(remote_host, ":"); + if ( (he = gethostbyname(remote_hostaddress) ) == NULL) + { + printf("UNKNOWN - Unable to resolve remote host address: %s\n", remote_hostaddress); + return 3; + } + addr_list = (struct in_addr **) he->h_addr_list; + for(int i = 0; addr_list[i] != NULL; i++) + { + strcpy(remote_hostipaddress, inet_ntoa(*addr_list[i]) ); + } + + char *port_str = strtok(NULL, ":"); + if (port_str) + remote_port = atoi(port_str); + } + + //Create socket and setup connection + int sock; + struct timeval tv; + if (remote_host) { + sock = socket(AF_INET, SOCK_STREAM, 0); + tv.tv_sec = 10; + setsockopt(sock, SOL_SOCKET, SO_RCVTIMEO, (struct timeval *)&tv, sizeof(struct timeval)); + // Right now, there is no send timeout.. 
+ // setsockopt(sock, SOL_SOCKET, SO_SNDTIMEO, (struct timeval *)&tv, sizeof(struct timeval)); + + struct sockaddr_in addr; + memset(&addr, 0, sizeof(addr)); + addr.sin_family = AF_INET; + inet_aton(remote_hostipaddress, &addr.sin_addr); + addr.sin_port = htons(remote_port); + + if (0 > connect(sock, (struct sockaddr*) &addr, sizeof(struct sockaddr_in))) + { + printf("UNKNOWN - Cannot connect to event daemon via TCP %s:%d (%s)\n", + remote_hostipaddress, remote_port, strerror(errno)); + exit(3); + } + } + else { + sock = socket(PF_LOCAL, SOCK_STREAM , 0); + if (sock < 0) { + printf("UNKNOWN - Cannot create client socket: %s\n", strerror(errno)); + exit(3); + } + + tv.tv_sec = 3; + setsockopt(sock, SOL_SOCKET, SO_RCVTIMEO, (struct timeval *)&tv, sizeof(struct timeval)); + + struct sockaddr_un addr; + memset(&addr, 0, sizeof(struct sockaddr_un)); + addr.sun_family = AF_LOCAL; + strncpy(addr.sun_path, unixsocket_path, sizeof(addr.sun_path)); + + if(0 > connect(sock, (struct sockaddr*) &addr, sizeof(struct sockaddr_un))){ + printf("UNKNOWN - Cannot connect to event daemon via UNIX socket %s (%s)\n", + unixsocket_path, strerror(errno)); + exit(3); + } + } + + // Create query message + string query_message; + query_message += "GET events\nFilter: event_host "; + if (strchr(host, '/')) { + query_message += "~~ "; + query_message += prepare_hostname_regex(host); + } + else { + query_message += "=~ "; + query_message += host; + } + query_message += "\nFilter: event_phase in open ack\n"; + query_message += "OutputFormat: plain\n"; + + if (application) { + query_message += "Filter: event_application ~~ "; + query_message += application; + query_message += "\n"; + } + + // Send message + int length = write(sock, query_message.c_str(), query_message.length()); + + // Get response + char response_chunk[4096]; + memset(response_chunk, 0, sizeof(response_chunk)); + stringstream response_stream; + int read_length; + while (0 < (read_length = read(sock, response_chunk, sizeof(response_chunk)))) + { + // replace binary 0 in response with space + for (int i=0; i headers; + while (getline(linestream, token, '\x02')) { + if (!strcmp(token.c_str(), "event_phase")) + idx_event_phase = current_index; + else if (!strcmp(token.c_str(), "event_state")) + idx_event_state = current_index; + else if (!strcmp(token.c_str(), "event_text")) + idx_event_text = current_index; + headers.push_back(token); + current_index++; + } + + // Basic header validation + if (idx_event_phase == -1 || idx_event_state == -1 || idx_event_text == -1) { + printf("UNKNOWN - Invalid answer from event daemon\n%s\nQuery was:\n%s\n", + response_stream.str().c_str(), query_message.c_str()); + exit(3); + } + + // Get data + vector< vector > data; + while (getline(response_stream, line)) { + if (line.size() < headers.size()) + break; // broken / empty line + linestream.str(""); + linestream.clear(); + linestream << line; + vector data_line; + bool has_data = false; + while (getline(linestream, token, '\x02')) { + has_data = true; + data_line.push_back(token); + } + if (has_data) + data.push_back(data_line); + } + + // Generate output + string worst_row_event_text; + int worst_state = 0; + int count = 0; + int unhandled = 0; + + for (vector< vector >::iterator it = data.begin() ; it != data.end(); ++it) { + count++; + const char* p = it->at(idx_event_phase).c_str(); + if (!strcmp(p, "open") || !ignore_acknowledged) { + int s = atoi(it->at(idx_event_state).c_str()); + if (s == 3) { + if (worst_state < 2) { + worst_state = 3; + worst_row_event_text = 
it->at(idx_event_text); + } + } else if ( s >= worst_state ) { + worst_state = s; + worst_row_event_text = it->at(idx_event_text); + } + } + if (!strcmp(p, "open")) + unhandled++; + } + + if (count == 0 && application) + printf("OK - no events for %s on host %s\n", application, host); + else if (count == 0) + printf("OK - no events for %s\n", host ); + else { + const char* state_text = worst_state == 0 ? "OK" : worst_state == 1 ? "WARN" : worst_state == 2 ? "CRIT" : "UNKNOWN"; + printf("%s - %d events (%d unacknowledged)", state_text, count, unhandled); + if (worst_row_event_text.length() > 0) + printf(", worst state is %s (Last line: %s)", state_text, worst_row_event_text.c_str()); + printf("\n"); + } + return worst_state; +} diff -Nru check-mk-1.2.2p3/lib/check_mkevents.py check-mk-1.2.6p12/lib/check_mkevents.py --- check-mk-1.2.2p3/lib/check_mkevents.py 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/lib/check_mkevents.py 2015-09-21 10:59:54.000000000 +0000 @@ -0,0 +1,162 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. + +# Old, outdated Python version of check_mkevents. Do not use +# anymore... 
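
prepare_hostname_regex() in the C++ implementation above escapes
regular-expression metacharacters and turns '/' into an alternation, so an
argument like "host/alias" matches either name in the "Filter: event_host ~~"
query; the older Python version below simply passes the host name into an
"=~" filter. A rough Python equivalent of the C++ helper (a sketch inferred
from the code above, not part of the patch):

import re

def prepare_hostname_regex(s):
    # escape each name literally and join "host/alias" parts with a regex "|"
    return "|".join(re.escape(part) for part in s.split("/"))

# prepare_hostname_regex("srv01/srv01.example.com") -> 'srv01|srv01\\.example\\.com'
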
+ +import os, socket, sys + +try: + socket_path = os.getenv("OMD_ROOT") + "/tmp/run/mkeventd/status" +except: + sys.stdout.write("UNKNOWN - OMD_ROOT is not set, no socket path is defined.\n") + sys.exit(3) + +def query(query, remote_host): + try: + if remote_host: + parts = remote_host.split(":") + host = parts[0] + if len(parts) == 2: + port = int(parts[1]) + else: + port = 6558 + sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + sock.settimeout(10) + sock.connect((host, port)) + else: + sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) + sock.settimeout(3) + sock.connect(socket_path) + + sock.send(query) + + response_text = "" + while True: + chunk = sock.recv(8192) + response_text += chunk + if not chunk: + break + + return eval(response_text) + except SyntaxError, e: + sys.stdout.write("UNKNOWN - Invalid answer from event daemon\n%s\nQuery was:\n%s\n" \ + % (e, query)) + sys.exit(3) + + except Exception, e: + if remote_host: + via = "TCP %s:%d" % (host, port) + else: + via = "UNIX socket %s" % socket_path + sys.stdout.write("UNKNOWN - Cannot connect to event daemon via %s: %s\n" % (via, e)) + sys.exit(3) + +try: + remote_host = None + try: + del sys.argv[sys.argv.index('-a')] + opt_ignore_acknowledged = True + except: + opt_ignore_acknowledged = False + + try: + del sys.argv[sys.argv.index('-l')] + opt_less_verbose = True + except: + opt_less_verbose = False + + if sys.argv[1] == '-H': + remote_host = sys.argv[2] + del sys.argv[1:3] + host_name = sys.argv[1] + if len(sys.argv) > 2: + application = sys.argv[2] + else: + application = None +except: + sys.stdout.write("Usage: check_mkevents [-H (REMOTE:PORT|/path/to/unix/socket)] [-a] [-l] HOST [APPLICATION]\n") + sys.stdout.write("\n -a do not take into account acknowledged events.\n") + sys.stdout.write("\n -l less verbose output.\n") + sys.stdout.write("\n") + sys.exit(3) + +q = "GET events\n" \ + "Filter: event_host =~ %s\n" % host_name + +if application: + q += "Filter: event_application ~~ %s\n" % application + +q += "Filter: event_phase in open ack\n" + +response = query(q, remote_host) +headers = response[0] +worst_state = 0 +worst_row = None +count = 0 +unhandled = 0 +for line in response[1:]: + count += 1 + row = dict(zip(headers, line)) + p = row["event_phase"] + if p == 'open' or not opt_ignore_acknowledged: + s = row["event_state"] + if s == 3: + if worst_state < 2: + worst_state = 3 + worst_row = row + elif s >= worst_state: + worst_state = s + worst_row = row + if p == 'open': + unhandled += 1 + +nagios_state_names = { + 0 : "OK", + 1 : "WARN", + 2 : "CRIT", + 3 : "UNKNOWN", +} + +if count == 0 and application: + sys.stdout.write("OK - no events for %s on host %s\n" % (application, host_name)) +elif count == 0: + sys.stdout.write("OK - no events for %s\n" % host_name) +else: + if opt_less_verbose: + sys.stdout.write(nagios_state_names[worst_state] + " - %d events" % (count)) + if worst_row: + sys.stdout.write(" (Worst line: %s)" % (worst_row['event_text'].encode('utf-8'))) + else: + sys.stdout.write(nagios_state_names[worst_state] + \ + " - %d events (%d unacknowledged)" % (count, unhandled)) + if worst_row: + sys.stdout.write(", worst state is %s (Last line: %s)" % \ + (nagios_state_names[worst_state], worst_row['event_text'].encode('utf-8'))) + sys.stdout.write("\n") + +sys.exit(worst_state) + diff -Nru check-mk-1.2.2p3/lib/Makefile check-mk-1.2.6p12/lib/Makefile --- check-mk-1.2.2p3/lib/Makefile 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/lib/Makefile 2015-06-24 09:48:37.000000000 +0000 @@ 
-0,0 +1,31 @@ +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. + +all: check_mkevents + +check_mkevents: check_mkevents.cc + g++ -O2 -s -o check_mkevents check_mkevents.cc + +clean: + rm -f check_mkevents diff -Nru check-mk-1.2.2p3/libelle_business_shadow check-mk-1.2.6p12/libelle_business_shadow --- check-mk-1.2.2p3/libelle_business_shadow 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/libelle_business_shadow 2015-09-21 10:59:54.000000000 +0000 @@ -0,0 +1,410 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. + +# .--Example output from agent-------------------------------------------. 
+# <<>> +# DBShadow Oracle Libelle AG +# trd : Shared Memory +# Release: 5.7.6.0.053 (31003) +# +# Process-Id : 7929872 +# Start-Time : 12.05.2014 16:55 +# Shm-Id : 2097154 +# Sem-Id : 23068672 +# Number DB : 2 +# Company : khkirchheim +# DB Size : 999 +# Business : 2 +# Long-Distance : 0 +# Raw-Device : 0 +# Flat-Files : 0 +# Trace : OFF +# Host : kkn-dbnode1-2351 +# System : aix.61 +# Status : RUN +# Authentication Type : none +# +# -------------- TRD-Parameter -------------- +# TRD_ARCHDIR_REPEAT : 30 +# TRD_ARCHIVER_TYPE : turbo +# TRD_AUDIT_TRAIL : FALSE +# TRD_CHECK_RELEASE : TRUE +# TRD_COPYWAIT : 0/0 +# TRD_COPY_DELETE_BACKUPFILES : NONE +# TRD_DISKSPACE : 70/90 +# TRD_FFARCHIVEFILE_SIZE : 10 +# TRD_FFBACKUPBLOCK_SIZE : 10 +# TRD_FFBACKUPFILE_SIZE : 100 +# TRD_FFCHECK_PERM : TRUE +# TRD_FFLOGFORMAT : ff__.ars +# TRD_FILE_ACCESS : FILE_POINTER +# TRD_FILE_BUFFER_SIZE : 1 +# TRD_FI_OWNER_ERRORLEVEL : warning +# TRD_FI_PERM_ERRORLEVEL : warning +# TRD_FI_TIME_ERRORLEVEL : warning +# TRD_HOME : /opt/libelle +# TRD_HOME_SIZE_REQUIRED : 1000 +# TRD_HOSTNAME : kkn-dbnode1-2351 +# TRD_LANGUAGE : english +# TRD_MAIL_LOCALINTERFACE : +# TRD_MAX_CONNECTIONS : 100 +# TRD_MAX_FF_ENTRIES : 200 +# TRD_MAX_MAPPINGS : 200 +# TRD_MAX_USER_MAPPINGS : 200 +# TRD_PARALLEL_TRANS_PORTS : +# TRD_PORT : 7200 +# TRD_POWERUP : 0 +# TRD_RECOVERTIMEOUT : 1500 +# TRD_ROOT_ALLOW : FALSE +# TRD_SAVE_DELETE : 7 +# TRD_SAVE_SIZE : 5 +# TRD_SIGCLD_RESET : FALSE +# TRD_SOCKETLEN : 32 +# TRD_SOCKETTIMEOUT : 120 +# TRD_SOCKET_BLOCKING : FALSE +# TRD_SPFILE_PATH : DEFAULT +# TRD_TIMEOUT : 60 +# TRD_TRACELEVEL : 30 +# TRD_UI_NUM : 20 +# TRD_USE_ACCOUNT_TYPE : local +# +# -------------- Process List -------------- +# Pid : 7929872 -1 Timestamp : 22.05.2014 17:36:38 Type : main +# Pid : 7929872 -258 Timestamp : 22.05.2014 17:36:38 Type : listen +# Pid : 2556268 -1 Timestamp : 22.05.2014 17:36:37 Type : recover KHVN1 +# +# ============== Configuration KHVN1 =============== +# +# -------------- Parameter ----------------- +# APPLICATION_SYSTEM = +# ARCHIVE_ALTERNATE_PATH = /u02/oradata/khvn/archive_stby +# ARCHIVE_CHECK_DB = 1 +# ARCHIVE_COLD = FALSE +# ARCHIVE_DELETE = TRUE +# ARCHIVE_MAX_SWITCH = 20 +# ARCHIVE_NUM = 131511 +# ARCHIVE_PATH = /u02/oradata/khvn/archive +# ARCHIVE_PIPE = TRUE +# ARCHIVE_SHIP_LOGFILES = TRUE +# ARCHIVE_STATUS = RUN +# CHECK_DISKSPACE = TRUE +# COMPRESSED_COPY = TRUE +# COPY_BACK = FALSE +# COPY_BACKUP_DIRECTORY = +# COPY_CHECK_MIRROR = TRUE +# COPY_COLD = FALSE +# COPY_CONTINUE = FALSE +# COPY_FROM_COPY = +# COPY_START_ARCHIVER = TRUE +# COPY_START_RECOVER = TRUE +# COPY_STATUS = STOP +# CREATE_DIRECTORY = FALSE +# CREATE_FULL_DIRECTORY = TRUE +# CURRENT_FUNCTION = recover +# CURRENT_STATUS = 20140522 17:36:17;normal;archiver:131511;recover:131492; +# CUSTOM1 = +# CUSTOM2 = +# DBS_PATH = +# DB_USER = +# DEFINED_SWITCH = FALSE +# DESCRIPTION = KHVN1 +# EMERGENCY_AUTOMATIC = FALSE +# EMERGENCY_SID = +# EMERGENCY_WAIT_TIME = 0 +# EXTERNAL_BACKUP = FALSE +# EXTERNAL_COPY = FALSE +# EXTERNAL_RESTORE = FALSE +# FAST_RECOVER = 0 +# FF_ARCHIVE_NUM = 1 +# FF_BACKUP_ID = 31.12.2037 23:59:00 +# FF_RECOVER_NUM = 1 +# FF_SWITCH_NUM = 1 +# HIGH_COMPRESSION = FALSE +# INTERNAL_PASSWORD = +# LOGFORMAT = logarcKHVN.743803837.|.0f.1 +# MAKE_DB = FALSE +# MIRROR_ARCHIVE_PATH = /u02/oradata/khvn/archive +# MIRROR_A_INTERFACE = kkn-dbnode1-2351 +# MIRROR_B_INTERFACE = kkn-dbnode1-2351 +# MIRROR_DBS_PATH = +# MIRROR_HOME = /u01/app/ora10/product/10.2.0/db_1 +# MIRROR_HOST = kkn-dbnode1-2351 +# MIRROR_INSTANCE = +# 
MIRROR_PASSWORD = +# MIRROR_RELEASE = 10.2.0.4.0 +# MIRROR_SID = KHVNS +# MIRROR_USER = +# NAME = KHVN1 +# ORACLE_HOME = /u01/app/ora10/product/10.2.0/db_1 +# ORACLE_RELEASE = 10.2.0.4.0 +# ORACLE_SID = KHVN +# PARALLEL_ARCHIVER = 0 +# PARALLEL_COPY = 4 +# RAW_DEVICE = FALSE +# REAL_A_INTERFACE = kkn-dbnode3-2351 +# REAL_B_INTERFACE = kkn-dbnode3-2351 +# REAL_HOST = kkn-dbnode3-2351 +# RECOVER_CHECK_FILES = 30 +# RECOVER_CONTINUE = FALSE +# RECOVER_DELAY = 240 +# RECOVER_DELETE = TRUE +# RECOVER_LOSSLESS = FALSE +# RECOVER_MODE = TIME_DELAY +# RECOVER_NUM = 131493 +# RECOVER_OPEN_READONLY = FALSE +# RECOVER_REDO_LOG_PATH = +# RECOVER_ROLLBACK = FALSE +# RECOVER_ROLLBACK_PATH = +# RECOVER_ROLLBACK_SIZE = +# RECOVER_START_CLEAR_LOGFILES = FALSE +# RECOVER_START_MAKE_DB = FALSE +# RECOVER_STATUS = RUN +# RECOVER_STOP_COMPLETE = TRUE +# RECOVER_STOP_LOSSLESS = FALSE +# RECOVER_TO = 31.12.2099 23:59:00 +# RECOVER_TO_NUMBER = 41284 +# RECOVER_VERIFY_STATUS = STOP +# RECOVER_VERIFY_SYNC = +# SAME_FS_PARALLEL = TRUE +# STANDBY = TRUE +# STRUCTURE_CHANGE = TRUE +# STRUCTURE_CHECK_INTERVAL = 60 +# STRUCTURE_CHECK_NOLOGGING = TRUE +# STRUCTURE_STATUS = STOP +# TRD_PASSWORD = +# +# --------- CRON Check Mirror Files ---------- +# CRON[ 0] : 1000010 010000000000 +# +# -------------- Active Processes ---------- +# trdcopy : +# trdarchiver : +# trdrecover : 2556268 22.05.2014 17:36:34 RUN +# trdstructure : +# +# -------------- Statistic of Recover ------ +# Last update : 22.05.2014 17:36:34 +# Archive-Dir total : 150 GB +# Archive-Dir free : 143 GB +# Archive-Dir limit warning : 70 +# Arshive-Dir limit error : 90 +# Number of total files : 18 +# Number logfiles recovered : 1034 +# logfile-size recovered : 123 GB +# Average : 5 files/h +# Average : 608 MB/h +# Maximum : 14 files/h +# Maximum : 2.4 GB/h +# Current : 6 files/h +# Current : 1.1 GB/h +# Min. Rollback : +# State of mirror : 22.05.2014 13:26:50 +# Max. Rollforward : +# +# -------------- Last Message -------------- +# recover 20140522172734OK 1212Recover of /u02/oradata/khvn/archive/logarcKHVN.743803837.131493.1 at 17:40 22.05.2014. +# + +#. 
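
Judging from the parser below, which compares against "Host " with a trailing
blank and re-joins the "Start-Time" value with a ":", the agent section above
apparently reaches the check split on colons. An assumed illustration of the
resulting info rows (values taken from the example output):

info = [
    ["Release", " 5.7.6.0.053 (31003)"],
    ["Host ", " kkn-dbnode1-2351"],
    ["Start-Time ", " 12.05.2014 16", "55"],  # "16:55" is re-joined by the parser
    ["Status ", " RUN"],
]
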
+ +def check_libelle_business_shadow_to_mb(size): + if size.endswith("MB"): + size = int(float(size.replace("MB", ""))) + elif size.endswith("GB"): + size = int(float(size.replace("GB", ""))) * 1024 + elif size.endswith("TB"): + size = int(float(size.replace("TB", ""))) * 1024 * 1024 + elif size.endswith("PB"): + size = int(float(size.replace("PB", ""))) * 1024 * 1024 * 1024 + elif size.endswith("EB"): + size = int(float(size.replace("EB", ""))) * 1024 * 1024 * 1024 * 1024 + else: + size = int(float(size)) + return size + +# parses agent output into a dict +def check_libelle_business_shadow_parse(info): + parsed = {} + for line in info: + if len(line) > 1 and line[0].startswith("Host "): + parsed["host"] = re.sub("^ +", "", line[1]) + elif len(line) > 2 and line[0].startswith("Start-Time "): + parsed["start_time"] = re.sub("^ +", "", line[1]) + ":" + line[2] + elif len(line) > 1 and line[0] == "Release": + parsed["release"] = re.sub("^ +", "", line[1]) + elif len(line) > 1 and line[0].startswith("Status "): + parsed["libelle_status"] = re.sub("^ +", "", line[1]) + elif len(line) > 3 and ( line[0].startswith("trdrecover ") or \ + line[0].startswith("trdarchiver ") ): + parsed["process"] = re.sub(" +$", "", line[0]) + parsed["process_status"] = re.sub("^[0-9]+ +", "", line[3]) + elif len(line) > 1 and line[0].startswith("Archive-Dir total "): + parsed["arch_total_mb"] = check_libelle_business_shadow_to_mb(re.sub(" ", "", line[1])) + elif len(line) > 1 and line[0].startswith("Archive-Dir free "): + parsed["arch_free_mb"] = check_libelle_business_shadow_to_mb(re.sub(" ", "", line[1])) + return parsed + +# .--info----------------------------------------------------------------. +# | _ __ | +# | (_)_ __ / _| ___ | +# | | | '_ \| |_ / _ \ | +# | | | | | | _| (_) | | +# | |_|_| |_|_| \___/ | +# | | +# '----------------------------------------------------------------------' + +def inventory_libelle_business_shadow_info(info): + return [ (None, None) ] + +def check_libelle_business_shadow_info(_no_item, _no_params, info): + parsed = check_libelle_business_shadow_parse(info) + message = "Libelle Business Shadow" + if "host" in parsed.keys(): + message += ", Host: %s" % parsed["host"] + if "release" in parsed.keys(): + message += ", Release: %s" % parsed["release"] + if "start_time" in parsed.keys(): + message += ", Start Time: %s" % parsed["start_time"] + + return 0, message + +check_info["libelle_business_shadow.info"] = { + "check_function" : check_libelle_business_shadow_info, + "inventory_function" : inventory_libelle_business_shadow_info, + "service_description" : "Libelle Business Shadow Info", + "has_perfdata" : False, +} + +#. +# .--status--------------------------------------------------------------. 
+# | _ _ | +# | ___| |_ __ _| |_ _ _ ___ | +# | / __| __/ _` | __| | | / __| | +# | \__ \ || (_| | |_| |_| \__ \ | +# | |___/\__\__,_|\__|\__,_|___/ | +# | | +# '----------------------------------------------------------------------' + +def inventory_libelle_business_shadow_status(info): + parsed = check_libelle_business_shadow_parse(info) + if "libelle_status" in parsed.keys(): + return [ (None, None) ] + else: + return [] + +def check_libelle_business_shadow_status(_no_item, _no_params, info): + parsed = check_libelle_business_shadow_parse(info) + status = 0 + if "libelle_status" in parsed.keys(): + message = "Status is: %s" % parsed["libelle_status"] + if parsed["libelle_status"] != "RUN": + status = 2 + else: + message = "No information about libelle status found in agent output" + status = 3 + + return status, message + +check_info["libelle_business_shadow.status"] = { + "check_function" : check_libelle_business_shadow_status, + "inventory_function" : inventory_libelle_business_shadow_status, + "service_description" : "Libelle Business Shadow Status", + "has_perfdata" : False, +} + +#. +# .--process-------------------------------------------------------------. +# | | +# | _ __ _ __ ___ ___ ___ ___ ___ | +# | | '_ \| '__/ _ \ / __/ _ \/ __/ __| | +# | | |_) | | | (_) | (_| __/\__ \__ \ | +# | | .__/|_| \___/ \___\___||___/___/ | +# | |_| | +# '----------------------------------------------------------------------' + +def inventory_libelle_business_shadow_process(info): + parsed = check_libelle_business_shadow_parse(info) + if "process" in parsed.keys(): + return [ (None, None) ] + else: + return [] + +def check_libelle_business_shadow_process(_no_item, _no_params, info): + parsed = check_libelle_business_shadow_parse(info) + status = 0 + if "process" in parsed.keys(): + message = "Active Process is: %s, Status: %s" % \ + (parsed["process"], parsed["process_status"]) + if parsed["process_status"] != "RUN": + status = 2 + else: + message = "No Active Process found!" + status = 2 + + return status, message + +check_info["libelle_business_shadow.process"] = { + "check_function" : check_libelle_business_shadow_process, + "inventory_function" : inventory_libelle_business_shadow_process, + "service_description" : "Libelle Business Shadow Process", + "has_perfdata" : False, +} + +#. +# .--archive dir---------------------------------------------------------. +# | _ _ _ _ | +# | __ _ _ __ ___| |__ (_)_ _____ __| (_)_ __ | +# | / _` | '__/ __| '_ \| \ \ / / _ \ / _` | | '__| | +# | | (_| | | | (__| | | | |\ V / __/ | (_| | | | | +# | \__,_|_| \___|_| |_|_| \_/ \___| \__,_|_|_| | +# | | +# '----------------------------------------------------------------------' + +def inventory_libelle_business_shadow_archive_dir(info): + parsed = check_libelle_business_shadow_parse(info) + if "arch_total_mb" in parsed.keys() and "arch_free_mb" in parsed.keys(): + return [ ("Archive Dir", {}) ] + else: + return [] + +def check_libelle_business_shadow_archive_dir(item, params, info): + parsed = check_libelle_business_shadow_parse(info) + fslist = [] + fslist.append((item, parsed["arch_total_mb"], parsed["arch_free_mb"])) + + return df_check_filesystem_list(item, params, fslist) + + +check_info["libelle_business_shadow.archive_dir"] = { + "check_function" : check_libelle_business_shadow_archive_dir, + "inventory_function" : inventory_libelle_business_shadow_archive_dir, + "service_description" : "Libelle Business Shadow %s", + "has_perfdata" : True, + "group" : "filesystem", + "includes" : [ "df.include" ], +} + +#. 
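
check_libelle_business_shadow_to_mb() above normalizes the human-readable
sizes from the "Statistic of Recover" block to MB; the archive_dir check then
feeds them to df_check_filesystem_list(). A table-driven sketch of the same
conversion; the sample values are hypothetical:

def to_mb(size):
    # "150GB" -> 153600; plain numbers are already MB
    for suffix, factor in (("MB", 1), ("GB", 1024), ("TB", 1024 ** 2),
                           ("PB", 1024 ** 3), ("EB", 1024 ** 4)):
        if size.endswith(suffix):
            return int(float(size[:-2])) * factor
    return int(float(size))

assert to_mb("150GB") == 150 * 1024  # "Archive-Dir total : 150 GB"
assert to_mb("999") == 999           # "DB Size : 999"
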
diff -Nru check-mk-1.2.2p3/libelle_business_shadow.archive_dir check-mk-1.2.6p12/libelle_business_shadow.archive_dir
--- check-mk-1.2.2p3/libelle_business_shadow.archive_dir 1970-01-01 00:00:00.000000000 +0000
+++ check-mk-1.2.6p12/libelle_business_shadow.archive_dir 2015-06-24 09:48:36.000000000 +0000
@@ -0,0 +1,35 @@
+title: Libelle Business Shadow: Total / Used Space in Archive Dir
+agents: linux, aix, solaris, hpux
+catalog: app/libelle
+license: GPL
+distribution: check_mk
+description:
+ Checks the total / used capacity in the Archive Dir of Libelle Business
+ Shadow.
+
+ Returns {WARN} or {CRIT} if usage is above the given levels.
+
+ It uses the check logic of the {df} check, so for configuration
+ parameters and examples please refer to the man page of {df}.
+
+item:
+ "Archive Dir" is currently the only item.
+
+perfdata:
+ Three values: The first value is the used space in the Archive Dir in MB.
+ Also the minimum (0 MB), maximum (total size) and the warning and critical
+ levels in MB are provided.
+ The second is the change of the usage in MB per range since the last check
+ (e.g. in MB per 24 hours) and the third is the averaged change (the
+ so-called trend), also in MB per range. Please note that performance
+ data for trends is enabled by default. You can globally disable that in
+ {main.mk} with {filesystem_default_levels["trend_perfdata"] = False}.
+
+inventory:
+ Finds exactly one check on every node.
+
+[parameters]
+parameters (dict): See the man page of {df}.
+
+[configuration]
+filesystem_default_levels: And others; see the man page of {df}.
diff -Nru check-mk-1.2.2p3/libelle_business_shadow.info check-mk-1.2.6p12/libelle_business_shadow.info
--- check-mk-1.2.2p3/libelle_business_shadow.info 1970-01-01 00:00:00.000000000 +0000
+++ check-mk-1.2.6p12/libelle_business_shadow.info 2015-06-24 09:48:36.000000000 +0000
@@ -0,0 +1,13 @@
+title: Libelle Business Shadow: System Info
+agents: linux, aix, solaris, hpux
+catalog: app/libelle
+license: GPL
+distribution: check_mk
+description:
+ Gathers system information (hostname, release, start time) from
+ Libelle Business Shadow and displays it.
+
+ This check is informational only and always returns {OK}.
+
+inventory:
+ Creates exactly one check per device.
diff -Nru check-mk-1.2.2p3/libelle_business_shadow.process check-mk-1.2.6p12/libelle_business_shadow.process
--- check-mk-1.2.2p3/libelle_business_shadow.process 1970-01-01 00:00:00.000000000 +0000
+++ check-mk-1.2.6p12/libelle_business_shadow.process 2015-06-24 09:48:36.000000000 +0000
@@ -0,0 +1,14 @@
+title: Libelle Business Shadow: Active Process
+agents: linux, aix, solaris, hpux
+catalog: app/libelle
+license: GPL
+distribution: check_mk
+description:
+ Checks for an active trdrecover or trdarchiver process of Libelle Business
+ Shadow and reports its status.
+
+ It returns {OK} if one of them is found and has status {RUN}.
+ {CRIT} is returned otherwise.
+
+inventory:
+ Creates exactly one check per device.
diff -Nru check-mk-1.2.2p3/libelle_business_shadow.status check-mk-1.2.6p12/libelle_business_shadow.status
--- check-mk-1.2.2p3/libelle_business_shadow.status 1970-01-01 00:00:00.000000000 +0000
+++ check-mk-1.2.6p12/libelle_business_shadow.status 2015-06-24 09:48:36.000000000 +0000
@@ -0,0 +1,12 @@
+title: Libelle Business Shadow: Status
+agents: linux, aix, solaris, hpux
+catalog: app/libelle
+license: GPL
+distribution: check_mk
+description:
+ Checks the status of Libelle Business Shadow.
+
+ It returns {OK} on status {RUN} and {CRIT} otherwise.
+ +inventory: + Creates exactly one check per device. diff -Nru check-mk-1.2.2p3/license.include check-mk-1.2.6p12/license.include --- check-mk-1.2.2p3/license.include 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/license.include 2015-09-21 10:59:54.000000000 +0000 @@ -0,0 +1,58 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. + + +def license_check_levels(have, used, params): + if params == None: + warn = have + crit = have + elif params == False: + warn = None + crit = None + elif type(params[0]) == int: + warn = max(0, have - params[0]) + crit = max(0, have - params[1]) + else: + warn = have * (1 - params[0]/100) + crit = have * (1 - params[1]/100) + + perfdata = [ ( "licenses", used, warn, crit, 0, have )] + if used <= have: + infotext = "used %d out of %d licenses" % (used, have) + else: + infotext = "used %d licenses, but you have only %d" % (used, have) + + if crit != None and used >= crit: + status = 2 + elif warn != None and used >= warn: + status = 1 + else: + status = 0 + + if status: + infotext += " (levels at %d/%d)" % (warn, crit) + + return status, infotext, perfdata diff -Nru check-mk-1.2.2p3/liebert_bat_temp check-mk-1.2.6p12/liebert_bat_temp --- check-mk-1.2.2p3/liebert_bat_temp 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/liebert_bat_temp 2015-06-24 09:48:37.000000000 +0000 @@ -0,0 +1,62 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. 
See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. + + +liebert_bat_temp_default = (40, 50) # warning / critical + +def inventory_liebert_bat_temp(info): + if info: + return [ ( None, "liebert_bat_temp_default") ] + +def check_liebert_bat_temp(item, params, info): + warn, crit = params + if not info: + return 3, "Data Missing in SNMP Output" + line = info[0] + temp = saveint(line[0]) + perfdata = [ ( "temp", temp, warn, crit, 80 ) ] + infotext = "temperature: %d°C , (warn/crit at %d°C/%d°C) " % \ + (temp, warn, crit) + + if temp >= crit: + return (2, infotext, perfdata) + elif temp >= warn: + return (1, infotext, perfdata) + else: + return (0, infotext, perfdata) + + +check_info['liebert_bat_temp'] = { + "inventory_function" : inventory_liebert_bat_temp, + "check_function" : check_liebert_bat_temp, + "service_description" : "Battery Temp", + "has_perfdata" : True, + "group" : "hw_single_temperature", + "snmp_info" : (".1.3.6.1.4.1.476.1.42.3.4.1.3.3.1.3", "1"), + "snmp_scan_function" : lambda oid: oid(".1.3.6.1.2.1.1.2.0") in \ + [".1.3.6.1.4.1.476.1.42"] + +} diff -Nru check-mk-1.2.2p3/liebert_chiller_status check-mk-1.2.6p12/liebert_chiller_status --- check-mk-1.2.2p3/liebert_chiller_status 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/liebert_chiller_status 2015-06-24 09:48:37.000000000 +0000 @@ -0,0 +1,44 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. 
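
license_check_levels() in license.include above accepts four parameter
shapes: None (warn/crit once every license is in use), False (no levels at
all), a pair of ints (number of licenses that must remain free) or a pair of
floats (percentage that must remain free). A sketch that mirrors only the
level computation; the name and values are illustrative:

def preview_license_levels(have, params):
    if params is None:
        return have, have                      # alert when everything is used
    if params is False:
        return None, None                      # levels disabled
    if type(params[0]) == int:
        return max(0, have - params[0]), max(0, have - params[1])
    return have * (1 - params[0] / 100.0), have * (1 - params[1] / 100.0)

# With 100 licenses the absolute and the percentage form coincide here:
#   preview_license_levels(100, (10, 5))     -> (90, 95)
#   preview_license_levels(100, (10.0, 5.0)) -> (90.0, 95.0)
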
+ +def inventory_liebert_chiller_status(info): + return [(None,None)] + +def check_liebert_chiller_status(_no_item, _no_params, info): + status = info[0][0] + if status not in [ "5", "7" ]: + return 2, "Device is in a non OK state" + return 0, "Device is in a OK state" + + +check_info["liebert_chiller_status"] = { + "check_function" : check_liebert_chiller_status, + "inventory_function" : inventory_liebert_chiller_status, + "service_description" : "Chiller status", + "has_perfdata" : False, + "snmp_scan_function" : lambda oid: oid(".1.3.6.1.2.1.1.2.0").startswith(".1.3.6.1.4.1.476.1.42.4.3.20"), + "snmp_info" : (".1.3.6.1.4.1.476.1.42.4.3.20.1.1.20", [ 2 ]) +} diff -Nru check-mk-1.2.2p3/livestatus_status check-mk-1.2.6p12/livestatus_status --- check-mk-1.2.2p3/livestatus_status 2013-11-05 09:23:08.000000000 +0000 +++ check-mk-1.2.6p12/livestatus_status 2015-09-21 10:59:54.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. @@ -37,7 +37,9 @@ while n < len(info): site = info[n][0][1:-1] if n == len(info) - 1 or info[n+1][0].startswith('['): - parsed[site] = None # Site is down + # Handle the case if the check running in cluster + if site not in parsed.keys(): + parsed[site] = None # Site is down n += 1 else: headers = info[n+1] @@ -62,31 +64,26 @@ def check_livestatus_status(item, _no_params, info): parsed = livestatus_status_parse(info) if item not in parsed: - return (3, "UNKNOWN - Site does not exist") + return (3, "Site does not exist") status = parsed[item] # Ignore down sites. This happens on a regular basis due to restarts # of the core. The availability of a site is monitored with 'omd_status'. if status == None: - return (0, "OK - Site is currently not running") + return (0, "Site is currently not running") this_time = time.time() perfdata = [] infos = [] for counter, title in livestatus_status_counters: - try: - timedif, rate = get_counter("livestatus_status.%s.%s" % (item, counter), this_time, saveint(status[counter])) - perfdata.append((counter, rate)) - infos.append("%.1f %s/s" % (rate, title)) - # If one counter wraps then *all* counters should wrap (because the core has - # been restarted) - except MKCounterWrapped: - wrapped = True + rate = get_rate("livestatus_status.%s.%s" % (item, counter), this_time, saveint(status[counter])) + perfdata.append((counter, rate)) + infos.append("%.1f %s/s" % (rate, title)) infos += [ "Core version: %s" % status["program_version"], "Livestatus version: %s" % status["livestatus_version"]] - return (0, "OK - " + ", ".join(infos), perfdata) + return (0, ", ".join(infos), perfdata) check_info['livestatus_status'] = { Binary files /tmp/tmpz1J_py/xquijR6Ryw/check-mk-1.2.2p3/livestatus.tar.gz and /tmp/tmpz1J_py/GXS8RRKTxu/check-mk-1.2.6p12/livestatus.tar.gz differ diff -Nru check-mk-1.2.2p3/lnx_bonding check-mk-1.2.6p12/lnx_bonding --- check-mk-1.2.2p3/lnx_bonding 2013-11-05 09:23:08.000000000 +0000 +++ check-mk-1.2.6p12/lnx_bonding 2015-06-24 09:48:37.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . 
\ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. diff -Nru check-mk-1.2.2p3/lnx_cpuinfo check-mk-1.2.6p12/lnx_cpuinfo --- check-mk-1.2.2p3/lnx_cpuinfo 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/lnx_cpuinfo 2015-06-24 09:48:37.000000000 +0000 @@ -0,0 +1,102 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. + +# Example output: +# <<>> +# processor:0 +# vendor_id:GenuineIntel +# cpu family:6 +# model:45 +# model name:Intel(R) Core(TM) i7-3930K CPU @ 3.20GHz +# stepping:7 +# microcode:0x70c +# cpu MHz:1200.000 +# cache size:12288 KB +# physical id:0 +# siblings:12 +# core id:0 +# cpu cores:6 +# apicid:0 +# initial apicid:0 +# fpu:yes +# fpu_exception:yes +# cpuid level:13 +# wp:yes +# bogomips:6399.88 +# clflush size:64 +# cache_alignment:64 +# address sizes:46 bits physical, 48 bits virtual +# power management: +# +# processor:1 +# ... + +# Note: This node is also being filled by dmidecode +def inv_lnx_cpuinfo(info): + node = inv_tree("hardware.cpu.") + num_threads_total = 0 + sockets = set([]) + for varname, value in info: + if varname == "cpu cores": + node["cores_per_cpu"] = int(value) + elif varname == "siblings": + node["threads_per_cpu"] = int(value) + elif varname == "mode name": + node["model"] = value + elif varname == "vendor_id": + node["vendor"] = { + "GenuineIntel" : "intel", + "AuthenticAMD" : "amd", + }.get(value, value) + elif varname == "cache size": + node["cache_size"] = int(value.split()[0]) * 1024 # everything is normalized to bytes! + elif varname == "model name": + node["model"] = value + # For the following two entries we assume that all + # entries are numbered in increasing order in /proc/cpuinfo. 
+ elif varname == "processor": + num_threads_total = int(value) + 1 + elif varname == "physical id": + sockets.add(int(value)) + elif varname == "flags": + if re.search(" lm ", value): + node["arch"] = "x86_64" + else: + node["arch"] = "i386" + + num_sockets = len(sockets) + + if num_threads_total: + node.setdefault("cores_per_cpu", 1) + node.setdefault("threads_per_cpu", 1) + node["cores"] = num_sockets * node["cores_per_cpu"] + node["threads"] = num_sockets * node["threads_per_cpu"] + node["cpus"] = num_sockets + +inv_info['lnx_cpuinfo'] = { + "inv_function" : inv_lnx_cpuinfo, +} + diff -Nru check-mk-1.2.2p3/lnx_distro check-mk-1.2.6p12/lnx_distro --- check-mk-1.2.2p3/lnx_distro 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/lnx_distro 2015-08-07 12:22:01.000000000 +0000 @@ -0,0 +1,133 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. 
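
inv_lnx_cpuinfo() above collects per-package figures while scanning and
derives the totals at the end: the distinct "physical id" values yield the
socket count, which is multiplied by "cpu cores" and "siblings". (The
"mode name" branch above looks like a typo for "model name" and should never
match a real /proc/cpuinfo.) The arithmetic, with hypothetical values for a
dual-socket machine:

# values as they would be collected from /proc/cpuinfo (assumed example)
sockets = set([0, 1])   # distinct "physical id" entries
cores_per_cpu = 6       # "cpu cores"
threads_per_cpu = 12    # "siblings"

cpus = len(sockets)                # 2 physical packages
cores = cpus * cores_per_cpu       # 12 cores
threads = cpus * threads_per_cpu   # 24 hardware threads
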
+ +# Example outputs from various systems: +# <<>> +# /etc/debian_version|wheezy/sid + +# <<>> +# /etc/lsb-release|DISTRIB_ID=Ubuntu|DISTRIB_RELEASE=12.10|DISTRIB_CODENAME=quantal|DISTRIB_DESCRIPTION="Ubuntu 12.10" + +# <<>> +# /etc/redhat-release|Red Hat Enterprise Linux Server release 6.5 (Santiago) + +# <<>> +# /etc/SuSE-release|SUSE Linux Enterprise Server 11 (x86_64)|VERSION = 11|PATCHLEVEL = 2 + + +def inv_lnx_distro(info): + node = inv_tree("software.os.") + for line in info: + if line[0] == '/etc/lsb-release': + inv_lnx_parse_lsb_release(node, line[1:]) + elif line[0] == '/etc/debian_version': + inv_lnx_parse_debian(node, line[1]) + elif line[0] == '/etc/redhat-release': + inv_lnx_parse_redhat_release(node, line[1]) + elif line[0] == '/etc/SuSE-release': + inv_lnx_parse_suse_release(node, line[1:]) + +def inv_lnx_parse_suse_release(node, line): + node["type"] = "linux" + node["vendor"] = "SuSE" + if len(line) == 3: + version = "%s.%s" % (line[1].split()[-1], line[2].split()[-1]) + else: + version = "%s.0" % line[1].split()[-1] + node["version"] = version + if version == "11.2": + node["code_name"] = "Emerald" + elif version == "11.3": + node["code_name"] = "Teal" + elif version == "11.4": + node["code_name"] = "Celadon" + elif version == "12.1": + node["code_name"] = "Asparagus" + elif version == "12.2": + node["code_name"] = "Mantis" + elif version == "12.3": + node["code_name"] = "Darthmouth" + elif version == "13.1": + node["code_name"] = "Bottle" + +def inv_lnx_parse_redhat_release(node, line): + node["type"] = "linux" + parts = line.split("(") + left = parts[0].strip() + node["code_name"] = parts[1].rstrip(")") + name, _release, version = left.rsplit(None, 2) + if name.startswith("Red Hat"): + node["vendor"] = "Red Hat" + node["version"] = version + node["name"] = left + + +def inv_lnx_parse_lsb_release(node, lines): + node["type"] = "linux" + for line in lines: + varname, value = line.split("=", 1) + value = value.strip("'").strip('"') + if varname == "DISTRIB_ID": + node["vendor"] = value + elif varname == "DISTRIB_RELEASE": + node["version"] = value + elif varname == "DISTRIB_CODENAME": + node["code_name"] = value.title() + elif varname == "DISTRIB_DESCRIPTION": + node["name"] = value + +def inv_lnx_parse_debian(node, line): + node["type"] = "linux" + if "name" not in node: # Do not overwrite Ubuntu information + node["name"] = "Debian " + line + node["vendor"] = "Debian" + node["version"] = line + if line.startswith("2.0."): + node["code_name"] = "Hamm" + elif line.startswith("2.1."): + node["code_name"] = "Slink" + elif line.startswith("2.2."): + node["code_name"] = "Potato" + elif line.startswith("3.0."): + node["code_name"] = "Woody" + elif line.startswith("3.1."): + node["code_name"] = "Sarge" + elif line.startswith("4."): + node["code_name"] = "Etch" + elif line.startswith("5."): + node["code_name"] = "Lenny" + elif line.startswith("6."): + node["code_name"] = "Squeeze" + elif line.startswith("7."): + node["code_name"] = "Wheezy" + elif line.startswith("8."): + node["code_name"] = "Jessie" + + + +inv_info['lnx_distro'] = { + "inv_function" : inv_lnx_distro, +} diff -Nru check-mk-1.2.2p3/lnx_if check-mk-1.2.6p12/lnx_if --- check-mk-1.2.2p3/lnx_if 2013-11-05 09:23:08.000000000 +0000 +++ check-mk-1.2.6p12/lnx_if 2015-06-24 09:48:37.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . 
\ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. @@ -37,6 +37,7 @@ # Duplex: Unknown! (255) # Auto-negotiation: on # Link detected: no +# Address: de:ad:be:af:00:01 # [eth1] # Speed: 1000Mb/s # Duplex: Full @@ -52,15 +53,20 @@ current_nic = None index = 0 for line in info: + # Be careful! On clustered hosts we have more than one perf-counters section + # and ethtool section. This needs to be handled. Sadly we have no section + # headers. Try to detect it by data format. if line[0].startswith('['): current_nic = line[0][1:-1] index += 1 nic_info[current_nic]['index'] = index - elif current_nic == None: # still in perf-counters subsection + elif len(line) == 2 and len(line[1].split()) >= 16: + # This looks like a perf-counter line nic = line[0] nic_info[nic] = { "counters": map(int, line[1].split()) } else: - nic_info[current_nic][line[0].strip()] = line[1].strip() + # ethtool data line + nic_info[current_nic][line[0].strip()] = ":".join(line[1:]).strip() # if index is 0 we either have found no nics or no information # from ethtool is present. In the latter case we continue and @@ -129,8 +135,10 @@ else: ifOperStatus = 4 # unknown (NIC has never been used) - # There is no mac adress data available for the moment - ifPhysAddress = '' + if attr.get("Address"): + ifPhysAddress = "".join([chr(int(x, 16)) for x in attr.get("Address", "").split(":")]) + else: + ifPhysAddress = '' if_table.append(map(str, [ ifIndex, ifDescr, ifType, ifSpeed, ifOperStatus, @@ -148,6 +156,12 @@ def check_lnx_if(item, params, info): return check_if_common(item, params, if_lnx_convert_to_if64(info)) -check_info['lnx_if'] = (check_lnx_if, "Interface %s", 1, inventory_lnx_if) -checkgroup_of['lnx_if'] = "if" -check_default_levels['lnx_if'] = "if_default_levels" + +check_info["lnx_if"] = { + 'check_function': check_lnx_if, + 'inventory_function': inventory_lnx_if, + 'service_description': 'Interface %s', + 'has_perfdata': True, + 'group': 'if', + 'default_levels_variable': 'if_default_levels', +} diff -Nru check-mk-1.2.2p3/lnx_packages check-mk-1.2.6p12/lnx_packages --- check-mk-1.2.2p3/lnx_packages 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/lnx_packages 2015-06-24 09:48:37.000000000 +0000 @@ -0,0 +1,72 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. 
You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. + +# Example for Debian/Ubuntu +# <<>> +# zlib1g:amd64|1:1.2.7.dfsg-13|amd64|deb|compression library - runtime|install ok installed +# zlib1g:i386|1:1.2.7.dfsg-13|i386|deb|compression library - runtime|install ok installed +# zlib1g-dev:amd64|1:1.2.7.dfsg-13|amd64|deb|compression library - development|install ok installed + +# Example for RPM +# gpg-pubkey|307e3d54|(none)|rpm|gpg(SuSE Package Signing Key )| +# gpg-pubkey|1d061a62|(none)|rpm|gpg(build@novell.com (Novell Linux Products) )| +# licenses|20070810|noarch|rpm|License collection as found in the packages of SuSE Linux| +# branding-SLES|11|noarch|rpm|SUSE Linux Enterprise Server Brand File| +# terminfo|5.6|i586|rpm|A terminal descriptions database| +# yast2-schema|2.17.4|noarch|rpm|YaST2 - AutoYaST Schema| +# glibc-i18ndata|2.11.1|i586|rpm|Database Sources for 'locale'| +# cpio-lang|2.9|i586|rpm|Languages for package cpio| +# zlib|1.2.3|i586|rpm|Data Compression Library| + + +def inv_lnx_packages(info): + paclist = inv_tree("software.packages:") + for pacname, version, arch, pactype, summary, inststate in info: + if pactype == "deb": + if "installed" not in inststate: + continue + if arch == "amd64": + arch = "x86_64" + entry = { + "name" : pacname, + "version" : version, + "arch" : arch, + "package_type" : pactype, + "summary" : summary, + } + # Split version into version of contained software and version of the + # packages (RPM calls the later "release") + parts = version.rsplit("-", 1) + if len(parts) == 2: + version, package_version = parts + entry["version"] = version + entry["package_version"] = package_version + paclist.append(entry) + + +inv_info['lnx_packages'] = { + "inv_function" : inv_lnx_packages, +} diff -Nru check-mk-1.2.2p3/lnx_quota check-mk-1.2.6p12/lnx_quota --- check-mk-1.2.2p3/lnx_quota 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/lnx_quota 2015-09-21 10:59:54.000000000 +0000 @@ -0,0 +1,102 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. 
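+# This check ships no example output; judging from the parser below, the
+# agent section looks roughly like this (hypothetical values; ten fields per
+# user line, the second field is a status flag and is skipped):
+# <<<lnx_quota>>>
+# [[[/home]]]
+# user1 -- 1234567 2000000 2500000 0 12345 100000 150000 0
+# The eight numbers are blocks used/soft/hard/grace followed by files
+# used/soft/hard/grace; block values are scaled by 1000 for display.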
+ +def lnx_quota_parse(info): + parsed = {} + fs = None + for line in info: + if line[0].startswith('[[['): + # new filesystem + fs = line[0][3:-3] + parsed[fs] = {} + elif fs and len(line) == 10: + # new table entry + parsed[fs][line[0]] = map(int, line[2:]) + return parsed + +def inventory_lnx_quota(info): + inv = [] + for fs in lnx_quota_parse(info).keys(): + inv.append((fs, {})) + return inv + +def check_lnx_quota(item, params, info): + parsed = lnx_quota_parse(info) + if item not in parsed: + return 3, 'Quota info not found for this filesystem' + + state = 0 + output = [] + perfdata = [] + + fmt = lambda v, w: w == 'files' and '%d files' % v or get_bytes_human_readable(v*1000, 1000) + + for user, values in parsed[item].items(): + for what, (used, soft, hard, grace) in [ + ('blocks', values[:4]), + ('files', values[4:]) ]: + if soft == 0 and hard == 0: + continue # skip entries with not-set limits + + this_state = 0 + txt = '%s %s' % (user, fmt(used, what)) + if used > hard: + this_state = 2 + txt += ' (over %s hard(!!))' % fmt(hard, what) + elif used > soft: + this_state = 1 + txt += ' (over %s soft' % fmt(soft, what) + if grace != 0: + # user is or was in grace period + if grace <= time.time(): + txt += ', grace exceeded(!!)' + this_state = 2 + else: + txt += ', within grace(!)' + else: + txt += '(!)' + txt += ')' + # When users are in "ok" state, don't output their usage, just + # add the perfdata for them + if this_state: + output.append(txt) + state = max(state, this_state) + + perfdata.append(('%s_%s' % (user, what), used*1000, + soft*1000, hard*1000, 0, hard*1000)) + + if not output: + output.append('All users are within quota') + + return state, ', '.join(output), perfdata + +check_info['lnx_quota'] = { + 'check_function' : check_lnx_quota, + 'inventory_function' : inventory_lnx_quota, + 'service_description' : 'Quota %s', + 'has_perfdata' : True, + 'group' : 'quota', +} diff -Nru check-mk-1.2.2p3/lnx_thermal check-mk-1.2.6p12/lnx_thermal --- check-mk-1.2.2p3/lnx_thermal 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/lnx_thermal 2015-09-21 10:59:54.000000000 +0000 @@ -0,0 +1,86 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. 
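+# Each status line consists of: zone name, an optional mode (e.g. "enabled"),
+# the driver type, the current temperature in millidegree Celsius, and then
+# pairs of trip point temperature and trip point name. Illustration with the
+# first sample line below: cur = 57000 / 1000 -> 57, and the only trip point
+# is {'critical': 127}, which is not reached, so the check reports OK.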
+ +# <<>> +# thermal_zone0 enabled acpitz 57000 127000 critical +# thermal_zone1 enabled acpitz 65000 100000 critical 95500 passive + +# <<>> +# thermal_zone0 enabled acpitz 47000 90000 critical 79000 passive + +# <<>> +# thermal_zone0 enabled acpitz 38000 98000 critical +# thermal_zone1 pkg-temp-0 44000 0 passive 0 passive + +def inventory_lnx_thermal(info): + return [ (l[0].replace('thermal_zone', ''), None) for l in info ] + +def check_lnx_thermal(item, params, info): + for line in info: + if line[0].replace('thermal_zone', '') == item: + # ['thermal_zone0', 'enabled', 'acpitz', '51000', '90000', 'critical', '79000', 'passive'] + state = 0 + tp_reached = [] + + # Some devices report an empty value for the 3rd field (type). Trying to fix those lines. + # -> thermal_zone1 pkg-temp-0 44000 0 passive 0 passive + try: + int(line[2]) + int(line[3]) + line = line[:2] + [''] + line[2:] + except: + pass # -> regular line + + # convert values from millidegree + cur = int(line[3]) / 1000 + + # parse trip points + if len(line) > 4: + trip_points = dict(zip(line[5::2], map(lambda x: int(x) / 1000, line[4::2]))) + for tp_num, (tp_name, level) in enumerate(trip_points.items()): + # ignore active cooling device trip points (means enabling a fan or similar) + if tp_name == 'active': + continue + if level != 0 and cur > level: + if tp_name in [ 'hot', 'critical' ]: + state = max(state, 2) + else: + state = max(state, 1) + tp_reached.append("%s (%d)" % (tp_name, tp_num)) + + detail_txt = '' + if tp_reached: + detail_txt = ' (Trip Points reached: %s)' % ', '.join(tp_reached) + + return (state, 'Temperature is %d °C%s' % (cur, detail_txt), [ ( 'temperature', cur ) ]) + return (3, 'No data found for sensor "thermal_zone%s"' % item) + +check_info['lnx_thermal'] = { + "inventory_function" : inventory_lnx_thermal, + "check_function" : check_lnx_thermal, + "service_description" : "Temperature %s", + "has_perfdata" : True, +} diff -Nru check-mk-1.2.2p3/lnx_uname check-mk-1.2.6p12/lnx_uname --- check-mk-1.2.2p3/lnx_uname 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/lnx_uname 2015-06-24 09:48:37.000000000 +0000 @@ -0,0 +1,40 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. 
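+# Note: inv_lnx_uname below takes the architecture from the first line and
+# the kernel version from the second, i.e. the agent is expected to send two
+# one-column lines (presumably the output of "uname -m" and "uname -r"); the
+# full "uname -a" line shown below is just the raw command output.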
+ +# Example output +# <<>> +# Linux klappmesser 3.10.17 #2 SMP PREEMPT Mon Nov 25 19:51:52 CET 2013 x86_64 GNU/Linux + +def inv_lnx_uname(info): + node = inv_tree("software.os.") + if len(info) >= 1: + node["arch"] = info[0][0] + if len(info) >= 2: + node["kernel_version"] = info[1][0] + +inv_info['lnx_uname'] = { + "inv_function" : inv_lnx_uname, +} diff -Nru check-mk-1.2.2p3/lnx_video check-mk-1.2.6p12/lnx_video --- check-mk-1.2.2p3/lnx_video 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/lnx_video 2015-06-24 09:48:37.000000000 +0000 @@ -0,0 +1,62 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. + +# Example output +# <<>> +# 05:00.0 VGA compatible controller: Advanced Micro Devices [AMD] nee ATI Cape Verde PRO [Radeon HD 7700 Series] (prog-if 00 [VGA controller]) +# Subsystem: Hightech Information System Ltd. Device 200b +# Flags: bus master, fast devsel, latency 0, IRQ 58 +# Memory at d0000000 (64-bit, prefetchable) [size=256M] +# Memory at fe8c0000 (64-bit, non-prefetchable) [size=256K] +# I/O ports at c000 [size=256] +# Expansion ROM at fe8a0000 [disabled] [size=128K] +# Capabilities: [48] Vendor Specific Information: Len=08 +# Capabilities: [50] Power Management version 3 +# Capabilities: [58] Express Legacy Endpoint, MSI 00 +# Capabilities: [a0] MSI: Enable+ Count=1/1 Maskable- 64bit+ +# Capabilities: [100] Vendor Specific Information: ID=0001 Rev=1 Len=010 +# Capabilities: [150] Advanced Error Reporting +# Capabilities: [270] #19 +# Kernel driver in use: fglrx_pci + + +def inv_lnx_video(info): + node = inv_tree("hardware.video:") + array = {} + for line in info: + if len(line) > 1: + if re.search("VGA compatible controller", line[1]): + array["name"] = line[2] + elif line[0] == "Subsystem": + array["subsystem"] = line[1] + elif line[0] == "Kernel driver in use": + array["driver"] = line[1] + node.append(array) + + +inv_info['lnx_video'] = { + "inv_function" : inv_lnx_video, +} diff -Nru check-mk-1.2.2p3/local check-mk-1.2.6p12/local --- check-mk-1.2.2p3/local 2013-11-05 09:23:08.000000000 +0000 +++ check-mk-1.2.6p12/local 2015-09-21 10:59:54.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . 
\ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. @@ -24,14 +24,82 @@ # to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, # Boston, MA 02110-1301 USA. +# Example output from agent: +# 0 Service_FOO V=1 This Check is OK +# 1 Bar_Service - This is WARNING and has no performance data +# 2 NotGood V=120;50;100;0;1000 A critical check +# P Some_other_Service value1=10;30;50|value2=20;10:20;0:50;0;100 Result is computed from two values +# P This_is_OK foo=18;20;50 +# P Some_yet_other_Service temp=40;30;50|humidity=28;50:100;0:50;0;100 +# P Has-no-var - This has no variable +# P No-Text hirn=-8;-20 + + +# Compute state according to warn/crit levels contained in the +# performance data. +def local_compute_state(perfdata): + texts = [] + + def outof_levels(value, levels): + if levels == None: + return + + if ':' in levels: + lower, upper = map(float, levels.split(':')) + else: + lower = None + upper = float(levels) + if value > upper: + return " %s > %s" % (value, upper) + elif lower != None and value < lower: + return " %s < %s" % (value, lower) + + worst = 0 + for entry in perfdata: + if len(entry) < 3: + continue # No levels attached + varname = entry[0] + value = float(entry[1]) + warn = entry[2] + if len(entry) >= 4: + crit = entry[3] + else: + crit = None + + text = outof_levels(value, crit) + if text: + worst = 2 + text = "%s%s(!!)" % (varname, text) + texts.append(text) + + else: + text = outof_levels(value, warn) + if text: + worst = max(worst, 1) + text = "%s%s(!)" % (varname, text) + texts.append(text) + + else: + texts.append("%s is %s(.)" % (varname, value)) + + return worst, texts def inventory_local(info): - return [ (line[1], '', '""') for line in info ] + inventory = [] + # Ignore invalid lines, tolerate bugs in local checks + # of unexperianced users. Lines with P do not need to + # supply a text + for line in info: + if len(line) >= 4 or len(line) == 3 and line[0] == 'P': + inventory.append( (line[1], None) ) + return inventory def check_local(item, params, info): for line in info: - if line[1] == item: - state = int(line[0]) + # Ignore invalid lines, tolerate bugs in local checks + # of unexperienced users + if len(line) >= 2 and line[1] == item: + statechar = line[0] perftxt = line[2] output = " ".join(line[3:]) perfdata = [] @@ -41,12 +109,23 @@ varname, valuetxt = entry.split('=') values = valuetxt.split(';') perfdata.append(tuple( [varname] + values )) + if statechar == 'P': + state, texts = local_compute_state(perfdata) + if output: + texts = [output] + texts + output = ", ".join(texts) + else: + state = int(statechar) + if state not in range(0, 4): + output += ", local check has sent invalid state %d" % state + state = 3 return (state, output, perfdata) return (3, "Check output not found in local checks") -check_info['local'] = ( - check_local, - "%s", - 1, - inventory_local) +check_info["local"] = { + 'check_function': check_local, + 'inventory_function': inventory_local, + 'service_description': '%s', + 'has_perfdata': True, +} diff -Nru check-mk-1.2.2p3/localize.py check-mk-1.2.6p12/localize.py --- check-mk-1.2.2p3/localize.py 2013-11-05 09:23:07.000000000 +0000 +++ check-mk-1.2.6p12/localize.py 2015-06-24 09:48:36.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . 
\ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. @@ -55,7 +55,6 @@ else: pot_file = locale_base + '/multisite.pot' - try: os.makedirs(locale_base) except: @@ -245,6 +244,13 @@ except: pass + # Maybe initialize the file in the local hierarchy with the file in + # the default hierarchy + if local_locale_dir and not os.path.exists(po_file) \ + and os.path.exists(locale_dir + '/%s/LC_MESSAGES/%s.po' % (lang, domain)): + file(po_file, 'w').write(file(locale_dir + '/%s/LC_MESSAGES/%s.po' % (lang, domain)).read()) + sys.stdout.write('Initialize %s with the file in the default hierarchy\n' % po_file) + localize_sniff() if not os.path.exists(po_file): @@ -263,6 +269,13 @@ init_files(lang) + # Maybe initialize the file in the local hierarchy with the file in + # the default hierarchy + if local_locale_dir and not os.path.exists(po_file) \ + and os.path.exists(locale_dir + '/%s/LC_MESSAGES/%s.po' % (lang, domain)): + file(po_file, 'w').write(file(locale_dir + '/%s/LC_MESSAGES/%s.po' % (lang, domain)).read()) + sys.stdout.write('Initialize %s with the file in the default hierarchy\n' % po_file) + if not os.path.exists(po_file): raise LocalizeException('The .po file %s does not exist.' % po_file) diff -Nru check-mk-1.2.2p3/logins check-mk-1.2.6p12/logins --- check-mk-1.2.2p3/logins 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/logins 2015-09-21 10:59:54.000000000 +0000 @@ -0,0 +1,59 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. 
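+# The agent reports a single integer, presumably something like the output
+# of "who | wc -l". With the default levels (20, 30) below, e.g. 25 current
+# sessions yield WARN and 31 yield CRIT.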
+ + +# Example output from agent: +# <<>> +# 3 + +logins_default_levels = (20,30) + +def inventory_logins(info): + if info: + return [ (None, "logins_default_levels") ] + +def check_logins(_no_item, params, info): + if info: + logins = int(info[0][0]) + warn, crit = params + state = 0 + if logins >= crit: + state = 2 + elif logins >= warn: + state = 1 + + infotext = "%d logins on system, levels at %d/%d" % ( logins, warn, crit ) + perfdata = [ ( "logins", logins, warn, crit, 0 ) ] + yield state, infotext, perfdata + + +check_info["logins"] = { + 'check_function' : check_logins, + 'inventory_function' : inventory_logins, + 'service_description' : 'Logins', + 'has_perfdata' : True, + 'group' : 'logins', +} diff -Nru check-mk-1.2.2p3/logwatch check-mk-1.2.6p12/logwatch --- check-mk-1.2.2p3/logwatch 2013-11-05 09:23:08.000000000 +0000 +++ check-mk-1.2.6p12/logwatch 2015-09-21 10:59:54.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. @@ -30,25 +30,64 @@ logwatch_rules = [] logwatch_max_filesize = 500000 # do not save more than 500k of message (configurable) logwatch_service_output = "default" -logwatch_forward_to_ec = False +logwatch_groups = [] # Variables embedded in precompiled checks check_config_variables += [ "logwatch_dir", "logwatch_max_filesize", "logwatch_service_output" ] -def inventory_logwatch(info): - if logwatch_forward_to_ec: - # this is the regular logwatch inventory. Terminate when in logwatch forward mode - return [] +def logwatch_ec_forwarding_enabled(params, item): + if 'restrict_logfiles' not in params: + return True # matches all logs on this host + else: + # only logs which match the specified patterns + for pattern in params['restrict_logfiles']: + if re.match(pattern, item): + return True + + return False + +# Splits the number of existing logfiles into +# forwarded (to ec) and not forwarded. Returns a +# pair of forwarded and not forwarded logs. +def logwatch_select_forwarded(info): + forwarded_logs = [] + not_forwarded_logs = [] + + forward_settings = host_extra_conf(g_hostname, checkgroup_parameters.get('logwatch_ec', [])) - inventory = [] for l in info: line = " ".join(l) if len(line) > 6 and line[0:3] == "[[[" and line[-3:] == "]]]" \ and ':missing' not in line and ':cannotopen' not in line: - inventory.append((line[3:-3], "", '""')) + logfile_name = line[3:-3] + + # Is forwarding enabled in general? 
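+            # host_extra_conf() returns the values of all 'logwatch_ec'
+            # rules matching this host; only the first value is evaluated
+            # here, and an explicit value of None disables forwarding.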
+ if forward_settings and forward_settings[0] != None: + if logwatch_ec_forwarding_enabled(forward_settings[0], logfile_name): + forwarded_logs.append(logfile_name) + else: + not_forwarded_logs.append(logfile_name) + + # No forwarding rule configured + else: + not_forwarded_logs.append(logfile_name) + + return forwarded_logs, not_forwarded_logs + + +def inventory_logwatch(info): + forwarded_logs, not_forwarded_logs = logwatch_select_forwarded(info) + inventory = [] + for logfile in not_forwarded_logs: + groups = logwatch_groups_of_logfile(logfile) + if groups: + continue + else: + inventory.append((logfile, None)) return inventory -#logwatch_patterns = { + +# logwatch_patterns = { # 'System': [ # ( 'W', 'sshd' ), # ( ['host1', 'host2'], 'C', 'ssh' ), # only applies to certain hosts @@ -177,6 +216,11 @@ # without precompiled checks, the params must be None an will be # ignored. def check_logwatch(item, params, info): + if len(info) == 1: + line = " ".join(info[0]) + if line.startswith("CANNOT READ CONFIG FILE"): + return 3, "Error in agent configuration: %s" % " ".join(info[0][4:]) + found = False loglines = [] for l in info: @@ -189,7 +233,93 @@ found = False elif found: loglines.append(line) + return check_logwatch_generic(item, params, loglines, found) + +check_info['logwatch'] = { + 'check_function': check_logwatch, + 'inventory_function': inventory_logwatch, + 'service_description': "Log %s", + 'group': 'logwatch', +} + +precompile_params['logwatch'] = logwatch_precompile + +# .--lw.groups-----------------------------------------------------------. +# | _ | +# | | |_ ____ _ _ __ ___ _ _ _ __ ___ | +# | | \ \ /\ / / _` | '__/ _ \| | | | '_ \/ __| | +# | | |\ V V / (_| | | | (_) | |_| | |_) \__ \ | +# | |_| \_/\_(_)__, |_| \___/ \__,_| .__/|___/ | +# | |___/ |_| | +# +----------------------------------------------------------------------+ +# | | +# '----------------------------------------------------------------------' + +def logwatch_group_precompile(hostname, item, _unused): + return logwatch_precompile(hostname, item, None), host_extra_conf(hostname, logwatch_groups) + +def logwatch_groups_of_logfile(filename, params=False): + import fnmatch + groups = [] + if not params: + params = host_extra_conf(g_hostname, logwatch_groups) + else: + params = params[1] + for line in params: + for group_name, pattern in line: + inclusion, exclusion = pattern + if fnmatch.fnmatch(filename, inclusion) \ + and not fnmatch.fnmatch(filename, exclusion): + groups.append(group_name) + return groups + +def inventory_logwatch_groups(info): + forwarded_logs, not_forwarded_logs = logwatch_select_forwarded(info) + added_groups = [] + inventory = [] + for logfile in not_forwarded_logs: + groups = logwatch_groups_of_logfile(logfile) + for group in groups: + if group not in added_groups: + added_groups.append(group) + inventory.append((group, None)) + return inventory + +def check_logwatch_groups(item, params, info): + if len(info) == 1: + line = " ".join(info[0]) + if line.startswith("CANNOT READ CONFIG FILE"): + return 3, "Error in agent configuration: %s" % " ".join(info[0][4:]) + + found = False + logfile_found = False + loglines = [] + for l in info: + line = " ".join(l) + if logfile_found == True and not line.startswith('[[['): + loglines.append(line) + if line.startswith('[[['): + logfile = line[3:-3] + if item in logwatch_groups_of_logfile(logfile, params): + found = True + logfile_found = True + else: + logfile_found = False + continue + return check_logwatch_generic(item, params, loglines, found, True) 
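+# Grouping example: with the pattern ('/var/log/messages*', ''), the files
+# /var/log/messages and /var/log/messages.1 fall into the same group and are
+# monitored as one service; the second tuple element is an exclusion pattern
+# (fnmatch syntax) that can take matching files out of the group again.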
+ + +check_info['logwatch.groups'] = { + 'check_function': check_logwatch_groups, + 'inventory_function': inventory_logwatch_groups, + 'service_description': "LOG %s", + 'group': 'logwatch', +} +precompile_params['logwatch.groups'] = logwatch_group_precompile +#. + +def check_logwatch_generic(item, params, loglines, found, groups=False): # Create directories, if neccessary try: logdir = logwatch_dir + "/" + g_hostname @@ -200,11 +330,10 @@ if www_group != None: try: if i_am_root(): - import pwd - to_user = pwd.getpwnam(nagios_user)[2] + to_user = nagios_user else: - to_user = -1 # keep user unchanged - os.chown(logdir, to_user, www_group) + to_user = "" # keep user unchanged + os.system("chown %s:%s %s" % (to_user, www_group, quote_shell_string(logdir))) os.chmod(logdir, 0775) except Exception, e: os.rmdir(logdir) @@ -223,15 +352,9 @@ # Logfile (=item) section not found and no local file found. This usually # means, that the corresponding logfile also vanished on the target host. if found == False and not os.path.exists(logfile): - return (3, "UNKNOWN - log not present anymore") - - # if logfile has reached maximum size, abort with critical state - if os.path.exists(logfile) and os.path.getsize(logfile) > logwatch_max_filesize: - return (2, "CRIT - unacknowledged messages have exceeded max size (%d Bytes)" % logwatch_max_filesize) + return (3, "log not present anymore") - # # Write out new log lines (no reclassify here. It is done later in general for all logs) - # if len(loglines) > 0: worst = -1 for line in loglines: @@ -250,10 +373,15 @@ # Get the patterns (either compile or reuse the precompiled ones) # Check_MK creates an empty string if the precompile function has - # not been exectued yet. The precomile function creates an empty - # list when no rules/patterns are defined. - if params != '': - patterns = params # patterns already precompiled + # not been executed yet. The precompile function creates an empty + # list when no ruless/patterns are defined. In case of the logwatch.groups + # checks, params are a tuple with the normal logwatch parameters on the first + # and the grouping patterns on the second position + if params not in ('', None): + if groups: + patterns = params[0] + else: + patterns = params # patterns already precompiled else: patterns = logwatch_precompile(g_hostname, item, None) @@ -264,10 +392,8 @@ level = newlevel return level - # # Read current log messages, reclassify all messages and write out the # whole file again if at least one line has been reclassified - # worst = 0 last_worst_line = '' reclassified_lines = [] @@ -308,7 +434,7 @@ old_level, text = logwatch_parse_line(line) level = reclassify_line(counts, patterns, text, old_level) - if old_level != level: + if old_level != level or level == "I": reclassified = True old_block_worst = block_worst @@ -325,6 +451,9 @@ except KeyError: state_counts[level] = 1 + if level == "I": + continue + block_lines.append(level + ' ' + text) # The last section is finished here. 
Add it to the list of reclassified lines if the @@ -344,34 +473,25 @@ else: os.unlink(logfile) + # if logfile has reached maximum size, abort with critical state + if os.path.exists(logfile) and os.path.getsize(logfile) > logwatch_max_filesize: + return (2, "unacknowledged messages have exceeded max size (%d Bytes)" % logwatch_max_filesize) + # # Render output # - state = logwatch_state(worst) - if len(reclassified_lines) == 0: - return (0, "OK - no error messages") + return (0, "no error messages") else: count_txt = [] for level, num in state_counts.iteritems(): count_txt.append('%d %s' % (num, logwatch_level_name(level))) if logwatch_service_output == 'default': - return (worst, state + " - %s messages (Last worst: \"%s\")" % + return (worst, "%s messages (Last worst: \"%s\")" % (', '.join(count_txt), last_worst_line)) else: - return (worst, state + " - %s messages" % ', '.join(count_txt)) - - -check_info['logwatch'] = { - 'check_function': check_logwatch, - 'service_description': "LOG %s", - 'has_perfdata': 0, - 'inventory_function': inventory_logwatch, - 'group': 'logwatch', -} - -precompile_params['logwatch'] = logwatch_precompile + return (worst, "%s messages" % ', '.join(count_txt)) # .----------------------------------------------------------------------. # | _____ ____ _____ ___ ______ ___ ____ ____ | @@ -386,17 +506,10 @@ import socket, time -def inventory_logwatch_ec(info): - if logwatch_forward_to_ec and info: - # this is the logwatch event console forward inventory. Add one service per host. - return [(None, "{}")] - - # this is the logwatch event console forward inventory. Terminate when not in logwatch forward mode - return [] - -# OK -> priority 5 -# WARN -> priority 4 -# CRIT -> priority 2 +# OK -> priority 5 (notice) +# WARN -> priority 4 (warning) +# CRIT -> priority 2 (crit) +# context -> priority 6 (info) # u = Uknown def logwatch_to_prio(level): if level == 'W': @@ -405,82 +518,145 @@ return 2 elif level == 'O': return 5 - elif level == 'u': + elif level == '.': + return 6 + else: return 4 -def syslog_time(): - localtime = time.localtime() - day = int(time.strftime("%d", localtime)) # strip leading 0 - value = time.strftime("%b %%d %H:%M:%S", localtime) - return value % day +def inventory_logwatch_ec(info): + forwarded_logs, not_forwarded_logs = logwatch_select_forwarded(info) + if forwarded_logs: + return [ (None, { "expected_logfiles": forwarded_logs } ) ] + +def check_logwatch_ec(item, params, info): + if len(info) == 1: + line = " ".join(info[0]) + if line.startswith("CANNOT READ CONFIG FILE"): + return 3, "Error in agent configuration: %s" % " ".join(info[0][4:]) -def check_logwatch_ec(_unused, params, info): # 1. Parse lines in info and separate by logfile - logs = [] + logs = {} logfile = None for l in info: line = " ".join(l) if len(line) > 6 and line[0:3] == "[[[" and line[-3:] == "]]]": # new logfile, extract name logfile = line[3:-3] + logs.setdefault(logfile, []) elif logfile and line: - # new regular line, skip context lines - if line[0] != '.': - logs.append((logfile, line)) + # new regular line, skip context lines and ignore lines + if line[0] not in ['.', 'I']: + logs[logfile].append(line) + + # 2. 
Maybe filter logfiles if some should be excluded + if 'restrict_logfiles' in params: + for logfile in logs.keys(): + if not logwatch_ec_forwarding_enabled(params, logfile): + del logs[logfile] + + # Check if the number of expected files matches the actual one + status = 0 + infotexts = [] + if params.get('monitor_logfilelist'): + if 'expected_logfiles' not in params: + infotexts.append("You enabled monitoring the list of forwarded logfiles. You need to re-inventorize the check once.") + status = 1 + else: + expected = params['expected_logfiles'] + missing = [] + for f in expected: + if f not in logs: + missing.append(f) + if missing: + infotexts.append("Missing logfiles: %s" % (", ".join(missing))) + status = 1 + + exceeding = [] + for f in logs: + if f not in expected: + exceeding.append(f) + if exceeding: + infotexts.append("Newly appeared logfiles: %s" % (", ".join(exceeding))) + status = 1 - # 2. create syslog message of each line + # 3. create syslog message of each line # <128> Oct 24 10:44:27 Klappspaten /var/log/syslog: Oct 24 10:44:27 Klappspaten logger: asdasdad as # timestamp hostname logfile: message facility = params.get('facility', 17) << 3 # default to "local1" messages = [] - cur_time = syslog_time() - for logfile, line in logs: - msg = '<%d>' % (facility + logwatch_to_prio(line[0]),) - msg += '%s %s %s: %s' % (cur_time, g_hostname, logfile, line[2:]) - messages.append(msg) + cur_time = int(time.time()) + + forwarded_logfiles = set([]) + for logfile, lines in logs.items(): + for line in lines: + msg = '<%d>' % (facility + logwatch_to_prio(line[0]),) + msg += '@%s %s %s: %s' % (cur_time, g_hostname, logfile, line[2:]) + messages.append(msg) + forwarded_logfiles.add(logfile) - # 3. send lines to event console + # 4. send lines to event console # a) local in same omd site # b) local pipe # c) remote via udp # d) remote via tcp method = params.get('method') - if method == None: + if not method: method = os.getenv('OMD_ROOT') + "/tmp/run/mkeventd/eventsocket" + elif method == 'spool:': + method += os.getenv('OMD_ROOT') + "/var/mkeventd/spool" + num_messages = len(messages) try: - if isinstance(method, tuple): - # connect either via tcp or udp - if method[0] == 'udp': - sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) + if messages: + if isinstance(method, tuple): + # connect either via tcp or udp + if method[0] == 'udp': + sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) + else: + sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + sock.connect((method[1], method[2])) + for message in messages: + sock.send(message + "\n") + sock.close() + + elif not method.startswith('spool:'): + # write into local event pipe + # Important: When the event daemon is stopped, then the pipe + # is *not* existing! This prevents us from hanging in such + # situations. So we must make sure that we do not create a file + # instead of the pipe! + sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) + sock.connect(method) + sock.send('\n'.join(messages) + '\n') + sock.close() + else: - sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) - sock.connect((method[1], method[2])) - for message in messages: - sock.send(message) - sock.close() + # Spool the log messages to given spool directory. 
+ # First write a file which is not read into ec, then + # perform the move to make the file visible for ec + spool_path = method[6:] + file_name = '.%s_%s%d' % (g_hostname, item and item.replace('/', '\\') + '_' or '', time.time()) + if not os.path.exists(spool_path): + os.makedirs(spool_path) + file('%s/%s' % (spool_path, file_name), 'w').write('\n'.join(messages) + '\n') + os.rename('%s/%s' % (spool_path, file_name), '%s/%s' % (spool_path, file_name[1:])) + if forwarded_logfiles: + logfile_info = " from " + ",".join(list(forwarded_logfiles)) else: - # write into local event pipe - # Important: When the event daemon is stopped, then the pipe - # is *not* existing! This prevents us from hanging in such - # situations. So we must make sure that we do not create a file - # instead of the pipe! - sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) - sock.connect(method) - sock.send('\n'.join(messages) + '\n') - sock.close() + logfile_info = "" + + infotexts.append('Forwarded %d messages%s to event console' % (num_messages, logfile_info)) + return (status, ", ".join(infotexts), [('messages', num_messages)]) - num_messages = len(messages) - return (0, 'OK - Forwarded %d messages to event console' % num_messages, [('messages', num_messages)]) except Exception, e: - return (2, 'CRITICAL - Unable to forward messages to event console (%s)' % e) + return (2, 'Unable to forward messages to event console (%s). Lost %d messages.' % + (e, num_messages)) check_info['logwatch.ec'] = { 'check_function': check_logwatch_ec, - 'service_description': "Log Forwarding", - 'has_perfdata': 0, 'inventory_function': inventory_logwatch_ec, + 'service_description': "Log Forwarding", 'group': 'logwatch_ec', } diff -Nru check-mk-1.2.2p3/logwatch.cfg check-mk-1.2.6p12/logwatch.cfg --- check-mk-1.2.2p3/logwatch.cfg 2013-11-05 09:22:37.000000000 +0000 +++ check-mk-1.2.6p12/logwatch.cfg 1970-01-01 00:00:00.000000000 +0000 @@ -1,56 +0,0 @@ -# +------------------------------------------------------------------+ -# | ____ _ _ __ __ _ __ | -# | / ___| |__ ___ ___| | __ | \/ | |/ / | -# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | -# | | |___| | | | __/ (__| < | | | | . \ | -# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | -# | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | -# +------------------------------------------------------------------+ -# -# This file is part of Check_MK. -# The official homepage is at http://mathias-kettner.de/check_mk. -# -# check_mk is free software; you can redistribute it and/or modify it -# under the terms of the GNU General Public License as published by -# the Free Software Foundation in version 2. check_mk is distributed -# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- -# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A -# PARTICULAR PURPOSE. See the GNU General Public License for more de- -# ails. You should have received a copy of the GNU General Public -# License along with GNU Make; see the file COPYING. If not, write -# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, -# Boston, MA 02110-1301 USA. - -# logwatch.cfg -# This file configures mk_logwatch. Define your logfiles -# and patterns to be looked for here. - -# Name one or more logfiles -/var/log/messages -# Patterns are indented with one space are prefixed with: -# C: Critical messages -# W: Warning messages -# I: ignore these lines (OK) -# The first match decided. 
Lines that do not match any pattern -# are ignored - C Fail event detected on md device - I mdadm.*: Rebuild.*event detected - W mdadm\[ - W ata.*hard resetting link - W ata.*soft reset failed (.*FIS failed) - W device-mapper: thin:.*reached low water mark - C device-mapper: thin:.*no free space - -/var/log/auth.log - W sshd.*Corrupted MAC on input - -/var/log/syslog /var/log/kern.log - C panic - C Oops - W generic protection rip - W .*Unrecovered read error - auto reallocate failed - -# Globbing patterns are allowed: -# /sapdata/*/saptrans.log -# C ORA- diff -Nru check-mk-1.2.2p3/logwatch.ec check-mk-1.2.6p12/logwatch.ec --- check-mk-1.2.2p3/logwatch.ec 2013-11-05 09:23:07.000000000 +0000 +++ check-mk-1.2.6p12/logwatch.ec 2015-09-21 10:59:54.000000000 +0000 @@ -1,6 +1,6 @@ title: Forward Logwatch Messages to the Event Console agents: linux, windows, aix, solaris -author: Lars Michelsen +catalog: os/files license: GPL distribution: check_mk description: @@ -18,4 +18,10 @@ {"method"}: This value can have the following format: {None} (Default value): Tries to detect the path to the local mkeventd event pipe. {"/path/to/pipe"}: The path to a local mkeventd event pipe. {("udp", "127.0.0.1", 514)}: The udp host and port to forward the messages to. - {("tcp", "127.0.0.1", 514)}: The tcp host and port to forward the messages to. + {("tcp", "127.0.0.1", 514)}: The tcp host and port to forward the messages to. It can also + be configured to use the spooling mechanism of the event console. To configure this, either + configure {"socket:"} to detect the spooling directory of the local event console or + {"socket:/path/to/spool/directory"} to configure the path explicit to the local spool directory. + + {"monitor_logfilelist"}: Set this to {True} if you want to get warned if the list of logfiles + has changed since the last inventory if this check. diff -Nru check-mk-1.2.2p3/logwatch.groups check-mk-1.2.6p12/logwatch.groups --- check-mk-1.2.2p3/logwatch.groups 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/logwatch.groups 2015-06-24 09:48:36.000000000 +0000 @@ -0,0 +1,19 @@ +title: Grouping of logfiles +agents: linux, windows, aix, solaris +catalog: os/files +license: GPL +distribution: check_mk +description: + This check is used to group multiple logfiles together. This is usefull in cases when a logrotation mechanism is used, + with no permanent name for the current logfile. + + Please refer to to manpage of {{logwatch}} for more details about logfile monitoring. + +inventory: + One service for each group will be created + +examples: + logwatch_groups = [ + ([("Debug Group", ("/var/log/*.err","/var/log/*.debug"))], ALL_HOSTS), + ([("Message_group", ("/var/log/messages*",""))], ['test2'], ALL_HOSTS), + ] diff -Nru check-mk-1.2.2p3/lparstat_aix check-mk-1.2.6p12/lparstat_aix --- check-mk-1.2.2p3/lparstat_aix 2013-11-05 09:23:08.000000000 +0000 +++ check-mk-1.2.6p12/lparstat_aix 2015-06-24 09:48:37.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. 
@@ -30,60 +30,91 @@ # | Joerg Linge 2009 Copyright 2010 | # +------------------------------------------------------------------+ -# %user %sys %wait %idle physc %entc lbusy app vcsw phint -# ----- ---- ----- ----- ----- ----- ------ --- ---- ----- -# 2.5 6.0 0.1 91.5 0.03 11.4 1.8 3.27 1976647217 490545630 - +# %user %sys %wait %idle physc %entc lbusy app vcsw phint +# ----- ---- ----- ----- ----- ----- ------ --- ---- ----- +# 2.5 6.0 0.1 91.5 0.03 11.4 1.8 3.27 1976647217 490545630 + +# %user %sys %wait %idle physc %entc lbusy vcsw phint %nsp %utcyc +# ----- ----- ------ ------ ----- ----- ------ ----- ----- ----- ------ +# 0.2 0.4 0.0 99.3 0.02 1.7 0.0 215 3 101 0.64 + +# %user %sys %wait %idle physc %entc lbusy app vcsw phint %nsp %utcyc +# ----- ----- ------ ------ ----- ----- ------ --- ----- ----- ----- ------ +# 0.1 0.4 0.0 99.5 0.02 1.6 0.0 3.97 297 0 101 0.64 + +# %user %sys %wait %idle physc %entc lbusy vcsw phint %nsp +# ----- ----- ------ ------ ----- ----- ------ ----- ----- ----- +# 0.1 0.2 0.0 99.6 0.04 1.8 2.3 371 0 58 lparstat_default_levels = (5, 10) def inventory_lparstat(info): - if len(info) == 1 and len(info[0]) >= 5: + if len(info) >= 1 and len(info[-1]) >= 5: return [(None, "", "lparstat_default_levels")] def check_lparstat(item, params, info): - try: - user = info[0][0] - sys = info[0][1] - wait = info[0][2] - idle = info[0][3] - physc = info[0][4] - entc = info[0][5] - lbusy = info[0][6] - app = info[0][7] - except: - return (3, "UNKNOWN - invalid or incomplete output from plugin") - - perfdata = [ - ('user', str(user) + '%'), - ('sys', str(sys) + '%'), - ('wait', str(wait) + '%'), - ('idle', str(idle) + '%'), - ('physc', str(physc) + ''), - ('entc', str(entc) + '%'), - ('lbusy', str(lbusy) + ''), - ('app', str(app) + ''), - ] - - return (0, "OK - AIX lparstat, user=%s%% sys=%s%% wait=%s%% idle=%s%% physc=%s app=%s" % (user,sys,wait,idle,physc,app),perfdata ) - -check_info['lparstat_aix'] = (check_lparstat, "lparstat", 1, inventory_lparstat ) + if len(info) == 1: + # Old agent provided only the values in a single line + line = info[0] + if len(line) == 12: + cols = [ 'physc', 'entc', 'lbusy', 'app', 'vcsw', 'phint', 'nsp', 'utcyc' ] + uom = [ '', '%', '', '', '', '', '%', '%' ] + values = line[-8:] + elif len(line) == 11: + cols = [ 'physc', 'entc', 'lbusy', 'app', 'vcsw', 'phint', 'nsp' ] + uom = [ '', '%', '', '', '', '', '%' ] + values = line[-7:] + elif len(line) == 10: + cols = [ 'physc', 'entc', 'lbusy', 'app', 'vcsw', 'phint' ] + uom = [ '', '%', '', '', '', '' ] + values = line[-6:] + elif len(line) == 9: + cols = [ 'physc', 'entc', 'lbusy', 'app' ] + uom = [ '', '%', '', '' ] + values = line[-4:] + elif len(line) == 6: + cols = [ 'nsp', 'utcyc' ] + uom = [ '%', '%' ] + values = line[-2:] + # else: + # Invalid output, let exception happen + else: + # The new agent provides three lines, the title line, spacer line and the values + cols = [ c.replace('%', '') for c in info[-3] ] + uom = [ '%' in c and '%' or '' for c in info[-3] ] + values = info[-1] + + perfdata = [ (cols[i], val+uom[i]) for i, val in enumerate(values) ] + output = ', '.join([ cols[i].title()+': '+val+uom[i] for i, val in enumerate(values) ]) + + return 0, output, perfdata + +check_info["lparstat_aix"] = { + 'check_function': check_lparstat, + 'inventory_function': inventory_lparstat, + 'service_description': 'lparstat', + 'has_perfdata': True, +} # Utilization and IO/Wait kernel_util_default_levels = None def inventory_lparstat_aix_cpu(info): - if len(info) == 1 and len(info[0]) >= 4: + 
if len(info) >= 1 and len(info[-1]) >= 4: return [(None, "kernel_util_default_levels")] def check_lparstat_aix_cpu(_no_item, params, info): - user, system, wait, idle = map(float, info[0][:4]) + if len(info) == 1: + line = info[0] # old (single line) agent output + else: + line = info[-1] + user, system, wait, idle = map(float, line[:4]) perfdata = [ ( "user", "%.3f" % user ), ( "system", "%.3f" % system ), ( "wait", "%.3f" % wait ) ] - infotext = " - user: %2.1f%%, system: %2.1f%%, wait: %2.1f%%" % (user, system, wait) + infotext = "user: %2.1f%%, system: %2.1f%%, wait: %2.1f%%" % (user, system, wait) # You may set a warning/critical level on the io wait # percentage. This can be done by setting params to @@ -100,9 +131,7 @@ except: pass - return (result, nagios_state_names[result] + infotext, perfdata) - - + return (result, infotext, perfdata) check_info['lparstat_aix.cpu_util'] = { "check_function" : check_lparstat_aix_cpu, diff -Nru check-mk-1.2.2p3/lparstat_aix.cpu_util check-mk-1.2.6p12/lparstat_aix.cpu_util --- check-mk-1.2.2p3/lparstat_aix.cpu_util 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/lparstat_aix.cpu_util 2015-06-24 09:48:36.000000000 +0000 @@ -0,0 +1,34 @@ +title: lparstat on AIX - CPU utilization +agents: aix +catalog: os/kernel +license: GPL +distribution: check_mk +description: + The check executes the {lparstat} command on a logical partition (LPAR) under + AIX and reports CPU utilization extracted from it's output. See section + perfdata for description of the single values. + + A level can be set only on {wait} (disk wait). + +perfdata: + The following 3 float values: + + {user}: CPU utilization in user space in percent + + {sys}: CPU utilization in kernel space in percent + + {wait}: CPU time for I/O wait in percent + +inventory: + On each LPAR one check is generated. + +[parameters] +warning (int): The percentage of {wait} that triggers a warning level. + +critical (int): The percentage of {wait} that triggers a critical level + + The parameters may also be set to {None}, which makes the check alwas {OK}. + +[configuration] +kernel_util_default_levels (int, int): Default levels used by inventorized + checks. This is present to {None}, which disables the levels. diff -Nru check-mk-1.2.2p3/lsi check-mk-1.2.6p12/lsi --- check-mk-1.2.2p3/lsi 2013-11-05 09:23:08.000000000 +0000 +++ check-mk-1.2.6p12/lsi 2015-06-24 09:48:37.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. @@ -24,8 +24,6 @@ # to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, # Boston, MA 02110-1301 USA. - - def inventory_lsi(check_type, info): # convert [ 0, 1, 2, 3, 4, 5, ...] into [ (0,1), (2,3), (4,5), ... 
] @@ -46,6 +44,7 @@ else: return [(id, '"%s"' % state) for (id, state) in disks] + def check_lsi_array(item, _no_params, info): volumeid = -1 for line in info: @@ -54,10 +53,20 @@ elif line[0] == 'Statusofvolume' and volumeid == item: status = line[1] if status == 'Okay(OKY)': - return (0, 'OK - Status is Okay (OKY)') + return (0, 'Status is Okay (OKY)') else: - return (2, 'CRIT - Status is %s' % (status,)) - return (2, 'CRIT - RAID volume %d not existing' % item) + return (2, 'Status is %s' % (status,)) + return (2, 'RAID volume %d not existing' % item) + +check_info["lsi.array"] = {'check_function': check_lsi_array, + 'default_levels_variable': None, + 'group': 'raid', + 'has_perfdata': False, + 'inventory_function': lambda info: inventory_lsi('lsi.array', info), + 'node_info': False, + 'service_description': 'RAID array %s', + 'snmp_info': None, + 'snmp_scan_function': None} def check_lsi_disk(item, target_state, info): @@ -68,14 +77,17 @@ elif line[0] == 'State' and target_id == item: state = line[1].split('(')[-1][:-1] if state == target_state: - return (0, 'OK - disk has state %s' % state) + return (0, 'disk has state %s' % state) else: - return (2, 'CRIT - disk has state %s (should be %s)' % (state, target_state)) - return (2, 'CRIT - disk not present') - - -check_info['lsi.array'] = (check_lsi_array, "RAID array %s", 0, lambda info: inventory_lsi('lsi.array', info)) -checkgroup_of['lsi.array'] = "raid" + return (2, 'disk has state %s (should be %s)' % (state, target_state)) + return (2, 'disk not present') -check_info['lsi.disk'] = (check_lsi_disk, "RAID disk %s", 0, lambda info: inventory_lsi('lsi.disk', info)) -checkgroup_of['lsi.disk'] = "raid_disk" +check_info["lsi.disk"] = {'check_function': check_lsi_disk, + 'default_levels_variable': None, + 'group': 'raid_disk', + 'has_perfdata': False, + 'inventory_function': lambda info: inventory_lsi('lsi.disk', info), + 'node_info': False, + 'service_description': 'RAID disk %s', + 'snmp_info': None, + 'snmp_scan_function': None} diff -Nru check-mk-1.2.2p3/lsi.array check-mk-1.2.6p12/lsi.array --- check-mk-1.2.2p3/lsi.array 2013-11-05 09:23:07.000000000 +0000 +++ check-mk-1.2.6p12/lsi.array 2015-06-24 09:48:36.000000000 +0000 @@ -1,6 +1,6 @@ -title: Check LSI RAID array +title: LSI RAID array agents: linux -author: Mathias Kettner +catalog: os/storage license: GPL distribution: check_mk description: diff -Nru check-mk-1.2.2p3/lsi.disk check-mk-1.2.6p12/lsi.disk --- check-mk-1.2.2p3/lsi.disk 2013-11-05 09:23:07.000000000 +0000 +++ check-mk-1.2.6p12/lsi.disk 2015-06-24 09:48:36.000000000 +0000 @@ -1,6 +1,6 @@ -title: Check disk of LSI RAID array +title: Status of a disk in an LSI RAID array agents: linux -author: Mathias Kettner +catalog: os/storage license: GPL distribution: check_mk description: diff -Nru check-mk-1.2.2p3/ltmain.sh check-mk-1.2.6p12/ltmain.sh --- check-mk-1.2.2p3/ltmain.sh 2013-03-04 11:48:41.000000000 +0000 +++ check-mk-1.2.6p12/ltmain.sh 2014-10-30 13:30:24.000000000 +0000 @@ -1,29 +1,4 @@ -#!/usr/bin/python -# -*- encoding: utf-8; py-indent-offset: 4 -*- -# +------------------------------------------------------------------+ -# | ____ _ _ __ __ _ __ | -# | / ___| |__ ___ ___| | __ | \/ | |/ / | -# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | -# | | |___| | | | __/ (__| < | | | | . \ | -# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | -# | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | -# +------------------------------------------------------------------+ -# -# This file is part of Check_MK. 
-# The official homepage is at http://mathias-kettner.de/check_mk. -# -# check_mk is free software; you can redistribute it and/or modify it -# under the terms of the GNU General Public License as published by -# the Free Software Foundation in version 2. check_mk is distributed -# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- -# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A -# PARTICULAR PURPOSE. See the GNU General Public License for more de- -# ails. You should have received a copy of the GNU General Public -# License along with GNU Make; see the file COPYING. If not, write -# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, -# Boston, MA 02110-1301 USA. - +#!/bin/bash # ltmain.sh - Provide generalized library-building support services. # NOTE: Changing this file will not affect anything until you rerun configure. # diff -Nru check-mk-1.2.2p3/mail check-mk-1.2.6p12/mail --- check-mk-1.2.2p3/mail 2013-11-05 09:23:08.000000000 +0000 +++ check-mk-1.2.6p12/mail 2015-09-21 10:59:54.000000000 +0000 @@ -1,13 +1,55 @@ #!/usr/bin/python -# HTML Emails with included Graphs -# This script creates a very beautiful mail in multipart format with +# HTML Email +# Bulk: yes + +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. + + +# Argument 1: Full system path to the pnp4nagios index.php for fetching the graphs. Usually auto configured in OMD. +# Argument 2: HTTP-URL-Prefix to open Multisite. When provided, several links are added to the mail. +# Example: http://myserv01/prod +# +# This script creates a nifty HTML email in multipart format with # attached graphs and such neat stuff. Sweet! 
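As orientation for the hunks that follow: the script assembles a multipart/related message that carries a multipart/alternative pair (the plain-text and HTML bodies) plus the PNP graphs as inline images, which the HTML references through cid: URLs. A minimal standalone sketch of that structure, using only the stock email package and made-up content (function name, subject, and recipient are invented, not part of the patch):

    from email.mime.multipart import MIMEMultipart
    from email.mime.text import MIMEText
    from email.mime.image import MIMEImage

    def sketch_mail(png_bytes):
        # container: multipart/related holds the bodies plus inline images
        m = MIMEMultipart('related')
        # alternative part: plain-text and HTML renderings of the same content
        alt = MIMEMultipart('alternative')
        alt.attach(MIMEText('plain text body', 'plain', _charset='utf-8'))
        alt.attach(MIMEText('<img src="cid:graph-0.png">', 'html', _charset='utf-8'))
        m.attach(alt)
        # the HTML refers to the image by its Content-ID (cid: URL);
        # png_bytes must be real PNG data
        img = MIMEImage(png_bytes, 'png', name='graph-0.png')
        img.add_header('Content-ID', '<graph-0.png>')
        img.add_header('Content-Disposition', 'inline', filename='graph-0.png')
        m.attach(img)
        m['Subject'] = 'Check_MK: example'
        m['To'] = 'monitoring@example.com'
        return m.as_string()

The real script below additionally sets From/Reply-To headers and pipes the result into /usr/sbin/sendmail.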
+ import os, re, sys, subprocess -from email.mime.multipart import MIMEMultipart -from email.mime.text import MIMEText -from email.mime.application import MIMEApplication -from email.mime.image import MIMEImage + +try: + from email.mime.multipart import MIMEMultipart + from email.mime.text import MIMEText + from email.mime.application import MIMEApplication + from email.mime.image import MIMEImage +except ImportError: + # python <2.5 compat + from email.MIMEMultipart import MIMEMultipart + from email.MIMEText import MIMEText + from email.MIMEImage import MIMEImage + from email.MIMEBase import MIMEBase + from email import Encoders + MIMEApplication = None tmpl_head_html = ''' @@ -15,63 +57,54 @@ $SUBJECT$ -''' + +''' -tmpl_foot_html = ''' +tmpl_foot_html = '''
    + ''' -# -# HOST TEMPLATES -# -tmpl_host_subject = 'Check_MK: $HOSTNAME$ - $NOTIFICATIONTYPE$' +# Elements to be put into the mail body. Columns: +# 1. Name +# 2. "both": always, possible, "host": only for hosts, or "service": only for service notifications +# 3. True -> always enabled, not configurable, False: optional +# 4. Title +# 5. Text template +# 6. HTML template + +body_elements = [ + + ( "hostname", "both", True, "Host", + "$HOSTNAME$ ($HOSTALIAS$)", + "$LINKEDHOSTNAME$ ($HOSTALIAS$)" ), + + ( "servicedesc", "service", True, "Service", + "$SERVICEDESC$", + "$LINKEDSERVICEDESC$" ), + + ( "event", "both", True, "Event", + "$EVENT_TXT$", + "$EVENT_HTML$", ), + + # Elements for both host and service notifications + ( "address", "both", False, "Address", + "$HOSTADDRESS$", + "$HOSTADDRESS$", ), + + ( "abstime", "both", False, "Date / Time", + "$LONGDATETIME$", + "$LONGDATETIME$", ), + + # Elements only for host notifications + ( "reltime", "host", False, "Relative Time", + "$LASTHOSTSTATECHANGE_REL$", + "$LASTHOSTSTATECHANGE_REL$",), + + ( "output", "host", True, "Plugin Output", + "$HOSTOUTPUT$", + "$HOSTOUTPUT$",), + + ( "ack_author", "host", False, "Acknowledge Author", + "$HOSTACKAUTHORNAME$", + "$HOSTACKAUTHORNAME$",), + + ( "ack_comment", "host", False, "Acknowledge Comment", + "$HOSTACKCOMMENT$", + "$HOSTACKCOMMENT$",), + + ( "perfdata", "host", False, "Performance Data", + "$HOSTPERFDATA$", + "$HOSTPERFDATA$",), + + + # Elements only for service notifications + ( "reltime", "service", False, "Relative Time", + "$LASTSERVICESTATECHANGE_REL$", + "$LASTSERVICESTATECHANGE_REL$",), + + ( "output", "service", True, "Plugin Output", + "$SERVICEOUTPUT$", + "$SERVICEOUTPUT$",), + + ( "longoutput", "service", False, "Additional Output", + "$LONGSERVICEOUTPUT$", + "$LONGSERVICEOUTPUT$",), + + ( "ack_author", "service", False, "Acknowledge Author", + "$SERVICEACKAUTHOR$", + "$SERVICEACKAUTHOR$",), + + ( "ack_comment", "service", False, "Acknowledge Comment", + "$SERVICEACKCOMMENT$", + "$SERVICEACKCOMMENT$",), + + ( "perfdata", "service", False, "Performance Data", + "$HOSTPERFDATA$", + "$HOSTPERFDATA$",), + + ( "perfdata", "service", False, "Performance Data", + "$SERVICEPERFDATA$", + "$SERVICEPERFDATA$",), + + # Debugging + ( "context", "both", False, "Complete variable list", + "$CONTEXT_ASCII$", + "$CONTEXT_HTML$", + ) +] -tmpl_host_txt = ''' -Host: $HOSTNAME$ ($HOSTALIAS$) -Address: $HOSTADDRESS$ - -State: $LASTHOSTSTATE$ -> $HOSTSTATE$ ($NOTIFICATIONTYPE$) -Output: $HOSTOUTPUT$ -Perfdata: $HOSTPERFDATA$ -$LONGHOSTOUTPUT$ -''' - -tmpl_host_html = tmpl_head_html + ''' - - - - - - - - - - - - - - - - - - - - - - - -$GRAPH_CODE$ -
    Object Information
    Name$HOSTNAME$ ($HOSTALIAS$)
    Address$HOSTADDRESS$
    State
    State$LASTHOSTSTATE$ -> $HOSTSTATE$ ($NOTIFICATIONTYPE$)
    Output$HOSTOUTPUT$
    ''' + tmpl_foot_html +tmpl_host_subject = 'Check_MK: $HOSTNAME$ - $EVENT_TXT$' +tmpl_service_subject = 'Check_MK: $HOSTNAME$/$SERVICEDESC$ $EVENT_TXT$' -# -# SERVICE TEMPLATES -# - -tmpl_service_subject = 'Check_MK: $HOSTNAME$/$SERVICEDESC$ $NOTIFICATIONTYPE$' - -tmpl_service_txt = ''' -Host: $HOSTNAME$ ($HOSTALIAS$) -Address: $HOSTADDRESS$ - -Service: $SERVICEDESC$ -State: $LASTSERVICESTATE$ -> $SERVICESTATE$ ($NOTIFICATIONTYPE$) -Output: $SERVICEOUTPUT$ -Perfdata: $SERVICEPERFDATA$ -$LONGSERVICEOUTPUT$ -''' - -tmpl_service_html = tmpl_head_html + ''' - - - - - - - - - - - - - - - - - - - - - - - - - - - -$GRAPH_CODE$ -''' + tmpl_foot_html opt_debug = '-d' in sys.argv +bulk_mode = '--bulk' in sys.argv class GraphException(Exception): pass @@ -262,22 +327,33 @@ for varname, value in context.items(): template = template.replace('$'+varname+'$', value) + # Debugging of variables. Create content only on demand + if "$CONTEXT_ASCII$" in template or "$CONTEXT_HTML$" in template: + template = replace_variable_context(template, context) + # Remove the rest of the variables and make them empty template = re.sub("\$[A-Z_][A-Z_0-9]*\$", "", template) return template -def prepare_contents(context): - if context['WHAT'] == 'HOST': - tmpl_txt = tmpl_host_txt - tmpl_html = tmpl_host_html - else: - tmpl_txt = tmpl_service_txt - tmpl_html = tmpl_service_html - return substitute_context(tmpl_txt, context), \ - substitute_context(tmpl_html, context) +def replace_variable_context(template, context): + ascii_output = "" + html_output = "
    Object Information
    Hostname$HOSTNAME$ ($HOSTALIAS$)
    Address$HOSTADDRESS$
    Service description$SERVICEDESC$
    State
    State$LASTSERVICESTATE$ -> $SERVICESTATE$ ($NOTIFICATIONTYPE$)
    Output$SERVICEOUTPUT$
    \n" + elements = context.items() + elements.sort() + for varname, value in elements: + ascii_output += "%s=%s\n" % (varname, value) + html_output += "\n" % ( + varname, encode_entities(value)) + html_output += "
    %s%s
    \n" + return template.replace("$CONTEXT_ASCII$", ascii_output).replace("$CONTEXT_HTML$", html_output) + -def multipart_mail(target, subject, content_txt, content_html, attach = []): +def encode_entities(text): + return text.replace("&", "&").replace("<", "<").replace(">", ">") + + +def multipart_mail(target, subject, from_address, reply_to, content_txt, content_html, attach = []): m = MIMEMultipart('related', _charset='utf-8') alt = MIMEMultipart('alternative') @@ -297,7 +373,14 @@ if what == 'img': part = MIMEImage(contents, name = name) else: - part = MIMEApplication(contents, name = name) + if MIMEApplication != None: + part = MIMEApplication(contents, name = name) + else: + # python <2.5 compat + part = MimeBase('application', 'octet-stream') + part.set_payload(contents) + Encoders.encode_base64(part) + part.add_header('Content-ID', '<%s>' % name) # how must be inline or attachment part.add_header('Content-Disposition', how, filename = name) @@ -306,103 +389,320 @@ m['Subject'] = subject m['To'] = target + # Set a few configurable headers + if from_address: + m['From'] = from_address + + if reply_to: + m['Reply-To'] = reply_to + + + return m -def send_mail(m, target): - p = subprocess.Popen(["/usr/sbin/sendmail", "-i", target ], stdin = subprocess.PIPE) +def send_mail(m, target, from_address): + cmd = ["/usr/sbin/sendmail"] + if from_address: + cmd += ['-f', from_address] + cmd += [ "-i", target] + try: + p = subprocess.Popen(cmd, stdin = subprocess.PIPE) + except OSError: + raise Exception("Failed to send the mail: /usr/sbin/sendmail is missing") p.communicate(m.as_string()) return True def fetch_pnp_data(context, params): try: # Autodetect the path in OMD environments - path = "%s/share/pnp4nagios/htdocs/index.php" % context['OMD_ROOT'] + path = "%s/share/pnp4nagios/htdocs/index.php" % context['OMD_ROOT'].encode('utf-8') + php_save_path = "-d session.save_path=%s/tmp/php/session" % context['OMD_ROOT'].encode('utf-8') + env = 'REMOTE_USER="check-mk" SKIP_AUTHORIZATION=1' except: # Non-omd environment - use plugin argument 1 path = context.get('PARAMETER_1', '') + php_save_path = "" # Using default path + skip_authorization = False + env = 'REMOTE_USER="%s"' % context['CONTACTNAME'].encode('utf-8') if not os.path.exists(path): raise GraphException('Unable to locate pnp4nagios index.php (%s)' % path) - return os.popen('REMOTE_USER="%s" php %s "%s"' % (context['CONTACTNAME'], path, params)).read() + return os.popen('%s php %s %s "%s"' % (env, php_save_path, path, params)).read() def fetch_num_sources(context): svc_desc = context['WHAT'] == 'HOST' and '_HOST_' or context['SERVICEDESC'] infos = fetch_pnp_data(context, '/json?host=%s&srv=%s&view=0' % - (context['HOSTNAME'], svc_desc)) + (context['HOSTNAME'].encode('utf-8'), svc_desc.encode('utf-8'))) if not infos.startswith('[{'): - raise GraphException('Unable to fetch graph infos, got: "%s"' % infos) + raise GraphException('Unable to fetch graph infos: %s' % extract_graph_error(infos)) return infos.count('source=') def fetch_graph(context, source, view = 1): svc_desc = context['WHAT'] == 'HOST' and '_HOST_' or context['SERVICEDESC'] graph = fetch_pnp_data(context, '/image?host=%s&srv=%s&view=%d&source=%d' % - (context['HOSTNAME'], svc_desc, view, source)) + (context['HOSTNAME'], svc_desc.encode('utf-8'), view, source)) if graph[:8] != '\x89PNG\r\n\x1a\n': - raise GraphException('Unable to fetch the graph, got: "%s"' % graph) + raise GraphException('Unable to fetch the graph: %s' % extract_graph_error(graph)) return graph -def main(): 
- # gather all options from env - context = dict([ - (var[7:], value.decode("utf-8")) - for (var, value) - in os.environ.items() - if var.startswith("NOTIFY_")]) - - # Fetch graphs for this object. It first tries to detect how many sources - # are available for this object. Then it loops through all sources and - # fetches retrieves the images. If a problem occurs, it is printed to - # stderr (-> notify.log) and the graph is not added to the mail. - try: - num_sources = fetch_num_sources(context) - except GraphException, e: - sys.stderr.write('Unable to fetch graph infos: %s\n' % e) - num_sources = 0 +def extract_graph_error(output): + lines = output.splitlines() + for nr, line in enumerate(lines): + if "Please check the documentation for information about the following error" in line: + return lines[nr+1] + return output - attachments = [] - graph_code = '' - for source in range(0, num_sources): - try: - content = fetch_graph(context, source) - except GraphException, e: - sys.stderr.write('Unable to fetch graph: %s\n' % e) - continue - if context['WHAT'] == 'HOST': - svc_desc = '_HOST_' - else: - svc_desc = context['SERVICEDESC'].replace(' ', '_') - name = '%s-%s-%d.png' % (context['HOSTNAME'], svc_desc, source) +def construct_content(context): + # A list of optional information is configurable via the parameter "elements" + # (new configuration style) + if "PARAMETER_ELEMENTS" in context: + elements = context["PARAMETER_ELEMENTS"].split() + else: + elements = [ "perfdata", "graph", "abstime", "address", "longoutput" ] + + # If argument 2 is given (old style) or the parameter url_prefix is set (new style), + # we know the base url to the installation and can add + # links to hosts and services. ubercomfortable! + if context.get('PARAMETER_2'): + url_prefix = context["PARAMETER_2"] + elif context.get("PARAMETER_URL_PREFIX"): + url_prefix = context["PARAMETER_URL_PREFIX"] + else: + url_prefix = None - attachments.append(('img', name, content, 'inline')) + if url_prefix: + base_url = url_prefix.rstrip('/') + if base_url.endswith("/check_mk"): + base_url = base_url[:-9] + host_url = base_url + context['HOSTURL'] + + context['LINKEDHOSTNAME'] = '%s' % (host_url, context['HOSTNAME']) + context['HOSTLINK'] = '\nLink: %s' % host_url + + if context['WHAT'] == 'SERVICE': + service_url = base_url + context['SERVICEURL'] + context['LINKEDSERVICEDESC'] = '%s' % (service_url, context['SERVICEDESC']) + context['SERVICELINK'] = '\nLink: %s' % service_url + else: + context['LINKEDHOSTNAME'] = context['HOSTNAME'] + context['LINKEDSERVICEDESC'] = context.get('SERVICEDESC', '') + context['HOSTLINK'] = '' + context['SERVICELINK'] = '' + + # Create a notification summary in a new context variable + # Note: This code could maybe move to cmk --notify in order to + # make it available every in all notification scripts + # We have the following types of notifications: + + # - Alerts OK -> CRIT + # NOTIFICATIONTYPE is "PROBLEM" or "RECOVERY" + + # - Flapping Started, Ended + # NOTIFICATIONTYPE is "FLAPPINGSTART" or "FLAPPINGSTOP" + + # - Downtimes Started, Ended, Cancelled + # NOTIFICATIONTYPE is "DOWNTIMESTART", "DOWNTIMECANCELLED", or "DOWNTIMEEND" + + # - Acknowledgements + # NOTIFICATIONTYPE is "ACKNOWLEDGEMENT" + + # - Custom notifications + # NOTIFICATIONTYPE is "CUSTOM" + + html_info = "" + html_state = '$@STATE$' + notification_type = context["NOTIFICATIONTYPE"] + if notification_type in [ "PROBLEM", "RECOVERY" ]: + txt_info = "$PREVIOUS@HARDSHORTSTATE$ -> $@SHORTSTATE$" + html_info = '$PREVIOUS@HARDSTATE$ 
→ ' + \ + html_state + + elif notification_type.startswith("FLAP"): + if "START" in notification_type: + txt_info = "Started Flapping" + else: + txt_info = "Stopped Flapping ($@SHORTSTATE$)" + html_info = "Stopped Flapping (while " + html_state + ")" - context['GRAPH_%d' % source] = name - graph_code += '' % name + elif notification_type.startswith("DOWNTIME"): + what = notification_type[8:].title() + txt_info = "Downtime " + what + " ($@SHORTSTATE$)" + html_info = "Downtime " + what + " (while " + html_state + ")" + + elif notification_type == "ACKNOWLEDGEMENT": + + txt_info = "Acknowledged ($@SHORTSTATE$)" + html_info = "Acknowledged (while " + html_state + ")" + + elif notification_type == "CUSTOM": + txt_info = "Custom Notification ($@SHORTSTATE$)" + html_info = "Custom Notification (while " + html_state + ")" - if graph_code: - context['GRAPH_CODE'] = ( - 'Graphs' - '%s' % graph_code - ) else: - context['GRAPH_CODE'] = '' + txt_info = notification_type # Should neven happen + + if not html_info: + html_info = txt_info + + txt_info = substitute_context(txt_info.replace("@", context["WHAT"]), context) + html_info = substitute_context(html_info.replace("@", context["WHAT"]), context) + + context["EVENT_TXT"] = txt_info + context["EVENT_HTML"] = html_info + + attachments = [] # Compute the subject of the mail if context['WHAT'] == 'HOST': - context['SUBJECT'] = substitute_context(tmpl_host_subject, context) + tmpl = context.get('PARAMETER_HOST_SUBJECT') or tmpl_host_subject + context['SUBJECT'] = substitute_context(tmpl, context) else: - context['SUBJECT'] = substitute_context(tmpl_service_subject, context) + tmpl = context.get('PARAMETER_SERVICE_SUBJECT') or tmpl_service_subject + context['SUBJECT'] = substitute_context(tmpl, context) # Prepare the mail contents - content_txt, content_html = prepare_contents(context) + content_txt, content_html = render_elements(context, elements) + + # Add PNP Graph + if "graph" in elements: + # Fetch graphs for this object. It first tries to detect how many sources + # are available for this object. Then it loops through all sources and + # fetches retrieves the images. If a problem occurs, it is printed to + # stderr (-> notify.log) and the graph is not added to the mail. + try: + num_sources = fetch_num_sources(context) + except GraphException, e: + graph_error = extract_graph_error(str(e)) + if '.xml" not found.' not in graph_error: + sys.stderr.write('Unable to fetch number of graphs: %s\n' % graph_error) + num_sources = 0 + + graph_code = '' + for source in range(0, num_sources): + try: + content = fetch_graph(context, source) + except GraphException, e: + sys.stderr.write('Unable to fetch graph: %s\n' % e) + continue + + if context['WHAT'] == 'HOST': + svc_desc = '_HOST_' + else: + svc_desc = context['SERVICEDESC'].replace(' ', '_') + # replace forbidden windows characters < > ? 
" : | \ / * + for token in ["<", ">", "?", "\"", ":", "|", "\\", "/", "*"] : + svc_desc = svc_desc.replace(token, "x%s" % ord(token)) + name = '%s-%s-%d.png' % (context['HOSTNAME'], svc_desc, source) + + attachments.append(('img', name, content, 'inline')) + cls = '' + if context.get('PARAMETER_NO_FLOATING_GRAPHS'): + cls = ' class="nofloat"' + graph_code += '' % (name, cls) + + if graph_code: + content_html += ( + 'Graphs' + '%s' % graph_code + ) + + content_html = substitute_context(tmpl_head_html, context) + \ + content_html + \ + substitute_context(tmpl_foot_html, context) + + return content_txt, content_html, attachments + +def render_elements(context, elements): + what = context['WHAT'].lower() + even = "even" + tmpl_txt = "" + tmpl_html = "" + for name, whence, forced, title, txt, html in body_elements: + if (whence == "both" or whence == what) and \ + (forced or name in elements): + tmpl_txt += "%-20s %s\n" % (title + ":", txt) + tmpl_html += '%s%s' % ( + even, title, html) + even = even == "even" and "odd" or "even" + + return substitute_context(tmpl_txt, context), \ + substitute_context(tmpl_html, context) + + +def read_bulk_contexts(): + parameters = {} + contexts = [] + in_params = True + + # First comes a section with global variables + for line in sys.stdin: + line = line.strip() + if line: + key, value = line.split("=", 1) + value = value.replace("\1", "\n") + if in_params: + parameters[key] = value + else: + context[key] = value + + else: + in_params = False + context = {} + contexts.append(context) + + return parameters, contexts + +def main(): + if bulk_mode: + attachments = [] + content_txt = "" + content_html = "" + parameters, contexts = read_bulk_contexts() + hosts = set([]) + for context in contexts: + context.update(parameters) + txt, html, att = construct_content(context) + content_txt += txt + content_html += html + attachments += att + mailto = context['CONTACTEMAIL'] # Assume the same in each context + subject = context['SUBJECT'] + hosts.add(context["HOSTNAME"]) + + # Create a useful subject + hosts = list(hosts) + if len(contexts) > 1: + if len(hosts) == 1: + subject = "Check_MK: %d notifications for %s" % (len(contexts), hosts[0]) + else: + subject = "Check_MK: %d notifications for %d hosts" % ( + len(contexts), len(hosts)) + + else: + # gather all options from env + context = dict([ + (var[7:], value.decode("utf-8")) + for (var, value) + in os.environ.items() + if var.startswith("NOTIFY_")]) + content_txt, content_html, attachments = construct_content(context) + mailto = context['CONTACTEMAIL'] + subject = context['SUBJECT'] + + if not mailto: # e.g. empty field in user database + sys.stdout.write("Cannot send HTML email: empty destination email address") + sys.exit(2) + # Create the mail and send it - m = multipart_mail(context['CONTACTEMAIL'], context['SUBJECT'], content_txt, - content_html, attachments) - send_mail(m, context['CONTACTEMAIL']) + from_address = context.get("PARAMETER_FROM") + reply_to = context.get("PARAMETER_REPLY_TO") + m = multipart_mail(mailto, subject, from_address, reply_to, content_txt, content_html, attachments) + send_mail(m, mailto, from_address) main() diff -Nru check-mk-1.2.2p3/mailman_lists check-mk-1.2.6p12/mailman_lists --- check-mk-1.2.2p3/mailman_lists 2013-11-05 09:23:08.000000000 +0000 +++ check-mk-1.2.6p12/mailman_lists 2015-06-24 09:48:37.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . 
\ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. @@ -31,8 +31,8 @@ for line in info: name, num_members = line[0], saveint(line[1]) if name == item: - return (0, 'OK - %d members subcribed' % (num_members), [('count', num_members)]) - return (3, 'UNKNOWN - List could not be found in agent output') + return (0, '%d members subcribed' % (num_members), [('count', num_members)]) + return (3, 'List could not be found in agent output') check_info["mailman_lists"] = { "check_function" : check_mailman_lists, diff -Nru check-mk-1.2.2p3/main.mk-1.2.2p3 check-mk-1.2.6p12/main.mk-1.2.2p3 --- check-mk-1.2.2p3/main.mk-1.2.2p3 2013-11-05 09:42:57.000000000 +0000 +++ check-mk-1.2.6p12/main.mk-1.2.2p3 1970-01-01 00:00:00.000000000 +0000 @@ -1,3 +0,0 @@ -# Put your host names here -# all_hosts = [ 'localhost' ] -all_hosts = [ ] diff -Nru check-mk-1.2.2p3/main.mk-1.2.6p12 check-mk-1.2.6p12/main.mk-1.2.6p12 --- check-mk-1.2.2p3/main.mk-1.2.6p12 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/main.mk-1.2.6p12 2015-09-21 11:01:34.000000000 +0000 @@ -0,0 +1,3 @@ +# Put your host names here +# all_hosts = [ 'localhost' ] +all_hosts = [ ] diff -Nru check-mk-1.2.2p3/Makefile check-mk-1.2.6p12/Makefile --- check-mk-1.2.2p3/Makefile 2013-11-05 09:23:08.000000000 +0000 +++ check-mk-1.2.6p12/Makefile 1970-01-01 00:00:00.000000000 +0000 @@ -1,40 +0,0 @@ -# +------------------------------------------------------------------+ -# | ____ _ _ __ __ _ __ | -# | / ___| |__ ___ ___| | __ | \/ | |/ / | -# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | -# | | |___| | | | __/ (__| < | | | | . \ | -# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | -# | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | -# +------------------------------------------------------------------+ -# -# This file is part of Check_MK. -# The official homepage is at http://mathias-kettner.de/check_mk. -# -# check_mk is free software; you can redistribute it and/or modify it -# under the terms of the GNU General Public License as published by -# the Free Software Foundation in version 2. check_mk is distributed -# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- -# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A -# PARTICULAR PURPOSE. See the GNU General Public License for more de- -# ails. You should have received a copy of the GNU General Public -# License along with GNU Make; see the file COPYING. If not, write -# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, -# Boston, MA 02110-1301 USA. - -waitmax: waitmax.c - @if type diet >/dev/null ; then \ - echo "Compiling with diet (please ignore diet-warnings)..." ; \ - diet gcc -s -o waitmax waitmax.c ; \ - else \ - echo "WARNING: your binary is not portable. Please compile " ; \ - echo "with dietlibc on 32-Bit to get portable statically " ; \ - echo "linked binary." ; \ - echo ; \ - echo "Compiling with normal gcc..." ; \ - gcc -s -o waitmax waitmax.c ; \ - fi - @echo "Fine. 
Typing 'make install' as root now will install into /usr/bin" - -clean: - rm -rf *~ waitmax diff -Nru check-mk-1.2.2p3/Makefile.am check-mk-1.2.6p12/Makefile.am --- check-mk-1.2.2p3/Makefile.am 2013-03-04 11:48:41.000000000 +0000 +++ check-mk-1.2.6p12/Makefile.am 2014-10-30 13:30:24.000000000 +0000 @@ -5,7 +5,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. diff -Nru check-mk-1.2.2p3/Makefile.in check-mk-1.2.6p12/Makefile.in --- check-mk-1.2.2p3/Makefile.in 2013-11-05 09:42:55.000000000 +0000 +++ check-mk-1.2.6p12/Makefile.in 2015-09-21 11:01:31.000000000 +0000 @@ -1,9 +1,9 @@ -# Makefile.in generated by automake 1.11.1 from Makefile.am. +# Makefile.in generated by automake 1.11.6 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, -# 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, -# Inc. +# 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011 Free Software +# Foundation, Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. @@ -22,7 +22,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. @@ -39,6 +39,23 @@ # to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, # Boston, MA 02110-1301 USA. VPATH = @srcdir@ +am__make_dryrun = \ + { \ + am__dry=no; \ + case $$MAKEFLAGS in \ + *\\[\ \ ]*) \ + echo 'am--echo: ; @echo "AM" OK' | $(MAKE) -f - 2>/dev/null \ + | grep '^AM OK$$' >/dev/null || am__dry=yes;; \ + *) \ + for am__flg in $$MAKEFLAGS; do \ + case $$am__flg in \ + *=*|--*) ;; \ + *n*) am__dry=yes; break;; \ + esac; \ + done;; \ + esac; \ + test $$am__dry = yes; \ + } pkgdatadir = $(datadir)/@PACKAGE@ pkgincludedir = $(includedir)/@PACKAGE@ pkglibdir = $(libdir)/@PACKAGE@ @@ -79,6 +96,11 @@ install-pdf-recursive install-ps-recursive install-recursive \ installcheck-recursive installdirs-recursive pdf-recursive \ ps-recursive uninstall-recursive +am__can_run_installinfo = \ + case $$AM_UPDATE_INFO_DIR in \ + n|no|NO) false;; \ + *) (install-info --version) >/dev/null 2>&1;; \ + esac RECURSIVE_CLEAN_TARGETS = mostlyclean-recursive clean-recursive \ distclean-recursive maintainer-clean-recursive AM_RECURSIVE_TARGETS = $(RECURSIVE_TARGETS:-recursive=) \ @@ -91,9 +113,11 @@ distdir = $(PACKAGE)-$(VERSION) top_distdir = $(distdir) am__remove_distdir = \ - { test ! -d "$(distdir)" \ - || { find "$(distdir)" -type d ! -perm -200 -exec chmod u+w {} ';' \ - && rm -fr "$(distdir)"; }; } + if test -d "$(distdir)"; then \ + find "$(distdir)" -type d ! -perm -200 -exec chmod u+w {} ';' \ + && rm -rf "$(distdir)" \ + || { sleep 5 && rm -rf "$(distdir)"; }; \ + else :; fi am__relativize = \ dir0=`pwd`; \ sed_first='s,^\([^/]*\)/.*$$,\1,'; \ @@ -122,6 +146,8 @@ DIST_ARCHIVES = $(distdir).tar.gz GZIP_ENV = --best distuninstallcheck_listfiles = find . 
-type f -print +am__distuninstallcheck_listfiles = $(distuninstallcheck_listfiles) \ + | sed 's|^\./|$(prefix)/|' | grep -v '$(infodir)/dir$$' distcleancheck_listfiles = find . -type f -print ACLOCAL = @ACLOCAL@ AMTAR = @AMTAR@ @@ -201,6 +227,7 @@ localstatedir = @localstatedir@ mandir = @mandir@ mkdir_p = @mkdir_p@ +nagios_headers = @nagios_headers@ oldincludedir = @oldincludedir@ pdfdir = @pdfdir@ prefix = @prefix@ @@ -219,7 +246,7 @@ $(MAKE) $(AM_MAKEFLAGS) all-recursive .SUFFIXES: -am--refresh: +am--refresh: Makefile @: $(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ @@ -255,10 +282,8 @@ $(am__aclocal_m4_deps): config.h: stamp-h1 - @if test ! -f $@; then \ - rm -f stamp-h1; \ - $(MAKE) $(AM_MAKEFLAGS) stamp-h1; \ - else :; fi + @if test ! -f $@; then rm -f stamp-h1; else :; fi + @if test ! -f $@; then $(MAKE) $(AM_MAKEFLAGS) stamp-h1; else :; fi stamp-h1: $(srcdir)/config.h.in $(top_builddir)/config.status @rm -f stamp-h1 @@ -440,13 +465,10 @@ done @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ - test -d "$(distdir)/$$subdir" \ - || $(MKDIR_P) "$(distdir)/$$subdir" \ - || exit 1; \ - fi; \ - done - @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \ - if test "$$subdir" = .; then :; else \ + $(am__make_dryrun) \ + || test -d "$(distdir)/$$subdir" \ + || $(MKDIR_P) "$(distdir)/$$subdir" \ + || exit 1; \ dir1=$$subdir; dir2="$(distdir)/$$subdir"; \ $(am__relativize); \ new_distdir=$$reldir; \ @@ -478,7 +500,11 @@ $(am__remove_distdir) dist-bzip2: distdir - tardir=$(distdir) && $(am__tar) | bzip2 -9 -c >$(distdir).tar.bz2 + tardir=$(distdir) && $(am__tar) | BZIP2=$${BZIP2--9} bzip2 -c >$(distdir).tar.bz2 + $(am__remove_distdir) + +dist-lzip: distdir + tardir=$(distdir) && $(am__tar) | lzip -c $${LZIP_OPT--9} >$(distdir).tar.lz $(am__remove_distdir) dist-lzma: distdir @@ -486,7 +512,7 @@ $(am__remove_distdir) dist-xz: distdir - tardir=$(distdir) && $(am__tar) | xz -c >$(distdir).tar.xz + tardir=$(distdir) && $(am__tar) | XZ_OPT=$${XZ_OPT--e} xz -c >$(distdir).tar.xz $(am__remove_distdir) dist-tarZ: distdir @@ -517,6 +543,8 @@ bzip2 -dc $(distdir).tar.bz2 | $(am__untar) ;;\ *.tar.lzma*) \ lzma -dc $(distdir).tar.lzma | $(am__untar) ;;\ + *.tar.lz*) \ + lzip -dc $(distdir).tar.lz | $(am__untar) ;;\ *.tar.xz*) \ xz -dc $(distdir).tar.xz | $(am__untar) ;;\ *.tar.Z*) \ @@ -536,6 +564,7 @@ && am__cwd=`pwd` \ && $(am__cd) $(distdir)/_build \ && ../configure --srcdir=.. 
--prefix="$$dc_install_base" \ + $(AM_DISTCHECK_CONFIGURE_FLAGS) \ $(DISTCHECK_CONFIGURE_FLAGS) \ && $(MAKE) $(AM_MAKEFLAGS) \ && $(MAKE) $(AM_MAKEFLAGS) dvi \ @@ -564,8 +593,16 @@ list='$(DIST_ARCHIVES)'; for i in $$list; do echo $$i; done) | \ sed -e 1h -e 1s/./=/g -e 1p -e 1x -e '$$p' -e '$$x' distuninstallcheck: - @$(am__cd) '$(distuninstallcheck_dir)' \ - && test `$(distuninstallcheck_listfiles) | wc -l` -le 1 \ + @test -n '$(distuninstallcheck_dir)' || { \ + echo 'ERROR: trying to run $@ with an empty' \ + '$$(distuninstallcheck_dir)' >&2; \ + exit 1; \ + }; \ + $(am__cd) '$(distuninstallcheck_dir)' || { \ + echo 'ERROR: cannot chdir into $(distuninstallcheck_dir)' >&2; \ + exit 1; \ + }; \ + test `$(am__distuninstallcheck_listfiles) | wc -l` -eq 0 \ || { echo "ERROR: files left after uninstall:" ; \ if test -n "$(DESTDIR)"; then \ echo " (check DESTDIR support)"; \ @@ -596,10 +633,15 @@ installcheck: installcheck-recursive install-strip: - $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ - install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ - `test -z '$(STRIP)' || \ - echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install + if test -z '$(STRIP)'; then \ + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + install; \ + else \ + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ + fi mostlyclean-generic: clean-generic: @@ -686,17 +728,18 @@ .PHONY: $(RECURSIVE_CLEAN_TARGETS) $(RECURSIVE_TARGETS) CTAGS GTAGS \ all all-am am--refresh check check-am clean clean-generic \ ctags ctags-recursive dist dist-all dist-bzip2 dist-gzip \ - dist-lzma dist-shar dist-tarZ dist-xz dist-zip distcheck \ - distclean distclean-generic distclean-hdr distclean-tags \ - distcleancheck distdir distuninstallcheck dvi dvi-am html \ - html-am info info-am install install-am install-data \ - install-data-am install-dvi install-dvi-am install-exec \ - install-exec-am install-html install-html-am install-info \ - install-info-am install-man install-pdf install-pdf-am \ - install-ps install-ps-am install-strip installcheck \ - installcheck-am installdirs installdirs-am maintainer-clean \ - maintainer-clean-generic mostlyclean mostlyclean-generic pdf \ - pdf-am ps ps-am tags tags-recursive uninstall uninstall-am + dist-lzip dist-lzma dist-shar dist-tarZ dist-xz dist-zip \ + distcheck distclean distclean-generic distclean-hdr \ + distclean-tags distcleancheck distdir distuninstallcheck dvi \ + dvi-am html html-am info info-am install install-am \ + install-data install-data-am install-dvi install-dvi-am \ + install-exec install-exec-am install-html install-html-am \ + install-info install-info-am install-man install-pdf \ + install-pdf-am install-ps install-ps-am install-strip \ + installcheck installcheck-am installdirs installdirs-am \ + maintainer-clean maintainer-clean-generic mostlyclean \ + mostlyclean-generic pdf pdf-am ps ps-am tags tags-recursive \ + uninstall uninstall-am # Tell versions [3.59,3.63) of GNU make to not export all variables. 
diff -Nru check-mk-1.2.2p3/manpage.template check-mk-1.2.6p12/manpage.template --- check-mk-1.2.2p3/manpage.template 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/manpage.template 2013-12-16 16:59:11.000000000 +0000 @@ -0,0 +1,45 @@ +title: Dummy check man page - used as template for new check manuals +agents: linux, windows, aix, solaris, hpux, vms, freebsd, snmp +catalog: see modules/catalog.py for possible values +license: GPL +distribution: check_mk +description: + Describe here: (1) what the check actually does, (2) under which + circumstances it goes warning/critical, (3) which devices are supported + by the check, (4) if the check requires a separate plugin or + tool or separate configuration on the target host. + +item: + Describe the syntax and meaning of the check's item here. Provide all + information one needs if coding a manual check with {checks +=} in {main.mk}. + Give an example. If the check uses {None} as its sole item, + then leave out this section. + +examples: + # Give examples for configuration in {main.mk} here. If the check has + # configuration variables, then give examples for them here. + + # set default levels to 40 and 60 percent: + foo_default_values = (40, 60) + + # another configuration variable here: + inventory_foo_filter = [ "superfoo", "superfoo2" ] + +perfdata: + Describe precisely the number and meaning of performance variables + the check sends. If it outputs no performance data, then leave out this + section. + +inventory: + Describe how the inventory for the check works. Which items + will it find? Describe the influence of check-specific + configuration parameters on the inventory. + +[parameters] +foofirst(int): describe the first parameter here (if parameters are grouped + as a tuple) +fooother(string): describe another parameter here. + +[configuration] +foo_default_levels(int, int): Describe global configuration variable of + foo here. Important: also tell the user how they are preset. diff -Nru check-mk-1.2.2p3/masterguard_out_voltage check-mk-1.2.6p12/masterguard_out_voltage --- check-mk-1.2.2p3/masterguard_out_voltage 2013-11-05 09:23:07.000000000 +0000 +++ check-mk-1.2.6p12/masterguard_out_voltage 2015-06-24 09:48:36.000000000 +0000 @@ -1,16 +1,16 @@ -title: Show the out voltage of masterguard ups devices +title: Masterguard UPS Devices: Out voltage agents: snmp -author: Bastian Kuhn +catalog: hw/power/emerson license: GPL distribution: check_mk description: - Out Voltage + Checks the Out Voltage of Masterguard UPS Devices item: ID of the phase perfdata: - The current Voltage + One value: The current Voltage inventory: One service per phase will be created diff -Nru check-mk-1.2.2p3/mbg_lantime_refclock check-mk-1.2.6p12/mbg_lantime_refclock --- check-mk-1.2.2p3/mbg_lantime_refclock 2013-11-05 09:23:08.000000000 +0000 +++ check-mk-1.2.6p12/mbg_lantime_refclock 2015-06-24 09:48:37.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK.
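The {examples} section of the man page template above consists of main.mk snippets, and since main.mk is plain Python, a manual check entry in the {checks +=} style mentioned under {item} could look like the following sketch (host name, item, and levels are invented; the variable names are the template's own dummies):

    # Hypothetical main.mk snippet in the spirit of the template:
    foo_default_values = (40, 60)   # warn/crit in percent

    checks += [
        # (host, check type, item, parameters)
        ( "myhost123", "foo", "superfoo", foo_default_values ),
    ]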
@@ -88,20 +88,23 @@ perfdata = [('sat_good', gps_sat_good, params[0], params[1]), ('sat_total', gps_sat_total) ] - return (state, '%s - %s' % (nagios_state_names[state], ', '.join(state_txt)), perfdata) + return (state, ', '.join(state_txt), perfdata) - return (3, 'UNKNOWN - Got no state information') + return (3, 'Got no state information') - -check_info['mbg_lantime_refclock'] = ( check_mbg_lantime_refclock, "LANTIME Refclock", 1, inventory_mbg_lantime_refclock ) -snmp_info['mbg_lantime_refclock'] = ( ".1.3.6.1.4.1.5597.3.2", [ - 4, # MBG-SNMP-MIB::mbgLtRefClockModeVal - 6, # MBG-SNMP-MIB::mbgLtRefGpsStateVal - 7, # MBG-SNMP-MIB::mbgLtRefGpsPosition - 9, # MBG-SNMP-MIB::mbgLtRefGpsSatellitesGood - 10, # MBG-SNMP-MIB::mbgLtRefGpsSatellitesInView - 16, # MBG-SNMP-MIB::mbgLtRefGpsModeVal - ]) - -snmp_scan_functions['mbg_lantime_refclock'] \ - = lambda oid: oid(".1.3.6.1.2.1.1.2.0") == ".1.3.6.1.4.1.5597.3" +check_info["mbg_lantime_refclock"] = { + 'check_function': check_mbg_lantime_refclock, + 'inventory_function': inventory_mbg_lantime_refclock, + 'service_description': 'LANTIME Refclock', + 'has_perfdata': True, + 'snmp_info': ('.1.3.6.1.4.1.5597.3.2', [ + 4, # MBG-SNMP-MIB::mbgLtRefClockModeVal + 6, # MBG-SNMP-MIB::mbgLtRefGpsStateVal + 7, # MBG-SNMP-MIB::mbgLtRefGpsPosition + 9, # MBG-SNMP-MIB::mbgLtRefGpsSatellitesGood + 10, # MBG-SNMP-MIB::mbgLtRefGpsSatellitesInView + 16, # MBG-SNMP-MIB::mbgLtRefGpsModeVal + ]), + 'snmp_scan_function': \ + lambda oid: oid(".1.3.6.1.2.1.1.2.0") == ".1.3.6.1.4.1.5597.3", +} diff -Nru check-mk-1.2.2p3/mbg_lantime_state check-mk-1.2.6p12/mbg_lantime_state --- check-mk-1.2.2p3/mbg_lantime_state 2013-11-05 09:23:08.000000000 +0000 +++ check-mk-1.2.6p12/mbg_lantime_state 2015-09-21 10:59:54.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. 
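The refclock hunk above (and the state hunk that follows) repeat a pattern running through this entire diff: the legacy positional registration, with SNMP metadata kept in the side tables snmp_info and snmp_scan_functions, is folded into a single check_info dict. A sketch of the two styles side by side, using a made-up check name and functions that are not part of this patch:

    # Old style: positional tuple plus separate side tables.
    # check_info['example'] = (check_example, "Example %s", 1, inventory_example)
    # snmp_info['example'] = (".1.3.6.1.4.1.99999.1", [1, 2])

    # New style: one dict carries all registration data for the check.
    check_info["example"] = {
        'check_function'      : check_example,       # hypothetical function
        'inventory_function'  : inventory_example,   # hypothetical function
        'service_description' : 'Example %s',
        'has_perfdata'        : True,                # was the tuple's third field (0/1)
        'snmp_info'           : ('.1.3.6.1.4.1.99999.1', [1, 2]),
    }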
@@ -81,21 +81,22 @@ perfdata = [('offset', refclock_offset, params[2], params[3])] - return (state, '%s - %s' % (nagios_state_names[state], ', '.join(state_txt)), perfdata) + return (state, ', '.join(state_txt), perfdata) - return (3, 'UNKNOWN - Got no state information') + return (3, 'Got no state information') - -check_info['mbg_lantime_state'] = ( check_mbg_lantime_state, "LANTIME State", 1, inventory_mbg_lantime_state ) -snmp_info['mbg_lantime_state'] = ( ".1.3.6.1.4.1.5597.3.1", [ - 2, # MBG-SNMP-MIB::mbgLtNtpCurrentStateVal - 3, # MBG-SNMP-MIB::mbgLtNtpNtpStratum - 4, # MBG-SNMP-MIB::mbgLtNtpActiveRefclockId - 5, # MBG-SNMP-MIB::mbgLtNtpActiveRefclockName - 7, # MBG-SNMP-MIB::mbgLtNtpActiveRefclockOffsetVal - ]) - - - -snmp_scan_functions['mbg_lantime_state'] \ - = lambda oid: oid(".1.3.6.1.2.1.1.2.0") == ".1.3.6.1.4.1.5597.3" +check_info["mbg_lantime_state"] = { + 'check_function': check_mbg_lantime_state, + 'inventory_function': inventory_mbg_lantime_state, + 'service_description': 'LANTIME State', + 'has_perfdata': True, + 'snmp_info': ('.1.3.6.1.4.1.5597.3.1', [ + 2, # MBG-SNMP-MIB::mbgLtNtpCurrentStateVal + 3, # MBG-SNMP-MIB::mbgLtNtpNtpStratum + 4, # MBG-SNMP-MIB::mbgLtNtpActiveRefclockId + 5, # MBG-SNMP-MIB::mbgLtNtpActiveRefclockName + 7, # MBG-SNMP-MIB::mbgLtNtpActiveRefclockOffsetVal + ]), + 'snmp_scan_function': \ + lambda oid: oid(".1.3.6.1.2.1.1.2.0") == ".1.3.6.1.4.1.5597.3", +} diff -Nru check-mk-1.2.2p3/mcdata_fcport check-mk-1.2.6p12/mcdata_fcport --- check-mk-1.2.2p3/mcdata_fcport 2013-11-05 09:23:08.000000000 +0000 +++ check-mk-1.2.6p12/mcdata_fcport 2015-06-24 09:48:37.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. 
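Both LANTIME conversions above register the same snmp_scan_function. During discovery Check_MK calls this predicate with a getter for single OIDs, and only walks the device for this check if it returns true. A standalone sketch of how such a predicate can be exercised (the stubbed getter and its value are invented):

    # The scan predicate receives a callable that fetches one OID value.
    def scan_lantime(oid):
        # sysObjectID must match the Meinberg enterprise subtree
        return oid(".1.3.6.1.2.1.1.2.0") == ".1.3.6.1.4.1.5597.3"

    # exercising it with a stubbed getter:
    fake_oid = lambda o: {".1.3.6.1.2.1.1.2.0": ".1.3.6.1.4.1.5597.3"}.get(o)
    assert scan_lantime(fake_oid)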
@@ -79,23 +79,25 @@ def check_mcdata_fcport(item, params, info): return check_if_common(item, params, mcdata_fcport_convert_to_if64(info)) -check_info['mcdata_fcport'] = (check_mcdata_fcport, "Port %s", 1, inventory_mcdata_fcport) check_includes['mcdata_fcport'] = [ "if.include" ] -check_default_levels['mcdata_fcport'] = "if_default_levels" - -snmp_info['mcdata_fcport'] = \ - ( ".1.3.6.1.4.1.289.2.1.1.2.3.1.1", [ - 1, # EF-6000-MIB::ef6000PortIndex - 3, # EF-6000-MIB::ef6000PortOpStatus - 11, # EF-6000-MIB::ef6000PortSpeed - 67, # EF-6000-MIB::ef6000PortTxWords64 - 68, # EF-6000-MIB::ef6000PortRxWords64 - 69, # EF-6000-MIB::ef6000PortTxFrames64 - 70, # EF-6000-MIB::ef6000PortRxFrames64 - 83, # EF-6000-MIB::ef6000PortC3Discards64 - 65, # EF-6000-MIB::ef6000PortCrcs - ] ) - -# check if number of network interfaces (IF-MIB::ifNumber.0) is at least 2 -snmp_scan_functions['mcdata_fcport'] = \ - lambda oid: oid(".1.3.6.1.2.1.1.2.0").startswith(".1.3.6.1.4.1.289.") +check_info["mcdata_fcport"] = { + 'check_function': check_mcdata_fcport, + 'inventory_function': inventory_mcdata_fcport, + 'service_description': 'Port %s', + 'has_perfdata': True, + 'snmp_info': ('.1.3.6.1.4.1.289.2.1.1.2.3.1.1', [ + 1, # EF-6000-MIB::ef6000PortIndex + 3, # EF-6000-MIB::ef6000PortOpStatus + 11, # EF-6000-MIB::ef6000PortSpeed + 67, # EF-6000-MIB::ef6000PortTxWords64 + 68, # EF-6000-MIB::ef6000PortRxWords64 + 69, # EF-6000-MIB::ef6000PortTxFrames64 + 70, # EF-6000-MIB::ef6000PortRxFrames64 + 83, # EF-6000-MIB::ef6000PortC3Discards64 + 65, # EF-6000-MIB::ef6000PortCrcs + ]), + # check if number of network interfaces (IF-MIB::ifNumber.0) is at least 2 + 'snmp_scan_function': \ + lambda oid: oid(".1.3.6.1.2.1.1.2.0").startswith(".1.3.6.1.4.1.289."), + 'default_levels_variable': 'if_default_levels', +} diff -Nru check-mk-1.2.2p3/md check-mk-1.2.6p12/md --- check-mk-1.2.2p3/md 2013-11-05 09:23:08.000000000 +0000 +++ check-mk-1.2.6p12/md 2015-07-15 09:04:37.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. @@ -63,6 +63,50 @@ # unused devices: # --------------------------------------------------------- +# Another example with RAID1 replacement gone wrong +# --------------------------------------------------------- +# Personalities : [raid1] +# md0 : active raid1 sdc3[3] sda3[2](F) sdb3[1] +# 48837528 blocks super 1.0 [2/2] [UU] +# +# md1 : active raid1 sdc4[3] sda4[2](F) sdb4[1] +# 193277940 blocks super 1.0 [2/2] [UU] +# +# unused devices: +# ---------------------------------------------------------- + +# Another example with RAID5 being recovered +# --------------------------------------------------------- +# Personalities : [raid1] [raid6] [raid5] [raid4] +# md1 : active raid1 sdd1[1] sdc1[0] +# 10484668 blocks super 1.1 [2/2] [UU] +# bitmap: 1/1 pages [4KB], 65536KB chunk +# +# md127 : active raid5 sda3[0] sdb3[1] sdd3[4] sdc3[2] +# 11686055424 blocks super 1.2 level 5, 512k chunk, algorithm 2 [4/3] [UUU_] +# [======>..............] 
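The snmp_info entry in the mcdata_fcport hunk above follows the usual convention: a base OID plus a list of column suffixes. Check_MK walks base.column for every listed column and zips the results, so the check function receives one row per table index. A sketch with invented values:

    # snmp_info = (base_oid, [column, ...]) produces an 'info' table like:
    #   base .1.3.6.1.4.1.289.2.1.1.2.3.1.1 with columns [1, 3, 11]
    info = [
        # PortIndex  PortOpStatus  PortSpeed   (one row per FC port)
        [ '1',       '1',          '2' ],
        [ '2',       '2',          '4' ],
    ]

mcdata_fcport then reshapes such rows into if64 format so that the generic check_if_common logic can evaluate them.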
recovery = 31.8% (1241578496/3895351808) finish=746.8min speed=59224K/sec +# +# md0 : active raid1 sdb1[1] sda1[0] +# 10485688 blocks super 1.0 [2/2] [UU] +# bitmap: 0/1 pages [0KB], 65536KB chunk +# +# unused devices: +# ---------------------------------------------------------- + +# And now for something completely different: +# --------------------------------------------------------- +# Personalities : [raid1] [raid10] +# md1 : active raid10 sdd6[3] sdb6[1] sda6[0] +# 1463055360 blocks 64K chunks 2 near-copies [4/3] [UU_U] +# +# md0 : active raid1 sdd1[3] sdb1[1] sda1[0] +# 104320 blocks [4/3] [UU_U] +# +# unused devices: +# --------------------------------------------------------- + +# TODO: Write a parse function! + def inventory_md(info): inventory = [] for line in info: @@ -77,31 +121,51 @@ inventory.append( (device, None) ) return inventory + def check_md(item, _no_params, info): raid_state = '' its_next = False + state_next = False for line in info: if line[0] == item and line[1] == ':': raid_state = line[2] if raid_state != 'active' and raid_state != 'active(auto-read-only)': - return (2, "CRIT - raid state is '%s' (should be 'active')" % (raid_state,)) + return (2, "raid state is '%s' (should be 'active')" % (raid_state,)) # Usually (auto-read-only) sticks to active without a space. # But on some kernels it appears separated by a space if line[3] == '(auto-read-only)': del line[3] - num_disks = len([x for x in line[4:] if not x.endswith("(S)") ]) # omit spare disks + all_disks = len([x for x in line[4:]]) # all disks + spare_disks = len([x for x in line[4:] if x.endswith("(S)") ]) # spare disks + failed_disks = len([x for x in line[4:] if x.endswith("(F)") ]) # failed disks + active_disks = all_disks - spare_disks - failed_disks its_next = True elif its_next: disk_state_1 = line[-2] + (num_disks, expected_disks) = map(int,disk_state_1[1:-1].split('/')) disk_state_2 = line[-1] - if disk_state_1 != '[%d/%d]' % (num_disks, num_disks) or \ - disk_state_2 != '[' + ("U"*num_disks) + ']': - return (2, 'CRIT - disk state is %s %s (expected %d disks to be up)' % - (disk_state_1, disk_state_2, num_disks)) - else: - return (0, 'OK - raid active, disk state is %s %s' % (disk_state_1, disk_state_2)) - return (2, 'CRIT - no raid device %s' % item) + working_disks = disk_state_2.count('U') + state_next = True + its_next = False + elif state_next: + if num_disks == expected_disks and active_disks == working_disks: + return (0, 'raid active, disk state is %s %s' % (disk_state_1, disk_state_2)) + if len(line) > 6 and line[-6] != '' and "speed=" in line[-1]: + build_state_1 = line[-6] + build_state_2 = line[-4] + build_est = line[-2].partition('=')[2] + build_speed = float(line[-1].partition('=')[2][:-5]) / 1024 + return (1, 'disk state is %s %s (expected %d disks to be up) - %s %s @ %.1fMB/s (%s)' % + (disk_state_1, disk_state_2, expected_disks, build_state_1, build_state_2, build_speed, build_est)) + return (2, 'disk state is %s %s (expected %d disks to be up)' % + (disk_state_1, disk_state_2, expected_disks)) + return (2, 'no RAID device %s' % item) + -check_info['md'] = (check_md, "MD Softraid %s", 0, inventory_md) -checkgroup_of['md'] = "raid" +check_info["md"] = { + 'check_function': check_md, + 'inventory_function': inventory_md, + 'service_description': 'MD Softraid %s', + 'group': 'raid', +} diff -Nru check-mk-1.2.2p3/megaraid_bbu check-mk-1.2.6p12/megaraid_bbu --- check-mk-1.2.2p3/megaraid_bbu 2013-11-05 09:23:08.000000000 +0000 +++ check-mk-1.2.6p12/megaraid_bbu 2015-06-24 
09:48:37.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. @@ -38,20 +38,24 @@ 'Charging Status' : ('None', 1), # nolearn 'Battery State' : ('Operational', 2), # nolearn 'Learn Cycle Status' : ('OK', 1), - 'Learn Cycle Active' : ('Yes', 0), + 'Learn Cycle Active' : ('No', 0), 'Battery Pack Missing' : ('No', 2), 'Battery Replacement required' : ('No', 1), 'Over Temperature' : ('No', 2), 'Over Charged' : ('No', 1), 'Voltage' : ('OK', 2), # nolearn + 'isSOHGood' : ('Yes', 2), } def megaraid_bbu_parse(info): controllers = {} + current_hba = None for line in info: - # Format the Agent output - name, data = " ".join(line).split(":") + joined = " ".join(line) + if ":" not in joined: + continue # skip garbage lines + name, data = joined.split(":") name = name.strip() data = data.strip() @@ -59,7 +63,7 @@ if name in [ "BBU status for Adapter", "BBU status for Adpater" ]: current_hba = {} controllers[data] = current_hba - else: + elif current_hba != None: # We lose the numerical temperature here # (same key is used twice in output of megacli) current_hba[name] = data @@ -73,7 +77,7 @@ def check_megaraid_bbu(item, _no_params, info): controllers = megaraid_bbu_parse(info) if item not in controllers: - return (3, "UNKNOWN - Controller data not found in agent output") + return (3, "Controller data not found in agent output") controller = controllers[item] broken = [] @@ -83,6 +87,8 @@ charge = ", No charge information reported for this controller" else: charge = ", Charge is %s" % controller['Relative State of Charge'] + if 'Full Charge Capacity' in controller: + charge += ", Capacity is %s" % controller['Full Charge Capacity'] # verify defined important parameters to current level for varname, (refvalue, refstate) in megaraid_bbu_refvalues.items(): @@ -90,22 +96,33 @@ # if your bbu chipset fails and you still get a partial response this will lead # to a false result. but people asked for it :> try: - if controller[varname] != refvalue: - broken.append("%s is %s, but should be %s(%s)" % (varname, value, refvalue, "!" * refstate)) + value = controller[varname] + # Some controllers report "Optimal" instead of "Operational" + if value == "Optimal": + pass + # Some controllers do not output Temperature: OK and Voltage: OK. + elif varname in [ "Temperature", "Voltage" ] and value[0].isdigit(): + pass + elif value != refvalue: + text = '%s is %s' % (varname, value) + if refstate: + text += ' (%s)' % ("!" 
* refstate) + text += ' (Expected: %s)' % refvalue + broken.append(text) state = max(state, refstate) - except: + except KeyError: pass if controller.get("Learn Cycle Active") == "Yes": - return (0, "OK - no states to check (controller is in learn cycle)" + charge) + return (0, "no states to check (controller is in learn cycle)" + charge) # return assembled info elif broken: - return (state, nagios_state_names[state] + " - " + ", ".join(broken) + charge) + return (state, ", ".join(broken) + charge) else: - return (0, "OK - all states as expected" + charge) + return (0, "all states as expected" + charge) - return (3, "UNKNOWN - Check not implemented") - - - -check_info["megaraid_bbu"] = (check_megaraid_bbu, "RAID Adapter/BBU %s", 0, inventory_megaraid_bbu) +check_info["megaraid_bbu"] = { + 'check_function': check_megaraid_bbu, + 'inventory_function': inventory_megaraid_bbu, + 'service_description': 'RAID Adapter/BBU %s', +} diff -Nru check-mk-1.2.2p3/megaraid_ldisks check-mk-1.2.6p12/megaraid_ldisks --- check-mk-1.2.2p3/megaraid_ldisks 2013-11-05 09:23:08.000000000 +0000 +++ check-mk-1.2.6p12/megaraid_ldisks 2015-07-02 10:08:06.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. @@ -31,33 +31,68 @@ # State: Optimal # Stripe Size: 64kB # Number Of Drives:2 +# Adapter 1: No Virtual Drive Configured. + +def megaraid_ldisks_is_new_drive(l): + return l.startswith('Virtual Disk:') or l.startswith('Virtual Drive:') \ + or l.startswith('CacheCade Virtual Drive:') def inventory_megaraid_ldisks(info): inventory = [] adapter = None for line in info: - if line[0] == "Adapter": + l = ' '.join(line) + if line[0] == "Adapter" and not l.endswith('No Virtual Drive Configured.'): adapter = int(line[1]) - elif line[0] == "Virtual" and (line[1] == "Disk:" or line[1] == "Drive:"): - disk = int(line[2]) + elif megaraid_ldisks_is_new_drive(l): + disk = int(l.split(': ')[1].split(' ')[0]) inventory.append( ("%d/%d" % (adapter, disk), "", None) ) return inventory def check_megaraid_ldisks(item, _no_params, info): adapter = None + cache = None + write = None + found = False + result = 0 + infotext = '' for line in info: - if line[0] == "Adapter": + l = ' '.join(line) + if line[0] == "Adapter" and not l.endswith('No Virtual Drive Configured.'): adapter = int(line[1]) - elif line[0] == "Virtual" and (line[1] == "Disk:" or line[1] == "Drive:"): - disk = int(line[2]) + elif megaraid_ldisks_is_new_drive(l): + if found: + break + disk = int(l.split(': ')[1].split(' ')[0]) found = "%d/%d" % (adapter, disk) == item - elif found and line[0].startswith("State"): - state = " ".join(line[1:]).replace(': ', '') - infotext = "state is %s" % state - if state == "Optimal": - return (0, "OK - " + infotext) - else: - return (2, "CRIT - " + infotext) - return (3, "UNKNOWN - no such adapter/logical disk found") + elif found: + if line[0].startswith("State"): + state = " ".join(line[1:]).replace(': ', '') + infotext += "state is %s" % state + if state != "Optimal": + result = max(result, 2) + elif line[0].startswith("Default") and line[1].startswith("Cache"): + cache = " ".join(line[3:]).replace(': ', '') + elif line[0].startswith("Current") and line[1].startswith("Cache"): + state = " ".join(line[3:]).replace(': ', '') + if cache != 
state: + infotext += ", cache is %s, expected %s" % (state, cache) + result = max(result, 1) + elif line[0].startswith("Default") and line[1].startswith("Write"): + write = " ".join(line[3:]).replace(': ', '') + elif line[0].startswith("Current") and line[1].startswith("Write"): + state = " ".join(line[3:]).replace(': ', '') + if write != state: + infotext += ", write is %s, expected %s" % (write, cache) + result = max(result, 1) + if found: + return (result, infotext) + return (3, "no such adapter/logical disk found") + -check_info['megaraid_ldisks'] = (check_megaraid_ldisks, "RAID Adapter/LDisk %s", 1, inventory_megaraid_ldisks) +check_info["megaraid_ldisks"] = { + 'check_function': check_megaraid_ldisks, + 'inventory_function': inventory_megaraid_ldisks, + 'service_description': 'RAID Adapter/LDisk %s', + 'has_perfdata': True, +} diff -Nru check-mk-1.2.2p3/megaraid_pdisks check-mk-1.2.6p12/megaraid_pdisks --- check-mk-1.2.2p3/megaraid_pdisks 2013-11-05 09:23:08.000000000 +0000 +++ check-mk-1.2.6p12/megaraid_pdisks 2015-09-21 10:59:54.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. @@ -86,7 +86,7 @@ elif line[0] == "Slot": slot = int(line[-1]) elif line[0] == "Firmware" and line[1] == "state:": - state = line[2] + state = line[2].rstrip(',') elif line[0] == "Inquiry" and line[1] == "Data:": name = " ".join(line[2:]) #Adapter, Enclosure, Encolsure Device ID, Slot, State, Name @@ -96,25 +96,32 @@ return return_var - def inventory_megaraid_pdisks(info): - info = megaraid_pdisks_parse(info) inventory = [] for adapter, enclosure, enc_dev_id, slot, state, name in info: - inventory.append(("%s%s/%s" % (adapter, enclosure, slot), repr(state))) + inventory.append(("%s%s/%s" % (adapter, enclosure, slot), None)) return inventory -def check_megaraid_pdisks(item, target_state, info): +megaraid_pdisks_states = { + 'Online' : 0, + 'Hotspare' : 0, + 'Unconfigured(good)' : 0, + 'Failed' : 2, + 'Unconfigured(bad)' : 1, +} + +def check_megaraid_pdisks(item, _no_params, info): info = megaraid_pdisks_parse(info) for adapter, enclosure, enc_dev_id, slot, state, name in info: if "%s%s/%s" % (adapter, enclosure, slot) == item: - infotext = " - %s (%s)" % (state, name) - if state == target_state: - return (0, "OK" + infotext) - else: - return (2, "CRIT" + infotext) - return (3, "UNKNOWN - No disk in encl/slot %s found" % item) + return megaraid_pdisks_states.get(state, 3), "%s (%s)" % (state, name) + return 3, "No disk in encl/slot %s found" % item -check_info['megaraid_pdisks'] = (check_megaraid_pdisks, "RAID PDisk Adapt/Enc/Sl %s", 1, inventory_megaraid_pdisks) -checkgroup_of['megaraid_pdisks'] = "raid_disk" +check_info["megaraid_pdisks"] = { + 'check_function': check_megaraid_pdisks, + 'inventory_function': inventory_megaraid_pdisks, + 'service_description': 'RAID PDisk Adapt/Enc/Sl %s', + 'has_perfdata': False, + 'group': 'raid_disk', +} diff -Nru check-mk-1.2.2p3/mem check-mk-1.2.6p12/mem --- check-mk-1.2.2p3/mem 2013-11-05 09:23:08.000000000 +0000 +++ check-mk-1.2.6p12/mem 2015-09-21 10:59:54.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . 
\ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. @@ -37,6 +37,10 @@ # | used for unixoide operating systems. | # +----------------------------------------------------------------------+ +# The following variable is obsolete. It is kept here so that Check_MK +# won't fail if it's found in main.mk +mem_extended_perfdata = None + def parse_proc_meminfo(info): return dict([ (i[0][:-1], int(i[1])) for i in info ]) @@ -44,20 +48,20 @@ meminfo = parse_proc_meminfo(info) if "MemTotal" in meminfo and \ "PageTotal" not in meminfo: # This case is handled by mem.win - return [(None, "memused_default_levels")] + return [(None, {})] def check_mem_used(_no_item, params, info): meminfo = parse_proc_meminfo(info) return check_memory(params, meminfo) check_info['mem.used'] = { - "check_function" : check_mem_used, - "inventory_function" : inventory_mem_used, - "service_description" : "Memory used", - "has_perfdata" : True, - "group" : "memory", - "check_config_variables" : [ "mem_extended_perfdata" ], - "includes" : [ "mem.include" ], + "check_function" : check_mem_used, + "inventory_function" : inventory_mem_used, + "service_description" : "Memory used", + "has_perfdata" : True, + "group" : "memory", + "default_levels_variable" : "memory_default_levels", + "includes" : [ "mem.include" ], } @@ -74,23 +78,21 @@ # +----------------------------------------------------------------------+ # Special memory and page file check for Windows -check_default_levels['mem.win'] = "memory_win_default_levels" factory_settings["memory_win_default_levels"] = { "memory" : ( 80.0, 90.0 ), - "pagefile" : ( 50.0, 70.0 ), + "pagefile" : ( 80.0, 90.0 ), } def inventory_mem_win(info): meminfo = parse_proc_meminfo(info) - if "PageTotal" in meminfo: + if "MemTotal" in meminfo and "PageTotal" in meminfo: return [(None, {})] def check_mem_windows(item, params, info): meminfo = parse_proc_meminfo(info) - perfdata = [] - infotxts = [] MB = 1024.0 * 1024 - worststate = 0 + now = time.time() + for title, what, paramname in [ ( "Memory", "Mem", "memory" ), ( "Page file", "Page", "pagefile" )]: @@ -101,36 +103,75 @@ free_mb = free_kb / 1024.0 perc = 100.0 * used_kb / total_kb - # Now check the levels - warn, crit = params[paramname] - if (type(crit) == int and free_mb <= crit) or \ - (type(crit) == float and perc >= crit): - worststate = 2 - state_code = '(!!)' - elif (type(warn) == int and free_mb <= warn) or \ - (type(warn) == float and perc >= warn): - worststate = max(worststate, 1) - state_code = '(!)' - else: - state_code = "" + infotext = "%s usage: %.1f%% (%.1f/%.1f GB)" % \ + (title, perc, used_kb / MB, total_kb / MB) - # Convert levels to absolute values (for perfdata) - if type(warn) == float: - warn = total_kb * warn / 100 / 1024 - if type(crit) == float: - crit = total_kb * crit / 100 / 1024 + if type(params[paramname]) == tuple: + warn, crit = params[paramname] - infotxts.append("%s usage: %.1f%% (%.1f/%.1f GB)%s" % - (title, perc, used_kb / MB, total_kb / MB, state_code)) - perfdata.append((paramname, used_kb / 1024.0, warn, crit, 0, total_kb / 1024.0)) + # In perfdata show warn/crit as absolute values + if type(warn) == float: + warn_kb = total_kb * warn / 100 / 1024 + else: + warn_kb = warn * 1024 + + if type(crit) == float: + crit_kb = total_kb * crit / 100 / 1024 + else: + crit_kb = crit * 
1024 - return (worststate, "%s - %s" % - (nagios_state_names[worststate], ", ".join(infotxts)), perfdata) + perfdata = [(paramname, used_kb / 1024.0, warn_kb, crit_kb, 0, total_kb / 1024.0)] + # Predictive levels have no level information in the performance data + else: + perfdata = [(paramname, used_kb / 1024.0, None, None, 0, total_kb / 1024.0)] + + # Do averaging, if configured, just for matching the levels + if "average" in params: + average_min = params["average"] + used_kb = get_average("mem.win.%s" % paramname, + now, used_kb, average_min, initialize_zero = False) + used_mb = used_kb / 1024.0 + free_mb = (total_kb / 1024.0) - used_mb + perc = 100.0 * used_kb / total_kb + infotext += ", %d min average: %.1f%% (%.1f GB)" % (average_min, perc, used_kb / MB) + perfdata.append((paramname + "_avg", used_kb / 1024.0)) -check_info['mem.win'] = (check_mem_windows, "Memory and pagefile", 1, inventory_mem_win) -checkgroup_of['mem.win'] = "memory_pagefile_win" + # Now check the levels + if type(params[paramname]) == tuple: + if (type(crit) == int and free_mb <= crit) or \ + (type(crit) == float and perc >= crit): + state = 2 + elif (type(warn) == int and free_mb <= warn) or \ + (type(warn) == float and perc >= warn): + state = 1 + else: + state = 0 + # Predictive levels + else: + state, infoadd, perfadd = check_levels( + used_kb / 1024.0, # Current value stored in MB in RRDs + "average" in params and paramname + "_avg" or paramname, # Name of RRD variable + params[paramname], + unit = "GB", # Levels are specified in GB... + scale = 1024, # ... in WATO ValueSpec + ) + if infoadd: + infotext += ", " + infoadd + perfdata += perfadd + + yield state, infotext, perfdata + + +check_info["mem.win"] = { + 'check_function': check_mem_windows, + 'inventory_function': inventory_mem_win, + 'service_description': 'Memory and pagefile', + 'has_perfdata': True, + 'group': 'memory_pagefile_win', + 'default_levels_variable': 'memory_win_default_levels', +} # +----------------------------------------------------------------------+ # | _ _ | @@ -195,6 +236,11 @@ state = max(state, s) infotxts.append(infotxt) perfdata.append( (var, v, w_mb, c_mb, 0, total_mb) ) - return (state, nagios_state_names[state] + (" - total %.1f MB, " % total_mb) + ", ".join(infotxts), perfdata) + return (state, ("total %.1f MB, " % total_mb) + ", ".join(infotxts), perfdata) -check_info["mem.vmalloc"] = (check_mem_vmalloc, "Vmalloc address space", 1, inventory_mem_vmalloc) +check_info["mem.vmalloc"] = { + 'check_function': check_mem_vmalloc, + 'inventory_function': inventory_mem_vmalloc, + 'service_description': 'Vmalloc address space', + 'has_perfdata': True, +} diff -Nru check-mk-1.2.2p3/mem.include check-mk-1.2.6p12/mem.include --- check-mk-1.2.2p3/mem.include 2013-11-05 09:23:08.000000000 +0000 +++ check-mk-1.2.6p12/mem.include 2015-09-21 10:59:54.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. @@ -25,74 +25,110 @@ # Boston, MA 02110-1301 USA. 
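The check_memory rewrite in the mem.include hunk just below stays compatible with old configurations by normalizing the legacy tuple parameter into the new dictionary format before any keys are read. A minimal standalone sketch of that idiom (Python 2 style to match the check code; the sample values are illustrative only, and normalize_mem_params is a hypothetical helper name):

    # Old-style rules pass a bare (warn, crit) tuple, new-style rules pass a
    # dict. Normalize first, then read keys with defaults.
    def normalize_mem_params(params):
        if type(params) == tuple:
            params = { "levels" : params }
        return params

    warn, crit = normalize_mem_params((150.0, 200.0))["levels"]        # -> 150.0, 200.0
    average_min = normalize_mem_params((150.0, 200.0)).get("average")  # -> None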
memused_default_levels = (150.0, 200.0) -mem_extended_perfdata = False +factory_settings["memory_default_levels"] = { + "levels" : memused_default_levels, +} + def check_memory(params, meminfo): - try: - swapused = meminfo['SwapTotal'] - meminfo['SwapFree'] - memused = meminfo['MemTotal'] - meminfo['MemFree'] - # Buffers and Cached are optional (not supported on Windows yet) - caches = meminfo.get('Buffers', 0) + meminfo.get('Cached', 0) - except: - return (3, "UNKNOWN - invalid output from plugin") - - # Add extended memory performance data, if this is - # enabled and the agent provides that information. - extended_perf = [] - extrainfo = "" - if mem_extended_perfdata: - mapped = meminfo.get('Mapped') - if mapped: - mapped_mb = int(mapped) / 1024 - committed_as = meminfo.get('Committed_AS') - if committed_as: - committed_as_mb = int(committed_as) / 1024 - else: - committed_as = 0 - extended_perf = [ - ('mapped', str(mapped_mb) + 'MB', '', '', 0, ''), - ('committed_as', str(committed_as_mb) + 'MB', '', '', 0, ''), - ] - extrainfo = ", %.1f GB mapped, %.1f GB committed" % \ - (mapped_mb / 1024.0, committed_as_mb / 1024.0) + swapused = meminfo['SwapTotal'] - meminfo['SwapFree'] + memused = meminfo['MemTotal'] - meminfo['MemFree'] + + # Buffers and Cached are optional. On Linux both mean basically the same. + caches = meminfo.get('Buffers', 0) + meminfo.get('Cached', 0) + + # Size of Pagetable on Linux can be relevant e.g. on ORACLE + # servers with much memory, that do not use HugeTables. We account + # that for used + pagetables = meminfo.get('PageTables', 0) + pagetables_mb = pagetables / 1024.0 - totalused_kb = (swapused + memused - caches) - totalused_mb = totalused_kb / 1024 + totalused_kb = (swapused + memused - caches + pagetables) + totalused_mb = totalused_kb / 1024.0 totalmem_kb = meminfo['MemTotal'] - totalmem_mb = totalmem_kb / 1024 + totalmem_mb = totalmem_kb / 1024.0 totalused_perc = 100 * (float(totalused_kb) / float(totalmem_kb)) - totalvirt_mb = (meminfo['SwapTotal'] + meminfo['MemTotal']) / 1024 - warn, crit = params + totalvirt_mb = (meminfo['SwapTotal'] + meminfo['MemTotal']) / 1024.0 - perfdata = [ - ('ramused', str( (memused - caches) / 1024) + 'MB', '', '', 0, totalmem_mb), - ('swapused', str(swapused / 1024) + 'MB', '', '', 0, meminfo['SwapTotal']/1024) ] - - # levels may be given either in int -> MB or in float -> percentages + if type(params) == tuple: + params = { "levels" : params } + warn, crit = params["levels"] - infotext = ("%.2f GB used (%.2f GB RAM + %.2f GB SWAP, this is %.1f%% of %.2f GB RAM)" % \ + if pagetables > 0: + pgtext = " + %.2f Pagetables" % (pagetables_mb / 1024.0) + else: + pgtext = "" + infotext = "%.2f GB used (%.2f RAM + %.2f SWAP%s, this is %.1f%% of %.2f RAM (%.2f total SWAP)" % \ (totalused_mb / 1024.0, (memused-caches) / 1024.0 / 1024, swapused / 1024.0 / 1024, - totalused_perc, totalmem_mb / 1024.0)) \ - + extrainfo + pgtext, totalused_perc, totalmem_mb / 1024.0, meminfo["SwapTotal"] / 1024.0 / 1024) + + # Take into account averaging + average_min = params.get("average") + if average_min: + totalused_mb_avg = get_average("mem.used.total", time.time(), + totalused_mb, average_min, initialize_zero = False) + totalused_perc_avg = totalused_mb_avg / totalmem_mb * 100 + infotext += ", %d min average %.1f%%" % (average_min, totalused_perc_avg) + comp_mb = totalused_mb_avg + else: + comp_mb = totalused_mb + infotext += ")" - if type(warn) == float: - perfdata.append(('memused', str(totalused_mb)+'MB', int(warn/100.0 * totalmem_mb), - 
int(crit/100.0 * totalmem_mb), 0, totalvirt_mb)) - perfdata += extended_perf - if totalused_perc >= crit: - return (2, 'CRIT - %s, critical at %.1f%%' % (infotext, crit), perfdata) - elif totalused_perc >= warn: - return (1, 'WARN - %s, warning at %.1f%%' % (infotext, warn), perfdata) - else: - return (0, 'OK - %s' % infotext, perfdata) + # levels may be given either in int -> MB or in float -> percentages. So convert + # effective levels to MB now + if type(warn) == float: + warn_mb = int(warn/100.0 * totalmem_mb) + crit_mb = int(crit/100.0 * totalmem_mb) + leveltext = lambda x: "%.1f%%" % x else: - perfdata.append(('memused', str(totalused_mb)+'MB', warn, crit, 0, totalvirt_mb)) - perfdata += extended_perf - if totalused_mb >= crit: - return (2, 'CRIT - %s, critical at %.2f GB' % (infotext, crit / 1024.0), perfdata) - elif totalused_mb >= warn: - return (1, 'WARN - %s, warning at %.2f GB' % (infotext, warn / 1024.0), perfdata) - else: - return (0, 'OK - %s' % infotext, perfdata) + warn_mb = warn + crit_mb = crit + leveltext = lambda x: "%.2f GB" % (x / 1024.0) + + # Prepare performance data + perfdata = [ + ('ramused', str( (memused - caches) / 1024) + 'MB', '', '', 0, totalmem_mb), + ('swapused', str(swapused / 1024) + 'MB', '', '', 0, meminfo['SwapTotal']/1024), + ('memused', str(totalused_mb) + 'MB', warn_mb, crit_mb, 0, totalvirt_mb), + ] + + if average_min: + perfdata.append(('memusedavg', str(totalused_mb_avg)+'MB')) + + # Check levels + state = 0 + if warn_mb > 0: # positive levels - used memory + if comp_mb >= crit_mb: + state = 2 + infotext += ", critical at %s used" % leveltext(crit) + elif comp_mb >= warn_mb: + state = 1 + infotext += ", warning at %s used" % leveltext(warn) + else: # negative levels - free memory + freemem_mb = totalvirt_mb - comp_mb + if freemem_mb <= -crit_mb: + state = 2 + infotext += ", critical at %s free" % leveltext(-crit) + elif freemem_mb <= -warn_mb: + state = 1 + infotext += ", warning at %s free" % leveltext(-warn) + + + # Add additional metrics, provided by Linux. + mapped = meminfo.get('Mapped') + if mapped: + mapped_mb = int(mapped) / 1024 + committed_as_mb = int(meminfo.get('Committed_AS', 0)) / 1024 + shared_mb = int(meminfo.get('Shmem', 0)) / 1024 + + perfdata += [ + ('mapped', str(mapped_mb) + 'MB'), + ('committed_as', str(committed_as_mb) + 'MB'), + ('pagetables', str(pagetables_mb) + 'MB'), + ('shared', str(shared_mb) + 'MB'), + ] + infotext += ", %.1f mapped, %.1f committed, %.1f shared" % \ + (mapped_mb / 1024.0, committed_as_mb / 1024.0, shared_mb / 1024.0) + return state, infotext, perfdata diff -Nru check-mk-1.2.2p3/mem.used check-mk-1.2.6p12/mem.used --- check-mk-1.2.2p3/mem.used 2013-11-05 09:23:07.000000000 +0000 +++ check-mk-1.2.6p12/mem.used 2015-06-24 09:48:36.000000000 +0000 @@ -1,6 +1,6 @@ -title: Check usage of physical and virtual RAM +title: Usage of physical and virtual RAM agents: linux -author: Mathias Kettner +catalog: os/kernel license: GPL distribution: check_mk description: @@ -9,39 +9,31 @@ and critical level for the usage of virtual memory, {not} for the usage of RAM. - This is not a bug, its a feature. In fact it is the - only way to do it right (at least for Linux): What - parts of a process currently reside in physical RAM - and what parts are swapped out is not related in a - direct way with the current memory usage. - - Linux tends to swap out - parts of processes even if RAM is available. 
It does - this in situations where disk buffers (are assumed to) - speed up the overall performance more than keeping - rarely used parts of processes in RAM. - - For example after a complete backup of your system - you might experiance that your swap usage has increased - while you have more RAM free then before. That is - because Linux has taken RAM from processes in order - to increase disk buffers. - - So when defining a level to check against, the only - value that is not affected by such internals of - memory management is the total amount of {virtual} - memory used up by processes (not by disk buffers). - - Check_mk lets you define levels in percentage of - the physically installed RAM or as absolute - values in MB. The default levels - are at 150% and 200%. That means that this check - gets critical if the memory used by processes - is twice the size of your RAM. - - Hint: If you want to monitor swapping, you probably - better measure major pagefaults. Please look - at the check {kernel}. + This is not a bug, it's a feature. In fact it is the only way to do it right + (at least for Linux): What parts of a process currently reside in physical + RAM and what parts are swapped out is not related in a direct way with the + current memory usage. + + Linux tends to swap out parts of processes even if RAM is available. It + does this in situations where disk buffers (are assumed to) speed up the + overall performance more than keeping rarely used parts of processes in RAM. + + For example after a complete backup of your system you might experience + that your swap usage has increased while you have more RAM free than + before. That is because Linux has taken RAM from processes in order to + increase disk buffers. + + So when defining a level to check against, the only value that is not + affected by such internals of memory management is the total amount of + {virtual} memory used up by processes (not by disk buffers). + + Check_MK lets you define levels in percentage of the physically installed RAM + or as absolute values in MB. The default levels are at 150% and 200%. That + means that this check gets critical if the memory used by processes is + twice the size of your RAM. + + Hint: If you want to monitor swapping, you probably better measure major + pagefaults. Please look at the check {kernel}. item: {None} @@ -72,25 +64,28 @@ stacks swap usage on top of RAM usage and thus shows the amount of virtual RAM that is used by processes. - If {mem_extended_perfdata} is set to {True}, then - additional performance data is output (see below). + On Linux some additional performance values are output, + for example the size of the page tables and the shared + memory. -[parameters] -warning (int or float): the percentage of virtual memory used - by processes at which WARNING state is triggered. If the - level is defined as an integer value then it is interpreted - as an absolute value in megabytes. -critical (int or float): the percentage or absolute value - at which CRITICAL state is triggered + If averaging is turned on, then a value {memusedavg} is added. +[parameters] +parameters (dict): The check previously used a pair of + two numbers as a parameter. While this is internally still + supported, the new format is a dictionary with the following + keys: + + {"levels"}: A pair of two int or float values: if these are + float it means the percentage of virtual memory used + by processes at which WARNING/CRIT state is triggered. 
If the + two numbers are defined as an integer value then they are interpreted + as an absolute value in megabytes. + + {"average"}: This key is optional. If set (integer), it means + a number of minutes. The levels are then applied to the + averaged value over that time horizon. [configuration] memused_default_levels (float, float): Levels used by all checks that are created by inventory. - -mem_extended_perfdata (boolean): If this variable is set - to {True}, then the checks outputs additional performance - data, if the agent provides that information. On Linux - the amount of mapped and committed memory is output - (see {Mapped} and {Committed_AS} in {/proc/meminfo}). - diff -Nru check-mk-1.2.2p3/mem.vmalloc check-mk-1.2.6p12/mem.vmalloc --- check-mk-1.2.2p3/mem.vmalloc 2013-11-05 09:23:07.000000000 +0000 +++ check-mk-1.2.6p12/mem.vmalloc 2015-06-24 09:48:36.000000000 +0000 @@ -1,6 +1,6 @@ -title: Check usage of Vmalloc address space +title: Usage of Vmalloc address space agents: linux -author: Mathias Kettner +catalog: os/kernel license: GPL distribution: check_mk description: diff -Nru check-mk-1.2.2p3/mem.win check-mk-1.2.6p12/mem.win --- check-mk-1.2.2p3/mem.win 2013-11-05 09:23:07.000000000 +0000 +++ check-mk-1.2.6p12/mem.win 2015-06-24 09:48:36.000000000 +0000 @@ -1,6 +1,6 @@ -title: Check main memory and page file usage on Windows +title: Usage of main memory and page file agents: windows -author: Mathias Kettner +catalog: os/kernel license: GPL distribution: check_mk description: @@ -21,6 +21,7 @@ memory_win_default_levels = { "memory" : (80.0, 90.0), # alert at 80%/90% usage "pagefile" : (2048, 1024), # alert, if less than 2/1 GB free + "average" : 60, # apply levels on 60-min average } # Disable memory levels for all hosts with the tag "test" @@ -44,6 +45,9 @@ {"pagefile"} Warning and critical levels for page file usage. The same rules apply as for {memory}. + {"average"} This optional parameter sets a value in minutes for averaging. + In that case all warn/crit levels are applied to the averaged values. + [configuration] memory_win_default_levels (dict): Levels used by diff -Nru check-mk-1.2.2p3/mikrotik_signal check-mk-1.2.6p12/mikrotik_signal --- check-mk-1.2.2p3/mikrotik_signal 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/mikrotik_signal 2015-09-21 10:59:54.000000000 +0000 @@ -0,0 +1,64 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. 
If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. + +mikrotik_signal_default_levels = ( 80, 70 ) + +def inventory_mikrotik_signal(info): + inventory = [] + for network, strenght, mode in info: + inventory.append((network, "mikrotik_signal_default_levels")) + return inventory + +def check_mikrotik_signal(item, params, info): + warn, crit = params + for network, strenght, mode in info: + if network == item: + strenght = saveint(strenght) + quality = 0 + if strenght >= -100: + quality = 2 * ( strenght + 100 ) + if quality > 100: + quality = 100 + + infotext = "Signal quality %d%% (%ddBm). Mode is: %s" % ( quality, strenght, mode ) + perf = [ ("quality", quality, warn, crit) ] + if quality <= crit: + return 2, infotext, perf + if quality <= warn: + return 1, infotext, perf + return 0, infotext, perf + + return 3, "Network not found" + +check_info["mikrotik_signal"] = { + "group" : "signal_quality", + "check_function" : check_mikrotik_signal, + "inventory_function" : inventory_mikrotik_signal, + "service_description" : "Signal %s", + "has_perfdata" : True, + "snmp_info" : ( ".1.3.6.1.4.1.14988.1.1.1.1.1", [ "5.2", "4.2", "8.2" ] ), + "snmp_scan_function" : lambda oid: ".1.3.6.1.4.1.14988.1" in oid(".1.3.6.1.2.1.1.2.0") +} Binary files /tmp/tmpz1J_py/xquijR6Ryw/check-mk-1.2.2p3/mkeventd.tar.gz and /tmp/tmpz1J_py/GXS8RRKTxu/check-mk-1.2.6p12/mkeventd.tar.gz differ diff -Nru check-mk-1.2.2p3/mk-job check-mk-1.2.6p12/mk-job --- check-mk-1.2.2p3/mk-job 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/mk-job 2015-04-10 08:00:19.000000000 +0000 @@ -0,0 +1,69 @@ +#!/bin/bash +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. + +export MK_VARDIR=/var/lib/check_mk_agent + +help() { + echo "Usage: mk-job IDENT PROGRAM [ARGS...]" + echo "" + echo "Execute PROGRAM as subprocess while measuring performance information" + echo "about the running process and writing it to an output file. This file" + echo "can be monitored using Check_MK. The Check_MK Agent will forward the" + echo "information of all job files to the monitoring server." + echo "" + echo "This file is being distributed with the Check_MK Agent." +} + +if [ $# -lt 2 ]; then + help >&2 + exit 1 +fi + +MYSELF=$(id -nu) +OUTPUT_PATH=$MK_VARDIR/job/$MYSELF +IDENT=$1 +shift + +if [ ! 
-d "$OUTPUT_PATH" ]; then + if [ "$MYSELF" = root ] ; then + mkdir -p "$OUTPUT_PATH" + else + echo "ERROR: Missing output directory $OUTPUT_PATH for non-root user '$MYSELF'." >&2 + exit 1 + fi +fi + +if ! type $1 >/dev/null 2>&1; then + echo -e "ERROR: Cannot run $1. Command not found.\n" >&2 + help >&2 + exit 1 +fi + +date +"start_time %s" > "$OUTPUT_PATH/$IDENT.running" +/usr/bin/time -o "$OUTPUT_PATH/$IDENT.running" --append \ + -f "exit_code %x\nreal_time %E\nuser_time %U\nsystem_time %S\nreads %I\nwrites %O\nmax_res_kbytes %M\navg_mem_kbytes %K\ninvol_context_switches %c\nvol_context_switches %w" "$@" +RC=$? +mv "$OUTPUT_PATH/$IDENT.running" "$OUTPUT_PATH/$IDENT" +exit $RC diff -Nru check-mk-1.2.2p3/mk-job.solaris check-mk-1.2.6p12/mk-job.solaris --- check-mk-1.2.2p3/mk-job.solaris 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/mk-job.solaris 2014-12-18 11:01:22.000000000 +0000 @@ -0,0 +1,75 @@ +#!/bin/bash +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. + +export MK_VARDIR=/var/lib/check_mk + +help() { + echo "Usage: mk-job IDENT PROGRAM [ARGS...]" + echo "" + echo "Execute PROGRAM as subprocess while measuring performance information" + echo "about the running process and writing it to an output file. This file" + echo "can be monitored using Check_MK. The Check_MK Agent will forward the" + echo "information of all job files to the monitoring server." + echo "" + echo "This file is being distributed with the Check_MK Agent." +} + +if [ $# -lt 2 ]; then + help >&2 + exit 1 +fi + +MYSELF=$(id | awk -F')' '{print $1}' | awk -F'(' '{print $2}') +OUTPUT_PATH=$MK_VARDIR/job/$MYSELF +IDENT=$1 +shift + +if [ ! -d "$OUTPUT_PATH" ]; then + if [ "$MYSELF" = root ] ; then + mkdir -p "$OUTPUT_PATH" + else + echo "ERROR: Missing output directory $OUTPUT_PATH for non-root user '$MYSELF'." >&2 + exit 1 + fi +fi + +if ! type $1 >/dev/null 2>&1; then + echo -e "ERROR: Cannot run $1. Command not found.\n" >&2 + help >&2 + exit 1 +fi + + +echo "start_time $(perl -e 'print time')" > "$OUTPUT_PATH/$IDENT.running" + +info=$((/usr/bin/time -p sh -c "$@ 2>/dev/null 1>&2" 2>&1; echo $?) 
| sed -e 's/,/\./g'); +RC=$(echo $info | awk '{print $7}') + +(echo $info | awk '{print "exit_code "$7"\nreal_time "$2"\nuser_time "$4"\nsystem_time "$6""}') >> "$OUTPUT_PATH/$IDENT.running" +(echo -e "reads 0\nwrites 0\nmax_res_kbytes 0\navg_mem_kbytes 0\ninvol_context_switches 0\nvol_context_switches 0";) >> "$OUTPUT_PATH/$IDENT.running" + +mv "$OUTPUT_PATH/$IDENT.running" "$OUTPUT_PATH/$IDENT" +exit $RC + Binary files /tmp/tmpz1J_py/xquijR6Ryw/check-mk-1.2.2p3/modules.tar.gz and /tmp/tmpz1J_py/GXS8RRKTxu/check-mk-1.2.6p12/modules.tar.gz differ diff -Nru check-mk-1.2.2p3/mounts check-mk-1.2.6p12/mounts --- check-mk-1.2.2p3/mounts 2013-11-05 09:23:08.000000000 +0000 +++ check-mk-1.2.6p12/mounts 2015-06-24 09:48:37.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. @@ -26,8 +26,10 @@ def inventory_mounts(info): inventory = [] + devices = [] for dev, mp, fstype, options, dump, fsck in info: - if fstype not in [ 'tmpfs' ]: + if fstype not in [ 'tmpfs' ] and dev not in devices: + devices.append(dev) opts = options.split(",") opts.sort() inventory.append( (mp, opts) ) @@ -52,7 +54,7 @@ missing.append(o) if not missing and not exceeding: - return (0, "OK - mount options exactly as expected") + return (0, "mount options exactly as expected") infos = [] if missing: @@ -62,14 +64,19 @@ infotext = ", ".join(infos) if "ro" in exceeding: - return (2, "CRIT - filesystem has switched to read-only " + return (2, "filesystem has switched to read-only " "and is probably corrupted(!!), " + infotext) # Just warn in other cases - return (1, "OK - " + infotext) + return (1, infotext) - return (3, "UNKNOWN - filesystem not mounted") + return (3, "filesystem not mounted") -check_info['mounts'] = ( check_mounts, "Mount options of %s", 0, inventory_mounts) -checkgroup_of['mounts'] = "fs_mount_options" + +check_info["mounts"] = { + 'check_function': check_mounts, + 'inventory_function': inventory_mounts, + 'service_description': 'Mount options of %s', + 'group': 'fs_mount_options', +} diff -Nru check-mk-1.2.2p3/moxa_iologik_register check-mk-1.2.6p12/moxa_iologik_register --- check-mk-1.2.2p3/moxa_iologik_register 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/moxa_iologik_register 2015-09-21 10:59:54.000000000 +0000 @@ -0,0 +1,72 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. 
See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. + +# "0=Off, 1=On in DI/DO mode or N=Count in DO counter mode" + +def inventory_iologik_register(info): + inventory = [] + for line in info: + if line[2]: + inventory.append((line[0], None )) + return inventory + + +def check_iologik_register(item, params, info): + for line in info: + if line[0] == item: + if int(line[2]) in range(0, 2): + return (int(line[2]), line[1]) + else: + return (3, "Invalid value %s for register" % line[2]) + + return (3, "Register not found") + + +check_info['moxa_iologik_register'] = { + "check_function" : check_iologik_register, + "inventory_function" : inventory_iologik_register, + "service_description" : "Moxa Register", + "has_perfdata" : False, + "group" : "iologik_register", + "snmp_scan_function" : lambda oid: oid(".1.3.6.1.4.1.8691.10.2242.2.0").startswith("E2242-T"), + "snmp_info" : (".1.3.6.1.4.1.8691.10.2242.10.4.1.1", + [ "1", # index + "2", # Name + "3", # Value + ]) +} + +# DIOEntry +# dioIndex Integer32 (0..11) "The channel dio index." +# dioType Integer32 (0..1) "The channel dio type. 0=DI, DO=1, AI=2" +# dioMode Integer32 (0..1) "The channel dio mode. 0=DI, 1=Event Counter" +# dioStatus Unsigned32 (0..4294967295) "The channel dio(di/do) status. 0=Off, 1=On in DI/DO mode or N=Count in DO counter mode +# dioFilter Integer32 (1..65535) "The channel dio(di) counter filter. unit=0.5ms" +# dioTrigger Integer32 (0..1) "The channel dio(di) counter trigger level. 0=L2H, 1=H2L" +# dioCntStart Integer32 (0..1) "The channel dio(do) counter start/stop. 0=stop, 1=start" +# dioPulseStart Integer32 (0..1) "The channel dio(do) pulse start/stop. 0=stop, 1=start" +# dioPulseONWidth Unsigned32 (1..4294967295) "The channel dio(do) signal ON width. unit=0.5ms" +# dioPulseOFFWidth Unsigned32 (1..4294967295) "The channel dio(do) signal OFF width. unit=0.5ms" diff -Nru check-mk-1.2.2p3/mq_queues check-mk-1.2.6p12/mq_queues --- check-mk-1.2.2p3/mq_queues 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/mq_queues 2015-06-24 09:48:37.000000000 +0000 @@ -0,0 +1,99 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. 
If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. + +# Example output from agent: +# [[SINGLE_ITEM_EXPORT_int_jens]] +# 0 0 0 0 +# [[SPRINGAPP-COMMAND-INBOX-DEV]] +# 0 0 15 15 +# [[SINGLE_ITEM_EXPORT_INT_jens]] +# 0 0 0 0 +# [[DEBITOR_LOCATION]] +# 0 1 84 84 +# [[EDATA_SERIALNUMBERQUERY_INBOX]] +# 0 0 0 0 + + +mq_queues_default_levels = { + "size" : (None, None), + "consumerCount" : (None, None), +} + +def inventory_mq_queues(info): + inventory = [] + for line in info: + if line[0].startswith('[['): + item = line[0][2:-2] + inventory.append((item, mq_queues_default_levels)) + return inventory + +def check_mq_queues(item, params, info): + found = False + for line in info: + if found == True: + size, consumerCount, enqueueCount, dequeueCount = map(int, line) + msg = "" + state = 0 + warn, crit = params['consumerCount'] + if crit and consumerCount < crit: + state = 2 + label = "(!!)" + elif warn and consumerCount < warn: + state = 1 + label = "(!)" + if state > 0: + msg = "%s consuming connections " % consumerCount + msg += "(Levels Warn/Crit below %s/%s)%s, " % (warn, crit, label) + + + label = "" + warn, crit = params['size'] + if crit and size >= crit: + state = 2 + label = "(!!)" + elif warn and size >= warn: + state = max(state, 1) + label = "(!)" + msg += "Queue Size: %s" % size + if label != "": + msg += "(Levels Warn/Crit at %s/%s)%s" % (warn, crit, label) + msg += ", Enqueue Count: %s, Dequeue Count: %s" % (enqueueCount, dequeueCount ) + + + perf = [("queue", size, warn, crit), ("enque", enqueueCount), ("deque", dequeueCount) ] + return state, msg, perf + if line[0].startswith('[[') and line[0][2:-2] == item: + found = True + return 2, "Queue not found" + +check_info["mq_queues"] = { + "check_function" : check_mq_queues, + "inventory_function" : inventory_mq_queues, + "service_description" : "Queue %s", + "has_perfdata" : True, + "group" : "mq_queues", +} + diff -Nru check-mk-1.2.2p3/mrpe check-mk-1.2.6p12/mrpe --- check-mk-1.2.2p3/mrpe 2013-11-05 09:23:08.000000000 +0000 +++ check-mk-1.2.6p12/mrpe 2015-09-21 10:59:54.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. @@ -37,6 +37,12 @@ return items +def mrpe_parse_perfdata(perfinfo): + varname, valuetxt = perfinfo.split("=", 1) + values = valuetxt.split(";") + return tuple( [varname] + values) + + def check_mrpe(item, params, info): # This check is cluster-aware. An item might be found # more than once. In that case we use the best of the @@ -51,42 +57,65 @@ else: check_name = None if line[0] == item: - state = int(line[1]) - if state not in [ 0, 1, 2, 3]: + try: + state = int(line[1]) + except: + state = None + + # convert to original format by joining and replacing \1 back with \n + rest = " ".join(line[2:]).replace("\1", "\n") + # split into lines + lines = rest.split('\n') + # First line: OUTPUT|PERFDATA + parts = lines[0].split("|", 1) + output = [parts[0].strip()] + if state == None or state not in [0, 1, 2, 3]: + output[0] = "Invalid plugin status %s. 
Output is: %s" % (state, output[0]) state = 3 - rest = " ".join(line[2:]) - parts = rest.split("|", 1) - # replace first line break with "\\n" -> Nagios expects it like this - output = parts[0].replace("\1", "\\n", 1).replace("\1", "
    ") - - perfdata = [] - if len(parts) > 1: # found pipe symbol - perftxt = parts[1].strip() - for perfinfo in perftxt.split(" "): + if len(parts) > 1: + perfdata = parts[1].strip().split() + else: + perfdata = [] + + # Further lines + now_comes_perfdata = False + for l in lines[1:]: + if now_comes_perfdata: + perfdata += l.split() + else: + parts = l.split("|", 1) + output.append(parts[0].strip()) + if len(parts) > 1: + perfdata += parts[1].strip().split() + now_comes_perfdata = True + + + if best_state in [ None, 2 ] \ + or (state < best_state and state != 2): + infotext = "\\n".join(output) + perf_parsed = [] + for perfvalue in perfdata: try: - varname, valuetxt = perfinfo.split("=", 1) - values = valuetxt.split(";") - perfdata.append(tuple( [varname] + values) ) + perf_parsed.append(mrpe_parse_perfdata(perfvalue)) except: pass - # name of check command needed for PNP to choose the correct template - if check_name: - perfdata.append(check_name) + # name of check command needed for PNP to choose the correct template + if check_name: + perf_parsed.append(check_name) + best_result = state, "\\n".join(output), perf_parsed + best_state = state - if best_state in [ None, 2 ] \ - or (state < best_state and state != 2): - best_result = state, output, perfdata - best_state = state if best_state == None: - return (3, "Check output not found in output of MRPE") + return (3, "Check output not found in output of MRPE") else: - return best_result + return best_result -check_info['mrpe'] = ( - check_mrpe, - "%s", - 1, - inventory_mrpe) +check_info["mrpe"] = { + 'check_function': check_mrpe, + 'inventory_function': inventory_mrpe, + 'service_description': '%s', + 'has_perfdata': True, +} diff -Nru check-mk-1.2.2p3/mssql_backup check-mk-1.2.6p12/mssql_backup --- check-mk-1.2.2p3/mssql_backup 2013-11-05 09:23:08.000000000 +0000 +++ check-mk-1.2.6p12/mssql_backup 2015-07-01 12:18:10.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. @@ -24,28 +24,35 @@ # to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, # Boston, MA 02110-1301 USA. -import datetime - -#<<>> -#MSSQL_SQLEXPRESS test123 1331207325 +# <<>> +# MSSQL_SQLEXPRESS test123 1331207325 # Might be None to have no thresholds or a tuple of two ints # (, ) mssql_backup_default_levels = None def inventory_mssql_backup(info): - return [ (line[0] + ' ' + line[1], 'mssql_backup_default_levels') for line in info ] + return [ (line[0] + ' ' + line[1], 'mssql_backup_default_levels') + for line in info + if len(line) == 4 ] def check_mssql_backup(item, params, info): for line in info: - inst, tablespace, last_backup_date, last_backup_time = line + last_backup_date = False + try: + inst, tablespace, last_backup_date, last_backup_time = line + except ValueError: + inst, tablespace, last_backup_timestamp = line + if item == inst + ' ' + tablespace: - dt = datetime.datetime(*time.strptime(last_backup_date + ' ' + last_backup_time, '%Y-%m-%d %H:%M:%S')[:6]) + if last_backup_date: + timestamp = time.mktime(time.strptime(last_backup_date + ' ' + last_backup_time, + '%Y-%m-%d %H:%M:%S')) + else: + timestamp = int(last_backup_timestamp) state = 0 - # Would be so nice to use delta.total_seconds(). 
But we must care about python < 2.7 - delta = datetime.datetime.now() - dt - sec_ago = (delta.microseconds + (delta.seconds + delta.days * 24 * 3600) * 10**6) / 10**6 + sec_ago = time.time() - timestamp if params is not None: if sec_ago >= params[1]: @@ -56,11 +63,11 @@ else: perfdata = [('seconds', sec_ago)] - return (state, '%s - Last backup was at %s (%ds ago)' % - (nagios_state_names[state], dt.strftime('%Y-%m-%d %H:%M:%S'), sec_ago), - perfdata) + return (state, 'Last backup was at %s (%s ago)' % + (time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(timestamp)), + get_age_human_readable(sec_ago)), perfdata) - return (3, 'UNKNOWN - Tablespace %s could not be found' % item) + return (3, 'Tablespace %s could not be found' % item) check_info['mssql_backup'] = { 'check_function': check_mssql_backup, diff -Nru check-mk-1.2.2p3/mssql_counters check-mk-1.2.6p12/mssql_counters --- check-mk-1.2.2p3/mssql_counters 2013-11-05 09:23:08.000000000 +0000 +++ check-mk-1.2.6p12/mssql_counters 2015-09-21 10:59:54.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. @@ -39,7 +39,7 @@ # MSSQL_SQLEXPRESS:Databases master Log_File(s)_Size_(KB) def mssql_counters_item(line, add_counter_name): - obj, counter, instance, value = line + obj, counter, instance = line[:3] if obj.endswith(':Databases'): obj = obj[:-10] @@ -86,11 +86,11 @@ base = float(line[-1]) if value is None or (perc_w_base and base is None): - return (3, 'UNKNOWN - Counter %s could not be found in agent output' % (item)) + return (3, 'Counter %s could not be found in agent output' % (item)) if perc_w_base: if base == 0: - return (3, 'UNKNOWN - Base is 0 (Value: %.2f)' % value) + base = 1 perc = value / base * 100.0 else: perc = value @@ -102,7 +102,7 @@ elif perc <= params[0]: state = 1 - return (state, '%s - %d%%' % (nagios_state_names[state], perc), [(counter_name, perc)]) + return (state, '%d%%' % (perc), [(counter_name, perc)]) check_info['mssql_counters.cache_hits'] = { 'check_function': check_mssql_counters_perc, @@ -120,7 +120,6 @@ output = [] perfdata = [] now = time.time() - wrapped = False for line in info: if mssql_counters_item(line, False) != item: continue @@ -133,18 +132,14 @@ if line[1] == counter: value = int(line[-1]) countername = "mssql_counters.%s.%s" % (item, counter) - try: - timedif, persec = get_counter(countername, now, value) - except MKCounterWrapped: - wrapped = True - continue + persec = get_rate(countername, now, value) output.append('%s: %.1f/s' % (label, persec)) perfdata.append((counter, persec)) if output: - return (0, 'OK - %s' % ', '.join(output), perfdata) + return (0, '%s' % ', '.join(output), perfdata) else: - return (3, 'UNKNOWN - Counters %s could not be found in agent output' % (item)) + return (3, 'Counters %s could not be found in agent output' % (item)) check_info['mssql_counters.transactions'] = { 'check_function': check_mssql_counters_transactions, @@ -159,7 +154,6 @@ output = [] perfdata = [] now = time.time() - wrapped = False for line in info: if mssql_counters_item(line, False) != item: continue @@ -175,11 +169,7 @@ value = float(line[-1]) # compute rate from counter value countername = "mssql_counters.%s.%s" % (item, counter) - try: - timedif, persec = get_counter(countername, now, value) - 
except MKCounterWrapped: - wrapped = True - continue + persec = get_rate(countername, now, value) p = params.get(counter) if p: @@ -196,13 +186,10 @@ output.append('%s: %.1f/s%s' % (label, persec, prob_txt)) perfdata.append((counter, persec, warn, crit)) - if wrapped: - raise MKCounterWrapped("", "Some counter wrapped, no data this time") - if output: - return (state, '%s - %s' % (nagios_state_names[state], ', '.join(output)), perfdata) + return (state, ', '.join(output), perfdata) else: - return (3, 'UNKNOWN - Counters %s could not be found in agent output' % (item)) + return (3, 'Counters %s could not be found in agent output' % (item)) check_info['mssql_counters.locks'] = { 'check_function': check_mssql_counters_locks, @@ -230,7 +217,7 @@ log_file_used_bytes = int(line[-1]) * 1024 if data_file_bytes is None and log_file_bytes is None and log_file_used_bytes is None: - return (3, 'UNKNOWN - Counters %s could not be found in agent output' % (item)) + return (3, 'Counters %s could not be found in agent output' % (item)) output = [] perfdata = [] @@ -244,7 +231,7 @@ perfdata.append(('log_files', log_file_bytes)) perfdata.append(('log_files_used', log_file_used_bytes)) - return (0, 'OK - %s' % ', '.join(output), perfdata) + return (0, '%s' % ', '.join(output), perfdata) check_info['mssql_counters.file_sizes'] = { diff -Nru check-mk-1.2.2p3/mssql_counters.cache_hits check-mk-1.2.6p12/mssql_counters.cache_hits --- check-mk-1.2.2p3/mssql_counters.cache_hits 2013-11-05 09:23:07.000000000 +0000 +++ check-mk-1.2.6p12/mssql_counters.cache_hits 2015-06-24 09:48:36.000000000 +0000 @@ -1,6 +1,6 @@ -title: Checks MSSQL cache hit ratio +title: MSSQL cache hit ratio agents: windows -author: Lars Michelsen +catalog: app/mssql license: GPL distribution: check_mk description: diff -Nru check-mk-1.2.2p3/mssql_counters.file_sizes check-mk-1.2.6p12/mssql_counters.file_sizes --- check-mk-1.2.2p3/mssql_counters.file_sizes 2013-11-05 09:23:07.000000000 +0000 +++ check-mk-1.2.6p12/mssql_counters.file_sizes 2015-09-21 10:59:54.000000000 +0000 @@ -1,6 +1,6 @@ -title: Checks size of data- and logfiles of MSSQL tablespaces +title: Size of data- and logfiles of MSSQL tablespaces agents: windows -author: Lars Michelsen +catalog: app/mssql license: GPL distribution: check_mk description: diff -Nru check-mk-1.2.2p3/mssql_counters.locks check-mk-1.2.6p12/mssql_counters.locks --- check-mk-1.2.2p3/mssql_counters.locks 2013-11-05 09:23:07.000000000 +0000 +++ check-mk-1.2.6p12/mssql_counters.locks 2015-06-24 09:48:36.000000000 +0000 @@ -1,6 +1,6 @@ -title: Checks locks per second in MSSQL tablespaces +title: Locks per second in MSSQL tablespaces agents: windows -author: Lars Michelsen +catalog: app/mssql license: GPL distribution: check_mk description: diff -Nru check-mk-1.2.2p3/mssql_counters.transactions check-mk-1.2.6p12/mssql_counters.transactions --- check-mk-1.2.2p3/mssql_counters.transactions 2013-11-05 09:23:07.000000000 +0000 +++ check-mk-1.2.6p12/mssql_counters.transactions 2015-06-24 09:48:36.000000000 +0000 @@ -1,6 +1,6 @@ -title: Checks transactions per second in MSSQL tablespaces +title: Transactions per second in MSSQL tablespaces agents: windows -author: Lars Michelsen +catalog: app/mssql license: GPL distribution: check_mk description: diff -Nru check-mk-1.2.2p3/mssql_tablespaces check-mk-1.2.6p12/mssql_tablespaces --- check-mk-1.2.2p3/mssql_tablespaces 2013-11-05 09:23:08.000000000 +0000 +++ check-mk-1.2.6p12/mssql_tablespaces 2015-09-21 10:59:54.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | 
| | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. @@ -45,15 +45,21 @@ # 11: uom # 12: Total amount of space reserved for objects in the database, but not yet used. # 13: uom - +mssql_tablespace_default_levels = {} def inventory_mssql_tablespaces(info): inventory = [] for line in info: if len(line) > 1: - inventory.append((line[0] + ' ' + line[1], None)) + inventory.append((line[0] + ' ' + line[1], 'mssql_tablespace_default_levels')) return inventory def check_mssql_tablespaces(item, params, info): + # First version of this check was without levels, + # so it is possible that params are None + if not params: + params = {} + + state = 0 for line in info: if len(line) < 2 or item != line[0] + ' ' + line[1]: continue @@ -77,16 +83,29 @@ elif uom == 'TB': val_bytes = value * 1024 * 1024 * 1024 * 1024 - output.append('%s: %s' % (label, get_bytes_human_readable(val_bytes))) + warn, crit = params.get(key, (None, None)) + error_label = "" + if warn and crit: + levels = "(Warn/Crit at %s/%s)" % \ + (get_bytes_human_readable(warn), get_bytes_human_readable(crit)) + if val_bytes > crit: + state = 2 + error_label += levels + "(!!)" + elif val_bytes > warn: + state = max(state, 1) + error_label += levels +"(!)" + + output.append('%s: %s %s' % (label, get_bytes_human_readable(val_bytes), error_label)) perfdata.append((key, val_bytes)) - return (0, 'OK - %s' % ', '.join(output), perfdata) + return state, '%s' % ', '.join(output), perfdata - return (3, 'UNKNOWN - Tablespace %s could not be found' % item) + return (3, 'Tablespace %s could not be found' % item) check_info['mssql_tablespaces'] = { 'check_function': check_mssql_tablespaces, 'inventory_function': inventory_mssql_tablespaces, 'service_description': '%s Sizes', + 'group' : "mssql_tablespaces", 'has_perfdata': True, } diff -Nru check-mk-1.2.2p3/mssql_versions check-mk-1.2.6p12/mssql_versions --- check-mk-1.2.2p3/mssql_versions 2013-11-05 09:23:08.000000000 +0000 +++ check-mk-1.2.6p12/mssql_versions 2015-09-21 10:59:54.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. @@ -37,7 +37,12 @@ def check_mssql_versions(item, _unused, info): for line in info: if line[0] == item: - return (0, 'OK - Server is running Version %s' % line[1]) - return (2, "UNKNOWN - Server objance not existing or not running") + return (0, 'Server is running Version %s' % line[1]) + return (2, "Server instance not existing or not running") -check_info['mssql_versions'] = (check_mssql_versions, "%s Version", 0, inventory_mssql_versions ) + +check_info["mssql_versions"] = { + 'check_function': check_mssql_versions, + 'inventory_function': inventory_mssql_versions, + 'service_description': '%s Version', +} diff -Nru check-mk-1.2.2p3/multipath check-mk-1.2.6p12/multipath --- check-mk-1.2.2p3/multipath 2013-11-05 09:23:08.000000000 +0000 +++ check-mk-1.2.6p12/multipath 2015-09-21 10:59:54.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . 
\ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. @@ -24,8 +24,10 @@ # to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, # Boston, MA 02110-1301 USA. +# Configuration for using alias instead of UUID +inventory_multipath_rules = [] -# Output from multipath -l has the following format: +# Output from multipath -l has the following possible formats: # orabase.lun50 (360a9800043346937686f456f59386741) dm-15 NETAPP,LUN # [size=25G][features=1 queue_if_no_path][hwhandler=0] @@ -128,11 +130,28 @@ # \_ round-robin 0 [prio=-1][enabled] # \_ 4:0:0:11 sdt 65:48 [active][undef] +# This one here is from RedHat 6. Very creative... +# 1IET 00010001 dm-4 IET,VIRTUAL-DISK +# size=200G features='0' hwhandler='0' wp=rw +# |-+- policy='round-robin 0' prio=0 status=active +# | `- 23:0:0:1 sdk 8:160 active undef running +# |-+- policy='round-robin 0' prio=0 status=enabled +# | `- 21:0:0:1 sdj 8:144 active undef running +# |-+- policy='round-robin 0' prio=0 status=enabled +# | `- 22:0:0:1 sdg 8:96 active undef running +# `-+- policy='round-robin 0' prio=0 status=enabled +# `- 20:0:0:1 sdi 8:128 active undef running + +# And a completely new situation: +# <<<multipath>>> +# Nov 05 17:17:03 | DM multipath kernel driver not loaded +# Nov 05 17:17:03 | /etc/multipath.conf does not exist, blacklisting all devices. +# Nov 05 17:17:03 | A sample multipath.conf file is located at +# Nov 05 17:17:03 | /usr/share/doc/device-mapper-multipath-0.4.9/multipath.conf +# Nov 05 17:17:03 | You can run /sbin/mpathconf to create or modify /etc/multipath.conf +# Nov 05 17:17:03 | DM multipath kernel driver not loaded - -def parse_multipath_output(info, only_uuid = None): - # only_uuid --> look only for data of this uuid or alias - +def parse_multipath(info): # New reported header lines need to be placed here # the matches need to be put in a list of tuples # while the structure of the tuple is: @@ -141,11 +160,11 @@ # 2: matched regex-group id of alias (optional) reg_headers = [ (get_regex(r"^[0-9a-z]{33}$"), 0, None), # 1. (should be included in 3.) - (get_regex(r"^([^\s]+)\s\(([0-9A-Za-z_-]+)\)"), 2, 1), # 2. + (get_regex(r"^([^\s]+)\s\(([0-9A-Za-z_-]+)\)"), 2, 1), # 2. (get_regex(r"^[a-zA-Z0-9_]+$"), 0, None), # 3. (get_regex(r"^([0-9a-z]{33}|[0-9a-z]{49})\s?dm.+$"), 1, None), # 4. (get_regex(r"^[a-zA-Z0-9_]+dm-.+$"), 0, None), # 5. Remove this line in 1.2.0 - (get_regex(r"^([a-zA-Z0-9_-]+)\s?dm-.+$"), 1, None), # 6. and 7. + (get_regex(r"^([-a-zA-Z0-9_ ]+)\s?dm-[0-9]+.*$"), 1, None), # 6. and 7. 
] reg_prio = get_regex("[[ ]prio=") @@ -156,6 +175,7 @@ group = {} numpaths = None for line in info: + # Ignore error messages due to invalid multipath.conf if line[0] == "multipath.conf": continue @@ -171,7 +191,10 @@ l = " ".join(line) # Skip output when multipath is not present - if l.endswith('DM multipath kernel driver not loaded'): + if l.endswith('kernel driver not loaded') \ + or l.endswith('does not exist, blacklisting all devices.') \ + or l.endswith('A sample multipath.conf file is located at') \ + or l.endswith('multipath.conf'): uuid = None continue @@ -196,11 +219,6 @@ if not matchobject: raise Exception("Invalid line in agent output: " + l) - # Skip unwanted devices in one device mode - if only_uuid and only_uuid not in [ uuid, alias ]: - uuid = None # wrong uuid, ignore this one - continue - # initialize information about next device numpaths = 0 lun_info = [] @@ -241,27 +259,44 @@ # Get list of UUIDs of all multipath devices # Length of UUID is 360a9800043346937686f456f59386741 -def inventory_multipath(info): +def inventory_multipath(parsed): + settings = host_extra_conf_merged(g_hostname, inventory_multipath_rules) + inventory = [] - parsed = parse_multipath_output(info) for uuid, info in parsed.items(): # take current number of paths as target value - inventory.append( (uuid, " ".join(info['luns']), info['numpaths']) ) + if "alias" in info and settings.get("use_alias"): + item = info["alias"] + else: + item = uuid + inventory.append( (item, info['numpaths']) ) return inventory # item is UUID (e.g. '360a9800043346937686f456f59386741') or alias (e.g. 'mpath0') -def check_multipath(item, target_numpaths, info): +def check_multipath(item, target_numpaths, parsed): if target_numpaths == None: target_numpaths = 2 # default case: we need two paths - parsed = parse_multipath_output(info, item) # we look for one specific uuid/alias - if len(parsed) == 0: - return (3, "UNKNOWN - no map with uuid/alias %s" % item) - mmap = parsed.values()[0] + # Keys in parsed are the UUIDs. First assume that we are + # looking for a UUID. Then fall back to aliases + if item in parsed: + mmap = parsed[item] + else: + for mmap in parsed.values(): + if mmap.get("alias") == item: + break + else: + return 3, "Multipath device not found in agent output" + # If the item is the alias, then show the UUID in the plugin output. + # If the item is the UUID, then vice versa. 
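The UUID-or-alias lookup above relies on Python's for/else: the else branch runs only when the loop ends without a break, i.e. when no alias matched either. A standalone sketch of the same logic (the parsed sample data is hypothetical; the UUID and alias values are taken from the comments in this check):

    # Keys of parsed are UUIDs; the alias, if present, lives in the device dict.
    parsed = {
        "360a9800043346937686f456f59386741": { "alias" : "mpath0", "numpaths" : 2 },
    }

    def find_map(item):
        if item in parsed:                # item was given as the UUID
            return parsed[item]
        for mmap in parsed.values():      # otherwise search the aliases
            if mmap.get("alias") == item:
                return mmap
        return None                       # caller maps this to UNKNOWN (3)

    assert find_map("mpath0") is find_map("360a9800043346937686f456f59386741")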
alias = mmap.get('alias') - if alias: + uuid = mmap.get('uuid') + + if item == uuid and alias: aliasinfo = "(%s) " % alias + elif item == alias and uuid: + aliasinfo = "(%s) " % uuid else: aliasinfo = "" @@ -269,22 +304,21 @@ broken = mmap['broken_paths'] numbroken = len(broken) if numbroken > 0: - return (2, "CRIT - %sbroken paths: %s" % (aliasinfo, ",".join(broken))) + return (2, "%sbroken paths: %s" % (aliasinfo, ",".join(broken))) info = "%spaths expected: %d, paths active: %d" % (aliasinfo, target_numpaths, numpaths) if numpaths < target_numpaths: - return (2, "CRIT - " + info) + return (2, info) elif numpaths > target_numpaths: - return (1, "WARN - " + info) + return (1, info) else: - return (0, "OK - " + info) - - -check_info['multipath'] = ( - check_multipath, - "Multipath %s", - 0, - inventory_multipath) + return (0, info) -checkgroup_of['multipath'] = 'multipath' +check_info["multipath"] = { + 'check_function': check_multipath, + 'inventory_function': inventory_multipath, + 'parse_function': parse_multipath, + 'service_description': 'Multipath %s', + 'group': 'multipath', +} diff -Nru check-mk-1.2.2p3/multisite.mk-1.2.2p3 check-mk-1.2.6p12/multisite.mk-1.2.2p3 --- check-mk-1.2.2p3/multisite.mk-1.2.2p3 2013-11-05 09:42:57.000000000 +0000 +++ check-mk-1.2.6p12/multisite.mk-1.2.2p3 1970-01-01 00:00:00.000000000 +0000 @@ -1,116 +0,0 @@ -# Confguration for Check_MK Multisite - -# Users with unrestricted permissions. These users will always -# have the permissions to edit users, roles and permissions, -# even if configuration has been edited via WATO -admin_users = [ "nagiosadmin" ] - -# NagVis -# -# The NagVis-Snapin needs to know the URL to nagvis. -# This is not always /nagvis/ - especially not for OMD -nagvis_base_url = '/nagvis' - -# Views allow to play alarm sounds according to the -# "worst" state of the shown items. Enable sounds here: -# enable_sounds = True - -# You can configure here which sounds to play. Possible events are "critical", -# "warning", "unknown", "ok", "up", "down", "unreachable" and -# "pending". Sounds are expected in the sounds subdirectory -# of htdocs (Default is /usr/share/check_mk/web/htdocs/sounds). The -# following setting is the default: -# sounds = [ -# ( "down", "down.wav" ), -# ( "critical", "critical.wav" ), -# ( "unknown", "unknown.wav" ), -# ( "warning", "warning.wav" ), -# ( None, "ok.wav" ), -# ] - -# Tabs for choosing number of columns refresh -# view_option_refreshes = [ 30, 60, 90, 0 ] -# view_option_columns = [ 1, 2, 3, 4, 5, 6, 8 ] - -# Custom links for "Custom Links" Snapin. Feel free to add your -# own links here. The boolean values True and False determine -# wether the sections are open or closed by default. 
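As the comment above says, a custom_links entry is either a plain link or a folding section whose second element is the open/closed boolean. A minimal sketch of both shapes, with placeholder targets:

    custom_links['guest'] = [
        # Plain link: (title, url, icon)
        ("Classical Nagios GUI", "../nagios/", "link_home.gif"),
        # Section: (title, open_by_default, links); True renders it open
        ("Addons", True, [
            ("PNP4Nagios", "../pnp4nagios/", "link_reporting.gif"),
        ]),
    ]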
- -# Links for everyone -custom_links['guest'] = [ - ( "Classical Nagios GUI", "../nagios/", "link_home.gif" ), - ( "Addons", True, [ - ( "PNP4Nagios", "../pnp4nagios/", "link_reporting.gif" ), - ( "NagVis", False, [ - ( "Automap", "../nagvis/index.php?map=__automap", "link_map.gif"), - ( "Demo map", "../nagvis/index.php?map=demo-map", "link_map.gif"), - ( "Demo Map 2", "../nagvis/index.php?map=demo2", "link_map.gif"), - ]), - ]), -] - -# The members of the role 'user' get the same links as the guests -# but some in addition -custom_links['user'] = custom_links['guest'] + [ - ( "Open Source Components", False, [ - ( "Multisite", "http://mathias-kettner.de/checkmk_multisite.html", None, "_blank"), - ( "MK Livestatus", "http://mathias-kettner.de/checkmk_livestatus.html", None, "_blank"), - ( "Check_MK", "http://mathias-kettner.de/check_mk.html", None, "_blank"), - ( "Nagios", "http://www.nagios.org/", None, "_blank"), - ( "PNP4Nagios", "http://pnp4nagios.org/", None, "_blank"), - ( "NagVis", "http://nagvis.org/", None, "_blank"), - ( "RRDTool", "http://oss.oetiker.ch/rrdtool/", None, "_blank"), - ]) -] - -# The admins yet get further links -custom_links['admin'] = custom_links['user'] + [ - ( "Support", False, [ - ( "Mathias Kettner", "http://mathias-kettner.de/" ), - ( "Check_MK Mailinglists", "http://mathias-kettner.de/check_mk_lists.html" ), - ( "Check_MK Exchange (inofficial)", "http://exchange.check-mk.org/", None, "_blank" ), - ( "Monitoring Portal (German)", "http://monitoring-portal.org", None, "_blank"), - ]) -] - -# Hide certain views from the sidebar -# hidden_views = [ "hosttiles", "allhosts_mini" ] -# Vice versa: hide all views except these (be carefull, this - -# will also exclude custom views) -# visible_views = [ "allhosts", "searchsvc" ] - -# Load custom style sheet which can override styles defined in check_mk.css -# Put your style sheet into web/htdocs/ -# custom_style_sheet = "my_styles.css" - -# __ ___ _____ ___ -# \ \ / / \|_ _/ _ \ -# \ \ /\ / / _ \ | || | | | -# \ V V / ___ \| || |_| | -# \_/\_/_/ \_\_| \___/ -# -# Check_MK's Web Administration Tool - -# If you do not like WATO, you can disable it: -# wato_enabled = False - -# Host tags to be used in WATO -# wato_host_tags = [ -# ( "os_type", "Operating System", [ -# ( "lnx", "Linux", [ 'tcp' ],), -# ( "win", "Windows", [ 'tcp', 'snmp' ]), -# ( "net", "Network device", [ 'snmp' ]), -# ( "ping", "Other PING-only device", ), -# ]), -# ( "prod", "Productivity", [ -# ( "prod", "Production System" ), -# ( "test", "Test System" ), -# ]), -# ( "bulkwalk", "Bulkwalk (SNMP v2c)", [ -# ( None, "simple walk (SNMP v1)"), -# ( "bulk", "Bulkwalk (SNMP v2c)"), -# ], [ 'snmp' ]), -# -# ] - diff -Nru check-mk-1.2.2p3/multisite.mk-1.2.6p12 check-mk-1.2.6p12/multisite.mk-1.2.6p12 --- check-mk-1.2.2p3/multisite.mk-1.2.6p12 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/multisite.mk-1.2.6p12 2015-09-21 11:01:34.000000000 +0000 @@ -0,0 +1,112 @@ +# Confguration for Check_MK Multisite + +# Users with unrestricted permissions. These users will always +# have the permissions to edit users, roles and permissions, +# even if configuration has been edited via WATO +admin_users = [ "nagiosadmin" ] + +# NagVis +# +# The NagVis-Snapin needs to know the URL to nagvis. +# This is not always /nagvis/ - especially not for OMD +nagvis_base_url = '/nagvis' + +# Views allow to play alarm sounds according to the +# "worst" state of the shown items. Enable sounds here: +# enable_sounds = True + +# You can configure here which sounds to play. 
Possible events are "critical", +# "warning", "unknown", "ok", "up", "down", "unreachable" and +# "pending". Sounds are expected in the sounds subdirectory +# of htdocs (Default is /usr/share/check_mk/web/htdocs/sounds). The +# following setting is the default: +# sounds = [ +# ( "down", "down.wav" ), +# ( "critical", "critical.wav" ), +# ( "unknown", "unknown.wav" ), +# ( "warning", "warning.wav" ), +# ( None, "ok.wav" ), +# ] + +# Tabs for choosing number of columns refresh +# view_option_refreshes = [ 30, 60, 90, 0 ] +# view_option_columns = [ 1, 2, 3, 4, 5, 6, 8 ] + +# Custom links for "Custom Links" Snapin. Feel free to add your +# own links here. The boolean values True and False determine +# wether the sections are open or closed by default. + +# Links for everyone +custom_links['guest'] = [ + ( "Classical Nagios GUI", "../nagios/", "link_home.gif" ), + ( "Addons", True, [ + ( "PNP4Nagios", "../pnp4nagios/", "link_reporting.gif" ), + ( "NagVis", "../nagvis/", "link_map.gif" ), + ]), +] + +# The members of the role 'user' get the same links as the guests +# but some in addition +custom_links['user'] = custom_links['guest'] + [ + ( "Open Source Components", False, [ + ( "Multisite", "http://mathias-kettner.de/checkmk_multisite.html", None, "_blank"), + ( "MK Livestatus", "http://mathias-kettner.de/checkmk_livestatus.html", None, "_blank"), + ( "Check_MK", "http://mathias-kettner.de/check_mk.html", None, "_blank"), + ( "Nagios", "http://www.nagios.org/", None, "_blank"), + ( "PNP4Nagios", "http://pnp4nagios.org/", None, "_blank"), + ( "NagVis", "http://nagvis.org/", None, "_blank"), + ( "RRDTool", "http://oss.oetiker.ch/rrdtool/", None, "_blank"), + ]) +] + +# The admins yet get further links +custom_links['admin'] = custom_links['user'] + [ + ( "Support", False, [ + ( "Mathias Kettner", "http://mathias-kettner.de/" ), + ( "Check_MK Mailinglists", "http://mathias-kettner.de/check_mk_lists.html" ), + ( "Check_MK Exchange (inofficial)", "http://exchange.check-mk.org/", None, "_blank" ), + ( "Monitoring Portal (German)", "http://monitoring-portal.org", None, "_blank"), + ]) +] + +# Hide certain views from the sidebar +# hidden_views = [ "hosttiles", "allhosts_mini" ] +# Vice versa: hide all views except these (be carefull, this + +# will also exclude custom views) +# visible_views = [ "allhosts", "searchsvc" ] + +# Load custom style sheet which can override styles defined in check_mk.css +# Put your style sheet into web/htdocs/ +# custom_style_sheet = "my_styles.css" + +# __ ___ _____ ___ +# \ \ / / \|_ _/ _ \ +# \ \ /\ / / _ \ | || | | | +# \ V V / ___ \| || |_| | +# \_/\_/_/ \_\_| \___/ +# +# Check_MK's Web Administration Tool + +# If you do not like WATO, you can disable it: +# wato_enabled = False + +# Host tags to be used in WATO +# wato_host_tags = [ +# ( "os_type", "Operating System", [ +# ( "lnx", "Linux", [ 'tcp' ],), +# ( "win", "Windows", [ 'tcp', 'snmp' ]), +# ( "net", "Network device", [ 'snmp' ]), +# ( "ping", "Other PING-only device", ), +# ]), +# ( "prod", "Productivity", [ +# ( "prod", "Production System" ), +# ( "test", "Test System" ), +# ]), +# ( "bulkwalk", "Bulkwalk (SNMP v2c)", [ +# ( None, "simple walk (SNMP v1)"), +# ( "bulk", "Bulkwalk (SNMP v2c)"), +# ], [ 'snmp' ]), +# +# ] + diff -Nru check-mk-1.2.2p3/mysql check-mk-1.2.6p12/mysql --- check-mk-1.2.2p3/mysql 2013-11-05 09:23:08.000000000 +0000 +++ check-mk-1.2.6p12/mysql 2015-09-21 10:59:54.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . 
\ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. @@ -38,7 +38,7 @@ # Com_alter_db 0 # Com_alter_db_upgrade 0 -# .----------------------------------------------------------------------. +# .--Helpers-------------------------------------------------------------. # | _ _ _ | # | | | | | ___| |_ __ ___ _ __ ___ | # | | |_| |/ _ \ | '_ \ / _ \ '__/ __| | @@ -67,8 +67,8 @@ values[varname] = value return values - -# .----------------------------------------------------------------------. +#. +# .--Sessions------------------------------------------------------------. # | ____ _ | # | / ___| ___ ___ ___(_) ___ _ __ ___ | # | \___ \ / _ \/ __/ __| |/ _ \| '_ \/ __| | @@ -88,7 +88,7 @@ values = parse_mysql(info) total_sessions = values["Threads_connected"] running_sessions = values["Threads_running"] - timedif, connections = get_counter("mysql.sessions", time.time(), values["Connections"]) + connections = get_rate("mysql.sessions", time.time(), values["Connections"]) infotext = " - %d sessions (%d running), %.2f connections/s" % ( total_sessions, running_sessions, connections) @@ -114,8 +114,7 @@ warn, crit = None, None perfdata.append((what, value, warn, crit)) - infotext = " - " + ", ".join(infos) - return (status, nagios_state_names[status] + infotext, perfdata) + return (status, ", ".join(infos), perfdata) check_info['mysql.sessions'] = { @@ -126,8 +125,8 @@ "group" : "mysql_sessions", } - -# .----------------------------------------------------------------------. +#. +# .--InnoDB-IO-----------------------------------------------------------. # | ___ ____ ____ ___ ___ | # | |_ _|_ __ _ __ ___ | _ \| __ ) |_ _/ _ \ | # | | || '_ \| '_ \ / _ \| | | | _ \ _____| | | | | | @@ -137,12 +136,13 @@ # '----------------------------------------------------------------------' def inventory_mysql_iostat(info): - if len(info) > 200: + values = parse_mysql(info) + if "Innodb_data_read" in values.keys(): return [(None, {})] def check_mysql_iostat(item, params, info): values = parse_mysql(info) - line = [ None, values["Innodb_data_read"] / 512, values["Innodb_data_written"] / 512] + line = [ None, None, values["Innodb_data_read"] / 512, values["Innodb_data_written"] / 512] return check_diskstat_line(time.time(), 'innodb_io', params, line) @@ -155,6 +155,7 @@ "group" : "mysql_innodb_io", } +#. # .--Connections---------------------------------------------------------. # | ____ _ _ | # | / ___|___ _ __ _ __ ___ ___| |_(_) ___ _ __ ___ | @@ -172,7 +173,7 @@ def check_mysql_connections(item, params, info): values = parse_mysql(info) if 'Max_used_connections' not in values: - return (3, 'UNKNOWN - Connection information are missing') + return (3, 'Connection information are missing') # The maximum number of connections that have been in use simultaneously # since the server started. 
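The mysql.sessions hunk above is ported from get_counter, which handed back a (time difference, rate) pair, to get_rate, which returns the per-second rate alone. The helper below only illustrates that calling convention; it is not Check_MK's real implementation, which persists counter state between check runs:

    import time

    _counters = {}  # name -> (last_time, last_value); in-memory stand-in

    def get_rate_sketch(name, this_time, value):
        # Return the per-second increase of a monotonic counter.
        last = _counters.get(name)
        _counters[name] = (this_time, value)
        if last is None or this_time <= last[0]:
            return 0.0
        return (value - last[1]) / (this_time - last[0])

    # connections = get_rate_sketch("mysql.sessions", time.time(), counter)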
@@ -185,23 +186,25 @@ status = 0 status_txt = '' if 'perc_used' in params: - if perc_used >= params['perc_used'][1]: + warn, crit = params['perc_used'] + if perc_used >= crit: status = 2 status_txt = ' (Threshold (%0.2f%%) for number of maximum parallel connections ' \ - 'has been reached at least once since program start' % params['perc_used'][1] - elif perc_used >= params['perc_used'][0]: + 'has been reached at least once since program start' % crit + elif perc_used >= warn: status = 1 status_txt = ' (Threshold (%0.2f%%) for number of maximum parallel connections ' \ - 'has been reached at least once since program start)' % params['perc_used'][0] + 'has been reached at least once since program start)' % warn - return (status, '%s - Max. parallel Connections: %d (Max.: %d): %0.2f%%%s' % - (nagios_state_names[status], conn, max_conn, perc_used, status_txt)) + return (status, 'Max. parallel Connections: %d (Max.: %d): %0.2f%%%s' % + (conn, max_conn, perc_used, status_txt)) check_info['mysql.connections'] = { "check_function" : check_mysql_connections, "inventory_function" : inventory_mysql_connections, "service_description" : "MySQL Daemon Connections", - "has_perfdata" : True, + "has_perfdata" : False, "group" : "mysql_connections", } + diff -Nru check-mk-1.2.2p3/mysql_capacity check-mk-1.2.6p12/mysql_capacity --- check-mk-1.2.2p3/mysql_capacity 2013-11-05 09:23:08.000000000 +0000 +++ check-mk-1.2.6p12/mysql_capacity 2015-09-21 10:59:54.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. 
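In the mysql.connections hunk above, the perc_used tuple is unpacked into explicit warn and crit levels before comparison. Assuming, as the output string suggests, that the percentage is the historical maximum of used connections relative to max_connections, the threshold logic reduces to this sketch:

    def connection_usage_state(conn, max_conn, params):
        # params may carry "perc_used": (warn, crit), both percentages.
        perc_used = float(conn) / max_conn * 100
        if "perc_used" in params:
            warn, crit = params["perc_used"]
            if perc_used >= crit:
                return 2, perc_used   # CRIT
            elif perc_used >= warn:
                return 1, perc_used   # WARN
        return 0, perc_used           # OK

    # 45 of 151 possible connections, warn at 20%, crit at 40% -> WARN
    print(connection_usage_state(45, 151, {"perc_used": (20.0, 40.0)}))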
@@ -35,32 +35,37 @@ def inventory_mysql_size(info): inventory = [] for dbname, used, avail in info: - if dbname not in [ "information_schema", "mysql", "performance_schema" ]: + if dbname not in [ "information_schema", "mysql", "performance_schema" ] \ + and used != 'NULL' and avail != 'NULL': inventory.append((dbname, None)) return inventory + def check_mysql_size(item, params, info): + # size and avail are given as bytes for dbname, size, avail in info: if item == dbname: + if size == 'NULL': + return 3, "Missing information - Size is reported as 'NULL'" size = int(size) - infotext = " - Size is %s" % get_bytes_human_readable(size) + infotext = "Size is %s" % get_bytes_human_readable(size) if params: state = 0 warn, crit = params # in MB warn_b = warn * 1048576 crit_b = crit * 1048576 - perfdata = [("size", size, warn, crit)] - if size > crit: + perfdata = [("size", size, warn_b, crit_b)] + if size > crit_b: state = 2 - infotext += " (critical at %s)" % get_bytes_human_readable(crit) - elif size > warn: + infotext += " (critical at %s)" % get_bytes_human_readable(crit_b) + elif size > warn_b: state = 1 - infotext += " (warning at %s)" % get_bytes_human_readable(crit) + infotext += " (warning at %s)" % get_bytes_human_readable(warn_b) else: state = 0 perfdata = [("size", size)] - return (state, nagios_state_names[state] + infotext, perfdata) - return (3, "UNKNOWN - Database not found in Agent output") + return (state, infotext, perfdata) + return (3, "Database not found in Agent output") check_info['mysql_capacity'] = { @@ -69,6 +74,4 @@ "service_description" : "MySQL DB %s Size", "has_perfdata" : True, "group" : "dbsize", -# "default_levels_variable" : "filesystem_default_levels", -# "includes" : [ "dbsize.include" ], } diff -Nru check-mk-1.2.2p3/mysql.connections check-mk-1.2.6p12/mysql.connections --- check-mk-1.2.2p3/mysql.connections 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/mysql.connections 2015-09-21 10:59:54.000000000 +0000 @@ -0,0 +1,34 @@ +title: MySQL Database: Maximum connection usage since startup +agents: linux +catalog: app/mysql +license: GPL +distribution: check_mk +description: + This check allows the tracking of connection usage for a MySQL database. + To do this, it queries the maximum number of connections that have been + in use simultaneously since the server started and + the maximum number of possible parallel connections. + From these two values it calculates the + maximum connection usage in percent + and matches the specified {WARN} and {CRIT} thresholds against it. + + This check needs the agent plugin {mk_mysql} to be installed. + Further details about this plugin and monitoring of MySQL can be + found in the Check_MK online documentation in the article + "Monitoring MySQL with Check_MK". + +inventory: + On each host where the agent plugin {mk_mysql} is installed + and the MySQL daemon is running, one service is generated. + +[parameters] +parameters (dict): A dictionary with currently just one possible key: {"perc_used"}. This + is a pair of two floating point numbers: + + {warn} (float): The maximum connection usage (in percent) that triggers + a {WARN} state. + + {crit} (float): The maximum connection usage (in percent) that triggers + a {CRIT} state. + + If {perc_used} is not set, the check always returns {OK} state.
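The mysql_capacity hunk above fixes a unit mismatch: the agent reports the size in bytes, while the configured levels are megabytes, so both the comparisons and the perfdata now use the converted warn_b/crit_b values. A condensed sketch of the corrected logic:

    def capacity_state(size_bytes, params):
        # Levels come from the rule in MB; the agent value is in bytes.
        if not params:
            return 0
        warn_mb, crit_mb = params
        warn_b, crit_b = warn_mb * 1048576, crit_mb * 1048576
        if size_bytes > crit_b:
            return 2
        elif size_bytes > warn_b:
            return 1
        return 0

    print(capacity_state(300 * 1048576, (200, 500)))  # 300 MB -> 1 (WARN)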
diff -Nru check-mk-1.2.2p3/mysql.innodb_io check-mk-1.2.6p12/mysql.innodb_io --- check-mk-1.2.2p3/mysql.innodb_io 2013-11-05 09:23:07.000000000 +0000 +++ check-mk-1.2.6p12/mysql.innodb_io 2015-09-21 10:59:54.000000000 +0000 @@ -1,6 +1,6 @@ title: MySQL InnoDB engine IO statistics agents: linux -author: Mathias Kettner +catalog: app/mysql license: GPL distribution: check_mk description: diff -Nru check-mk-1.2.2p3/mysql.sessions check-mk-1.2.6p12/mysql.sessions --- check-mk-1.2.2p3/mysql.sessions 2013-11-05 09:23:07.000000000 +0000 +++ check-mk-1.2.6p12/mysql.sessions 2015-09-21 10:59:54.000000000 +0000 @@ -1,6 +1,6 @@ title: MySQL Database sessions agents: linux -author: Mathias Kettner +catalog: app/mysql license: GPL distribution: check_mk description: @@ -15,12 +15,10 @@ inventory: The check generates one item for the sessions connected to the MySQL daemon. +perfdata: + The check generates perfdata for the total and running sessions, and the + connection rate. [configuration] warn(int): number of sessions at which the check goes warn crit(int): number of sessions for a critical state - - -perfdata: - The check generates perfdata for the total and running sessions, and the - connection rate. diff -Nru check-mk-1.2.2p3/mysql_slave check-mk-1.2.6p12/mysql_slave --- check-mk-1.2.2p3/mysql_slave 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/mysql_slave 2015-09-21 10:59:54.000000000 +0000 @@ -0,0 +1,95 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. 
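The new mysql_slave check that follows parses key/value lines as produced by MySQL's SHOW SLAVE STATUS and alerts on a stopped IO or SQL thread and on replication lag. A hypothetical parameter set and the reduced lag comparison (the key name matches the code below):

    # Hypothetical levels: WARN beyond 60s of lag, CRIT beyond 300s.
    params = {"seconds_behind_master": (60, 300)}

    def lag_state(lag_seconds, params):
        warn, crit = params.get("seconds_behind_master", (None, None))
        if crit is not None and lag_seconds > crit:
            return 2
        elif warn is not None and lag_seconds > warn:
            return 1
        return 0

    print(lag_state(120, params))  # -> 1 (WARN)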
+ +def inventory_mysql_slave(info): + if info: + return [(None, {})] + +def parse_mysql_slave(info): + data = {} + for l in info: + if ':' in l[0]: + val = ' '.join(l[1:]) + + # Parse some values + try: + val = int(val) + except ValueError: + if val == 'Yes': + val = True + elif val == 'No': + val = False + elif val == 'None': + val = None + + data[l[0][:-1]] = val + return data + +def check_mysql_slave(_unused, params, info): + data = parse_mysql_slave(info) + + state = 0 + perfdata = [] + output = [] + + if data['Slave_IO_Running']: + output.append('Slave-IO: running') + else: + output.append('Slave-IO: not running(!!)') + state = 2 + + if data['Slave_SQL_Running']: + output.append('Slave-SQL: running') + + # Makes only sense to monitor the age when the SQL slave is running + if data['Seconds_Behind_Master'] == 'NULL': + output.append('Time behind master: NULL (Lost connection?)(!!)') + state = 2 + else: + out = 'Time behind Master: %s' % get_age_human_readable(data['Seconds_Behind_Master']) + warn, crit = params.get('seconds_behind_master', (None, None)) + if crit != None and data['Seconds_Behind_Master'] > crit: + state = 2 + out += '(!!)' + elif warn != None and data['Seconds_Behind_Master'] > warn: + state = max(state, 1) + out += '(!)' + output.append(out) + perfdata.append(('seconds_behind_master', data['Seconds_Behind_Master'], warn, crit)) + else: + output.append('Slave-SQL: not running(!!)') + state = 2 + + return state, ', '.join(output), perfdata + +check_info['mysql_slave'] = { + "check_function" : check_mysql_slave, + "inventory_function" : inventory_mysql_slave, + "service_description" : "MySQL DB Slave", + "has_perfdata" : True, + "group" : "mysql_slave", +} + diff -Nru check-mk-1.2.2p3/nagios4/bitmap.h check-mk-1.2.6p12/nagios4/bitmap.h --- check-mk-1.2.2p3/nagios4/bitmap.h 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/nagios4/bitmap.h 2014-02-10 09:00:20.000000000 +0000 @@ -0,0 +1,156 @@ +#ifndef LIBNAGIOS_bitmap_h__ +#define LIBNAGIOS_bitmap_h__ + +/** + * @file bitmap.h + * @brief Bit map API + * + * The bitmap api is useful for running set operations on objects + * indexed by unsigned integers. + * @{ + */ +struct bitmap; +typedef struct bitmap bitmap; + +/** + * Resize a bitmap + * If the bitmap is made smaller, data will silently be lost. + * + * @param bm The bitmap to resize + * @param size The new desired size of the bitmap + * @return 0 on success, -1 on errors. 
+ */ +extern int bitmap_resize(bitmap *bm, unsigned long size); + +/** + * Create a bitmaptor of size 'size' + * @param size Desired storage capacity + * @return A bitmap pointer on success, NULL on errors + */ +extern bitmap *bitmap_create(unsigned long size); + +/** + * Destroy a bitmaptor by freeing all the memory it uses + * @param bm The bitmaptor to destroy + */ +extern void bitmap_destroy(bitmap *bm); + +/** + * Copy a bitmaptor + * @param bm The bitmaptor to copy + * @return Pointer to an identical bitmap on success, NULL on errors + */ +extern bitmap *bitmap_copy(const bitmap *bm); + +/** + * Set a bit in the map + * @param bm The bitmaptor to operate on + * @param pos Position of the bit to set + * @return 0 on success, -1 on errors + */ +extern int bitmap_set(bitmap *bm, unsigned long pos); + +/** + * Check if a particular bit is set in the map + * @param bm The bitmaptor to check + * @param pos Position of the bit to check + * @return 1 if set, otherwise 0 + */ +extern int bitmap_isset(const bitmap *bm, unsigned long pos); + +/** + * Unset a particular bit in the map + * @param bm The bitmaptor to operate on + * @param pos Position of the bit to unset + */ +extern int bitmap_unset(bitmap *bm, unsigned long pos); + +/** + * Obtain cardinality (max number of elements) of the bitmaptor + * @param bm The bitmaptor to check + * @return The cardinality of the bitmaptor + */ +extern unsigned long bitmap_cardinality(const bitmap *bm); +#define bitmap_size bitmap_cardinality + +/** + * Count set bits in map. Completed in O(n/8) time. + * @param bm The bitmaptor to count bits in + * @return The number of set bits + */ +extern unsigned long bitmap_count_set_bits(const bitmap *bm); + +/** + * Count unset bits in map. Completed in O(n/8) time. + * @param bm The bitmaptor to count bits in + * @return The number of set bits + */ +extern unsigned long bitmap_count_unset_bits(const bitmap *bm); + +/** + * Unset all bits in a bitmap + * @param bm The bitmap to clear + */ +extern void bitmap_clear(bitmap *bm); + +/** + * Calculate intersection of two bitmaps + * The intersection is defined as all bits that are members of + * both A and B. It's equivalent to bitwise AND. + * This function completes in O(n/sizeof(long)) operations. + * @param a The first bitmaptor + * @param b The second bitmaptor + * @return NULL on errors; A newly created bitmaptor on success. + */ +extern bitmap *bitmap_intersect(const bitmap *a, const bitmap *b); + +/** + * Calculate union of two bitmaps + * The union is defined as all bits that are members of + * A or B or both A and B. It's equivalent to bitwise OR. + * This function completes in O(n/sizeof(long)) operations. + * @param a The first bitmaptor + * @param b The second bitmaptor + * @return NULL on errors; A newly created bitmaptor on success. + */ +extern bitmap *bitmap_union(const bitmap *a, const bitmap *b); + +/** + * Calculate union of two bitmaps and store result in one of them + * @param res The first bitmap + * @param addme The bitmap to unite to the first bitmap + * @return NULL on errors, res on success + */ +extern bitmap *bitmap_unite(bitmap *res, const bitmap *addme); + +/** + * Calculate set difference between two bitmaps + * The set difference of A / B is defined as all members of A + * that isn't members of B. Note that parameter ordering matters + * for this function. + * This function completes in O(n/sizeof(long)) operations. 
+ * @param a The first bitmaptor (numerator) + * @param b The first bitmaptor (denominator) + * @return NULL on errors; A newly created bitmaptor on success. + */ +extern bitmap *bitmap_diff(const bitmap *a, const bitmap *b); + +/** + * Calculate symmetric difference between two bitmaps + * The symmetric difference between A and B is the set that + * contains all elements in either set but not in both. + * This function completes in O(n/sizeof(long)) operations. + * @param a The first bitmaptor + * @param b The second bitmaptor + */ +extern bitmap *bitmap_symdiff(const bitmap *a, const bitmap *b); + +/** + * Compare two bitmaps for equality + * @param a The first bitmaptor + * @param b The other bitmaptor + * @return Similar to memcmp(), with tiebreaks determined by cardinality + */ +extern int bitmap_cmp(const bitmap *a, const bitmap *b); +/** @} */ +#endif /* LIBNAGIOS_bitmap_h__ */ diff -Nru check-mk-1.2.2p3/nagios4/broker.h check-mk-1.2.6p12/nagios4/broker.h --- check-mk-1.2.2p3/nagios4/broker.h 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/nagios4/broker.h 2014-02-10 09:00:20.000000000 +0000 @@ -0,0 +1,213 @@ +/***************************************************************************** + * + * BROKER.H - Event broker includes for Nagios + * + * + * License: + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
+ * + *****************************************************************************/ + +#ifndef _BROKER_H +#define _BROKER_H + +#include "nagios.h" + +/*************** EVENT BROKER OPTIONS *****************/ + +#define BROKER_NOTHING 0 +#define BROKER_EVERYTHING 1048575 + +#define BROKER_PROGRAM_STATE 1 /* DONE */ +#define BROKER_TIMED_EVENTS 2 /* DONE */ +#define BROKER_SERVICE_CHECKS 4 /* DONE */ +#define BROKER_HOST_CHECKS 8 /* DONE */ +#define BROKER_EVENT_HANDLERS 16 /* DONE */ +#define BROKER_LOGGED_DATA 32 /* DONE */ +#define BROKER_NOTIFICATIONS 64 /* DONE */ +#define BROKER_FLAPPING_DATA 128 /* DONE */ +#define BROKER_COMMENT_DATA 256 /* DONE */ +#define BROKER_DOWNTIME_DATA 512 /* DONE */ +#define BROKER_SYSTEM_COMMANDS 1024 /* DONE */ +#define BROKER_OCP_DATA_UNUSED 2048 /* reusable */ +#define BROKER_STATUS_DATA 4096 /* DONE */ +#define BROKER_ADAPTIVE_DATA 8192 /* DONE */ +#define BROKER_EXTERNALCOMMAND_DATA 16384 /* DONE */ +#define BROKER_RETENTION_DATA 32768 /* DONE */ +#define BROKER_ACKNOWLEDGEMENT_DATA 65536 +#define BROKER_STATECHANGE_DATA 131072 +#define BROKER_RESERVED18 262144 +#define BROKER_RESERVED19 524288 + + +/****** EVENT TYPES ************************/ + +#define NEBTYPE_NONE 0 + +#define NEBTYPE_HELLO 1 +#define NEBTYPE_GOODBYE 2 +#define NEBTYPE_INFO 3 + +#define NEBTYPE_PROCESS_START 100 +#define NEBTYPE_PROCESS_DAEMONIZE 101 +#define NEBTYPE_PROCESS_RESTART 102 +#define NEBTYPE_PROCESS_SHUTDOWN 103 +#define NEBTYPE_PROCESS_PRELAUNCH 104 /* before objects are read or verified */ +#define NEBTYPE_PROCESS_EVENTLOOPSTART 105 +#define NEBTYPE_PROCESS_EVENTLOOPEND 106 + +#define NEBTYPE_TIMEDEVENT_ADD 200 +#define NEBTYPE_TIMEDEVENT_REMOVE 201 +#define NEBTYPE_TIMEDEVENT_EXECUTE 202 +#define NEBTYPE_TIMEDEVENT_DELAY 203 /* NOT IMPLEMENTED */ +#define NEBTYPE_TIMEDEVENT_SKIP 204 /* NOT IMPLEMENTED */ +#define NEBTYPE_TIMEDEVENT_SLEEP 205 + +#define NEBTYPE_LOG_DATA 300 +#define NEBTYPE_LOG_ROTATION 301 + +#define NEBTYPE_SYSTEM_COMMAND_START 400 +#define NEBTYPE_SYSTEM_COMMAND_END 401 + +#define NEBTYPE_EVENTHANDLER_START 500 +#define NEBTYPE_EVENTHANDLER_END 501 + +#define NEBTYPE_NOTIFICATION_START 600 +#define NEBTYPE_NOTIFICATION_END 601 +#define NEBTYPE_CONTACTNOTIFICATION_START 602 +#define NEBTYPE_CONTACTNOTIFICATION_END 603 +#define NEBTYPE_CONTACTNOTIFICATIONMETHOD_START 604 +#define NEBTYPE_CONTACTNOTIFICATIONMETHOD_END 605 + +#define NEBTYPE_SERVICECHECK_INITIATE 700 +#define NEBTYPE_SERVICECHECK_PROCESSED 701 +#define NEBTYPE_SERVICECHECK_RAW_START 702 /* NOT IMPLEMENTED */ +#define NEBTYPE_SERVICECHECK_RAW_END 703 /* NOT IMPLEMENTED */ +#define NEBTYPE_SERVICECHECK_ASYNC_PRECHECK 704 + +#define NEBTYPE_HOSTCHECK_INITIATE 800 /* a check of the route to the host has been initiated */ +#define NEBTYPE_HOSTCHECK_PROCESSED 801 /* the processed/final result of a host check */ +#define NEBTYPE_HOSTCHECK_RAW_START 802 /* the start of a "raw" host check */ +#define NEBTYPE_HOSTCHECK_RAW_END 803 /* a finished "raw" host check */ +#define NEBTYPE_HOSTCHECK_ASYNC_PRECHECK 804 +#define NEBTYPE_HOSTCHECK_SYNC_PRECHECK 805 + +#define NEBTYPE_COMMENT_ADD 900 +#define NEBTYPE_COMMENT_DELETE 901 +#define NEBTYPE_COMMENT_LOAD 902 + +#define NEBTYPE_FLAPPING_START 1000 +#define NEBTYPE_FLAPPING_STOP 1001 + +#define NEBTYPE_DOWNTIME_ADD 1100 +#define NEBTYPE_DOWNTIME_DELETE 1101 +#define NEBTYPE_DOWNTIME_LOAD 1102 +#define NEBTYPE_DOWNTIME_START 1103 +#define NEBTYPE_DOWNTIME_STOP 1104 + +#define NEBTYPE_PROGRAMSTATUS_UPDATE 1200 +#define NEBTYPE_HOSTSTATUS_UPDATE 1201 
+#define NEBTYPE_SERVICESTATUS_UPDATE 1202 +#define NEBTYPE_CONTACTSTATUS_UPDATE 1203 + +#define NEBTYPE_ADAPTIVEPROGRAM_UPDATE 1300 +#define NEBTYPE_ADAPTIVEHOST_UPDATE 1301 +#define NEBTYPE_ADAPTIVESERVICE_UPDATE 1302 +#define NEBTYPE_ADAPTIVECONTACT_UPDATE 1303 + +#define NEBTYPE_EXTERNALCOMMAND_START 1400 +#define NEBTYPE_EXTERNALCOMMAND_END 1401 + +#define NEBTYPE_AGGREGATEDSTATUS_STARTDUMP 1500 +#define NEBTYPE_AGGREGATEDSTATUS_ENDDUMP 1501 + +#define NEBTYPE_RETENTIONDATA_STARTLOAD 1600 +#define NEBTYPE_RETENTIONDATA_ENDLOAD 1601 +#define NEBTYPE_RETENTIONDATA_STARTSAVE 1602 +#define NEBTYPE_RETENTIONDATA_ENDSAVE 1603 + +#define NEBTYPE_ACKNOWLEDGEMENT_ADD 1700 +#define NEBTYPE_ACKNOWLEDGEMENT_REMOVE 1701 /* NOT IMPLEMENTED */ +#define NEBTYPE_ACKNOWLEDGEMENT_LOAD 1702 /* NOT IMPLEMENTED */ + +#define NEBTYPE_STATECHANGE_START 1800 /* NOT IMPLEMENTED */ +#define NEBTYPE_STATECHANGE_END 1801 + + + +/****** EVENT FLAGS ************************/ + +#define NEBFLAG_NONE 0 +#define NEBFLAG_PROCESS_INITIATED 1 /* event was initiated by Nagios process */ +#define NEBFLAG_USER_INITIATED 2 /* event was initiated by a user request */ +#define NEBFLAG_MODULE_INITIATED 3 /* event was initiated by an event broker module */ + + + + +/****** EVENT ATTRIBUTES *******************/ + +#define NEBATTR_NONE 0 + +#define NEBATTR_SHUTDOWN_NORMAL 1 +#define NEBATTR_SHUTDOWN_ABNORMAL 2 +#define NEBATTR_RESTART_NORMAL 4 +#define NEBATTR_RESTART_ABNORMAL 8 + +#define NEBATTR_FLAPPING_STOP_NORMAL 1 +#define NEBATTR_FLAPPING_STOP_DISABLED 2 /* flapping stopped because flap detection was disabled */ + +#define NEBATTR_DOWNTIME_STOP_NORMAL 1 +#define NEBATTR_DOWNTIME_STOP_CANCELLED 2 + + + +/****** EVENT BROKER FUNCTIONS *************/ + +#ifdef USE_EVENT_BROKER +NAGIOS_BEGIN_DECL + +struct timeval get_broker_timestamp(struct timeval *); +void broker_program_state(int, int, int, struct timeval *); +void broker_timed_event(int, int, int, timed_event *, struct timeval *); +void broker_log_data(int, int, int, char *, unsigned long, time_t, struct timeval *); +int broker_event_handler(int, int, int, int, void *, int, int, struct timeval, struct timeval, double, int, int, int, char *, char *, char *, struct timeval *); +void broker_system_command(int, int, int, struct timeval, struct timeval, double, int, int, int, char *, char *, struct timeval *); +int broker_host_check(int, int, int, host *, int, int, int, struct timeval, struct timeval, char *, double, double, int, int, int, char *, char *, char *, char *, struct timeval *, check_result *); +int broker_service_check(int, int, int, service *, int, struct timeval, struct timeval, char *, double, double, int, int, int, char *, struct timeval *, check_result *); +void broker_comment_data(int, int, int, int, int, char *, char *, time_t, char *, char *, int, int, int, time_t, unsigned long, struct timeval *); +void broker_downtime_data(int, int, int, int, char *, char *, time_t, char *, char *, time_t, time_t, int, unsigned long, unsigned long, unsigned long, struct timeval *); +void broker_flapping_data(int, int, int, int, void *, double, double, double, struct timeval *); +void broker_program_status(int, int, int, struct timeval *); +void broker_host_status(int, int, int, host *, struct timeval *); +void broker_service_status(int, int, int, service *, struct timeval *); +void broker_contact_status(int, int, int, contact *, struct timeval *); +int broker_notification_data(int, int, int, int, int, struct timeval, struct timeval, void *, char *, char *, int, int, struct 
timeval *); +int broker_contact_notification_data(int, int, int, int, int, struct timeval, struct timeval, void *, contact *, char *, char *, int, struct timeval *); +int broker_contact_notification_method_data(int, int, int, int, int, struct timeval, struct timeval, void *, contact *, char *, char *, char *, int, struct timeval *); +void broker_adaptive_program_data(int, int, int, int, unsigned long, unsigned long, unsigned long, unsigned long, struct timeval *); +void broker_adaptive_host_data(int, int, int, host *, int, unsigned long, unsigned long, struct timeval *); +void broker_adaptive_service_data(int, int, int, service *, int, unsigned long, unsigned long, struct timeval *); +void broker_adaptive_contact_data(int, int, int, contact *, int, unsigned long, unsigned long, unsigned long, unsigned long, unsigned long, unsigned long, struct timeval *); +void broker_external_command(int, int, int, int, time_t, char *, char *, struct timeval *); +void broker_aggregated_status_data(int, int, int, struct timeval *); +void broker_retention_data(int, int, int, struct timeval *); +void broker_acknowledgement_data(int, int, int, int, void *, char *, char *, int, int, int, struct timeval *); +void broker_statechange_data(int, int, int, int, void *, int, int, int, int, struct timeval *); + +NAGIOS_END_DECL +#endif + +#endif diff -Nru check-mk-1.2.2p3/nagios4/cgiauth.h check-mk-1.2.6p12/nagios4/cgiauth.h --- check-mk-1.2.2p3/nagios4/cgiauth.h 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/nagios4/cgiauth.h 2014-02-10 09:00:20.000000000 +0000 @@ -0,0 +1,70 @@ +/***************************************************************************** + * + * CGIAUTH.H - Authorization utilities header file + * + * + * License: + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
+ * + *****************************************************************************/ + +#ifndef _AUTH_H +#define _AUTH_H +#include "common.h" +#include "objects.h" + +NAGIOS_BEGIN_DECL + +typedef struct authdata_struct { + char *username; + int authorized_for_all_hosts; + int authorized_for_all_host_commands; + int authorized_for_all_services; + int authorized_for_all_service_commands; + int authorized_for_system_information; + int authorized_for_system_commands; + int authorized_for_configuration_information; + int authorized_for_read_only; + int authenticated; + } authdata; + + + +int get_authentication_information(authdata *); /* gets current authentication information */ + +int is_authorized_for_host(host *, authdata *); +int is_authorized_for_service(service *, authdata *); + +int is_authorized_for_all_hosts(authdata *); +int is_authorized_for_all_services(authdata *); + +int is_authorized_for_system_information(authdata *); +int is_authorized_for_system_commands(authdata *); +int is_authorized_for_host_commands(host *, authdata *); +int is_authorized_for_service_commands(service *, authdata *); + +int is_authorized_for_hostgroup(hostgroup *, authdata *); +int is_authorized_for_servicegroup(servicegroup *, authdata *); + +int is_authorized_for_hostgroup_commands(hostgroup *, authdata *); +int is_authorized_for_servicegroup_commands(servicegroup *, authdata *); + +int is_authorized_for_configuration_information(authdata *); + +int is_authorized_for_read_only(authdata *); + +NAGIOS_END_DECL + +#endif diff -Nru check-mk-1.2.2p3/nagios4/cgiutils.h check-mk-1.2.6p12/nagios4/cgiutils.h --- check-mk-1.2.2p3/nagios4/cgiutils.h 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/nagios4/cgiutils.h 2014-02-10 09:00:20.000000000 +0000 @@ -0,0 +1,479 @@ +/************************************************************************ + * + * CGIUTILS.H - Header file for common CGI functions + * + * License: + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
+ ************************************************************************/ + +#ifndef _CGIUTILS_H +#define _CGIUTILS_H +#include "lib/libnagios.h" +#include "logging.h" +#include "objects.h" +#include "cgiauth.h" + +NAGIOS_BEGIN_DECL + + /**************************** CGI REFRESH RATE ******************************/ + +#define DEFAULT_REFRESH_RATE 60 /* 60 second refresh rate for CGIs */ + + + /******************************* CGI NAMES **********************************/ + +#define STATUS_CGI "status.cgi" +#define STATUSMAP_CGI "statusmap.cgi" +#define STATUSWORLD_CGI "statuswrl.cgi" +#define COMMAND_CGI "cmd.cgi" +#define EXTINFO_CGI "extinfo.cgi" +#define SHOWLOG_CGI "showlog.cgi" +#define NOTIFICATIONS_CGI "notifications.cgi" +#define HISTORY_CGI "history.cgi" +#define CONFIG_CGI "config.cgi" +#define OUTAGES_CGI "outages.cgi" +#define TRENDS_CGI "trends.cgi" +#define AVAIL_CGI "avail.cgi" +#define TAC_CGI "tac.cgi" +#define STATUSWML_CGI "statuswml.cgi" +#define TRACEROUTE_CGI "traceroute.cgi" +#define HISTOGRAM_CGI "histogram.cgi" +#define CHECKSANITY_CGI "checksanity.cgi" +#define MINISTATUS_CGI "ministatus.cgi" +#define SUMMARY_CGI "summary.cgi" + + + /**************************** STYLE SHEET NAMES ******************************/ + +#define COMMON_CSS "common.css" + +#define SHOWLOG_CSS "showlog.css" +#define STATUS_CSS "status.css" +#define STATUSMAP_CSS "statusmap.css" +#define COMMAND_CSS "cmd.css" +#define EXTINFO_CSS "extinfo.css" +#define NOTIFICATIONS_CSS "notifications.css" +#define HISTORY_CSS "history.css" +#define CONFIG_CSS "config.css" +#define OUTAGES_CSS "outages.css" +#define TRENDS_CSS "trends.css" +#define AVAIL_CSS "avail.css" +#define TAC_CSS "tac.css" +#define HISTOGRAM_CSS "histogram.css" +#define CHECKSANITY_CSS "checksanity.css" +#define MINISTATUS_CSS "ministatus.css" +#define SUMMARY_CSS "summary.css" + + /********************************* JAVASCRIPT INCLUDES **********************/ +#define JQUERY_JS "jquery-1.7.1.min.js" + + /********************************* ICONS ************************************/ + +#define STATUS_ICON_WIDTH 20 +#define STATUS_ICON_HEIGHT 20 + +#define INFO_ICON "info.png" +#define INFO_ICON_ALT "Informational Message" +#define START_ICON "start.gif" +#define START_ICON_ALT "Program Start" +#define STOP_ICON "stop.gif" +#define STOP_ICON_ALT "Program End" +#define RESTART_ICON "restart.gif" +#define RESTART_ICON_ALT "Program Restart" +#define OK_ICON "recovery.png" +#define OK_ICON_ALT "Service Ok" +#define CRITICAL_ICON "critical.png" +#define CRITICAL_ICON_ALT "Service Critical" +#define WARNING_ICON "warning.png" +#define WARNING_ICON_ALT "Service Warning" +#define UNKNOWN_ICON "unknown.png" +#define UNKNOWN_ICON_ALT "Service Unknown" +#define NOTIFICATION_ICON "notify.gif" +#define NOTIFICATION_ICON_ALT "Service Notification" +#define LOG_ROTATION_ICON "logrotate.png" +#define LOG_ROTATION_ICON_ALT "Log Rotation" +#define EXTERNAL_COMMAND_ICON "command.png" +#define EXTERNAL_COMMAND_ICON_ALT "External Command" + +#define STATUS_DETAIL_ICON "status2.gif" +#define STATUS_OVERVIEW_ICON "status.gif" +#define STATUSMAP_ICON "status3.gif" +#define STATUSWORLD_ICON "status4.gif" +#define EXTINFO_ICON "extinfo.gif" +#define HISTORY_ICON "history.gif" +#define CONTACTGROUP_ICON "contactgroup.gif" +#define TRENDS_ICON "trends.gif" + +#define DISABLED_ICON "disabled.gif" +#define ENABLED_ICON "enabled.gif" +#define PASSIVE_ONLY_ICON "passiveonly.gif" +#define NOTIFICATIONS_DISABLED_ICON "ndisabled.gif" +#define ACKNOWLEDGEMENT_ICON 
"ack.gif" +#define REMOVE_ACKNOWLEDGEMENT_ICON "noack.gif" +#define COMMENT_ICON "comment.gif" +#define DELETE_ICON "delete.gif" +#define DELAY_ICON "delay.gif" +#define DOWNTIME_ICON "downtime.gif" +#define PASSIVE_ICON "passiveonly.gif" +#define RIGHT_ARROW_ICON "right.gif" +#define LEFT_ARROW_ICON "left.gif" +#define UP_ARROW_ICON "up.gif" +#define DOWN_ARROW_ICON "down.gif" +#define FLAPPING_ICON "flapping.gif" +#define SCHEDULED_DOWNTIME_ICON "downtime.gif" +#define EMPTY_ICON "empty.gif" + +#define ACTIVE_ICON "active.gif" +#define ACTIVE_ICON_ALT "Active Mode" +#define STANDBY_ICON "standby.gif" +#define STANDBY_ICON_ALT "Standby Mode" + +#define HOST_DOWN_ICON "critical.png" +#define HOST_DOWN_ICON_ALT "Host Down" +#define HOST_UNREACHABLE_ICON "critical.png" +#define HOST_UNREACHABLE_ICON_ALT "Host Unreachable" +#define HOST_UP_ICON "recovery.png" +#define HOST_UP_ICON_ALT "Host Up" +#define HOST_NOTIFICATION_ICON "notify.gif" +#define HOST_NOTIFICATION_ICON_ALT "Host Notification" + +#define SERVICE_EVENT_ICON "serviceevent.gif" +#define SERVICE_EVENT_ICON_ALT "Service Event Handler" +#define HOST_EVENT_ICON "hostevent.gif" +#define HOST_EVENT_ICON_ALT "Host Event Handler" + +#define THERM_OK_IMAGE "thermok.png" +#define THERM_WARNING_IMAGE "thermwarn.png" +#define THERM_CRITICAL_IMAGE "thermcrit.png" + +#define CONFIGURATION_ICON "config.gif" +#define NOTES_ICON "notes.gif" +#define ACTION_ICON "action.gif" +#define DETAIL_ICON "detail.gif" + +#define PARENT_TRAVERSAL_ICON "parentup.gif" + +#define TAC_DISABLED_ICON "tacdisabled.png" +#define TAC_ENABLED_ICON "tacenabled.png" + +#define ZOOM1_ICON "zoom1.gif" +#define ZOOM2_ICON "zoom2.gif" + +#define CONTEXT_HELP_ICON1 "contexthelp1.gif" +#define CONTEXT_HELP_ICON2 "contexthelp2.gif" + +#define SPLUNK_SMALL_WHITE_ICON "splunk1.gif" +#define SPLUNK_SMALL_BLACK_ICON "splunk2.gif" + +#define FIRST_PAGE_ICON "b_first2.png" +#define LAST_PAGE_ICON "b_last2.png" +#define NEXT_PAGE_ICON "b_next2.png" +#define PREVIOUS_PAGE_ICON "b_prev2.png" + + + /********************* EXTENDED INFO CGI DISPLAY TYPES *********************/ + +#define DISPLAY_PROCESS_INFO 0 +#define DISPLAY_HOST_INFO 1 +#define DISPLAY_SERVICE_INFO 2 +#define DISPLAY_COMMENTS 3 +#define DISPLAY_PERFORMANCE 4 +#define DISPLAY_HOSTGROUP_INFO 5 +#define DISPLAY_DOWNTIME 6 +#define DISPLAY_SCHEDULING_QUEUE 7 +#define DISPLAY_SERVICEGROUP_INFO 8 + + + /************************ COMMAND CGI COMMAND MODES *************************/ + +#define CMDMODE_NONE 0 +#define CMDMODE_REQUEST 1 +#define CMDMODE_COMMIT 2 + + + + /******************** HOST AND SERVICE NOTIFICATION TYPES ******************/ + +#define NOTIFICATION_ALL 0 /* all service and host notifications */ +#define NOTIFICATION_SERVICE_ALL 1 /* all types of service notifications */ +#define NOTIFICATION_HOST_ALL 2 /* all types of host notifications */ +#define NOTIFICATION_SERVICE_WARNING 4 +#define NOTIFICATION_SERVICE_UNKNOWN 8 +#define NOTIFICATION_SERVICE_CRITICAL 16 +#define NOTIFICATION_SERVICE_RECOVERY 32 +#define NOTIFICATION_HOST_DOWN 64 +#define NOTIFICATION_HOST_UNREACHABLE 128 +#define NOTIFICATION_HOST_RECOVERY 256 +#define NOTIFICATION_SERVICE_ACK 512 +#define NOTIFICATION_HOST_ACK 1024 +#define NOTIFICATION_SERVICE_FLAP 2048 +#define NOTIFICATION_HOST_FLAP 4096 +#define NOTIFICATION_SERVICE_CUSTOM 8192 +#define NOTIFICATION_HOST_CUSTOM 16384 + + + /********************** HOST AND SERVICE ALERT TYPES **********************/ + +#define HISTORY_ALL 0 /* all service and host alert */ +#define 
HISTORY_SERVICE_ALL 1 /* all types of service alerts */ +#define HISTORY_HOST_ALL 2 /* all types of host alerts */ +#define HISTORY_SERVICE_WARNING 4 +#define HISTORY_SERVICE_UNKNOWN 8 +#define HISTORY_SERVICE_CRITICAL 16 +#define HISTORY_SERVICE_RECOVERY 32 +#define HISTORY_HOST_DOWN 64 +#define HISTORY_HOST_UNREACHABLE 128 +#define HISTORY_HOST_RECOVERY 256 + + + /****************************** SORT TYPES *******************************/ + +#define SORT_NONE 0 +#define SORT_ASCENDING 1 +#define SORT_DESCENDING 2 + + + /***************************** SORT OPTIONS ******************************/ + +#define SORT_NOTHING 0 +#define SORT_HOSTNAME 1 +#define SORT_SERVICENAME 2 +#define SORT_SERVICESTATUS 3 +#define SORT_LASTCHECKTIME 4 +#define SORT_CURRENTATTEMPT 5 +#define SORT_STATEDURATION 6 +#define SORT_NEXTCHECKTIME 7 +#define SORT_HOSTSTATUS 8 +#define SORT_HOSTURGENCY 9 + + + /****************** HOST AND SERVICE FILTER PROPERTIES *******************/ + +#define HOST_SCHEDULED_DOWNTIME 1 +#define HOST_NO_SCHEDULED_DOWNTIME 2 +#define HOST_STATE_ACKNOWLEDGED 4 +#define HOST_STATE_UNACKNOWLEDGED 8 +#define HOST_CHECKS_DISABLED 16 +#define HOST_CHECKS_ENABLED 32 +#define HOST_EVENT_HANDLER_DISABLED 64 +#define HOST_EVENT_HANDLER_ENABLED 128 +#define HOST_FLAP_DETECTION_DISABLED 256 +#define HOST_FLAP_DETECTION_ENABLED 512 +#define HOST_IS_FLAPPING 1024 +#define HOST_IS_NOT_FLAPPING 2048 +#define HOST_NOTIFICATIONS_DISABLED 4096 +#define HOST_NOTIFICATIONS_ENABLED 8192 +#define HOST_PASSIVE_CHECKS_DISABLED 16384 +#define HOST_PASSIVE_CHECKS_ENABLED 32768 +#define HOST_PASSIVE_CHECK 65536 +#define HOST_ACTIVE_CHECK 131072 +#define HOST_HARD_STATE 262144 +#define HOST_SOFT_STATE 524288 + + +#define SERVICE_SCHEDULED_DOWNTIME 1 +#define SERVICE_NO_SCHEDULED_DOWNTIME 2 +#define SERVICE_STATE_ACKNOWLEDGED 4 +#define SERVICE_STATE_UNACKNOWLEDGED 8 +#define SERVICE_CHECKS_DISABLED 16 +#define SERVICE_CHECKS_ENABLED 32 +#define SERVICE_EVENT_HANDLER_DISABLED 64 +#define SERVICE_EVENT_HANDLER_ENABLED 128 +#define SERVICE_FLAP_DETECTION_ENABLED 256 +#define SERVICE_FLAP_DETECTION_DISABLED 512 +#define SERVICE_IS_FLAPPING 1024 +#define SERVICE_IS_NOT_FLAPPING 2048 +#define SERVICE_NOTIFICATIONS_DISABLED 4096 +#define SERVICE_NOTIFICATIONS_ENABLED 8192 +#define SERVICE_PASSIVE_CHECKS_DISABLED 16384 +#define SERVICE_PASSIVE_CHECKS_ENABLED 32768 +#define SERVICE_PASSIVE_CHECK 65536 +#define SERVICE_ACTIVE_CHECK 131072 +#define SERVICE_HARD_STATE 262144 +#define SERVICE_SOFT_STATE 524288 + + + /****************************** SSI TYPES ********************************/ + +#define SSI_HEADER 0 +#define SSI_FOOTER 1 + + + + /************************ CONTEXT-SENSITIVE HELP *************************/ + +#define CONTEXTHELP_STATUS_DETAIL "A1" +#define CONTEXTHELP_STATUS_HGOVERVIEW "A2" +#define CONTEXTHELP_STATUS_HGSUMMARY "A3" +#define CONTEXTHELP_STATUS_HGGRID "A4" +#define CONTEXTHELP_STATUS_SVCPROBLEMS "A5" +#define CONTEXTHELP_STATUS_HOST_DETAIL "A6" +#define CONTEXTHELP_STATUS_HOSTPROBLEMS "A7" +#define CONTEXTHELP_STATUS_SGOVERVIEW "A8" +#define CONTEXTHELP_STATUS_SGSUMMARY "A9" +#define CONTEXTHELP_STATUS_SGGRID "A10" + +#define CONTEXTHELP_TAC "B1" + +#define CONTEXTHELP_MAP "C1" + +#define CONTEXTHELP_LOG "D1" + +#define CONTEXTHELP_HISTORY "E1" + +#define CONTEXTHELP_NOTIFICATIONS "F1" + +#define CONTEXTHELP_TRENDS_MENU1 "G1" +#define CONTEXTHELP_TRENDS_MENU2 "G2" +#define CONTEXTHELP_TRENDS_MENU3 "G3" +#define CONTEXTHELP_TRENDS_MENU4 "G4" +#define CONTEXTHELP_TRENDS_HOST "G5" +#define 
CONTEXTHELP_TRENDS_SERVICE "G6" + +#define CONTEXTHELP_AVAIL_MENU1 "H1" +#define CONTEXTHELP_AVAIL_MENU2 "H2" +#define CONTEXTHELP_AVAIL_MENU3 "H3" +#define CONTEXTHELP_AVAIL_MENU4 "H4" +#define CONTEXTHELP_AVAIL_MENU5 "H5" +#define CONTEXTHELP_AVAIL_HOSTGROUP "H6" +#define CONTEXTHELP_AVAIL_HOST "H7" +#define CONTEXTHELP_AVAIL_SERVICE "H8" +#define CONTEXTHELP_AVAIL_SERVICEGROUP "H9" + +#define CONTEXTHELP_EXT_HOST "I1" +#define CONTEXTHELP_EXT_SERVICE "I2" +#define CONTEXTHELP_EXT_HOSTGROUP "I3" +#define CONTEXTHELP_EXT_PROCESS "I4" +#define CONTEXTHELP_EXT_PERFORMANCE "I5" +#define CONTEXTHELP_EXT_COMMENTS "I6" +#define CONTEXTHELP_EXT_DOWNTIME "I7" +#define CONTEXTHELP_EXT_QUEUE "I8" +#define CONTEXTHELP_EXT_SERVICEGROUP "I9" + +#define CONTEXTHELP_CMD_INPUT "J1" +#define CONTEXTHELP_CMD_COMMIT "J2" + +#define CONTEXTHELP_OUTAGES "K1" + +#define CONTEXTHELP_CONFIG_MENU "L1" +#define CONTEXTHELP_CONFIG_HOSTS "L2" +#define CONTEXTHELP_CONFIG_HOSTDEPENDENCIES "L3" +#define CONTEXTHELP_CONFIG_HOSTESCALATIONS "L4" +#define CONTEXTHELP_CONFIG_HOSTGROUPS "L5" +#define CONTEXTHELP_CONFIG_HOSTGROUPESCALATIONS "L6" +#define CONTEXTHELP_CONFIG_SERVICES "L7" +#define CONTEXTHELP_CONFIG_SERVICEDEPENDENCIES "L8" +#define CONTEXTHELP_CONFIG_SERVICEESCALATIONS "L9" +#define CONTEXTHELP_CONFIG_CONTACTS "L10" +#define CONTEXTHELP_CONFIG_CONTACTGROUPS "L11" +#define CONTEXTHELP_CONFIG_TIMEPERIODS "L12" +#define CONTEXTHELP_CONFIG_COMMANDS "L13" +#define CONTEXTHELP_CONFIG_HOSTEXTINFO "L14" +#define CONTEXTHELP_CONFIG_SERVICEEXTINFO "L15" +#define CONTEXTHELP_CONFIG_SERVICEGROUPS "L16" + +#define CONTEXTHELP_HISTOGRAM_MENU1 "M1" +#define CONTEXTHELP_HISTOGRAM_MENU2 "M2" +#define CONTEXTHELP_HISTOGRAM_MENU3 "M3" +#define CONTEXTHELP_HISTOGRAM_MENU4 "M4" +#define CONTEXTHELP_HISTOGRAM_HOST "M5" +#define CONTEXTHELP_HISTOGRAM_SERVICE "M6" + +#define CONTEXTHELP_SUMMARY_MENU "N1" +#define CONTEXTHELP_SUMMARY_RECENT_ALERTS "N2" +#define CONTEXTHELP_SUMMARY_ALERT_TOTALS "N3" +#define CONTEXTHELP_SUMMARY_HOSTGROUP_ALERT_TOTALS "N4" +#define CONTEXTHELP_SUMMARY_HOST_ALERT_TOTALS "N5" +#define CONTEXTHELP_SUMMARY_SERVICE_ALERT_TOTALS "N6" +#define CONTEXTHELP_SUMMARY_ALERT_PRODUCERS "N7" +#define CONTEXTHELP_SUMMARY_SERVICEGROUP_ALERT_TOTALS "N8" + + + /************************** LIFO RETURN CODES ****************************/ + +#define LIFO_OK 0 +#define LIFO_ERROR_MEMORY 1 +#define LIFO_ERROR_FILE 2 +#define LIFO_ERROR_DATA 3 + + + + + +/*************************** DATA STRUCTURES *****************************/ + +/* LIFO data structure */ +typedef struct lifo_struct { + char *data; + struct lifo_struct *next; + } lifo; + +/******************************** FUNCTIONS *******************************/ + +void reset_cgi_vars(void); +void cgi_init(void (*doc_header)(int), void (*doc_footer)(void), int object_options, int status_options); +void free_memory(void); + +const char *get_cgi_config_location(void); /* gets location of the CGI config file to read */ +const char *get_cmd_file_location(void); /* gets location of external command file to write to */ + +int read_cgi_config_file(const char *); +int read_main_config_file(const char *); +int read_all_object_configuration_data(const char *, int); +int read_all_status_data(const char *, int); + +char *unescape_newlines(char *); +void sanitize_plugin_output(char *); /* strips HTML and bad characters from plugin output */ +void strip_html_brackets(char *); /* strips > and < from string */ + +void get_time_string(time_t *, char *, int, int); /* gets a date/time string 
*/ +void get_interval_time_string(double, char *, int); /* gets a time string for an interval of time */ + +const char *url_encode(const char *); /* encodes a string in proper URL format */ +char *html_encode(char *, int); /* encodes a string in HTML format (for what the user sees) */ +char *escape_string(const char *); /* escape string for html form usage */ + +void get_log_archive_to_use(int, char *, int); /* determines the name of the log archive to use */ +void determine_log_rotation_times(int); +int determine_archive_to_use_from_time(time_t); + +void print_extra_hostgroup_url(char *, char *); +void print_extra_servicegroup_url(char *, char *); + +void display_info_table(const char *, int, authdata *); +void display_nav_table(char *, int); + +void display_splunk_host_url(host *); +void display_splunk_service_url(service *); +void display_splunk_generic_url(char *, int); +void strip_splunk_query_terms(char *); + +void include_ssi_files(const char *, int); /* include user-defined SSI footers/headers */ +void include_ssi_file(const char *); /* include user-defined SSI footer/header */ + +void cgi_config_file_error(const char *); +void main_config_file_error(const char *); +void object_data_error(void); +void status_data_error(void); + +void display_context_help(const char *); /* displays context-sensitive help window */ + +int read_file_into_lifo(char *); /* LIFO functions */ +void free_lifo_memory(void); +int push_lifo(char *); +char *pop_lifo(void); + +NAGIOS_END_DECL +#endif diff -Nru check-mk-1.2.2p3/nagios4/comments.h check-mk-1.2.6p12/nagios4/comments.h --- check-mk-1.2.2p3/nagios4/comments.h 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/nagios4/comments.h 2014-02-10 09:00:20.000000000 +0000 @@ -0,0 +1,118 @@ +/***************************************************************************** + * + * COMMENTS.H - Header file for comment functions + * + * + * License: + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
+ * + *****************************************************************************/ + + +#ifndef _COMMENTS_H +#define _COMMENTS_H +#include "common.h" +#include "objects.h" + + +/**************************** COMMENT SOURCES ******************************/ + +#define COMMENTSOURCE_INTERNAL 0 +#define COMMENTSOURCE_EXTERNAL 1 + + + +/***************************** COMMENT TYPES *******************************/ + +#define HOST_COMMENT 1 +#define SERVICE_COMMENT 2 + + +/****************************** ENTRY TYPES ********************************/ + +#define USER_COMMENT 1 +#define DOWNTIME_COMMENT 2 +#define FLAPPING_COMMENT 3 +#define ACKNOWLEDGEMENT_COMMENT 4 + + +/*************************** CHAINED HASH LIMITS ***************************/ + +#define COMMENT_HASHSLOTS 1024 + + +/**************************** DATA STRUCTURES ******************************/ + +NAGIOS_BEGIN_DECL + +/* COMMENT structure */ +typedef struct comment { + int comment_type; + int entry_type; + unsigned long comment_id; + int source; + int persistent; + time_t entry_time; + int expires; + time_t expire_time; + char *host_name; + char *service_description; + char *author; + char *comment_data; + struct comment *next; + struct comment *nexthash; + } comment; + +extern struct comment *comment_list; + +#ifndef NSCGI +int initialize_comment_data(void); /* initializes comment data */ +int add_new_comment(int, int, char *, char *, time_t, char *, char *, int, int, int, time_t, unsigned long *); /* adds a new host or service comment */ +int add_new_host_comment(int, char *, time_t, char *, char *, int, int, int, time_t, unsigned long *); /* adds a new host comment */ +int add_new_service_comment(int, char *, char *, time_t, char *, char *, int, int, int, time_t, unsigned long *); /* adds a new service comment */ +int delete_comment(int, unsigned long); /* deletes a host or service comment */ +int delete_host_comment(unsigned long); /* deletes a host comment */ +int delete_service_comment(unsigned long); /* deletes a service comment */ +int delete_all_comments(int, char *, char *); /* deletes all comments for a particular host or service */ +int delete_all_host_comments(char *); /* deletes all comments for a specific host */ +int delete_host_acknowledgement_comments(struct host *); /* deletes all non-persistent ack comments for a specific host */ +int delete_all_service_comments(char *, char *); /* deletes all comments for a specific service */ +int delete_service_acknowledgement_comments(struct service *); /* deletes all non-persistent ack comments for a specific service */ + +int check_for_expired_comment(unsigned long); /* expires a comment */ +#endif + +struct comment *find_comment(unsigned long, int); /* finds a specific comment */ +struct comment *find_service_comment(unsigned long); /* finds a specific service comment */ +struct comment *find_host_comment(unsigned long); /* finds a specific host comment */ + +struct comment *get_first_comment_by_host(char *); +struct comment *get_next_comment_by_host(char *, struct comment *); + +int number_of_host_comments(char *); /* returns the number of comments associated with a particular host */ +int number_of_service_comments(char *, char *); /* returns the number of comments associated with a particular service */ + +int add_comment(int, int, char *, char *, time_t, char *, char *, unsigned long, int, int, time_t, int); /* adds a comment (host or service) */ +int sort_comments(void); +int add_host_comment(int, char *, time_t, char *, char *, unsigned long, int, int, time_t, int); 
/* adds a host comment */
+int add_service_comment(int, char *, char *, time_t, char *, char *, unsigned long, int, int, time_t, int); /* adds a service comment */
+
+int add_comment_to_hashlist(struct comment *);
+
+void free_comment_data(void); /* frees memory allocated to the comment list */
+
+NAGIOS_END_DECL
+
+#endif
diff -Nru check-mk-1.2.2p3/nagios4/common.h check-mk-1.2.6p12/nagios4/common.h
--- check-mk-1.2.2p3/nagios4/common.h 1970-01-01 00:00:00.000000000 +0000
+++ check-mk-1.2.6p12/nagios4/common.h 2014-02-10 09:00:20.000000000 +0000
@@ -0,0 +1,531 @@
+/************************************************************************
+ *
+ * Nagios Common Header File
+ * Written By: Ethan Galstad (egalstad@nagios.org)
+ *
+ * License:
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ ************************************************************************/
+
+#ifndef INCLUDE_COMMON_H
+#define INCLUDE_COMMON_H
+
+#include "shared.h"
+
+#define PROGRAM_VERSION "4.0.2"
+#define PROGRAM_MODIFICATION_DATE "11-25-2013"
+
+NAGIOS_BEGIN_DECL
+
+/*************************************************************/
+/************** SHARED GLOBAL VARIABLES **********************/
+/*************************************************************/
+extern int date_format;
+extern int interval_length;
+extern char *illegal_output_chars;
+extern char illegal_output_char_map[256];
+
+extern int log_rotation_method;
+extern int check_external_commands;
+/* set this if you're going to add a ton of comments at once */
+extern int defer_comment_sorting;
+extern unsigned long next_downtime_id;
+
+extern char *object_cache_file;
+extern char *status_file;
+
+extern time_t program_start;
+extern int nagios_pid;
+extern int daemon_mode;
+
+extern time_t last_log_rotation;
+
+extern int process_performance_data;
+extern int enable_flap_detection;
+extern int enable_notifications;
+extern int execute_service_checks;
+extern int accept_passive_service_checks;
+extern int execute_host_checks;
+extern int accept_passive_host_checks;
+extern int enable_event_handlers;
+extern int obsess_over_services;
+extern int obsess_over_hosts;
+
+extern int enable_timing_point;
+
+extern char *config_file_dir;
+
+#ifdef HAVE_TZNAME
+#ifdef CYGWIN
+extern char *_tzname[2] __declspec(dllimport);
+#else
+extern char *tzname[2];
+#endif
+#endif
+
+
+NAGIOS_END_DECL
+
+
+/* Experimental performance tweaks - use with caution */
+#undef USE_MEMORY_PERFORMANCE_TWEAKS
+
+
+/****************** OBJECT STATES ********************/
+#define STATE_OK 0
+#define STATE_WARNING 1
+#define STATE_CRITICAL 2
+#define STATE_UNKNOWN 3
+#define STATE_UP 0
+#define STATE_DOWN 1
+#define STATE_UNREACHABLE 2
+/* for legacy reasons */
+#define HOST_UP STATE_UP
+#define HOST_DOWN STATE_DOWN
+#define HOST_UNREACHABLE STATE_UNREACHABLE
+
+/***************************** COMMANDS *********************************/
+
+#define CMD_NONE 0
+
+#define CMD_ADD_HOST_COMMENT 1
+#define CMD_DEL_HOST_COMMENT 2 + +#define CMD_ADD_SVC_COMMENT 3 +#define CMD_DEL_SVC_COMMENT 4 + +#define CMD_ENABLE_SVC_CHECK 5 +#define CMD_DISABLE_SVC_CHECK 6 + +#define CMD_SCHEDULE_SVC_CHECK 7 + +#define CMD_DELAY_SVC_NOTIFICATION 9 + +#define CMD_DELAY_HOST_NOTIFICATION 10 + +#define CMD_DISABLE_NOTIFICATIONS 11 +#define CMD_ENABLE_NOTIFICATIONS 12 + +#define CMD_RESTART_PROCESS 13 +#define CMD_SHUTDOWN_PROCESS 14 + +#define CMD_ENABLE_HOST_SVC_CHECKS 15 +#define CMD_DISABLE_HOST_SVC_CHECKS 16 + +#define CMD_SCHEDULE_HOST_SVC_CHECKS 17 + +#define CMD_DELAY_HOST_SVC_NOTIFICATIONS 19 /* currently unimplemented */ + +#define CMD_DEL_ALL_HOST_COMMENTS 20 +#define CMD_DEL_ALL_SVC_COMMENTS 21 + +#define CMD_ENABLE_SVC_NOTIFICATIONS 22 +#define CMD_DISABLE_SVC_NOTIFICATIONS 23 +#define CMD_ENABLE_HOST_NOTIFICATIONS 24 +#define CMD_DISABLE_HOST_NOTIFICATIONS 25 +#define CMD_ENABLE_ALL_NOTIFICATIONS_BEYOND_HOST 26 +#define CMD_DISABLE_ALL_NOTIFICATIONS_BEYOND_HOST 27 +#define CMD_ENABLE_HOST_SVC_NOTIFICATIONS 28 +#define CMD_DISABLE_HOST_SVC_NOTIFICATIONS 29 + +#define CMD_PROCESS_SERVICE_CHECK_RESULT 30 + +#define CMD_SAVE_STATE_INFORMATION 31 +#define CMD_READ_STATE_INFORMATION 32 + +#define CMD_ACKNOWLEDGE_HOST_PROBLEM 33 +#define CMD_ACKNOWLEDGE_SVC_PROBLEM 34 + +#define CMD_START_EXECUTING_SVC_CHECKS 35 +#define CMD_STOP_EXECUTING_SVC_CHECKS 36 + +#define CMD_START_ACCEPTING_PASSIVE_SVC_CHECKS 37 +#define CMD_STOP_ACCEPTING_PASSIVE_SVC_CHECKS 38 + +#define CMD_ENABLE_PASSIVE_SVC_CHECKS 39 +#define CMD_DISABLE_PASSIVE_SVC_CHECKS 40 + +#define CMD_ENABLE_EVENT_HANDLERS 41 +#define CMD_DISABLE_EVENT_HANDLERS 42 + +#define CMD_ENABLE_HOST_EVENT_HANDLER 43 +#define CMD_DISABLE_HOST_EVENT_HANDLER 44 + +#define CMD_ENABLE_SVC_EVENT_HANDLER 45 +#define CMD_DISABLE_SVC_EVENT_HANDLER 46 + +#define CMD_ENABLE_HOST_CHECK 47 +#define CMD_DISABLE_HOST_CHECK 48 + +#define CMD_START_OBSESSING_OVER_SVC_CHECKS 49 +#define CMD_STOP_OBSESSING_OVER_SVC_CHECKS 50 + +#define CMD_REMOVE_HOST_ACKNOWLEDGEMENT 51 +#define CMD_REMOVE_SVC_ACKNOWLEDGEMENT 52 + +#define CMD_SCHEDULE_FORCED_HOST_SVC_CHECKS 53 +#define CMD_SCHEDULE_FORCED_SVC_CHECK 54 + +#define CMD_SCHEDULE_HOST_DOWNTIME 55 +#define CMD_SCHEDULE_SVC_DOWNTIME 56 + +#define CMD_ENABLE_HOST_FLAP_DETECTION 57 +#define CMD_DISABLE_HOST_FLAP_DETECTION 58 + +#define CMD_ENABLE_SVC_FLAP_DETECTION 59 +#define CMD_DISABLE_SVC_FLAP_DETECTION 60 + +#define CMD_ENABLE_FLAP_DETECTION 61 +#define CMD_DISABLE_FLAP_DETECTION 62 + +#define CMD_ENABLE_HOSTGROUP_SVC_NOTIFICATIONS 63 +#define CMD_DISABLE_HOSTGROUP_SVC_NOTIFICATIONS 64 + +#define CMD_ENABLE_HOSTGROUP_HOST_NOTIFICATIONS 65 +#define CMD_DISABLE_HOSTGROUP_HOST_NOTIFICATIONS 66 + +#define CMD_ENABLE_HOSTGROUP_SVC_CHECKS 67 +#define CMD_DISABLE_HOSTGROUP_SVC_CHECKS 68 + +/* commands 69-77 are unimplemented */ +#define CMD_UNIMPLEMENTED_69 69 +#define CMD_UNIMPLEMENTED_70 70 +#define CMD_UNIMPLEMENTED_71 71 +#define CMD_UNIMPLEMENTED_72 72 +#define CMD_UNIMPLEMENTED_73 73 +#define CMD_UNIMPLEMENTED_74 74 +#define CMD_UNIMPLEMENTED_75 75 +#define CMD_UNIMPLEMENTED_76 76 +#define CMD_UNIMPLEMENTED_77 77 + +#define CMD_DEL_HOST_DOWNTIME 78 +#define CMD_DEL_SVC_DOWNTIME 79 + +#define CMD_ENABLE_PERFORMANCE_DATA 82 +#define CMD_DISABLE_PERFORMANCE_DATA 83 + +#define CMD_SCHEDULE_HOSTGROUP_HOST_DOWNTIME 84 +#define CMD_SCHEDULE_HOSTGROUP_SVC_DOWNTIME 85 +#define CMD_SCHEDULE_HOST_SVC_DOWNTIME 86 + +/* new commands in Nagios 2.x found below... 
*/ +#define CMD_PROCESS_HOST_CHECK_RESULT 87 + +#define CMD_START_EXECUTING_HOST_CHECKS 88 +#define CMD_STOP_EXECUTING_HOST_CHECKS 89 + +#define CMD_START_ACCEPTING_PASSIVE_HOST_CHECKS 90 +#define CMD_STOP_ACCEPTING_PASSIVE_HOST_CHECKS 91 + +#define CMD_ENABLE_PASSIVE_HOST_CHECKS 92 +#define CMD_DISABLE_PASSIVE_HOST_CHECKS 93 + +#define CMD_START_OBSESSING_OVER_HOST_CHECKS 94 +#define CMD_STOP_OBSESSING_OVER_HOST_CHECKS 95 + +#define CMD_SCHEDULE_HOST_CHECK 96 +#define CMD_SCHEDULE_FORCED_HOST_CHECK 98 + +#define CMD_START_OBSESSING_OVER_SVC 99 +#define CMD_STOP_OBSESSING_OVER_SVC 100 + +#define CMD_START_OBSESSING_OVER_HOST 101 +#define CMD_STOP_OBSESSING_OVER_HOST 102 + +#define CMD_ENABLE_HOSTGROUP_HOST_CHECKS 103 +#define CMD_DISABLE_HOSTGROUP_HOST_CHECKS 104 + +#define CMD_ENABLE_HOSTGROUP_PASSIVE_SVC_CHECKS 105 +#define CMD_DISABLE_HOSTGROUP_PASSIVE_SVC_CHECKS 106 + +#define CMD_ENABLE_HOSTGROUP_PASSIVE_HOST_CHECKS 107 +#define CMD_DISABLE_HOSTGROUP_PASSIVE_HOST_CHECKS 108 + +#define CMD_ENABLE_SERVICEGROUP_SVC_NOTIFICATIONS 109 +#define CMD_DISABLE_SERVICEGROUP_SVC_NOTIFICATIONS 110 + +#define CMD_ENABLE_SERVICEGROUP_HOST_NOTIFICATIONS 111 +#define CMD_DISABLE_SERVICEGROUP_HOST_NOTIFICATIONS 112 + +#define CMD_ENABLE_SERVICEGROUP_SVC_CHECKS 113 +#define CMD_DISABLE_SERVICEGROUP_SVC_CHECKS 114 + +#define CMD_ENABLE_SERVICEGROUP_HOST_CHECKS 115 +#define CMD_DISABLE_SERVICEGROUP_HOST_CHECKS 116 + +#define CMD_ENABLE_SERVICEGROUP_PASSIVE_SVC_CHECKS 117 +#define CMD_DISABLE_SERVICEGROUP_PASSIVE_SVC_CHECKS 118 + +#define CMD_ENABLE_SERVICEGROUP_PASSIVE_HOST_CHECKS 119 +#define CMD_DISABLE_SERVICEGROUP_PASSIVE_HOST_CHECKS 120 + +#define CMD_SCHEDULE_SERVICEGROUP_HOST_DOWNTIME 121 +#define CMD_SCHEDULE_SERVICEGROUP_SVC_DOWNTIME 122 + +#define CMD_CHANGE_GLOBAL_HOST_EVENT_HANDLER 123 +#define CMD_CHANGE_GLOBAL_SVC_EVENT_HANDLER 124 + +#define CMD_CHANGE_HOST_EVENT_HANDLER 125 +#define CMD_CHANGE_SVC_EVENT_HANDLER 126 + +#define CMD_CHANGE_HOST_CHECK_COMMAND 127 +#define CMD_CHANGE_SVC_CHECK_COMMAND 128 + +#define CMD_CHANGE_NORMAL_HOST_CHECK_INTERVAL 129 +#define CMD_CHANGE_NORMAL_SVC_CHECK_INTERVAL 130 +#define CMD_CHANGE_RETRY_SVC_CHECK_INTERVAL 131 + +#define CMD_CHANGE_MAX_HOST_CHECK_ATTEMPTS 132 +#define CMD_CHANGE_MAX_SVC_CHECK_ATTEMPTS 133 + +#define CMD_SCHEDULE_AND_PROPAGATE_TRIGGERED_HOST_DOWNTIME 134 + +#define CMD_ENABLE_HOST_AND_CHILD_NOTIFICATIONS 135 +#define CMD_DISABLE_HOST_AND_CHILD_NOTIFICATIONS 136 + +#define CMD_SCHEDULE_AND_PROPAGATE_HOST_DOWNTIME 137 + +#define CMD_ENABLE_SERVICE_FRESHNESS_CHECKS 138 +#define CMD_DISABLE_SERVICE_FRESHNESS_CHECKS 139 + +#define CMD_ENABLE_HOST_FRESHNESS_CHECKS 140 +#define CMD_DISABLE_HOST_FRESHNESS_CHECKS 141 + +#define CMD_SET_HOST_NOTIFICATION_NUMBER 142 +#define CMD_SET_SVC_NOTIFICATION_NUMBER 143 + +/* new commands in Nagios 3.x found below... 
*/ +#define CMD_CHANGE_HOST_CHECK_TIMEPERIOD 144 +#define CMD_CHANGE_SVC_CHECK_TIMEPERIOD 145 + +#define CMD_PROCESS_FILE 146 + +#define CMD_CHANGE_CUSTOM_HOST_VAR 147 +#define CMD_CHANGE_CUSTOM_SVC_VAR 148 +#define CMD_CHANGE_CUSTOM_CONTACT_VAR 149 + +#define CMD_ENABLE_CONTACT_HOST_NOTIFICATIONS 150 +#define CMD_DISABLE_CONTACT_HOST_NOTIFICATIONS 151 +#define CMD_ENABLE_CONTACT_SVC_NOTIFICATIONS 152 +#define CMD_DISABLE_CONTACT_SVC_NOTIFICATIONS 153 + +#define CMD_ENABLE_CONTACTGROUP_HOST_NOTIFICATIONS 154 +#define CMD_DISABLE_CONTACTGROUP_HOST_NOTIFICATIONS 155 +#define CMD_ENABLE_CONTACTGROUP_SVC_NOTIFICATIONS 156 +#define CMD_DISABLE_CONTACTGROUP_SVC_NOTIFICATIONS 157 + +#define CMD_CHANGE_RETRY_HOST_CHECK_INTERVAL 158 + +#define CMD_SEND_CUSTOM_HOST_NOTIFICATION 159 +#define CMD_SEND_CUSTOM_SVC_NOTIFICATION 160 + +#define CMD_CHANGE_HOST_NOTIFICATION_TIMEPERIOD 161 +#define CMD_CHANGE_SVC_NOTIFICATION_TIMEPERIOD 162 +#define CMD_CHANGE_CONTACT_HOST_NOTIFICATION_TIMEPERIOD 163 +#define CMD_CHANGE_CONTACT_SVC_NOTIFICATION_TIMEPERIOD 164 + +#define CMD_CHANGE_HOST_MODATTR 165 +#define CMD_CHANGE_SVC_MODATTR 166 +#define CMD_CHANGE_CONTACT_MODATTR 167 +#define CMD_CHANGE_CONTACT_MODHATTR 168 +#define CMD_CHANGE_CONTACT_MODSATTR 169 + +#define CMD_DEL_DOWNTIME_BY_HOST_NAME 170 +#define CMD_DEL_DOWNTIME_BY_HOSTGROUP_NAME 171 +#define CMD_DEL_DOWNTIME_BY_START_TIME_COMMENT 172 + +/* custom command introduced in Nagios 3.x */ +#define CMD_CUSTOM_COMMAND 999 + +/**************************** COMMAND ERRORS *****************************/ +#define CMD_ERROR_OK 0 /* No errors encountered */ +#define CMD_ERROR_UNKNOWN_COMMAND 1 /* Unknown/unsupported command */ +#define CMD_ERROR_MALFORMED_COMMAND 2 /* Command malformed/missing timestamp? */ +#define CMD_ERROR_INTERNAL_ERROR 3 /* Internal error */ +#define CMD_ERROR_FAILURE 4 /* Command routine failed */ + +extern const char *cmd_error_strerror(int error_code); + +/**************************** CHECK TYPES ********************************/ + +#define CHECK_TYPE_ACTIVE 0 +#define CHECK_TYPE_PASSIVE 1 +#define CHECK_TYPE_PARENT 2 /* (active) check for the benefit of dependent objects */ +#define CHECK_TYPE_FILE 3 /* from spool files (yuck) */ +#define CHECK_TYPE_OTHER 4 /* for modules to use */ + + +/************* LEGACY (deprecated) CHECK TYPES ***************************/ + +#define SERVICE_CHECK_ACTIVE CHECK_TYPE_ACTIVE +#define SERVICE_CHECK_PASSIVE CHECK_TYPE_PASSIVE +#define HOST_CHECK_ACTIVE CHECK_TYPE_ACTIVE +#define HOST_CHECK_PASSIVE CHECK_TYPE_PASSIVE + + +/************************ SERVICE STATE TYPES ****************************/ + +#define SOFT_STATE 0 +#define HARD_STATE 1 + + +/************************* SCHEDULED DOWNTIME TYPES **********************/ + +#define SERVICE_DOWNTIME 1 /* service downtime */ +#define HOST_DOWNTIME 2 /* host downtime */ +#define ANY_DOWNTIME 3 /* host or service downtime */ + + +/************************** NOTIFICATION OPTIONS *************************/ + +#define NOTIFICATION_OPTION_NONE 0 +#define NOTIFICATION_OPTION_BROADCAST 1 +#define NOTIFICATION_OPTION_FORCED 2 +#define NOTIFICATION_OPTION_INCREMENT 4 + + +/************************** ACKNOWLEDGEMENT TYPES ************************/ + +#define HOST_ACKNOWLEDGEMENT 0 +#define SERVICE_ACKNOWLEDGEMENT 1 + +#define ACKNOWLEDGEMENT_NONE 0 +#define ACKNOWLEDGEMENT_NORMAL 1 +#define ACKNOWLEDGEMENT_STICKY 2 + + +/**************************** DEPENDENCY TYPES ***************************/ + +#define NOTIFICATION_DEPENDENCY 1 +#define EXECUTION_DEPENDENCY 2 + + + 
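+/* Usage sketch (editorial, not part of the original header; the
+   'options' variable is hypothetical): the NOTIFICATION_OPTION_*
+   values above are power-of-two bit flags, so callers OR them into a
+   single int and test them with a bitwise AND. */
+#if 0
+int options = NOTIFICATION_OPTION_FORCED | NOTIFICATION_OPTION_INCREMENT;
+
+if (options & NOTIFICATION_OPTION_FORCED) {
+	/* bypass the normal notification filters */
+	}
+if (options & NOTIFICATION_OPTION_INCREMENT) {
+	/* bump the notification number */
+	}
+#endif
+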
+/********************** HOST/SERVICE CHECK OPTIONS ***********************/ + +#define CHECK_OPTION_NONE 0 /* no check options */ +#define CHECK_OPTION_FORCE_EXECUTION 1 /* force execution of a check (ignores disabled services/hosts, invalid timeperiods) */ +#define CHECK_OPTION_FRESHNESS_CHECK 2 /* this is a freshness check */ +#define CHECK_OPTION_ORPHAN_CHECK 4 /* this is an orphan check */ +#define CHECK_OPTION_DEPENDENCY_CHECK 8 /* dependency check. different scheduling rules apply */ + + +/**************************** PROGRAM MODES ******************************/ + +#define STANDBY_MODE 0 +#define ACTIVE_MODE 1 + + +/************************** LOG ROTATION MODES ***************************/ + +#define LOG_ROTATION_NONE 0 +#define LOG_ROTATION_HOURLY 1 +#define LOG_ROTATION_DAILY 2 +#define LOG_ROTATION_WEEKLY 3 +#define LOG_ROTATION_MONTHLY 4 + + +/***************************** LOG VERSIONS ******************************/ + +#define LOG_VERSION_1 "1.0" +#define LOG_VERSION_2 "2.0" + + + +/*************************** CHECK STATISTICS ****************************/ + +#define ACTIVE_SCHEDULED_SERVICE_CHECK_STATS 0 +#define ACTIVE_ONDEMAND_SERVICE_CHECK_STATS 1 +#define PASSIVE_SERVICE_CHECK_STATS 2 +#define ACTIVE_SCHEDULED_HOST_CHECK_STATS 3 +#define ACTIVE_ONDEMAND_HOST_CHECK_STATS 4 +#define PASSIVE_HOST_CHECK_STATS 5 +#define ACTIVE_CACHED_HOST_CHECK_STATS 6 +#define ACTIVE_CACHED_SERVICE_CHECK_STATS 7 +#define EXTERNAL_COMMAND_STATS 8 +#define PARALLEL_HOST_CHECK_STATS 9 +#define SERIAL_HOST_CHECK_STATS 10 +#define MAX_CHECK_STATS_TYPES 11 + + +/****************** HOST CONFIG FILE READING OPTIONS ********************/ + +#define READ_HOSTS 1 +#define READ_HOSTGROUPS 2 +#define READ_CONTACTS 4 +#define READ_CONTACTGROUPS 8 +#define READ_SERVICES 16 +#define READ_COMMANDS 32 +#define READ_TIMEPERIODS 64 +#define READ_SERVICEESCALATIONS 128 +#define READ_HOSTGROUPESCALATIONS 256 /* no longer implemented */ +#define READ_SERVICEDEPENDENCIES 512 +#define READ_HOSTDEPENDENCIES 1024 +#define READ_HOSTESCALATIONS 2048 +#define READ_HOSTEXTINFO 4096 +#define READ_SERVICEEXTINFO 8192 +#define READ_SERVICEGROUPS 16384 + +#define READ_ALL_OBJECT_DATA READ_HOSTS | READ_HOSTGROUPS | READ_CONTACTS | READ_CONTACTGROUPS | READ_SERVICES | READ_COMMANDS | READ_TIMEPERIODS | READ_SERVICEESCALATIONS | READ_SERVICEDEPENDENCIES | READ_HOSTDEPENDENCIES | READ_HOSTESCALATIONS | READ_HOSTEXTINFO | READ_SERVICEEXTINFO | READ_SERVICEGROUPS + + +/************************** DATE/TIME TYPES *****************************/ + +#define LONG_DATE_TIME 0 +#define SHORT_DATE_TIME 1 +#define SHORT_DATE 2 +#define SHORT_TIME 3 +#define HTTP_DATE_TIME 4 /* time formatted for use in HTTP headers */ + + +/**************************** DATE FORMATS ******************************/ + +#define DATE_FORMAT_US 0 /* U.S. (MM-DD-YYYY HH:MM:SS) */ +#define DATE_FORMAT_EURO 1 /* European (DD-MM-YYYY HH:MM:SS) */ +#define DATE_FORMAT_ISO8601 2 /* ISO8601 (YYYY-MM-DD HH:MM:SS) */ +#define DATE_FORMAT_STRICT_ISO8601 3 /* ISO8601 (YYYY-MM-DDTHH:MM:SS) */ + + +/************************** MISC DEFINITIONS ****************************/ + +#define MAX_FILENAME_LENGTH 256 /* max length of path/filename that Nagios will process */ +#define MAX_INPUT_BUFFER 1024 /* size in bytes of max. 
input buffer (for reading files, misc stuff) */ +#define MAX_COMMAND_BUFFER 8192 /* max length of raw or processed command line */ +#define MAX_EXTERNAL_COMMAND_LENGTH 8192 /* max length of an external command */ + +#define MAX_DATETIME_LENGTH 48 + + +/************************* MODIFIED ATTRIBUTES **************************/ + +#define MODATTR_NONE 0 +#define MODATTR_NOTIFICATIONS_ENABLED 1 +#define MODATTR_ACTIVE_CHECKS_ENABLED 2 +#define MODATTR_PASSIVE_CHECKS_ENABLED 4 +#define MODATTR_EVENT_HANDLER_ENABLED 8 +#define MODATTR_FLAP_DETECTION_ENABLED 16 +#define MODATTR_FAILURE_PREDICTION_ENABLED 32 +#define MODATTR_PERFORMANCE_DATA_ENABLED 64 +#define MODATTR_OBSESSIVE_HANDLER_ENABLED 128 +#define MODATTR_EVENT_HANDLER_COMMAND 256 +#define MODATTR_CHECK_COMMAND 512 +#define MODATTR_NORMAL_CHECK_INTERVAL 1024 +#define MODATTR_RETRY_CHECK_INTERVAL 2048 +#define MODATTR_MAX_CHECK_ATTEMPTS 4096 +#define MODATTR_FRESHNESS_CHECKS_ENABLED 8192 +#define MODATTR_CHECK_TIMEPERIOD 16384 +#define MODATTR_CUSTOM_VARIABLE 32768 +#define MODATTR_NOTIFICATION_TIMEPERIOD 65536 +#endif /* INCLUDE_COMMON_H */ diff -Nru check-mk-1.2.2p3/nagios4/config.h check-mk-1.2.6p12/nagios4/config.h --- check-mk-1.2.2p3/nagios4/config.h 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/nagios4/config.h 2014-02-10 09:00:20.000000000 +0000 @@ -0,0 +1,344 @@ +/* include/config.h. Generated from config.h.in by configure. */ +/************************************************************************ + * + * Nagios Config Header File + * Written By: Ethan Galstad (egalstad@nagios.org) + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + ************************************************************************/ + + +/***** NAGIOS STUFF *****/ + +#define DEFAULT_NAGIOS_USER "nagios" +#define DEFAULT_NAGIOS_GROUP "nagios" + +/* stop gcc from bitching about implicit asprintf declarations */ +#define _GNU_SOURCE 1 + +/* Event broker integration */ +#define USE_EVENT_BROKER /**/ + +/* commands used by CGIs */ +#define TRACEROUTE_COMMAND "/usr/sbin/traceroute" +/* #undef PING_COMMAND */ +/* #undef PING_PACKETS_FIRST */ + +/* Debugging options */ +/* function entry and exit */ +/* #undef DEBUG0 */ +/* general info messages */ +/* #undef DEBUG1 */ +/* warning messages */ +/* #undef DEBUG2 */ +/* service and host checks, other events */ +/* #undef DEBUG3 */ +/* service and host notifications */ +/* #undef DEBUG4 */ +/* SQL queries (defunct) */ +/* #undef DEBUG5 */ + +/* I/O implementations */ +/* #undef USE_XSDDEFAULT */ +/* #undef USE_XCDDEFAULT */ +/* #undef USE_XRDDEFAULT */ +/* #undef USE_XODTEMPLATE */ +/* #undef USE_XPDDEFAULT */ +/* #undef USE_XDDDEFAULT */ + + +/***** CGI COMPILE OPTIONS *****/ +/* should we compile and use the statusmap CGI? */ +/* #undef USE_STATUSMAP */ +/* should we compile and use the statuswrl CGI? */ +#define USE_STATUSWRL /**/ +/* should we compile and use the trends CGI? 
*/
+/* #undef USE_TRENDS */
+/* should we compile and use the histogram CGI? */
+/* #undef USE_HISTOGRAM */
+
+
+
+/***** FUNCTION DEFINITIONS *****/
+
+#define HAVE_SETENV 1
+#define HAVE_UNSETENV 1
+/* #undef HAVE_SOCKET */
+#define HAVE_STRDUP 1
+#define HAVE_STRSTR 1
+#define HAVE_STRTOUL 1
+#define HAVE_INITGROUPS 1
+/* #undef HAVE_GETLOADAVG */
+/* #undef HAVE_GDIMAGECREATETRUECOLOR */
+
+
+
+/***** ASPRINTF() AND FRIENDS *****/
+
+/* #undef HAVE_VSNPRINTF */
+/* #undef HAVE_SNPRINTF */
+/* #undef HAVE_ASPRINTF */
+/* #undef HAVE_VASPRINTF */
+#define HAVE_C99_VSNPRINTF 1
+#define HAVE_VA_COPY 1
+/* #undef HAVE___VA_COPY */
+
+
+
+/***** MISC DEFINITIONS *****/
+
+#define USE_NANOSLEEP /**/
+#define STDC_HEADERS 1
+#define HAVE_TM_ZONE 1
+/* #undef HAVE_TZNAME */
+/* #undef USE_PROC */
+#define SOCKET_SIZE_TYPE size_t
+#define GETGROUPS_T gid_t
+#define RETSIGTYPE void
+
+
+
+/***** HEADER FILES *****/
+
+#include <stdio.h>
+#include <stdlib.h>
+
+/* needed for the time_t structures we use later... */
+/* this include must come before sys/resource.h or we can have problems on some OSes */
+#define TIME_WITH_SYS_TIME 1
+#define HAVE_SYS_TIME_H 1
+#if TIME_WITH_SYS_TIME
+#include <sys/time.h>
+#include <time.h>
+#else
+#if HAVE_SYS_TIME_H
+#include <sys/time.h>
+#else
+#include <time.h>
+#endif
+#endif
+
+#define HAVE_SYS_RESOURCE_H 1
+#ifdef HAVE_SYS_RESOURCE_H
+#include <sys/resource.h>
+#endif
+
+#define HAVE_LIMITS_H 1
+#ifdef HAVE_LIMITS_H
+#include <limits.h>
+#endif
+
+#define HAVE_PWD_H 1
+#ifdef HAVE_PWD_H
+#include <pwd.h>
+#endif
+
+#define HAVE_GRP_H 1
+#ifdef HAVE_GRP_H
+#include <grp.h>
+#endif
+
+#define HAVE_STRINGS_H 1
+#ifdef HAVE_STRINGS_H
+#include <strings.h>
+#endif
+
+#define HAVE_STRING_H 1
+#ifdef HAVE_STRING_H
+#include <string.h>
+#endif
+
+#define HAVE_UNISTD_H 1
+#ifdef HAVE_UNISTD_H
+#include <unistd.h>
+#endif
+
+#define HAVE_SYSLOG_H 1
+#ifdef HAVE_SYSLOG_H
+#include <syslog.h>
+#endif
+
+#define HAVE_SIGNAL_H 1
+#ifdef HAVE_SIGNAL_H
+#include <signal.h>
+#endif
+
+#define HAVE_SYS_STAT_H 1
+#ifdef HAVE_SYS_STAT_H
+#include <sys/stat.h>
+#endif
+
+#define HAVE_SYS_MMAN_H 1
+#ifdef HAVE_SYS_MMAN_H
+#include <sys/mman.h>
+#endif
+
+#define HAVE_FCNTL_H 1
+#ifdef HAVE_FCNTL_H
+#include <fcntl.h>
+#endif
+
+#define HAVE_STDARG_H 1
+#ifdef HAVE_STDARG_H
+#include <stdarg.h>
+#endif
+
+#define HAVE_SYS_TYPES_H 1
+#ifdef HAVE_SYS_TYPES_H
+#include <sys/types.h>
+#endif
+
+#define HAVE_SYS_WAIT_H 1
+#ifdef HAVE_SYS_WAIT_H
+#include <sys/wait.h>
+#endif
+
+#define HAVE_ERRNO_H 1
+#ifdef HAVE_ERRNO_H
+#include <errno.h>
+#endif
+
+#define HAVE_SYS_TIMEB_H 1
+#if HAVE_SYS_TIMEB_H
+#include <sys/timeb.h>
+#endif
+
+#define HAVE_SYS_IPC_H 1
+#ifdef HAVE_SYS_IPC_H
+#include <sys/ipc.h>
+#endif
+
+#define HAVE_SYS_MSG_H 1
+#ifdef HAVE_SYS_MSG_H
+#include <sys/msg.h>
+#endif
+
+#define HAVE_MATH_H 1
+#ifdef HAVE_MATH_H
+#include <math.h>
+#endif
+
+#define HAVE_CTYPE_H 1
+#ifdef HAVE_CTYPE_H
+#include <ctype.h>
+#endif
+
+#define HAVE_DIRENT_H 1
+#ifdef HAVE_DIRENT_H
+#include <dirent.h>
+#endif
+
+#define HAVE_REGEX_H 1
+#ifdef HAVE_REGEX_H
+#include <regex.h>
+#endif
+
+#define HAVE_SYS_SOCKET_H 1
+#ifdef HAVE_SYS_SOCKET_H
+#include <sys/socket.h>
+#endif
+
+/* #undef HAVE_SOCKET */
+#ifdef HAVE_SOCKET_H
+#include <socket.h>
+#endif
+
+#define HAVE_NETINET_IN_H 1
+#ifdef HAVE_NETINET_IN_H
+#include <netinet/in.h>
+#endif
+
+#define HAVE_ARPA_INET_H 1
+#ifdef HAVE_ARPA_INET_H
+#include <arpa/inet.h>
+#endif
+
+#define HAVE_NETDB_H 1
+#ifdef HAVE_NETDB_H
+#include <netdb.h>
+#endif
+
+#define HAVE_LIBGEN_H 1
+#ifdef HAVE_LIBGEN_H
+#include <libgen.h>
+#endif
+
+#define HAVE_SYS_UN_H 1
+#ifdef HAVE_SYS_UN_H
+#include <sys/un.h>
+#endif
+
+#define HAVE_SYS_POLL_H 1
+#ifdef HAVE_SYS_POLL_H
+#include <sys/poll.h>
+#endif
+
+#define HAVE_GETOPT_H 1
+#ifdef HAVE_GETOPT_H
+#include <getopt.h>
+#endif
+
+/* #undef HAVE_LINUX_MODULE_H */
+#ifdef HAVE_LINUX_MODULE_H
+#include <linux/module.h>
+#endif
+
+#define HAVE_LOCALE_H 1
+#ifdef HAVE_LOCALE_H
+#include <locale.h>
+#endif
+
+#define HAVE_WCHAR_H 1
+#ifdef HAVE_WCHAR_H
+#include <wchar.h>
+#endif
+
+/* configure script should allow user to override ltdl choice, but this will do for now... */
+/* #undef USE_LTDL */
+/* #undef HAVE_LTDL_H */
+#ifdef HAVE_LTDL_H
+#define USE_LTDL
+#endif
+
+#ifdef USE_LTDL
+#include <ltdl.h>
+#else
+#define HAVE_DLFCN_H /**/
+#ifdef HAVE_DLFCN_H
+#include <dlfcn.h>
+#endif
+#endif
+
+
+/* moved to end to prevent AIX compiler warnings */
+#ifndef RTLD_GLOBAL
+#define RTLD_GLOBAL 0
+#endif
+
+#ifndef RTLD_NOW
+#define RTLD_NOW 0
+#endif
+
+
+/***** MACRO DEFINITIONS *****/
+
+/* this needs to come after all system include files, so we don't accidentally attempt to redefine it */
+#ifndef WEXITSTATUS
+# define WEXITSTATUS(stat_val) ((unsigned)(stat_val) >> 8)
+#endif
+#ifndef WIFEXITED
+# define WIFEXITED(stat_val) (((stat_val) & 255) == 0)
+#endif
+
+
+#endif
diff -Nru check-mk-1.2.2p3/nagios4/defaults.h check-mk-1.2.6p12/nagios4/defaults.h
--- check-mk-1.2.2p3/nagios4/defaults.h 1970-01-01 00:00:00.000000000 +0000
+++ check-mk-1.2.6p12/nagios4/defaults.h 2014-02-10 09:00:20.000000000 +0000
@@ -0,0 +1,94 @@
+#ifndef INCLUDE_defaults_h__
+#define INCLUDE_defaults_h__
+
+/******************* DEFAULT VALUES *******************/
+
+#define DEFAULT_LOG_LEVEL 1 /* log all events to main log file */
+#define DEFAULT_USE_SYSLOG 1 /* log events to syslog? 1=yes, 0=no */
+#define DEFAULT_SYSLOG_LEVEL 2 /* log only severe events to syslog */
+
+#define DEFAULT_NOTIFICATION_LOGGING 1 /* log notification events? 1=yes, 0=no */
+
+#define DEFAULT_INTER_CHECK_DELAY 5.0 /* seconds between initial service check scheduling */
+#define DEFAULT_INTERLEAVE_FACTOR 1 /* default interleave to use when scheduling checks */
+#define DEFAULT_RETRY_INTERVAL 30 /* services are retried in 30 seconds if they're not OK */
+#define DEFAULT_CHECK_REAPER_INTERVAL 10 /* interval in seconds to reap host and service check results */
+#define DEFAULT_MAX_REAPER_TIME 30 /* maximum number of seconds to spend reaping service checks before we break out for a while */
+#define DEFAULT_MAX_CHECK_RESULT_AGE 3600 /* maximum number of seconds that a check result file is considered to be valid */
+#define DEFAULT_MAX_PARALLEL_SERVICE_CHECKS 0 /* maximum number of service checks we can have running at any given time (0=unlimited) */
+#define DEFAULT_RETENTION_UPDATE_INTERVAL 60 /* minutes between auto-save of retention data */
+#define DEFAULT_RETENTION_SCHEDULING_HORIZON 900 /* max seconds between program restarts that we will preserve scheduling information */
+#define DEFAULT_STATUS_UPDATE_INTERVAL 60 /* seconds between aggregated status data updates */
+#define DEFAULT_FRESHNESS_CHECK_INTERVAL 60 /* seconds between service result freshness checks */
+#define DEFAULT_AUTO_RESCHEDULING_INTERVAL 30 /* seconds between host and service check rescheduling events */
+#define DEFAULT_AUTO_RESCHEDULING_WINDOW 180 /* window of time (in seconds) for which we should reschedule host and service checks */
+#define DEFAULT_ORPHAN_CHECK_INTERVAL 60 /* seconds between checks for orphaned hosts and services */
+
+#define DEFAULT_INTERVAL_LENGTH 60 /* seconds per interval unit for check scheduling */
+
+#define DEFAULT_NOTIFICATION_TIMEOUT 30 /* max time in seconds to wait for notification commands to complete */
+#define DEFAULT_EVENT_HANDLER_TIMEOUT 30 /* max time in seconds to wait for event handler commands to complete */
+#define DEFAULT_HOST_CHECK_TIMEOUT 30 /* max time in seconds to wait for host check commands to complete */
+#define DEFAULT_SERVICE_CHECK_TIMEOUT 60 /* max time in seconds to wait for service check commands to complete */ +#define DEFAULT_OCSP_TIMEOUT 15 /* max time in seconds to wait for obsessive compulsive processing commands to complete */ +#define DEFAULT_OCHP_TIMEOUT 15 /* max time in seconds to wait for obsessive compulsive processing commands to complete */ +#define DEFAULT_PERFDATA_TIMEOUT 5 /* max time in seconds to wait for performance data commands to complete */ +#define DEFAULT_TIME_CHANGE_THRESHOLD 900 /* compensate for time changes of more than 15 minutes */ + +#define DEFAULT_LOG_HOST_RETRIES 0 /* don't log host retries */ +#define DEFAULT_LOG_SERVICE_RETRIES 0 /* don't log service retries */ +#define DEFAULT_LOG_EVENT_HANDLERS 1 /* log event handlers */ +#define DEFAULT_LOG_INITIAL_STATES 0 /* don't log initial service and host states */ +#define DEFAULT_LOG_CURRENT_STATES 1 /* log current service and host states after rotating log */ +#define DEFAULT_LOG_EXTERNAL_COMMANDS 1 /* log external commands */ +#define DEFAULT_LOG_PASSIVE_CHECKS 1 /* log passive service checks */ + +#define DEFAULT_DEBUG_LEVEL 0 /* don't log any debugging information */ +#define DEFAULT_DEBUG_VERBOSITY 1 +#define DEFAULT_MAX_DEBUG_FILE_SIZE 1000000 /* max size of debug log */ + +#define DEFAULT_AGGRESSIVE_HOST_CHECKING 0 /* don't use "aggressive" host checking */ +#define DEFAULT_CHECK_EXTERNAL_COMMANDS 1 /* check for external commands */ +#define DEFAULT_CHECK_ORPHANED_SERVICES 1 /* check for orphaned services */ +#define DEFAULT_CHECK_ORPHANED_HOSTS 1 /* check for orphaned hosts */ +#define DEFAULT_ENABLE_FLAP_DETECTION 0 /* don't enable flap detection */ +#define DEFAULT_PROCESS_PERFORMANCE_DATA 0 /* don't process performance data */ +#define DEFAULT_CHECK_SERVICE_FRESHNESS 1 /* check service result freshness */ +#define DEFAULT_CHECK_HOST_FRESHNESS 0 /* don't check host result freshness */ +#define DEFAULT_AUTO_RESCHEDULE_CHECKS 0 /* don't auto-reschedule host and service checks */ +#define DEFAULT_TRANSLATE_PASSIVE_HOST_CHECKS 0 /* should we translate DOWN/UNREACHABLE passive host checks? */ +#define DEFAULT_PASSIVE_HOST_CHECKS_SOFT 0 /* passive host checks are treated as HARD by default */ + +#define DEFAULT_LOW_SERVICE_FLAP_THRESHOLD 20.0 /* low threshold for detection of service flapping */ +#define DEFAULT_HIGH_SERVICE_FLAP_THRESHOLD 30.0 /* high threshold for detection of service flapping */ +#define DEFAULT_LOW_HOST_FLAP_THRESHOLD 20.0 /* low threshold for detection of host flapping */ +#define DEFAULT_HIGH_HOST_FLAP_THRESHOLD 30.0 /* high threshold for detection of host flapping */ + +#define DEFAULT_HOST_CHECK_SPREAD 30 /* max minutes to schedule all initial host checks */ +#define DEFAULT_SERVICE_CHECK_SPREAD 30 /* max minutes to schedule all initial service checks */ + +#define DEFAULT_CACHED_HOST_CHECK_HORIZON 15 /* max age in seconds that cached host checks can be used */ +#define DEFAULT_CACHED_SERVICE_CHECK_HORIZON 15 /* max age in seconds that cached service checks can be used */ +#define DEFAULT_ENABLE_PREDICTIVE_HOST_DEPENDENCY_CHECKS 1 /* should we use predictive host dependency checks? */ +#define DEFAULT_ENABLE_PREDICTIVE_SERVICE_DEPENDENCY_CHECKS 1 /* should we use predictive service dependency checks? 
*/
+
+#define DEFAULT_USE_LARGE_INSTALLATION_TWEAKS 0 /* don't use tweaks for large Nagios installations */
+
+#define DEFAULT_ADDITIONAL_FRESHNESS_LATENCY 15 /* seconds to be added to freshness thresholds when automatically calculated by Nagios */
+
+#define DEFAULT_CHECK_FOR_UPDATES 1 /* should we check for new Nagios releases? */
+#define DEFAULT_BARE_UPDATE_CHECK 0 /* report current version and new installs */
+#define MINIMUM_UPDATE_CHECK_INTERVAL 60*60*22 /* 22 hours minimum between checks - please be kind to our servers! */
+#define BASE_UPDATE_CHECK_INTERVAL 60*60*22 /* 22 hours base interval */
+#define UPDATE_CHECK_INTERVAL_WOBBLE 60*60*4 /* 4 hour wobble on top of base interval */
+#define BASE_UPDATE_CHECK_RETRY_INTERVAL 60*60*1 /* 1 hour base retry interval */
+#define UPDATE_CHECK_RETRY_INTERVAL_WOBBLE 60*60*3 /* 3 hour wobble on top of base retry interval */
+
+#define DEFAULT_ALLOW_EMPTY_HOSTGROUP_ASSIGNMENT 2 /* Allow assigning to empty hostgroups by default, but warn about it */
+
+#define DEFAULT_HOST_PERFDATA_FILE_TEMPLATE "[HOSTPERFDATA]\t$TIMET$\t$HOSTNAME$\t$HOSTEXECUTIONTIME$\t$HOSTOUTPUT$\t$HOSTPERFDATA$"
+#define DEFAULT_SERVICE_PERFDATA_FILE_TEMPLATE "[SERVICEPERFDATA]\t$TIMET$\t$HOSTNAME$\t$SERVICEDESC$\t$SERVICEEXECUTIONTIME$\t$SERVICELATENCY$\t$SERVICEOUTPUT$\t$SERVICEPERFDATA$"
+#define DEFAULT_HOST_PERFDATA_PROCESS_EMPTY_RESULTS 1
+#define DEFAULT_SERVICE_PERFDATA_PROCESS_EMPTY_RESULTS 1
+
+#endif /* INCLUDE_defaults_h__ */
diff -Nru check-mk-1.2.2p3/nagios4/dkhash.h check-mk-1.2.6p12/nagios4/dkhash.h
--- check-mk-1.2.2p3/nagios4/dkhash.h 1970-01-01 00:00:00.000000000 +0000
+++ check-mk-1.2.6p12/nagios4/dkhash.h 2014-02-10 09:00:20.000000000 +0000
@@ -0,0 +1,134 @@
+#ifndef LIBNAGIOS_dkhash_h__
+#define LIBNAGIOS_dkhash_h__
+#include <errno.h>
+
+/**
+ * @file dkhash.h
+ * @brief Dual-key hash functions for Nagios
+ *
+ * Having a dual-key hash function is pretty unusual, but since so
+ * much data in Nagios pertains to services (which are uniquely
+ * identified based on both host_name and service_description), it
+ * makes sense here.
+ *
+ * @{
+ */
+
+/** return flags usable from the callback function of dkhash_walk_data() */
+#define DKHASH_WALK_REMOVE 1 /**< Remove the most recently visited object */
+#define DKHASH_WALK_STOP 2 /**< Cause walking to stop */
+
+/** return values for dkhash_insert() */
+#define DKHASH_OK 0 /**< Success */
+#define DKHASH_EDUPE (-EPERM) /**< duplicate insert attempted */
+#define DKHASH_EPERM (-EPERM) /**< duplicate insert attempted */
+#define DKHASH_EINVAL (-EINVAL) /**< Invalid parameters passed */
+#define DKHASH_ENOMEM (-ENOMEM) /**< Memory allocation failed */
+
+struct dkhash_table;
+/** opaque type */
+typedef struct dkhash_table dkhash_table;
+
+/**
+ * Create a dual-keyed hash-table of the given size
+ * Note that it's generally useful to make the table 25-30% larger
+ * than the number of items you intend to store, and also note that
+ * the 'size' argument gets rounded up to the nearest power of 2.
+ * @param size The desired size of the hash-table.
+ */
+extern dkhash_table *dkhash_create(unsigned int size);
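+
+/* Usage sketch (editorial, not part of the original header): index
+   arbitrary payloads by host name and service description, the key
+   pair this API was designed around. 'my_data' is hypothetical, and
+   the functions used here are declared further down in this file. */
+#if 0
+dkhash_table *t = dkhash_create(1024);
+void *my_data = NULL; /* stand-in for a real object */
+dkhash_insert(t, "localhost", "PING", my_data);
+void *found = dkhash_get(t, "localhost", "PING"); /* == my_data */
+dkhash_remove(t, "localhost", "PING"); /* drops the container, not the data */
+dkhash_destroy(t);
+#endif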
+
+/**
+ * Destroy a dual-keyed hash table
+ * @param t The table to destroy
+ * @return 0 on success, -1 on errors
+ */
+extern int dkhash_destroy(dkhash_table *t);
+
+/**
+ * Fetch the data associated with a particular key
+ * @param t The table to get the data from
+ * @param k1 The first key
+ * @param k2 The second key
+ * @return The data on success, NULL on errors or if data isn't found
+ */
+extern void *dkhash_get(dkhash_table *t, const char *k1, const char *k2);
+
+/**
+ * Insert a new entry into the hash table
+ * @param t The hash table
+ * @param k1 The first key
+ * @param k2 The second key (may be null)
+ * @param data The data to insert
+ * @return 0 on success, < 0 on errors
+ */
+extern int dkhash_insert(dkhash_table *t, const char *k1, const char *k2, void *data);
+
+/**
+ * Remove data from the hash table
+ * Note that this does not free() the pointer to the data stored in the
+ * table. It just destroys containers for that data in the hash table.
+ * @param t The hash table
+ * @param k1 The first key
+ * @param k2 The second key
+ * @return The removed data on success, or NULL on errors
+ */
+extern void *dkhash_remove(dkhash_table *t, const char *k1, const char *k2);
+
+/**
+ * Call a function once for each item in the hash-table
+ * The callback function can return DKHASH_WALK_{REMOVE,STOP} or any
+ * OR'ed combination thereof to control the walking procedure, and
+ * should return 0 in the normal case.
+ * @param t The hash table
+ * @param walker The callback function to send the data to
+ */
+extern void dkhash_walk_data(dkhash_table *t, int (*walker)(void *data));
+
+
+/**
+ * Get number of collisions in hash table
+ * Many collisions are a sign of a too-small hash table or a
+ * poor hash function.
+ * @param t The hash table to report on
+ * @return The total number of collisions (not duplicates) from inserts
+ */
+extern unsigned int dkhash_collisions(dkhash_table *t);
+
+/**
+ * Get number of items in the hash table
+ * @param t The hash table
+ * @return Number of items currently in the hash-table
+ */
+extern unsigned int dkhash_num_entries(dkhash_table *t);
+
+/**
+ * Get max number of items stored in the hash table
+ * @param t The hash table
+ * @return Max number of items stored in hash-table
+ */
+extern unsigned int dkhash_num_entries_max(dkhash_table *t);
+
+/**
+ * Get number of entries added to hash table
+ * Note that some of them may have been removed.
+ * @param t The hash table + * @return The number of items added to the table + */ +extern unsigned int dkhash_num_entries_added(dkhash_table *t); + +/** + * Get number of removed items from hash table + * @param t The hash table + * @return Number of items removed from hash table + */ +extern unsigned int dkhash_num_entries_removed(dkhash_table *t); + +/** + * Get actual table size (in number of buckets) + * @param t The hash table + * @return Number of bucket-slots in hash table + */ +extern unsigned int dkhash_table_size(dkhash_table *t); +/** @} */ +#endif /* LIBNAGIOS_dkhash_h__ */ diff -Nru check-mk-1.2.2p3/nagios4/downtime.h check-mk-1.2.6p12/nagios4/downtime.h --- check-mk-1.2.2p3/nagios4/downtime.h 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/nagios4/downtime.h 2014-02-10 09:00:20.000000000 +0000 @@ -0,0 +1,112 @@ +/***************************************************************************** + * + * DOWNTIME.H - Header file for scheduled downtime functions + * + * + * License: + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + * + *****************************************************************************/ + + +#ifndef _DOWNTIME_H +#define _DOWNTIME_H + +#include "common.h" +#include "objects.h" +#ifndef NSCGI +#include "nagios.h" +#endif + +NAGIOS_BEGIN_DECL + +/* SCHEDULED_DOWNTIME_ENTRY structure */ +typedef struct scheduled_downtime { + int type; + char *host_name; + char *service_description; + time_t entry_time; + time_t start_time; + time_t flex_downtime_start; /* Time the flexible downtime started */ + time_t end_time; + int fixed; + unsigned long triggered_by; + unsigned long duration; + unsigned long downtime_id; + int is_in_effect; + int start_notification_sent; + char *author; + char *comment; +#ifndef NSCGI + unsigned long comment_id; + int start_flex_downtime; + int incremented_pending_downtime; +#endif + struct scheduled_downtime *next; +#ifndef NSCGI + struct timed_event *start_event, *stop_event; +#endif + struct scheduled_downtime *prev; + } scheduled_downtime; + +extern struct scheduled_downtime *scheduled_downtime_list; + + +int initialize_downtime_data(void); /* initializes scheduled downtime data */ +int cleanup_downtime_data(void); /* cleans up scheduled downtime data */ + +#ifndef NSCGI +int add_new_downtime(int, char *, char *, time_t, char *, char *, time_t, time_t, int, unsigned long, unsigned long, unsigned long *, int, int); +int add_new_host_downtime(char *, time_t, char *, char *, time_t, time_t, int, unsigned long, unsigned long, unsigned long *, int, int); +int add_new_service_downtime(char *, char *, time_t, char *, char *, time_t, time_t, int, unsigned long, unsigned long, unsigned long *, int, int); + +int delete_host_downtime(unsigned long); +int delete_service_downtime(unsigned long); +int delete_downtime(int, unsigned long); + +int schedule_downtime(int, char *, char *, time_t, char *, char *, time_t, time_t, int, unsigned long, unsigned 
long, unsigned long *);
+int unschedule_downtime(int, unsigned long);
+
+int register_downtime(int, unsigned long);
+int handle_scheduled_downtime(struct scheduled_downtime *);
+int handle_scheduled_downtime_by_id(unsigned long);
+
+int check_pending_flex_host_downtime(struct host *);
+int check_pending_flex_service_downtime(struct service *);
+
+int check_for_expired_downtime(void);
+#endif
+
+int add_host_downtime(char *, time_t, char *, char *, time_t, time_t, time_t, int, unsigned long, unsigned long, unsigned long, int, int);
+int add_service_downtime(char *, char *, time_t, char *, char *, time_t, time_t, time_t, int, unsigned long, unsigned long, unsigned long, int, int);
+
+/* If you are going to be adding a lot of downtime in sequence, set
+   defer_downtime_sorting to 1 before you start and then call
+   sort_downtime afterwards. Things will go MUCH faster. */
+
+extern int defer_downtime_sorting;
+int add_downtime(int, char *, char *, time_t, char *, char *, time_t, time_t, time_t, int, unsigned long, unsigned long, unsigned long, int, int);
+int sort_downtime(void);
+
+struct scheduled_downtime *find_downtime(int, unsigned long);
+struct scheduled_downtime *find_host_downtime(unsigned long);
+struct scheduled_downtime *find_service_downtime(unsigned long);
+
+void free_downtime_data(void); /* frees memory allocated to scheduled downtime list */
+
+int delete_downtime_by_hostname_service_description_start_time_comment(char *, char *, time_t, char *);
+
+NAGIOS_END_DECL
+#endif
diff -Nru check-mk-1.2.2p3/nagios4/fanout.h check-mk-1.2.6p12/nagios4/fanout.h
--- check-mk-1.2.2p3/nagios4/fanout.h 1970-01-01 00:00:00.000000000 +0000
+++ check-mk-1.2.6p12/nagios4/fanout.h 2014-02-10 09:00:20.000000000 +0000
@@ -0,0 +1,73 @@
+#ifndef LIBNAGIOS_fanout_h__
+#define LIBNAGIOS_fanout_h__
+#include "lnag-utils.h"
+
+/**
+ * @file fanout.h
+ * @brief Simple fanout table implementation
+ *
+ * Fanouts are useful to hold short-lived integer-indexed data where
+ * the keyspan between smallest and largest key can be too large and
+ * change too often for it to be practical to maintain a growing array.
+ * If you think of it as a hash-table optimized for unsigned longs you've
+ * got the right idea.
+ *
+ * @{
+ */
+
+NAGIOS_BEGIN_DECL
+
+/** Primary (opaque) type for this api */
+typedef struct fanout_table fanout_table;
+
+/**
+ * Create a fanout table
+ * @param[in] size The size of the table. Preferably a power of 2
+ * @return Pointer to a newly created table
+ */
+extern fanout_table *fanout_create(unsigned long size);
+
+/**
+ * Destroy a fanout table, with optional destructor.
+ * This function will iterate over all the entries in the fanout
+ * table and remove them, one by one. If 'destructor' is not NULL,
+ * it will be called on each and every object in the table. Note that
+ * 'free' is a valid destructor.
+ *
+ * @param[in] t The fanout table to destroy
+ * @param[in] destructor Function to call on data pointers in table
+ */
+extern void fanout_destroy(fanout_table *t, void (*destructor)(void *));
+
+/**
+ * Return a pointer from the fanout table t
+ *
+ * @param[in] t table to fetch from
+ * @param[in] key key to fetch
+ * @return NULL on errors; Pointer to data on success
+ */
+extern void *fanout_get(fanout_table *t, unsigned long key);
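+
+/* Usage sketch (editorial, not part of the original header): keying
+   short-lived entries by an unsigned long such as a timestamp.
+   'when' and 'event' are hypothetical; fanout_add() and
+   fanout_remove() are declared just below. */
+#if 0
+fanout_table *ft = fanout_create(4096);
+fanout_add(ft, when, event);
+void *ev = fanout_get(ft, when); /* == event */
+fanout_remove(ft, when);         /* detach without destroying */
+fanout_destroy(ft, free);        /* 'free' is a valid destructor */
+#endif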
+
+/**
+ * Add an entry to the fanout table.
+ * Note that we don't check if the key is unique. If it isn't,
+ * fanout_remove() will remove the latest added first.
+ *
+ * @param[in] t fanout table to add to
+ * @param[in] key Key for this entry
+ * @param[in] data Data to add. Must not be NULL
+ * @return 0 on success, -1 on errors
+ */
+extern int fanout_add(fanout_table *t, unsigned long key, void *data);
+
+/**
+ * Remove an entry from the fanout table and return its data.
+ *
+ * @param[in] t fanout table to look in
+ * @param[in] key The key whose data we should locate
+ * @return Pointer to the data stored on success; NULL on errors
+ */
+extern void *fanout_remove(fanout_table *t, unsigned long key);
+NAGIOS_END_DECL
+/** @} */
+#endif
diff -Nru check-mk-1.2.2p3/nagios4/iobroker.h check-mk-1.2.6p12/nagios4/iobroker.h
--- check-mk-1.2.2p3/nagios4/iobroker.h 1970-01-01 00:00:00.000000000 +0000
+++ check-mk-1.2.6p12/nagios4/iobroker.h 2014-02-10 09:00:20.000000000 +0000
@@ -0,0 +1,175 @@
+/* lib/iobroker.h. Generated from iobroker.h.in by configure. */
+#ifndef LIBNAGIOS_iobroker_h__
+#define LIBNAGIOS_iobroker_h__
+
+/**
+ * @file iobroker.h
+ * @brief I/O broker library function declarations
+ *
+ * The I/O broker library handles multiplexing between hundreds or
+ * thousands of sockets with a few simple calls. It's designed to
+ * be as lightweight as possible so as to not cause memory bloat,
+ * and is therefore highly suitable for use by processes that are
+ * fork()-intensive.
+ *
+ * @{
+ */
+
+#define IOBROKER_USES_EPOLL 1
+/* #undef IOBROKER_USES_POLL */
+/* #undef IOBROKER_USES_SELECT */
+
+#if (_POSIX_C_SOURCE - 0) >= 200112L
+#include <poll.h>
+# define IOBROKER_POLLIN POLLIN
+# define IOBROKER_POLLPRI POLLPRI
+# define IOBROKER_POLLOUT POLLOUT
+
+# define IOBROKER_POLLERR POLLERR
+# define IOBROKER_POLLHUP POLLHUP
+# define IOBROKER_POLLNVAL POLLNVAL
+#else
+# define IOBROKER_POLLIN 0x001 /* there is data to read */
+# define IOBROKER_POLLPRI 0x002 /* there is urgent data to read */
+# define IOBROKER_POLLOUT 0x004 /* writing now will not block */
+
+# define IOBROKER_POLLERR 0x008 /* error condition */
+# define IOBROKER_POLLHUP 0x010 /* hung up */
+# define IOBROKER_POLLNVAL 0x020 /* invalid polling request */
+#endif
+
+/** return codes */
+#define IOBROKER_SUCCESS 0
+#define IOBROKER_ENOSET (-1)
+#define IOBROKER_ENOINIT (-2)
+#define IOBROKER_ELIB (-3)
+#define IOBROKER_EALREADY (-EALREADY)
+#define IOBROKER_EINVAL (-EINVAL)
+
+
+/** Flags for iobroker_destroy() */
+#define IOBROKER_CLOSE_SOCKETS 1
+
+/* Opaque type. Callers needn't worry about this */
+struct iobroker_set;
+typedef struct iobroker_set iobroker_set;
+
+/**
+ * Get a string describing the error in the last iobroker call.
+ * The returned string must not be free()'d.
+ * @param error The error code
+ * @return A string describing the meaning of the error code
+ */
+extern const char *iobroker_strerror(int error);
+
+/**
+ * Create a new socket set
+ * @return An iobroker_set on success. NULL on errors.
+ */
+extern iobroker_set *iobroker_create(void);
+
+/**
+ * Published utility function used to determine the max number of
+ * file descriptors this process can keep open at any one time.
+ * @return Max number of filedescriptors we can keep open
+ */
+extern int iobroker_max_usable_fds(void);
+
+/**
+ * Register a socket for input polling with the broker.
+ *
+ * @param iobs The socket set to add the socket to.
+ * @param sd The socket descriptor to add
+ * @param arg Argument passed to input handler on available input
+ * @param handler The callback function to call when input is available
+ *
+ * @return 0 on success. < 0 on errors.
+ */
+extern int iobroker_register(iobroker_set *iobs, int sd, void *arg, int (*handler)(int, int, void *));
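+
+/* Usage sketch (editorial, not part of the original header): a minimal
+   input loop. 'sock' and the handler body are hypothetical;
+   iobroker_poll() is declared at the end of this file. */
+#if 0
+static int on_input(int sd, int events, void *arg)
+{
+	/* read and process whatever arrived on sd */
+	return 0;
+}
+
+iobroker_set *iobs = iobroker_create();
+iobroker_register(iobs, sock, NULL, on_input);
+for (;;)
+	iobroker_poll(iobs, -1); /* -1 = wait indefinitely */
+#endif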
+ */ +extern int iobroker_register(iobroker_set *iobs, int sd, void *arg, int (*handler)(int, int, void *)); + + +/** + * Register a socket for output polling with the broker + * @note There's no guarantee that *ALL* data is writable just + * because the socket won't block you completely. + * + * @param iobs The socket set to add the socket to. + * @param sd The socket descriptor to add + * @param arg Argument passed to output handler on ready-to-write + * @param handler The function to call when output won't block + * + * @return 0 on success. < 0 on errors + */ +extern int iobroker_register_out(iobroker_set *iobs, int sd, void *arg, int (*handler)(int, int, void *)); + +/** + * Check if a particular filedescriptor is registered with the iobroker set + * @param[in] iobs The iobroker set the filedescriptor should be member of + * @param[in] fd The filedescriptor to check for + * @return 1 if the filedescriptor is registered and 0 otherwise + */ +extern int iobroker_is_registered(iobroker_set *iobs, int fd); + +/** + * Getter function for number of file descriptors registered in + * the set specified. + * @param iobs The io broker set to query + * @return Number of file descriptors registered in the set + */ +extern int iobroker_get_num_fds(iobroker_set *iobs); + +/** + * Getter function for the maximum amount of file descriptors this + * set can handle. + * @param iobs The io broker set to query + * @return Max file descriptor capacity for the set + */ +extern int iobroker_get_max_fds(iobroker_set *iobs); + +/** + * Unregister a socket for input polling with the broker. + * + * @param iobs The socket set to remove the socket from + * @param sd The socket descriptor to remove + * @return 0 on succes. < 0 on errors. + */ +extern int iobroker_unregister(iobroker_set *iobs, int sd); + +/** + * Deregister a socket for input polling with the broker + * (this is identical to iobroker_unregister()) + * @param iobs The socket set to remove the socket from + * @param sd The socket descriptor to remove + * @return 0 on success. < 0 on errors. + */ +extern int iobroker_deregister(iobroker_set *iobs, int sd); + +/** + * Unregister and close(2) a socket registered for input with the + * broker. This is a convenience function which exists only to avoid + * doing multiple calls when read() returns 0, as closed sockets must + * always be removed from the socket set to avoid consuming tons of + * cpu power from iterating "too fast" over the file descriptors. + * + * @param iobs The socket set to remove the socket from + * @param sd The socket descriptor to remove and close + * @return 0 on success. < 0 on errors + */ +extern int iobroker_close(iobroker_set *iobs, int sd); + +/** + * Destroy a socket set as created by iobroker_create + * @param iobs The socket set to destroy + * @param flags If set, close(2) all registered sockets + */ +extern void iobroker_destroy(iobroker_set *iobs, int flags); + +/** + * Wait for input on any of the registered sockets. + * @param iobs The socket set to wait for. + * @param timeout Timeout in milliseconds. 
-1 is "wait indefinitely" + * @return -1 on errors, or number of filedescriptors with input + */ +extern int iobroker_poll(iobroker_set *iobs, int timeout); +#endif /* INCLUDE_iobroker_h__ */ +/** @} */ diff -Nru check-mk-1.2.2p3/nagios4/iocache.h check-mk-1.2.6p12/nagios4/iocache.h --- check-mk-1.2.2p3/nagios4/iocache.h 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/nagios4/iocache.h 2014-02-10 09:00:20.000000000 +0000 @@ -0,0 +1,181 @@ +#ifndef LIBNAGIOS_iocache_h__ +#define LIBNAGIOS_iocache_h__ +#include +#include +#include + +/** + * @file iocache.h + * @brief I/O cache function declarations + * + * The I/O cache library is useful for reading large chunks of data + * from sockets and utilizing parts of that data based on either + * size or a magic delimiter. + * + * @{ + */ + +/** opaque type for iocache operations */ +struct iocache; +typedef struct iocache iocache; + +/** + * Destroys an iocache object, freeing all memory allocated to it. + * @param ioc The iocache object to destroy + */ +extern void iocache_destroy(iocache *ioc); + +/** + * Resets an iocache struct, discarding all data in it without free()'ing + * any memory. + * + * @param[in] ioc The iocache struct to reset + */ +extern void iocache_reset(iocache *ioc); + +/** + * Resizes the buffer in an io cache + * @param ioc The io cache to resize + * @param new_size The new size of the io cache + * @return 0 on success, -1 on errors + */ +extern int iocache_resize(iocache *ioc, unsigned long new_size); + +/** + * Grows an iocache object + * This uses iocache_resize() internally + * @param[in] ioc The iocache to grow + * @param[in] increment How much to increase it + * @return 0 on success, -1 on errors + */ +extern int iocache_grow(iocache *ioc, unsigned long increment); + +/** + * Returns the total size of the io cache + * @param[in] ioc The iocache to inspect + * @return The size of the io cache. If ioc is null, 0 is returned + */ +extern unsigned long iocache_size(iocache *ioc); + +/** + * Returns remaining read capacity of the io cache + * @param ioc The io cache to operate on + * @return The number of bytes available to read + */ +extern unsigned long iocache_capacity(iocache *ioc); + +/** + * Return the amount of unread but stored data in the io cache + * @param ioc The io cache to operate on + * @return Number of bytes available to read + */ +extern unsigned long iocache_available(iocache *ioc); + +/** + * Use a chunk of data from iocache based on size. The caller + * must take care not to write beyond the end of the requested + * buffer, or Bad Things(tm) will happen. + * + * @param ioc The io cache we should use data from + * @param size The size of the data we want returned + * @return NULL on errors (insufficient data, fe). pointer on success + */ +extern char *iocache_use_size(iocache *ioc, unsigned long size); + +/** + * Use a chunk of data from iocache based on delimiter. The + * caller must take care not to write beyond the end of the + * requested buffer, if any is returned, or Bad Things(tm) will + * happen. + * + * @param ioc The io cache to use data from + * @param delim The delimiter + * @param delim_len Length of the delimiter + * @param size Length of the returned buffer + * @return NULL on errors (delimiter not found, insufficient data). pointer on success + */ +extern char *iocache_use_delim(iocache *ioc, const char *delim, size_t delim_len, unsigned long *size); + +/** + * Forget that a specified number of bytes have been used. 
+ * @param ioc The io cache that you want to un-use data in
+ * @param size The number of bytes you want to forget you've seen
+ * @return -1 if there was an error, 0 otherwise.
+ */
+extern int iocache_unuse_size(iocache *ioc, unsigned long size);
+
+/**
+ * Creates the iocache object, initializing it with the given size
+ * @param size Initial size of the iocache buffer
+ * @return Pointer to a valid iocache object
+ */
+extern iocache *iocache_create(unsigned long size);
+
+/**
+ * Read data into the iocache buffer
+ * @param ioc The io cache we should read into
+ * @param fd The filedescriptor we should read from
+ * @return The number of bytes read on success. < 0 on errors
+ */
+extern int iocache_read(iocache *ioc, int fd);
+
+/**
+ * Add data to the iocache buffer
+ * The data is copied, so it can safely be taken from the stack in a
+ * function that returns before the data is used.
+ * If the io cache is too small to hold the data, -1 will be returned.
+ *
+ * @param[in] ioc The io cache to add to
+ * @param[in] buf Pointer to the data we should add
+ * @param[in] len Length (in bytes) of data pointed to by buf
+ * @return iocache_available(ioc) on success, -1 on errors
+ */
+extern int iocache_add(iocache *ioc, char *buf, unsigned int len);
+
+/**
+ * Like sendto(), but sends all cached data prior to the requested data.
+ *
+ * @param[in] ioc The iocache to send, or cache data in
+ * @param[in] fd The file descriptor to send to
+ * @param[in] buf Pointer to the data to send
+ * @param[in] len Length (in bytes) of data to send
+ * @param[in] flags Flags passed to sendto(2)
+ * @param[in] dest_addr Destination address
+ * @param[in] addrlen size (in bytes) of dest_addr
+ * @return bytes sent on success, -ERRNO on errors
+ */
+extern int iocache_sendto(iocache *ioc, int fd, char *buf, unsigned int len, int flags, const struct sockaddr *dest_addr, socklen_t addrlen);
+
+/**
+ * Like send(2), but sends all cached data prior to the requested data.
+ * This function uses iocache_sendto() internally, but can only be
+ * used on connected sockets or open()'ed files.
+ *
+ * @param[in] ioc The iocache to send, or cache data in
+ * @param[in] fd The file descriptor to send to
+ * @param[in] buf Pointer to the data to send
+ * @param[in] len Length (in bytes) of data to send
+ * @param[in] flags Flags passed to sendto(2)
+ * @return bytes sent on success, -ERRNO on errors
+ */
+static inline int iocache_send(iocache *ioc, int fd, char *buf, unsigned int len, int flags)
+{
+	return iocache_sendto(ioc, fd, buf, len, flags, NULL, 0);
+}
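+
+/* Usage sketch (editorial, not part of the original header): read from
+   a socket and consume one newline-terminated record at a time. 'fd'
+   is a hypothetical connected socket; iocache_destroy() is declared
+   near the top of this file. */
+#if 0
+iocache *ioc = iocache_create(65536);
+unsigned long len;
+char *line;
+
+while (iocache_read(ioc, fd) > 0) {
+	while ((line = iocache_use_delim(ioc, "\n", 1, &len)) != NULL) {
+		/* 'line' points at 'len' usable bytes inside the cache */
+		}
+	}
+iocache_destroy(ioc);
+#endif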
+ * + * @param[in] ioc The iocache to send, or cache data in + * @param[in] fd The file descriptor to send to + * @param[in] buf Pointer to the data to send + * @param[in] len Length (in bytes) of data to send + * @return bytes sent on success, -ERRNO on errors + */ +static inline int iocache_write(iocache *ioc, int fd, char *buf, unsigned int len) +{ + return iocache_send(ioc, fd, buf, len, 0); +} +#endif /* INCLUDE_iocache_h__ */ +/** @} */ diff -Nru check-mk-1.2.2p3/nagios4/kvvec.h check-mk-1.2.6p12/nagios4/kvvec.h --- check-mk-1.2.2p3/nagios4/kvvec.h 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/nagios4/kvvec.h 2014-02-10 09:00:20.000000000 +0000 @@ -0,0 +1,207 @@ +#ifndef LIBNAGIOS_kvvec_h__ +#define LIBNAGIOS_kvvec_h__ + +/** + * @file kvvec.h + * @brief Key/value vector library function and type declarations + * + * The kvvec library is nifty as either a configuration meta-format + * or for IPC purposes. Take a look at the buf2kvvec() and kvvec2buf() + * pair of functions for the latter. + * @{ + */ + +/** + * key/value pair + * One of the two major components of the kvvec api + */ +struct key_value { + char *key; /**< The key */ + char *value; /**< The value */ + int key_len; /**< Length of key */ + int value_len; /**< Length of value */ +}; + +/** + * key/value vector buffer. Actually just a buffer, but one that gets + * used as return value and internal tracker for kvvec2buf() + */ +struct kvvec_buf { + char *buf; /**< The buffer */ + unsigned long buflen; /**< Length of buffer */ + unsigned long bufsize; /**< Size of buffer (includes overalloc) */ +}; + +/** + * key/value vector struct + * This is the main component of the kvvec library + * @note This should be made opaque, with a kvvec_foreach() using a + * callback to iterate over key/value pairs. + */ +struct kvvec { + struct key_value *kv; /**< The key/value array */ + int kv_alloc; /**< Allocated size of key/value array */ + int kv_pairs; /**< Number of key/value pairs */ + int kvv_sorted; /**< Determines if this kvvec has been sorted */ +}; + +/** Portable initializer for stack-allocated key/value vectors */ +#define KVVEC_INITIALIZER { NULL, 0, 0, 0 } + +/** Parameters for kvvec_destroy() */ +#define KVVEC_FREE_KEYS 1 /**< Free keys when destroying a kv vector */ +#define KVVEC_FREE_VALUES 2 /**< Free values when destroying a kv vector */ +/** Free both keys and values when destroying a kv vector */ +#define KVVEC_FREE_ALL (KVVEC_FREE_KEYS | KVVEC_FREE_VALUES) + +#define KVVEC_ASSIGN 0 /**< Assign from buf in buf2kvvec_prealloc() */ +#define KVVEC_COPY 1 /**< Copy from buf in buf2kvvec_prealloc() */ +#define KVVEC_APPEND 2 /**< Don't reset kvvec in buf2kvvec_prealloc() */ + +/** + * Initialize a previously allocated key/value vector + * + * @param kvv The key/value vector to initialize + * @param hint Number of key/value pairs we expect to store + * @return Pointer to a struct kvvec, properly initialized + */ +extern struct kvvec *kvvec_init(struct kvvec *kvv, int hint); + +/** + * Create a key/value vector + * + * @param hint Number of key/value pairs we expect to store + * @return Pointer to a struct kvvec, properly initialized + */ +extern struct kvvec *kvvec_create(int hint); + +/** + * Resize a key/value vector + * Used by kvvec_grow(). If size is smaller than the current number of + * used key/value slots, -1 is returned. 
+ * + * @param[in] kvv The key/value vector to resize + * @param[in] size The size to grow to + * @return 0 on success, < 0 on errors + */ +extern int kvvec_resize(struct kvvec *kvv, int size); + +/** + * Grow a key/value vector. + * Used internally as needed by the kvvec api. If 'hint' is zero, the + * key/value capacity is increased by a third of the current capacity + * plus a small constant number. This uses kvvec_resize() internally. + * + * @param kvv The key/value vector to grow + * @param hint The amount of key/value slots we should grow by + * @return 0 on success, < 0 on errors + */ +extern int kvvec_grow(struct kvvec *kvv, int hint); + +/** + * Return remaining storage capacity of key/value vector + * @param[in] kvv The key/value vector to check + * @return Number of key/value pairs that can be stored without growing + */ +extern unsigned int kvvec_capacity(struct kvvec *kvv); + +/** + * Sort a key/value vector alphabetically by key name + * @param kvv The key/value vector to sort + * @return 0 + */ +extern int kvvec_sort(struct kvvec *kvv); + +/** + * Add a key/value pair to an existing key/value vector, with + * lengths of strings already calculated + * @param kvv The key/value vector to add this key/value pair to + * @param key The key + * @param keylen Length of the key + * @param value The value + * @param valuelen Length of the value + * @return 0 on success, < 0 on errors + */ +extern int kvvec_addkv_wlen(struct kvvec *kvv, const char *key, int keylen, const char *value, int valuelen); + +/** + * Shortcut to kvvec_addkv_wlen() when lengths aren't known + * @param kvv The key/value vector to add this key/value pair to + * @param key The key + * @param value The value + * @return 0 on success, < 0 on errors + */ +#define kvvec_addkv(kvv, key, value) kvvec_addkv_wlen(kvv, key, 0, value, 0) + +/** + * Walk each key/value pair in a key/value vector, sending them + * as arguments to a callback function. The callback function has + * no control over the iteration process and must not delete or + * modify the key/value vector it's operating on. + * @param kvv The key/value vector to walk + * @param arg Extra argument to the callback function + * @param callback Callback function + * @return 0 on success, < 0 on errors + */ +extern int kvvec_foreach(struct kvvec *kvv, void *arg, int (*callback)(struct key_value *, void *)); + +/** + * Destroy a key/value vector + * @param kvv The key/value vector to destroy + * @param flags or'ed combination of KVVEC_FREE_{KEYS,VALUES}, or KVVEC_FREE_ALL + * @return 0 on success, < 0 on errors + */ +extern int kvvec_destroy(struct kvvec *kvv, int flags); + +/** + * Free key/value pairs associated with a key/value vector + * @param kvv The key/value vector to operate on + * @param flags flags or'ed combination of KVVEC_FREE_{KEYS,VALUES}, or KVVEC_FREE_ALL + */ +void kvvec_free_kvpairs(struct kvvec *kvv, int flags); + +/** + * Create a linear buffer of all the key/value pairs and + * return it as a kvvec_buf. The caller must free() all + * pointers in the returned kvvec_buf + * (FIXME: add kvvec_buf_destroy(), or move this and its counterpart + * out of the kvvec api into a separate one) + * + * @param kvv The key/value vector to convert + * @param kv_sep Character separating keys and their values + * @param pair_sep Character separating key/value pairs + * @param overalloc Integer determining how much extra data we should + * allocate. The overallocated memory is filled with + * nul bytes. 
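As a usage sketch for the kvvec declarations above (not part of the diff): build a vector, sort it, and walk it with a callback. The key/value strings are illustrative; since the vector stores caller-owned pointers (that is what the KVVEC_FREE_* flags are for), string literals are destroyed with flags 0.

```c
#include <stdio.h>
#include "kvvec.h"

static int print_pair(struct key_value *kv, void *arg)
{
	(void)arg;                              /* no extra state needed here */
	printf("%s=%s\n", kv->key, kv->value);
	return 0;
}

int main(void)
{
	struct kvvec *kvv = kvvec_create(4);    /* hint: about 4 pairs expected */

	if (!kvv)
		return 1;
	kvvec_addkv(kvv, "host_name", "web01");
	kvvec_addkv(kvv, "service_description", "HTTP");
	kvvec_sort(kvv);                        /* order pairs by key name */
	kvvec_foreach(kvv, NULL, print_pair);

	/* keys and values are string literals, so don't pass
	 * KVVEC_FREE_KEYS or KVVEC_FREE_VALUES here */
	return kvvec_destroy(kvv, 0);
}
```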
+ * @return A pointer to a newly created kvvec_buf structure + */ +extern struct kvvec_buf *kvvec2buf(struct kvvec *kvv, char kv_sep, char pair_sep, int overalloc); + +/** + * Create a key/value vector from a pre-parsed buffer. Immensely + * useful for ipc in combination with kvvec2buf(). + * + * @param str The buffer to convert to a key/value vector + * @param len Length of buffer to convert + * @param kvsep Character separating key and value + * @param pair_sep Character separating key/value pairs + * @param flags bitmask. See KVVEC_{ASSIGN,COPY,APPEND} for values + * @return The created key/value vector + */ +extern struct kvvec *buf2kvvec(char *str, unsigned int len, const char kvsep, const char pair_sep, int flags); + +/** + * Parse a buffer into the pre-allocated key/value vector. Immensely + * useful for ipc in combination with kvvec2buf(). + * + * @param kvv A pre-allocated key/value vector to populate + * @param str The buffer to convert to a key/value vector + * @param len Length of buffer to convert + * @param kvsep Character separating key and value + * @param pair_sep Character separating key/value pairs + * @param flags bitmask. See KVVEC_{ASSIGN,COPY,APPEND} for values + * @return The number of pairs in the created key/value vector + */ +extern int buf2kvvec_prealloc(struct kvvec *kvv, char *str, unsigned int len, const char kvsep, const char pair_sep, int flags); +/** @} */ +#endif /* INCLUDE_kvvec_h__ */ diff -Nru check-mk-1.2.2p3/nagios4/libnagios.h check-mk-1.2.6p12/nagios4/libnagios.h --- check-mk-1.2.2p3/nagios4/libnagios.h 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/nagios4/libnagios.h 2014-02-10 09:00:20.000000000 +0000 @@ -0,0 +1,25 @@ +#ifndef LIBNAGIOS_libnagios_h__ +#define LIBNAGIOS_libnagios_h__ +/** + * @file libnagios.h + * + * @brief Include this for all public parts of libnagios to be accessible + */ + +#include "lnag-utils.h" +#include "fanout.h" +#include "nsutils.h" +#include "pqueue.h" +#include "squeue.h" +#include "kvvec.h" +#include "iobroker.h" +#include "iocache.h" +#include "runcmd.h" +#include "bitmap.h" +#include "dkhash.h" +#include "worker.h" +#include "skiplist.h" +#include "nsock.h" +#include "nspath.h" +#include "snprintf.h" +#endif /* LIB_libnagios_h__ */ diff -Nru check-mk-1.2.2p3/nagios4/lnag-utils.h check-mk-1.2.6p12/nagios4/lnag-utils.h --- check-mk-1.2.2p3/nagios4/lnag-utils.h 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/nagios4/lnag-utils.h 2014-02-10 09:00:20.000000000 +0000 @@ -0,0 +1,111 @@ +#ifndef LIBNAGIOS_lnag_utils_h__ +#define LIBNAGIOS_lnag_utils_h__ + +#include <unistd.h> /* for sysconf() */ +#include <stdlib.h> /* for rand() */ + +/** + * @file lnag-utils.h + * @brief libnagios helper and compatibility macros that lack a "real" home. + * + * This is the home of random macros that must be present for compilation + * to succeed but are missing on some platforms. + * + * @{ + */ + +#define NAGIOS_MKVERSION(a, b, c) \ + (((a) * 10000) + ((b) * 100) + (c)) + +#ifdef __cplusplus +/** C++ compatibility macro that avoids confusing indentation programs */ +# define NAGIOS_BEGIN_DECL extern "C" { +/** + * Use at end of header file declarations to obtain C++ compatibility + * ... 
without confusing indentation programs + */ +# define NAGIOS_END_DECL } +#else +/** C++ compatibility macro that avoids confusing indentation programs */ +# define NAGIOS_BEGIN_DECL /* nothing */ +/** C++ compatibility macro that avoids confusing indentation programs */ +# define NAGIOS_END_DECL /* more of nothing */ +#endif + +#ifndef NODOXY /* doxy comments are useless here */ +# ifndef __GNUC__ +# define GCC_VERSION 0 +# define __attribute__(x) /* nothing */ +# else +# ifdef __GNUC_PATCHLEVEL__ +# define GCC_VERSION NAGIOS_MKVERSION(__GNUC__, __GNUC_MINOR__, __GNUC_PATCHLEVEL__) +# else +# define GCC_VERSION NAGIOS_MKVERSION(__GNUC__, __GNUC_MINOR__, 0) +# endif /* __GNUC_PATCHLEVEL__ */ +# endif /* __GNUC__ */ +#endif /* NODOXY */ + +#if GCC_VERSION >= NAGIOS_MKVERSION(4, 5, 0) +# define NAGIOS_DEPRECATED(version, hint) \ + __attribute__((deprecated("This function will be removed in Nagios v" #version ". Please use " #hint " instead"))) +#else +/** Macro for alerting module authors to function deprecation */ +# define NAGIOS_DEPRECATED(version, hint) \ + __attribute__((deprecated)) +#endif + +/* + * These macros are widely used throughout Nagios + */ +#define OK 0 /**< Indicates successful function call in Nagios */ +#define ERROR -2 /**< Non-successful function call in Nagios */ + +#ifdef FALSE +#undef FALSE +#endif +#define FALSE 0 /**< Not true */ + +#ifdef TRUE +#undef TRUE +#endif +#define TRUE (!FALSE) /**< Not false */ + +/** Useful macro to safely avoid double-free memory corruption */ +#define my_free(ptr) do { if(ptr) { free(ptr); ptr = NULL; } } while(0) + +#ifndef ARRAY_SIZE +/** Useful for iterating over all elements in a static array */ +# define ARRAY_SIZE(x) (sizeof(x) / sizeof(x[0])) +#endif +#ifndef veclen +/** useful for iterating over all elements in a static array */ +# define veclen ARRAY_SIZE +#endif + +#ifndef offsetof +/** standard offsetof macro */ +# define offsetof(t, f) ((unsigned long)&((t *)0)->f) +#endif + +/** character map initialization for .bss-allocated char maps */ +#define CHAR_MAP_INIT(k) { \ + k, k, k, k, k, k, k, k, k, k, k, k, k, k, k, k, \ + k, k, k, k, k, k, k, k, k, k, k, k, k, k, k, k, \ + k, k, k, k, k, k, k, k, k, k, k, k, k, k, k, k, \ + k, k, k, k, k, k, k, k, k, k, k, k, k, k, k, k, \ + k, k, k, k, k, k, k, k, k, k, k, k, k, k, k, k, \ + k, k, k, k, k, k, k, k, k, k, k, k, k, k, k, k, \ + k, k, k, k, k, k, k, k, k, k, k, k, k, k, k, k, \ + k, k, k, k, k, k, k, k, k, k, k, k, k, k, k, k, \ + k, k, k, k, k, k, k, k, k, k, k, k, k, k, k, k, \ + k, k, k, k, k, k, k, k, k, k, k, k, k, k, k, k, \ + k, k, k, k, k, k, k, k, k, k, k, k, k, k, k, k, \ + k, k, k, k, k, k, k, k, k, k, k, k, k, k, k, k, \ + k, k, k, k, k, k, k, k, k, k, k, k, k, k, k, k, \ + k, k, k, k, k, k, k, k, k, k, k, k, k, k, k, k, \ + k, k, k, k, k, k, k, k, k, k, k, k, k, k, k, k, \ + k, k, k, k, k, k, k, k, k, k, k, k, k, k, k, k, \ + } + +/** @} */ +#endif diff -Nru check-mk-1.2.2p3/nagios4/locations.h check-mk-1.2.6p12/nagios4/locations.h --- check-mk-1.2.2p3/nagios4/locations.h 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/nagios4/locations.h 2014-02-10 09:00:20.000000000 +0000 @@ -0,0 +1,41 @@ +/************************************************************************ + * + * Nagios Locations Header File + * Written By: Ethan Galstad (egalstad@nagios.org) + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
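Closing out lnag-utils.h: a small sketch (not part of the diff) of how the helpers above compose. The tag list and function are invented for illustration.

```c
#include <string.h>
#include "lnag-utils.h"

NAGIOS_BEGIN_DECL                /* expands to 'extern "C" {' only under C++ */

static const char *tags[] = { "alpha", "beta", "gamma" };

int count_tags(void)
{
	unsigned int i, n = 0;
	char *scratch = strdup("temp");

	for (i = 0; i < ARRAY_SIZE(tags); i++)  /* 3, computed at compile time */
		n++;
	my_free(scratch);        /* free()s and NULLs the pointer */
	my_free(scratch);        /* now a harmless no-op, no double free */
	return n ? OK : ERROR;   /* 0 / -2 per the defines above */
}

NAGIOS_END_DECL
```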
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + ************************************************************************/ + +#define DEFAULT_TEMP_FILE "/usr/local/nagios/var/tempfile" +#define DEFAULT_TEMP_PATH "/tmp" +#define DEFAULT_CHECK_RESULT_PATH "/usr/local/nagios/var/spool/checkresults" +#define DEFAULT_STATUS_FILE "/usr/local/nagios/var/status.dat" +#define DEFAULT_LOG_FILE "/usr/local/nagios/var/nagios.log" +#define DEFAULT_LOG_ARCHIVE_PATH "/usr/local/nagios/var/archives/" +#define DEFAULT_DEBUG_FILE "/usr/local/nagios/var/nagios.debug" +#define DEFAULT_COMMENT_FILE "/usr/local/nagios/var/comments.dat" +#define DEFAULT_DOWNTIME_FILE "/usr/local/nagios/var/downtime.dat" +#define DEFAULT_RETENTION_FILE "/usr/local/nagios/var/retention.dat" +#define DEFAULT_COMMAND_FILE "/usr/local/nagios/var/rw/nagios.cmd" +#define DEFAULT_QUERY_SOCKET "/usr/local/nagios/var/rw/nagios.qh" +#define DEFAULT_CONFIG_FILE "/usr/local/nagios/etc/nagios.cfg" +#define DEFAULT_PHYSICAL_HTML_PATH "/usr/local/nagios/share" +#define DEFAULT_URL_HTML_PATH "/nagios" +#define DEFAULT_PHYSICAL_CGIBIN_PATH "/usr/local/nagios/sbin" +#define DEFAULT_URL_CGIBIN_PATH "/nagios/cgi-bin" +#define DEFAULT_CGI_CONFIG_FILE "/usr/local/nagios/etc/cgi.cfg" +#define DEFAULT_LOCK_FILE "/usr/local/nagios/var/nagios.lock" +#define DEFAULT_OBJECT_CACHE_FILE "/usr/local/nagios/var/objects.cache" +#define DEFAULT_PRECACHED_OBJECT_FILE "/usr/local/nagios/var/objects.precache" +#define DEFAULT_EVENT_BROKER_FILE "/usr/local/nagios/var/broker.socket" diff -Nru check-mk-1.2.2p3/nagios4/logging.h check-mk-1.2.6p12/nagios4/logging.h --- check-mk-1.2.2p3/nagios4/logging.h 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/nagios4/logging.h 2014-02-10 09:00:20.000000000 +0000 @@ -0,0 +1,90 @@ +#ifndef INCLUDE_logging_h__ +#define INCLUDE_logging_h__ + +#include "objects.h" + +/******************* LOGGING TYPES ********************/ + +#define NSLOG_RUNTIME_ERROR 1 +#define NSLOG_RUNTIME_WARNING 2 + +#define NSLOG_VERIFICATION_ERROR 4 +#define NSLOG_VERIFICATION_WARNING 8 + +#define NSLOG_CONFIG_ERROR 16 +#define NSLOG_CONFIG_WARNING 32 + +#define NSLOG_PROCESS_INFO 64 +#define NSLOG_EVENT_HANDLER 128 +/*#define NSLOG_NOTIFICATION 256*/ /* NOT USED ANYMORE - CAN BE REUSED */ +#define NSLOG_EXTERNAL_COMMAND 512 + +#define NSLOG_HOST_UP 1024 +#define NSLOG_HOST_DOWN 2048 +#define NSLOG_HOST_UNREACHABLE 4096 + +#define NSLOG_SERVICE_OK 8192 +#define NSLOG_SERVICE_UNKNOWN 16384 +#define NSLOG_SERVICE_WARNING 32768 +#define NSLOG_SERVICE_CRITICAL 65536 + +#define NSLOG_PASSIVE_CHECK 131072 + +#define NSLOG_INFO_MESSAGE 262144 + +#define NSLOG_HOST_NOTIFICATION 524288 +#define NSLOG_SERVICE_NOTIFICATION 1048576 + +/***************** DEBUGGING LEVELS *******************/ + +#define DEBUGL_ALL -1 +#define DEBUGL_NONE 0 +#define DEBUGL_FUNCTIONS 1 +#define DEBUGL_CONFIG 2 +#define DEBUGL_PROCESS 4 +#define DEBUGL_STATUSDATA 4 +#define DEBUGL_RETENTIONDATA 4 +#define DEBUGL_EVENTS 8 +#define DEBUGL_CHECKS 16 +#define DEBUGL_FLAPPING 16 +#define DEBUGL_EVENTHANDLERS 16 +#define DEBUGL_PERFDATA 16 +#define DEBUGL_NOTIFICATIONS 32 +#define 
DEBUGL_EVENTBROKER 64 +#define DEBUGL_EXTERNALCOMMANDS 128 +#define DEBUGL_COMMANDS 256 +#define DEBUGL_DOWNTIME 512 +#define DEBUGL_COMMENTS 1024 +#define DEBUGL_MACROS 2048 +#define DEBUGL_IPC 4096 +#define DEBUGL_SCHEDULING 8192 + +#define DEBUGV_BASIC 0 +#define DEBUGV_MORE 1 +#define DEBUGV_MOST 2 + +NAGIOS_BEGIN_DECL +/**** Logging Functions ****/ +void logit(int, int, const char *, ...) +__attribute__((__format__(__printf__, 3, 4))); +int log_debug_info(int, int, const char *, ...) +__attribute__((__format__(__printf__, 3, 4))); + +#ifndef NSCGI +int write_to_all_logs(char *, unsigned long); /* writes a string to main log file and syslog facility */ +int write_to_log(char *, unsigned long, time_t *); /* write a string to the main log file */ +int write_to_syslog(char *, unsigned long); /* write a string to the syslog facility */ +int log_service_event(service *); /* logs a service event */ +int log_host_event(host *); /* logs a host event */ +int log_host_states(int, time_t *); /* logs initial/current host states */ +int log_service_states(int, time_t *); /* logs initial/current service states */ +int rotate_log_file(time_t); /* rotates the main log file */ +int write_log_file_info(time_t *); /* records log file/version info */ +int open_debug_log(void); +int close_debug_log(void); +int close_log_file(void); +int fix_log_file_owner(uid_t uid, gid_t gid); +#endif /* !NSCGI */ + +NAGIOS_END_DECL +#endif diff -Nru check-mk-1.2.2p3/nagios4/macros.h check-mk-1.2.6p12/nagios4/macros.h --- check-mk-1.2.2p3/nagios4/macros.h 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/nagios4/macros.h 2014-02-10 09:00:20.000000000 +0000 @@ -0,0 +1,339 @@ +/************************************************************************ + * + * MACROS.H - Common macro functions + * Written By: Ethan Galstad (egalstad@nagios.org) + * + * License: + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
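Before macros.h begins, a quick sketch (not part of the diff) of the logging.h entry points just declared. The function, message text, and the 60-second threshold are invented; the second logit() argument is the display flag, with TRUE/FALSE assumed visible via the core headers.

```c
#include "logging.h"

void note_latency(const char *name, double latency)
{
	if (latency > 60.0)
		logit(NSLOG_RUNTIME_WARNING, TRUE,
		      "Warning: check of '%s' is %.1fs overdue\n", name, latency);

	/* emitted only when debug_level includes DEBUGL_CHECKS and
	 * debug_verbosity is at least DEBUGV_MORE */
	log_debug_info(DEBUGL_CHECKS, DEBUGV_MORE,
	               "latency for '%s': %.3fs\n", name, latency);
}
```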
+ ************************************************************************/ + +#ifndef _MACROS_H +#define _MACROS_H + +#include "common.h" +#include "objects.h" + + + +/****************** LENGTH LIMITATIONS ****************/ + +#define MAX_COMMAND_ARGUMENTS 32 /* maximum number of $ARGx$ macros */ + + +/****************** MACRO DEFINITIONS *****************/ + +#define MACRO_ENV_VAR_PREFIX "NAGIOS_" + +#define MAX_USER_MACROS 256 /* maximum number of $USERx$ macros */ + +#define MACRO_X_COUNT 156 /* size of macro_x[] array */ + +NAGIOS_BEGIN_DECL + +struct nagios_macros { + char *x[MACRO_X_COUNT]; + char *argv[MAX_COMMAND_ARGUMENTS]; + char *contactaddress[MAX_CONTACT_ADDRESSES]; + char *ondemand; + host *host_ptr; + hostgroup *hostgroup_ptr; + service *service_ptr; + servicegroup *servicegroup_ptr; + contact *contact_ptr; + contactgroup *contactgroup_ptr; + customvariablesmember *custom_host_vars; + customvariablesmember *custom_service_vars; + customvariablesmember *custom_contact_vars; + }; +typedef struct nagios_macros nagios_macros; + + + +#define MACRO_HOSTNAME 0 +#define MACRO_HOSTALIAS 1 +#define MACRO_HOSTADDRESS 2 +#define MACRO_SERVICEDESC 3 +#define MACRO_SERVICESTATE 4 +#define MACRO_SERVICESTATEID 5 +#define MACRO_SERVICEATTEMPT 6 +#define MACRO_LONGDATETIME 7 +#define MACRO_SHORTDATETIME 8 +#define MACRO_DATE 9 +#define MACRO_TIME 10 +#define MACRO_TIMET 11 +#define MACRO_LASTHOSTCHECK 12 +#define MACRO_LASTSERVICECHECK 13 +#define MACRO_LASTHOSTSTATECHANGE 14 +#define MACRO_LASTSERVICESTATECHANGE 15 +#define MACRO_HOSTOUTPUT 16 +#define MACRO_SERVICEOUTPUT 17 +#define MACRO_HOSTPERFDATA 18 +#define MACRO_SERVICEPERFDATA 19 +#define MACRO_CONTACTNAME 20 +#define MACRO_CONTACTALIAS 21 +#define MACRO_CONTACTEMAIL 22 +#define MACRO_CONTACTPAGER 23 +#define MACRO_ADMINEMAIL 24 +#define MACRO_ADMINPAGER 25 +#define MACRO_HOSTSTATE 26 +#define MACRO_HOSTSTATEID 27 +#define MACRO_HOSTATTEMPT 28 +#define MACRO_NOTIFICATIONTYPE 29 +#define MACRO_NOTIFICATIONNUMBER 30 /* deprecated - see HOSTNOTIFICATIONNUMBER and SERVICENOTIFICATIONNUMBER macros */ +#define MACRO_HOSTEXECUTIONTIME 31 +#define MACRO_SERVICEEXECUTIONTIME 32 +#define MACRO_HOSTLATENCY 33 +#define MACRO_SERVICELATENCY 34 +#define MACRO_HOSTDURATION 35 +#define MACRO_SERVICEDURATION 36 +#define MACRO_HOSTDURATIONSEC 37 +#define MACRO_SERVICEDURATIONSEC 38 +#define MACRO_HOSTDOWNTIME 39 +#define MACRO_SERVICEDOWNTIME 40 +#define MACRO_HOSTSTATETYPE 41 +#define MACRO_SERVICESTATETYPE 42 +#define MACRO_HOSTPERCENTCHANGE 43 +#define MACRO_SERVICEPERCENTCHANGE 44 +#define MACRO_HOSTGROUPNAME 45 +#define MACRO_HOSTGROUPALIAS 46 +#define MACRO_SERVICEGROUPNAME 47 +#define MACRO_SERVICEGROUPALIAS 48 +#define MACRO_HOSTACKAUTHOR 49 +#define MACRO_HOSTACKCOMMENT 50 +#define MACRO_SERVICEACKAUTHOR 51 +#define MACRO_SERVICEACKCOMMENT 52 +#define MACRO_LASTSERVICEOK 53 +#define MACRO_LASTSERVICEWARNING 54 +#define MACRO_LASTSERVICEUNKNOWN 55 +#define MACRO_LASTSERVICECRITICAL 56 +#define MACRO_LASTHOSTUP 57 +#define MACRO_LASTHOSTDOWN 58 +#define MACRO_LASTHOSTUNREACHABLE 59 +#define MACRO_SERVICECHECKCOMMAND 60 +#define MACRO_HOSTCHECKCOMMAND 61 +#define MACRO_MAINCONFIGFILE 62 +#define MACRO_STATUSDATAFILE 63 +#define MACRO_HOSTDISPLAYNAME 64 +#define MACRO_SERVICEDISPLAYNAME 65 +#define MACRO_RETENTIONDATAFILE 66 +#define MACRO_OBJECTCACHEFILE 67 +#define MACRO_TEMPFILE 68 +#define MACRO_LOGFILE 69 +#define MACRO_RESOURCEFILE 70 +#define MACRO_COMMANDFILE 71 +#define MACRO_HOSTPERFDATAFILE 72 +#define MACRO_SERVICEPERFDATAFILE 73 
+#define MACRO_HOSTACTIONURL 74 +#define MACRO_HOSTNOTESURL 75 +#define MACRO_HOSTNOTES 76 +#define MACRO_SERVICEACTIONURL 77 +#define MACRO_SERVICENOTESURL 78 +#define MACRO_SERVICENOTES 79 +#define MACRO_TOTALHOSTSUP 80 +#define MACRO_TOTALHOSTSDOWN 81 +#define MACRO_TOTALHOSTSUNREACHABLE 82 +#define MACRO_TOTALHOSTSDOWNUNHANDLED 83 +#define MACRO_TOTALHOSTSUNREACHABLEUNHANDLED 84 +#define MACRO_TOTALHOSTPROBLEMS 85 +#define MACRO_TOTALHOSTPROBLEMSUNHANDLED 86 +#define MACRO_TOTALSERVICESOK 87 +#define MACRO_TOTALSERVICESWARNING 88 +#define MACRO_TOTALSERVICESCRITICAL 89 +#define MACRO_TOTALSERVICESUNKNOWN 90 +#define MACRO_TOTALSERVICESWARNINGUNHANDLED 91 +#define MACRO_TOTALSERVICESCRITICALUNHANDLED 92 +#define MACRO_TOTALSERVICESUNKNOWNUNHANDLED 93 +#define MACRO_TOTALSERVICEPROBLEMS 94 +#define MACRO_TOTALSERVICEPROBLEMSUNHANDLED 95 +#define MACRO_PROCESSSTARTTIME 96 +#define MACRO_HOSTCHECKTYPE 97 +#define MACRO_SERVICECHECKTYPE 98 +#define MACRO_LONGHOSTOUTPUT 99 +#define MACRO_LONGSERVICEOUTPUT 100 +#define MACRO_TEMPPATH 101 +#define MACRO_HOSTNOTIFICATIONNUMBER 102 +#define MACRO_SERVICENOTIFICATIONNUMBER 103 +#define MACRO_HOSTNOTIFICATIONID 104 +#define MACRO_SERVICENOTIFICATIONID 105 +#define MACRO_HOSTEVENTID 106 +#define MACRO_LASTHOSTEVENTID 107 +#define MACRO_SERVICEEVENTID 108 +#define MACRO_LASTSERVICEEVENTID 109 +#define MACRO_HOSTGROUPNAMES 110 +#define MACRO_SERVICEGROUPNAMES 111 +#define MACRO_HOSTACKAUTHORNAME 112 +#define MACRO_HOSTACKAUTHORALIAS 113 +#define MACRO_SERVICEACKAUTHORNAME 114 +#define MACRO_SERVICEACKAUTHORALIAS 115 +#define MACRO_MAXHOSTATTEMPTS 116 +#define MACRO_MAXSERVICEATTEMPTS 117 +#define MACRO_SERVICEISVOLATILE 118 +#define MACRO_TOTALHOSTSERVICES 119 +#define MACRO_TOTALHOSTSERVICESOK 120 +#define MACRO_TOTALHOSTSERVICESWARNING 121 +#define MACRO_TOTALHOSTSERVICESUNKNOWN 122 +#define MACRO_TOTALHOSTSERVICESCRITICAL 123 +#define MACRO_HOSTGROUPNOTES 124 +#define MACRO_HOSTGROUPNOTESURL 125 +#define MACRO_HOSTGROUPACTIONURL 126 +#define MACRO_SERVICEGROUPNOTES 127 +#define MACRO_SERVICEGROUPNOTESURL 128 +#define MACRO_SERVICEGROUPACTIONURL 129 +#define MACRO_HOSTGROUPMEMBERS 130 +#define MACRO_SERVICEGROUPMEMBERS 131 +#define MACRO_CONTACTGROUPNAME 132 +#define MACRO_CONTACTGROUPALIAS 133 +#define MACRO_CONTACTGROUPMEMBERS 134 +#define MACRO_CONTACTGROUPNAMES 135 +#define MACRO_NOTIFICATIONRECIPIENTS 136 +#define MACRO_NOTIFICATIONISESCALATED 137 +#define MACRO_NOTIFICATIONAUTHOR 138 +#define MACRO_NOTIFICATIONAUTHORNAME 139 +#define MACRO_NOTIFICATIONAUTHORALIAS 140 +#define MACRO_NOTIFICATIONCOMMENT 141 +#define MACRO_EVENTSTARTTIME 142 +#define MACRO_HOSTPROBLEMID 143 +#define MACRO_LASTHOSTPROBLEMID 144 +#define MACRO_SERVICEPROBLEMID 145 +#define MACRO_LASTSERVICEPROBLEMID 146 +#define MACRO_ISVALIDTIME 147 +#define MACRO_NEXTVALIDTIME 148 +#define MACRO_LASTHOSTSTATE 149 +#define MACRO_LASTHOSTSTATEID 150 +#define MACRO_LASTSERVICESTATE 151 +#define MACRO_LASTSERVICESTATEID 152 +#define MACRO_HOSTVALUE 153 +#define MACRO_SERVICEVALUE 154 +#define MACRO_PROBLEMVALUE 155 + + +/************* MACRO CLEANING OPTIONS *****************/ + +#define STRIP_ILLEGAL_MACRO_CHARS 1 +#define ESCAPE_MACRO_CHARS 2 +#define URL_ENCODE_MACRO_CHARS 4 + + + +/****************** MACRO FUNCTIONS ******************/ + +nagios_macros *get_global_macros(void); + +/* + * Replace macros with their actual values + * This function modifies the global_macros struct and is thus + * not thread-safe. 
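A sketch of the expansion flow these entry points (documented here and declared immediately below) support; it is not part of the diff. The zero-initialization, the $...$ template, and the assumption that the expanded string is heap-allocated for the caller to free follow common core usage but are not guaranteed by this header alone.

```c
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "macros.h"

void print_host_banner(host *hst)
{
	nagios_macros mac;
	char tmpl[] = "Host $HOSTNAME$ is $HOSTSTATE$";
	char *out = NULL;

	memset(&mac, 0, sizeof(mac));           /* all macro slots start NULL */
	grab_host_macros_r(&mac, hst);          /* populate host-related macros */
	process_macros_r(&mac, tmpl, &out, 0);  /* 0: no cleaning options */
	if (out) {
		printf("%s\n", out);
		free(out);                      /* assumed caller-owned */
	}
	clear_host_macros_r(&mac);
	clear_volatile_macros_r(&mac);
}
```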
+ */ +int process_macros(char *, char **, int); + +/* thread-safe version of the above */ +int process_macros_r(nagios_macros *mac, char *, char **, int); + +/* cleans macro characters before insertion into output string */ +char *clean_macro_chars(char *, int); + +/* + * These functions update **macros with the values from + * their respective object type. + */ + +int grab_service_macros(service *); +int grab_host_macros(host *); +int grab_servicegroup_macros(servicegroup *); +int grab_hostgroup_macros(hostgroup *); +int grab_contact_macros(contact *); + +int grab_macro_value(char *, char **, int *, int *); +int grab_macrox_value(int, char *, char *, char **, int *); +int grab_custom_macro_value(char *, char *, char *, char **); +int grab_datetime_macro(int, char *, char *, char **); +int grab_standard_host_macro(int, host *, char **, int *); +int grab_standard_hostgroup_macro(int, hostgroup *, char **); +int grab_standard_service_macro(int, service *, char **, int *); +int grab_standard_servicegroup_macro(int, servicegroup *, char **); +int grab_standard_contact_macro(int, contact *, char **); +int grab_contact_address_macro(int, contact *, char **); +int grab_standard_contactgroup_macro(int, contactgroup *, char **); +int grab_custom_object_macro(char *, customvariablesmember *, char **); + +/* thread-safe version of the above */ +int grab_service_macros_r(nagios_macros *mac, service *); +int grab_host_macros_r(nagios_macros *mac, host *); +int grab_servicegroup_macros_r(nagios_macros *mac, servicegroup *); +int grab_hostgroup_macros_r(nagios_macros *mac, hostgroup *); +int grab_contact_macros_r(nagios_macros *mac, contact *); + +int grab_macro_value_r(nagios_macros *mac, char *, char **, int *, int *); +int grab_macrox_value_r(nagios_macros *mac, int, char *, char *, char **, int *); +int grab_custom_macro_value_r(nagios_macros *mac, char *, char *, char *, char **); +int grab_datetime_macro_r(nagios_macros *mac, int, char *, char *, char **); +int grab_standard_host_macro_r(nagios_macros *mac, int, host *, char **, int *); +int grab_standard_hostgroup_macro_r(nagios_macros *mac, int, hostgroup *, char **); +int grab_standard_service_macro_r(nagios_macros *mac, int, service *, char **, int *); +int grab_standard_servicegroup_macro_r(nagios_macros *mac, int, servicegroup *, char **); +int grab_standard_contact_macro_r(nagios_macros *mac, int, contact *, char **); +int grab_custom_object_macro_r(nagios_macros *mac, char *, customvariablesmember *, char **); + + +char *get_url_encoded_string(char *); /* URL encode a string */ + +int init_macros(void); +int init_macrox_names(void); +int free_macrox_names(void); + +extern void copy_constant_macros(char **dest); + +/* clear macros */ +int clear_argv_macros(void); +int clear_volatile_macros(void); +int clear_host_macros(void); +int clear_service_macros(void); +int clear_hostgroup_macros(void); +int clear_servicegroup_macros(void); +int clear_contact_macros(void); +int clear_contactgroup_macros(void); +int clear_summary_macros(void); + +/* thread-safe version of the above */ +int clear_argv_macros_r(nagios_macros *mac); +int clear_volatile_macros_r(nagios_macros *mac); +int clear_host_macros_r(nagios_macros *mac); +int clear_service_macros_r(nagios_macros *mac); +int clear_hostgroup_macros_r(nagios_macros *mac); +int clear_servicegroup_macros_r(nagios_macros *mac); +int clear_contact_macros_r(nagios_macros *mac); +int clear_contactgroup_macros_r(nagios_macros *mac); +int clear_summary_macros_r(nagios_macros *mac); + + +#ifndef NSCGI +int 
set_all_macro_environment_vars(int); +int set_macrox_environment_vars(int); +int set_argv_macro_environment_vars(int); +int set_custom_macro_environment_vars(int); +int set_contact_address_environment_vars(int); +int set_macro_environment_var(char *, char *, int); + +/* thread-safe version of the above */ +int set_all_macro_environment_vars_r(nagios_macros *mac, int); +int set_macrox_environment_vars_r(nagios_macros *mac, int); +int set_argv_macro_environment_vars_r(nagios_macros *mac, int); +int set_custom_macro_environment_vars_r(nagios_macros *mac, int); +int set_contact_address_environment_vars_r(nagios_macros *mac, int); + +#endif + +NAGIOS_END_DECL +#endif diff -Nru check-mk-1.2.2p3/nagios4/nagios.h check-mk-1.2.6p12/nagios4/nagios.h --- check-mk-1.2.2p3/nagios4/nagios.h 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/nagios4/nagios.h 2014-02-10 09:00:20.000000000 +0000 @@ -0,0 +1,756 @@ +/************************************************************************ + * + * Nagios Main Header File + * Written By: Ethan Galstad (egalstad@nagios.org) + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + ************************************************************************/ + +#ifndef _NAGIOS_H +#define _NAGIOS_H + +#ifndef NSCORE +# define NSCORE +#endif + +#include "defaults.h" +#include "common.h" +#include "logging.h" +#include "locations.h" +#include "objects.h" +#include "macros.h" +#include "config.h" + +/* + * global variables only used in the core. Reducing this list would be + * a Good Thing(tm). 
+ */ +extern char *nagios_binary_path; +extern char *config_file; +extern char *command_file; +extern char *temp_file; +extern char *temp_path; +extern char *check_result_path; +extern char *lock_file; +extern char *object_precache_file; + +extern unsigned int nofile_limit, nproc_limit, max_apps; + +extern int num_check_workers; +extern char *qh_socket_path; + +extern char *nagios_user; +extern char *nagios_group; + +extern char *macro_user[MAX_USER_MACROS]; + +extern char *ocsp_command; +extern char *ochp_command; +extern command *ocsp_command_ptr; +extern command *ochp_command_ptr; +extern int ocsp_timeout; +extern int ochp_timeout; + +extern char *global_host_event_handler; +extern char *global_service_event_handler; +extern command *global_host_event_handler_ptr; +extern command *global_service_event_handler_ptr; + +extern char *illegal_object_chars; + +extern int use_regexp_matches; +extern int use_true_regexp_matching; + +extern int use_syslog; +extern char *log_file; +extern char *log_archive_path; +extern int log_notifications; +extern int log_service_retries; +extern int log_host_retries; +extern int log_event_handlers; +extern int log_external_commands; +extern int log_passive_checks; +extern unsigned long logging_options; +extern unsigned long syslog_options; + +extern int service_check_timeout; +extern int service_check_timeout_state; +extern int host_check_timeout; +extern int event_handler_timeout; +extern int notification_timeout; + +extern int log_initial_states; +extern int log_current_states; + +extern int daemon_dumps_core; +extern int sig_id; +extern int caught_signal; + + +extern int verify_config; +extern int test_scheduling; +extern int precache_objects; +extern int use_precached_objects; + +extern int service_inter_check_delay_method; +extern int host_inter_check_delay_method; +extern int service_interleave_factor_method; +extern int max_host_check_spread; +extern int max_service_check_spread; + +extern sched_info scheduling_info; + +extern int max_parallel_service_checks; + +extern int check_reaper_interval; +extern int max_check_reaper_time; +extern int service_freshness_check_interval; +extern int host_freshness_check_interval; +extern int auto_rescheduling_interval; +extern int auto_rescheduling_window; + +extern int check_orphaned_services; +extern int check_orphaned_hosts; +extern int check_service_freshness; +extern int check_host_freshness; +extern int auto_reschedule_checks; + +extern int additional_freshness_latency; + +extern int check_for_updates; +extern int bare_update_check; +extern time_t last_update_check; +extern unsigned long update_uid; +extern int update_available; +extern char *last_program_version; +extern char *new_program_version; + +extern int use_aggressive_host_checking; +extern time_t cached_host_check_horizon; +extern time_t cached_service_check_horizon; +extern int enable_predictive_host_dependency_checks; +extern int enable_predictive_service_dependency_checks; + +extern int soft_state_dependencies; + +extern int retain_state_information; +extern int retention_update_interval; +extern int use_retained_program_state; +extern int use_retained_scheduling_info; +extern int retention_scheduling_horizon; +extern char *retention_file; +extern unsigned long retained_host_attribute_mask; +extern unsigned long retained_service_attribute_mask; +extern unsigned long retained_contact_host_attribute_mask; +extern unsigned long retained_contact_service_attribute_mask; +extern unsigned long retained_process_host_attribute_mask; +extern unsigned long 
retained_process_service_attribute_mask; + +extern int translate_passive_host_checks; +extern int passive_host_checks_are_soft; + +extern int status_update_interval; +extern char *retention_file; + +extern int time_change_threshold; + +extern unsigned long event_broker_options; + +extern double low_service_flap_threshold; +extern double high_service_flap_threshold; +extern double low_host_flap_threshold; +extern double high_host_flap_threshold; + +extern int use_large_installation_tweaks; +extern int enable_environment_macros; +extern int free_child_process_memory; +extern int child_processes_fork_twice; + +extern char *use_timezone; + +extern time_t max_check_result_file_age; + +extern char *debug_file; +extern int debug_level; +extern int debug_verbosity; +extern unsigned long max_debug_file_size; + +extern int allow_empty_hostgroup_assignment; + +extern time_t last_program_stop; +extern time_t event_start; + +extern int sigshutdown, sigrestart; +extern int currently_running_service_checks; +extern int currently_running_host_checks; + +extern unsigned long next_event_id; +extern unsigned long next_problem_id; +extern unsigned long next_comment_id; +extern unsigned long next_notification_id; + +extern unsigned long modified_process_attributes; +extern unsigned long modified_host_process_attributes; +extern unsigned long modified_service_process_attributes; + +extern squeue_t *nagios_squeue; +extern iobroker_set *nagios_iobs; + +extern struct check_stats check_statistics[MAX_CHECK_STATS_TYPES]; + +/*** perfdata variables ***/ +extern int perfdata_timeout; +extern char *host_perfdata_command; +extern char *service_perfdata_command; +extern char *host_perfdata_file_template; +extern char *service_perfdata_file_template; +extern char *host_perfdata_file; +extern char *service_perfdata_file; +extern int host_perfdata_file_append; +extern int service_perfdata_file_append; +extern int host_perfdata_file_pipe; +extern int service_perfdata_file_pipe; +extern unsigned long host_perfdata_file_processing_interval; +extern unsigned long service_perfdata_file_processing_interval; +extern char *host_perfdata_file_processing_command; +extern char *service_perfdata_file_processing_command; +extern int host_perfdata_process_empty_results; +extern int service_perfdata_process_empty_results; +/*** end perfdata variables */ + +extern struct notify_list *notification_list; + +extern struct check_engine nagios_check_engine; + +/* + * Everything we need to keep system load in check. + * Don't use this from modules. 
+ */ +struct load_control { + time_t last_check; /* last time we checked the real load */ + time_t last_change; /* last time we changed settings */ + time_t check_interval; /* seconds between load checks */ + double load[3]; /* system load, as reported by getloadavg() */ + float backoff_limit; /* limit we must reach before we back off */ + float rampup_limit; /* limit we must reach before we ramp back up */ + unsigned int backoff_change; /* backoff by this much */ + unsigned int rampup_change; /* ramp up by this much */ + unsigned int changes; /* number of times we've changed settings */ + unsigned int jobs_max; /* upper setting for jobs_limit */ + unsigned int jobs_limit; /* current limit */ + unsigned int jobs_min; /* lower setting for jobs_limit */ + unsigned int jobs_running; /* jobs currently running */ + unsigned int nproc_limit; /* rlimit for user processes */ + unsigned int nofile_limit; /* rlimit for open files */ + unsigned int options; /* various option flags */ +}; +extern struct load_control loadctl; + +/* options for load control */ +#define LOADCTL_ENABLED (1 << 0) + + + /************* MISC LENGTH/SIZE DEFINITIONS ***********/ + + /* + NOTE: Plugin length is artificially capped at 8k to prevent runaway plugins from returning MBs/GBs of data + back to Nagios. If you increase the 8k cap by modifying this value, make sure you also increase the value + of MAX_EXTERNAL_COMMAND_LENGTH in common.h to allow for passive checks results received through the external + command file. EG 10/19/07 + */ +#define MAX_PLUGIN_OUTPUT_LENGTH 8192 /* max length of plugin output (including perf data) */ + + + /******************* STATE LOGGING TYPES **************/ + +#define INITIAL_STATES 1 +#define CURRENT_STATES 2 + + + + /************ SERVICE DEPENDENCY VALUES ***************/ + +#define DEPENDENCIES_OK 0 +#define DEPENDENCIES_FAILED 1 + + + + /*********** ROUTE CHECK PROPAGATION TYPES ************/ + +#define PROPAGATE_TO_PARENT_HOSTS 1 +#define PROPAGATE_TO_CHILD_HOSTS 2 + + + + /****************** FLAPPING TYPES ********************/ + +#define HOST_FLAPPING 0 +#define SERVICE_FLAPPING 1 + + + + /**************** NOTIFICATION TYPES ******************/ + +#define HOST_NOTIFICATION 0 +#define SERVICE_NOTIFICATION 1 + + + + /************* NOTIFICATION REASON TYPES ***************/ + +#define NOTIFICATION_NORMAL 0 +#define NOTIFICATION_ACKNOWLEDGEMENT 1 +#define NOTIFICATION_FLAPPINGSTART 2 +#define NOTIFICATION_FLAPPINGSTOP 3 +#define NOTIFICATION_FLAPPINGDISABLED 4 +#define NOTIFICATION_DOWNTIMESTART 5 +#define NOTIFICATION_DOWNTIMEEND 6 +#define NOTIFICATION_DOWNTIMECANCELLED 7 +#define NOTIFICATION_CUSTOM 8 + + + + /**************** EVENT HANDLER TYPES *****************/ + +#define HOST_EVENTHANDLER 0 +#define SERVICE_EVENTHANDLER 1 +#define GLOBAL_HOST_EVENTHANDLER 2 +#define GLOBAL_SERVICE_EVENTHANDLER 3 + + + + /***************** STATE CHANGE TYPES *****************/ + +#define HOST_STATECHANGE 0 +#define SERVICE_STATECHANGE 1 + + + + /***************** OBJECT CHECK TYPES *****************/ +#define SERVICE_CHECK 0 +#define HOST_CHECK 1 + + + + /******************* EVENT TYPES **********************/ + +#define EVENT_SERVICE_CHECK 0 /* active service check */ +#define EVENT_COMMAND_CHECK 1 /* external command check */ +#define EVENT_LOG_ROTATION 2 /* log file rotation */ +#define EVENT_PROGRAM_SHUTDOWN 3 /* program shutdown */ +#define EVENT_PROGRAM_RESTART 4 /* program restart */ +#define EVENT_CHECK_REAPER 5 /* reaps results from host and service checks */ +#define EVENT_ORPHAN_CHECK 
6 /* checks for orphaned hosts and services */ +#define EVENT_RETENTION_SAVE 7 /* save (dump) retention data */ +#define EVENT_STATUS_SAVE 8 /* save (dump) status data */ +#define EVENT_SCHEDULED_DOWNTIME 9 /* scheduled host or service downtime */ +#define EVENT_SFRESHNESS_CHECK 10 /* checks service result "freshness" */ +#define EVENT_EXPIRE_DOWNTIME 11 /* checks for (and removes) expired scheduled downtime */ +#define EVENT_HOST_CHECK 12 /* active host check */ +#define EVENT_HFRESHNESS_CHECK 13 /* checks host result "freshness" */ +#define EVENT_RESCHEDULE_CHECKS 14 /* adjust scheduling of host and service checks */ +#define EVENT_EXPIRE_COMMENT 15 /* removes expired comments */ +#define EVENT_CHECK_PROGRAM_UPDATE 16 /* checks for new version of Nagios */ +#define EVENT_SLEEP 98 /* asynchronous sleep event that occurs when event queues are empty */ +#define EVENT_USER_FUNCTION 99 /* USER-defined function (modules) */ + +/* + * VERSIONFIX: Make EVENT_SLEEP and EVENT_USER_FUNCTION appear + * linearly in order. + */ + +#define EVENT_TYPE_STR(type) ( \ + type == EVENT_SERVICE_CHECK ? "SERVICE_CHECK" : \ + type == EVENT_COMMAND_CHECK ? "COMMAND_CHECK" : \ + type == EVENT_LOG_ROTATION ? "LOG_ROTATION" : \ + type == EVENT_PROGRAM_SHUTDOWN ? "PROGRAM_SHUTDOWN" : \ + type == EVENT_PROGRAM_RESTART ? "PROGRAM_RESTART" : \ + type == EVENT_CHECK_REAPER ? "CHECK_REAPER" : \ + type == EVENT_ORPHAN_CHECK ? "ORPHAN_CHECK" : \ + type == EVENT_RETENTION_SAVE ? "RETENTION_SAVE" : \ + type == EVENT_STATUS_SAVE ? "STATUS_SAVE" : \ + type == EVENT_SCHEDULED_DOWNTIME ? "SCHEDULED_DOWNTIME" : \ + type == EVENT_SFRESHNESS_CHECK ? "SFRESHNESS_CHECK" : \ + type == EVENT_EXPIRE_DOWNTIME ? "EXPIRE_DOWNTIME" : \ + type == EVENT_HOST_CHECK ? "HOST_CHECK" : \ + type == EVENT_HFRESHNESS_CHECK ? "HFRESHNESS_CHECK" : \ + type == EVENT_RESCHEDULE_CHECKS ? "RESCHEDULE_CHECKS" : \ + type == EVENT_EXPIRE_COMMENT ? "EXPIRE_COMMENT" : \ + type == EVENT_CHECK_PROGRAM_UPDATE ? "CHECK_PROGRAM_UPDATE" : \ + type == EVENT_SLEEP ? "SLEEP" : \ + type == EVENT_USER_FUNCTION ? "USER_FUNCTION" : \ + "UNKNOWN" \ +) + + + + /******* INTER-CHECK DELAY CALCULATION TYPES **********/ + +#define ICD_NONE 0 /* no inter-check delay */ +#define ICD_DUMB 1 /* dumb delay of 1 second */ +#define ICD_SMART 2 /* smart delay */ +#define ICD_USER 3 /* user-specified delay */ + + + + /******* INTERLEAVE FACTOR CALCULATION TYPES **********/ + +#define ILF_USER 0 /* user-specified interleave factor */ +#define ILF_SMART 1 /* smart interleave */ + + + + /************ SCHEDULED DOWNTIME TYPES ****************/ + +#define ACTIVE_DOWNTIME 0 /* active downtime - currently in effect */ +#define PENDING_DOWNTIME 1 /* pending downtime - scheduled for the future */ + + +NAGIOS_BEGIN_DECL + +/* useful for hosts and services to determine time 'til next check */ +#define normal_check_window(o) ((time_t)(o->check_interval * interval_length)) +#define retry_check_window(o) ((time_t)(o->retry_interval * interval_length)) +#define check_window(o) \ + ((!o->current_state && o->state_type == SOFT_STATE) ? 
\ + retry_check_window(o) : \ + normal_check_window(o)) + +/** Nerd subscription type */ +struct nerd_subscription { + int sd; + struct nerd_channel *chan; + char *format; /* requested format (macro string) for this subscription */ +}; + +/******************** FUNCTIONS **********************/ +extern int set_loadctl_options(char *opts, unsigned int len); + +/* silly helpers useful pretty much all over the place */ +extern const char *service_state_name(int state); +extern const char *host_state_name(int state); +extern const char *state_type_name(int state_type); +extern const char *check_type_name(int check_type); +extern const char *check_result_source(check_result *cr); + +/*** Nagios Event Radio Dispatcher functions ***/ +extern int nerd_init(void); +extern int nerd_mkchan(const char *name, const char *description, int (*handler)(int, void *), unsigned int callbacks); +extern int nerd_cancel_subscriber(int sd); +extern int nerd_get_channel_id(const char *chan_name); +extern objectlist *nerd_get_subscriptions(int chan_id); +extern int nerd_broadcast(unsigned int chan_id, void *buf, unsigned int len); + +/*** Query Handler functions, types and macros*/ +typedef int (*qh_handler)(int, char *, unsigned int); +extern int dump_event_stats(int sd); + +/* return codes for query_handlers() */ +#define QH_OK 0 /* keep listening */ +#define QH_CLOSE 1 /* we should close the socket */ +#define QH_INVALID 2 /* invalid query. Log and close */ +#define QH_TAKEOVER 3 /* handler will take full control. de-register but don't close */ +extern int qh_init(const char *path); +extern void qh_deinit(const char *path); +extern int qh_register_handler(const char *name, const char *description, unsigned int options, qh_handler handler); +extern const char *qh_strerror(int code); + +/**** Configuration Functions ****/ +int read_main_config_file(char *); /* reads the main config file (nagios.cfg) */ +int read_resource_file(char *); /* processes macros in resource file */ +int read_all_object_data(char *); /* reads all object config data */ + + +/**** Setup Functions ****/ +int pre_flight_check(void); /* try and verify the configuration data */ +int pre_flight_object_check(int *, int *); /* verify object relationships and settings */ +int pre_flight_circular_check(int *, int *); /* detects circular dependencies and paths */ +void init_timing_loop(void); /* setup the initial scheduling queue */ +void setup_sighandler(void); /* trap signals */ +void reset_sighandler(void); /* reset signals to default action */ +extern void handle_sigxfsz(int); /* handle SIGXFSZ */ + +int daemon_init(void); /* switches to daemon mode */ +int drop_privileges(char *, char *); /* drops privileges before startup */ +void display_scheduling_info(void); /* displays service check scheduling information */ + + +/**** Event Queue Functions ****/ +int init_event_queue(void); /* creates the queue nagios_squeue */ +timed_event *schedule_new_event(int, int, time_t, int, unsigned long, void *, int, void *, void *, int); /* schedules a new timed event */ +void reschedule_event(squeue_t *sq, timed_event *event); /* reschedules an event */ +void add_event(squeue_t *sq, timed_event *event); /* adds an event to the execution queue */ +void remove_event(squeue_t *sq, timed_event *event); /* remove an event from the execution queue */ +int event_execution_loop(void); /* main monitoring/event handler loop */ +int handle_timed_event(timed_event *); /* top level handler for timed events */ +void adjust_check_scheduling(void); /* auto-adjusts scheduling of 
host and service checks */ +void compensate_for_system_time_change(unsigned long, unsigned long); /* attempts to compensate for a change in the system time */ +void adjust_timestamp_for_time_change(time_t, time_t, unsigned long, time_t *); /* adjusts a timestamp variable for a system time change */ + + +/**** IPC Functions ****/ +int process_check_result_queue(char *); +int process_check_result_file(char *); +int process_check_result(check_result *); +int delete_check_result_file(char *); +int init_check_result(check_result *); +int free_check_result(check_result *); /* frees memory associated with a host/service check result */ +int parse_check_output(char *, char **, char **, char **, int, int); +int open_command_file(void); /* creates the external command file as a named pipe (FIFO) and opens it for reading */ +int close_command_file(void); /* closes and deletes the external command file (FIFO) */ + + +/**** Monitoring/Event Handler Functions ****/ +int check_service_dependencies(service *, int); /* checks service dependencies */ +int check_host_dependencies(host *, int); /* checks host dependencies */ +void check_for_orphaned_services(void); /* checks for orphaned services */ +void check_for_orphaned_hosts(void); /* checks for orphaned hosts */ +void check_service_result_freshness(void); /* checks the "freshness" of service check results */ +int is_service_result_fresh(service *, time_t, int); /* determines if a service's check results are fresh */ +void check_host_result_freshness(void); /* checks the "freshness" of host check results */ +int is_host_result_fresh(host *, time_t, int); /* determines if a host's check results are fresh */ +int my_system(char *, int, int *, double *, char **, int); /* executes a command via popen(), but also protects against timeouts */ +int my_system_r(nagios_macros *mac, char *, int, int *, double *, char **, int); /* thread-safe version of the above */ + + +/**** Flap Detection Functions ****/ +void check_for_service_flapping(service *, int, int); /* determines whether or not a service is "flapping" between states */ +void check_for_host_flapping(host *, int, int, int); /* determines whether or not a host is "flapping" between states */ +void set_service_flap(service *, double, double, double, int); /* handles a service that is flapping */ +void clear_service_flap(service *, double, double, double); /* handles a service that has stopped flapping */ +void set_host_flap(host *, double, double, double, int); /* handles a host that is flapping */ +void clear_host_flap(host *, double, double, double); /* handles a host that has stopped flapping */ +void enable_flap_detection_routines(void); /* enables flap detection on a program-wide basis */ +void disable_flap_detection_routines(void); /* disables flap detection on a program-wide basis */ +void enable_host_flap_detection(host *); /* enables flap detection for a particular host */ +void disable_host_flap_detection(host *); /* disables flap detection for a particular host */ +void enable_service_flap_detection(service *); /* enables flap detection for a particular service */ +void disable_service_flap_detection(service *); /* disables flap detection for a particular service */ +void handle_host_flap_detection_disabled(host *); /* handles the details when flap detection is disabled globally or on a per-host basis */ +void handle_service_flap_detection_disabled(service *); /* handles the details when flap detection is disabled globally or on a per-service basis */ + + +/**** Route/Host Check Functions ****/ 
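Looping back to the query-handler interface declared a few hunks up (qh_handler, qh_register_handler and the QH_* return codes): a minimal sketch, not part of the diff. The channel name, reply text, and the reading of the handler arguments as (socket, query, length) are assumptions based on the typedef.

```c
#include <string.h>
#include <unistd.h>
#include "nagios.h"

/* toy handler: greet on any query, close on "quit" */
static int hello_handler(int sd, char *buf, unsigned int len)
{
	(void)len;
	if (!strcmp(buf, "quit"))
		return QH_CLOSE;             /* ask the core to close the socket */
	if (write(sd, "hello\n", 6) < 0)
		return QH_CLOSE;
	return QH_OK;                        /* keep listening for queries */
}

void register_hello_channel(void)
{
	/* "hello" is a hypothetical channel name; a negative return is
	 * assumed to signal failure */
	if (qh_register_handler("hello", "Toy query handler", 0, hello_handler) < 0)
		logit(NSLOG_RUNTIME_WARNING, TRUE, "failed to register qh channel\n");
}
```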
+int check_host_check_viability(host *, int, int *, time_t *); +int adjust_host_check_attempt(host *, int); +int determine_host_reachability(host *); +int process_host_check_result(host *, int, char *, int, int, int, unsigned long); +int perform_on_demand_host_check(host *, int *, int, int, unsigned long); +int execute_sync_host_check(host *); +int run_scheduled_host_check(host *, int, double); +int run_async_host_check(host *, int, double, int, int, int *, time_t *); +int handle_async_host_check_result(host *, check_result *); + + +/**** Service Check Functions ****/ +int check_service_check_viability(service *, int, int *, time_t *); +int run_scheduled_service_check(service *, int, double); +int run_async_service_check(service *, int, double, int, int, int *, time_t *); +int handle_async_service_check_result(service *, check_result *); + + +/**** Event Handler Functions ****/ +int handle_host_state(host *); /* top level host state handler */ + + +/**** Common Check Functions ****/ +int reap_check_results(void); + + +/**** Check Statistics Functions ****/ +int init_check_stats(void); +int update_check_stats(int, time_t); +int generate_check_stats(void); + + +/**** Event Handler Functions ****/ +int obsessive_compulsive_service_check_processor(service *); /* distributed monitoring craziness... */ +int obsessive_compulsive_host_check_processor(host *); /* distributed monitoring craziness... */ +int handle_service_event(service *); /* top level service event logic */ +int run_service_event_handler(nagios_macros *mac, service *); /* runs the event handler for a specific service */ +int run_global_service_event_handler(nagios_macros *mac, service *); /* runs the global service event handler */ +int handle_host_event(host *); /* top level host event logic */ +int run_host_event_handler(nagios_macros *mac, host *); /* runs the event handler for a specific host */ +int run_global_host_event_handler(nagios_macros *mac, host *); /* runs the global host event handler */ + + +/**** Notification Functions ****/ +const char *notification_reason_name(unsigned int reason_type); +int check_service_notification_viability(service *, int, int); /* checks viability of notifying all contacts about a service */ +int is_valid_escalation_for_service_notification(service *, serviceescalation *, int); /* checks if an escalation entry is valid for a particular service notification */ +int should_service_notification_be_escalated(service *); /* checks if a service notification should be escalated */ +int service_notification(service *, int, char *, char *, int); /* notify all contacts about a service (problem or recovery) */ +int check_contact_service_notification_viability(contact *, service *, int, int); /* checks viability of notifying a contact about a service */ +int notify_contact_of_service(nagios_macros *mac, contact *, service *, int, char *, char *, int, int); /* notify a single contact about a service */ +int check_host_notification_viability(host *, int, int); /* checks viability of notifying all contacts about a host */ +int is_valid_escalation_for_host_notification(host *, hostescalation *, int); /* checks if an escalation entry is valid for a particular host notification */ +int should_host_notification_be_escalated(host *); /* checks if a host notification should be escalated */ +int host_notification(host *, int, char *, char *, int); /* notify all contacts about a host (problem or recovery) */ +int check_contact_host_notification_viability(contact *, host *, int, int); /* checks viability of 
notifying a contact about a host */ +int notify_contact_of_host(nagios_macros *mac, contact *, host *, int, char *, char *, int, int); /* notify a single contact about a host */ +int create_notification_list_from_host(nagios_macros *mac, host *,int,int *,int); /* given a host, create list of contacts to be notified (remove duplicates) */ +int create_notification_list_from_service(nagios_macros *mac, service *,int,int *,int); /* given a service, create list of contacts to be notified (remove duplicates) */ +int add_notification(nagios_macros *mac, contact *); /* adds a notification instance */ +notification *find_notification(contact *); /* finds a notification object */ +time_t get_next_host_notification_time(host *, time_t); /* calculates next acceptable re-notification time for a host */ +time_t get_next_service_notification_time(service *, time_t); /* calculates next acceptable re-notification time for a service */ + + +/**** Cleanup Functions ****/ +void cleanup(void); /* cleanup after ourselves (before quitting or restarting) */ +void free_memory(nagios_macros *mac); /* free memory allocated to all linked lists in memory */ +int reset_variables(void); /* reset all global variables */ +void free_notification_list(void); /* frees all memory allocated to the notification list */ + + +/**** Miscellaneous Functions ****/ +void sighandler(int); /* handles signals */ +void my_system_sighandler(int); /* handles timeouts when executing commands via my_system() */ +char *get_next_string_from_buf(char *buf, int *start_index, int bufsize); +int compare_strings(char *, char *); /* compares two strings for equality */ +char *escape_newlines(char *); +int contains_illegal_object_chars(char *); /* tests whether or not an object name (host, service, etc.) contains illegal characters */ +int my_rename(char *, char *); /* renames a file - works across filesystems */ +int my_fcopy(char *, char *); /* copies a file - works across filesystems */ +int my_fdcopy(char *, char *, int); /* copies a named source to an already opened destination file */ + +/* thread-safe version of get_raw_command_line() */ +extern int get_raw_command_line_r(nagios_macros *mac, command *, char *, char **, int); + +/* + * given a raw command line, determine the actual command to run + * Manipulates global_macros.argv and is thus not thread-safe + */ +extern int get_raw_command_line(command *, char *, char **, int); + +int check_time_against_period(time_t, timeperiod *); /* check to see if a specific time is covered by a time period */ +int is_daterange_single_day(daterange *); +time_t calculate_time_from_weekday_of_month(int, int, int, int); /* calculates midnight time of specific (3rd, last, etc.) weekday of a particular month */ +time_t calculate_time_from_day_of_month(int, int, int); /* calculates midnight time of specific (1st, last, etc.) 
+void get_next_valid_time(time_t, time_t *, timeperiod *);   /* get the next valid time in a time period */
+time_t get_next_log_rotation_time(void);   /* determine the next time to schedule a log rotation */
+int dbuf_init(dbuf *, int);
+int dbuf_free(dbuf *);
+int dbuf_strcat(dbuf *, const char *);
+int set_environment_var(char *, char *, int);   /* sets/clears an environment variable */
+int check_for_nagios_updates(int, int);   /* checks to see if a new version of Nagios is available */
+int query_update_api(void);   /* checks to see if a new version of Nagios is available */
+
+
+/**** External Command Functions ****/
+int process_external_command1(char *);   /* top-level external command processor */
+int process_external_command2(int, time_t, char *);   /* process an external command */
+int process_external_commands_from_file(char *, int);   /* process external commands in a file */
+int process_host_command(int, time_t, char *);   /* process an external host command */
+int process_hostgroup_command(int, time_t, char *);   /* process an external hostgroup command */
+int process_service_command(int, time_t, char *);   /* process an external service command */
+int process_servicegroup_command(int, time_t, char *);   /* process an external servicegroup command */
+int process_contact_command(int, time_t, char *);   /* process an external contact command */
+int process_contactgroup_command(int, time_t, char *);   /* process an external contactgroup command */
+
+
+/**** External Command Implementations ****/
+int cmd_add_comment(int, time_t, char *);   /* add a service or host comment */
+int cmd_delete_comment(int, char *);   /* delete a service or host comment */
+int cmd_delete_all_comments(int, char *);   /* delete all comments associated with a host or service */
+int cmd_delay_notification(int, char *);   /* delay a service or host notification */
+int cmd_schedule_check(int, char *);   /* schedule an immediate or delayed host check */
+int cmd_schedule_host_service_checks(int, char *, int);   /* schedule immediate or delayed checks of all services on a host */
+int cmd_signal_process(int, char *);   /* schedules a program shutdown or restart */
+int cmd_process_service_check_result(int, time_t, char *);   /* processes a passive service check */
+int cmd_process_host_check_result(int, time_t, char *);   /* processes a passive host check */
+int cmd_acknowledge_problem(int, char *);   /* acknowledges a host or service problem */
+int cmd_remove_acknowledgement(int, char *);   /* removes a host or service acknowledgement */
+int cmd_schedule_downtime(int, time_t, char *);   /* schedules host or service downtime */
+int cmd_delete_downtime(int, char *);   /* cancels active/pending host or service scheduled downtime */
+int cmd_change_object_int_var(int, char *);   /* changes host/svc (int) variable */
+int cmd_change_object_char_var(int, char *);   /* changes host/svc (char) variable */
+int cmd_change_object_custom_var(int, char *);   /* changes host/svc custom variable */
+int cmd_process_external_commands_from_file(int, char *);   /* process external commands from a file */
+int cmd_delete_downtime_by_start_time_comment(int, char *);
+int cmd_delete_downtime_by_host_name(int, char *);
+int cmd_delete_downtime_by_hostgroup_name(int, char *);
+
+int process_passive_service_check(time_t, char *, char *, int, char *);
+int process_passive_host_check(time_t, char *, int, char *);
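/*
 * Editor's sketch (not part of the diff): feeding Nagios a passive
 * service check result via the declaration just above; the host,
 * service and output are made up, and STATE_OK (0) comes from the
 * state defines elsewhere in the headers:
 *
 *     process_passive_service_check(time(NULL), "web01", "HTTP",
 *                                   STATE_OK, "OK - responded in 12 ms");
 */
+
+
+/**** Internal Command Implementations ****/
+void disable_service_checks(service *);   /* disables a service check */
+void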
enable_service_checks(service *); /* enables a service check */ +void schedule_service_check(service *, time_t, int); /* schedules an immediate or delayed service check */ +void schedule_host_check(host *, time_t, int); /* schedules an immediate or delayed host check */ +void enable_all_notifications(void); /* enables notifications on a program-wide basis */ +void disable_all_notifications(void); /* disables notifications on a program-wide basis */ +void enable_service_notifications(service *); /* enables service notifications */ +void disable_service_notifications(service *); /* disables service notifications */ +void enable_host_notifications(host *); /* enables host notifications */ +void disable_host_notifications(host *); /* disables host notifications */ +void enable_and_propagate_notifications(host *, int, int, int, int); /* enables notifications for all hosts and services beyond a given host */ +void disable_and_propagate_notifications(host *, int, int, int, int); /* disables notifications for all hosts and services beyond a given host */ +void schedule_and_propagate_downtime(host *, time_t, char *, char *, time_t, time_t, int, unsigned long, unsigned long); /* schedules downtime for all hosts beyond a given host */ +void acknowledge_host_problem(host *, char *, char *, int, int, int); /* acknowledges a host problem */ +void acknowledge_service_problem(service *, char *, char *, int, int, int); /* acknowledges a service problem */ +void remove_host_acknowledgement(host *); /* removes a host acknowledgement */ +void remove_service_acknowledgement(service *); /* removes a service acknowledgement */ +void start_executing_service_checks(void); /* starts executing service checks */ +void stop_executing_service_checks(void); /* stops executing service checks */ +void start_accepting_passive_service_checks(void); /* starts accepting passive service check results */ +void stop_accepting_passive_service_checks(void); /* stops accepting passive service check results */ +void enable_passive_service_checks(service *); /* enables passive service checks for a particular service */ +void disable_passive_service_checks(service *); /* disables passive service checks for a particular service */ +void start_using_event_handlers(void); /* enables event handlers on a program-wide basis */ +void stop_using_event_handlers(void); /* disables event handlers on a program-wide basis */ +void enable_service_event_handler(service *); /* enables the event handler for a particular service */ +void disable_service_event_handler(service *); /* disables the event handler for a particular service */ +void enable_host_event_handler(host *); /* enables the event handler for a particular host */ +void disable_host_event_handler(host *); /* disables the event handler for a particular host */ +void enable_host_checks(host *); /* enables checks of a particular host */ +void disable_host_checks(host *); /* disables checks of a particular host */ +void start_obsessing_over_service_checks(void); /* start obsessing about service check results */ +void stop_obsessing_over_service_checks(void); /* stop obsessing about service check results */ +void start_obsessing_over_host_checks(void); /* start obsessing about host check results */ +void stop_obsessing_over_host_checks(void); /* stop obsessing about host check results */ +void enable_service_freshness_checks(void); /* enable service freshness checks */ +void disable_service_freshness_checks(void); /* disable service freshness checks */ +void 
enable_host_freshness_checks(void); /* enable host freshness checks */ +void disable_host_freshness_checks(void); /* disable host freshness checks */ +void enable_performance_data(void); /* enables processing of performance data on a program-wide basis */ +void disable_performance_data(void); /* disables processing of performance data on a program-wide basis */ +void start_executing_host_checks(void); /* starts executing host checks */ +void stop_executing_host_checks(void); /* stops executing host checks */ +void start_accepting_passive_host_checks(void); /* starts accepting passive host check results */ +void stop_accepting_passive_host_checks(void); /* stops accepting passive host check results */ +void enable_passive_host_checks(host *); /* enables passive host checks for a particular host */ +void disable_passive_host_checks(host *); /* disables passive host checks for a particular host */ +void start_obsessing_over_service(service *); /* start obsessing about specific service check results */ +void stop_obsessing_over_service(service *); /* stop obsessing about specific service check results */ +void start_obsessing_over_host(host *); /* start obsessing about specific host check results */ +void stop_obsessing_over_host(host *); /* stop obsessing about specific host check results */ +void set_host_notification_number(host *, int); /* sets current notification number for a specific host */ +void set_service_notification_number(service *, int); /* sets current notification number for a specific service */ +void enable_contact_host_notifications(contact *); /* enables host notifications for a specific contact */ +void disable_contact_host_notifications(contact *); /* disables host notifications for a specific contact */ +void enable_contact_service_notifications(contact *); /* enables service notifications for a specific contact */ +void disable_contact_service_notifications(contact *); /* disables service notifications for a specific contact */ + +int launch_command_file_worker(void); +int shutdown_command_file_worker(void); + +char *get_program_version(void); +char *get_program_modification_date(void); + +NAGIOS_END_DECL +#endif + diff -Nru check-mk-1.2.2p3/nagios4/nebcallbacks.h check-mk-1.2.6p12/nagios4/nebcallbacks.h --- check-mk-1.2.2p3/nagios4/nebcallbacks.h 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/nagios4/nebcallbacks.h 2014-02-10 09:00:20.000000000 +0000 @@ -0,0 +1,70 @@ +/***************************************************************************** + * + * NEBCALLBACKS.H - Include file for event broker modules + * + * + * License: + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
+ * + *****************************************************************************/ + +#ifndef _NEBCALLBACKS_H +#define _NEBCALLBACKS_H + +#include "nebmodules.h" + + +/***** CALLBACK TYPES *****/ + +#define NEBCALLBACK_NUMITEMS 26 /* total number of callback types we have */ + +#define NEBCALLBACK_PROCESS_DATA 0 +#define NEBCALLBACK_TIMED_EVENT_DATA 1 +#define NEBCALLBACK_LOG_DATA 2 +#define NEBCALLBACK_SYSTEM_COMMAND_DATA 3 +#define NEBCALLBACK_EVENT_HANDLER_DATA 4 +#define NEBCALLBACK_NOTIFICATION_DATA 5 +#define NEBCALLBACK_SERVICE_CHECK_DATA 6 +#define NEBCALLBACK_HOST_CHECK_DATA 7 +#define NEBCALLBACK_COMMENT_DATA 8 +#define NEBCALLBACK_DOWNTIME_DATA 9 +#define NEBCALLBACK_FLAPPING_DATA 10 +#define NEBCALLBACK_PROGRAM_STATUS_DATA 11 +#define NEBCALLBACK_HOST_STATUS_DATA 12 +#define NEBCALLBACK_SERVICE_STATUS_DATA 13 +#define NEBCALLBACK_ADAPTIVE_PROGRAM_DATA 14 +#define NEBCALLBACK_ADAPTIVE_HOST_DATA 15 +#define NEBCALLBACK_ADAPTIVE_SERVICE_DATA 16 +#define NEBCALLBACK_EXTERNAL_COMMAND_DATA 17 +#define NEBCALLBACK_AGGREGATED_STATUS_DATA 18 +#define NEBCALLBACK_RETENTION_DATA 19 +#define NEBCALLBACK_CONTACT_NOTIFICATION_DATA 20 +#define NEBCALLBACK_CONTACT_NOTIFICATION_METHOD_DATA 21 +#define NEBCALLBACK_ACKNOWLEDGEMENT_DATA 22 +#define NEBCALLBACK_STATE_CHANGE_DATA 23 +#define NEBCALLBACK_CONTACT_STATUS_DATA 24 +#define NEBCALLBACK_ADAPTIVE_CONTACT_DATA 25 + +#define nebcallback_flag(x) (1 << (x)) + +/***** CALLBACK FUNCTIONS *****/ +NAGIOS_BEGIN_DECL + +int neb_register_callback(int callback_type, void *mod_handle, int priority, int (*callback_func)(int, void *)); +int neb_deregister_callback(int callback_type, int (*callback_func)(int, void *)); +int neb_deregister_module_callbacks(nebmodule *); + +NAGIOS_END_DECL +#endif diff -Nru check-mk-1.2.2p3/nagios4/neberrors.h check-mk-1.2.6p12/nagios4/neberrors.h --- check-mk-1.2.2p3/nagios4/neberrors.h 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/nagios4/neberrors.h 2014-02-10 09:00:20.000000000 +0000 @@ -0,0 +1,67 @@ +/***************************************************************************** + * + * NEBERRORS.H - Event broker errors + * + * + * License: + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
+ * + *****************************************************************************/ + +#ifndef _NEBERRORS_H +#define _NEBERRORS_H + + +/***** GENERIC DEFINES *****/ + +#define NEB_OK 0 +#define NEB_ERROR -1 + +#define NEB_TRUE 1 +#define NEB_FALSE 0 + + + +/***** GENERIC ERRORS *****/ + +#define NEBERROR_NOMEM 100 /* memory could not be allocated */ + + + +/***** CALLBACK ERRORS *****/ + +#define NEBERROR_NOCALLBACKFUNC 200 /* no callback function was specified */ +#define NEBERROR_NOCALLBACKLIST 201 /* callback list not initialized */ +#define NEBERROR_CALLBACKBOUNDS 202 /* callback type was out of bounds */ +#define NEBERROR_CALLBACKNOTFOUND 203 /* the callback could not be found */ +#define NEBERROR_NOMODULEHANDLE 204 /* no module handle specified */ +#define NEBERROR_BADMODULEHANDLE 205 /* bad module handle */ +#define NEBERROR_CALLBACKOVERRIDE 206 /* module wants to override default Nagios handling of event */ +#define NEBERROR_CALLBACKCANCEL 207 /* module wants to cancel callbacks to other modules */ + + + +/***** MODULE ERRORS *****/ + +#define NEBERROR_NOMODULE 300 /* no module was specified */ + + + +/***** MODULE INFO ERRORS *****/ + +#define NEBERROR_MODINFOBOUNDS 400 /* module info index was out of bounds */ + + +#endif diff -Nru check-mk-1.2.2p3/nagios4/nebmods.h check-mk-1.2.6p12/nagios4/nebmods.h --- check-mk-1.2.2p3/nagios4/nebmods.h 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/nagios4/nebmods.h 2014-02-10 09:00:20.000000000 +0000 @@ -0,0 +1,62 @@ +/***************************************************************************** + * + * NEBMODS.H - Include file for event broker modules + * + * + * License: + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
+ * + *****************************************************************************/ + +#ifndef _NEBMODS_H +#define _NEBMODS_H + +#include "nebcallbacks.h" +#include "nebmodules.h" + +NAGIOS_BEGIN_DECL + +/***** MODULE STRUCTURES *****/ + +/* NEB module callback list struct */ +typedef struct nebcallback_struct { + void *callback_func; + void *module_handle; + int priority; + struct nebcallback_struct *next; + } nebcallback; + + + +/***** MODULE FUNCTIONS *****/ + +int neb_init_modules(void); +int neb_deinit_modules(void); +int neb_load_all_modules(void); +int neb_load_module(nebmodule *); +int neb_free_module_list(void); +int neb_unload_all_modules(int, int); +int neb_unload_module(nebmodule *, int, int); +int neb_add_module(char *, char *, int); +int neb_add_core_module(nebmodule *mod); + + +/***** CALLBACK FUNCTIONS *****/ +int neb_init_callback_list(void); +int neb_free_callback_list(void); +int neb_make_callbacks(int, void *); + +NAGIOS_END_DECL +#endif diff -Nru check-mk-1.2.2p3/nagios4/nebmodules.h check-mk-1.2.6p12/nagios4/nebmodules.h --- check-mk-1.2.2p3/nagios4/nebmodules.h 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/nagios4/nebmodules.h 2014-02-10 09:00:20.000000000 +0000 @@ -0,0 +1,94 @@ +/***************************************************************************** + * + * NEBMODULES.H - Include file for event broker modules + * + * + * License: + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
+ * + *****************************************************************************/ + +#ifndef _NEBMODULES_H +#define _NEBMODULES_H + +#include "common.h" +NAGIOS_BEGIN_DECL + + /***** MODULE VERSION INFORMATION *****/ + +#define NEB_API_VERSION(x) int __neb_api_version = x; +#define CURRENT_NEB_API_VERSION 4 + + + + /***** MODULE INFORMATION *****/ + +#define NEBMODULE_MODINFO_NUMITEMS 6 +#define NEBMODULE_MODINFO_TITLE 0 +#define NEBMODULE_MODINFO_AUTHOR 1 +#define NEBMODULE_MODINFO_COPYRIGHT 2 +#define NEBMODULE_MODINFO_VERSION 3 +#define NEBMODULE_MODINFO_LICENSE 4 +#define NEBMODULE_MODINFO_DESC 5 + + + + /***** MODULE LOAD/UNLOAD OPTIONS *****/ + +#define NEBMODULE_NORMAL_LOAD 0 /* module is being loaded normally */ +#define NEBMODULE_REQUEST_UNLOAD 0 /* request module to unload (but don't force it) */ +#define NEBMODULE_FORCE_UNLOAD 1 /* force module to unload */ + + + + /***** MODULES UNLOAD REASONS *****/ + +#define NEBMODULE_NEB_SHUTDOWN 1 /* event broker is shutting down */ +#define NEBMODULE_NEB_RESTART 2 /* event broker is restarting */ +#define NEBMODULE_ERROR_NO_INIT 3 /* _module_init() function was not found in module */ +#define NEBMODULE_ERROR_BAD_INIT 4 /* _module_init() function returned a bad code */ +#define NEBMODULE_ERROR_API_VERSION 5 /* module version is incompatible with current api */ + + + +/***** MODULE STRUCTURES *****/ + +/* NEB module structure */ +typedef struct nebmodule_struct { + char *filename; + char *dl_file; /* the file we actually loaded */ + char *args; + char *info[NEBMODULE_MODINFO_NUMITEMS]; + int should_be_loaded; + int is_currently_loaded; + int core_module; +#ifdef USE_LTDL + lt_dlhandle module_handle; + lt_ptr init_func; + lt_ptr deinit_func; +#else + void *module_handle; + void *init_func; + void *deinit_func; +#endif + struct nebmodule_struct *next; + } nebmodule; + + +/***** MODULE FUNCTIONS *****/ +int neb_set_module_info(void *, int, char *); + +NAGIOS_END_DECL +#endif diff -Nru check-mk-1.2.2p3/nagios4/nebstructs.h check-mk-1.2.6p12/nagios4/nebstructs.h --- check-mk-1.2.2p3/nagios4/nebstructs.h 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/nagios4/nebstructs.h 2014-02-10 09:00:20.000000000 +0000 @@ -0,0 +1,525 @@ +/***************************************************************************** + * + * NEBSTRUCTS.H - Event broker includes for Nagios + * + * + * License: + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
+ * + *****************************************************************************/ + +#ifndef _NEBSTRUCTS_H +#define _NEBSTRUCTS_H + +#include "common.h" +#include "objects.h" +#include "nagios.h" + +NAGIOS_BEGIN_DECL + +/****** STRUCTURES *************************/ + +/* process data structure */ +typedef struct nebstruct_process_struct { + int type; + int flags; + int attr; + struct timeval timestamp; + } nebstruct_process_data; + + +/* timed event data structure */ +typedef struct nebstruct_timed_event_struct { + int type; + int flags; + int attr; + struct timeval timestamp; + + int event_type; + int recurring; + time_t run_time; + void *event_data; + + void *event_ptr; + } nebstruct_timed_event_data; + + +/* log data structure */ +typedef struct nebstruct_log_struct { + int type; + int flags; + int attr; + struct timeval timestamp; + + time_t entry_time; + int data_type; + char *data; + } nebstruct_log_data; + + +/* system command structure */ +typedef struct nebstruct_system_command_struct { + int type; + int flags; + int attr; + struct timeval timestamp; + + struct timeval start_time; + struct timeval end_time; + int timeout; + char *command_line; + int early_timeout; + double execution_time; + int return_code; + char *output; + } nebstruct_system_command_data; + + +/* event handler structure */ +typedef struct nebstruct_event_handler_struct { + int type; + int flags; + int attr; + struct timeval timestamp; + + int eventhandler_type; + char *host_name; + char *service_description; + int state_type; + int state; + int timeout; + char *command_name; + char *command_args; + char *command_line; + struct timeval start_time; + struct timeval end_time; + int early_timeout; + double execution_time; + int return_code; + char *output; + + void *object_ptr; + } nebstruct_event_handler_data; + + +/* host check structure */ +typedef struct nebstruct_host_check_struct { + int type; + int flags; + int attr; + struct timeval timestamp; + + char *host_name; + int current_attempt; + int check_type; + int max_attempts; + int state_type; + int state; + int timeout; + char *command_name; + char *command_args; + char *command_line; + struct timeval start_time; + struct timeval end_time; + int early_timeout; + double execution_time; + double latency; + int return_code; + char *output; + char *long_output; + char *perf_data; + check_result *check_result_ptr; + + void *object_ptr; + } nebstruct_host_check_data; + + +/* service check structure */ +typedef struct nebstruct_service_check_struct { + int type; + int flags; + int attr; + struct timeval timestamp; + + char *host_name; + char *service_description; + int check_type; + int current_attempt; + int max_attempts; + int state_type; + int state; + int timeout; + char *command_name; + char *command_args; + char *command_line; + struct timeval start_time; + struct timeval end_time; + int early_timeout; + double execution_time; + double latency; + int return_code; + char *output; + char *long_output; + char *perf_data; + check_result *check_result_ptr; + + void *object_ptr; + } nebstruct_service_check_data; + + +/* comment data structure */ +typedef struct nebstruct_comment_struct { + int type; + int flags; + int attr; + struct timeval timestamp; + + int comment_type; + char *host_name; + char *service_description; + time_t entry_time; + char *author_name; + char *comment_data; + int persistent; + int source; + int entry_type; + int expires; + time_t expire_time; + unsigned long comment_id; + + void *object_ptr; /* not implemented yet */ + } 
nebstruct_comment_data; + + +/* downtime data structure */ +typedef struct nebstruct_downtime_struct { + int type; + int flags; + int attr; + struct timeval timestamp; + + int downtime_type; + char *host_name; + char *service_description; + time_t entry_time; + char *author_name; + char *comment_data; + time_t start_time; + time_t end_time; + int fixed; + unsigned long duration; + unsigned long triggered_by; + unsigned long downtime_id; + + void *object_ptr; /* not implemented yet */ + } nebstruct_downtime_data; + + +/* flapping data structure */ +typedef struct nebstruct_flapping_struct { + int type; + int flags; + int attr; + struct timeval timestamp; + + int flapping_type; + char *host_name; + char *service_description; + double percent_change; + double high_threshold; + double low_threshold; + unsigned long comment_id; + + void *object_ptr; + } nebstruct_flapping_data; + + +/* program status structure */ +typedef struct nebstruct_program_status_struct { + int type; + int flags; + int attr; + struct timeval timestamp; + + time_t program_start; + int pid; + int daemon_mode; + time_t last_log_rotation; + int notifications_enabled; + int active_service_checks_enabled; + int passive_service_checks_enabled; + int active_host_checks_enabled; + int passive_host_checks_enabled; + int event_handlers_enabled; + int flap_detection_enabled; + int process_performance_data; + int obsess_over_hosts; + int obsess_over_services; + unsigned long modified_host_attributes; + unsigned long modified_service_attributes; + char *global_host_event_handler; + char *global_service_event_handler; + } nebstruct_program_status_data; + + +/* host status structure */ +typedef struct nebstruct_host_status_struct { + int type; + int flags; + int attr; + struct timeval timestamp; + + void *object_ptr; + } nebstruct_host_status_data; + + +/* service status structure */ +typedef struct nebstruct_service_status_struct { + int type; + int flags; + int attr; + struct timeval timestamp; + + void *object_ptr; + } nebstruct_service_status_data; + + +/* contact status structure */ +typedef struct nebstruct_contact_status_struct { + int type; + int flags; + int attr; + struct timeval timestamp; + + void *object_ptr; + } nebstruct_contact_status_data; + + +/* notification data structure */ +typedef struct nebstruct_notification_struct { + int type; + int flags; + int attr; + struct timeval timestamp; + + int notification_type; + struct timeval start_time; + struct timeval end_time; + char *host_name; + char *service_description; + int reason_type; + int state; + char *output; + char *ack_author; + char *ack_data; + int escalated; + int contacts_notified; + + void *object_ptr; + } nebstruct_notification_data; + + +/* contact notification data structure */ +typedef struct nebstruct_contact_notification_struct { + int type; + int flags; + int attr; + struct timeval timestamp; + + int notification_type; + struct timeval start_time; + struct timeval end_time; + char *host_name; + char *service_description; + char *contact_name; + int reason_type; + int state; + char *output; + char *ack_author; + char *ack_data; + int escalated; + + void *object_ptr; + void *contact_ptr; + } nebstruct_contact_notification_data; + + +/* contact notification method data structure */ +typedef struct nebstruct_contact_notification_method_struct { + int type; + int flags; + int attr; + struct timeval timestamp; + + int notification_type; + struct timeval start_time; + struct timeval end_time; + char *host_name; + char *service_description; + char 
*contact_name;
+    char *command_name;
+    char *command_args;
+    int reason_type;
+    int state;
+    char *output;
+    char *ack_author;
+    char *ack_data;
+    int escalated;
+
+    void *object_ptr;
+    void *contact_ptr;
+    } nebstruct_contact_notification_method_data;
+
+
+/* adaptive program data structure */
+typedef struct nebstruct_adaptive_program_data_struct {
+    int type;
+    int flags;
+    int attr;
+    struct timeval timestamp;
+
+    int command_type;
+    unsigned long modified_host_attribute;
+    unsigned long modified_host_attributes;
+    unsigned long modified_service_attribute;
+    unsigned long modified_service_attributes;
+    } nebstruct_adaptive_program_data;
+
+
+/* adaptive host data structure */
+typedef struct nebstruct_adaptive_host_data_struct {
+    int type;
+    int flags;
+    int attr;
+    struct timeval timestamp;
+
+    int command_type;
+    unsigned long modified_attribute;
+    unsigned long modified_attributes;
+
+    void *object_ptr;
+    } nebstruct_adaptive_host_data;
+
+
+/* adaptive service data structure */
+typedef struct nebstruct_adaptive_service_data_struct {
+    int type;
+    int flags;
+    int attr;
+    struct timeval timestamp;
+
+    int command_type;
+    unsigned long modified_attribute;
+    unsigned long modified_attributes;
+
+    void *object_ptr;
+    } nebstruct_adaptive_service_data;
+
+
+/* adaptive contact data structure */
+typedef struct nebstruct_adaptive_contact_data_struct {
+    int type;
+    int flags;
+    int attr;
+    struct timeval timestamp;
+
+    int command_type;
+    unsigned long modified_attribute;
+    unsigned long modified_attributes;
+    unsigned long modified_host_attribute;
+    unsigned long modified_host_attributes;
+    unsigned long modified_service_attribute;
+    unsigned long modified_service_attributes;
+
+    void *object_ptr;
+    } nebstruct_adaptive_contact_data;
+
+
+/* external command data structure */
+typedef struct nebstruct_external_command_struct {
+    int type;
+    int flags;
+    int attr;
+    struct timeval timestamp;
+
+    int command_type;
+    time_t entry_time;
+    char *command_string;
+    char *command_args;
+    } nebstruct_external_command_data;
+
+
+/* aggregated status data structure */
+typedef struct nebstruct_aggregated_status_struct {
+    int type;
+    int flags;
+    int attr;
+    struct timeval timestamp;
+
+    } nebstruct_aggregated_status_data;
+
+
+/* retention data structure */
+typedef struct nebstruct_retention_struct {
+    int type;
+    int flags;
+    int attr;
+    struct timeval timestamp;
+
+    } nebstruct_retention_data;
+
+
+/* acknowledgement structure */
+typedef struct nebstruct_acknowledgement_struct {
+    int type;
+    int flags;
+    int attr;
+    struct timeval timestamp;
+
+    int acknowledgement_type;
+    char *host_name;
+    char *service_description;
+    int state;
+    char *author_name;
+    char *comment_data;
+    int is_sticky;
+    int persistent_comment;
+    int notify_contacts;
+
+    void *object_ptr;
+    } nebstruct_acknowledgement_data;
+
+
+/* state change structure */
+typedef struct nebstruct_statechange_struct {
+    int type;
+    int flags;
+    int attr;
+    struct timeval timestamp;
+
+    int statechange_type;
+    char *host_name;
+    char *service_description;
+    int state;
+    int state_type;
+    int current_attempt;
+    int max_attempts;
+    char *output;
+
+    void *object_ptr;
+    } nebstruct_statechange_data;
+
+NAGIOS_END_DECL
+#endif
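/*
 * Editor's sketch (not part of the diff): a minimal NEB module tying
 * the event-broker headers above together. my_cb() is a made-up name,
 * nebmodule_init() is the entry point the core resolves when loading a
 * module, and NEBTYPE_SERVICECHECK_PROCESSED comes from broker.h (not
 * shown in this diff); stdio.h is assumed for printf():
 *
 *     NEB_API_VERSION(CURRENT_NEB_API_VERSION)
 *
 *     static int my_cb(int type, void *data) {
 *         nebstruct_service_check_data *ds = (nebstruct_service_check_data *)data;
 *         if (ds->type == NEBTYPE_SERVICECHECK_PROCESSED)
 *             printf("%s;%s -> state %d\n",
 *                    ds->host_name, ds->service_description, ds->state);
 *         return 0;
 *     }
 *
 *     int nebmodule_init(int flags, char *args, nebmodule *handle) {
 *         return neb_register_callback(NEBCALLBACK_SERVICE_CHECK_DATA,
 *                                      handle, 0, my_cb);
 *     }
 */
diff -Nru check-mk-1.2.2p3/nagios4/nsock.h check-mk-1.2.6p12/nagios4/nsock.h
--- check-mk-1.2.2p3/nagios4/nsock.h 1970-01-01 00:00:00.000000000 +0000
+++ check-mk-1.2.6p12/nagios4/nsock.h 2014-02-10 09:00:20.000000000 +0000
@@ -0,0 +1,76 @@
+#ifndef LIBNAGIOS_nsock_h__
+#define LIBNAGIOS_nsock_h__
+#include <errno.h>
+
+/**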
+ * @file nsock.h
+ * @brief Nagios socket helper library
+ *
+ * This is a pretty stupid library, but since so many addons and
+ * now Nagios core itself makes use of sockets, we might as well
+ * have some simple wrappers for it that handle the most common
+ * cases.
+ *
+ * @{
+ */
+
+#define NSOCK_EBIND    (-1)   /**< failed to bind() */
+#define NSOCK_ELISTEN  (-2)   /**< failed to listen() */
+#define NSOCK_ESOCKET  (-3)   /**< failed to socket() */
+#define NSOCK_EUNLINK  (-4)   /**< failed to unlink() */
+#define NSOCK_ECONNECT (-5)   /**< failed to connect() */
+#define NSOCK_EFCNTL   (-6)   /**< failed to fcntl() */
+#define NSOCK_EINVAL   (-EINVAL)   /**< -22, normally */
+
+/* flags for the various create calls */
+#define NSOCK_TCP      (1 << 0)   /**< use tcp mode */
+#define NSOCK_UDP      (1 << 1)   /**< use udp mode */
+#define NSOCK_UNLINK   (1 << 2)   /**< unlink existing path (only nsock_unix) */
+#define NSOCK_REUSE    (1 << 2)   /**< reuse existing address */
+#define NSOCK_CONNECT  (1 << 3)   /**< connect rather than create */
+#define NSOCK_BLOCK    (1 << 4)   /**< socket should be in blocking mode */
+
+/**
+ * Grab an error string relating to nsock_unix()
+ * @param code The error code returned by the nsock library
+ * @return An error string describing the error
+ */
+extern const char *nsock_strerror(int code);
+
+/**
+ * Create or connect to a unix socket
+ * To control permissions on listening sockets (i.e. when NSOCK_CONNECT
+ * is not specified), callers will have to modify their umask() before
+ * (and possibly after) the nsock_unix() call.
+ *
+ * @param path The path to connect to or create
+ * @param flags Various options controlling the mode of the socket
+ * @return An NSOCK_E macro on errors, the created socket on success
+ */
+extern int nsock_unix(const char *path, unsigned int flags);
+
+/**
+ * Write a nul-terminated message to the socket pointed to by sd.
+ * This isn't quite the same as dprintf(), which doesn't include
+ * the terminating nul byte.
+ * @note This function may block, so poll(2) for writability
+ * @param sd The socket to write to
+ * @param fmt The format string
+ * @return Whatever write() returns
+ */
+extern int nsock_printf_nul(int sd, const char *fmt, ...)
+    __attribute__((__format__(__printf__, 2, 3)));
+
+/**
+ * Write a printf()-formatted string to the socket pointed to by sd.
+ * This is identical to dprintf(), which is unfortunately GNU only.
+ * @note This function may block, so poll(2) for writability
+ * @param sd The socket to write to
+ * @param fmt The format string
+ * @return Whatever write() returns
+ */
+extern int nsock_printf(int sd, const char *fmt, ...)
+    __attribute__((__format__(__printf__, 2, 3)));
+
+/** @} */
+#endif /* LIBNAGIOS_nsock_h__ */
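/*
 * Editor's sketch (not part of the diff): connecting to a UNIX socket
 * with the helpers above; the path and payload are made up:
 *
 *     int sd = nsock_unix("/tmp/example.sock", NSOCK_TCP | NSOCK_CONNECT);
 *     if (sd < 0)
 *         fprintf(stderr, "connect failed: %s\n", nsock_strerror(sd));
 *     else
 *         nsock_printf_nul(sd, "hello from %s", "check_mk");
 */
diff -Nru check-mk-1.2.2p3/nagios4/nspath.h check-mk-1.2.6p12/nagios4/nspath.h
--- check-mk-1.2.2p3/nagios4/nspath.h 1970-01-01 00:00:00.000000000 +0000
+++ check-mk-1.2.6p12/nagios4/nspath.h 2014-02-10 09:00:20.000000000 +0000
@@ -0,0 +1,91 @@
+#ifndef LIBNAGIOS_nspath_h__
+#define LIBNAGIOS_nspath_h__
+#ifndef _GNU_SOURCE
+# ifndef NODOXY
+#  define _GNU_SOURCE 1
+# endif
+#endif
+#include <sys/types.h>
+#include <sys/stat.h>
+#include "snprintf.h"
+
+/**
+ * @file nspath.h
+ * @brief path handling functions
+ *
+ * This library handles path normalization and resolution. It's nifty
+ * if you want to turn relative paths into absolute ones, or if you
+ * want to make insane ones sane, but without chdir()'ing your way
+ * around the filesystem.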
+ *
+ * @{
+ */
+
+/**
+ * Normalize a path
+ * By "normalize", we mean that we convert dot-slash and dot-dot-slash
+ * embedded components into a legible continuous string of characters.
+ * Leading and trailing slashes are kept exactly as they are in input,
+ * but with sequences of slashes reduced to a single one.
+ *
+ * "foo/bar/.././lala.txt" becomes "foo/lala.txt"
+ * "../../../../bar/../foo/" becomes "/foo/"
+ * "////foo////././bar" becomes "/foo/bar"
+ * @param orig_path The path to normalize
+ * @return A newly allocated string containing the normalized path
+ */
+extern char *nspath_normalize(const char *orig_path);
+
+/**
+ * Make the "base"-relative path "rel_path" absolute.
+ * Turns the relative path "rel_path" into an absolute path and
+ * resolves it as if we were currently in "base". If "base" is
+ * NULL, the current working directory is used. If "base" is not
+ * null, it should be an absolute path for the result to make
+ * sense.
+ *
+ * @param rel_path The relative path to convert
+ * @param base The base directory (if NULL, we use current working dir)
+ * @return A newly allocated string containing the absolute path
+ */
+extern char *nspath_absolute(const char *rel_path, const char *base);
+
+/**
+ * Canonicalize the "base"-relative path "rel_path".
+ * errno gets properly set in case of errors.
+ * @param rel_path The path to transform
+ * @param base The base we should operate relative to
+ * @return Newly allocated canonical path on success, NULL on errors
+ */
+extern char *nspath_real(const char *rel_path, const char *base);
+
+/**
+ * Get absolute dirname of "path", relative to "base"
+ * @param path Full path to target object (file or subdir)
+ * @param base The base directory (if NULL, we use current working dir)
+ * @return NULL on errors, allocated absolute directory name on success
+ */
+extern char *nspath_absolute_dirname(const char *path, const char *base);
+
+
+/**
+ * Recursively create a directory, just like mkdir -p would.
+ * @note This function *will* taint errno with ENOENT if any path
+ * component has to be created.
+ * @note If "path" has a trailing slash, NSPATH_MKDIR_SKIP_LAST
+ * won't have any effect. That's considered a feature, since the
+ * option is designed so one can send a file-path to the function
+ * and have it create the directory structure for it.
+ * @param path Path to create, in normalized form
+ * @param mode Filemode (same as mkdir() takes)
+ * @param options Options flag. See NSPATH_MKDIR_* for or-able options
+ * @return 0 on success, -1 on errors and errno will hold error code
+ * from either stat() or mkdir().
+ */
+extern int nspath_mkdir_p(const char *path, mode_t mode, int options);
+
+/** Don't mkdir() last element of path when calling nspath_mkdir_p() */
+#define NSPATH_MKDIR_SKIP_LAST (1 << 0)
+
+/** @} */
+#endif
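/*
 * Editor's sketch (not part of the diff): the nspath functions above
 * return newly allocated strings that the caller must free(); the paths
 * here are made up, apart from the one reused from the documentation:
 *
 *     char *clean = nspath_normalize("foo/bar/.././lala.txt");  // "foo/lala.txt"
 *     char *abs   = nspath_absolute("etc/nagios.cfg", "/usr/local");
 *     free(clean);
 *     free(abs);
 */
diff -Nru check-mk-1.2.2p3/nagios4/nsutils.h check-mk-1.2.6p12/nagios4/nsutils.h
--- check-mk-1.2.2p3/nagios4/nsutils.h 1970-01-01 00:00:00.000000000 +0000
+++ check-mk-1.2.6p12/nagios4/nsutils.h 2014-02-10 09:00:20.000000000 +0000
@@ -0,0 +1,111 @@
+#ifndef LIBNAGIOS_nsutils_h__
+#define LIBNAGIOS_nsutils_h__
+#include <stdlib.h>
+
+/**
+ * @file nsutils.h
+ * @brief Non-Standard (or Nagios) utility functions and macros.
+ *
+ * This is where we house all helpers and macros that fall outside
+ * the "standard-ish" norm. The prefixes "nsu_" and NSU_ are
+ * reserved for this purpose, so we avoid clashing with other
+ * applications that may have similarly-acting functions with
+ * identical names.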
+ * + * The functions already here lack the nsu_ prefix for backwards + * compatibility reasons. It's possible we'll have to fix that + * some day, but let's leave that for later. + * + * @{ + */ + +/** Macro for dynamically increasing vector lengths */ +#define alloc_nr(x) (((x)+16)*3/2) + +/** + * Check if a number is a power of 2 + * @param x The number to check + * @return 1 if the number is a power of 2, 0 if it's not + */ +static inline int nsu_ispow2(unsigned int x) +{ + return x > 1 ? !(x & (x - 1)) : 0; +} + +/** + * Round up to a power of 2 + * Yes, this is the most cryptic function name in all of Nagios, but I + * like it, so shush. + * @param r The number to round up + * @return r, rounded up to the nearest power of 2. + */ +static inline unsigned int rup2pof2(unsigned int r) +{ + r--; + if (!r) + return 2; + r |= r >> 1; + r |= r >> 2; + r |= r >> 4; + r |= r >> 8; + r |= r >> 16; + + return r + 1; +} + +/** + * Grab a random unsigned int in the range between low and high. + * Note that the PRNG has to be seeded prior to calling this. + * @param low The lower bound, inclusive + * @param high The higher bound, inclusive + * @return An unsigned integer in the mathematical range [low, high] + */ +static inline unsigned int ranged_urand(unsigned int low, unsigned int high) +{ + return low + (rand() * (1.0 / (RAND_MAX + 1.0)) * (high - low)); +} + +/** + * Get number of online cpus + * @return Active cpu cores detected on success. 0 on failure. + */ +extern int real_online_cpus(void); + +/** + * Wrapper for real_online_cpus(), returning 1 in case we can't + * detect any active cpus. + * @return Number of active cpu cores on success. 1 on failure. + */ +extern int online_cpus(void); + +/** + * Create a short-lived string in stack-allocated memory + * The number and size of strings is limited (currently to 256 strings of + * 32 bytes each), so beware and use this sensibly. Intended for + * number-to-string conversion and other short strings. + * @note The returned string must *not* be free()'d! + * @param[in] fmt The format string + * @return A pointer to the formatted string on success. Undefined on errors + */ +extern const char *mkstr(const char *fmt, ...) + __attribute__((__format__(__printf__, 1, 2))); + +/** + * Calculate the millisecond delta between two timeval structs + * @param[in] start The start time + * @param[in] stop The stop time + * @return The millisecond delta between the two structs + */ +extern int tv_delta_msec(const struct timeval *start, const struct timeval *stop); + + +/** + * Get timeval delta as seconds + * @param start The start time + * @param stop The stop time + * @return time difference in fractions of seconds + */ +extern float tv_delta_f(const struct timeval *start, const struct timeval *stop); + +/** @} */ +#endif /* LIBNAGIOS_nsutils_h__ */ diff -Nru check-mk-1.2.2p3/nagios4/objects.h check-mk-1.2.6p12/nagios4/objects.h --- check-mk-1.2.2p3/nagios4/objects.h 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/nagios4/objects.h 2014-02-10 09:00:20.000000000 +0000 @@ -0,0 +1,853 @@ +/***************************************************************************** + * + * OBJECTS.H - Header file for object addition/search functions + * + * + * License: + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + * + *****************************************************************************/ + + +#ifndef _OBJECTS_H +#define _OBJECTS_H + +#include "common.h" + +NAGIOS_BEGIN_DECL + + +/*************** CURRENT OBJECT REVISION **************/ + +#define CURRENT_OBJECT_STRUCTURE_VERSION 402 /* increment when changes are made to data structures... */ +/* Nagios 3 starts at 300, Nagios 4 at 400, etc. */ + + + +/***************** OBJECT SIZE LIMITS *****************/ + +#define MAX_STATE_HISTORY_ENTRIES 21 /* max number of old states to keep track of for flap detection */ +#define MAX_CONTACT_ADDRESSES 6 /* max number of custom addresses a contact can have */ + + + +/***************** SKIP LISTS ****************/ + +#define NUM_OBJECT_SKIPLISTS 12 +#define NUM_HASHED_OBJECT_TYPES 8 + +#define HOST_SKIPLIST 0 +#define SERVICE_SKIPLIST 1 +#define COMMAND_SKIPLIST 2 +#define TIMEPERIOD_SKIPLIST 3 +#define CONTACT_SKIPLIST 4 +#define CONTACTGROUP_SKIPLIST 5 +#define HOSTGROUP_SKIPLIST 6 +#define SERVICEGROUP_SKIPLIST 7 +#define HOSTDEPENDENCY_SKIPLIST 8 +#define SERVICEDEPENDENCY_SKIPLIST 9 +#define HOSTESCALATION_SKIPLIST 10 +#define SERVICEESCALATION_SKIPLIST 11 + + +/***************** DATE RANGE TYPES *******************/ + +#define DATERANGE_CALENDAR_DATE 0 /* 2008-12-25 */ +#define DATERANGE_MONTH_DATE 1 /* july 4 (specific month) */ +#define DATERANGE_MONTH_DAY 2 /* day 21 (generic month) */ +#define DATERANGE_MONTH_WEEK_DAY 3 /* 3rd thursday (specific month) */ +#define DATERANGE_WEEK_DAY 4 /* 3rd thursday (generic month) */ +#define DATERANGE_TYPES 5 + + +/* + * flags for notification_options, flapping_options and other similar + * flags. They overlap (hosts and services), so we can't use enum's. + */ +#define OPT_NOTHING 0 /* no options selected */ +#define OPT_ALL (~0) /* everything selected, so all bits set */ +#define OPT_DOWN (1 << HOST_DOWN) +#define OPT_UP (1 << HOST_UP) +#define OPT_UNREACHABLE (1 << HOST_UNREACHABLE) +#define OPT_OK (1 << STATE_OK) +#define OPT_WARNING (1 << STATE_WARNING) +#define OPT_CRITICAL (1 << STATE_CRITICAL) +#define OPT_UNKNOWN (1 << STATE_UNKNOWN) +#define OPT_RECOVERY OPT_OK +/* and now the "unreal" states... 
*/ +#define OPT_PENDING (1 << 10) +#define OPT_FLAPPING (1 << 11) +#define OPT_DOWNTIME (1 << 12) +#define OPT_DISABLED (1 << 15) /* will denote disabled checks some day */ + +/* macros useful with both hosts and services */ +#define flag_set(c, flag) ((c) |= (flag)) +#define flag_get(c, flag) (unsigned int)((c) & (flag)) +#define flag_isset(c, flag) (flag_get((c), (flag)) == (unsigned int)(flag)) +#define flag_unset(c, flag) (c &= ~(flag)) +#define should_stalk(o) flag_isset(o->stalking_options, 1 << o->current_state) +#define should_flap_detect(o) flag_isset(o->flap_detection_options, 1 << o->current_state) +#define should_notify(o) flag_isset(o->notification_options, 1 << o->current_state) +#define add_notified_on(o, f) (o->notified_on |= (1 << f)) + + +/****************** DATA STRUCTURES *******************/ + +/* @todo Remove typedef's of non-opaque types in Nagios 5 */ +typedef struct host host; +typedef struct service service; +typedef struct contact contact; + +/* TIMED_EVENT structure */ +typedef struct timed_event { + int event_type; + time_t run_time; + int recurring; + unsigned long event_interval; + int compensate_for_time_change; + void *timing_func; + void *event_data; + void *event_args; + int event_options; + unsigned int priority; /* 0 is auto, 1 is highest. n+1 < n */ + struct squeue_event *sq_event; + } timed_event; + + +/* NOTIFY_LIST structure */ +typedef struct notify_list { + struct contact *contact; + struct notify_list *next; + } notification; + + +/* + * *name can be "Nagios Core", "Merlin", "mod_gearman" or "DNX", fe. + * source_name gets passed the 'source' pointer from check_result + * and must return a non-free()'able string useful for printing what + * we need to determine exactly where the check was received from, + * such as "mod_gearman worker@10.11.12.13", or "Nagios Core command + * file worker" (for passive checks submitted locally), which will be + * stashed with hosts and services and used as the "CHECKSOURCE" macro. + */ +struct check_engine { + char *name; /* "Nagios Core", "Merlin", "Mod Gearman" fe */ + const char *(*source_name)(void *); + void (*clean_result)(void *); +}; + +/* CHECK_RESULT structure */ +typedef struct check_result { + int object_check_type; /* is this a service or a host check? */ + char *host_name; /* host name */ + char *service_description; /* service description */ + int check_type; /* was this an active or passive service check? */ + int check_options; + int scheduled_check; /* was this a scheduled or an on-demand check? */ + int reschedule_check; /* should we reschedule the next check */ + char *output_file; /* what file is the output stored in? */ + FILE *output_file_fp; + double latency; + struct timeval start_time; /* time the service check was initiated */ + struct timeval finish_time; /* time the service check was completed */ + int early_timeout; /* did the service check timeout? */ + int exited_ok; /* did the plugin check return okay? */ + int return_code; /* plugin return code */ + char *output; /* plugin output */ + struct rusage rusage; /* resource usage by this check */ + struct check_engine *engine; /* where did we get this check from? 
*/ + void *source; /* engine handles this */ + } check_result; + + +/* SCHED_INFO structure */ +typedef struct sched_info { + int total_services; + int total_scheduled_services; + int total_hosts; + int total_scheduled_hosts; + double average_services_per_host; + double average_scheduled_services_per_host; + unsigned long service_check_interval_total; + unsigned long host_check_interval_total; + double average_service_execution_time; + double average_service_check_interval; + double average_host_check_interval; + double average_service_inter_check_delay; + double average_host_inter_check_delay; + double service_inter_check_delay; + double host_inter_check_delay; + int service_interleave_factor; + int max_service_check_spread; + int max_host_check_spread; + time_t first_service_check; + time_t last_service_check; + time_t first_host_check; + time_t last_host_check; + } sched_info; + + +/* DBUF structure - dynamic string storage */ +typedef struct dbuf { + char *buf; + unsigned long used_size; + unsigned long allocated_size; + unsigned long chunk_size; + } dbuf; + + +#define CHECK_STATS_BUCKETS 15 + +/* used for tracking host and service check statistics */ +typedef struct check_stats { + int current_bucket; + int bucket[CHECK_STATS_BUCKETS]; + int overflow_bucket; + int minute_stats[3]; + time_t last_update; + } check_stats; + + + +/* OBJECT LIST STRUCTURE */ +typedef struct objectlist { + void *object_ptr; + struct objectlist *next; + } objectlist; + + +/* TIMERANGE structure */ +typedef struct timerange { + unsigned long range_start; + unsigned long range_end; + struct timerange *next; + } timerange; + + +/* DATERANGE structure */ +typedef struct daterange { + int type; + int syear; /* start year */ + int smon; /* start month */ + int smday; /* start day of month (may 3rd, last day in feb) */ + int swday; /* start day of week (thursday) */ + int swday_offset; /* start weekday offset (3rd thursday, last monday in jan) */ + int eyear; + int emon; + int emday; + int ewday; + int ewday_offset; + int skip_interval; + struct timerange *times; + struct daterange *next; + } daterange; + + +/* TIMEPERIODEXCLUSION structure */ +typedef struct timeperiodexclusion { + char *timeperiod_name; + struct timeperiod *timeperiod_ptr; + struct timeperiodexclusion *next; + } timeperiodexclusion; + + +/* TIMEPERIOD structure */ +typedef struct timeperiod { + unsigned int id; + char *name; + char *alias; + struct timerange *days[7]; + struct daterange *exceptions[DATERANGE_TYPES]; + struct timeperiodexclusion *exclusions; + struct timeperiod *next; + } timeperiod; + + +/* CONTACTSMEMBER structure */ +typedef struct contactsmember { + char *contact_name; + struct contact *contact_ptr; + struct contactsmember *next; + } contactsmember; + + +/* CONTACTGROUP structure */ +typedef struct contactgroup { + unsigned int id; + char *group_name; + char *alias; + struct contactsmember *members; + struct contactgroup *next; + } contactgroup; + + +/* CONTACTGROUPSMEMBER structure */ +typedef struct contactgroupsmember { + char *group_name; + struct contactgroup *group_ptr; + struct contactgroupsmember *next; + } contactgroupsmember; + + +/* CUSTOMVARIABLESMEMBER structure */ +typedef struct customvariablesmember { + char *variable_name; + char *variable_value; + int has_been_modified; + struct customvariablesmember *next; + } customvariablesmember; + + +/* COMMAND structure */ +typedef struct command { + unsigned int id; + char *name; + char *command_line; + struct command *next; + } command; + + +/* COMMANDSMEMBER 
structure */ +typedef struct commandsmember { + char *command; + struct command *command_ptr; + struct commandsmember *next; + } commandsmember; + + +/* CONTACT structure */ +struct contact { + unsigned int id; + char *name; + char *alias; + char *email; + char *pager; + char *address[MAX_CONTACT_ADDRESSES]; + struct commandsmember *host_notification_commands; + struct commandsmember *service_notification_commands; + unsigned int host_notification_options; + unsigned int service_notification_options; + unsigned int minimum_value; + char *host_notification_period; + char *service_notification_period; + int host_notifications_enabled; + int service_notifications_enabled; + int can_submit_commands; + int retain_status_information; + int retain_nonstatus_information; + struct customvariablesmember *custom_variables; +#ifndef NSCGI + time_t last_host_notification; + time_t last_service_notification; + unsigned long modified_attributes; + unsigned long modified_host_attributes; + unsigned long modified_service_attributes; +#endif + + struct timeperiod *host_notification_period_ptr; + struct timeperiod *service_notification_period_ptr; + struct objectlist *contactgroups_ptr; + struct contact *next; + }; + + +/* SERVICESMEMBER structure */ +typedef struct servicesmember { + char *host_name; + char *service_description; + struct service *service_ptr; + struct servicesmember *next; + } servicesmember; + + +/* HOSTSMEMBER structure */ +typedef struct hostsmember { + char *host_name; + struct host *host_ptr; + struct hostsmember *next; + } hostsmember; + + +/* HOSTGROUP structure */ +typedef struct hostgroup { + unsigned int id; + char *group_name; + char *alias; + struct hostsmember *members; + char *notes; + char *notes_url; + char *action_url; + struct hostgroup *next; + } hostgroup; + + +/* HOST structure */ +struct host { + unsigned int id; + char *name; + char *display_name; + char *alias; + char *address; + struct hostsmember *parent_hosts; + struct hostsmember *child_hosts; + struct servicesmember *services; + char *check_command; + int initial_state; + double check_interval; + double retry_interval; + int max_attempts; + char *event_handler; + struct contactgroupsmember *contact_groups; + struct contactsmember *contacts; + double notification_interval; + double first_notification_delay; + unsigned int notification_options; + unsigned int hourly_value; + char *notification_period; + char *check_period; + int flap_detection_enabled; + double low_flap_threshold; + double high_flap_threshold; + int flap_detection_options; + unsigned int stalking_options; + int check_freshness; + int freshness_threshold; + int process_performance_data; + int checks_enabled; + const char *check_source; + int accept_passive_checks; + int event_handler_enabled; + int retain_status_information; + int retain_nonstatus_information; + int obsess; + char *notes; + char *notes_url; + char *action_url; + char *icon_image; + char *icon_image_alt; + char *statusmap_image; /* used by lots of graphing tools */ +/* #ifdef NSCGI */ + /* + * these are kept in ancillary storage for the daemon and + * thrown out as soon as we've created the object cache. + * The CGI's still attach them though, since they are the + * only users of this utter crap. 
+ */ + char *vrml_image; + int have_2d_coords; + int x_2d; + int y_2d; + int have_3d_coords; + double x_3d; + double y_3d; + double z_3d; + int should_be_drawn; +/* #endif */ + customvariablesmember *custom_variables; +#ifndef NSCGI + int problem_has_been_acknowledged; + int acknowledgement_type; + int check_type; + int current_state; + int last_state; + int last_hard_state; + char *plugin_output; + char *long_plugin_output; + char *perf_data; + int state_type; + int current_attempt; + unsigned long current_event_id; + unsigned long last_event_id; + unsigned long current_problem_id; + unsigned long last_problem_id; + double latency; + double execution_time; + int is_executing; + int check_options; + int notifications_enabled; + time_t last_notification; + time_t next_notification; + time_t next_check; + int should_be_scheduled; + time_t last_check; + time_t last_state_change; + time_t last_hard_state_change; + time_t last_time_up; + time_t last_time_down; + time_t last_time_unreachable; + int has_been_checked; + int is_being_freshened; + int notified_on; + int current_notification_number; + int no_more_notifications; + unsigned long current_notification_id; + int check_flapping_recovery_notification; + int scheduled_downtime_depth; + int pending_flex_downtime; + int state_history[MAX_STATE_HISTORY_ENTRIES]; /* flap detection */ + int state_history_index; + time_t last_state_history_update; + int is_flapping; + unsigned long flapping_comment_id; + double percent_state_change; + int total_services; + unsigned long total_service_check_interval; + unsigned long modified_attributes; +#endif + + struct command *event_handler_ptr; + struct command *check_command_ptr; + struct timeperiod *check_period_ptr; + struct timeperiod *notification_period_ptr; + struct objectlist *hostgroups_ptr; + /* objects we depend upon */ + struct objectlist *exec_deps, *notify_deps; + struct objectlist *escalation_list; + struct host *next; + struct timed_event *next_check_event; + }; + + +/* SERVICEGROUP structure */ +typedef struct servicegroup { + unsigned int id; + char *group_name; + char *alias; + struct servicesmember *members; + char *notes; + char *notes_url; + char *action_url; + struct servicegroup *next; + } servicegroup; + + +/* SERVICE structure */ +struct service { + unsigned int id; + char *host_name; + char *description; + char *display_name; + struct servicesmember *parents; + struct servicesmember *children; + char *check_command; + char *event_handler; + int initial_state; + double check_interval; + double retry_interval; + int max_attempts; + int parallelize; + struct contactgroupsmember *contact_groups; + struct contactsmember *contacts; + double notification_interval; + double first_notification_delay; + unsigned int notification_options; + unsigned int stalking_options; + unsigned int hourly_value; + int is_volatile; + char *notification_period; + char *check_period; + int flap_detection_enabled; + double low_flap_threshold; + double high_flap_threshold; + unsigned int flap_detection_options; + int process_performance_data; + int check_freshness; + int freshness_threshold; + int accept_passive_checks; + int event_handler_enabled; + int checks_enabled; + const char *check_source; + int retain_status_information; + int retain_nonstatus_information; + int notifications_enabled; + int obsess; + char *notes; + char *notes_url; + char *action_url; + char *icon_image; + char *icon_image_alt; + struct customvariablesmember *custom_variables; +#ifndef NSCGI + int problem_has_been_acknowledged; + int 
acknowledgement_type; + int host_problem_at_last_check; + int check_type; + int current_state; + int last_state; + int last_hard_state; + char *plugin_output; + char *long_plugin_output; + char *perf_data; + int state_type; + time_t next_check; + int should_be_scheduled; + time_t last_check; + int current_attempt; + unsigned long current_event_id; + unsigned long last_event_id; + unsigned long current_problem_id; + unsigned long last_problem_id; + time_t last_notification; + time_t next_notification; + int no_more_notifications; + int check_flapping_recovery_notification; + time_t last_state_change; + time_t last_hard_state_change; + time_t last_time_ok; + time_t last_time_warning; + time_t last_time_unknown; + time_t last_time_critical; + int has_been_checked; + int is_being_freshened; + unsigned int notified_on; + int current_notification_number; + unsigned long current_notification_id; + double latency; + double execution_time; + int is_executing; + int check_options; + int scheduled_downtime_depth; + int pending_flex_downtime; + int state_history[MAX_STATE_HISTORY_ENTRIES]; /* flap detection */ + int state_history_index; + int is_flapping; + unsigned long flapping_comment_id; + double percent_state_change; + unsigned long modified_attributes; +#endif + + struct host *host_ptr; + struct command *event_handler_ptr; + char *event_handler_args; + struct command *check_command_ptr; + char *check_command_args; + struct timeperiod *check_period_ptr; + struct timeperiod *notification_period_ptr; + struct objectlist *servicegroups_ptr; + struct objectlist *exec_deps, *notify_deps; + struct objectlist *escalation_list; + struct service *next; + struct timed_event *next_check_event; + }; + + +/* SERVICE ESCALATION structure */ +typedef struct serviceescalation { + unsigned int id; + char *host_name; + char *description; + int first_notification; + int last_notification; + double notification_interval; + char *escalation_period; + int escalation_options; + struct contactgroupsmember *contact_groups; + struct contactsmember *contacts; + struct service *service_ptr; + struct timeperiod *escalation_period_ptr; + } serviceescalation; + + +/* SERVICE DEPENDENCY structure */ +typedef struct servicedependency { + unsigned int id; + int dependency_type; + char *dependent_host_name; + char *dependent_service_description; + char *host_name; + char *service_description; + char *dependency_period; + int inherits_parent; + int failure_options; + struct service *master_service_ptr; + struct service *dependent_service_ptr; + struct timeperiod *dependency_period_ptr; + } servicedependency; + + +/* HOST ESCALATION structure */ +typedef struct hostescalation { + unsigned int id; + char *host_name; + int first_notification; + int last_notification; + double notification_interval; + char *escalation_period; + int escalation_options; + struct contactgroupsmember *contact_groups; + struct contactsmember *contacts; + struct host *host_ptr; + struct timeperiod *escalation_period_ptr; + } hostescalation; + + +/* HOST DEPENDENCY structure */ +typedef struct hostdependency { + unsigned int id; + int dependency_type; + char *dependent_host_name; + char *host_name; + char *dependency_period; + int inherits_parent; + int failure_options; + struct host *master_host_ptr; + struct host *dependent_host_ptr; + struct timeperiod *dependency_period_ptr; + } hostdependency; + +extern struct command *command_list; +extern struct timeperiod *timeperiod_list; +extern struct host *host_list; +extern struct service *service_list; +extern 
struct contact *contact_list; +extern struct hostgroup *hostgroup_list; +extern struct servicegroup *servicegroup_list; +extern struct contactgroup *contactgroup_list; +extern struct hostescalation *hostescalation_list; +extern struct serviceescalation *serviceescalation_list; +extern struct command **command_ary; +extern struct timeperiod **timeperiod_ary; +extern struct host **host_ary; +extern struct service **service_ary; +extern struct contact **contact_ary; +extern struct hostgroup **hostgroup_ary; +extern struct servicegroup **servicegroup_ary; +extern struct contactgroup **contactgroup_ary; +extern struct hostescalation **hostescalation_ary; +extern struct hostdependency **hostdependency_ary; +extern struct serviceescalation **serviceescalation_ary; +extern struct servicedependency **servicedependency_ary; + + +/********************* FUNCTIONS **********************/ + +/**** Top-level input functions ****/ +int read_object_config_data(const char *, int); /* reads all external configuration data of specific types */ + + +/**** Object Creation Functions ****/ +struct contact *add_contact(char *name, char *alias, char *email, char *pager, char **addresses, char *svc_notification_period, char *host_notification_period, int service_notification_options, int host_notification_options, int service_notifications_enabled, int host_notifications_enabled, int can_submit_commands, int retain_status_information, int retain_nonstatus_information, unsigned int minimum_value); +struct commandsmember *add_service_notification_command_to_contact(contact *, char *); /* adds a service notification command to a contact definition */ +struct commandsmember *add_host_notification_command_to_contact(contact *, char *); /* adds a host notification command to a contact definition */ +struct customvariablesmember *add_custom_variable_to_contact(contact *, char *, char *); /* adds a custom variable to a contact definition */ +struct host *add_host(char *name, char *display_name, char *alias, char *address, char *check_period, int initial_state, double check_interval, double retry_interval, int max_attempts, int notification_options, double notification_interval, double first_notification_delay, char *notification_period, int notifications_enabled, char *check_command, int checks_enabled, int accept_passive_checks, char *event_handler, int event_handler_enabled, int flap_detection_enabled, double low_flap_threshold, double high_flap_threshold, int flap_detection_options, int stalking_options, int process_perfdata, int check_freshness, int freshness_threshold, char *notes, char *notes_url, char *action_url, char *icon_image, char *icon_image_alt, char *vrml_image, char *statusmap_image, int x_2d, int y_2d, int have_2d_coords, double x_3d, double y_3d, double z_3d, int have_3d_coords, int should_be_drawn, int retain_status_information, int retain_nonstatus_information, int obsess_over_host, unsigned int hourly_value); +struct hostsmember *add_parent_host_to_host(host *, char *); /* adds a parent host to a host definition */ +struct servicesmember *add_parent_service_to_service(service *, char *host_name, char *description); +struct hostsmember *add_child_link_to_host(host *, host *); /* adds a child host to a host definition */ +struct contactgroupsmember *add_contactgroup_to_host(host *, char *); /* adds a contactgroup to a host definition */ +struct contactsmember *add_contact_to_host(host *, char *); /* adds a contact to a host definition */ +struct customvariablesmember *add_custom_variable_to_host(host *,
char *, char *); /* adds a custom variable to a host definition */ +struct timeperiod *add_timeperiod(char *, char *); /* adds a timeperiod definition */ +struct timeperiodexclusion *add_exclusion_to_timeperiod(timeperiod *, char *); /* adds an exclusion to a timeperiod */ +struct timerange *add_timerange_to_timeperiod(timeperiod *, int, unsigned long, unsigned long); /* adds a timerange to a timeperiod definition */ +struct daterange *add_exception_to_timeperiod(timeperiod *, int, int, int, int, int, int, int, int, int, int, int, int); +struct timerange *add_timerange_to_daterange(daterange *, unsigned long, unsigned long); +struct hostgroup *add_hostgroup(char *, char *, char *, char *, char *); /* adds a hostgroup definition */ +struct hostsmember *add_host_to_hostgroup(hostgroup *, char *); /* adds a host to a hostgroup definition */ +struct servicegroup *add_servicegroup(char *, char *, char *, char *, char *); /* adds a servicegroup definition */ +struct servicesmember *add_service_to_servicegroup(servicegroup *, char *, char *); /* adds a service to a servicegroup definition */ +struct contactgroup *add_contactgroup(char *, char *); /* adds a contactgroup definition */ +struct contactsmember *add_contact_to_contactgroup(contactgroup *, char *); /* adds a contact to a contact group definition */ +struct command *add_command(char *, char *); /* adds a command definition */ +struct service *add_service(char *host_name, char *description, char *display_name, char *check_period, int initial_state, int max_attempts, int parallelize, int accept_passive_checks, double check_interval, double retry_interval, double notification_interval, double first_notification_delay, char *notification_period, int notification_options, int notifications_enabled, int is_volatile, char *event_handler, int event_handler_enabled, char *check_command, int checks_enabled, int flap_detection_enabled, double low_flap_threshold, double high_flap_threshold, int flap_detection_options, int stalking_options, int process_perfdata, int check_freshness, int freshness_threshold, char *notes, char *notes_url, char *action_url, char *icon_image, char *icon_image_alt, int retain_status_information, int retain_nonstatus_information, int obsess_over_service, unsigned int hourly_value); +struct contactgroupsmember *add_contactgroup_to_service(service *, char *); /* adds a contact group to a service definition */ +struct contactsmember *add_contact_to_service(service *, char *); /* adds a contact to a service definition */ +struct serviceescalation *add_serviceescalation(char *host_name, char *description, int first_notification, int last_notification, double notification_interval, char *escalation_period, int escalation_options); +struct contactgroupsmember *add_contactgroup_to_serviceescalation(serviceescalation *, char *); /* adds a contact group to a service escalation definition */ +struct contactsmember *add_contact_to_serviceescalation(serviceescalation *, char *); /* adds a contact to a service escalation definition */ +struct customvariablesmember *add_custom_variable_to_service(service *, char *, char *); /* adds a custom variable to a service definition */ +struct servicedependency *add_service_dependency(char *dependent_host_name, char *dependent_service_description, char *host_name, char *service_description, int dependency_type, int inherits_parent, int failure_options, char *dependency_period); +struct hostdependency *add_host_dependency(char *dependent_host_name, char *host_name, int dependency_type, int
inherits_parent, int failure_options, char *dependency_period); +struct hostescalation *add_hostescalation(char *host_name, int first_notification, int last_notification, double notification_interval, char *escalation_period, int escalation_options); +struct contactsmember *add_contact_to_hostescalation(hostescalation *, char *); /* adds a contact to a host escalation definition */ +struct contactgroupsmember *add_contactgroup_to_hostescalation(hostescalation *, char *); /* adds a contact group to a host escalation definition */ + +struct contactsmember *add_contact_to_object(contactsmember **, char *); /* adds a contact to an object */ +struct customvariablesmember *add_custom_variable_to_object(customvariablesmember **, char *, char *); /* adds a custom variable to an object */ + + +struct servicesmember *add_service_link_to_host(host *, service *); + + +int skiplist_compare_text(const char *val1a, const char *val1b, const char *val2a, const char *val2b); +int get_host_count(void); +int get_service_count(void); + + +int create_object_tables(unsigned int *); + +/**** Object Search Functions ****/ +struct timeperiod *find_timeperiod(const char *); +struct host *find_host(const char *); +struct hostgroup *find_hostgroup(const char *); +struct servicegroup *find_servicegroup(const char *); +struct contact *find_contact(const char *); +struct contactgroup *find_contactgroup(const char *); +struct command *find_command(const char *); +struct service *find_service(const char *, const char *); + + +#define OBJECTLIST_DUPE 1 +int add_object_to_objectlist(struct objectlist **, void *); +int prepend_object_to_objectlist(struct objectlist **, void *); +int prepend_unique_object_to_objectlist(struct objectlist **, void *, size_t size); +int free_objectlist(objectlist **); + + +/**** Object Query Functions ****/ +unsigned int host_services_value(struct host *h); +int is_host_immediate_child_of_host(struct host *, struct host *); /* checks if a host is an immediate child of another host */ +int is_host_primary_immediate_child_of_host(struct host *, struct host *); /* checks if a host is an immediate child (and primary child) of another host */ +int is_host_immediate_parent_of_host(struct host *, struct host *); /* checks if a host is an immediate parent of another host */ +int is_host_member_of_hostgroup(struct hostgroup *, struct host *); /* tests whether or not a host is a member of a specific hostgroup */ +int is_host_member_of_servicegroup(struct servicegroup *, struct host *); /* tests whether or not a host is a member of a specific servicegroup */ +int is_service_member_of_servicegroup(struct servicegroup *, struct service *); /* tests whether or not a service is a member of a specific servicegroup */ +int is_contact_member_of_contactgroup(struct contactgroup *, struct contact *); /* tests whether or not a contact is a member of a specific contact group */ +int is_contact_for_host(struct host *, struct contact *); /* tests whether or not a contact is a contact member for a specific host */ +int is_escalated_contact_for_host(struct host *, struct contact *); /* checks whether or not a contact is an escalated contact for a specific host */ +int is_contact_for_service(struct service *, struct contact *); /* tests whether or not a contact is a contact member for a specific service */ +int is_escalated_contact_for_service(struct service *, struct contact *); /* checks whether or not a contact is an escalated contact for a specific service */ + +int number_of_immediate_child_hosts(struct host *); /*
counts the number of immediate child hosts for a particular host */ +int number_of_total_child_hosts(struct host *); /* counts the number of total child hosts for a particular host */ +int number_of_immediate_parent_hosts(struct host *); /* counts the number of immediate parent hosts for a particular host */ + +#ifndef NSCGI +void fcache_contactlist(FILE *fp, const char *prefix, struct contactsmember *list); +void fcache_contactgrouplist(FILE *fp, const char *prefix, struct contactgroupsmember *list); +void fcache_hostlist(FILE *fp, const char *prefix, struct hostsmember *list); +void fcache_customvars(FILE *fp, struct customvariablesmember *cvlist); +void fcache_timeperiod(FILE *fp, struct timeperiod *temp_timeperiod); +void fcache_command(FILE *fp, struct command *temp_command); +void fcache_contactgroup(FILE *fp, struct contactgroup *temp_contactgroup); +void fcache_hostgroup(FILE *fp, struct hostgroup *temp_hostgroup); +void fcache_servicegroup(FILE *fp, struct servicegroup *temp_servicegroup); +void fcache_contact(FILE *fp, struct contact *temp_contact); +void fcache_host(FILE *fp, struct host *temp_host); +void fcache_service(FILE *fp, struct service *temp_service); +void fcache_servicedependency(FILE *fp, struct servicedependency *temp_servicedependency); +void fcache_serviceescalation(FILE *fp, struct serviceescalation *temp_serviceescalation); +void fcache_hostdependency(FILE *fp, struct hostdependency *temp_hostdependency); +void fcache_hostescalation(FILE *fp, struct hostescalation *temp_hostescalation); +int fcache_objects(char *cache_file); +#endif + + +/**** Object Cleanup Functions ****/ +int free_object_data(void); /* frees all allocated memory for the object definitions */ + + +NAGIOS_END_DECL +#endif diff -Nru check-mk-1.2.2p3/nagios4/perfdata.h check-mk-1.2.6p12/nagios4/perfdata.h --- check-mk-1.2.2p3/nagios4/perfdata.h 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/nagios4/perfdata.h 2014-02-10 09:00:20.000000000 +0000 @@ -0,0 +1,38 @@ +/***************************************************************************** + * + * PERFDATA.H - Include file for performance data routines + * + * + * License: + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ * + *****************************************************************************/ + +#ifndef _PERFDATA_H +#define _PERFDATA_H + +#include "common.h" +#include "objects.h" + +NAGIOS_BEGIN_DECL + +int initialize_performance_data(const char *); /* initializes performance data */ +int cleanup_performance_data(void); /* cleans up performance data */ + +int update_host_performance_data(host *); /* updates host performance data */ +int update_service_performance_data(service *); /* updates service performance data */ + +NAGIOS_END_DECL +#endif diff -Nru check-mk-1.2.2p3/nagios4/pqueue.h check-mk-1.2.6p12/nagios4/pqueue.h --- check-mk-1.2.2p3/nagios4/pqueue.h 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/nagios4/pqueue.h 2014-02-10 09:00:20.000000000 +0000 @@ -0,0 +1,185 @@ +/* + * Copyright 2010 Volkan Yazıcı + * Copyright 2006-2010 The Apache Software Foundation + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ +#ifndef LIBNAGIOS_pqueue_h__ +#define LIBNAGIOS_pqueue_h__ +#include <stdio.h> + +/** + * @file pqueue.h + * @brief Priority Queue function declarations + * + * This priority queue library was originally written by Volkan Yazici + * . It was later adapted for Nagios by + * Andreas Ericsson . Changes compared to the original + * version are pretty much limited to changing pqueue_pri_t to be + * an unsigned long long instead of a double, since ULL comparisons + * are 107 times faster on my 64-bit laptop.
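+ *
+ * A minimal usage sketch (editor's illustration, not part of the original
+ * header; the node layout is an assumption -- any struct that can store a
+ * priority and a position works):
+ * @code
+ * struct node { pqueue_pri_t pri; unsigned int pos; };
+ * static pqueue_pri_t getpri(void *a) { return ((struct node *)a)->pri; }
+ * static void setpri(void *a, pqueue_pri_t p) { ((struct node *)a)->pri = p; }
+ * static int cmppri(pqueue_pri_t next, pqueue_pri_t curr) { return next > curr; }
+ * static unsigned int getpos(void *a) { return ((struct node *)a)->pos; }
+ * static void setpos(void *a, unsigned int p) { ((struct node *)a)->pos = p; }
+ *
+ * pqueue_t *q = pqueue_init(64, cmppri, getpri, setpri, getpos, setpos);
+ * struct node n = { 42, 0 };
+ * pqueue_insert(q, &n);
+ * struct node *top = pqueue_pop(q);  // &n again
+ * pqueue_free(q);
+ * @endcode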
+ * + * @{ + */ + + +/** priority data type (used to be double, but ull is 107 times faster) */ +typedef unsigned long long pqueue_pri_t; + +/** callback functions to get/set/compare the priority of an element */ +typedef pqueue_pri_t (*pqueue_get_pri_f)(void *a); +typedef void (*pqueue_set_pri_f)(void *a, pqueue_pri_t pri); +typedef int (*pqueue_cmp_pri_f)(pqueue_pri_t next, pqueue_pri_t curr); + + +/** callback functions to get/set the position of an element */ +typedef unsigned int (*pqueue_get_pos_f)(void *a); +typedef void (*pqueue_set_pos_f)(void *a, unsigned int pos); + + +/** debug callback function to print an entry */ +typedef void (*pqueue_print_entry_f)(FILE *out, void *a); + + +/** the priority queue handle */ +typedef struct pqueue_t +{ + unsigned int size; /**< number of elements in this queue */ + unsigned int avail; /**< slots available in this queue */ + unsigned int step; /**< growth stepping setting */ + pqueue_cmp_pri_f cmppri; /**< callback to compare nodes */ + pqueue_get_pri_f getpri; /**< callback to get priority of a node */ + pqueue_set_pri_f setpri; /**< callback to set priority of a node */ + pqueue_get_pos_f getpos; /**< callback to get position of a node */ + pqueue_set_pos_f setpos; /**< callback to set position of a node */ + void **d; /**< The actual queue in binary heap form */ +} pqueue_t; + + +/** + * initialize the queue + * + * @param n the initial estimate of the number of queue items for which memory + * should be preallocated + * @param cmppri The callback function to run to compare two elements + * This callback should return 0 for 'lower' and non-zero + * for 'higher', or vice versa if reverse priority is desired + * @param setpri the callback function to run to assign a score to an element + * @param getpri the callback function to run to retrieve the score of an element + * @param getpos the callback function to get the current element's position + * @param setpos the callback function to set the current element's position + * + * @return the handle or NULL for insufficient memory + */ +pqueue_t * +pqueue_init(unsigned int n, + pqueue_cmp_pri_f cmppri, + pqueue_get_pri_f getpri, + pqueue_set_pri_f setpri, + pqueue_get_pos_f getpos, + pqueue_set_pos_f setpos); + + +/** + * free all memory used by the queue + * @param q the queue + */ +void pqueue_free(pqueue_t *q); + + +/** + * return the size of the queue. + * @param q the queue + */ +unsigned int pqueue_size(pqueue_t *q); + + +/** + * insert an item into the queue. + * @param q the queue + * @param d the item + * @return 0 on success + */ +int pqueue_insert(pqueue_t *q, void *d); + + +/** + * move an existing entry to a different priority + * @param q the queue + * @param new_pri the new priority + * @param d the entry + */ +void +pqueue_change_priority(pqueue_t *q, + pqueue_pri_t new_pri, + void *d); + + +/** + * pop the highest-ranking item from the queue. + * @param q the queue + * @return NULL on error, otherwise the entry + */ +void *pqueue_pop(pqueue_t *q); + + +/** + * remove an item from the queue. + * @param q the queue + * @param d the entry + * @return 0 on success + */ +int pqueue_remove(pqueue_t *q, void *d); + + +/** + * access highest-ranking item without removing it.
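+ * (Editor's note: the entry stays in the queue, so an immediately
+ * following pqueue_pop() would return this same entry.)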
+ * @param q the queue + * @return NULL on error, otherwise the entry + */ +void *pqueue_peek(pqueue_t *q); + + +/** + * print the queue + * @internal + * DEBUG function only + * @param q the queue + * @param out the output handle + * @param print the callback function to print the entry + */ +void +pqueue_print(pqueue_t *q, FILE *out, pqueue_print_entry_f print); + + +/** + * dump the queue and its internal structure + * @internal + * debug function only + * @param q the queue + * @param out the output handle + * @param print the callback function to print the entry + */ +void pqueue_dump(pqueue_t *q, FILE *out, pqueue_print_entry_f print); + + +/** + * checks that the pq is in the right order, etc + * @internal + * debug function only + * @param q the queue + */ +int pqueue_is_valid(pqueue_t *q); + +#endif +/** @} */ diff -Nru check-mk-1.2.2p3/nagios4/README check-mk-1.2.6p12/nagios4/README --- check-mk-1.2.2p3/nagios4/README 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/nagios4/README 2014-02-10 09:00:20.000000000 +0000 @@ -0,0 +1 @@ +These files are taken directly from Nagios 4.0.2. diff -Nru check-mk-1.2.2p3/nagios4/runcmd.h check-mk-1.2.6p12/nagios4/runcmd.h --- check-mk-1.2.2p3/nagios4/runcmd.h 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/nagios4/runcmd.h 2014-02-10 09:00:20.000000000 +0000 @@ -0,0 +1,96 @@ +#ifndef LIBNAGIOS_runcmd_h__ +#define LIBNAGIOS_runcmd_h__ +#include <sys/types.h> + +/** + * @file runcmd.h + * @brief runcmd library function declarations + * + * @note This is inherited from the nagiosplugins project, although + * I (AE) wrote the original code, and it might need refactoring + * for performance later. + * @{ + */ + +/** Return code bitflags for runcmd_cmd2strv() */ +#define RUNCMD_HAS_REDIR (1 << 0) /**< I/O redirection */ +#define RUNCMD_HAS_SUBCOMMAND (1 << 1) /**< subcommands present */ +#define RUNCMD_HAS_PAREN (1 << 2) /**< parentheses present in command */ +#define RUNCMD_HAS_JOBCONTROL (1 << 3) /**< job control stuff present */ +#define RUNCMD_HAS_UBSQ (1 << 4) /**< unbalanced single quotes */ +#define RUNCMD_HAS_UBDQ (1 << 5) /**< unbalanced double quotes */ +#define RUNCMD_HAS_WILDCARD (1 << 6) /**< wildcards present */ +#define RUNCMD_HAS_SHVAR (1 << 7) /**< shell variables present */ + + +#define RUNCMD_EFD (-1) /**< Failed to pipe() or open() */ +#define RUNCMD_EALLOC (-2) /**< Failed to alloc */ +#define RUNCMD_ECMD (-3) /**< Bad command */ +#define RUNCMD_EFORK (-4) /**< Failed to fork() */ +#define RUNCMD_EINVAL (-5) /**< Invalid parameters */ +#define RUNCMD_EWAIT (-6) /**< Failed to wait() */ + +/** + * Initialize the runcmd library. + * + * Only multi-threaded programs that might launch the first external + * program from multiple threads simultaneously need to bother with + * this.
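+ *
+ * Illustrative call site (editor's sketch, not from the original header):
+ * @code
+ * int main(int argc, char **argv)
+ * {
+ *     runcmd_init();  // once, before any thread launches a command
+ *     // ... spawn threads, run checks ...
+ *     return 0;
+ * }
+ * @endcode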
+ */ +extern void runcmd_init(void); + +/** + * Return pid of a command with a specific file descriptor + * @param[in] fd stdout filedescriptor of the child to get pid from + * @return pid of the child, or 0 on errors + */ +extern pid_t runcmd_pid(int fd); + +/** + * Return explanation of which system call or operation failed + * @param code Error code returned by a library function + * @return A non-free()'able string explaining where the error occurred + */ +extern const char *runcmd_strerror(int code); + +/** + * Start a command from a command string + * @param[in] cmd The command to launch + * @param[out] pfd Child's stdout filedescriptor + * @param[out] pfderr Child's stderr filedescriptor + * @param[in] env Currently ignored for portability + * @param[in] iobreg The callback function to register the iobrokers for the read ends of the pipe + * @param[in] iobregarg The "arg" value to pass to iobroker_register() + */ +extern int runcmd_open(const char *cmd, int *pfd, int *pfderr, char **env, + void (*iobreg)(int, int, void *), void *iobregarg) + __attribute__((__nonnull__(1, 2, 3, 5, 6))); + +/** + * Close a command and return its exit status + * @note Don't use this. It's a crude way to reap children, suitable + * only for launching a one-shot program. + * + * @param[in] fd The child's stdout filedescriptor + * @return exit-status of the child, or -1 in case of errors + */ +extern int runcmd_close(int fd); + +/** + * Convert a string to a vector of arguments like a shell would + * @note This might have bugs and is only tested to behave similarly + * to how /bin/sh does things. For csh or other non bash-ish shells + * there are no guarantees. + * @note The out_argv array has to be large enough to hold all strings + * found in the command. + * @param[in] str The string to convert to an argument vector + * @param[out] out_argc The number of arguments found + * @param[out] out_argv The argument vector + * @return 0 on (great) success, or a bitmask of failure-codes + * representing e.g. unclosed quotes, job control or output redirection. + * See the RUNCMD_HAS_* and their ilk to find out about the flags.
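+ *
+ * Illustrative use (editor's sketch; the array size is an arbitrary
+ * assumption -- it merely has to be large enough for the command):
+ * @code
+ * char *argv[256];
+ * int argc = 0;
+ * int flags = runcmd_cmd2strv("/bin/ping -c 1 localhost", &argc, argv);
+ * if (flags == 0) {
+ *     // argc == 4; argv[0] == "/bin/ping", argv[1] == "-c", ...
+ * }
+ * @endcode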
+ */ +extern int runcmd_cmd2strv(const char *str, int *out_argc, char **out_argv); + +#endif /* INCLUDE_runcmd_h__ */ +/** @} */ diff -Nru check-mk-1.2.2p3/nagios4/shared.h check-mk-1.2.6p12/nagios4/shared.h --- check-mk-1.2.2p3/nagios4/shared.h 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/nagios4/shared.h 2014-02-10 09:00:20.000000000 +0000 @@ -0,0 +1,55 @@ +#ifndef INCLUDE__shared_h__ +#define INCLUDE__shared_h__ + +#include +#include "libnagios.h" + +NAGIOS_BEGIN_DECL + +/* mmapfile structure - used for reading files via mmap() */ +typedef struct mmapfile_struct { + char *path; + int mode; + int fd; + unsigned long file_size; + unsigned long current_position; + unsigned long current_line; + void *mmap_buf; + } mmapfile; + +/* official count of first-class objects */ +struct object_count { + unsigned int commands; + unsigned int timeperiods; + unsigned int hosts; + unsigned int hostescalations; + unsigned int hostdependencies; + unsigned int services; + unsigned int serviceescalations; + unsigned int servicedependencies; + unsigned int contacts; + unsigned int contactgroups; + unsigned int hostgroups; + unsigned int servicegroups; + }; + +extern struct object_count num_objects; + +extern void timing_point(const char *fmt, ...); /* print a message and the time since the first message */ +extern char *my_strtok(char *buffer, const char *tokens); +extern char *my_strsep(char **stringp, const char *delim); +extern mmapfile *mmap_fopen(const char *filename); +extern int mmap_fclose(mmapfile *temp_mmapfile); +extern char *mmap_fgets(mmapfile *temp_mmapfile); +extern char *mmap_fgets_multiline(mmapfile * temp_mmapfile); +extern void strip(char *buffer); +extern int hashfunc(const char *name1, const char *name2, int hashslots); +extern int compare_hashdata(const char *val1a, const char *val1b, const char *val2a, + const char *val2b); +extern void get_datetime_string(time_t *raw_time, char *buffer, + int buffer_length, int type); +extern void get_time_breakdown(unsigned long raw_time, int *days, int *hours, + int *minutes, int *seconds); + +NAGIOS_END_DECL +#endif diff -Nru check-mk-1.2.2p3/nagios4/skiplist.h check-mk-1.2.6p12/nagios4/skiplist.h --- check-mk-1.2.2p3/nagios4/skiplist.h 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/nagios4/skiplist.h 2014-02-10 09:00:20.000000000 +0000 @@ -0,0 +1,162 @@ +/************************************************************************ + * + * SKIPLIST.H - Skiplist data structures and functions + * + * + * License: + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
+ ************************************************************************/ + +#ifndef LIBNAGIOS_skiplist_h__ +#define LIBNAGIOS_skiplist_h__ +#include "lnag-utils.h" + +/** + * @file skiplist.h + * @brief Skiplist library functions + * + * http://en.wikipedia.org/wiki/Skiplist + * + * @{ + */ + +#define SKIPLIST_OK 0 /**< All OK */ +#define SKIPLIST_ERROR_ARGS 1 /**< Bad arguments */ +#define SKIPLIST_ERROR_MEMORY 2 /**< Memory error */ +#define SKIPLIST_ERROR_DUPLICATE 3 /**< Trying to insert non-unique item */ + +NAGIOS_BEGIN_DECL + +struct skiplist_struct; +typedef struct skiplist_struct skiplist; + +/** + * Return number of items currently in the skiplist + * @param list The list to investigate + * @return number of items in list + */ +unsigned long skiplist_num_items(skiplist *list); + +/** + * Create a new skiplist + * @param max_levels Number of "ups" we have. + * This should be kept close to lg2 of the number of items to store. + * @param level_probability Ignored + * @param allow_duplicates Allow duplicates in this list + * @param append_duplicates Append rather than prepend duplicates + * @param compare_function Comparison function for data entries + * @return pointer to a new skiplist on success, NULL on errors + */ +skiplist *skiplist_new(int max_levels, float level_probability, int allow_duplicates, int append_duplicates, int (*compare_function)(void *, void *)); + +/** + * Insert an item into a skiplist + * @param list The list to insert to + * @param data The data to insert + * @return SKIPLIST_OK on success, or an error code + */ +int skiplist_insert(skiplist *list, void *data); + +/** + * Empty the skiplist of all data + * @param list The list to empty + * @return ERROR on failures. OK on success + */ +int skiplist_empty(skiplist *list); + +/** + * Free all nodes (but not all data) in a skiplist + * This is similar to skiplist_empty(), but also free()'s the head node + * @param list The list to free + * @return OK on success, ERROR on failures + */ +int skiplist_free(skiplist **list); + +/** + * Get the first item in the skiplist + * @param list The list to peek into + * @return The first item, or NULL if there is none + */ +void *skiplist_peek(skiplist *list); + +/** + * Pop the first item from the skiplist + * @param list The list to pop from + */ +void *skiplist_pop(skiplist *list); + +/** + * Get first node of skiplist + * @param list The list to search + * @param[out] node_ptr State variable for skiplist_get_next() + * @return The data-item of the first node on success, NULL on errors + */ +void *skiplist_get_first(skiplist *list, void **node_ptr); + +/** + * Get next item from node_ptr + * @param[out] node_ptr State variable primed from an earlier call to + * skiplist_get_first() or skiplist_get_next() + * @return The next data-item matching node_ptr on success, NULL on errors + */ +void *skiplist_get_next(void **node_ptr); + +/** + * Find first entry in skiplist matching data + * @param list The list to search + * @param data Comparison object used to search + * @param[out] node_ptr State variable for future lookups with + * skiplist_find_next() + * @return The first found data-item, or NULL if none could be found + */ +void *skiplist_find_first(skiplist *list, void *data, void **node_ptr); + +/** + * Find next entry in skiplist matching data + * @param list The list to search + * @param data The data to compare against + * @param[out] node_ptr State var primed from earlier call to + * skiplist_find_next() or skiplist_find_first() + * @return The next found
data-item, or NULL if none could be found + */ +void *skiplist_find_next(skiplist *list, void *data, void **node_ptr); + +/** + * Delete all items matching 'data' from skiplist + * @param list The list to delete from + * @param data Comparison object used to find the real node + * @return OK on success, ERROR on errors + */ +int skiplist_delete(skiplist *list, void *data); + +/** + * Delete first item matching 'data' from skiplist + * @param list The list to delete from + * @param data Comparison object used to search the list + * @return OK on success, ERROR on errors. + */ +int skiplist_delete_first(skiplist *list, void *data); + +/** + * Delete a particular node from the skiplist + * @param list The list to search + * @param node_ptr The node to delete + * @return OK on success, ERROR on errors. + */ +int skiplist_delete_node(skiplist *list, void *node_ptr); + +NAGIOS_END_DECL +/* @} */ +#endif diff -Nru check-mk-1.2.2p3/nagios4/snprintf.h check-mk-1.2.6p12/nagios4/snprintf.h --- check-mk-1.2.2p3/nagios4/snprintf.h 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/nagios4/snprintf.h 2014-02-10 09:00:20.000000000 +0000 @@ -0,0 +1,7 @@ +/* lib/snprintf.h. Generated from snprintf.h.in by configure. */ +/* -*- C -*- */ +#ifndef LIBNAGIOS_snprintf_h__ +#define LIBNAGIOS_snprintf_h__ +/* #undef HAVE_SNPRINTF */ +/* #undef NEED_VA_LIST */ +#endif diff -Nru check-mk-1.2.2p3/nagios4/squeue.h check-mk-1.2.6p12/nagios4/squeue.h --- check-mk-1.2.2p3/nagios4/squeue.h 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/nagios4/squeue.h 2014-02-10 09:00:20.000000000 +0000 @@ -0,0 +1,161 @@ +#ifndef LIBNAGIOS_squeue_h__ +#define LIBNAGIOS_squeue_h__ +#include +#include +#include "pqueue.h" +/** + * @file squeue.h + * @brief Scheduling queue function declarations + * + * This library is based on the pqueue api, which implements a + * priority queue based on a binary heap, providing O(lg n) times + * for insert() and remove(), and O(1) time for peek(). + * @note There is no "find". Callers must maintain pointers to their + * scheduled events if they wish to be able to remove them. + * + * @{ + */ + +/* + * All opaque types here. + * The pqueue library can be useful on its own though, so we + * don't block that from user view. + */ +typedef pqueue_t squeue_t; +struct squeue_event; +typedef struct squeue_event squeue_event; + +/** + * Options for squeue_destroy()'s flag parameter + */ +#define SQUEUE_FREE_DATA (1 << 0) /** Call free() on all data pointers */ + +/** + * Get the scheduled runtime of this event + * @param[in] evt The event to get runtime of + * @return struct timeval on success, NULL on errors + */ +extern const struct timeval *squeue_event_runtime(squeue_event *evt); + +/** + * Get data of an squeue_event struct + * @param[in] evt The event to operate on + * @return The data object pointed to by the event + */ +extern void *squeue_event_data(squeue_event *evt); + +/** + * Creates a scheduling queue optimized for handling events within + * the given timeframe. Callers should take care to create a queue + * of a decent but not overly large size, as too small or too large + * a queue will impact performance negatively. A queue can hold any + * number of events. A good value for "horizon" would be the max + * seconds into the future one expects to schedule things, although + * with few scheduled items in that timeframe you'd be better off + * using a more narrow horizon. 
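+ *
+ * Editor's note: the current API takes only the size hint below. An
+ * illustrative sketch, with my_data standing in for any caller-owned
+ * payload:
+ * @code
+ * squeue_t *q = squeue_create(1024);
+ * squeue_event *evt = squeue_add(q, time(NULL) + 60, my_data);
+ * void *next = squeue_peek(q);  // earliest event's data, not removed
+ * squeue_remove(q, evt);
+ * squeue_destroy(q, 0);
+ * @endcode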
+ * + * @param size Hint about how large this queue will get + * @return A pointer to a scheduling queue + */ +extern squeue_t *squeue_create(unsigned int size); + +/** + * Destroys a scheduling queue completely + * @param[in] q The doomed queue + * @param[in] flags Flags determining the level of destruction + */ +extern void squeue_destroy(squeue_t *q, int flags); + +/** + * Enqueue an event with microsecond precision. + * It's up to the caller to keep the event pointer in case he/she + * wants to remove the event from the queue later. + * + * @param q The scheduling queue to add to + * @param tv When this event should occur + * @param data Pointer to any kind of data + * @return The complete scheduled event + */ +extern squeue_event *squeue_add_tv(squeue_t *q, struct timeval *tv, void *data); + +/** + * Adds an event to the scheduling queue. + * See notes for squeue_add_tv() for details + * + * @param q The scheduling queue to add to + * @param when The unix timestamp when this event is to occur + * @param data Pointer to any kind of data + * @return The complete scheduled event + */ +extern squeue_event *squeue_add(squeue_t *q, time_t when, void *data); + +/** + * Adds an event to the scheduling queue with microsecond precision + * See notes on squeue_add_tv() for details + * + * @param[in] q The scheduling queue to add to + * @param[in] when Unix timestamp when this event should occur + * @param[in] usec Microsecond part of 'when' this event should occur + * @param[in] data Pointer to any kind of data + * @return NULL on errors. squeue_event pointer on success + */ +extern squeue_event *squeue_add_usec(squeue_t *q, time_t when, time_t usec, void *data); + +/** + * Adds an event to the scheduling queue with millisecond precision + * See notes on squeue_add_tv() for details + * + * @param[in] q The scheduling queue to add to + * @param[in] when Unix timestamp when this event should occur + * @param[in] msec Millisecond part of 'when' this event should occur + * @param[in] data Pointer to any kind of data + * @return NULL on errors. squeue_event pointer on success + */ +extern squeue_event *squeue_add_msec(squeue_t *q, time_t when, time_t msec, void *data); + +/** + * Returns the data of the next scheduled event from the scheduling + * queue without removing it from the queue. + * + * @param q The scheduling queue to peek into + */ +extern void *squeue_peek(squeue_t *q); + +/** + * Pops the next scheduled event from the scheduling queue and + * returns the data for it. + * This is equivalent to squeue_peek() + squeue_remove() + * @note This causes the squeue_event to be free()'d. + * + * @param q The scheduling queue to pop from + */ +extern void *squeue_pop(squeue_t *q); + +/** + * Removes the given event from the scheduling queue + * @note This causes the associated squeue_event() to be free()'d. + * @param[in] q The scheduling queue to remove from + * @param[in] evt The event to remove + */ +extern int squeue_remove(squeue_t *q, squeue_event *evt); + +/** + * Returns the number of events in the scheduling queue. This + * function never fails.
+ * + * @param[in] q The scheduling queue to inspect + * @return number of events in the inspected queue + */ +extern unsigned int squeue_size(squeue_t *q); + + +/** + * Returns true if passed timeval is after the time for the event + * + * @param[in] evt The queue event to inspect + * @param[in] reftime The reference time to compare to the queue event time + * @return 1 if reftime > event time, 0 otherwise + */ +extern int squeue_evt_when_is_after(squeue_event *evt, struct timeval *reftime); +#endif +/** @} */ diff -Nru check-mk-1.2.2p3/nagios4/sretention.h check-mk-1.2.6p12/nagios4/sretention.h --- check-mk-1.2.2p3/nagios4/sretention.h 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/nagios4/sretention.h 2014-02-10 09:00:20.000000000 +0000 @@ -0,0 +1,31 @@ +/***************************************************************************** + * + * SRETENTION.H - Header for state retention routines + * + * + * License: + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + * + *****************************************************************************/ + +#include "common.h" +NAGIOS_BEGIN_DECL + +int initialize_retention_data(const char *); +int cleanup_retention_data(void); +int save_state_information(int); /* saves all host and service state information */ +int read_initial_state_information(void); /* reads in initial host and service state information */ + +NAGIOS_END_DECL diff -Nru check-mk-1.2.2p3/nagios4/statusdata.h check-mk-1.2.6p12/nagios4/statusdata.h --- check-mk-1.2.2p3/nagios4/statusdata.h 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/nagios4/statusdata.h 2014-02-10 09:00:20.000000000 +0000 @@ -0,0 +1,199 @@ +/***************************************************************************** + * + * STATUSDATA.H - Header for external status data routines + * + * + * License: + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ * + *****************************************************************************/ + +#ifndef _STATUSDATA_H +#define _STATUSDATA_H + +#include "common.h" +#include "objects.h" + +#ifdef NSCGI +#define READ_PROGRAM_STATUS 1 +#define READ_HOST_STATUS 2 +#define READ_SERVICE_STATUS 4 +#define READ_CONTACT_STATUS 8 + +#define READ_ALL_STATUS_DATA READ_PROGRAM_STATUS | READ_HOST_STATUS | READ_SERVICE_STATUS | READ_CONTACT_STATUS + + + + /*************************** CHAINED HASH LIMITS ***************************/ + +#define SERVICESTATUS_HASHSLOTS 1024 +#define HOSTSTATUS_HASHSLOTS 1024 + + + /**************************** DATA STRUCTURES ******************************/ + +NAGIOS_BEGIN_DECL + +/* HOST STATUS structure */ +typedef struct hoststatus_struct { + char *host_name; + char *plugin_output; + char *long_plugin_output; + char *perf_data; + int status; + time_t last_update; + int has_been_checked; + int should_be_scheduled; + int current_attempt; + int max_attempts; + time_t last_check; + time_t next_check; + int check_options; + int check_type; + time_t last_state_change; + time_t last_hard_state_change; + int last_hard_state; + time_t last_time_up; + time_t last_time_down; + time_t last_time_unreachable; + int state_type; + time_t last_notification; + time_t next_notification; + int no_more_notifications; + int notifications_enabled; + int problem_has_been_acknowledged; + int acknowledgement_type; + int current_notification_number; + int accept_passive_checks; + int event_handler_enabled; + int checks_enabled; + int flap_detection_enabled; + int is_flapping; + double percent_state_change; + double latency; + double execution_time; + int scheduled_downtime_depth; + int process_performance_data; + int obsess; + struct hoststatus_struct *next; + struct hoststatus_struct *nexthash; + } hoststatus; + + +/* SERVICE STATUS structure */ +typedef struct servicestatus_struct { + char *host_name; + char *description; + char *plugin_output; + char *long_plugin_output; + char *perf_data; + int max_attempts; + int current_attempt; + int status; + time_t last_update; + int has_been_checked; + int should_be_scheduled; + time_t last_check; + time_t next_check; + int check_options; + int check_type; + int checks_enabled; + time_t last_state_change; + time_t last_hard_state_change; + int last_hard_state; + time_t last_time_ok; + time_t last_time_warning; + time_t last_time_unknown; + time_t last_time_critical; + int state_type; + time_t last_notification; + time_t next_notification; + int no_more_notifications; + int notifications_enabled; + int problem_has_been_acknowledged; + int acknowledgement_type; + int current_notification_number; + int accept_passive_checks; + int event_handler_enabled; + int flap_detection_enabled; + int is_flapping; + double percent_state_change; + double latency; + double execution_time; + int scheduled_downtime_depth; + int process_performance_data; + int obsess; + struct servicestatus_struct *next; + struct servicestatus_struct *nexthash; + } servicestatus; + + +/*************************** SERVICE STATES ***************************/ + +#define SERVICE_PENDING 1 +#define SERVICE_OK 2 +#define SERVICE_WARNING 4 +#define SERVICE_UNKNOWN 8 +#define SERVICE_CRITICAL 16 + + + +/**************************** HOST STATES ****************************/ + +#define HOST_PENDING 1 +#define SD_HOST_UP 2 +#define SD_HOST_DOWN 4 +#define SD_HOST_UNREACHABLE 8 + +/* Convert the (historically ordered) host states into a notion of "urgency". 
+ This is defined as, in ascending order: + SD_HOST_UP (business as usual) + HOST_PENDING (waiting for - supposedly first - check result) + SD_HOST_UNREACHABLE (a problem, but likely not its cause) + SD_HOST_DOWN (look here!!) + The exact values are irrelevant, so I try to make the conversion as + CPU-efficient as possible: */ +#define HOST_URGENCY(hs) ((hs)|(((hs)&0x5)<<1)) +/* i.e. UP -> 2, PENDING -> 3, UNREACHABLE -> 8, DOWN -> 12 */ + + + +/**************************** FUNCTIONS ******************************/ + +int read_status_data(const char *, int); /* reads all status data */ +int add_host_status(hoststatus *); /* adds a host status entry to the list in memory */ +int add_service_status(servicestatus *); /* adds a service status entry to the list in memory */ + +int add_hoststatus_to_hashlist(hoststatus *); +int add_servicestatus_to_hashlist(servicestatus *); + +servicestatus *find_servicestatus(char *, char *); /* finds status information for a specific service */ +hoststatus *find_hoststatus(char *); /* finds status information for a specific host */ +int get_servicestatus_count(char *, int); /* gets total number of services of a certain type for a specific host */ + +void free_status_data(void); /* free all memory allocated to status data */ +#endif + +#ifndef NSCGI +int initialize_status_data(const char *); /* initializes status data at program start */ +int update_all_status_data(void); /* updates all status data */ +int cleanup_status_data(int); /* cleans up status data at program termination */ +int update_program_status(int); /* updates program status data */ +int update_host_status(host *, int); /* updates host status data */ +int update_service_status(service *, int); /* updates service status data */ +int update_contact_status(contact *, int); /* updates contact status data */ +#endif + +NAGIOS_END_DECL +#endif diff -Nru check-mk-1.2.2p3/nagios4/worker.h check-mk-1.2.6p12/nagios4/worker.h --- check-mk-1.2.2p3/nagios4/worker.h 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/nagios4/worker.h 2014-02-10 09:00:20.000000000 +0000 @@ -0,0 +1,132 @@ +#ifndef LIBNAGIOS_worker_h__ +#define LIBNAGIOS_worker_h__ +#include +#include +#include +#include +#include +#include +#include +#include "libnagios.h" + +/** + * @file worker.h + * @brief Worker implementation along with various helpers + * + * This code isn't really in the "library" category, but it's tucked + * in here to provide a good resource for writing remote workers and + * as an example on how to use the APIs found here. + */ + +#ifndef ETIME +#define ETIME ETIMEDOUT +#endif + +typedef struct iobuf { + int fd; + unsigned int len; + char *buf; +} iobuf; + +typedef struct execution_information execution_information; + +typedef struct child_process { + unsigned int id, timeout; + char *cmd; + int ret; + struct kvvec *request; + iobuf outstd; + iobuf outerr; + execution_information *ei; +} child_process; + +/** + * Callback for enter_worker that simply runs a command + */ +extern int start_cmd(child_process *cp); + +/** + * Spawn a helper with a specific process name + * The first entry in the argv parameter will be the name of the + * new process, unless the process changes the name itself. + * @param path The path to the executable (can be $PATH relative) + * @param argv Argument vector for the helper to spawn + */ +extern int spawn_named_helper(char *path, char **argv); + +/** + * Spawn any random helper process.
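+ * A one-line sketch (editor's illustration; the helper path is made up):
+ * @code
+ * char *argv[] = { "/usr/local/bin/my-helper", "--ping-mode", NULL };
+ * spawn_helper(argv);
+ * @endcode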
+ * Uses spawn_named_helper() + * @param argv The (NULL-sentinel-terminated) argument vector + * @return 0 on success, < 0 on errors + */ +extern int spawn_helper(char **argv); + +/** + * To be called when a child_process has completed to ship the result to nagios + * @param cp The child_process that describes the job + * @param reason 0 if everything was OK, 1 if the job was unable to run + * @return 0 on success, non-zero otherwise + */ +extern int finish_job(child_process *cp, int reason); + +/** + * Start to poll the socket and call the callback when there are new tasks + * @param sd A socket descriptor to poll + * @param cb The callback to call upon completion + */ +extern void enter_worker(int sd, int (*cb)(child_process*)); + +/** + * Build a buffer from a key/value vector buffer. + * The resulting kvvec-buffer is suitable for sending between + * worker and master in either direction, as it has all the + * right delimiters in all the right places. + * @param kvv The key/value vector to build the buffer from + * @return NULL on errors, a newly allocated kvvec buffer on success + */ +extern struct kvvec_buf *build_kvvec_buf(struct kvvec *kvv); + +/** + * Send a key/value vector as a bytestream through a socket + * @param[in] sd The socket descriptor to send to + * @param kvv The key/value vector to send + * @return The number of bytes sent, or -1 on errors + */ +extern int worker_send_kvvec(int sd, struct kvvec *kvv); + +/** @deprecated Use worker_send_kvvec() instead */ +extern int send_kvvec(int sd, struct kvvec *kvv) + NAGIOS_DEPRECATED(4.1.0, "worker_send_kvvec()"); + +/** + * Grab a worker message from an iocache buffer + * @param[in] ioc The io cache + * @param[out] size Out buffer for buffer length + * @param[in] flags Currently unused + * @return A buffer from the iocache on success; NULL on errors + */ +extern char *worker_ioc2msg(iocache *ioc, unsigned long *size, int flags); + +/** + * Parse a worker message to a preallocated key/value vector + * + * @param[in] kvv Key/value vector to fill + * @param[in] buf The buffer to parse + * @param[in] len Length of 'buf' + * @param[in] kvv_flags Flags for buf2kvvec() + * @return 0 on success, < 0 on errors + */ +extern int worker_buf2kvvec_prealloc(struct kvvec *kvv, char *buf, unsigned long len, int kvv_flags); + +/** + * Set some common socket options + * @param[in] sd The socket to set options for + * @param[in] bufsize Size to set send and receive buffers to + * @return 0 on success. < 0 on errors + */ +extern int worker_set_sockopts(int sd, int bufsize); + +/** @deprecated Use worker_set_sockopts() instead */ +extern int set_socket_options(int sd, int bufsize) + NAGIOS_DEPRECATED(4.1.0, "worker_set_sockopts()"); +#endif /* INCLUDE_worker_h__ */ diff -Nru check-mk-1.2.2p3/netapp_api_aggr check-mk-1.2.6p12/netapp_api_aggr --- check-mk-1.2.2p3/netapp_api_aggr 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/netapp_api_aggr 2015-09-21 10:59:54.000000000 +0000 @@ -0,0 +1,61 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK.
+# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# tails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. + +# <<<netapp_api_aggr>>> +# [config_instance] aggr-list-info +# aggregates +# aggr-info +# name aggr0 +# size-total 6606028800 +# size-available 5707771904 + +def inventory_netapp_api_aggr(parsed): + aggrs = parsed.get("aggr-list-info") + return [ (values.get("name"), {}) for name, values in aggrs.items() if values.get("name") ] + +def check_netapp_api_aggr(item, params, parsed): + aggrs = parsed.get("aggr-list-info") + aggr = aggrs.get(item) + if not aggr: + return (3, "Aggregation not found in agent output") + + mega = 1024.0 * 1024.0 + size_total = int(aggr.get("size-total")) / mega + size_avail = int(aggr.get("size-available")) / mega + return df_check_filesystem_list(item, params, [(item, size_total, size_avail)]) + +check_info["netapp_api_aggr"] = { + 'check_function' : check_netapp_api_aggr, + 'parse_function' : lambda info: netapp_api_convert_info(info, + configs = {"aggr-list-info": {"block-name": "aggr-info", "key": "name"}}), + 'inventory_function' : inventory_netapp_api_aggr, + 'service_description' : 'Aggregation %s', + 'group' : 'filesystem', + 'has_perfdata' : True, + 'includes' : [ "df.include", "netapp_api.include" ], + 'default_levels_variable' : 'filesystem_default_levels', + +} diff -Nru check-mk-1.2.2p3/netapp_api_cluster check-mk-1.2.6p12/netapp_api_cluster --- check-mk-1.2.2p3/netapp_api_cluster 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/netapp_api_cluster 2015-06-24 09:48:37.000000000 +0000 @@ -0,0 +1,111 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# tails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA.
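+
+# Editor's note (illustrative, not part of the original check): as in the
+# other netapp_api checks, the agent section arrives as rows of words, and
+# the two-column rows are folded into a dict of key/value pairs, e.g.:
+#
+#   data = dict([line for line in info if len(line) == 2])
+#   data.get("state")    # -> "CONNECTED" in the second example below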
+ +# <<>> +# [config_instance] cf-status +# current-time 1415200211 +# current-mode non_ha +# is-enabled true +# partner +# state ERROR +# is-interconnect-up false +# partner-in-headswap false +# local-in-headswap false +# new-partner-sysid 0 +# +# <<>> +# [config_instance] cf-status +# current-time 1415280951 +# is-enabled true +# partner zmucfd +# state CONNECTED +# time-master-status unknown +# is-interconnect-up true + + +def inventory_netapp_api_cluster(info): + data = dict([line for line in info if len(line) == 2]) + if data.get("current-mode", "").lower() != "non_ha": # non_ha is standalone + return [ (data.get("partner"), {"state": data.get("state", "").lower()}) ] + +# Cluster states according to docu: +# connected - Partner is available for takeover +# takeover_scheduled - Partner is down and takeover is scheduled +# takeover_started - Takeover process has begun +# takeover - Currently controlling partner's resources. +# taken_over - Partner is controlling filer's resources +# takeover_failed - Failed to takeover the partner +# giving_back - Sendhome process in progress +# giveback_partial_waiting - This node controls partner aggregates even +# though the node is not in takeover. And we're waiting for a connection to the partner. +# giveback_partial_connected - This node controls partner aggregates even though the node is not in takeover. +# The partner is available to receive the aggregates. +# waiting_for_root_aggr - Partner is controlling dblade's root aggregate If we're in this state, many other optional fields are not returned. +# waiting - Waiting for a connection to partner. Generally happens while partner is rebooting. +# in_maintenance_mode - node is in maintenance mode. In the mode it is not possible to determine more detailed information (e.g. cluster or not; takeover or not, etc). +# pending_shutdown - starting a takeover/sendhome is inhibited due to a pending system shutdown. i +# error - There is an error with the system +# User have to compare the return values case-insensitively. + +def check_netapp_api_cluster(item, params, info): + data = dict([line for line in info if len(line) == 2]) + + had_errors = False + state = data.get("state").lower() + if state == "error": + had_errors = True + yield 2, "Cluster state error" + if state == "takeover": + had_errors = True + yield 1, "Cluster takeover" + elif state == "takeover_failed": + had_errors = True + yield 2, "Takeover failed. Reason: %s" % data.get("takeover-failure-reason", "None available") + elif state != params["state"]: + had_errors = True + yield 1, "Cluster state is %s. 
(%s expected)" % (state, params.get("state")) + + if data.get("is-interconnect-up") != "true": + had_errors = True + yield 2, "Cluster interconnect is not up" + + if data.get("current-mode", "") == "non_ha": + had_errors = True + yield 1, "Running in stand-alone mode" + + if data.get("partner") != item: + had_errors = True + yield 1, "Partner name changed: %s instead of %s" % (data.get("partner", "None"), item) + + if not had_errors: + yield 0, "Cluster Status OK" + +check_info["netapp_api_cluster"] = { + 'check_function' : check_netapp_api_cluster, + 'inventory_function' : inventory_netapp_api_cluster, + 'service_description' : 'Cluster with %s', +} + diff -Nru check-mk-1.2.2p3/netapp_api_cpu check-mk-1.2.6p12/netapp_api_cpu --- check-mk-1.2.2p3/netapp_api_cpu 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/netapp_api_cpu 2015-09-21 10:59:54.000000000 +0000 @@ -0,0 +1,69 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. + +# <<>> +# [counter_instance] system +# ---new_counter--- +# cpu_busy 8362860064 +# num_processors 2 + +netapp_api_cpu_default_levels = (90.0, 95.0) + +def inventory_netapp_api_cpu_utilization(info): + return [ (None, 'netapp_api_cpu_default_levels') ] + +def check_netapp_api_cpu_utilization(item, params, info): + data = dict([line for line in info if len(line) == 2]) + now = time.time() + + cpu_busy = int(data["cpu_busy"]) + num_cpus = int(data["num_processors"]) + timedif, ticks_per_sec = get_counter("netapp_api_cpu.utilization", now, cpu_busy) + cpusecs_per_sec = ticks_per_sec / 1000000.0 + used_perc = 100.0 * cpusecs_per_sec + + # Due to timeing invariancies the measured level can become > 100%. + # This makes users unhappy, so cut it off. 
+ if used_perc < 0: + used_perc = 0 + elif used_perc > 100: + used_perc = 100 + + state, infotext, perfdata = check_cpu_util(used_perc, params, now) + perfdata[0] = perfdata[0][:5] + (num_cpus,) + infotext += ", %d CPUs" % num_cpus + return state, infotext, perfdata + +check_info["netapp_api_cpu.utilization"] = { + 'check_function' : check_netapp_api_cpu_utilization, + 'inventory_function' : inventory_netapp_api_cpu_utilization, + 'service_description' : 'CPU utilization', + 'has_perfdata' : True, + 'group' : 'cpu_utilization', + 'includes' : [ "cpu_util.include" ] +} + + diff -Nru check-mk-1.2.2p3/netapp_api_disk check-mk-1.2.6p12/netapp_api_disk --- check-mk-1.2.2p3/netapp_api_disk 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/netapp_api_disk 2015-09-21 10:59:54.000000000 +0000 @@ -0,0 +1,144 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. + + +# Agent output: +# <<>> +# [config_instance] disk-list-info +# disk-detail-info +# disk-uid 2000B452:53C3890F:00000000:00000000:00000000:00000000:00000000:00000000:00000000:00000000 +# raid-state spare +# bay 13 +# used-space 587202560000 +# physical-space 587421536256 +# shelf 2 +# serial-number 6SL94Q9C0000N5055GAW +# disk-detail-info +# disk-uid 2000B452:53C3893E:00000000:00000000:00000000:00000000:00000000:00000000:00000000:00000000 +# raid-state spare +# ... +# shelf-uid-instance +# shelf-uid 50:05:0c:c0:02:20:9a:7c +# disks 2040000C:CA1016C4:00000000:00000000:00000000:00000000:00000000:00000000:00000000:00000000 ... 
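
The netapp_api_cpu check above derives utilization from the cpu_busy counter: get_counter() yields the per-second increase between two agent cycles, and since cpu_busy counts busy microseconds, dividing by 1,000,000 gives busy seconds per second. An illustrative, self-contained version of that computation, with two explicit samples standing in for the persistent counter store:

def cpu_util_percent(t0, busy0, t1, busy1):
    # cpu_busy is a monotonically increasing counter of busy microseconds
    ticks_per_sec = (busy1 - busy0) / float(t1 - t0)
    used_perc = 100.0 * ticks_per_sec / 1000000.0
    # clamp timing jitter to 0..100, as the check does
    return max(0.0, min(100.0, used_perc))

# 45 busy CPU-seconds within a 60 s check interval -> 75% utilization
print(cpu_util_percent(0, 8362860064, 60, 8362860064 + 45 * 1000000))
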
+ +def inventory_netapp_api_disk_summary(info): + return [ (None, {}) ] + +def check_netapp_api_disk_summary(_no_item, params, info): + disks_info = netapp_api_convert_info(info, + configs = {"disk-list-info": {"block-name": "disk-detail-info", + "key": "disk-uid"}, + "shelf-uids-of-disks": {"block-name": "shelf-uid-instance", + "key": "shelf-uid"}}) + + disks = disks_info.get("disk-list-info") + shelfs = disks_info.get("shelf-uids-of-disks") + + my_disks = dict([disk for disk in disks.items() if not disk[1].get("raid-state") == "partner"]) + + spare_count = 0 + data_disks = [] + parity_disks = [] + broken_disks = [] + prefailed_data_disks = [] + prefailed_parity_disks = [] + raid_states = {} + + phys_space = 0 + total_space = 0 + + for name, disk in my_disks.items(): + total_space += int(disk.get("used-space")) + raid_type = disk.get("raid-type") + + if disk.get("raid-state", "") == "broken": + broken_disks.append(disk) + elif disk.get("is-prefailed", "false") not in [ "false", "None" ]: + if raid_type in ["parity", "dparity"]: + prefailed_parity_disks.append(disk) + elif raid_type == "data": + prefailed_data_disks.append(disk) + elif disk.get("raid-state","") == "spare": + spare_count += 1 + + if raid_type in ["parity", "dparity"]: + parity_disks.append(disk) + elif raid_type in ["data"]: + data_disks.append(disk) + + yield 0, "Total Raw Capacity: %s" % get_bytes_human_readable(total_space), [("total_space", total_space)] + yield 0, "Total disks: %d (%d Spare)" % (len(my_disks) - len(broken_disks), spare_count), [ + ("total", len(my_disks)), + ("spare", spare_count), + ("broken", len(broken_disks)) ] + + yield 0, "Data disks: %d (%d prefailed)" % (len(data_disks), len(prefailed_data_disks)) + yield 0, "Parity disks: %d (%d prefailed)" % (len(parity_disks), len(prefailed_parity_disks)) + yield 0, "Broken disks: %d" % len(broken_disks) + + + disk_to_shelf_map = {} + def find_disk_shelf_uid(disk_uid): + if not disk_to_shelf_map: + for shelf, values in shelfs.items(): + disks = values["disks"].split(" ") + for disk in disks: + disk_to_shelf_map[disk] = shelf + return disk_to_shelf_map.get(disk_uid) + + for text, disks, state in [("Data-Prefailed", prefailed_data_disks, 0), + ("Parity-Prefailed", prefailed_parity_disks, 0), + ("Broken", broken_disks, 0)]: + info = [] + for disk in disks: + disk_info = "Serial: %s" % disk.get("serial-number") + shelf_uid = find_disk_shelf_uid(disk.get("disk-uid")) + if shelf_uid: + disk_info += " (Shelf: %s, Bay %s)" % (shelf_uid, disk.get("bay")) + info.append(disk_info) + + if info: + yield state, "%s Disk Details: %s" % (text, " / ".join(info)) + + if broken_disks: + warn, crit = params.get("broken_spare_ratio", (1.0, 50.0)) + ratio = float(len(broken_disks)) / (len(broken_disks) + spare_count) * 100 + state = 0 + if ratio >= crit: + state = 2 + elif ratio >= warn: + state = 1 + if state: + yield state, "Too many broken disks (levels at %.1f%%/%.1f%%)" % (warn, crit) + +check_info["netapp_api_disk.summary"] = { + 'check_function' : check_netapp_api_disk_summary, + 'inventory_function' : inventory_netapp_api_disk_summary, + 'service_description' : 'NetApp Disks Summary', + 'group' : 'netapp_disks', + 'has_perfdata' : True +} + diff -Nru check-mk-1.2.2p3/netapp_api_fan check-mk-1.2.6p12/netapp_api_fan --- check-mk-1.2.2p3/netapp_api_fan 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/netapp_api_fan 2015-06-24 09:48:37.000000000 +0000 @@ -0,0 +1,70 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# 
+------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. + +# <<>> +# 50:05:0c:c0:02:21:95:d9 shelf-owned false +# 50:05:0c:c0:02:21:95:d9 cooling-element-number 1 2 3 4 5 6 7 8 +# 50:05:0c:c0:02:21:95:d9 cooling-element-is-error false false None None None None None None +# 50:05:0c:c0:02:21:95:d9 cooling-element-is-not-installed None None true true true true true true + +def inventory_netapp_api_fan(info): + yield None, None + +def check_netapp_api_fan(_no_item, _no_params, parsed): + fan_count = 0 + fan_errors = [] + + for shelf, fans in parsed.items(): + if fans["shelf-owned"][0] != "true": + continue + + for idx, not_installed in enumerate(fans["cooling-element-is-not-installed"]): + if not_installed == "true" or fans["cooling-element-number"][idx] == "None": + continue + fan_count += 1 + if fans["cooling-element-is-error"][idx] == "true": + fan_errors.append((2, "Error in Shelf %s Fan %s" % (shelf, fans["cooling-element-number"][idx]))) + + + yield 0, "%s fans assigned to this filer" % fan_count + + max_fans = 5 + for state, text in fan_errors[:max_fans]: + yield state, text + + if len(fan_errors) > max_fans: + yield 0, "more fans failed (%d total) - only the first %d errors are shown..." % (len(fan_errors), max_fans) + + +check_info["netapp_api_fan"] = { + 'check_function' : check_netapp_api_fan, + 'parse_function' : netapp_api_parse_info_environ, + 'inventory_function' : inventory_netapp_api_fan, + 'service_description' : 'Fan Status Shelves', + 'includes' : ["netapp_api.include"] +} + + diff -Nru check-mk-1.2.2p3/netapp_api_if check-mk-1.2.6p12/netapp_api_if --- check-mk-1.2.2p3/netapp_api_if 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/netapp_api_if 2015-06-24 09:48:37.000000000 +0000 @@ -0,0 +1,200 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. 
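
The shelf-environ sections consumed by netapp_api_fan (and by the psu and temp checks further down) are matrices: one row per attribute, one column per element in the shelf. netapp_api_parse_info_environ keys the rows by shelf UID, and the check then walks the parallel value lists index by index. A standalone sketch of that walk, with data loosely based on the sample above:

lines = [
    ["50:05:0c:c0:02:21:95:d9", "shelf-owned", "true"],
    ["50:05:0c:c0:02:21:95:d9", "cooling-element-number", "1", "2", "3"],
    ["50:05:0c:c0:02:21:95:d9", "cooling-element-is-error", "false", "true", "None"],
    ["50:05:0c:c0:02:21:95:d9", "cooling-element-is-not-installed", "None", "None", "true"],
]

shelfs = {}
for line in lines:
    # key by shelf UID, then by attribute name; values stay parallel lists
    shelfs.setdefault(line[0], {})[line[1]] = line[2:]

for shelf, fans in shelfs.items():
    if fans["shelf-owned"][0] != "true":
        continue
    for idx, err in enumerate(fans["cooling-element-is-error"]):
        if fans["cooling-element-is-not-installed"][idx] == "true":
            continue   # column belongs to a slot without a fan
        if err == "true":
            print("Error in Shelf %s Fan %s" % (shelf, fans["cooling-element-number"][idx]))
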
+# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. + +#<<>> +#[config_instance] net-ifconfig-get +#interface-config-info +# interface-config-info +# interface-name e0a +# ipspace-name default-ipspace +# v4-primary-address +# ip-address-info +# addr-family af-inet +# address 10.1.1.188 +# netmask-or-prefix 255.255.0.0 +# broadcast 10.1.255.255 +# creator vfiler:vfiler0 +# mac-address 00:0c:29:d8:98:26 +# mediatype auto-1000t-fd-up +# flowcontrol full +#... +#[counter_instance] ifnet +#---new_counter--- +#instance_name e0a +#recv_errors 0 +#send_errors 0 +#recv_data 90123412 +#send_data 208265211 +#recv_mcasts 699719 +#send_mcasts 7302 + + +def netapp_convert_to_if64(info): + parsed_data = netapp_api_convert_info(info, + configs = {"net-ifconfig-get": + {"block-name": "interface-config-info", "key": "interface-name"}}, + counter_key = "instance_name") + interfaces = parsed_data.get("net-ifconfig-get") + + # Calculate speed, state and create mac-address list + if_mac_list = {} # Dictionary with lists of common mac addresses + vif_list = [] # List of virtual interfaces + for name, values in interfaces.items(): + mediatype = values.get("mediatype") + if mediatype: + tokens = mediatype.split("-") + # Possible values according to 7-Mode docu: 100tx | 100tx-fd | 1000fx | 10g-sr + if "1000" in mediatype: + speed = 1000000000 + elif "100" in mediatype: + speed = 100000000 + elif "10g" in mediatype: + speed = 10000000000 + elif "10" in mediatype: + speed = 10000000 + else: + speed = 0 + values["speed"] = speed + values["state"] = tokens[-1].lower() == "up" and "1" or "2" + elif values.get("port-role") != "storage-acp": + # If an interface has no media type and is not a storage-acp it is considered as virtual interface + vif_list.append(name) + if "mac-address" in values: + if_mac_list.setdefault(values["mac-address"], []) + if_mac_list[values["mac-address"]].append(name) + + + nics = [] + extra_info = {} + for idx, entry in enumerate(sorted(interfaces)): + nic_name, values = entry, interfaces[entry] + + speed = values.get("speed", 0) + state = values.get("state", "2") + + # Try to determine the speed and state for virtual interfaces + # We know all physical interfaces for this virtual device and use the highest available + # speed as the virtual speed. Note: Depending on the configuration this behaviour might + # differ, e.g. the speed of all interfaces might get accumulated.. 
+ # Additionally, we check if not all interfaces of the virtual group share the same connection speed + if not speed: + if "mac-address" in values: + mac_list = if_mac_list[values["mac-address"]] + if len(mac_list) > 1: # check if this interface is grouped + extra_info.setdefault(nic_name, {}) + extra_info[nic_name]["grouped_if"] = [ x for x in mac_list if x not in vif_list ] + + max_speed = 0 + min_speed = 1024**5 + for tmp_if in mac_list: + if tmp_if == nic_name or "speed" not in interfaces[tmp_if]: + continue + check_speed = interfaces[tmp_if]["speed"] + max_speed = max(max_speed, check_speed) + min_speed = min(min_speed, check_speed) + if max_speed != min_speed: + extra_info[nic_name]["speed_differs"] = (max_speed, min_speed) + speed = max_speed + + # Virtual interfaces is "Up" if at least one physical interface is up + if "state" not in values: + if "mac-address" in values: + for tmp_if in if_mac_list[values["mac-address"]]: + if interfaces[tmp_if].get("state") == "1": + state = "1" + break + + # Only add interfaces with counters + if "counters" in values: + counter_data = values.get("counters") + if values.get("mac-address"): + mac = "".join(map(lambda x: chr(int(x, 16)), values["mac-address"].split(':'))) + else: + mac = '' + + nic = ['0'] * 20 + nic[0] = str(idx + 1) # Index + nic[1] = nic_name # Description + nic[2] = "6" # Fake ethernet # Type + nic[3] = speed # Speed + nic[4] = state # Status + # IN + nic[5] = counter_data.get("recv_data", 0) # inoctets + nic[6] = 0 # inucast + nic[7] = counter_data.get("recv_mcasts", 0) # inmcast + nic[8] = 0 # ibcast + nic[9] = 0 # indiscards + nic[10] = counter_data.get("recv_errors", 0) # inerrors + # OUT + nic[11] = counter_data.get("send_data", 0) # outoctets + nic[12] = 0 # outucast + nic[13] = counter_data.get("send_mcasts", 0) # outmcast + nic[14] = 0 # outbcast + nic[15] = 0 # outdiscards + nic[16] = counter_data.get("send_errors", 0) # outspeed + nic[17] = 0 # outqlen + nic[18] = values.get("interface-name", "") # Alias + nic[19] = mac # MAC + + nics.append(nic) + + return nics, extra_info + +def inventory_netapp_api_if(parsed): + nics, extra_info = parsed + return inventory_if_common(nics) + +def check_netapp_api_if(item, params, parsed): + nics, extra_info = parsed + yield check_if_common(item, params, nics) + + for line in nics: + ifIndex = line[0] + ifDescr = line[1] + ifAlias = line[18] + if type(ifIndex) == tuple: + ifGroup, ifIndex = ifIndex + + ifDescr_cln = cleanup_if_strings(ifDescr) + ifAlias_cln = cleanup_if_strings(ifAlias) + if if_item_matches(item, ifIndex, ifAlias_cln, ifDescr_cln): + if ifDescr in extra_info: + vif_group = extra_info[ifDescr] + yield 0, "Physical interfaces: %s" %\ + ", ".join([group for group in vif_group["grouped_if"] if group != ifDescr]) + if "speed_differs" in vif_group: + yield 1, "Interfaces do not have the same speed" + +check_info["netapp_api_if"] = { + 'check_function' : check_netapp_api_if, + 'inventory_function' : inventory_netapp_api_if, + 'parse_function' : netapp_convert_to_if64, + 'service_description' : 'Interface %s', + 'has_perfdata' : True, + 'group' : 'if', + 'includes' : [ 'if.include', 'netapp_api.include' ], + 'default_levels_variable' : 'if_default_levels', +} diff -Nru check-mk-1.2.2p3/netapp_api.include check-mk-1.2.6p12/netapp_api.include --- check-mk-1.2.2p3/netapp_api.include 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/netapp_api.include 2015-06-24 09:48:37.000000000 +0000 @@ -0,0 +1,127 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- 
+# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. + + +# Environ info looks like +# 50:05:0c:c0:02:20:d1:16 shelf-owned true +# 50:05:0c:c0:02:20:d1:16 temp-sensor-is-ambient true false false +# 50:05:0c:c0:02:20:d1:16 temp-sensor-low-warning 10 10 10 +# 50:05:0c:c0:02:20:d1:16 temp-sensor-hi-warning 40 53 53 +# 50:05:0c:c0:02:20:d1:16 temp-sensor-hi-critical 50 63 63 +# 50:05:0c:c0:02:20:d1:16 temp-sensor-current-temperature 28 33 35 +# 50:05:0c:c0:02:20:d1:16 temp-sensor-low-critical 0 0 0 +def netapp_api_parse_info_environ(info): + shelfs = {} + lines = iter(info) + + try: + while True: + line = lines.next() + shelf, key, values = line[0], line[1], line[2:] + shelfs.setdefault(shelf, {})[key] = values + except StopIteration: + pass + + return shelfs + +# Converts the given info into +# - A config dictionary with config_key as identifier. If the counter_key of the +# counter section matches the config_key the counter information is merged into +# the config. 
+# - With counter_as_key you can specify whether the counter_key entries +# should used as main keys in the returned dictionary +def netapp_api_convert_info(info, configs = {}, counter_key = None, counter_as_key = False): + lines = iter(info) + try: + is_config = False + current_instance = None + current_config = None + + current_counter = {} + objects = {} + last_config_name = [""] + + def add_config(): + if current_config == None: + return + key = current_config.get(configs.get(current_instance, {}).get("key")) + if key: + objects.setdefault(current_instance, {}) + objects[current_instance][key] = current_config + last_config_name[0] = key + + def add_counter(): + if not current_counter: + return + counter_name = current_counter.get(counter_key) + if not counter_name: + return + + if counter_as_key: + objects[counter_name] = current_counter + elif counter_name in objects[current_instance]: + objects[current_instance][counter_name]["counters"] = current_counter + + while True: + line = lines.next() + if line[0] == "[config_instance]": + add_config() + add_counter() + current_counter = {} + is_config = True + current_instance = line[-1] + continue + elif line[0] == "[counter_instance]": + add_config() + add_counter() + current_config = {} + is_config = False + continue + + if is_config: + if line[-1] == configs.get(current_instance, {}).get("block-name"): + add_config() + current_config = {} + elif current_config != None and len(line) >= 2: + key = line[0] + if current_config.get(key): + continue + else: + current_config[key] = line[1] + else: # is counter + if line[-1] == "---new_counter---": + add_counter() + current_counter = {} + elif len(line) == 2: + current_counter[line[0]] = line[1] + + except StopIteration: + add_config() + add_counter() + pass + + return objects + diff -Nru check-mk-1.2.2p3/netapp_api_protocol check-mk-1.2.6p12/netapp_api_protocol --- check-mk-1.2.2p3/netapp_api_protocol 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/netapp_api_protocol 2015-06-24 09:48:37.000000000 +0000 @@ -0,0 +1,77 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. 
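
netapp_api_convert_info above is the workhorse behind most of these new checks. For a [config_instance] section it starts a new object whenever the configured block-name line appears and files the finished object under the configured key. A deliberately simplified, self-contained re-implementation to illustrate the shape of the result (the real function additionally merges counter sections and handles several edge cases):

def convert_config(info, block_name, key):
    objects, current = {}, None
    for line in info:
        if line[-1] == block_name:               # a new block starts here
            current = {}
        elif current is not None and len(line) >= 2:
            current.setdefault(line[0], line[1])
            if key in current:                   # file (and re-file) under its key
                objects[current[key]] = current
    return objects

info = [["[config_instance]", "aggr-list-info"],
        ["aggregates"],
        ["aggr-info"],
        ["name", "aggr0"],
        ["size-total", "6606028800"]]
print(convert_config(info, "aggr-info", "name"))
# {'aggr0': {'name': 'aggr0', 'size-total': '6606028800'}}
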
+ +#<<>> +#[counters] +#---new_counter--- +#instance_name nfsv4 +#nfsv4_read_data 0 +#nfsv4_write_data 0 +#---new_counter--- +#instance_name iscsi +#iscsi_read_data 0 +#iscsi_write_data 0 +#---new_counter--- +#instance_name cifs +#cifs_read_data 0 +#cifs_write_data 0 +#---new_counter--- +#instance_name nfs +#nfsv3_read_data 0 +#nfsv3_write_data 0 + + +def inventory_netapp_api_protocol(parsed): + for key in parsed: + yield key, None + +def check_netapp_api_protocol(item, _no_params, parsed): + counter_data = parsed.get(item) + if not counter_data: + return + + # Fix for nfsv3. The item nfs is internally handled as nfsv3 + if item == "nfs": + item = "nfsv3" + + infotext = "" + now = time.time() + for entry, text in [ ("read_ops", "Read OPS"), ("write_ops", "Write OPS") ]: + key = "%s_%s" % (item, entry) + value = int(counter_data.get(entry, counter_data.get(key, "0"))) + timedif, per_sec = get_counter("netapp_api_protocol.%s.%s" % (item, entry), now, value) + yield 0, "%s %s" % (text, get_bytes_human_readable(per_sec, 1000, unit = "")), [(entry, per_sec)] + + +check_info["netapp_api_protocol"] = { + 'check_function' : check_netapp_api_protocol, + 'inventory_function' : inventory_netapp_api_protocol, + 'parse_function' : lambda info: netapp_api_convert_info(info, counter_key = "instance_name", counter_as_key = True), + 'service_description' : 'Protocol %s', + 'has_perfdata' : True, + 'includes' : [ "netapp_api.include" ] +} + diff -Nru check-mk-1.2.2p3/netapp_api_psu check-mk-1.2.6p12/netapp_api_psu --- check-mk-1.2.2p3/netapp_api_psu 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/netapp_api_psu 2015-06-24 09:48:37.000000000 +0000 @@ -0,0 +1,64 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. 
+ +# <<>> +# 50:05:0c:c0:02:20:d2:c4 shelf-owned true +# 50:05:0c:c0:02:20:d2:c4 power-supply-serial-no PMW944430115711 PMW944430115700 None None +# 50:05:0c:c0:02:20:d2:c4 power-supply-is-error false false None None +# 50:05:0c:c0:02:20:d2:c4 power-supply-is-not-installed None None true true +# 50:05:0c:c0:02:20:d2:c4 power-control-status ok +# 50:05:0c:c0:02:20:d2:c4 power-supply-element-number 1 2 3 4 + +def inventory_netapp_api_psu(info): + yield None, None + +def check_netapp_api_psu(_no_item, _no_params, parsed): + psu_count = 0 + psu_errors = [] + for shelf, psus in parsed.items(): + if psus["shelf-owned"][0] != "true": + continue + + for idx, not_installed in enumerate(psus["power-supply-is-not-installed"]): + if not_installed == "true" or psus["power-supply-element-number"][idx] == "None": + continue + psu_count += 1 + if psus["power-supply-is-error"][idx] == "true": + psu_errors.append((2, "Error in Shelf %s PSU %s" % (shelf, psus["power-supply-element-number"][idx]))) + + yield 0, "%s power supplies assigned to this filer" % psu_count + + for state, text in psu_errors: + yield state, text + + +check_info["netapp_api_psu"] = { + 'check_function' : check_netapp_api_psu, + 'inventory_function' : inventory_netapp_api_psu, + 'parse_function' : netapp_api_parse_info_environ, + 'service_description' : 'Power Supplies Shelves', + 'includes' : ["netapp_api.include"] +} diff -Nru check-mk-1.2.2p3/netapp_api_status check-mk-1.2.6p12/netapp_api_status --- check-mk-1.2.2p3/netapp_api_status 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/netapp_api_status 2015-06-24 09:48:37.000000000 +0000 @@ -0,0 +1,53 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. 
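
Like the fan check, check_netapp_api_psu above is a generator: it yields any number of (state, infotext) partial results instead of returning a single tuple, and the monitoring core folds those into one service state, worst state first. A toy illustration of that convention (the core's actual aggregation also deals with UNKNOWN ordering and perfdata, which is omitted here):

def run_check(gen):
    results = list(gen)
    worst = max(state for state, _text in results)   # 2 (CRIT) beats 1 beats 0
    return worst, ", ".join(text for _state, text in results)

def demo_check():
    yield 0, "4 power supplies assigned to this filer"
    yield 2, "Error in Shelf 50:05:0c:c0:02:20:d2:c4 PSU 2"

print(run_check(demo_check()))
# (2, '4 power supplies assigned to this filer, Error in Shelf ... PSU 2')
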
+ +# <<>> +# [config_instance] diagnosis-status-get +# attributes +# diagnosis-status +# status ok + +def inventory_netapp_api_status(info): + return [ (None, None) ] + +def check_netapp_api_status(item, _no_params, info): + data = dict([line for line in info if len(line) == 2 and line[0] != "[config_instance]"]) + + if data.get("status"): + state = data["status"].lower() not in ["ok", "ok-with-suppressed"] and 2 or 0 + yield state, "Status: %s" % data["status"] + del data["status"] + + for key, value in data.items(): + yield 0, "%s: %s" % (key.title(), value) + +check_info["netapp_api_status"] = { + 'check_function' : check_netapp_api_status, + 'inventory_function' : inventory_netapp_api_status, + 'service_description' : 'Diagnosis Status', +} + + diff -Nru check-mk-1.2.2p3/netapp_api_temp check-mk-1.2.6p12/netapp_api_temp --- check-mk-1.2.2p3/netapp_api_temp 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/netapp_api_temp 2015-06-24 09:48:37.000000000 +0000 @@ -0,0 +1,82 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. 
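
A side note on a recurring idiom: the state computation in netapp_api_status above ("cond and 2 or 0") is the classic pre-2.5 Python conditional expression. It is safe here because 2 is truthy, but the pattern silently misbehaves when the "true" branch is falsy, which is worth knowing when reading or extending these checks:

status = "ok-with-suppressed"
state = status.lower() not in ["ok", "ok-with-suppressed"] and 2 or 0
print(state)   # 0, the status counts as OK

# The pitfall: "cond and X or Y" falls through to Y whenever X is falsy
print(True and 0 or 1)   # prints 1, although the intended result is 0
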
+ +# <<>> +# 50:05:0c:c0:02:20:d1:16 shelf-owned true +# 50:05:0c:c0:02:20:d1:16 temp-sensor-is-ambient true false false +# 50:05:0c:c0:02:20:d1:16 temp-sensor-low-warning 10 10 10 +# 50:05:0c:c0:02:20:d1:16 temp-sensor-hi-warning 40 53 53 +# 50:05:0c:c0:02:20:d1:16 temp-sensor-hi-critical 50 63 63 +# 50:05:0c:c0:02:20:d1:16 temp-sensor-current-temperature 28 33 35 +# 50:05:0c:c0:02:20:d1:16 temp-sensor-low-critical 0 0 0 + +def inventory_netapp_api_temp(parsed): + yield "Internal", {} + yield "Ambient", {} + + +def check_netapp_api_temp(item, params, parsed): + is_ambient = item == "Ambient" and "true" or "false" + + sensorlist = [] + for shelf, sensors in parsed.items(): + if sensors["shelf-owned"][0] != "true": + continue + + for idx, ambient in enumerate(sensors["temp-sensor-is-ambient"]): + if ambient != is_ambient: + continue + + current_temp = int(sensors["temp-sensor-current-temperature"][idx]) + sensor_no = sensors["temp-sensor-element-no"][idx] + warn_low = int(sensors["temp-sensor-low-warning"][idx]) + crit_low = int(sensors["temp-sensor-low-critical"][idx]) + warn_high = int(sensors["temp-sensor-hi-warning"][idx]) + crit_high = int(sensors["temp-sensor-hi-critical"][idx]) + + kwargs = { + "dev_levels" : (warn_high, crit_high), + "dev_levels_lower" : (warn_low, crit_low), + } + + sensorlist.append( (shelf + "/" + sensor_no, current_temp, kwargs) ) + + if not sensorlist: + return 0, "No temperature sensors assigned to this filer" + else: + return check_temperature_list(sensorlist, params) + + +check_info["netapp_api_temp"] = { + 'check_function' : check_netapp_api_temp, + 'inventory_function' : inventory_netapp_api_temp, + 'parse_function' : netapp_api_parse_info_environ, + 'has_perfdata' : True, + 'group' : "temperature", + 'service_description' : 'Temperature %s Shelves', + 'includes' : ["netapp_api.include", "temperature.include"] +} + diff -Nru check-mk-1.2.2p3/netapp_api_version check-mk-1.2.6p12/netapp_api_version --- check-mk-1.2.2p3/netapp_api_version 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/netapp_api_version 2015-06-24 09:48:37.000000000 +0000 @@ -0,0 +1,65 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. 
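
check_netapp_api_temp above hands every sensor to check_temperature_list together with the device-reported thresholds as dev_levels / dev_levels_lower. A simplified stand-alone evaluation of one sensor against such (warn, crit) pairs, using values from the sample output (the real temperature include also supports user-configured levels, which this sketch ignores):

def sensor_state(temp, dev_levels, dev_levels_lower):
    warn_high, crit_high = dev_levels
    warn_low, crit_low = dev_levels_lower
    if temp >= crit_high or temp <= crit_low:
        return 2   # CRIT
    if temp >= warn_high or temp <= warn_low:
        return 1   # WARN
    return 0       # OK

print(sensor_state(28, (40, 50), (10, 0)))   # 0: within all device levels
print(sensor_state(44, (40, 50), (10, 0)))   # 1: above the high warning level
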
+ +# <<>> +# [config_instance] system-get-version +# version NetApp Release 8.2.1 7-Mode: Fri Mar 21 14:48:58 PDT 2014 +# is-clustered false +# [config_instance] system-get-info +# system-info +# system-name netapp-host +# system-id 4082367488 +# system-model SIMBOX +# system-machine-type SIMBOX +# vendor-id NetApp +# system-serial-number 4082367488 +# board-speed 2933 +# board-type NetApp VSim +# cpu-serial-number 999999 +# number-of-processors 2 +# memory-size 1599 +# cpu-processor-id 0x206c2 +# cpu-microcode-version 21 +# maximum-aggregate-size 2199023255552 +# maximum-flexible-volume-size 17592186044416 +# maximum-flexible-volume-count 500 +# supports-raid-array true + + +def inventory_netapp_api_info(info): + return [ (None, None) ] + +def check_netapp_api_info(item, _no_params, info): + data = dict([(line[0], " ".join(line[1:])) for line in info if len(line) == 2 and line[0] != "[config_instance]"]) + return 0, "Version: %s" % data["version"] + +check_info["netapp_api_version"] = { + 'check_function' : check_netapp_api_info, + 'inventory_function' : inventory_netapp_api_info, + 'service_description' : 'NetApp Version', +} + + diff -Nru check-mk-1.2.2p3/netapp_api_vf_stats check-mk-1.2.6p12/netapp_api_vf_stats --- check-mk-1.2.2p3/netapp_api_vf_stats 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/netapp_api_vf_stats 2015-09-21 10:59:54.000000000 +0000 @@ -0,0 +1,99 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. + +netapp_api_vf_stats_cpu_util_default_levels = (90.0, 95.0) + +def inventory_netapp_api_vf_stats_cpu_util(info): + stats = netapp_api_convert_info(info, counter_key = "instance_name", counter_as_key = True) + for key in stats.keys(): + yield key, 'netapp_api_vf_stats_cpu_util_default_levels' + +def check_netapp_api_vf_stats_cpu_util(item, params, info): + stats = netapp_api_convert_info(info, counter_key = "instance_name", counter_as_key = True) + + vf = stats.get(item) + if not vf: + return + else: + now = time.time() + + cpu_busy = int(vf["vfiler_cpu_busy"]) + timedif, ticks_per_sec = get_counter("netapp_api_vf_stats.cpu_util.%s" % item, now, cpu_busy ) + cpusecs_per_sec = ticks_per_sec / 1000.0 / 1000 / 10000 + used_perc = 100.0 * cpusecs_per_sec + + # Due to timeing invariancies the measured level can become > 100%. + # This makes users unhappy, so cut it off. 
+ if used_perc < 0: + used_perc = 0 + elif used_perc > 100: + used_perc = 100 + + state, infotext, perfdata = check_cpu_util(used_perc, params, now) + perfdata[0] = perfdata[0][:5] + return state, infotext, perfdata + + +check_info["netapp_api_vf_stats.cpu_util"] = { + 'check_function' : check_netapp_api_vf_stats_cpu_util, + 'inventory_function' : inventory_netapp_api_vf_stats_cpu_util, + 'has_perfdata' : True, + 'group' : 'cpu_utilization_multiitem', + 'service_description' : 'CPU utilization %s', + 'includes' : [ "cpu_util.include" ] +} + + +def inventory_netapp_api_vf_stats_traffic(info): + stats = netapp_api_convert_info(info, counter_key = "instance_name", counter_as_key = True) + for key in stats.keys(): + yield key, None + +def check_netapp_api_vf_stats_traffic(item, params, info): + stats = netapp_api_convert_info(info, counter_key = "instance_name", counter_as_key = True) + + vf = stats.get(item) + if not vf: + return + else: + now = time.time() + for entry, name, base, factor, unit in [ ("read_ops", "Read", 1000.0, 1, "OP/s"), + ("write_ops", "Write", 1000.0, 1, "OP/s"), + ("net_data_recv", "Net Data Recv", 1024.0, 1024, "B/s"), + ("net_data_sent", "Net Data Sent", 1024.0, 1024, "B/s"), + ("read_bytes", "Read", 1024.0, 1024, "B/s"), + ("write_bytes", "Write", 1024.0, 1024, "B/s")]: + traffic = int(vf["vfiler_" + entry]) * factor + timedif, ticks_per_sec = get_counter("netapp_api_vf_stats.traffic.%s.%s" % (item, entry), now, traffic) + yield 0, "%s: %s" % (name, get_bytes_human_readable(ticks_per_sec, base = base, unit = unit)), [(entry, ticks_per_sec)] + + +check_info["netapp_api_vf_stats.traffic"] = { + 'check_function' : check_netapp_api_vf_stats_traffic, + 'inventory_function' : inventory_netapp_api_vf_stats_traffic, + 'has_perfdata' : True, + 'service_description' : 'Traffic vFiler %s', +} diff -Nru check-mk-1.2.2p3/netapp_api_vf_stats.cpu_util check-mk-1.2.6p12/netapp_api_vf_stats.cpu_util --- check-mk-1.2.2p3/netapp_api_vf_stats.cpu_util 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/netapp_api_vf_stats.cpu_util 2015-06-24 09:48:36.000000000 +0000 @@ -0,0 +1,25 @@ +title: NetApp Filers: vFiler CPU Utilization +agents: netapp +catalog: hw/storagehw/netapp +license: GPL +distribution: check_mk +description: + Checks the CPU utilization (in percent) of a vFiler. + + {WARN} or {CRIT} is returned, if the usage in the last 60 sec was above + given thresholds. {OK} is returned otherwise. + +perfdata: + One value: The vFilers CPU utilization in percent. + +inventory: + Creates one check for each vFiler. + +examples: + # set default levels to 70 and 80 percent: + netapp_api_vf_stats_cpu_util_default_levels = { "levels": (70.0, 80.0) } + +[parameters] +parameters (dict): with the element +{"levels"}: (float, float): levels of vFiler CPU utilization for {WARN} and {CRIT} in percent + diff -Nru check-mk-1.2.2p3/netapp_api_vf_stats.traffic check-mk-1.2.6p12/netapp_api_vf_stats.traffic --- check-mk-1.2.2p3/netapp_api_vf_stats.traffic 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/netapp_api_vf_stats.traffic 2015-06-24 09:48:36.000000000 +0000 @@ -0,0 +1,16 @@ +title: NetApp Filers: vFiler Traffic +agents: netapp +catalog: hw/storagehw/netapp +license: GPL +distribution: check_mk +description: + Monitors the traffic of vFilers on the NetApp filer. 
+  Right now the check is always {OK} and reports the following information for
+  each vFiler: {Network Data Received/Sent}, {Disk Data Read/Written}, {Read/Write Operations Per Second}
+
+perfdata:
+  Six values: {Read/Write Operations}, {Network Received/Sent}, {Bytes Read/Written}
+
+inventory:
+  Creates one check for each vFiler.
+
diff -Nru check-mk-1.2.2p3/netapp_api_vf_status check-mk-1.2.6p12/netapp_api_vf_status
--- check-mk-1.2.2p3/netapp_api_vf_status	1970-01-01 00:00:00.000000000 +0000
+++ check-mk-1.2.6p12/netapp_api_vf_status	2015-09-21 10:59:54.000000000 +0000
@@ -0,0 +1,51 @@
+#!/usr/bin/python
+# -*- encoding: utf-8; py-indent-offset: 4 -*-
+# +------------------------------------------------------------------+
+# |             ____ _               _        __  __ _  __           |
+# |            / ___| |__   ___  ___| | __   |  \/  | |/ /           |
+# |           | |   | '_ \ / _ \/ __| |/ /   | |\/| | ' /            |
+# |           | |___| | | |  __/ (__|   <    | |  | | . \            |
+# |            \____|_| |_|\___|\___|_|\_\___|_|  |_|_|\_\           |
+# |                                                                  |
+# | Copyright Mathias Kettner 2014             mk@mathias-kettner.de |
+# +------------------------------------------------------------------+
+#
+# This file is part of Check_MK.
+# The official homepage is at http://mathias-kettner.de/check_mk.
+#
+# check_mk is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by
+# the Free Software Foundation in version 2.  check_mk is distributed
+# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with-
+# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE. See the GNU General Public License for more de-
+# tails. You should have received a copy of the GNU General Public
+# License along with GNU Make; see the file COPYING. If not, write
+# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
+# Boston, MA 02110-1301 USA.
+
+# <<<netapp_api_vf_status>>>
+# zcs1v running
+# zhs01 running
+# zmppl01 running
+# zmdp running
+# cdefs1v running
+
+def inventory_netapp_api_vf_status(info):
+    return map(lambda x: (x[0], None), info)
+
+def check_netapp_api_vf_status(item, _no_params, info):
+    filer_states = dict(info)
+    if item not in filer_states:
+        return
+
+    state = filer_states[item] not in ["running", "DR backup"] and 2 or 0
+    return state, "Status is %s" % filer_states[item]
+
+check_info["netapp_api_vf_status"] = {
+    'check_function'      : check_netapp_api_vf_status,
+    'inventory_function'  : inventory_netapp_api_vf_status,
+    'service_description' : 'vFiler Status %s',
+}
+
+
diff -Nru check-mk-1.2.2p3/netapp_api_volumes check-mk-1.2.6p12/netapp_api_volumes
--- check-mk-1.2.2p3/netapp_api_volumes	1970-01-01 00:00:00.000000000 +0000
+++ check-mk-1.2.6p12/netapp_api_volumes	2015-09-21 10:59:54.000000000 +0000
@@ -0,0 +1,122 @@
+#!/usr/bin/python
+# -*- encoding: utf-8; py-indent-offset: 4 -*-
+# +------------------------------------------------------------------+
+# |             ____ _               _        __  __ _  __           |
+# |            / ___| |__   ___  ___| | __   |  \/  | |/ /           |
+# |           | |   | '_ \ / _ \/ __| |/ /   | |\/| | ' /            |
+# |           | |___| | | |  __/ (__|   <    | |  | | . \            |
+# |            \____|_| |_|\___|\___|_|\_\___|_|  |_|_|\_\           |
+# |                                                                  |
+# | Copyright Mathias Kettner 2014             mk@mathias-kettner.de |
+# +------------------------------------------------------------------+
+#
+# This file is part of Check_MK.
+# The official homepage is at http://mathias-kettner.de/check_mk.
+#
+# check_mk is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by
+# the Free Software Foundation in version 2.
check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. + +# <<>> +# [config_instance] volume-list-info +# volumes +# volume-info +# name vol0 +# size-total 848203776 +# size-available 630169600 +# [counter_instance] volume +# ---new_counter--- +# instance_name vol0 +# read_data 11864088127 +# read_latency 25455351 +# write_data 905750632 +# write_latency 197601081 +# nfs_read_data 11603591833 +# nfs_read_latency 21772199 +# nfs_write_data 298307013 +# nfs_write_latency 173041497 +# cifs_read_data 0 +# cifs_read_latency 0 + +def inventory_netapp_api_volumes(parsed): + volumes = parsed.get("volume-list-info") + return [ (values.get("name"), {}) for uid, values in volumes.items() if values.get("name") ] + +def check_netapp_api_volumes(item, params, parsed): + volumes = parsed.get("volume-list-info") + + def find_volume(name): + for uid, values in volumes.items(): + try: + if values.get("name") == name: + return values + except: + continue # continue on configuration + + volume = find_volume(item) + if not volume: + return 3, "Volume not found in agent output" + + if volume.get("state") != "online": + return 1, "Volume is %s" % volume.get("state") + + mega = 1024.0 * 1024.0 + size_total = int(volume.get("size-total")) / mega + size_avail = int(volume.get("size-available")) / mega + inodes_total = int(volume.get("files-total")) + inodes_avail = inodes_total - int(volume.get("files-used")) + state, info, perf = df_check_filesystem_single(g_hostname, item, size_total, size_avail, + inodes_total, inodes_avail, params) + + counter_wrapped = False + counters = [] + now = time.time() + + perf_protocols = params.get("perfdata", []) + for protocol in ["", "nfs_", "cifs_", "san_", "fcp_", "iscsi_"]: + if protocol[:-1] not in perf_protocols: + continue + for mode in ["read_", "write_", "other_"]: + for field, factor, format_text in [ ("data", None, None), ("latency", 10000.0, "%s: %.2f ms")]: + key = protocol + mode + field + value = volume.get("counters", {}).get(key) + if value != None: + value = int(value) + try: + delta = get_rate("netapp_api_volumes.%s.%s" % (item, key), now, value, onwrap=RAISE) + perf.append( (key, delta) ) + + if protocol == "" and mode in ["read_", "write_"]: + if factor: + delta = delta / factor + if format_text: + counters.append(format_text % (key, delta)) + else: + counters.append("%s: %s" % (key, get_bytes_human_readable(delta))) + except MKCounterWrapped: + counter_wrapped = True + + if not counter_wrapped: + info += ", " + ", ".join(counters) + + return state, info, perf + +check_info["netapp_api_volumes"] = { + 'check_function' : check_netapp_api_volumes, + 'inventory_function' : inventory_netapp_api_volumes, + 'parse_function' : lambda info: netapp_api_convert_info(info, + configs = {"volume-list-info": {"block-name": "volume-info", "key": "name"}}, + counter_key = "instance_name"), + 'service_description' : 'Volume %s', + 'has_perfdata' : True, + 'group' : "netapp_volumes", + 'includes' : [ "df.include", "netapp_api.include" ], + "default_levels_variable" : "filesystem_default_levels", +} diff -Nru check-mk-1.2.2p3/netapp_cluster 
check-mk-1.2.6p12/netapp_cluster
--- check-mk-1.2.2p3/netapp_cluster	2013-11-05 09:23:08.000000000 +0000
+++ check-mk-1.2.6p12/netapp_cluster	2015-09-21 10:59:54.000000000 +0000
@@ -7,7 +7,7 @@
 # |           | |___| | | |  __/ (__|   <    | |  | | . \            |
 # |            \____|_| |_|\___|\___|_|\_\___|_|  |_|_|\_\           |
 # |                                                                  |
-# | Copyright Mathias Kettner 2013             mk@mathias-kettner.de |
+# | Copyright Mathias Kettner 2014             mk@mathias-kettner.de |
 # +------------------------------------------------------------------+
 #
 # This file is part of Check_MK.
@@ -46,15 +46,15 @@

 def inventory_netapp_cluster(info):
-    inventory = []
-
-    # only inventorizes clusters that dont have takeover disabled.
-    cfSettings, cfState, cfCannotTakeoverCause, cfPartnerStatus, cfPartnerName, cfInterconnectStatus = info[0]
-    if int(cfSettings) not in [1, 3]:
-        # Include the cluster partner name in inventory (value added data)
-        inventory.append ((cfPartnerName, None))
-    return inventory
+    inventory = []
+    if info:
+        cfSettings, cfState, cfCannotTakeoverCause, cfPartnerStatus, cfPartnerName, cfInterconnectStatus = info[0]
+        # only inventorizes clusters that don't have takeover disabled.
+        if int(cfSettings) not in [1, 3]:
+            # Include the cluster partner name in inventory (value added data)
+            inventory.append((cfPartnerName, None))
+    return inventory

 def check_netapp_cluster(item, _no_params, info):
@@ -64,50 +63,47 @@
     # first handle all critical states.
     # "dead" and "thisNodeDead"
     if cfState == "1" or cfSettings == "5":
-        return (2, "CRIT - Node is declared dead by cluster")
+        return (2, "Node is declared dead by cluster")
     elif cfPartnerStatus in [1, 3]:
-        return (2, "CRIT - Partner Status is dead or maybeDown")
+        return (2, "Partner Status is dead or maybeDown")
     elif cfInterconnectStatus == "2":
-        return (2, "CRIT - Cluster Interconnect failure")
+        return (2, "Cluster Interconnect failure")
     # then handle warnings.
     elif cfSettings in [3, 4] or cfState == "3":
-        return (1, "WARN - Cluster takeover is disabled")
+        return (1, "Cluster takeover is disabled")
     elif cfInterconnectStatus == "partialFailure":
-        return (1, "WARN - Cluster interconnect partially failed")
+        return (1, "Cluster interconnect partially failed")

     # if the partner name has changed, we'd like to issue a warning
     if cfPartnerName != item:
-        return (1, "WARN - Partner Name %s instead of %s") % (cfPartnerName, item)
+        return (1, "Partner Name %s instead of %s" % (cfPartnerName, item))

     # OK - Cluster enabled, Cluster can takeover and the partner is OK and the
     # infiniband interconnect is working.
     if cfSettings == "2" and cfState == "2" \
         and cfCannotTakeoverCause == "1" and cfPartnerStatus == "2" \
         and cfInterconnectStatus == "4":
-        return (0, "OK - Cluster Status is OK")
+        return (0, "Cluster Status is OK")

     # if we reach here, we hit an unknown case.
- return (3, "UNKNOWN") - - - -snmp_info["netapp_cluster"] = \ - ( ".1.3.6.1.4.1.789.1.2.3", [ - "1", # cfSettings - "2", # cfState - "3", # cfCannotTakeoverCause - "4", # cfPartnerStatus - "6", # cfPartnerName - "8" # cfInterconnectStatus - ]) - - -check_info["netapp_cluster"] = (check_netapp_cluster, "metrocluster_w_%s", 0, inventory_netapp_cluster) - + return (3, "Got unhandled information") -# Run inventory only on Data Ontap OS with cluster enabled -snmp_scan_functions["netapp_cluster"] = \ - lambda oid: "netapp release" in oid(".1.3.6.1.2.1.1.1.0").lower() or \ - oid(".1.3.6.1.2.1.1.2.0").startswith(".1.3.6.1.4.1.789") +check_info["netapp_cluster"] = { + 'check_function': check_netapp_cluster, + 'inventory_function': inventory_netapp_cluster, + 'service_description': 'metrocluster_w_%s', + 'snmp_info': ('.1.3.6.1.4.1.789.1.2.3', [ + '1', # cfSettings + '2', # cfState + '3', # cfCannotTakeoverCause + '4', # cfPartnerStatus + '6', # cfPartnerName + '8', # cfInterconnectStatus + ]), + 'snmp_scan_function': \ + # Run inventory only on Data Ontap OS with cluster enabled + lambda oid: "netapp release" in oid(".1.3.6.1.2.1.1.1.0").lower() or \ + oid(".1.3.6.1.2.1.1.2.0").startswith(".1.3.6.1.4.1.789"), +} diff -Nru check-mk-1.2.2p3/netapp_cpu check-mk-1.2.6p12/netapp_cpu --- check-mk-1.2.2p3/netapp_cpu 2013-11-05 09:23:08.000000000 +0000 +++ check-mk-1.2.6p12/netapp_cpu 2015-09-21 10:59:54.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. @@ -28,15 +28,15 @@ def check_netapp_cpu(item, params, info): util = float(info[0][0]) - infotext = " - %2.1f%% utilization" % util + infotext = "%2.1f%% utilization" % util warn, crit = params perfdata = [("util", util, warn, crit, 0, 100)] if util >= crit: - return (2, "CRIT" + infotext + " (critical at %d%%)" % crit, perfdata) + return (2, infotext + " (critical at %d%%)" % crit, perfdata) elif util >= warn: - return (1, "WARN" + infotext + " (warning at %d%%)" % warn, perfdata) + return (1, infotext + " (warning at %d%%)" % warn, perfdata) else: - return (0, "OK" + infotext, perfdata) + return (0, infotext, perfdata) check_info["netapp_cpu"] = { "check_function" : check_netapp_cpu, diff -Nru check-mk-1.2.2p3/netapp_fcpio check-mk-1.2.6p12/netapp_fcpio --- check-mk-1.2.2p3/netapp_fcpio 2013-11-05 09:23:08.000000000 +0000 +++ check-mk-1.2.6p12/netapp_fcpio 2015-09-21 10:59:54.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. 
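
The conversion of netapp_cluster above shows the pattern that runs through this whole patch: the legacy 4-tuple check declaration (check function, service description template, a 0/1 perfdata flag, inventory function) becomes a dictionary with named keys. A hypothetical shim that maps the old shape onto the new one makes the correspondence explicit (names here are illustrative, not Check_MK API):

def upgrade_check_info(legacy):
    check_function, service_description, has_perfdata, inventory_function = legacy
    return {
        'check_function'      : check_function,
        'inventory_function'  : inventory_function,
        'service_description' : service_description,
        'has_perfdata'        : bool(has_perfdata),
    }

legacy = (lambda item, params, info: (0, "Cluster Status is OK"),
          "metrocluster_w_%s", 0, lambda info: [])
print(sorted(upgrade_check_info(legacy).keys()))
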
diff -Nru check-mk-1.2.2p3/netapp_fcpio check-mk-1.2.6p12/netapp_fcpio
--- check-mk-1.2.2p3/netapp_fcpio	2013-11-05 09:23:08.000000000 +0000
+++ check-mk-1.2.6p12/netapp_fcpio	2015-09-21 10:59:54.000000000 +0000
@@ -7,7 +7,7 @@
 # |           | |___| | | |  __/ (__|   <    | |  | | . \            |
 # |            \____|_| |_|\___|\___|_|\_\___|_|  |_|_|\_\           |
 # |                                                                  |
-# | Copyright Mathias Kettner 2013             mk@mathias-kettner.de |
+# | Copyright Mathias Kettner 2014             mk@mathias-kettner.de |
 # +------------------------------------------------------------------+
 #
 # This file is part of Check_MK.
@@ -29,8 +29,8 @@
 def check_netapp_fcpio(item, params, info):
     read, write = map(int, info[0])
     this_time = int(time.time())
-    timedif, avg_read  = get_counter("netapp_fcpio.read", this_time, read)
-    timedif, avg_write = get_counter("netapp_fcpio.write", this_time, write)
+    avg_read  = get_rate("netapp_fcpio.read", this_time, read)
+    avg_write = get_rate("netapp_fcpio.write", this_time, write)
 
     read_warn, read_crit = params['read']
     write_warn, write_crit = params['write']
@@ -56,10 +56,10 @@
             write_msg = ' (!!)'
 
-    infotext = " - %s read%s, %s write%s in last %d sec" % (get_filesize_human_readable(avg_read), \
-            read_msg, get_filesize_human_readable(avg_write), write_msg, timedif)
+    infotext = "%s read%s, %s write%s" % (get_bytes_human_readable(avg_read), \
+            read_msg, get_bytes_human_readable(avg_write), write_msg)
 
-    return(state, nagios_state_names[state] + infotext, perfdata)
+    return (state, infotext, perfdata)
 
 
 check_info["netapp_fcpio"] = {
diff -Nru check-mk-1.2.2p3/netapp_vfiler check-mk-1.2.6p12/netapp_vfiler
--- check-mk-1.2.2p3/netapp_vfiler	2013-11-05 09:23:08.000000000 +0000
+++ check-mk-1.2.6p12/netapp_vfiler	2015-09-21 10:59:54.000000000 +0000
@@ -7,7 +7,7 @@
 # |           | |___| | | |  __/ (__|   <    | |  | | . \            |
 # |            \____|_| |_|\___|\___|_|\_\___|_|  |_|_|\_\           |
 # |                                                                  |
-# | Copyright Mathias Kettner 2013             mk@mathias-kettner.de |
+# | Copyright Mathias Kettner 2014             mk@mathias-kettner.de |
 # +------------------------------------------------------------------+
 #
 # This file is part of Check_MK.
@@ -48,23 +48,27 @@
         vfName, vfState = vfEntry
         if vfName == item:
             if vfState == "2":
-                return (0, "OK - vFiler is running")
+                return (0, "vFiler is running")
             elif vfState == "1":
-                return (2, "CRIT - vFiler is stopped")
+                return (2, "vFiler is stopped")
             else:
-                return (3, "UNKOWN - vFiler status unknown")
+                return (3, "vFiler status unknown")
-    return (3, "UNKNOWN - vFiler not found in SNMP output")
+    return (3, "vFiler not found in SNMP output")
 
-# get the vfName and vfState from the vfEntry table
-snmp_info["netapp_vfiler"] = \
-    ( ".1.3.6.1.4.1.789.1.16.3.1", [
-        "2", "9", ])
 
-check_info["netapp_vfiler"] = (check_netapp_vfiler, "vFiler Status %s", 0, inventory_netapp_vfiler)
 
-snmp_scan_functions["netapp_vfiler"] = \
-    lambda oid: "netapp release" in oid(".1.3.6.1.2.1.1.1.0").lower() or \
-                oid(".1.3.6.1.2.1.1.2.0").startswith(".1.3.6.1.4.1.789")
+
+check_info["netapp_vfiler"] = {
+    'check_function':          check_netapp_vfiler,
+    'inventory_function':      inventory_netapp_vfiler,
+    'service_description':     'vFiler Status %s',
+    # get the vfName and vfState from the vfEntry table
+    'snmp_info':               ('.1.3.6.1.4.1.789.1.16.3.1', ['2', '9']),
+    'snmp_scan_function':      \
+        lambda oid: "netapp release" in oid(".1.3.6.1.2.1.1.1.0").lower() or \
+                    oid(".1.3.6.1.2.1.1.2.0").startswith(".1.3.6.1.4.1.789"),
+}
diff -Nru check-mk-1.2.2p3/netapp_volumes check-mk-1.2.6p12/netapp_volumes
--- check-mk-1.2.2p3/netapp_volumes	2013-11-05 09:23:08.000000000 +0000
+++ check-mk-1.2.6p12/netapp_volumes	2015-09-21 10:59:54.000000000 +0000
@@ -7,7 +7,7 @@
 # |           | |___| | | |  __/ (__|   <    | |  | | . \            |
 # |            \____|_| |_|\___|\___|_|\_\___|_|  |_|_|\_\           |
 # |                                                                  |
-# | Copyright Mathias Kettner 2013             mk@mathias-kettner.de |
+# | Copyright Mathias Kettner 2014             mk@mathias-kettner.de |
 # +------------------------------------------------------------------+
 #
 # This file is part of Check_MK.
@@ -24,62 +24,60 @@
 # to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
 # Boston, MA 02110-1301 USA.
-# Author: Lars Michelsen - -#.1.3.6.1.4.1.789.1.5.8.1.1.1 = INTEGER: 1 -#.1.3.6.1.4.1.789.1.5.8.1.1.2 = INTEGER: 2 -#.1.3.6.1.4.1.789.1.5.8.1.1.3 = INTEGER: 3 -#.1.3.6.1.4.1.789.1.5.8.1.2.1 = STRING: "vol0" -#.1.3.6.1.4.1.789.1.5.8.1.2.2 = STRING: "RvRBackup" -#.1.3.6.1.4.1.789.1.5.8.1.2.3 = STRING: "RundV" -#.1.3.6.1.4.1.789.1.5.8.1.3.1 = STRING: "67155442" -#.1.3.6.1.4.1.789.1.5.8.1.3.2 = STRING: "2600515058" -#.1.3.6.1.4.1.789.1.5.8.1.3.3 = STRING: "788575730" -#.1.3.6.1.4.1.789.1.5.8.1.4.1 = INTEGER: 1 -#.1.3.6.1.4.1.789.1.5.8.1.4.2 = INTEGER: 1 -#.1.3.6.1.4.1.789.1.5.8.1.4.3 = INTEGER: 1 -#.1.3.6.1.4.1.789.1.5.8.1.5.1 = STRING: "online" -#.1.3.6.1.4.1.789.1.5.8.1.5.2 = STRING: "online" -#.1.3.6.1.4.1.789.1.5.8.1.5.3 = STRING: "online" -#.1.3.6.1.4.1.789.1.5.8.1.6.1 = STRING: "raid_dp" -#.1.3.6.1.4.1.789.1.5.8.1.6.2 = STRING: "raid_dp" -#.1.3.6.1.4.1.789.1.5.8.1.6.3 = STRING: "raid_dp" -#.1.3.6.1.4.1.789.1.5.8.1.7.1 = STRING: "root, diskroot, nosnap=off, -# nosnapdir=off, minra=off, no_atime_update=off, raidtype=raid_dp, raidsize=16, -# nvfail=off, snapmirrored=off, resyncsnaptime=60, create_ucode=off, -# convert_ucode=off, maxdirsize=10485, fs_size_fixed=off, guarantee=volume, -# svo_enable=off, svo_checksum=off, svo_allow_rman=off, svo_reject_errors=off, -# no_i2p=off, fractional_reserve=100, extent=off, try_first=volume_grow" -#.1.3.6.1.4.1.789.1.5.8.1.7.2 = STRING: "nosnap=off, nosnapdir=off, -# minra=off, no_atime_update=off, raidtype=raid_dp, raidsize=16, nvfail=off, -# snapmirrored=off, resyncsnaptime=60, create_ucode=off, convert_ucode=off, -# maxdirsize=10485, fs_size_fixed=off, guarantee=volume, svo_enable=off, -# svo_checksum=off, svo_allow_rman=off, svo_reject_errors=off, no_i2p=off, -# fractional_reserve=100, extent=off, try_first=volume_grow" -#.1.3.6.1.4.1.789.1.5.8.1.7.3 = STRING: "nosnap=on, nosnapdir=on, -# minra=off, no_atime_update=off, raidtype=raid_dp, raidsize=16, nvfail=off, -# snapmirrored=off, resyncsnaptime=60, create_ucode=off, convert_ucode=off, -# maxdirsize=10485, fs_size_fixed=off, guarantee=volume, svo_enable=off, -# svo_checksum=off, svo_allow_rman=off, svo_reject_errors=off, no_i2p=off, -# fractional_reserve=100, extent=off, try_first=volume_grow" -#.1.3.6.1.4.1.789.1.5.8.1.8.1 = STRING: "1ddc9920-496e-11df-aae1-00a09800c998" -#.1.3.6.1.4.1.789.1.5.8.1.8.2 = STRING: "62ac1040-5a8d-11df-83fd-00a09800c998" -#.1.3.6.1.4.1.789.1.5.8.1.8.3 = STRING: "4edc66e0-d6a3-11df-8724-00a09800c998" -#.1.3.6.1.4.1.789.1.5.8.1.9.1 = STRING: "aggr0" -#.1.3.6.1.4.1.789.1.5.8.1.9.2 = STRING: "aggr0" -#.1.3.6.1.4.1.789.1.5.8.1.9.3 = STRING: "aggr0" -#.1.3.6.1.4.1.789.1.5.8.1.10.1 = INTEGER: 2 -#.1.3.6.1.4.1.789.1.5.8.1.10.2 = INTEGER: 2 -#.1.3.6.1.4.1.789.1.5.8.1.10.3 = INTEGER: 2 -#.1.3.6.1.4.1.789.1.5.8.1.11.1 = INTEGER: 1 -#.1.3.6.1.4.1.789.1.5.8.1.11.2 = INTEGER: 1 -#.1.3.6.1.4.1.789.1.5.8.1.11.3 = INTEGER: 1 -#.1.3.6.1.4.1.789.1.5.8.1.12.1 = "" -#.1.3.6.1.4.1.789.1.5.8.1.12.2 = "" -#.1.3.6.1.4.1.789.1.5.8.1.12.3 = "" -#.1.3.6.1.4.1.789.1.5.8.1.13.1 = "" -#.1.3.6.1.4.1.789.1.5.8.1.13.2 = "" -#.1.3.6.1.4.1.789.1.5.8.1.13.3 = "" +# .1.3.6.1.4.1.789.1.5.8.1.1.1 = INTEGER: 1 +# .1.3.6.1.4.1.789.1.5.8.1.1.2 = INTEGER: 2 +# .1.3.6.1.4.1.789.1.5.8.1.1.3 = INTEGER: 3 +# .1.3.6.1.4.1.789.1.5.8.1.2.1 = STRING: "vol0" +# .1.3.6.1.4.1.789.1.5.8.1.2.2 = STRING: "RvRBackup" +# .1.3.6.1.4.1.789.1.5.8.1.2.3 = STRING: "RundV" +# .1.3.6.1.4.1.789.1.5.8.1.3.1 = STRING: "67155442" +# .1.3.6.1.4.1.789.1.5.8.1.3.2 = STRING: "2600515058" +# .1.3.6.1.4.1.789.1.5.8.1.3.3 = STRING: "788575730" 
+# .1.3.6.1.4.1.789.1.5.8.1.4.1 = INTEGER: 1 +# .1.3.6.1.4.1.789.1.5.8.1.4.2 = INTEGER: 1 +# .1.3.6.1.4.1.789.1.5.8.1.4.3 = INTEGER: 1 +# .1.3.6.1.4.1.789.1.5.8.1.5.1 = STRING: "online" +# .1.3.6.1.4.1.789.1.5.8.1.5.2 = STRING: "online" +# .1.3.6.1.4.1.789.1.5.8.1.5.3 = STRING: "online" +# .1.3.6.1.4.1.789.1.5.8.1.6.1 = STRING: "raid_dp" +# .1.3.6.1.4.1.789.1.5.8.1.6.2 = STRING: "raid_dp" +# .1.3.6.1.4.1.789.1.5.8.1.6.3 = STRING: "raid_dp" +# .1.3.6.1.4.1.789.1.5.8.1.7.1 = STRING: "root, diskroot, nosnap=off, +# nosnapdir=off, minra=off, no_atime_update=off, raidtype=raid_dp, raidsize=16, +# nvfail=off, snapmirrored=off, resyncsnaptime=60, create_ucode=off, +# convert_ucode=off, maxdirsize=10485, fs_size_fixed=off, guarantee=volume, +# svo_enable=off, svo_checksum=off, svo_allow_rman=off, svo_reject_errors=off, +# no_i2p=off, fractional_reserve=100, extent=off, try_first=volume_grow" +# .1.3.6.1.4.1.789.1.5.8.1.7.2 = STRING: "nosnap=off, nosnapdir=off, +# minra=off, no_atime_update=off, raidtype=raid_dp, raidsize=16, nvfail=off, +# snapmirrored=off, resyncsnaptime=60, create_ucode=off, convert_ucode=off, +# maxdirsize=10485, fs_size_fixed=off, guarantee=volume, svo_enable=off, +# svo_checksum=off, svo_allow_rman=off, svo_reject_errors=off, no_i2p=off, +# fractional_reserve=100, extent=off, try_first=volume_grow" +# .1.3.6.1.4.1.789.1.5.8.1.7.3 = STRING: "nosnap=on, nosnapdir=on, +# minra=off, no_atime_update=off, raidtype=raid_dp, raidsize=16, nvfail=off, +# snapmirrored=off, resyncsnaptime=60, create_ucode=off, convert_ucode=off, +# maxdirsize=10485, fs_size_fixed=off, guarantee=volume, svo_enable=off, +# svo_checksum=off, svo_allow_rman=off, svo_reject_errors=off, no_i2p=off, +# fractional_reserve=100, extent=off, try_first=volume_grow" +# .1.3.6.1.4.1.789.1.5.8.1.8.1 = STRING: "1ddc9920-496e-11df-aae1-00a09800c998" +# .1.3.6.1.4.1.789.1.5.8.1.8.2 = STRING: "62ac1040-5a8d-11df-83fd-00a09800c998" +# .1.3.6.1.4.1.789.1.5.8.1.8.3 = STRING: "4edc66e0-d6a3-11df-8724-00a09800c998" +# .1.3.6.1.4.1.789.1.5.8.1.9.1 = STRING: "aggr0" +# .1.3.6.1.4.1.789.1.5.8.1.9.2 = STRING: "aggr0" +# .1.3.6.1.4.1.789.1.5.8.1.9.3 = STRING: "aggr0" +# .1.3.6.1.4.1.789.1.5.8.1.10.1 = INTEGER: 2 +# .1.3.6.1.4.1.789.1.5.8.1.10.2 = INTEGER: 2 +# .1.3.6.1.4.1.789.1.5.8.1.10.3 = INTEGER: 2 +# .1.3.6.1.4.1.789.1.5.8.1.11.1 = INTEGER: 1 +# .1.3.6.1.4.1.789.1.5.8.1.11.2 = INTEGER: 1 +# .1.3.6.1.4.1.789.1.5.8.1.11.3 = INTEGER: 1 +# .1.3.6.1.4.1.789.1.5.8.1.12.1 = "" +# .1.3.6.1.4.1.789.1.5.8.1.12.2 = "" +# .1.3.6.1.4.1.789.1.5.8.1.12.3 = "" +# .1.3.6.1.4.1.789.1.5.8.1.13.1 = "" +# .1.3.6.1.4.1.789.1.5.8.1.13.2 = "" +# .1.3.6.1.4.1.789.1.5.8.1.13.3 = "" netapp_volumes_owner = { '1': 'local', @@ -91,39 +89,42 @@ return [ (line[0], None) for line in info if netapp_volumes_owner.get(line[2], '') == 'local' ] def check_netapp_volumes(item, _not_used, info): - info = [ line for line in info if line[0] == item ][0] - - if len(info) != 5: - return (3, "UNKNOWN - Invalid SNMP response") - - name, fsid, owner, state, status = info - - output = 'FSID: %s, Owner: %s, ' % (fsid, netapp_volumes_owner.get(owner, 'UNKNOWN')) - - ret_state = 0 - output += 'State: %s' % state - if state == 'offline': - ret_state = 1 - output += '(!)' - - output += ', Status: %s' % status - if status == 'reconstructing': - ret_state = 1 - output += '(!)' - elif status.split(',')[0] not in [ 'normal', 'raid_dp', 'raid0', 'raid0, mirrored', 'raid4' ]: - ret_state = 2 - output += '(!!)' - - return (ret_state, "%s - %s" % (nagios_state_names[ret_state], output)) 
- -check_info["netapp_volumes"] = (check_netapp_volumes, "NetApp Vol %s", 0, inventory_netapp_volumes) -snmp_info["netapp_volumes"] = ( ".1.3.6.1.4.1.789.1.5.8.1", ["2", # volName - "3", # volFSID - "4", # volOwningHost - "5", # volState - "6", # volStatus - ] ) - -snmp_scan_functions['netapp_volumes'] = \ - lambda oid: oid(".1.3.6.1.2.1.1.1.0").startswith('NetApp Release') or \ - oid(".1.3.6.1.2.1.1.2.0").startswith(".1.3.6.1.4.1.789") + for line in info: + if line[0] == item: + name, fsid, owner, state, status = line + + output = 'FSID: %s, Owner: %s, ' % (fsid, netapp_volumes_owner.get(owner, 'UNKNOWN')) + + ret_state = 0 + output += 'State: %s' % state + if state == 'offline': + ret_state = 1 + output += '(!)' + + output += ', Status: %s' % status + if status == 'reconstructing': + ret_state = 1 + output += '(!)' + elif status.split(',')[0] not in [ 'normal', 'raid_dp', 'raid0', + 'raid0, mirrored', 'raid4', 'mixed_raid_type' ]: + ret_state = 2 + output += '(!!)' + + return (ret_state, output) + return 3, "Volume not found" + +check_info["netapp_volumes"] = { + 'check_function': check_netapp_volumes, + 'inventory_function': inventory_netapp_volumes, + 'service_description': 'NetApp Vol %s', + 'snmp_info': ('.1.3.6.1.4.1.789.1.5.8.1', [ + '2', # volName + '3', # volFSID + '4', # volOwningHost + '5', # volState + '6', # volStatus + ]), + 'snmp_scan_function': \ + lambda oid: oid(".1.3.6.1.2.1.1.1.0").startswith('NetApp Release') or \ + oid(".1.3.6.1.2.1.1.2.0").startswith(".1.3.6.1.4.1.789"), +} diff -Nru check-mk-1.2.2p3/netctr check-mk-1.2.6p12/netctr --- check-mk-1.2.2p3/netctr 2013-11-05 09:23:08.000000000 +0000 +++ check-mk-1.2.6p12/netctr 2015-06-24 09:48:37.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. @@ -79,19 +79,11 @@ infotxt = "" problems_per_sec = 0.0 packets_per_sec = 0.0 - counter_wrapped = None for countername in netctr_counters: index = netctr_counter_indices[countername] value = int(nicline[index + 1]) - try: - timedif, items_per_sec = get_counter( "netctr." + nic + "." + countername, this_time, value) - perfdata.append( ( countername, "%dc" % value ) ) - except MKCounterWrapped, e: - counter_wrapped = e - # Important: continue counter loop, so that *all* counters get initialized. - # Otherwise this checks would need 7 check cycles until the first result - # would be produced - continue + items_per_sec = get_rate( "netctr." + nic + "." 
+ countername, this_time, value) + perfdata.append( ( countername, "%dc" % value ) ) if countername in [ "rx_errors", "tx_errors", "tx_collisions" ]: problems_per_sec += items_per_sec @@ -102,24 +94,25 @@ elif countername == 'tx_bytes': infotxt += ' - Send: %.2f MB/sec' % (float(items_per_sec) / float(1024*1024)) - # if at least one counter wrapped, we cannot send or use performance - # data and leave out this check for this turn - if counter_wrapped: - raise counter_wrapped - error_percentage = 0.0 if problems_per_sec > 0: error_percentage = (problems_per_sec / packets_per_sec) * 100.0 infotxt += ", error rate %.4f%%" % error_percentage if error_percentage >= crit: - return (2, "CRIT" + infotxt, perfdata) + return (2, infotxt, perfdata) elif error_percentage >= warn: - return (1, "WARNING" + infotxt, perfdata) + return (1, infotxt, perfdata) else: - return (0, "OK" + infotxt, perfdata) + return (0, infotxt, perfdata) - return (3, "UNKNOWN - NIC is not present") + return (3, "NIC is not present") -check_info['netctr.combined'] = (check_netctr_combined, "NIC %s counters", 1, inventory_netctr_combined ) check_config_variables.append("netctr_counters") + +check_info["netctr.combined"] = { + 'check_function': check_netctr_combined, + 'inventory_function': inventory_netctr_combined, + 'service_description': 'NIC %s counters', + 'has_perfdata': True, +} diff -Nru check-mk-1.2.2p3/netctr.combined check-mk-1.2.6p12/netctr.combined --- check-mk-1.2.2p3/netctr.combined 2013-11-05 09:23:07.000000000 +0000 +++ check-mk-1.2.6p12/netctr.combined 2015-06-24 09:48:36.000000000 +0000 @@ -1,6 +1,6 @@ -title: Monitor errors and traffic over network interfaces +title: Errors and traffic over network interfaces agents: linux -author: Mathias Kettner +catalog: os/networking license: GPL distribution: check_mk description: diff -Nru check-mk-1.2.2p3/netif check-mk-1.2.6p12/netif --- check-mk-1.2.2p3/netif 2013-11-05 09:23:08.000000000 +0000 +++ check-mk-1.2.6p12/netif 2015-06-24 09:48:37.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. @@ -24,6 +24,9 @@ # to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, # Boston, MA 02110-1301 USA. +# WARNING: This check is deprecated and will be removed soon. Please +# update your agent and use lnx_if instead. 
+ linux_nic_check = "lnx_if" def inventory_netif_link(info): @@ -34,26 +37,26 @@ def check_netif_link(item, targetstate, info): links = [ i[4] for i in info if i[0] == item ] if len(links) == 0: - return (2, "CRIT - unknown network device") + return (2, "unknown network device") elif len(links) != 1: - return (3, "UNKNOWN - network devices listed more than once") + return (3, "network devices listed more than once") if links[0] == 'yes': link = True elif links[0] == 'no': link = False else: - return (3, "UNKNOWN - invalid link state '%s'" % link) + return (3, "invalid link state '%s'" % link) if link == targetstate: if link: - return (0, "OK - Link is up") + return (0, "Link is up") else: - return (0, "OK - no link / NIC unused") + return (0, "no link / NIC unused") else: if link: - return (1, "WARN - Link is up, NIC should be unused") + return (1, "Link is up, NIC should be unused") else: - return (2, "CRIT - no link") + return (2, "no link") def inventory_netif_params(info): @@ -64,16 +67,26 @@ def check_netif_params(item, params, info): infolist = [ i[1:4] for i in info if i[0] == item ] if len(infolist) == 0: - return (2, "CRIT - unknown network device") + return (2, "unknown network device") elif len(infolist) != 1: - return (3, "UNKNOWN - network devices listed more than once") + return (3, "network devices listed more than once") act_params = tuple(infolist[0]) if act_params == params: - return (0, "OK - %s" % (",".join(act_params),) ) + return (0, "%s" % (",".join(act_params),) ) else: - return (2, "CRIT - %s (should be %s)" % + return (2, "%s (should be %s)" % (",".join(act_params), ",".join(params))) -check_info['netif.link'] = (check_netif_link, "NIC %s link", 0, inventory_netif_link) -check_info['netif.params'] = (check_netif_params, "NIC %s parameter", 0, inventory_netif_params) + +check_info["netif.params"] = { + 'check_function': check_netif_params, + 'inventory_function': inventory_netif_params, + 'service_description': 'NIC %s parameter', +} + +check_info["netif.link"] = { + 'check_function': check_netif_link, + 'inventory_function': inventory_netif_link, + 'service_description': 'NIC %s link', +} diff -Nru check-mk-1.2.2p3/netif.link check-mk-1.2.6p12/netif.link --- check-mk-1.2.2p3/netif.link 2013-11-05 09:23:07.000000000 +0000 +++ check-mk-1.2.6p12/netif.link 2015-06-24 09:48:36.000000000 +0000 @@ -1,9 +1,8 @@ -title: Monitor link status of Linux network interfaces (deprecated) +title: Network interfaces on Linux: Link - DEPRECATED agents: linux -author: Mathias Kettner +catalog: os/networking license: GPL distribution: check_mk description: - This check monitors the link status of Linux network interfaces. + This check is deprecated. Please use {lnx_if} instead. - Note: this check is deprecated. Better use {lnx_if}. diff -Nru check-mk-1.2.2p3/netif.params check-mk-1.2.6p12/netif.params --- check-mk-1.2.2p3/netif.params 2013-11-05 09:23:07.000000000 +0000 +++ check-mk-1.2.6p12/netif.params 2015-06-24 09:48:36.000000000 +0000 @@ -1,10 +1,8 @@ -title: Monitor speed settings of Linux network interfaces (deprecated) +title: Network interfaces on Linux: Parameters - DEPRECATED agents: linux -author: Mathias Kettner +catalog: os/networking license: GPL distribution: check_mk description: - This check monitors the link status of Linux network interfaces. - - Note: this check is deprecated. Better use {lnx_if}. + This check is deprecated. Please use {lnx_if} instead. 
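Several hunks in this release, netctr above and netapp_fcpio earlier, replace the old get_counter() call, which returned a (timedif, rate) pair and raised MKCounterWrapped on the first sample, with get_rate(), which returns only the per-second rate. The following is not Check_MK's implementation, just a self-contained sketch of the behaviour a get_rate()-style helper provides:

    # Illustrative sketch of a counter-to-rate helper: remember the last
    # (time, value) pair per counter id and derive a per-second rate from
    # the deltas. Not Check_MK's actual get_rate().

    _counter_store = {}

    def sketch_get_rate(counter_id, this_time, value):
        last = _counter_store.get(counter_id)
        _counter_store[counter_id] = (this_time, value)
        if last is None:
            return 0.0 # first sample: no rate can be computed yet
        last_time, last_value = last
        if this_time <= last_time or value < last_value:
            return 0.0 # clock went backwards or the counter wrapped
        return (value - last_value) / float(this_time - last_time)

This is why the netctr hunk above could drop the MKCounterWrapped bookkeeping: a first or wrapped sample no longer aborts the whole check cycle.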
diff -Nru check-mk-1.2.2p3/netstat check-mk-1.2.6p12/netstat
--- check-mk-1.2.2p3/netstat	1970-01-01 00:00:00.000000000 +0000
+++ check-mk-1.2.6p12/netstat	2015-06-24 09:48:37.000000000 +0000
@@ -0,0 +1,73 @@
+#!/usr/bin/python
+# -*- encoding: utf-8; py-indent-offset: 4 -*-
+# +------------------------------------------------------------------+
+# |             ____ _               _        __  __ _  __           |
+# |            / ___| |__   ___  ___| | __   |  \/  | |/ /           |
+# |           | |   | '_ \ / _ \/ __| |/ /   | |\/| | ' /            |
+# |           | |___| | | |  __/ (__|   <    | |  | | . \            |
+# |            \____|_| |_|\___|\___|_|\_\___|_|  |_|_|\_\           |
+# |                                                                  |
+# | Copyright Mathias Kettner 2014             mk@mathias-kettner.de |
+# +------------------------------------------------------------------+
+#
+# This file is part of Check_MK.
+# The official homepage is at http://mathias-kettner.de/check_mk.
+#
+# check_mk is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by
+# the Free Software Foundation in version 2. check_mk is distributed
+# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with-
+# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE. See the GNU General Public License for more de-
+# ails. You should have received a copy of the GNU General Public
+# License along with GNU Make; see the file COPYING. If not, write
+# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
+# Boston, MA 02110-1301 USA.
+
+# Example output from agent (Linux)
+# <<<netstat>>>
+# tcp 0 0 127.0.0.1:57573 127.0.0.1:80 ESTABLISHED
+# tcp 0 0 10.1.1.50:38692 178.248.246.154:993 ESTABLISHED
+# tcp 0 0 127.0.0.1:34929 127.0.0.1:5000 TIME_WAIT
+# tcp 0 0 127.0.0.1:34922 127.0.0.1:5000 TIME_WAIT
+# tcp 0 0 127.0.0.1:80 127.0.0.1:57454 TIME_WAIT
+# tcp 0 0 127.0.0.1:35005 127.0.0.1:5000 TIME_WAIT
+# tcp 0 0 10.1.1.50:38612 178.248.246.154:993 ESTABLISHED
+# tcp 0 0 127.0.0.1:80 127.0.0.1:57548 TIME_WAIT
+# tcp 0 0 127.0.0.1:34981 127.0.0.1:5000 TIME_WAIT
+# tcp 0 0 127.0.0.1:54552 127.0.0.1:13419 ESTABLISHED
+# tcp 0 0 127.0.0.1:35012 127.0.0.1:5000 TIME_WAIT
+# tcp 0 0 127.0.0.1:34910 127.0.0.1:5000 TIME_WAIT
+# tcp 0 0 127.0.0.1:34915 127.0.0.1:5000 TIME_WAIT
+# tcp 0 0 127.0.0.1:80 127.0.0.1:57546 TIME_WAIT
+# tcp 0 0 127.0.0.1:34935 127.0.0.1:5000 TIME_WAIT
+# tcp 0 0 127.0.0.1:34984 127.0.0.1:5000 TIME_WAIT
+# tcp 0 0 127.0.0.1:80 127.0.0.1:57488 TIME_WAIT
+# tcp 0 0 127.0.0.1:34967 127.0.0.1:5000 TIME_WAIT
+
+def parse_netstat(info):
+    connections = []
+    for line in info:
+        if len(line) == 6:
+            proto, recv_q, send_q, local, remote, connstate = line
+            if proto.startswith("tcp"): # also tcp4 and tcp6
+                proto = "TCP"
+            elif proto.startswith("udp"):
+                proto = "UDP"
+
+            connections.append( (proto, local.rsplit(":", 1), remote.rsplit(":", 1),
+                                 connstate) )
+    return connections
+
+
+def check_netstat(item, params, info):
+    connections = parse_netstat(info)
+    return check_netstat_generic(item, params, connections)
+
+check_info["netstat"] = {
+    'check_function'      : check_netstat,
+    'service_description' : "TCP Connection %s",
+    'group'               : "tcp_connections",
+    'includes'            : [ "netstat.include" ],
+}
+
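For reference, this is what parse_netstat() above produces for a single agent line; rsplit(":", 1) splits only at the last colon, so the port always ends up in the second element, and it stays a string:

    # Example (Python 2, pasted next to the check above):
    line = ["tcp", "0", "0", "10.1.1.50:38692", "178.248.246.154:993", "ESTABLISHED"]
    print parse_netstat([line])
    # -> [('TCP', ['10.1.1.50', '38692'], ['178.248.246.154', '993'], 'ESTABLISHED')]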
diff -Nru check-mk-1.2.2p3/netstat.include check-mk-1.2.6p12/netstat.include
--- check-mk-1.2.2p3/netstat.include	1970-01-01 00:00:00.000000000 +0000
+++ check-mk-1.2.6p12/netstat.include	2015-06-24 09:48:37.000000000 +0000
@@ -0,0 +1,80 @@
+#!/usr/bin/python
+# -*- encoding: utf-8; py-indent-offset: 4 -*-
+# +------------------------------------------------------------------+
+# |             ____ _               _        __  __ _  __           |
+# |            / ___| |__   ___  ___| | __   |  \/  | |/ /           |
+# |           | |   | '_ \ / _ \/ __| |/ /   | |\/| | ' /            |
+# |           | |___| | | |  __/ (__|   <    | |  | | . \            |
+# |            \____|_| |_|\___|\___|_|\_\___|_|  |_|_|\_\           |
+# |                                                                  |
+# | Copyright Mathias Kettner 2014             mk@mathias-kettner.de |
+# +------------------------------------------------------------------+
+#
+# This file is part of Check_MK.
+# The official homepage is at http://mathias-kettner.de/check_mk.
+#
+# check_mk is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by
+# the Free Software Foundation in version 2. check_mk is distributed
+# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with-
+# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE. See the GNU General Public License for more de-
+# ails. You should have received a copy of the GNU General Public
+# License along with GNU Make; see the file COPYING. If not, write
+# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
+# Boston, MA 02110-1301 USA.
+
+# Item is a user-defined identifier of the connection.
+# Example for params:
+# {
+#     "proto"       : "UDP",
+#     "local_ip"    : "10.1.1.99",
+#     "remote_port" : 5665,
+#     "state"       : "ESTABLISHED",
+# }
+# Other keys: local_port, remote_ip. Keys that are missing are
+# simply ignored when matching.
+
+
+def check_netstat_generic(item, params, connections):
+    found = 0
+    for proto, (local_ip, local_port), (remote_ip, remote_port), connstate in connections:
+        # Beware: port numbers are strings here.
+        match = True
+        for k, v in [
+            ( "local_ip",    local_ip ),
+            ( "local_port",  local_port ),
+            ( "remote_ip",   remote_ip ),
+            ( "remote_port", remote_port ),
+            ( "proto",       proto ),
+            ( "state",       connstate )]:
+            if k in params and str(params[k]) != v:
+                match = False
+                break
+        if match:
+            found += 1
+
+    # Check if found connections are within limits
+    yield 0, "Found %d matching entries" % found
+
+    if params.get("min_states"):
+        min_warn, min_crit = params["min_states"]
+        if found <= min_crit:
+            state = 2
+        elif found <= min_warn:
+            state = 1
+        else:
+            state = 0
+        if state:
+            yield state, "too few (warn/crit at %d/%d)" % (min_warn, min_crit)
+
+    if params.get("max_states"):
+        max_warn, max_crit = params["max_states"]
+        if found >= max_crit:
+            state = 2
+        elif found >= max_warn:
+            state = 1
+        else:
+            state = 0
+        if state:
+            yield state, "too many (warn/crit at %d/%d)" % (max_warn, max_crit)
+
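A short illustration of how the generic function is driven. The item text and levels are invented; matching is string-based, so an integer in params is compared via str():

    # Illustration only - item name and thresholds are invented:
    connections = [
        ("TCP", ["10.1.1.99", "22"], ["10.1.1.50", "53421"], "ESTABLISHED"),
        ("TCP", ["10.1.1.99", "22"], ["10.1.1.51", "60100"], "ESTABLISHED"),
    ]
    params = { "proto": "TCP", "local_port": 22, "min_states": (1, 0) }

    for state, text in check_netstat_generic("SSH sessions", params, connections):
        print state, text
    # -> 0 Found 2 matching entries
    # (min_states does not trigger here, since 2 is above both levels)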
diff -Nru check-mk-1.2.2p3/network_fs.include check-mk-1.2.6p12/network_fs.include
--- check-mk-1.2.2p3/network_fs.include	1970-01-01 00:00:00.000000000 +0000
+++ check-mk-1.2.6p12/network_fs.include	2015-06-24 09:48:37.000000000 +0000
@@ -0,0 +1,68 @@
+#!/usr/bin/python
+# -*- encoding: utf-8; py-indent-offset: 4 -*-
+# +------------------------------------------------------------------+
+# |             ____ _               _        __  __ _  __           |
+# |            / ___| |__   ___  ___| | __   |  \/  | |/ /           |
+# |           | |   | '_ \ / _ \/ __| |/ /   | |\/| | ' /            |
+# |           | |___| | | |  __/ (__|   <    | |  | | . \            |
+# |            \____|_| |_|\___|\___|_|\_\___|_|  |_|_|\_\           |
+# |                                                                  |
+# | Copyright Mathias Kettner 2014             mk@mathias-kettner.de |
+# +------------------------------------------------------------------+
+#
+# This file is part of Check_MK.
+# The official homepage is at http://mathias-kettner.de/check_mk.
+#
+# check_mk is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by
+# the Free Software Foundation in version 2. check_mk is distributed
+# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with-
+# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE. See the GNU General Public License for more de-
+# ails. You should have received a copy of the GNU General Public
+# License along with GNU Make; see the file COPYING. If not, write
+# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
+# Boston, MA 02110-1301 USA.
+
+# Example output from agent:
+# <<<...mounts>>>
+# /foobar hanging 0 0 0 0
+# /with spaces ok 217492 123563 112515 524288
+
+def inventory_network_fs_mounts(info):
+    return [ (" ".join(line[:-5]), None) for line in info ]
+
+def check_network_fs_mounts(item, _no_params, info):
+    for line in info:
+        mountpoint = " ".join(line[:-5])
+        if mountpoint == item:
+            # On some operating systems there is no information about
+            # used and free space, but just dashes (e.g. AIX)
+            if line[-4] == '-':
+                if line[-5] == 'ok':
+                    return 0, "mount seems OK"
+                else:
+                    return 2, "Server not responding"
+
+
+            size_blocks = int(line[-4])
+            free_blocks = int(line[-2]) # for non-root user
+            blocksize   = int(line[-1])
+
+            if size_blocks <= 0 or free_blocks < 0 or blocksize > 1024*1024:
+                return (2, "Stale fs handle")
+
+            if line[-5] == 'ok':
+                gb = 1024 * 1024 * 1024.0
+                size_gb = (size_blocks * blocksize) / gb
+                if size_gb == 0:
+                    return (0, "server is responding")
+
+                free_gb = (free_blocks * blocksize) / gb
+                used_gb = size_gb - free_gb
+                used_perc = 100.0 * used_gb / size_gb
+                return (0, "%.1f%% used (%.1f of %.1f GB)" % (used_perc, used_gb, size_gb))
+            else:
+                return (2, "server not responding")
+    return (3, "not mounted")
+
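With the numbers from the sample agent line above ("/with spaces ok 217492 123563 112515 524288"), the block arithmetic in check_network_fs_mounts works out as follows:

    # Worked example of the block arithmetic above:
    gb = 1024 * 1024 * 1024.0
    size_gb = (217492 * 524288) / gb   # ~106.2 GB total
    free_gb = (112515 * 524288) / gb   # ~54.9 GB free for non-root users
    used_perc = 100.0 * (size_gb - free_gb) / size_gb   # ~48.3% used
    # -> the check reports "48.3% used (51.3 of 106.2 GB)"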
diff -Nru check-mk-1.2.2p3/nfsexports check-mk-1.2.6p12/nfsexports
--- check-mk-1.2.2p3/nfsexports	2013-11-05 09:23:08.000000000 +0000
+++ check-mk-1.2.6p12/nfsexports	2015-06-24 09:48:37.000000000 +0000
@@ -7,7 +7,7 @@
 # |           | |___| | | |  __/ (__|   <    | |  | | . \            |
 # |            \____|_| |_|\___|\___|_|\_\___|_|  |_|_|\_\           |
 # |                                                                  |
-# | Copyright Mathias Kettner 2013             mk@mathias-kettner.de |
+# | Copyright Mathias Kettner 2014             mk@mathias-kettner.de |
 # +------------------------------------------------------------------+
 #
 # This file is part of Check_MK.
@@ -24,18 +24,12 @@
 # to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
 # Boston, MA 02110-1301 USA.
 
-#!/usr/bin/python
-
-
 # This check verifies a given NFS export is registered with mountd.
 # Optionally we can add tracking of allowed clients and filesystem ID.
 
 # Agent info
 # [['/mirrored/data/recording', '172.0.0.0/255.0.0.0']]
-
-
-
 def inventory_nfsexports(checkname, info):
     # reminder to self: inventorize the exported fs, and maybe even the fs id.
     # but do not inventorize the allowed clients unless i'm really sure that
@@ -54,18 +48,17 @@
     # if the agent returned an empty list then it found entries in /etc/exports
     # but apparently no daemons were running.
     if len(info) == 0:
-        return (2, "CRITICAL - exports defined but no exports found in export list. Daemons might not be working")
+        return (2, "exports defined but no exports found in export list. Daemons might not be working")
 
     # otherwise let's see if our export exists.
     for line in info:
         exported_path = line[0]
         if exported_path == item:
-            return (0, "OK - export is active")
-    return (2, "CRITICAL - export not found in export list")
-
-#    return (3, "UNKNOWN - invalid data returned from Agent")
-
-
-
+            return (0, "export is active")
+    return (2, "export not found in export list")
 
-check_info['nfsexports'] = (check_nfsexports, "NFS export %s", 0, inventory_nfsexports)
+check_info["nfsexports"] = {
+    'check_function':          check_nfsexports,
+    'inventory_function':      inventory_nfsexports,
+    'service_description':     'NFS export %s',
+}
diff -Nru check-mk-1.2.2p3/nfsmounts check-mk-1.2.6p12/nfsmounts
--- check-mk-1.2.2p3/nfsmounts	2013-11-05 09:23:08.000000000 +0000
+++ check-mk-1.2.6p12/nfsmounts	2015-06-24 09:48:37.000000000 +0000
@@ -7,7 +7,7 @@
 # |           | |___| | | |  __/ (__|   <    | |  | | . \            |
 # |            \____|_| |_|\___|\___|_|\_\___|_|  |_|_|\_\           |
 # |                                                                  |
-# | Copyright Mathias Kettner 2013             mk@mathias-kettner.de |
+# | Copyright Mathias Kettner 2014             mk@mathias-kettner.de |
 # +------------------------------------------------------------------+
 #
 # This file is part of Check_MK.
@@ -24,34 +24,16 @@
 # to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
 # Boston, MA 02110-1301 USA.
 
+# Example output from agent:
+# <<<nfsmounts>>>
+# /foobar hanging 0 0 0 0
+# /with spaces ok 217492 123563 112515 524288
 
-def inventory_nfsmounts(info):
-    return [ (line[0], None) for line in info ]
 
-def check_nfsmounts(item, _no_params, info):
-    for line in info:
-        mountpoint = line[0]
-        if mountpoint == item:
-            size_blocks = int(line[2])
-            free_blocks = int(line[4]) # for non-root user
-            blocksize = int(line[5])
-
-            if size_blocks < 0 or free_blocks < 0 or blocksize > 1024*1024:
-                return (2, "CRIT - Stale NFS handle")
-
-            if line[1] == 'ok':
-                gb = 1024 * 1024 * 1024.0
-                size_gb = (size_blocks * blocksize) / gb
-                if size_gb == 0:
-                    return (0, "OK - server is responding")
-
-                free_gb = (free_blocks * blocksize) / gb
-                used_gb = size_gb - free_gb
-                used_perc = 100.0 * used_gb / size_gb
-                return (0, "OK - %.1f%% used (%.1f of %.1f GB)" % (used_perc, used_gb, size_gb))
-            else:
-                return (2, "CRIT - server not responding")
-    return (3, "UNKNOWN - not mounted")
-
-check_info['nfsmounts'] = (check_nfsmounts, "NFS mount %s", 0, inventory_nfsmounts)
-checkgroup_of['nfsmounts'] = 'network_fs'
+check_info["nfsmounts"] = {
+    'check_function':          check_network_fs_mounts,
+    'inventory_function':      inventory_network_fs_mounts,
+    'service_description':     'NFS mount %s',
+    'group':                   'network_fs',
+    'includes':                [ 'network_fs.include' ],
+}
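nfsmounts is now only a thin declaration: parsing and state logic live in network_fs.include above, which the 'includes' key pulls in at check load time. Any other agent section with the same line layout can reuse the same pair of functions; the following declaration is a sketch with an invented section name, not something this diff adds:

    # Sketch: reusing the shared include for a hypothetical "cifsmounts"
    # agent section with the same output format.
    check_info["cifsmounts"] = {
        'check_function':      check_network_fs_mounts,
        'inventory_function':  inventory_network_fs_mounts,
        'service_description': 'CIFS mount %s',
        'group':               'network_fs',
        'includes':            [ 'network_fs.include' ],
    }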
diff -Nru check-mk-1.2.2p3/nginx_status check-mk-1.2.6p12/nginx_status
--- check-mk-1.2.2p3/nginx_status	1970-01-01 00:00:00.000000000 +0000
+++ check-mk-1.2.6p12/nginx_status	2015-09-21 10:59:54.000000000 +0000
@@ -0,0 +1,109 @@
+#!/usr/bin/python
+# -*- encoding: utf-8; py-indent-offset: 4 -*-
+# +------------------------------------------------------------------+
+# |             ____ _               _        __  __ _  __           |
+# |            / ___| |__   ___  ___| | __   |  \/  | |/ /           |
+# |           | |   | '_ \ / _ \/ __| |/ /   | |\/| | ' /            |
+# |           | |___| | | |  __/ (__|   <    | |  | | . \            |
+# |            \____|_| |_|\___|\___|_|\_\___|_|  |_|_|\_\           |
+# |                                                                  |
+# | Copyright Mathias Kettner 2014             mk@mathias-kettner.de |
+# +------------------------------------------------------------------+
+#
+# This file is part of Check_MK.
+# The official homepage is at http://mathias-kettner.de/check_mk.
+#
+# check_mk is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by
+# the Free Software Foundation in version 2. check_mk is distributed
+# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with-
+# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE. See the GNU General Public License for more de-
+# ails. You should have received a copy of the GNU General Public
+# License along with GNU Make; see the file COPYING. If not, write
+# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
+# Boston, MA 02110-1301 USA.
+
+def nginx_status_parse(info):
+    if len(info) != 4:
+        return {} # skip unknown data format
+
+    data = {}
+    for i, line in enumerate(info):
+        address, port = line[:2]
+        if len(line) < 3:
+            continue # Skip unexpected lines
+        item = '%s:%s' % (address, port)
+
+        if item not in data:
+            # new server block start
+            data[item] = {
+                'active'   : int(info[i+0][4]),
+                'accepted' : int(info[i+2][2]),
+                'handled'  : int(info[i+2][3]),
+                'requests' : int(info[i+2][4]),
+                'reading'  : int(info[i+3][3]),
+                'writing'  : int(info[i+3][5]),
+                'waiting'  : int(info[i+3][7]),
+            }
+
+    return data
+
+def inventory_nginx_status(info):
+    data = nginx_status_parse(info)
+    inv = []
+    for item in data.keys():
+        inv.append((item, {}))
+    return inv
+
+def check_nginx_status(item, params, info):
+    if params == None:
+        params = {}
+
+    all_data = nginx_status_parse(info)
+    if item not in all_data:
+        return 3, 'Unable to find instance in agent output'
+    data = all_data[item]
+
+    # Add some more values, derived from the raw ones...
+    data['requests_per_conn'] = data['requests'] / data['handled']
+
+    this_time = int(time.time())
+    for key in [ 'accepted', 'handled', 'requests' ]:
+        per_sec = get_rate("nginx_status.%s" % key, this_time, data[key])
+        data['%s_per_sec' % key] = per_sec
+
+    perfdata = data.items()
+    perfdata.sort()
+
+    worst_state = 0
+
+    conn_warn, conn_crit = params.get('active_connections', (None, None))
+    conn_txt = ''
+    if conn_crit != None and data['active'] > conn_crit:
+        worst_state = max(worst_state, 2)
+        conn_txt = ' (!!)'
+    elif conn_warn != None and data['active'] > conn_warn:
+        worst_state = max(worst_state, 1)
+        conn_txt = ' (!)'
+
+    output = [
+        'Active: %d%s (%d reading, %d writing, %d waiting)' %
+            (data['active'], conn_txt, data['reading'], data['writing'], data['waiting']),
+        'Requests: %0.2f/s (%0.2f/Connection)' % (data['requests_per_sec'], data['requests_per_conn']),
+    ]
+
+    if data['accepted'] == data['handled']:
+        output.append('Accepted/Handled: %0.2f/s' % data['accepted_per_sec'])
+    else:
+        output.append('Accepted: %0.2f/s, Handled: %0.2f/s' % (data['accepted_per_sec'], data['handled_per_sec']))
+
+    return worst_state, ', '.join(output), perfdata
+
+check_info['nginx_status'] = {
+    "check_function"          : check_nginx_status,
+    "inventory_function"      : inventory_nginx_status,
+    "service_description"     : "Nginx %s Status",
+    "has_perfdata"            : True,
+    "group"                   : "nginx_status"
+}
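For orientation, this is what nginx_status_parse() above yields for one server block. The agent prefixes each line of nginx's stub_status page with address and port; the numbers here are invented:

    # Example input/output for nginx_status_parse() (values invented):
    info = [
        ['127.0.0.1', '80', 'Active', 'connections:', '1'],
        ['127.0.0.1', '80', 'server', 'accepts', 'handled', 'requests'],
        ['127.0.0.1', '80', '10', '10', '20'],
        ['127.0.0.1', '80', 'Reading:', '0', 'Writing:', '1', 'Waiting:', '0'],
    ]
    print nginx_status_parse(info)
    # -> {'127.0.0.1:80': {'active': 1, 'accepted': 10, 'handled': 10,
    #     'requests': 20, 'reading': 0, 'writing': 1, 'waiting': 0}}
    # (dictionary key order may differ)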
Binary files /tmp/tmpz1J_py/xquijR6Ryw/check-mk-1.2.2p3/Notifications.png and /tmp/tmpz1J_py/GXS8RRKTxu/check-mk-1.2.6p12/Notifications.png differ
diff -Nru check-mk-1.2.2p3/Notifications.svg check-mk-1.2.6p12/Notifications.svg
--- check-mk-1.2.2p3/Notifications.svg	1970-01-01 00:00:00.000000000 +0000
+++ check-mk-1.2.6p12/Notifications.svg	2014-07-04 17:50:27.000000000 +0000
@@ -0,0 +1,800 @@
[New file: an 800-line SVG diagram whose XML markup is not reproduced here.
The figure shows the notification data flow, with these text labels:
"Monitoring Core", "Raw notification context", "Forward?", "Notification
Spooler", "Forwarding", "Async Delivery", "Rule Based / Flexible / Plain
Email", "Plugin contexts", "Bulk?", "Spool?", "Deliver (execute plugin)",
"Store", "Bulk Executer", "send", "receive", "Plugin notification context".]
Binary files /tmp/tmpz1J_py/xquijR6Ryw/check-mk-1.2.2p3/notifications.tar.gz and /tmp/tmpz1J_py/GXS8RRKTxu/check-mk-1.2.6p12/notifications.tar.gz differ
diff -Nru check-mk-1.2.2p3/notify.py check-mk-1.2.6p12/notify.py
--- check-mk-1.2.2p3/notify.py	2013-11-05 09:23:07.000000000 +0000
+++ check-mk-1.2.6p12/notify.py	2015-09-21 10:59:54.000000000 +0000
@@ -7,7 +7,7 @@
 # |           | |___| | | |  __/ (__|   <    | |  | | . \            |
 # |            \____|_| |_|\___|\___|_|\_\___|_|  |_|_|\_\           |
 # |                                                                  |
-# | Copyright Mathias Kettner 2013             mk@mathias-kettner.de |
+# | Copyright Mathias Kettner 2014             mk@mathias-kettner.de |
 # +------------------------------------------------------------------+
 #
 # This file is part of Check_MK.
@@ -24,22 +24,51 @@
 # to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
 # Boston, MA 02110-1301 USA.
 
-# Environment macros are turned of due to Livestatus. So we
-# need to go the hard (but efficient) way of using command line
-# arguments. Fetching things via Livestatus would be possible
-# but might introduce problems (for example race conditions).
-
-# Specify a command that reads a mail body from stdin (an UTF-8
-# encoded one) and can use any of the variables contact, email,
-# hostname, servicedesc, hoststate, servicestate, output in
-# the form %(variable)s
+import pprint, urllib, select, subprocess, socket
 
-import urllib
+# Please have a look at doc/Notifications.png:
+#
+# There are two types of contexts:
+# 1. Raw contexts (purple)
+#    -> These come out from the monitoring core. They are not yet
+#       assigned to a certain plugin. In case of rule based notifications
+#       they are not even assigned to a certain contact.
+#
+# 2. Plugin contexts (cyan)
+#    -> These already bear all information about the contact, the plugin
+#       to call and its parameters.
+
+#   .--Configuration-------------------------------------------------------.
+#   |    ____             __ _                       _   _                 |
+#   |   / ___|___  _ __  / _(_) __ _ _   _ _ __ __ _| |_(_) ___  _ __      |
+#   |  | |   / _ \| '_ \| |_| |/ _` | | | | '__/ _` | __| |/ _ \| '_ \     |
+#   |  | |__| (_) | | | |  _| | (_| | |_| | | | (_| | |_| | (_) | | | |    |
+#   |   \____\___/|_| |_|_| |_|\__, |\__,_|_|  \__,_|\__|_|\___/|_| |_|    |
+#   |                          |___/                                       |
+#   +----------------------------------------------------------------------+
+#   | Default values of global configuration variables.
| +# '----------------------------------------------------------------------' # Default settings -notification_logdir = var_dir + "/notify" -notification_log = notification_logdir + "/notify.log" -notification_logging = 0 +notification_logdir = var_dir + "/notify" +notification_spooldir = var_dir + "/notify/spool" +notification_bulkdir = var_dir + "/notify/bulk" +notification_core_log = var_dir + "/notify/nagios.log" # Fallback for history if no CMC running +notification_log = log_dir + "/notify.log" +notification_logging = 1 +notification_backlog = 10 # keep the last 10 notification contexts for reference + +# Settings for new rule based notifications +enable_rulebased_notifications = False +notification_fallback_email = "" +notification_rules = [] +notification_bulk_interval = 10 # Check every 10 seconds for ripe bulks + +# Notification Spooling +notification_spooling = False +notification_spool_to = None + + notification_log_template = \ u"$CONTACTNAME$ - $NOTIFICATIONTYPE$ - " \ u"$HOSTNAME$ $HOSTSTATE$ - " \ @@ -69,389 +98,1057 @@ $LONGSERVICEOUTPUT$ """ -test_vars = { - 'host': { - 'NOTIFY_CONTACTEMAIL': 'lm@mathias-kettner.de', - 'NOTIFY_CONTACTNAME': 'lm', - 'NOTIFY_CONTACTPAGER': '', - 'NOTIFY_DATE': '2013-01-17', - 'NOTIFY_HOSTADDRESS': '127.0.0.1', - 'NOTIFY_HOSTALIAS': 'localhost', - 'NOTIFY_HOSTCHECKCOMMAND': 'check-mk-ping', - 'NOTIFY_HOSTDOWNTIME': '0', - 'NOTIFY_HOSTNAME': 'localhost', - 'NOTIFY_HOSTNOTIFICATIONNUMBER': '1', - 'NOTIFY_HOSTOUTPUT': 'Manually set to Down by lm', - 'NOTIFY_HOSTPERFDATA': '', - 'NOTIFY_HOSTPROBLEMID': '136', - 'NOTIFY_HOSTSTATE': 'DOWN', - 'NOTIFY_HOSTSTATEID': '1', - 'NOTIFY_HOSTTAGS': 'cmk-agent prod lan tcp wato /wato/', - 'NOTIFY_LASTHOSTSTATE': 'UP', - 'NOTIFY_LASTHOSTSTATECHANGE': '1358761208', - 'NOTIFY_LASTHOSTSTATECHANGE_REL': '0d 00:11:38', - 'NOTIFY_LOGDIR': '/omd/sites/event/var/check_mk/notify', - 'NOTIFY_LONGDATETIME': 'Thu Jan 17 15:28:13 CET 2013', - 'NOTIFY_LONGHOSTOUTPUT': '', - 'NOTIFY_NOTIFICATIONTYPE': 'PROBLEM', - 'NOTIFY_PARAMETERS': '', - 'NOTIFY_SHORTDATETIME': '2013-01-17 15:28:13', - 'NOTIFY_WHAT': 'HOST', - 'NOTIFY_OMD_ROOT': '/omd/sites/event', - 'NOTIFY_OMD_SITE': 'event', - 'NOTIFY_MAIL_COMMAND': 'mail -s \'$SUBJECT$\' \'$CONTACTEMAIL$\'', - }, - 'service': { - 'NOTIFY_CONTACTEMAIL': 'lm@mathias-kettner.de', - 'NOTIFY_CONTACTNAME': 'lm', - 'NOTIFY_CONTACTPAGER': '', - 'NOTIFY_DATE': '2013-01-17', - 'NOTIFY_HOSTADDRESS': '127.0.0.1', - 'NOTIFY_HOSTALIAS': 'localhost', - 'NOTIFY_HOSTCHECKCOMMAND': 'check-mk-ping', - 'NOTIFY_HOSTDOWNTIME': '0', - 'NOTIFY_HOSTNAME': 'localhost', - 'NOTIFY_HOSTNOTIFICATIONNUMBER': '0', - 'NOTIFY_HOSTOUTPUT': 'OK - 127.0.0.1: rta 0.028ms, lost 0%', - 'NOTIFY_HOSTPERFDATA': 'rta=0.028ms;200.000;500.000;0; pl=0%;40;80;; rtmax=0.052ms;;;; rtmin=0.021ms;;;;', - 'NOTIFY_HOSTPROBLEMID': '0', - 'NOTIFY_HOSTSTATE': 'UP', - 'NOTIFY_HOSTSTATEID': '0', - 'NOTIFY_HOSTTAGS': 'cmk-agent prod lan tcp wato /wato/', - 'NOTIFY_LASTHOSTSTATE': 'UP', - 'NOTIFY_LASTHOSTSTATECHANGE': '1358761208', - 'NOTIFY_LASTHOSTSTATECHANGE_REL': '0d 00:11:38', - 'NOTIFY_LASTSERVICESTATE': 'OK', - 'NOTIFY_LASTSERVICESTATECHANGE': '1358761208', - 'NOTIFY_LASTSERVICESTATECHANGE_REL': '0d 00:00:01', - 'NOTIFY_LOGDIR': '/omd/sites/event/var/check_mk/notify', - 'NOTIFY_LONGDATETIME': 'Thu Jan 17 15:31:46 CET 2013', - 'NOTIFY_LONGHOSTOUTPUT': '', - 'NOTIFY_LONGSERVICEOUTPUT': '', - 'NOTIFY_NOTIFICATIONTYPE': 'PROBLEM', - 'NOTIFY_PARAMETERS': '', - 'NOTIFY_SERVICECHECKCOMMAND': 'check_mk-cpu.loads', - 'NOTIFY_SERVICEDESC': 'CPU 
load', - 'NOTIFY_SERVICENOTIFICATIONNUMBER': '1', - 'NOTIFY_SERVICEOUTPUT': 'CRIT - 15min load 1.29 at 2 CPUs (critical at 0.00)', - 'NOTIFY_SERVICEPERFDATA': 'load1=1.35;0;0;0;2 load5=1.33;0;0;0;2 load15=1.29;0;0;0;2', - 'NOTIFY_SERVICEPROBLEMID': '137', - 'NOTIFY_SERVICESTATE': 'CRITICAL', - 'NOTIFY_SERVICESTATEID': '2', - 'NOTIFY_SHORTDATETIME': '2013-01-17 15:31:46', - 'NOTIFY_WHAT': 'SERVICE', - 'NOTIFY_OMD_ROOT': '/omd/sites/event', - 'NOTIFY_OMD_SITE': 'event', - 'NOTIFY_MAIL_COMMAND': 'mail -s \'$SUBJECT$\' \'$CONTACTEMAIL$\'', - }, -} - -g_interactive = False - -def set_fake_env(ty, context): - os.environ.update(test_vars[ty]) - context.update(dict([(k[7:], v) for (k, v) in test_vars[ty].items()])) - -def substitute_context(template, context): - # First replace all known variables - for varname, value in context.items(): - template = template.replace('$'+varname+'$', value) - - # Remove the rest of the variables and make them empty - template = re.sub("\$[A-Z]+\$", "", template) - return template - -def notify_log(message): - if g_interactive or notification_logging >= 1: - formatted = (u"[%d] " % int(time.time())) + message + "\n" - if g_interactive: - sys.stdout.write(formatted.encode("utf-8")) - else: - file(notification_log, "a").write(formatted.encode("utf-8")) +#. +# .--Main----------------------------------------------------------------. +# | __ __ _ | +# | | \/ | __ _(_)_ __ | +# | | |\/| |/ _` | | '_ \ | +# | | | | | (_| | | | | | | +# | |_| |_|\__,_|_|_| |_| | +# | | +# +----------------------------------------------------------------------+ +# | Main code entry point. | +# '----------------------------------------------------------------------' def notify_usage(): - sys.stderr.write("""Usage: check_mk --notify - check_mk --notify fake-service - check_mk --notify fake-host + sys.stderr.write("""Usage: check_mk --notify [--keepalive] + check_mk --notify spoolfile Normally the notify module is called without arguments to send real notification. But there are situations where this module is called with COMMANDS to e.g. support development of notification plugins. Available commands: - fake-service ... Calls the given notification plugin with fake - notification data of a service notification. - fake-host ... Calls the given notification plugin with fake - notification data of a host notification. + spoolfile Reads the given spoolfile and creates a + notification out of its data + stdin Read one notification context from stdin instead + of taking variables from environment + replay N Uses the N'th recent notification from the backlog + and sends it again, counting from 0. + send-bulks Send out ripe bulk notifications """) -def get_readable_rel_date(timestamp): - try: - change = int(timestamp) - except: - change = 0 - rel_time = time.time() - change - seconds = rel_time % 60 - rem = rel_time / 60 - minutes = rem % 60 - hours = (rem % 1440) / 60 - days = rem / 1440 - return '%dd %02d:%02d:%02d' % (days, hours, minutes, seconds) - -def urlencode(s): - return urllib.quote(s) +# Main function called by cmk --notify. It either starts the +# keepalive mode (used by CMC), sends out one notifications from +# several possible sources or sends out all ripe bulk notifications. 
 def do_notify(args):
+    global notify_mode, notification_logging
+    if notification_logging == 0:
+        notification_logging = 1 # transform deprecated value 0 to 1
     try:
-        mode = 'notify'
+        if not os.path.exists(notification_logdir):
+            os.makedirs(notification_logdir)
+        if not os.path.exists(notification_spooldir):
+            os.makedirs(notification_spooldir)
+
+        notify_mode = 'notify'
         if args:
-            if len(args) != 2 or args[0] not in ['fake-service', 'fake-host']:
+            notify_mode = args[0]
+            if notify_mode not in [ 'stdin', 'spoolfile', 'replay', 'send-bulks' ]:
                 sys.stderr.write("ERROR: Invalid call to check_mk --notify.\n\n")
                 notify_usage()
                 sys.exit(1)
-            mode, plugin = args
-            global g_interactive
-            g_interactive = True
+            if len(args) != 2 and notify_mode not in [ "stdin", "replay", "send-bulks" ]:
+                sys.stderr.write("ERROR: need an argument to --notify %s.\n\n" % notify_mode)
+                sys.exit(1)
 
-        if not os.path.exists(notification_logdir):
-            os.makedirs(notification_logdir)
+            elif notify_mode == 'spoolfile':
+               filename = args[1]
 
-        # First we need to find out who the notification is supposed to go to.
-        # Hopefully that is available as an environment variable; if not, it must be
-        # added in check_mk_templates.cfg. Then we can look into the contact
-        # definitions, which should be present in main.mk/conf.d. The new
-        # notification table must go in there in any case. So for the user we look up
-        # that table. If it does not exist (no entry at all), we fall back to the old
-        # mechanism and send an email directly. If the table does exist, we
-        # evaluate it line by line:
-        # - Determine whether the line is active. This may require a query back to
-        #   Livestatus. That is not optimal, but at least necessary because of the timeperiods.
-        # - If active, we call the corresponding plugin. The plugin has registered itself
-        #   with a Python function. Where are those defined? In the precompiled case
-        #   we do *not* need this. So the plugins could simply be read in at --notify
-        #   time. That is not very time-critical, since notifications are rare.
- - # Information about notification is excpected in the - # environment in variables with the prefix NOTIFY_ - context = dict([ - (var[7:], value.decode("utf-8")) - for (var, value) - in os.environ.items() - if var.startswith("NOTIFY_") - and not re.match('^\$[A-Z]+\$$', value)]) - - # Add a few further helper variables - import socket - context["MONITORING_HOST"] = socket.gethostname() - if omd_root: - context["OMD_ROOT"] = omd_root - context["OMD_SITE"] = os.getenv("OMD_SITE", "") - - context["WHAT"] = context.get("SERVICEDESC") and "SERVICE" or "HOST" - context["MAIL_COMMAND"] = notification_mail_command - - # Handle interactive calls - if mode == 'fake-service': - set_fake_env('service', context) - - elif mode == 'fake-host': - set_fake_env('host', context) - - context['HOSTURL'] = '/check_mk/view.py?view_name=hoststatus&host=%s' % urlencode(context['HOSTNAME']) - if context['WHAT'] == 'SERVICE': - context['SERVICEURL'] = '/check_mk/view.py?view_name=service&host=%s&service=%s' % \ - (urlencode(context['HOSTNAME']), urlencode(context['SERVICEDESC'])) - - if mode in [ 'fake-service', 'fake-host' ]: - sys.exit(call_notification_script(plugin, [], context)) - - if 'LASTHOSTSTATECHANGE' in context: - context['LASTHOSTSTATECHANGE_REL'] = get_readable_rel_date(context['LASTHOSTSTATECHANGE']) - if context['WHAT'] != 'HOST' and 'LASTSERVICESTATECHANGE' in context: - context['LASTSERVICESTATECHANGE_REL'] = get_readable_rel_date(context['LASTSERVICESTATECHANGE']) - - if notification_logging >= 2: - notify_log("Notification context:\n" - + "\n".join(["%s=%s" % v for v in sorted(context.items())])) - - if not context: - sys.stderr.write("check_mk --notify expects context data in environment variables " - "that are prefixed with NOTIFY_\n") - sys.exit(1) + elif notify_mode == 'replay': + try: + replay_nr = int(args[1]) + except: + replay_nr = 0 + + + # If the notify_mode is set to 'spoolfile' we try to parse the given spoolfile + # This spoolfile contains a python dictionary + # { context: { Dictionary of environment variables }, plugin: "Plugin name" } + # Any problems while reading the spoolfile results in returning 2 + # -> mknotifyd deletes this file + if notify_mode == "spoolfile": + return handle_spoolfile(filename) + + elif opt_keepalive: + notify_keepalive() + + elif notify_mode == 'replay': + raw_context = raw_context_from_backlog(replay_nr) + notify_notify(raw_context) - # Get notification settings for the contact in question - if available. 
- method = "email" - contact = contacts.get(context["CONTACTNAME"]) + elif notify_mode == 'stdin': + notify_notify(raw_context_from_stdin()) - try: - if contact: - method = contact.get("notification_method") - else: - method = 'email' - if type(method) == tuple and method[0] == 'flexible': - notify_flexible(contact, context, method[1]) - else: - notify_via_email(context) + elif notify_mode == "send-bulks": + send_ripe_bulks() - except Exception, e: - notify_log("ERROR: %s\n%s" % (e, format_exception())) - sys.stderr.write("ERROR: %s\n" % e) - if notification_log: - sys.stderr.write("Details have been logged to %s.\n" % notification_log) - sys.exit(1) + else: + notify_notify(raw_context_from_env()) except Exception, e: - if g_interactive: - raise crash_dir = var_dir + "/notify" if not os.path.exists(crash_dir): os.makedirs(crash_dir) - file(crash_dir + "/crash.log", "a").write("CRASH:\n%s\n\n" % format_exception()) - + file(crash_dir + "/crash.log", "a").write("CRASH (%s):\n%s\n" % + (time.strftime("%Y-%m-%d %H:%M:%S"), format_exception())) -def notify_via_email(context): - notify_log(substitute_context(notification_log_template, context)) - if "SERVICEDESC" in context: - subject_t = notification_service_subject - body_t = notification_service_body +# This function processes one raw notification and decides wether it +# should be spooled or not. In the latter cased a local delivery +# is being done. +def notify_notify(raw_context, analyse=False): + if not analyse: + store_notification_backlog(raw_context) + + notify_log("----------------------------------------------------------------------") + if analyse: + notify_log("Analysing notification context with %s variables" % len(raw_context)) else: - subject_t = notification_host_subject - body_t = notification_host_body + notify_log("Got raw notification context with %s variables" % len(raw_context)) + + # Add some further variable for the conveniance of the plugins - subject = substitute_context(subject_t, context) - context["SUBJECT"] = subject - body = substitute_context(notification_common_body + body_t, context) - command = substitute_context(notification_mail_command, context) - command_utf8 = command.encode("utf-8") if notification_logging >= 2: - notify_log("Executing command: %s" % command) - notify_log(body) - # Make sure that mail(x) is using UTF-8. More then - # setting the locale cannot be done here. We hope that - # C.UTF-8 is always available. Please check the output - # of 'locale -a' on your system if you are curious. 
- os.putenv("LANG", "C.UTF-8") + encoded_context = dict(raw_context.items()) + convert_context_to_unicode(encoded_context) + notify_log("Raw notification context:\n" + + "\n".join([" %s=%s" % v for v in sorted(encoded_context.items())])) + + raw_keys = list(raw_context.keys()) + try: + complete_raw_context(raw_context) + except Exception, e: + notify_log("Error on completing raw context: %s" % e) + if notification_logging >= 2: - file(var_dir + "/notify/body.log", "w").write(body.encode("utf-8")) - os.popen(command_utf8, "w").write(body.encode("utf-8")) + notify_log("Computed variables:\n" + + "\n".join(sorted([" %s=%s" % (k, raw_context[k]) for k in raw_context if k not in raw_keys]))) + # Spool notification to remote host, if this is enabled + if notification_spool_to: + remote_host, tcp_port, also_local = notification_spool_to + target_site = "%s:%s" % (remote_host, tcp_port) + create_spoolfile({"context": raw_context, "forward": target_site}) + if not also_local: + return + + return locally_deliver_raw_context(raw_context, analyse=analyse) + + +# Here we decide which notification implementation we are using. +# Hopefully we can drop a couple of them some day +# 1. Rule Based Notifiations (since 1.2.5i1) +# 2. Flexible Notifications (since 1.2.2) +# 3. Plain email notification (refer to git log if you are really interested) +def locally_deliver_raw_context(raw_context, analyse=False): + contactname = raw_context.get("CONTACTNAME") + try: -def notify_flexible(contact, context, notification_table): - notify_log("Flexible notification for %s" % context["CONTACTNAME"]) - for entry in notification_table: - plugin = entry["plugin"] - notify_log("Plugin: %s" % plugin) + # If rule based notifications are enabled then the Micro Core does not set the + # variable CONTACTNAME. In the other cores the CONTACTNAME is being set to + # check-mk-notify. + # We do we not simply check the config variable enable_rulebased_notifications? + # -> Because the core needs are restart in order to reflect this while the + # notification mode of Check_MK not. There are thus situations where the + # setting of the core is different from our global variable. The core must + # have precedence in this situation! + if not contactname or contactname == "check-mk-notify": + # 1. RULE BASE NOTIFICATIONS + notify_log("Preparing rule based notifications") + return notify_rulebased(raw_context, analyse=analyse) + + if analyse: + return # Analysis only possible when rule based notifications are enabled + + # Now fetch all configuration about that contact (it needs to be configure via + # Check_MK for that purpose). If we do not know that contact then we cannot use + # flexible notifications even if they are enabled. + contact = contacts.get(contactname) + + if contact.get("disable_notifications", False): + notify_log("Notifications for %s are disabled in personal settings. Skipping." % contactname) + return - # Check disabling - if entry.get("disabled"): - notify_log("- Skipping: it is disabled for this user") - continue + # Get notification settings for the contact in question - if available. + if contact: + method = contact.get("notification_method", "email") + else: + method = "email" - # Check host, if configured - if entry.get("only_hosts"): - hostname = context.get("HOSTNAME") - if hostname not in entry["only_hosts"]: - notify_log(" - Skipping: host '%s' matches non of %s" % (hostname, ", ".join(entry["only_hosts"]))) - continue + if type(method) == tuple and method[0] == 'flexible': + # 2. 
FLEXIBLE NOTIFICATIONS + notify_log("Preparing flexible notifications for %s" % contactname) + notify_flexible(raw_context, method[1]) - # Check service, if configured - if entry.get("only_services"): - servicedesc = context.get("SERVICEDESC") - if not servicedesc: - notify_log(" - Proceed: limited to certain services, but this is a host notification") + else: + # 3. PLAIN EMAIL NOTIFICATION + notify_log("Preparing plain email notifications for %s" % contactname) + notify_plain_email(raw_context) + + except Exception, e: + if opt_debug: + raise + notify_log("ERROR: %s\n%s" % (e, format_exception())) + + +def notification_replay_backlog(nr): + global notify_mode + notify_mode = "replay" + raw_context = raw_context_from_backlog(nr) + notify_notify(raw_context) + + +def notification_analyse_backlog(nr): + global notify_mode + notify_mode = "replay" + raw_context = raw_context_from_backlog(nr) + return notify_notify(raw_context, analyse=True) + + +#. +# .--Keepalive-Mode (Used by CMC)----------------------------------------. +# | _ __ _ _ | +# | | |/ /___ ___ _ __ __ _| (_)_ _____ | +# | | ' // _ \/ _ \ '_ \ / _` | | \ \ / / _ \ | +# | | . \ __/ __/ |_) | (_| | | |\ V / __/ | +# | |_|\_\___|\___| .__/ \__,_|_|_| \_/ \___| | +# | |_| | +# +----------------------------------------------------------------------+ +# | Implementation of cmk --notify --keepalive, which is being used | +# | by the Micro Core. | +# '----------------------------------------------------------------------' + +def notify_keepalive(): + last_config_timestamp = config_timestamp() + + # Send signal that we are ready to receive the next notification, but + # not after a config-reload-restart (see below) + if os.getenv("CMK_NOTIFY_RESTART") != "1": + notify_log("Starting in keepalive mode with PID %d" % os.getpid()) + sys.stdout.write("*") + sys.stdout.flush() + else: + notify_log("We are back after a restart.") + + while True: + try: + # Invalidate timeperiod cache + global g_inactive_timerperiods + g_inactive_timerperiods = None + + # If the configuration has changed, we do a restart. But we do + # this check just before the next notification arrives. We must + # *not* read data from stdin, just peek! There is still one + # problem: when restarting we must *not* send the initial '*' + # byte, because that must be not no sooner then the notification + # has been sent. We do this by setting the environment variable + # CMK_NOTIFY_RESTART=1 + + if notify_data_available(): + if last_config_timestamp != config_timestamp(): + notify_log("Configuration has changed. Restarting myself.") + os.putenv("CMK_NOTIFY_RESTART", "1") + os.execvp("cmk", sys.argv) + + data = "" + while not data.endswith("\n\n"): + try: + new_data = "" + new_data = os.read(0, 32768) + except IOError, e: + new_data = "" + except Exception, e: + if opt_debug: + raise + notify_log("Cannot read data from CMC: %s" % e) + + if not new_data: + notify_log("CMC has closed the connection. 
Shutting down.") + sys.exit(0) # closed stdin, this is + data += new_data + + try: + context = raw_context_from_string(data.rstrip('\n')) + notify_notify(context) + except Exception, e: + if opt_debug: + raise + notify_log("ERROR %s\n%s" % (e, format_exception())) + + # Signal that we are ready for the next notification + sys.stdout.write("*") + sys.stdout.flush() + + + # Fix vor Python 2.4: + except SystemExit, e: + sys.exit(e) + except Exception, e: + if opt_debug: + raise + notify_log("ERROR %s\n%s" % (e, format_exception())) + + send_ripe_bulks() + + +def notify_data_available(): + readable, writeable, exceptionable = select.select([0], [], [], notification_bulk_interval) + return not not readable + +#. +# .--Rule-Based-Notifications--------------------------------------------. +# | ____ _ _ _ | +# | | _ \ _ _| | ___| |__ __ _ ___ ___ __| | | +# | | |_) | | | | |/ _ \ '_ \ / _` / __|/ _ \/ _` | | +# | | _ <| |_| | | __/ |_) | (_| \__ \ __/ (_| | | +# | |_| \_\\__,_|_|\___|_.__/ \__,_|___/\___|\__,_| | +# | | +# +----------------------------------------------------------------------+ +# | Logic for rule based notifications | +# '----------------------------------------------------------------------' + +def notify_rulebased(raw_context, analyse=False): + # First step: go through all rules and construct our table of + # notification plugins to call. This is a dict from (user, plugin) to + # a pair if (locked, parameters). If locked is True, then a user + # cannot cancel this notification via his personal notification rules. + # Example: + # notifications = { + # ( "hh", "email" ) : ( False, [] ), + # ( "hh", "sms" ) : ( True, [ "0171737337", "bar" ] ), + # } + + notifications = {} + num_rule_matches = 0 + rule_info = [] + + for rule in notification_rules + user_notification_rules(): + if "contact" in rule: + notify_log("User %s's rule '%s'..." % (rule["contact"], rule["description"])) + else: + notify_log("Global rule '%s'..." 
% rule["description"]) + + why_not = rbn_match_rule(rule, raw_context) # also checks disabling + if why_not: + notify_log(" -> does not match: %s" % why_not) + rule_info.append(("miss", rule, why_not)) + else: + notify_log(" -> matches!") + num_rule_matches += 1 + contacts = rbn_rule_contacts(rule, raw_context) + + # Handle old-style and new-style rules + if "notify_method" in rule: # old-style + plugin = rule["notify_plugin"] + plugin_parameters = rule["notify_method"] # None: do cancel, [ str ]: plugin parameters else: - for s in entry["only_services"]: - if re.match(s, servicedesc): - break - else: - notify_log(" - Skipping: service '%s' matches non of %s" % ( - servicedesc, ", ".join(entry["only_services"]))) - continue + plugin, plugin_parameters = rule["notify_plugin"] - # Check notification type - event, allowed_events = check_notification_type(context, entry["host_events"], entry["service_events"]) - if event not in allowed_events: - notification_type = context.get("NOTIFICATIONTYPE","") - notify_log(" - Skipping: wrong notification type %s (%s), only %s are allowed" % - (event, notification_type, ",".join(allowed_events)) ) - continue + bulk = rule.get("bulk") - # Check notification number (in case of repeated notifications/escalations) - if "escalation" in entry: - from_number, to_number = entry["escalation"] - if context["WHAT"] == "HOST": - notification_number = int(context.get("HOSTNOTIFICATIONNUMBER", 1)) + if plugin_parameters == None: # cancelling + for contact in contacts: + key = contact, plugin + if key in notifications: + locked, plugin_parameters, bulk = notifications[key] + if locked and "contact" in rule: + notify_log(" - cannot cancel notification of %s via %s: it is locked" % key) + else: + notify_log(" - cancelling notification of %s via %s" % key) + del notifications[key] else: - notification_number = int(context.get("SERVICENOTIFICATIONNUMBER", 1)) - if notification_number < from_number or notification_number > to_number: - notify_log(" - Skipping: notification number %d does not lie in range %d ... %d" % - (notification_number, from_number, to_number)) - continue + final_parameters = rbn_finalize_plugin_parameters(raw_context["HOSTNAME"], plugin, plugin_parameters) + for contact in contacts: + key = contact, plugin + plugintxt = plugin or "plain email" + if key in notifications: + locked, previous_parameters, old_bulk = notifications[key] + if locked and "contact" in rule: + notify_log(" - cannot modify notification of %s via %s: it is locked" % (contact, plugintxt)) + continue + notify_log(" - modifying notification of %s via %s" % (contact, plugintxt)) + else: + notify_log(" - adding notification of %s via %s" % (contact, plugintxt)) + notifications[key] = ( not rule.get("allow_disable"), final_parameters, bulk ) + + rule_info.append(("match", rule, "")) + + plugin_info = [] + + if not notifications: + if num_rule_matches: + notify_log("%d rules matched, but no notification has been created." 
% num_rule_matches) + else: + if notification_fallback_email and not analyse: + notify_log("No rule matched, falling back to email to %s" % notification_fallback_email) + plugin_context = create_plugin_context(raw_context, []) + contact = rbn_fake_email_contact(notification_fallback_email) + rbn_add_contact_information(plugin_context, contact) + notify_via_email(plugin_context) - if "timeperiod" in entry: - timeperiod = entry["timeperiod"] - if timeperiod and timeperiod != "24X7": - if not check_timeperiod(timeperiod): - notify_log(" - Skipping: time period %s is currently not active" % timeperiod) - continue + else: + # Now do the actual notifications + notify_log("Executing %d notifications:" % len(notifications)) + entries = notifications.items() + entries.sort() + for (contact, plugin), (locked, params, bulk) in entries: + if analyse: + verb = "would notify" + else: + verb = "notifying" + notify_log(" * %s %s via %s, parameters: %s, bulk: %s" % ( + verb, contact, (plugin or "plain email"), params and ", ".join(params) or "(no parameters)", + bulk and "yes" or "no")) + plugin_info.append((contact, plugin, params, bulk)) # for analysis + try: + plugin_context = create_plugin_context(raw_context, params) + rbn_add_contact_information(plugin_context, contact) + if not analyse: + if bulk: + do_bulk_notify(contact, plugin, params, plugin_context, bulk) + elif notification_spooling: + create_spoolfile({"context": plugin_context, "plugin": plugin}) + else: + call_notification_script(plugin, plugin_context) + + except Exception, e: + if opt_debug: + raise + fe = format_exception() + notify_log(" ERROR: %s" % e) + notify_log(fe) + + analysis_info = rule_info, plugin_info + return analysis_info + +def rbn_finalize_plugin_parameters(hostname, plugin, rule_parameters): + # Right now we are only able to finalize notification plugins with dict parameters.. + if type(rule_parameters) == dict: + parameters = host_extra_conf_merged(hostname, notification_parameters.get(plugin, [])) + parameters.update(rule_parameters) + return parameters + else: + return rule_parameters - if plugin is None: - notify_via_email(context) +def add_rulebased_macros(raw_context): + # For the rule based notifications we need the list of contacts + # an object has. The CMC does send this in the macro "CONTACTS" + if "CONTACTS" not in raw_context: + raw_context["CONTACTS"] = livestatus_fetch_contacts(raw_context["HOSTNAME"], raw_context.get("SERVICEDESC")) + + # Add a pseudo contact name. This is needed for the correct creation + # of spool files. Spool files are created on a per-contact-base, as in classical + # notifications the core sends out one individual notification per contact. + # In the case of rule based notifications we do not make distinctions between + # the various contacts. + raw_context["CONTACTNAME"] = "check-mk-notify" + + +# Create a table of all user specific notification rules. Important: +# create deterministic order, so that rule analyses can depend on +# rule indices +def user_notification_rules(): + user_rules = [] + contactnames = contacts.keys() + contactnames.sort() + for contactname in contactnames: + contact = contacts[contactname] + for rule in contact.get("notification_rules", []): + # Save the owner of the rule for later debugging + rule["contact"] = contactname + # We assume that the "contact_..." entries in the + # rule are allowed and only contain one entry of the + # type "contact_users" : [ contactname ]. This + # is handled by WATO. 
Contact specific rules are a
+            # WATO-only feature anyway...
+            user_rules.append(rule)
+    notify_log("Found %d user specific rules" % len(user_rules))
+    return user_rules
+
+
+def rbn_fake_email_contact(email):
+    return {
+        "name"  : email,
+        "alias" : "Explicit email address " + email,
+        "email" : email,
+        "pager" : "",
+    }
+
+
+def rbn_add_contact_information(plugin_context, contact):
+    if type(contact) == dict:
+        for what in [ "name", "alias", "email", "pager" ]:
+            plugin_context["CONTACT" + what.upper()] = contact.get(what, "")
+        for key in contact.keys():
+            if key[0] == '_':
+                plugin_context["CONTACT" + key.upper()] = unicode(contact[key])
+    else:
+        if contact.startswith("mailto:"): # Fake contact
+            contact_dict = {
+                "name"  : contact[7:],
+                "alias" : "Email address " + contact,
+                "email" : contact[7:],
+                "pager" : "" }
         else:
-        call_notification_script(plugin, entry.get("parameters", []), context)
+            contact_dict = contacts.get(contact, { "alias" : contact })
+            contact_dict["name"] = contact
+        rbn_add_contact_information(plugin_context, contact_dict)
-
-def call_notification_script(plugin, parameters, context):
-    # Prepare environment
-    os.putenv("NOTIFY_PARAMETERS", " ".join(parameters))
-    for nr, value in enumerate(parameters):
-        os.putenv("NOTIFY_PARAMETER_%d" % (nr + 1), value)
-    os.putenv("NOTIFY_LOGDIR", notification_logdir)
-
-    for key in [ 'WHAT', 'OMD_ROOT', 'OMD_SITE',
-                 'MAIL_COMMAND', 'LASTHOSTSTATECHANGE_REL' ]:
-        if key in context:
-            os.putenv('NOTIFY_' + key, context[key])
-
-    # Remove service macros for host notifications
-    if context['WHAT'] == 'HOST':
-        for key in context.keys():
-            if 'SERVICE' in key:
-                os.unsetenv('NOTIFY_%s' % key)
-
-    # Remove exceeding arguments from previous plugin calls
-    for nr in range(len(parameters)+1, 101):
-        name = "NOTIFY_PARAMETER_%d" % nr
-        if name in os.environ:
-            os.putenv(name, "")
-
-    # Call actual script without any arguments
-    if local_notifications_dir:
-        path = local_notifications_dir + "/" + plugin
-        if not os.path.exists(path):
-            path = notifications_dir + "/" + plugin
+
+def livestatus_fetch_contacts(host, service):
+    try:
+        if service:
+            query = "GET services\nFilter: host_name = %s\nFilter: service_description = %s\nColumns: contacts\n" % (
+                host, service)
+        else:
+            query = "GET hosts\nFilter: host_name = %s\nColumns: contacts\n" % host
+
+        commasepped = livestatus_fetch_query(query).strip()
+        aslist = commasepped.split(",")
+        if "check-mk-notify" in aslist: # Remove artificial contact used for rule based notifications
+            aslist.remove("check-mk-notify")
+        return ",".join(aslist)
+
+    except:
+        if opt_debug:
+            raise
+        return "" # We must allow notifications without Livestatus access
+
+
+
+def rbn_match_rule(rule, context):
+    if rule.get("disabled"):
+        return "This rule is disabled"
+
+    return \
+        rbn_match_folder(rule, context) or \
+        rbn_match_hosttags(rule, context) or \
+        rbn_match_hostgroups(rule, context) or \
+        rbn_match_servicegroups(rule, context) or \
+        rbn_match_contactgroups(rule, context) or \
+        rbn_match_hosts(rule, context) or \
+        rbn_match_exclude_hosts(rule, context) or \
+        rbn_match_services(rule, context) or \
+        rbn_match_exclude_services(rule, context) or \
+        rbn_match_plugin_output(rule, context) or \
+        rbn_match_checktype(rule, context) or \
+        rbn_match_timeperiod(rule) or \
+        rbn_match_escalation(rule, context) or \
+        rbn_match_escalation_throtte(rule, context) or \
+        rbn_match_servicelevel(rule, context) or \
+        rbn_match_host_event(rule, context) or \
+        rbn_match_service_event(rule, context) or \
+        rbn_match_event_console(rule, context)
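Each rbn_match_* helper chained above follows one convention: it returns
None if the rule matches and a human-readable reason string if it does not,
so rbn_match_rule() can simply combine them with `or`. A hypothetical
additional matcher, purely to illustrate the convention (the rule key
match_notification_comment is invented for this sketch; regex() is the
helper already used by rbn_match_plugin_output below):

    def rbn_match_notification_comment(rule, context):
        if "match_notification_comment" in rule:
            r = regex(rule["match_notification_comment"])
            comment = context.get("NOTIFICATIONCOMMENT", "")
            if not r.search(comment):
                return "The notification comment '%s' does not match '%s'" % (
                    comment, rule["match_notification_comment"])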
+
+
+def rbn_match_folder(rule, context):
+    if "match_folder" in rule:
+        mustfolder = rule["match_folder"]
+        mustpath = mustfolder.split("/")
+        hasfolder = None
+        for tag in context.get("HOSTTAGS", "").split():
+            if tag.startswith("/wato/"):
+                hasfolder = tag[6:].rstrip("/")
+                haspath = hasfolder.split("/")
+                if mustpath == ["",]:
+                    return # Match is on main folder, always OK
+                while mustpath:
+                    if not haspath or mustpath[0] != haspath[0]:
+                        return "The rule requires WATO folder '%s', but the host is in '%s'" % (
+                            mustfolder, hasfolder)
+                    mustpath = mustpath[1:]
+                    haspath = haspath[1:]
+
+        if hasfolder == None:
+            return "The host is not managed via WATO, but the rule requires a WATO folder"
+
+
+def rbn_match_hosttags(rule, context):
+    required = rule.get("match_hosttags")
+    if required:
+        tags = context.get("HOSTTAGS", "").split()
+        if not hosttags_match_taglist(tags, required):
+            return "The host's tags %s do not match the required tags %s" % (
+                "|".join(tags), "|".join(required))
+
+
+def rbn_match_servicegroups(rule, context):
+    if context["WHAT"] != "SERVICE":
+        return
+    required_groups = rule.get("match_servicegroups")
+    if required_groups != None:
+        sgn = context.get("SERVICEGROUPNAMES")
+        if sgn == None:
+            return "No information about service groups is in the context, but service " \
+                   "must be in group %s" % ( " or ".join(required_groups))
+        if sgn:
+            servicegroups = sgn.split(",")
+        else:
+            return "The service is in no group, but %s is required" % (
+                " or ".join(required_groups))
+
+        for group in required_groups:
+            if group in servicegroups:
+                return
+
+        return "The service is only in the groups %s, but %s is required" % (
+            sgn, " or ".join(required_groups))
+
+def rbn_match_contactgroups(rule, context):
+    required_groups = rule.get("match_contactgroups")
+    if context["WHAT"] == "SERVICE":
+        cgn = context.get("SERVICECONTACTGROUPNAMES")
     else:
-        path = notifications_dir + "/" + plugin
+        cgn = context.get("HOSTCONTACTGROUPNAMES")
-
-    if not os.path.exists(path):
-        notify_log("Notification plugin '%s' not found" % plugin)
-        notify_log("  not in %s" % notifications_dir)
-        if local_notifications_dir:
-            notify_log("  and not in %s" % local_notifications_dir)
+
+    if required_groups != None:
+        if cgn == None:
+            notify_log("Warning: No information about contact groups in the context. " \
+                       "Seems that you don't use the Check_MK Micro Core.")
+            return
+        if cgn:
+            contactgroups = cgn.split(",")
+        else:
+            return "The object is in no group, but %s is required" % (
+                " or ".join(required_groups))
+
+        for group in required_groups:
+            if group in contactgroups:
+                return
+
+        return "The object is only in the groups %s, but %s is required" % (
+            cgn, " or ".join(required_groups))
+
+
+def rbn_match_hostgroups(rule, context):
+    required_groups = rule.get("match_hostgroups")
+    if required_groups != None:
+        hgn = context.get("HOSTGROUPNAMES")
+        if hgn == None:
+            return "No information about host groups is in the context, but host " \
+                   "must be in group %s" % ( " or ".join(required_groups))
+        if hgn:
+            hostgroups = hgn.split(",")
+        else:
+            return "The host is in no group, but %s is required" % (
+                " or ".join(required_groups))
+
+        for group in required_groups:
+            if group in hostgroups:
+                return
+
+        return "The host is only in the groups %s, but %s is required" % (
+            hgn, " or ".join(required_groups))
+
+
+def rbn_match_hosts(rule, context):
+    if "match_hosts" in rule:
+        hostlist = rule["match_hosts"]
+        if context["HOSTNAME"] not in hostlist:
+            return "The host's name '%s' is not on the list of allowed hosts (%s)" % (
+                context["HOSTNAME"], ", ".join(hostlist))
+
+
+def rbn_match_exclude_hosts(rule, context):
+    if context["HOSTNAME"] in rule.get("match_exclude_hosts", []):
+        return "The host's name '%s' is on the list of excluded hosts" % context["HOSTNAME"]
+
+
+def rbn_match_services(rule, context):
+    if "match_services" in rule:
+        if context["WHAT"] != "SERVICE":
+            return "The rule specifies a list of services, but this is a host notification."
+        servicelist = rule["match_services"]
+        service = context["SERVICEDESC"]
+        if not in_extraconf_servicelist(servicelist, service):
+            return "The service's description '%s' does not match the list of " \
+                   "allowed services (%s)" % (service, ", ".join(servicelist))
+
+
+def rbn_match_exclude_services(rule, context):
+    if context["WHAT"] != "SERVICE":
         return
+    excludelist = rule.get("match_exclude_services", [])
+    service = context["SERVICEDESC"]
+    if in_extraconf_servicelist(excludelist, service):
+        return "The service's description '%s' matches the list of excluded services" \
+            % context["SERVICEDESC"]
-
-    notify_log("Executing %s" % path)
-    out = os.popen(path + " 2>&1 </dev/null")
-    for line in out:
-        notify_log("Output: %s" % line.rstrip())
-    exitcode = out.close()
-    if exitcode:
-        notify_log("Plugin exited with code %d" % (exitcode >> 8))
-        return exitcode
-    return 0
+
+def rbn_match_plugin_output(rule, context):
+    if "match_plugin_output" in rule:
+        r = regex(rule["match_plugin_output"])
+
+        if context["WHAT"] == "SERVICE":
+            output = context["SERVICEOUTPUT"]
+        else:
+            output = context["HOSTOUTPUT"]
+        if not r.search(output):
+            return "The expression '%s' cannot be found in the plugin output '%s'" % \
+                (rule["match_plugin_output"], output)
+
+
+def rbn_match_checktype(rule, context):
+    if "match_checktype" in rule:
+        if context["WHAT"] != "SERVICE":
+            return "The rule specifies a list of Check_MK plugins, but this is a host notification."
+        command = context["SERVICECHECKCOMMAND"]
+        if not command.startswith("check_mk-"):
+            return "The rule specifies a list of Check_MK plugins, but this is not a Check_MK service."
+        plugin = command[9:]
+        allowed = rule["match_checktype"]
+        if plugin not in allowed:
+            return "The Check_MK plugin '%s' is not on the list of allowed plugins (%s)" % \
+                (plugin, ", ".join(allowed))
+
+
+def rbn_match_timeperiod(rule):
+    if "match_timeperiod" in rule:
+        timeperiod = rule["match_timeperiod"]
+        if timeperiod != "24X7" and not check_timeperiod(timeperiod):
+            return "The timeperiod '%s' is currently not active." 
% timeperiod + + +def rbn_match_escalation(rule, context): + if "match_escalation" in rule: + from_number, to_number = rule["match_escalation"] + if context["WHAT"] == "HOST": + notification_number = int(context.get("HOSTNOTIFICATIONNUMBER", 1)) + else: + notification_number = int(context.get("SERVICENOTIFICATIONNUMBER", 1)) + if notification_number < from_number or notification_number > to_number: + return "The notification number %d does not lie in range %d ... %d" % ( + notification_number, from_number, to_number) + +def rbn_match_escalation_throtte(rule, context): + if "match_escalation_throttle" in rule: + from_number, rate = rule["match_escalation_throttle"] + if context["WHAT"] == "HOST": + notification_number = int(context.get("HOSTNOTIFICATIONNUMBER", 1)) + else: + notification_number = int(context.get("SERVICENOTIFICATIONNUMBER", 1)) + if notification_number <= from_number: + return + if (notification_number - from_number) % rate != 0: + return "This notification is being skipped due to throttling. The next number will be %d" % \ + (notification_number + rate - ((notification_number - from_number) % rate)) + +def rbn_match_servicelevel(rule, context): + if "match_sl" in rule: + from_sl, to_sl = rule["match_sl"] + if context['WHAT'] == "SERVICE" and context.get('SVC_SL','').isdigit(): + sl = saveint(context.get('SVC_SL')) + else: + sl = saveint(context.get('HOST_SL')) + + if sl < from_sl or sl > to_sl: + return "The service level %d is not between %d and %d." % (sl, from_sl, to_sl) + + +def rbn_match_host_event(rule, context): + if "match_host_event" in rule: + if context["WHAT"] != "HOST": + if "match_service_event" not in rule: + return "This is a service notification, but the rule just matches host events" + else: + return # Let this be handled by match_service_event + allowed_events = rule["match_host_event"] + state = context["HOSTSTATE"] + last_state = context["PREVIOUSHOSTHARDSTATE"] + events = { "UP" : 'r', "DOWN" : 'd', "UNREACHABLE" : 'u' } + return rbn_match_event(context, state, last_state, events, allowed_events) + + +def rbn_match_service_event(rule, context): + if "match_service_event" in rule: + if context["WHAT"] != "SERVICE": + if "match_host_event" not in rule: + return "This is a host notification, but the rule just matches service events" + else: + return # Let this be handled by match_host_event + allowed_events = rule["match_service_event"] + state = context["SERVICESTATE"] + last_state = context["PREVIOUSSERVICEHARDSTATE"] + events = { "OK" : 'r', "WARNING" : 'w', "CRITICAL" : 'c', "UNKNOWN" : 'u' } + return rbn_match_event(context, state, last_state, events, allowed_events) + + +def rbn_match_event(context, state, last_state, events, allowed_events): + notification_type = context["NOTIFICATIONTYPE"] + + if notification_type == "RECOVERY": + event = events.get(last_state, '?') + 'r' + elif notification_type in [ "FLAPPINGSTART", "FLAPPINGSTOP", "FLAPPINGDISABLED" ]: + event = 'f' + elif notification_type in [ "DOWNTIMESTART", "DOWNTIMEEND", "DOWNTIMECANCELLED"]: + event = 's' + elif notification_type == "ACKNOWLEDGEMENT": + event = 'x' + else: + event = events.get(last_state, '?') + events.get(state, '?') + + notify_log("Event type is %s" % event) + + # Now go through the allowed events. Handle '?' has matching all types! + for allowed in allowed_events: + if event == allowed or \ + event[0] == '?' and len(allowed) > 1 and event[1] == allowed[1]: + return + + return "Event type '%s' not handled by this rule. 
Allowed are: %s" % ( + event, ", ".join(allowed_events)) + + +def rbn_rule_contacts(rule, context): + the_contacts = set([]) + if rule.get("contact_object"): + the_contacts.update(rbn_object_contacts(context)) + if rule.get("contact_all"): + the_contacts.update(rbn_all_contacts()) + if rule.get("contact_all_with_email"): + the_contacts.update(rbn_all_contacts(with_email=True)) + if "contact_users" in rule: + the_contacts.update(rule["contact_users"]) + if "contact_groups" in rule: + the_contacts.update(rbn_groups_contacts(rule["contact_groups"])) + if "contact_emails" in rule: + the_contacts.update(rbn_emails_contacts(rule["contact_emails"])) + + all_enabled = [] + for contactname in the_contacts: + contact = contacts.get(contactname) + if contact and contact.get("disable_notifications", False): + notify_log(" - skipping contact %s: he/she has disabled notifications" % contactname) + else: + all_enabled.append(contactname) + + return all_enabled + + +def rbn_match_event_console(rule, context): + if "match_ec" in rule: + match_ec = rule["match_ec"] + is_ec_notification = "EC_ID" in context + if match_ec == False and is_ec_notification: + return "Notification has been created by the Event Console." + elif match_ec != False and not is_ec_notification: + return "Notification has not been created by the Event Console." + + if match_ec != False: + + # Match Event Console rule ID + if "match_rule_id" in match_ec and context["EC_RULE_ID"] != match_ec["match_rule_id"]: + return "EC Event has rule ID '%s', but '%s' is required" % ( + context["EC_RULE_ID"], match_ec["match_rule_id"]) + + # Match syslog priority of event + if "match_priority" in match_ec: + prio_from, prio_to = match_ec["match_priority"] + if prio_from > prio_to: + prio_to, prio_from = prio_from, prio_to + p = int(context["EC_PRIORITY"]) + if p < prio_from or p > prio_to: + return "Event has priority %s, but matched range is %s .. %s" % ( + p, prio_from, prio_to) + + # Match syslog facility of event + if "match_facility" in match_ec: + if match_ec["match_facility"] != int(context["EC_FACILITY"]): + return "Wrong syslog facility %s, required is %s" % (context["EC_FACILITY"], match_ec["match_facility"]) + + # Match event comment + if "match_comment" in match_ec: + r = regex(match_ec["match_comment"]) + if not r.search(context["EC_COMMENT"]): + return "The event comment '%s' does not match the regular expression '%s'" % ( + context["EC_COMMENT"], match_ec["match_comment"]) + + +def rbn_object_contacts(context): + commasepped = context.get("CONTACTS") + if commasepped: + return commasepped.split(",") + else: + return [] + + +def rbn_all_contacts(with_email=None): + if not with_email: + return contacts.keys() # We have that via our main.mk contact definitions! + else: + return [ + contact_id + for (contact_id, contact) + in contacts.items() + if contact.get("email")] + + +def rbn_groups_contacts(groups): + if not groups: + return {} + contacts = set([]) + query = "GET contactgroups\nColumns: members\n" + for group in groups: + query += "Filter: name = %s\n" % group + query += "Or: %d\n" % len(groups) + response = livestatus_fetch_query(query) + for line in response.splitlines(): + line = line.strip() + if line: + contacts.update(line.split(",")) + return contacts + + +def rbn_emails_contacts(emails): + return [ "mailto:" + e for e in emails ] + + +#. +# .--Flexible-Notifications----------------------------------------------. 
+#   |            _____ _           _ _     _                               |
+#   |           |  ___| | _____  _(_) |__ | | ___                          |
+#   |           | |_  | |/ _ \ \/ / | '_ \| |/ _ \                         |
+#   |           |  _| | |  __/>  <| | |_) | |  __/                         |
+#   |           |_|   |_|\___/_/\_\_|_.__/|_|\___|                         |
+#   |                                                                      |
+#   +----------------------------------------------------------------------+
+#   | Implementation of the pre 1.2.5, hopelessly outdated flexible        |
+#   | notifications.                                                       |
+#   '----------------------------------------------------------------------'
+
+def notify_flexible(raw_context, notification_table):
+
+    for entry in notification_table:
+        plugin = entry["plugin"]
+        notify_log(" Notification channel with plugin %s" % (plugin or "plain email"))
+
+        if not should_notify(raw_context, entry):
+            continue
+
+        plugin_context = create_plugin_context(raw_context, entry.get("parameters", []))
+
+        if notification_spooling:
+            create_spoolfile({"context": plugin_context, "plugin": plugin})
+        else:
+            call_notification_script(plugin, plugin_context)
+
+# may return
+# 0  : everything fine -> proceed
+# 1  : currently not OK -> try to process later on
+# >=2: invalid -> discard
+def should_notify(context, entry):
+    # Check disabling
+    if entry.get("disabled"):
+        notify_log(" - Skipping: it is disabled for this user")
+        return False
+
+    # Check host, if configured
+    if entry.get("only_hosts"):
+        hostname = context.get("HOSTNAME")
+
+        skip = True
+        regex = False
+        negate = False
+        for h in entry["only_hosts"]:
+            if h.startswith("!"): # negate
+                negate = True
+                h = h[1:]
+            elif h.startswith('~'):
+                regex = True
+                h = h[1:]
+
+            if not regex and hostname == h:
+                skip = negate
+                break
+
+            elif regex and re.match(h, hostname):
+                skip = negate
+                break
+        if skip:
+            notify_log(" - Skipping: host '%s' matches none of %s" % (hostname, ", ".join(entry["only_hosts"])))
+            return False
+
+    # Check if the host has to be in a special service_level
+    if "match_sl" in entry:
+        from_sl, to_sl = entry['match_sl']
+        if context['WHAT'] == "SERVICE" and context.get('SVC_SL','').isdigit():
+            sl = saveint(context.get('SVC_SL'))
+        else:
+            sl = saveint(context.get('HOST_SL'))
+
+        if sl < from_sl or sl > to_sl:
+            notify_log(" - Skipping: service level %d not between %d and %d" % (sl, from_sl, to_sl))
+            return False
+
+    # Skip blacklisted services
+    if entry.get("service_blacklist"):
+        servicedesc = context.get("SERVICEDESC")
+        if not servicedesc:
+            notify_log(" - Proceed: blacklist certain services, but this is a host notification")
+        else:
+            for s in entry["service_blacklist"]:
+                if re.match(s, servicedesc):
+                    notify_log(" - Skipping: service '%s' matches blacklist (%s)" % (
+                        servicedesc, ", ".join(entry["service_blacklist"])))
+                    return False
+
+    # Check service, if configured
+    if entry.get("only_services"):
+        servicedesc = context.get("SERVICEDESC")
+        if not servicedesc:
+            notify_log(" - Proceed: limited to certain services, but this is a host notification")
+        else:
+            # Example
+            # only_services = [ "!LOG foo", "LOG", "BAR" ]
+            # -> notify all services beginning with LOG or BAR, but not "LOG foo..."
+            skip = True
+            for s in entry["only_services"]:
+                if s.startswith("!"): # negate
+                    negate = True
+                    s = s[1:]
+                else:
+                    negate = False
+                if re.match(s, servicedesc):
+                    skip = negate
+                    break
+            if skip:
+                notify_log(" - Skipping: service '%s' matches none of %s" % (
+                    servicedesc, ", ".join(entry["only_services"])))
+                return False
+
+    # Check notification type
+    event, allowed_events = check_notification_type(context, entry["host_events"], entry["service_events"])
+    if event not in allowed_events:
+        notify_log(" - Skipping: wrong notification type %s (%s), only %s are allowed" %
+            (event, context["NOTIFICATIONTYPE"], ",".join(allowed_events)) )
+        return False
+
+    # Check notification number (in case of repeated notifications/escalations)
+    if "escalation" in entry:
+        from_number, to_number = entry["escalation"]
+        if context["WHAT"] == "HOST":
+            notification_number = int(context.get("HOSTNOTIFICATIONNUMBER", 1))
+        else:
+            notification_number = int(context.get("SERVICENOTIFICATIONNUMBER", 1))
+        if notification_number < from_number or notification_number > to_number:
+            notify_log(" - Skipping: notification number %d does not lie in range %d ... %d" %
+                (notification_number, from_number, to_number))
+            return False
+
+    if "timeperiod" in entry:
+        timeperiod = entry["timeperiod"]
+        if timeperiod and timeperiod != "24X7":
+            if not check_timeperiod(timeperiod):
+                notify_log(" - Skipping: time period %s is currently not active" % timeperiod)
+                return False
+    return True
 
 def check_notification_type(context, host_events, service_events):
     notification_type = context["NOTIFICATIONTYPE"]
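For orientation, a hypothetical flexible-notification table entry of the
shape should_notify() above expects; all values are invented, and such
entries are normally generated by WATO:

    entry = {
        "plugin"         : "mail",
        "parameters"     : [],
        "disabled"       : False,
        "only_hosts"     : [ "~web.*", "!webtest" ],   # '~' = regex, '!' = negate
        "only_services"  : [ "!LOG foo", "LOG", "BAR" ],
        "host_events"    : [ 'd', 'r' ],
        "service_events" : [ 'c', 'w', 'r' ],
        "escalation"     : ( 1, 5 ),
        "timeperiod"     : "24X7",
    }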
@@ -477,9 +1174,806 @@
     return event, allowed_events
 
+#.
+#   .--Plain Email---------------------------------------------------------.
+#   |   ____  _       _         _____                 _ _                  |
+#   |  |  _ \| | __ _(_)_ __   | ____|_ __ ___   __ _(_) |                 |
+#   |  | |_) | |/ _` | | '_ \  |  _| | '_ ` _ \ / _` | | |                 |
+#   |  |  __/| | (_| | | | | | | |___| | | | | | (_| | | |                 |
+#   |  |_|   |_|\__,_|_|_| |_| |_____|_| |_| |_|\__,_|_|_|                 |
+#   |                                                                      |
+#   +----------------------------------------------------------------------+
+#   | Plain Email notification, inline implemented. This is also being     |
+#   | used as a pseudo-plugin by Flexible Notification and RBN.            |
+#   '----------------------------------------------------------------------'
+
+def notify_plain_email(raw_context):
+    plugin_context = create_plugin_context(raw_context, [])
+
+    if notification_spooling:
+        create_spoolfile({"context": plugin_context, "plugin" : None})
+    else:
+        notify_log("Sending plain email to %s" % plugin_context["CONTACTNAME"])
+        notify_via_email(plugin_context)
+
+
+def notify_via_email(plugin_context):
+    notify_log(substitute_context(notification_log_template, plugin_context))
+
+    if plugin_context["WHAT"] == "SERVICE":
+        subject_t = notification_service_subject
+        body_t = notification_service_body
+    else:
+        subject_t = notification_host_subject
+        body_t = notification_host_body
+
+    subject = substitute_context(subject_t, plugin_context)
+    plugin_context["SUBJECT"] = subject
+    body = substitute_context(notification_common_body + body_t, plugin_context)
+    command = substitute_context(notification_mail_command, plugin_context)
+    command_utf8 = command.encode("utf-8")
+
+    # Make sure that mail(x) is using UTF-8. Otherwise we cannot send notifications
+    # with non-ASCII characters. Unfortunately we do not know whether C.UTF-8 is
+    # available. If e.g. nail detects a non-Ascii character in the mail body and
+    # the specified encoding is not available, it will silently not send the mail!
+    # Our resolution in the future: use /usr/sbin/sendmail directly.
+    # Our resolution in the present: look with locale -a for an existing UTF encoding
+    # and use that.
+    old_lang = os.getenv("LANG", "")
+    for encoding in os.popen("locale -a 2>/dev/null"):
+        l = encoding.lower()
+        if "utf8" in l or "utf-8" in l or "utf.8" in l:
+            encoding = encoding.strip()
+            os.putenv("LANG", encoding)
+            if notification_logging >= 2:
+                notify_log("Setting locale for mail to %s." % encoding)
+            break
+    else:
+        notify_log("No UTF-8 encoding found in your locale -a! Please provide C.UTF-8 encoding.")
+
+    # Important: we must not output anything on stdout or stderr. Data of stdout
+    # goes back into the socket to the CMC in keepalive mode and garbles the
+    # handshake signal.
+    if notification_logging >= 2:
+        notify_log("Executing command: %s" % command)
+
+    p = subprocess.Popen(command_utf8, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
+    stdout_txt, stderr_txt = p.communicate(body.encode("utf-8"))
+    exitcode = p.returncode
+    os.putenv("LANG", old_lang) # Important: do not destroy our environment
+    if exitcode != 0:
+        notify_log("ERROR: could not deliver mail. Exit code of command is %r" % exitcode)
+        for line in (stdout_txt + stderr_txt).splitlines():
+            notify_log("mail: %s" % line.rstrip())
+        return 2
+
+    return 0
+
+
+#.
+#   .--Plugins-------------------------------------------------------------.
+#   |               ____  _             _                                  |
+#   |              |  _ \| |_   _  __ _(_)_ __  ___                        |
+#   |              | |_) | | | | |/ _` | | '_ \/ __|                       |
+#   |              |  __/| | |_| | (_| | | | | \__ \                       |
+#   |              |_|   |_|\__,_|\__, |_|_| |_|___/                       |
+#   |                             |___/                                    |
+#   +----------------------------------------------------------------------+
+#   | Code for the actual calling of notification plugins (scripts).      |
+#   '----------------------------------------------------------------------'
+
+# Exit codes for plugins and also for our functions that call the plugins:
+# 0: Notification successfully sent
+# 1: Could not send now, please retry later
+# 2: Cannot send, retry does not make sense
+
+# Add the plugin parameters to the environment. We have two types of parameters:
+# - list, the legacy style. This will lead to PARAMETERS and PARAMETER_1, ...
+# - dict, the new style for scripts with WATO rule. This will lead to
+#   PARAMETER_FOO_BAR for a dict key named "foo_bar".
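A small illustration, with invented values, of what the two styles turn
into inside the plugin context created by create_plugin_context() below
(and thus into the NOTIFY_* environment of the plugin):

    # List style (legacy):
    #   params = [ "one", "two" ]
    #   -> PARAMETERS="one two", PARAMETER_1="one", PARAMETER_2="two"
    # Dict style (WATO rule):
    #   params = { "from_address": "cmk@example.com" }
    #   -> PARAMETER_FROM_ADDRESS="cmk@example.com"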
+def create_plugin_context(raw_context, params):
+    plugin_context = {}
+    plugin_context.update(raw_context) # Make a real copy
+
+    if type(params) == list:
+        plugin_context["PARAMETERS"] = " ".join(params)
+        for nr, param in enumerate(params):
+            plugin_context["PARAMETER_%d" % (nr + 1)] = param
+    else:
+        for key, value in params.items():
+            plugin_context["PARAMETER_" + key.upper()] = plugin_param_to_string(value)
+    return plugin_context
+
+
+def create_bulk_parameter_context(params):
+    dict_context = create_plugin_context({}, params)
+    return [ "%s=%s\n" % (varname, value.replace("\r", "").replace("\n", "\1"))
+             for (varname, value) in dict_context.items() ]
+
+
+def plugin_param_to_string(value):
+    if type(value) in ( str, unicode ):
+        return value
+    elif type(value) in ( int, float ):
+        return str(value)
+    elif value == None:
+        return ""
+    elif value == True:
+        return "yes"
+    elif value == False:
+        return ""
+    elif type(value) in ( tuple, list ):
+        return "\t".join(value)
+    else:
+        return repr(value) # Should never happen
+
+
+def path_to_notification_script(plugin):
+    # Call actual script without any arguments
+    if local_notifications_dir:
+        path = local_notifications_dir + "/" + plugin
+        if not os.path.exists(path):
+            path = notifications_dir + "/" + plugin
+    else:
+        path = notifications_dir + "/" + plugin
+
+    if not os.path.exists(path):
+        notify_log("Notification plugin '%s' not found" % plugin)
+        notify_log(" not in %s" % notifications_dir)
+        if local_notifications_dir:
+            notify_log(" and not in %s" % local_notifications_dir)
+        return None
+
+    else:
+        return path
+
+# This is the function that finally sends the actual notification.
+# It does this by calling an external script or by creating a
+# plain email and calling bin/mail.
+#
+# It also does the central logging of the notifications
+# that are actually sent out.
+#
+# Note: this function is *not* being called for bulk notification.
+def call_notification_script(plugin, plugin_context):
+    core_notification_log(plugin, plugin_context)
+
+    # The "Pseudo"-Plugin None means builtin plain email
+    if not plugin:
+        return notify_via_email(plugin_context)
+
+    # Call actual script without any arguments
+    path = path_to_notification_script(plugin)
+    if not path:
+        return 2
+
+    # Export complete context to have all vars in environment.
+    # Existing vars are replaced, some already existing might remain
+    for key in plugin_context:
+        if type(plugin_context[key]) == bool:
+            notify_log("INTERNAL ERROR: %s=%s is of type bool" % (key, plugin_context[key]))
+        os.putenv('NOTIFY_' + key, plugin_context[key].encode('utf-8'))
+
+    notify_log(" executing %s" % path)
+    out = os.popen(path + " 2>&1 </dev/null")
+    for line in out:
+        notify_log("Output: %s" % line.rstrip())
+
+    exitcode = out.close()
+    if exitcode:
+        notify_log("Plugin exited with code %d" % (exitcode >> 8))
+        exitcode = exitcode >> 8
+    else:
+        exitcode = 0
+
+    # Clear environment again. TODO: We could use subprocess.Popen and specify
+    # the environment without destroying it?
+    for key in plugin_context:
+        os.unsetenv('NOTIFY_' + key)
+
+    return exitcode
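To make the plugin side of this contract concrete: a minimal, hypothetical
notification script. It relies only on what call_notification_script()
above exports (the NOTIFY_* environment variables) and on the exit-code
convention listed above; its stdout is captured and written to the log:

    #!/usr/bin/env python
    # Hypothetical plugin: report what would be notified and succeed.
    import os, sys

    context = dict([ (var[7:], value)
                     for (var, value) in os.environ.items()
                     if var.startswith("NOTIFY_") ])

    print "%s notification for %s (%s)" % (
        context.get("WHAT"), context.get("HOSTNAME"), context.get("NOTIFICATIONTYPE"))
    sys.exit(0)   # 0: success, 1: retry later, 2: do not retry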
+
+
+#.
+#   .--Spooling------------------------------------------------------------.
+#   |            ____                    _ _                               |
+#   |           / ___| _ __   ___   ___ | (_)_ __   __ _                   |
+#   |           \___ \| '_ \ / _ \ / _ \| | | '_ \ / _` |                  |
+#   |            ___) | |_) | (_) | (_) | | | | | | (_| |                  |
+#   |           |____/| .__/ \___/ \___/|_|_|_| |_|\__, |                  |
+#   |                 |_|                          |___/                   |
+#   +----------------------------------------------------------------------+
+#   | Some functions dealing with the spooling of notifications.           |
+#   '----------------------------------------------------------------------'
+
+def create_spoolfile(data):
+    if not os.path.exists(notification_spooldir):
+        os.makedirs(notification_spooldir)
+    file_path = "%s/%s" % (notification_spooldir, fresh_uuid())
+    notify_log("Creating spoolfile: %s" % file_path)
+    file(file_path,"w").write(pprint.pformat(data))
+
+
+# There are three types of spool files:
+# 1. Notifications to be forwarded. Contain key "forward"
+# 2. Notifications for async local delivery. Contain key "plugin"
+# 3. Notifications that *got* forwarded. Contain neither of the two.
+# Spool files of type 1 are not handled here!
+def handle_spoolfile(spoolfile):
+    try:
+        data = eval(file(spoolfile).read())
+        if "plugin" in data:
+            plugin_context = data["context"]
+            plugin = data["plugin"]
+            notify_log("Got spool file for local delivery via %s" % (
+                plugin or "plain mail"))
+            return call_notification_script(plugin, plugin_context)
+
+        else:
+            # We received a forwarded raw notification. We need to process
+            # this with our local notification rules in order to call one,
+            # several or no actual plugins.
+            notify_log("Got spool file from remote host for local delivery.")
+            raw_context = data["context"]
+            locally_deliver_raw_context(data["context"])
+            return 0 # No error handling for async delivery
+
+    except Exception, e:
+        notify_log("ERROR %s\n%s" % (e, format_exception()))
+        return 2
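Schematically, such a spool file holds pprint.pformat() of the dict passed
to create_spoolfile(). A shortened, hypothetical type-2 file (async local
delivery):

    {'context': {'CONTACTNAME': 'hh',
                 'HOSTNAME': 'localhost',
                 'HOSTSTATE': 'DOWN',
                 'NOTIFICATIONTYPE': 'PROBLEM',
                 'WHAT': 'HOST'},
     'plugin': 'mail'}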
+
+
+#.
+#   .--Bulk-Notifications--------------------------------------------------.
+#   |                     ____        _ _                                  |
+#   |                    | __ ) _   _| | | __                              |
+#   |                    |  _ \| | | | | |/ /                              |
+#   |                    | |_) | |_| | |   <                               |
+#   |                    |____/ \__,_|_|_|\_\                              |
+#   |                                                                      |
+#   +----------------------------------------------------------------------+
+#   | Store postponed bulk notifications for later delivery. Deliver such  |
+#   | notifications on cmk --notify bulk.                                  |
+#   '----------------------------------------------------------------------'
+
+def do_bulk_notify(contact, plugin, params, plugin_context, bulk):
+    # First identify the bulk. The following elements identify it:
+    # 1. contact
+    # 2. plugin
+    # 3. time horizon (interval) in seconds
+    # 4. max bulked notifications
+    # 5. elements specified in bulk["groupby"] and bulk["groupby_custom"]
+    # We first create a bulk path constructed as a tuple of strings.
+    # Later we convert that to a unique directory name.
+    # Note: if you have separate bulk rules with exactly the same
+    # bulking options, then they will use the same bulk.
+
+    what = plugin_context["WHAT"]
+    bulk_path = (contact, plugin, str(bulk["interval"]), str(bulk["count"]))
+    bulkby = bulk["groupby"]
+    if "host" in bulkby:
+        bulk_path += ("host", plugin_context["HOSTNAME"])
+    elif "folder" in bulkby:
+        bulk_path += ("folder", find_wato_folder(plugin_context))
+    if "service" in bulkby:
+        bulk_path += ("service", plugin_context.get("SERVICEDESC", ""))
+    if "sl" in bulkby:
+        sl = plugin_context.get(what + "_SL", "")
+        bulk_path += ("sl", sl)
+    if "check_type" in bulkby:
+        command = plugin_context.get(what + "CHECKCOMMAND", "").split("!")[0]
+        bulk_path += ("check_type", command)
+    if "state" in bulkby:
+        state = plugin_context.get(what + "STATE", "")
+        bulk_path += ("state", state)
+
+    # User might have specified _FOO instead of FOO
+    bulkby_custom = bulk.get("groupby_custom", [])
+    for macroname in bulkby_custom:
+        macroname = macroname.lstrip("_").upper()
+        value = plugin_context.get(what + "_" + macroname, "")
+        bulk_path += (macroname.lower(), value)
+
+    notify_log(" --> storing for bulk notification %s" % "|".join(bulk_path))
+    bulk_dirname = create_bulk_dirname(bulk_path)
+    uuid = fresh_uuid()
+    filename = bulk_dirname + "/" + uuid
+    file(filename + ".new", "w").write("%r\n" % ((params, plugin_context),))
+    os.rename(filename + ".new", filename) # We need an atomic creation!
+    notify_log(" - stored in %s" % filename)
+
+
+def find_wato_folder(context):
+    for tag in context.get("HOSTTAGS", "").split():
+        if tag.startswith("/wato/"):
+            return tag[6:].rstrip("/")
+    return ""
+
+
+def create_bulk_dirname(bulk_path):
+    dirname = notification_bulkdir + "/" + bulk_path[0] + "/" + bulk_path[1] + "/"
+    dirname += ",".join([b.replace("/", "\\") for b in bulk_path[2:]])
+
+    # Remove non-Ascii-characters by special %04x-syntax
+    try:
+        str(dirname)
+    except:
+        new_dirname = ""
+        for char in dirname:
+            if ord(char) <= 0 or ord(char) > 127:
+                new_dirname += "%%%04x" % ord(char)
+            else:
+                new_dirname += char
+        dirname = new_dirname
+
+    if not os.path.exists(dirname):
+        os.makedirs(dirname)
+        notify_log(" - created bulk directory %s" % dirname)
+    return dirname
+
+
+def find_bulks(only_ripe):
+    if not os.path.exists(notification_bulkdir):
+        return []
+
+    now = time.time()
+    bulks = []
+
+    dir_1 = notification_bulkdir
+    for contact in os.listdir(dir_1):
+        if contact.startswith("."):
+            continue
+        dir_2 = dir_1 + "/" + contact
+        for method in os.listdir(dir_2):
+            if method.startswith("."):
+                continue
+            dir_3 = dir_2 + "/" + method
+            for bulk in os.listdir(dir_3):
+                parts = bulk.split(',') # e.g. 
60,10,host,localhost + try: + interval = int(parts[0]) + count = int(parts[1]) + except: + notify_log("Skipping invalid bulk directory %s" % dir_3) + continue + dir_4 = dir_3 + "/" + bulk + uuids = [] + oldest = time.time() + for uuid in os.listdir(dir_4): # 4ded0fa2-f0cd-4b6a-9812-54374a04069f + if uuid.startswith(".") or uuid.endswith(".new"): + continue + if len(uuid) != 36: + notify_log("Skipping invalid notification file %s/%s" % (dir_4, uuid)) + continue + + mtime = os.stat(dir_4 + "/" + uuid).st_mtime + uuids.append((mtime, uuid)) + oldest = min(oldest, mtime) + + uuids.sort() + if not uuids: + dirage = now - os.stat(dir_4).st_mtime + if dirage > 60: + notify_log("Warning: removing orphaned empty bulk directory %s" % dir_4) + try: + os.rmdir(dir_4) + except Exception, e: + notify_log(" -> Error removing it: %s" % e) + continue + + age = now - oldest + if age >= interval: + notify_log("Bulk %s is ripe: age %d >= %d" % (dir_4, age, interval)) + elif len(uuids) >= count: + notify_log("Bulk %s is ripe: count %d >= %d" % (dir_4, len(uuids), count)) + else: + notify_log("Bulk %s is not ripe yet (age: %d, count: %d)!" % (dir_4, age, len(uuids))) + if only_ripe: + continue + + bulks.append((dir_4, age, interval, count, uuids)) + + return bulks + +def send_ripe_bulks(): + ripe = find_bulks(True) + if ripe: + notify_log("Sending out %d ripe bulk notifications" % len(ripe)) + for bulk in ripe: + try: + notify_bulk(bulk[0], bulk[-1]) + except Exception, e: + if opt_debug: + raise + notify_log("Error sending bulk %s: %s" % (bulk[0], format_exception())) + + +def notify_bulk(dirname, uuids): + parts = dirname.split("/") + contact = parts[-3] + plugin = parts[-2] + notify_log(" -> %s/%s %s" % (contact, plugin, dirname)) + # If new entries are created in this directory while we are working + # on it, nothing bad happens. It's just that we cannot remove + # the directory after our work. It will be the starting point for + # the next bulk with the same ID, which is completely OK. + bulk_context = [] + old_params = None + unhandled_uuids = [] + for mtime, uuid in uuids: + try: + params, context = eval(file(dirname + "/" + uuid).read()) + except Exception, e: + if opt_debug: + raise + notify_log(" Deleting corrupted or empty bulk file %s/%s: %s" % (dirname, uuid, e)) + continue + + if old_params == None: + old_params = params + elif params != old_params: + notify_log(" Parameters are different from previous, postponing into separate bulk") + unhandled_uuids.append((mtime, uuid)) + continue + + bulk_context.append("\n") + for varname, value in context.items(): + bulk_context.append("%s=%s\n" % (varname, value.replace("\r", "").replace("\n", "\1"))) + + # Do not forget to add this to the monitoring log. We create + # a single entry for each notification contained in the bulk. + # It is important later to have this precise information. + plugin_name = "bulk " + (plugin or "plain email") + core_notification_log(plugin_name, context) + + if bulk_context: # otherwise: only corrupted files + parameter_context = create_bulk_parameter_context(old_params) + context_text = "".join(parameter_context + bulk_context) + call_bulk_notification_script(plugin, context_text) + else: + notify_log("No valid notification file left. 
Skipping this bulk.") + + # Remove sent notifications + for mtime, uuid in uuids: + if (mtime, uuid) not in unhandled_uuids: + path = dirname + "/" + uuid + try: + os.remove(path) + except Exception, e: + notify_log("Cannot remove %s: %s" % (path, e)) + + # Repeat with unhandled uuids (due to different parameters) + if unhandled_uuids: + notify_bulk(dirname, unhandled_uuids) + + # Remove directory. Not neccessary if emtpy + try: + os.rmdir(dirname) + except Exception, e: + if not unhandled_uuids: + notify_log("Warning: cannot remove directory %s: %s" % (dirname, e)) + + +def call_bulk_notification_script(plugin, context_text): + path = path_to_notification_script(plugin) + if not path: + raise MKGeneralException("Notification plugin %s not found" % plugin) + + # Protocol: The script gets the context on standard input and + # read until that is closed. It is being called with the parameter + # --bulk. + p = subprocess.Popen([path, "--bulk"], shell=False, + stdout = subprocess.PIPE, stderr = subprocess.PIPE, stdin = subprocess.PIPE) + stdout_txt, stderr_txt = p.communicate(context_text.encode("utf-8")) + exitcode = p.returncode + if exitcode: + notify_log("ERROR: script %s --bulk returned with exit code %s" % (path, exitcode)) + for line in (stdout_txt + stderr_txt).splitlines(): + notify_log("%s: %s" % (plugin, line.rstrip())) + +#. +# .--Contexts------------------------------------------------------------. +# | ____ _ _ | +# | / ___|___ _ __ | |_ _____ _| |_ ___ | +# | | | / _ \| '_ \| __/ _ \ \/ / __/ __| | +# | | |__| (_) | | | | || __/> <| |_\__ \ | +# | \____\___/|_| |_|\__\___/_/\_\\__|___/ | +# | | +# +----------------------------------------------------------------------+ +# | Functions dealing with loading, storing and converting contexts. | +# '----------------------------------------------------------------------' + +# Add a few further helper variables that are usefull in notification plugins +def complete_raw_context(raw_context): + raw_context["WHAT"] = raw_context.get("SERVICEDESC") and "SERVICE" or "HOST" + raw_context["MONITORING_HOST"] = socket.gethostname() + raw_context["LOGDIR"] = notification_logdir + if omd_root: + raw_context["OMD_ROOT"] = omd_root + raw_context["OMD_SITE"] = os.getenv("OMD_SITE", "") + raw_context["MAIL_COMMAND"] = notification_mail_command + + # The Check_MK Micro Core sends the MICROTIME and no other time stamps. We add + # a few Nagios-like variants in order to be compatible + if "MICROTIME" in raw_context: + microtime = int(raw_context["MICROTIME"]) + timestamp = float(microtime) / 1000000.0 + broken = time.localtime(timestamp) + raw_context["DATE"] = time.strftime("%Y-%m-%d", broken) + raw_context["SHORTDATETIME"] = time.strftime("%Y-%m-%d %H:%M:%S", broken) + raw_context["LONGDATETIME"] = time.strftime("%a %b %d %H:%M:%S %Z %Y", broken) + + raw_context['HOSTURL'] = '/check_mk/index.py?start_url=%s' % \ + urlencode('view.py?view_name=hoststatus&host=%s' % raw_context['HOSTNAME']) + if raw_context['WHAT'] == 'SERVICE': + raw_context['SERVICEURL'] = '/check_mk/index.py?start_url=%s' % \ + urlencode('view.py?view_name=service&host=%s&service=%s' % + (raw_context['HOSTNAME'], raw_context['SERVICEDESC'])) + + # Relative Timestamps for several macros + for macro in [ 'LASTHOSTSTATECHANGE', 'LASTSERVICESTATECHANGE', 'LASTHOSTUP', 'LASTSERVICEOK' ]: + if macro in raw_context: + raw_context[macro + '_REL'] = get_readable_rel_date(raw_context[macro]) + + + # Rule based notifications enabled? 
+
+#.
+#   .--Contexts------------------------------------------------------------.
+#   |               ____            _            _                         |
+#   |              / ___|___  _ __ | |_ _____  _| |_ ___                   |
+#   |             | |   / _ \| '_ \| __/ _ \ \/ / __/ __|                  |
+#   |             | |__| (_) | | | | ||  __/>  <| |_\__ \                  |
+#   |              \____\___/|_| |_|\__\___/_/\_\\__|___/                  |
+#   |                                                                      |
+#   +----------------------------------------------------------------------+
+#   | Functions dealing with loading, storing and converting contexts.     |
+#   '----------------------------------------------------------------------'
+
+# Add a few further helper variables that are useful in notification plugins
+def complete_raw_context(raw_context):
+    raw_context["WHAT"] = raw_context.get("SERVICEDESC") and "SERVICE" or "HOST"
+    raw_context["MONITORING_HOST"] = socket.gethostname()
+    raw_context["LOGDIR"] = notification_logdir
+    if omd_root:
+        raw_context["OMD_ROOT"] = omd_root
+        raw_context["OMD_SITE"] = os.getenv("OMD_SITE", "")
+    raw_context["MAIL_COMMAND"] = notification_mail_command
+
+    # The Check_MK Micro Core sends the MICROTIME and no other time stamps. We add
+    # a few Nagios-like variants in order to be compatible
+    if "MICROTIME" in raw_context:
+        microtime = int(raw_context["MICROTIME"])
+        timestamp = float(microtime) / 1000000.0
+        broken = time.localtime(timestamp)
+        raw_context["DATE"] = time.strftime("%Y-%m-%d", broken)
+        raw_context["SHORTDATETIME"] = time.strftime("%Y-%m-%d %H:%M:%S", broken)
+        raw_context["LONGDATETIME"] = time.strftime("%a %b %d %H:%M:%S %Z %Y", broken)
+
+    raw_context['HOSTURL'] = '/check_mk/index.py?start_url=%s' % \
+        urlencode('view.py?view_name=hoststatus&host=%s' % raw_context['HOSTNAME'])
+    if raw_context['WHAT'] == 'SERVICE':
+        raw_context['SERVICEURL'] = '/check_mk/index.py?start_url=%s' % \
+            urlencode('view.py?view_name=service&host=%s&service=%s' %
+                      (raw_context['HOSTNAME'], raw_context['SERVICEDESC']))
+
+    # Relative Timestamps for several macros
+    for macro in [ 'LASTHOSTSTATECHANGE', 'LASTSERVICESTATECHANGE', 'LASTHOSTUP', 'LASTSERVICEOK' ]:
+        if macro in raw_context:
+            raw_context[macro + '_REL'] = get_readable_rel_date(raw_context[macro])
+
+
+    # Rule based notifications enabled? We might need to complete a few macros
+    contact = raw_context.get("CONTACTNAME")
+    if not contact or contact == "check-mk-notify":
+        add_rulebased_macros(raw_context)
+
+    # For custom notifications the number is set to 0 by the core (Nagios and CMC). We force at least
+    # number 1 here, so that rules with conditions on numbers do not fail (the minimum is 1 here)
+    for what in [ "HOST", "SERVICE" ]:
+        key = what + "NOTIFICATIONNUMBER"
+        if key in raw_context and raw_context[key] == "0":
+            raw_context[key] = "1"
+
+    # Add the previous hard state. This is necessary for notification rules that depend on certain transitions,
+    # like OK -> WARN (but not CRIT -> WARN). The CMC sends PREVIOUSHOSTHARDSTATE and PREVIOUSSERVICEHARDSTATE.
+    # Nagios does not have this information and we try to deduce it.
+    if "PREVIOUSHOSTHARDSTATE" not in raw_context and "LASTHOSTSTATE" in raw_context:
+        prev_state = raw_context["LASTHOSTSTATE"]
+        # When the attempts are > 1 then the last state could be identical with
+        # the current one, e.g. both critical. In that case we assume the
+        # previous hard state to be OK.
+        if prev_state == raw_context["HOSTSTATE"]:
+            prev_state = "UP"
+        elif "HOSTATTEMPT" not in raw_context or \
+            ("HOSTATTEMPT" in raw_context and raw_context["HOSTATTEMPT"] != "1"):
+            # Here we do not know. The transition might be OK -> WARN -> CRIT and
+            # the initial OK is completely lost. We use the artificial state "?"
+            # here, which matches all states and makes sure that when in doubt a
+            # notification is being sent out. But when the new state is UP, then
+            # we know that the previous state was a hard state (otherwise there
+            # would not have been any notification)
+            if raw_context["HOSTSTATE"] != "UP":
+                prev_state = "?"
+            notify_log("Previous host hard state not known. Allowing all states.")
+        raw_context["PREVIOUSHOSTHARDSTATE"] = prev_state
+
+    # Same for services
+    if raw_context["WHAT"] == "SERVICE" and "PREVIOUSSERVICEHARDSTATE" not in raw_context:
+        prev_state = raw_context["LASTSERVICESTATE"]
+        if prev_state == raw_context["SERVICESTATE"]:
+            prev_state = "OK"
+        elif "SERVICEATTEMPT" not in raw_context or \
+            ("SERVICEATTEMPT" in raw_context and raw_context["SERVICEATTEMPT"] != "1"):
+            if raw_context["SERVICESTATE"] != "OK":
+                prev_state = "?"
+            notify_log("Previous service hard state not known. Allowing all states.")
+        raw_context["PREVIOUSSERVICEHARDSTATE"] = prev_state
+
+    # Add short variants for state names (at most 4 characters)
+    for key, value in raw_context.items():
+        if key.endswith("STATE"):
+            raw_context[key[:-5] + "SHORTSTATE"] = value[:4]
+
+    if raw_context["WHAT"] == "SERVICE":
+        raw_context['SERVICEFORURL'] = urllib.quote(raw_context['SERVICEDESC'])
+    raw_context['HOSTFORURL'] = urllib.quote(raw_context['HOSTNAME'])
+
+    convert_context_to_unicode(raw_context)
+
+
+# Be aware: The backlog.mk contains the raw context which has not been decoded
+# to unicode yet. It contains raw encoded strings e.g. the plugin output provided
+# by third party plugins which might be UTF-8 encoded but can also be encoded in
+# other ways. Currently the context is converted later by both this module
+# and the GUI. TODO Maybe we should centralize the encoding here and save the
+# backlog already encoded.
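For illustration, the backlog.mk read and written below is a Python list
literal holding the most recent raw contexts, newest first. A heavily
shortened, hypothetical file with a single entry:

    [{'CONTACTNAME': 'check-mk-notify',
      'HOSTNAME': 'localhost',
      'HOSTSTATE': 'DOWN',
      'NOTIFICATIONTYPE': 'PROBLEM',
      'WHAT': 'HOST'}]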
+def store_notification_backlog(raw_context): + path = notification_logdir + "/backlog.mk" + if not notification_backlog: + if os.path.exists(path): + os.remove(path) + return + + try: + backlog = eval(file(path).read())[:notification_backlog-1] + except: + backlog = [] + + backlog = [ raw_context ] + backlog + file(path, "w").write("%r\n" % backlog) + + +def raw_context_from_backlog(nr): + try: + backlog = eval(file(notification_logdir + "/backlog.mk").read()) + except: + backlog = [] + + if nr < 0 or nr >= len(backlog): + sys.stderr.write("No notification number %d in backlog.\n" % nr) + sys.exit(2) + + notify_log("Replaying notification %d from backlog...\n" % nr) + return backlog[nr] + + +def raw_context_from_env(): + # Information about notification is excpected in the + # environment in variables with the prefix NOTIFY_ + return dict([ + (var[7:], value) + for (var, value) + in os.environ.items() + if var.startswith("NOTIFY_") + and not dead_nagios_variable(value) ]) + + +def raw_context_from_stdin(): + context = {} + for line in sys.stdin: + varname, value = line.strip().split("=", 1) + context[varname] = value.replace(r"\n", "\n").replace("\\\\", "\\") + return context + + +def raw_context_from_string(data): + # Context is line-by-line in g_notify_readahead_buffer + context = {} + try: + for line in data.split('\n'): + varname, value = line.strip().split("=", 1) + context[varname] = value.replace(r"\n", "\n").replace("\\\\", "\\") + except Exception, e: # line without '=' ignored or alerted + if opt_debug: + raise + return context + + +def convert_context_to_unicode(context): + # Convert all values to unicode + for key, value in context.iteritems(): + if type(value) == str: + try: + value_unicode = value.decode("utf-8") + except: + try: + value_unicode = value.decode("latin-1") + except: + value_unicode = u"(Invalid byte sequence)" + context[key] = value_unicode + + +def substitute_context(template, context): + # First replace all known variables + for varname, value in context.items(): + template = template.replace('$'+varname+'$', value) + + # Remove the rest of the variables and make them empty + template = re.sub("\$[A-Z]+\$", "", template) + return template + + +#. +# .--Helpers-------------------------------------------------------------. 
+#   |                  _   _      _                                        |
+#   |                 | | | | ___| |_ __   ___ _ __ ___                    |
+#   |                 | |_| |/ _ \ | '_ \ / _ \ '__/ __|                   |
+#   |                 |  _  |  __/ | |_) |  __/ |  \__ \                   |
+#   |                 |_| |_|\___|_| .__/ \___|_|  |___/                   |
+#   |                              |_|                                     |
+#   +----------------------------------------------------------------------+
+#   | Some generic helper functions                                        |
+#   '----------------------------------------------------------------------'
+
+
+def livestatus_fetch_query(query):
+    sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
+    sock.connect(livestatus_unix_socket)
+    sock.send(query)
+    sock.shutdown(socket.SHUT_WR)
+    response = sock.recv(10000000)
+    sock.close()
+    return response
+
+def livestatus_send_command(command):
+    try:
+        message = "COMMAND [%d] %s\n" % (time.time(), command)
+        sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
+        sock.connect(livestatus_unix_socket)
+        sock.send(message)
+        sock.close()
+    except Exception, e:
+        if opt_debug:
+            raise
+        notify_log("WARNING: cannot send livestatus command: %s" % e)
+        notify_log("Command was: %s" % command)
+
 
 def format_exception():
     import traceback, StringIO, sys
     txt = StringIO.StringIO()
     t, v, tb = sys.exc_info()
     traceback.print_exception(t, v, tb, None, txt)
     return txt.getvalue()
+
+
+def dead_nagios_variable(value):
+    if len(value) < 3:
+        return False
+    if value[0] != '$' or value[-1] != '$':
+        return False
+    for c in value[1:-1]:
+        if not c.isupper() and c != '_':
+            return False
+    return True
+
+
+def notify_log(message):
+    if notification_logging >= 1:
+        formatted = u"%s %s\n" % (time.strftime("%F %T", time.localtime()), message)
+        file(notification_log, "a").write(formatted.encode("utf-8"))
+
+def get_readable_rel_date(timestamp):
+    try:
+        change = int(timestamp)
+    except:
+        change = 0
+    rel_time = time.time() - change
+    seconds = rel_time % 60
+    rem = rel_time / 60
+    minutes = rem % 60
+    hours = (rem % 1440) / 60
+    days = rem / 1440
+    return '%dd %02d:%02d:%02d' % (days, hours, minutes, seconds)
+
+def urlencode(s):
+    return urllib.quote(s)
+
+def fresh_uuid():
+    try:
+        return file('/proc/sys/kernel/random/uuid').read().strip()
+    except IOError:
+        # On platforms where the above file does not exist we try to
+        # use the python uuid module which seems to be a good fallback
+        # for those systems. Well, if you got Python < 2.5 you are lost for now.
+        import uuid
+        return str(uuid.uuid4())
+
+def core_notification_log(plugin, plugin_context):
+    what = plugin_context["WHAT"]
+    contact = plugin_context["CONTACTNAME"]
+    spec = plugin_context["HOSTNAME"]
+    if what == "HOST":
+        state = plugin_context["HOSTSTATE"]
+        output = plugin_context["HOSTOUTPUT"]
+    if what == "SERVICE":
+        spec += ";" + plugin_context["SERVICEDESC"]
+        state = plugin_context["SERVICESTATE"]
+        output = plugin_context["SERVICEOUTPUT"]
+
+    log_message = "%s NOTIFICATION: %s;%s;%s;%s;%s" % (
+        what, contact, spec, state, plugin or "plain email", output)
+    if monitoring_core == "cmc":
+        livestatus_send_command("LOG;" + log_message.encode("utf-8"))
+    else:
+        # Nagios and friends do not support logging via an
+        # external command. We write the entries into a helper file
+        # in var/check_mk/notify. If the user likes, he can
+        # replace that file with a symbolic link to the nagios
+        # log file. But note: Nagios logging might not be atomic.
+        file(notification_core_log, "a").write("[%d] %s\n" % (time.time(), log_message.encode("utf-8")))
+
diff -Nru check-mk-1.2.2p3/ntp check-mk-1.2.6p12/ntp
--- check-mk-1.2.2p3/ntp 2013-11-05 09:23:08.000000000 +0000
+++ check-mk-1.2.6p12/ntp 2015-06-24 09:48:37.000000000 +0000
@@ -7,7 +7,7 @@
 # |            | |___| | | |  __/ (__|   <    | |  | | . \            |
 # |             \____|_| |_|\___|\___|_|\_\___|_|  |_|_|\_\           |
 # |                                                                   |
-# | Copyright Mathias Kettner 2013             mk@mathias-kettner.de |
+# | Copyright Mathias Kettner 2014             mk@mathias-kettner.de |
 # +------------------------------------------------------------------+
 #
 # This file is part of Check_MK.
@@ -103,13 +103,13 @@
     if when > 0:
         infotext += ", last reached %d secs ago" % when
 
-    maxstratum, warn, crit = params
+    crit_stratum, warn, crit = params
     if abs(offset) >= crit:
         return (2, "critical offset" + " " + infotext, offset, jitter)
     elif state in [ "falsetick" ]:
         return (2, infotext, offset, jitter)
-    elif stratum >= maxstratum:
-        return (2, infotext + (", stratum is too high (max allowed is %d)" % (maxstratum - 1)))
+    elif stratum >= crit_stratum:
+        return (2, infotext + (", stratum is too high (max allowed is %d)(!!)" % (crit_stratum - 1)))
 
 # The following check in some cases produces false alarms. The poll interval can
 # switch back to a low value while 'when' still being at a high value. While
@@ -129,20 +129,20 @@
         state = check_ntp_server_state(line, params)
         if len(state) == 4:
             state, text, offset, jitter = state
-            maxstratum, warn, crit = params
+            crit_stratum, warn, crit = params
             perfdata = [ ( "offset", offset, warn, crit, 0, None ),
                          ( "jitter", jitter, warn, crit, 0, None ) ]
         else:
             state, text = state
             perfdata = []
-        return (state, nagios_state_names[state] + " - " + text, perfdata)
+        return (state, text, perfdata)
 
-    return (3, "UNKNOWN - peer not found")
+    return (3, "peer not found")
 
 def check_ntp_summary(item, params, info):
     # No information at all? NTP daemon not running or timeout in ntpq -p
     if len(info) == 0:
-        return (3, "UNKNOWN - no information from NTP: timeout in ntpq -p or NTP daemon not running")
+        return (3, "no information from NTP: timeout in ntpq -p or NTP daemon not running")
 
     # We only are interested in our system peer or pulse per second source (pps)
     for line in info:
@@ -150,10 +150,22 @@
         state, text, perfdata = check_ntp(line[1], params, [line])
         text += " (synchronized on %s)" % line[1]
         return (state, text, perfdata)
-    return (2, "CRIT - found %d peers, but none is suitable" % len(info))
+    return (2, "found %d peers, but none is suitable" % len(info))
+
 
-check_info['ntp'] = (check_ntp, "NTP Peer %s", 1, inventory_ntp)
-checkgroup_of['ntp'] = 'ntp_time'
-check_info['ntp.time'] = (check_ntp_summary, "NTP Time", 1, inventory_ntp_summary)
-checkgroup_of['ntp.time'] = 'ntp_time'
+check_info["ntp"] = {
+    'check_function':      check_ntp,
+    'inventory_function':  inventory_ntp,
+    'service_description': 'NTP Peer %s',
+    'has_perfdata':        True,
+    'group':               'ntp_peer',
+}
+
+check_info["ntp.time"] = {
+    'check_function':      check_ntp_summary,
+    'inventory_function':  inventory_ntp_summary,
+    'service_description': 'NTP Time',
+    'has_perfdata':        True,
+    'group':               'ntp_time',
+}
diff -Nru check-mk-1.2.2p3/ntp.time check-mk-1.2.6p12/ntp.time
--- check-mk-1.2.2p3/ntp.time 2013-11-05 09:23:07.000000000 +0000
+++ check-mk-1.2.6p12/ntp.time 2015-06-24 09:48:36.000000000 +0000
@@ -1,6 +1,6 @@
-title: Check state of NTP time
+title: NTP time synchronization using ntpd
 agents: linux
-author: Mathias Kettner
+catalog: os/services
 license: GPL
 distribution: check_mk
 description:
diff -Nru check-mk-1.2.2p3/nvidia check-mk-1.2.6p12/nvidia
--- check-mk-1.2.2p3/nvidia 2013-11-05 09:23:08.000000000 +0000
+++ check-mk-1.2.6p12/nvidia 2015-06-24 09:48:37.000000000 +0000
@@ -7,7 +7,7 @@
 # |            | |___| | | |  __/ (__|   <    | |  | | . \            |
 # |             \____|_| |_|\___|\___|_|\_\___|_|  |_|_|\_\           |
 # |                                                                   |
-# | Copyright Mathias Kettner 2013             mk@mathias-kettner.de |
+# | Copyright Mathias Kettner 2014             mk@mathias-kettner.de |
 # +------------------------------------------------------------------+
 #
 # This file is part of Check_MK.
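The ntp hunks above show the pattern that recurs through this whole release diff: the old positional 4-tuple registration (plus a separate checkgroup_of table) is replaced by a dictionary with named keys. The sketch below is editorial, not part of the diff; it maps the old tuple fields onto the new keys for a hypothetical check, with check_info and checkgroup_of stood in by plain dicts. All names starting with "my_" are invented for illustration.

# Editorial sketch: old tuple-style vs. new dict-style check registration.
check_info = {}      # stands in for Check_MK's global registration table
checkgroup_of = {}   # old-style separate table for the rule group

def inventory_my_check(info):
    return [(line[0], None) for line in info]

def check_my_check(item, _no_params, info):
    return (0, "everything fine")

# Old style: (check_function, service_description, has_perfdata, inventory_function)
check_info['my_check'] = (check_my_check, "My Check %s", 0, inventory_my_check)
checkgroup_of['my_check'] = 'my_group'

# New style: the same information under named keys, the group included;
# state prefixes like "OK -" are now added by the framework, which is why
# the hunks above strip them from the check output.
check_info['my_check'] = {
    'check_function':      check_my_check,
    'inventory_function':  inventory_my_check,
    'service_description': 'My Check %s',
    'has_perfdata':        False,   # the old tuple used 0/1 here
    'group':               'my_group',
}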
@@ -39,18 +39,16 @@
             warn, crit = params[2:4]
         else:
             warn, crit = params[0:2]
-            infotext = " - %dC - levels at %d/%d" % (temp, warn, crit)
-            perfdata = [ ("temp", temp, warn, crit ) ]
-            if temp >= crit:
-                return (2, "CRIT" + infotext, perfdata)
-            elif temp >= warn:
-                return (1, "WARN" + infotext, perfdata)
-            else:
-                return (0, "OK" + infotext, perfdata)
-    return (3, 'UNKNOWN - sensor not found in agent output')
-check_info['nvidia.temp'] = ( check_nvidia_temp, "Temperature NVIDIA %s", 1, inventory_nvidia_temp )
+            return check_temperature(temp, (warn, crit))
+check_info["nvidia.temp"] = {
+    'check_function':      check_nvidia_temp,
+    'inventory_function':  inventory_nvidia_temp,
+    'service_description': 'Temperature NVIDIA %s',
+    'has_perfdata':        True,
+    'includes':            [ 'temperature.include' ],
+}
 
 def inventory_nvidia_errors(info):
@@ -63,11 +61,14 @@
         if line[0] == "GPUErrors:":
             errors = int(line[1])
             if errors == 0:
-                return (0, "OK - No GPU errors")
+                return (0, "No GPU errors")
             else:
-                return (2, "CRIT - %d GPU errors" % errors)
-    return (3, "UNKNOWN - incomplete output from agent")
-
+                return (2, "%d GPU errors" % errors)
+    return (3, "incomplete output from agent")
 
-check_info['nvidia.errors'] = ( check_nvidia_errors, "NVIDIA GPU Errors", 0, inventory_nvidia_errors )
-checkgroup_of['nvidia.errors'] = 'hw_errors'
+check_info["nvidia.errors"] = {
+    'check_function':      check_nvidia_errors,
+    'inventory_function':  inventory_nvidia_errors,
+    'service_description': 'NVIDIA GPU Errors',
+    'group':               'hw_errors',
+}
diff -Nru check-mk-1.2.2p3/nvidia.errors check-mk-1.2.6p12/nvidia.errors
--- check-mk-1.2.2p3/nvidia.errors 2013-11-05 09:23:07.000000000 +0000
+++ check-mk-1.2.6p12/nvidia.errors 2015-06-24 09:48:36.000000000 +0000
@@ -1,6 +1,6 @@
-title: Check GPU errors on NVIDIA graphics card
+title: GPU errors on NVIDIA graphics card
 agents: linux
-author: Mathias Kettner
+catalog: os/hardware
 license: GPL
 distribution: check_mk
 description:
diff -Nru check-mk-1.2.2p3/nvidia.temp check-mk-1.2.6p12/nvidia.temp
--- check-mk-1.2.2p3/nvidia.temp 2013-11-05 09:23:07.000000000 +0000
+++ check-mk-1.2.6p12/nvidia.temp 2015-06-24 09:48:36.000000000 +0000
@@ -1,6 +1,6 @@
-title: Check temperatures of NVIDIA graphics card
+title: Temperatures of NVIDIA graphics card
 agents: linux
-author: Mathias Kettner
+catalog: os/hardware
 license: GPL
 distribution: check_mk
 description:
diff -Nru check-mk-1.2.2p3/omd_status check-mk-1.2.6p12/omd_status
--- check-mk-1.2.2p3/omd_status 2013-11-05 09:23:08.000000000 +0000
+++ check-mk-1.2.6p12/omd_status 2015-09-21 10:59:54.000000000 +0000
@@ -7,7 +7,7 @@
 # |            | |___| | | |  __/ (__|   <    | |  | | . \            |
 # |             \____|_| |_|\___|\___|_|\_\___|_|  |_|_|\_\           |
 # |                                                                   |
-# | Copyright Mathias Kettner 2013             mk@mathias-kettner.de |
+# | Copyright Mathias Kettner 2014             mk@mathias-kettner.de |
 # +------------------------------------------------------------------+
 #
 # This file is part of Check_MK.
@@ -41,7 +41,9 @@
 # OVERALL 2
 
 def inventory_omd_status(info):
-    return [ ( line[0][1:-1], None ) for line in info if line[0][0] == '[' ]
+    for line in info:
+        if line[0][0] == '[':
+            yield line[0][1:-1], None
 
 def check_omd_status(item, _no_params, info):
     active = False
@@ -52,16 +54,20 @@
         elif active:
             if line[0] == 'OVERALL':
                 if line[1] == '0':
-                    return (0, 'OK - all services are running')
+                    return (0, 'all services are running')
                 elif line[1] == '1':
-                    return (2, 'CRIT - site is stopped')
+                    return (2, 'site is stopped')
                 else:
-                    return (2, 'CRIT - partially running! stopped services: %s' % ", ".join(stopped))
+                    return (2, 'partially running! stopped services: %s' % ", ".join(stopped))
             elif line[1] != '0':
                 stopped.append(line[0])
-    return (3, "UNKNOWN - site not existing or AUTOSTART off")
+    return (3, "site not existing or AUTOSTART off")
+
 
-check_info['omd_status'] = \
-    (check_omd_status, "OMD %s status", 0, inventory_omd_status)
-checkgroup_of["omd_status"] = "omd_status"
+check_info["omd_status"] = {
+    'check_function':      check_omd_status,
+    'inventory_function':  inventory_omd_status,
+    'service_description': 'OMD %s status',
+    'group':               'omd_status',
+}
diff -Nru check-mk-1.2.2p3/openvpn_clients check-mk-1.2.6p12/openvpn_clients
--- check-mk-1.2.2p3/openvpn_clients 1970-01-01 00:00:00.000000000 +0000
+++ check-mk-1.2.6p12/openvpn_clients 2015-06-24 09:48:37.000000000 +0000
@@ -0,0 +1,70 @@
+#!/usr/bin/python
+# -*- encoding: utf-8; py-indent-offset: 4 -*-
+# +------------------------------------------------------------------+
+# |             ____ _               _        __  __ _  __           |
+# |            / ___| |__   ___  ___| | __   |  \/  | |/ /           |
+# |           | |   | '_ \ / _ \/ __| |/ /   | |\/| | ' /            |
+# |           | |___| | | |  __/ (__|   <    | |  | | . \            |
+# |            \____|_| |_|\___|\___|_|\_\___|_|  |_|_|\_\           |
+# |                                                                  |
+# | Copyright Mathias Kettner 2014             mk@mathias-kettner.de |
+# +------------------------------------------------------------------+
+#
+# This file is part of Check_MK.
+# The official homepage is at http://mathias-kettner.de/check_mk.
+#
+# check_mk is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by
+# the Free Software Foundation in version 2. check_mk is distributed
+# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with-
+# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE. See the GNU General Public License for more de-
+# tails. You should have received a copy of the GNU General Public
+# License along with GNU Make; see the file COPYING. If not, write
+# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
+# Boston, MA 02110-1301 USA.
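The omd_status hunk above swaps a list comprehension for a generator with yield. Both forms are interchangeable here, since the caller accepts any iterable of (item, params) pairs. A minimal editorial sketch with invented agent lines shows the equivalence:

# Editorial sketch: the two inventory styles from the omd_status hunk are
# equivalent. The sample agent output below is made up for illustration.
info = [['[production]'], ['apache', '0'], ['OVERALL', '0'],
        ['[test]'], ['rrdcached', '1']]

def inventory_list(info):
    return [(line[0][1:-1], None) for line in info if line[0][0] == '[']

def inventory_gen(info):
    for line in info:
        if line[0][0] == '[':
            yield line[0][1:-1], None

assert inventory_list(info) == list(inventory_gen(info))
print(list(inventory_gen(info)))  # [('production', None), ('test', None)]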
+
+# Example output from agent:
+# <<<openvpn_clients>>>
+# wilhelmshilfe-hups1,84.161.206.33:58371,11267978,8134524,Sun Mar 10 14:02:27 2013
+# wilhelmshilfe-hups365,84.161.206.33:59737,924198,809268,Sun Mar 10 13:59:14 2013
+# wilhelmshilfe-bartenbach-redu,78.43.52.102:40411,492987861,516066364,Sun Mar 10 03:55:01 2013
+# wilhelmshilfe-hups3,84.161.206.33:58512,8224815,6189879,Sun Mar 10 11:32:40 2013
+# wilhelmshilfe-heiningen,46.5.209.251:3412,461581486,496901007,Fri Mar 8 10:02:38 2013
+# wilhelmshilfe-hups5,84.161.206.33:60319,721646,336190,Sun Mar 10 14:23:30 2013
+# wilhelmshilfe-suessen,92.198.38.212:3077,857194558,646128778,Fri Mar 8 10:02:38 2013
+# wilhelmshilfe-hups6,84.161.206.33:61410,3204103,2793366,Sun Mar 10 11:59:13 2013
+# wilhelmshilfe-gw-fau1,217.92.99.180:55683,109253134,96735180,Sun Mar 10 10:11:44 2013
+# wilhelmshilfe-bendig,78.47.146.190:34475,5787319,19395097,Sat Mar 9 10:02:52 2013
+# wilhelmshilfe-ursenwang,46.223.206.6:47299,747919254,922426625,Fri Mar 8 10:02:38 2013
+# vpn-wilhelmshilfe.access.lihas.de,79.204.249.30:59046,12596972,31933023,Sun Mar 10 09:32:22 2013
+# wilhelmshilfe-karlshof,92.198.38.214:3078,810996228,716994592,Fri Mar 8 10:02:39 2013
+
+def inventory_openvpn_clients(info):
+    return [ (l[0], None) for l in info ]
+
+def check_openvpn_clients(item, _no_params, info):
+    for line in info:
+        if line[0] == item:
+            infos = [ "Channel is up" ]
+            perfdata = []
+            name, address, inbytes, outbytes, date = line
+            this_time = time.time()
+            for what, val in [
+                ( "in", int(inbytes) ),
+                ( "out", int(outbytes) )]:
+                countername = "openvpn_clients.%s.%s" % (item, what)
+                bytes_per_sec = get_rate(countername, this_time, val)
+                infos.append("%s: %s/sec" % (what, get_bytes_human_readable(bytes_per_sec)))
+                perfdata.append((what, bytes_per_sec))
+            return 0, ", ".join(infos), perfdata
+
+    return 3, "Client connection not found"
+
+check_info["openvpn_clients"] = {
+    "check_function"      : check_openvpn_clients,
+    "inventory_function"  : inventory_openvpn_clients,
+    "service_description" : "OpenVPN Client %s",
+    "has_perfdata"        : True,
+}
+
diff -Nru check-mk-1.2.2p3/oracle_asm_diskgroup check-mk-1.2.6p12/oracle_asm_diskgroup
--- check-mk-1.2.2p3/oracle_asm_diskgroup 2013-11-05 09:23:08.000000000 +0000
+++ check-mk-1.2.6p12/oracle_asm_diskgroup 2015-09-21 10:59:54.000000000 +0000
@@ -7,7 +7,7 @@
 # |            | |___| | | |  __/ (__|   <    | |  | | . \            |
 # |             \____|_| |_|\___|\___|_|\_\___|_|  |_|_|\_\           |
 # |                                                                   |
-# | Copyright Mathias Kettner 2013             mk@mathias-kettner.de |
+# | Copyright Mathias Kettner 2014             mk@mathias-kettner.de |
 # +------------------------------------------------------------------+
 #
 # This file is part of Check_MK.
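The openvpn_clients check above turns the absolute in/out byte counters from the agent into rates via Check_MK's get_rate helper. Roughly, that helper persists the last (timestamp, value) pair per counter name and returns the delta quotient. The sketch below is an editorial re-implementation of that idea only; it is not Check_MK's actual code, which additionally persists state on disk, handles counter wraps, and raises MKCounterWrapped on the first sample.

import time

# Editorial sketch of counter-to-rate logic resembling get_rate().
_counters = {}

def rate(countername, this_time, value):
    last = _counters.get(countername)
    _counters[countername] = (this_time, value)
    if last is None:
        return 0.0                      # no previous sample yet
    last_time, last_value = last
    if this_time <= last_time:
        return 0.0
    return (value - last_value) / float(this_time - last_time)

now = time.time()
rate("openvpn_clients.foo.in", now - 60, 11267978)    # first sample
print(rate("openvpn_clients.foo.in", now, 11273978))  # ~100 bytes/sec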
@@ -30,21 +30,94 @@
 # MOUNTED NORMAL N 512 4096 1048576 3072 2146 309 918 0 Y OCR_VOTE/
 
 # The agent section <<<oracle_asm_diskgroup>>> does not output the header line
 
-check_includes['oracle_asm_diskgroup'] = [ "df.include" ]
+factory_settings["asm_diskgroup_default_levels"] = {
+    "levels"         : (80.0, 90.0), # warn/crit in percent
+    "magic_normsize" : 20,           # Standard size of 20 GB
+    "levels_low"     : (50.0, 60.0), # Never move warn level below 50% due to magic factor
+    "trend_range"    : 24,
+    "trend_perfdata" : True,         # do send performance data for trends
+    "req_mir_free"   : False,        # Ignore required mirror free space in DG
+}
 
 def inventory_oracle_asm_diskgroup(info):
     return [ (line[-1].rstrip("/"), {}) for line in info ]
 
 def check_oracle_asm_diskgroup(item, params, info):
-    for state, typ, rebal, sector, block, au, total_mb, \
-        free_mb, req_mir_free_mb, usable_file_mb, offline_disks, \
-        voting_files, name in info:
-        if name.rstrip('/') == item:
-            return df_check_filesystem(g_hostname, item, int(total_mb),
-                                       int(free_mb), params)
-    return (3, "UNKNOWN - Disk group not found")
-
-check_info["oracle_asm_diskgroup"] = (check_oracle_asm_diskgroup, "ASM Diskgroup %s", 1, inventory_oracle_asm_diskgroup)
-checkgroup_of['oracle_asm_diskgroup'] = "fs"
-check_default_levels["oracle_asm_diskgroup"] = "filesystem_default_levels"
+    for line in info:
+        voting_files = 'N'
+
+        if len(line) == 13:
+            state, typ, rebal, sector, block, au, total_mb, \
+            free_mb, req_mir_free_mb, usable_file_mb, offline_disks, \
+            voting_files, name = line
+        elif len(line) == 12:
+            state, typ, rebal, sector, block, au, total_mb, \
+            free_mb, req_mir_free_mb, usable_file_mb, offline_disks, \
+            name = line
+        else:
+            name = ""
+
+        dg_name = name.rstrip('/')
+
+        if dg_name == item:
+            add_text = ''
+
+            if typ in ('NORMAL', 'HIGH'):
+                if typ == 'NORMAL':
+                    if voting_files == 'Y':
+                        # NORMAL redundancy diskgroups with voting require 3 failgroups
+                        dg_factor = 3
+                    else:
+                        dg_factor = 2
+
+                elif typ == 'HIGH':
+                    if voting_files == 'Y':
+                        # HIGH redundancy diskgroups with voting require 5 failgroups
+                        dg_factor = 5
+                    else:
+                        dg_factor = 3
+
+                total_mb = int(total_mb)/dg_factor
+                free_space_mb = int(free_mb)/dg_factor
+
+                if params.get('req_mir_free'):
+                    req_mir_free_mb = int(req_mir_free_mb)
+
+                    if req_mir_free_mb < 0:
+                        # required mirror free space can be negative!
+                        req_mir_free_mb = 0
+
+                    free_space_mb = int(req_mir_free_mb)
+                    add_text = ', required mirror free space used'
+
+            else:
+                # EXTERNAL redundancy
+                free_space_mb = int(free_mb)
+
+            status, infotext, perfdata = df_check_filesystem(g_hostname, item, int(total_mb),
+                                                             free_space_mb, params)
+
+            infotext += ', %s redundancy' % typ.lower()
+            infotext += add_text
+
+            offline_disks = int(offline_disks)
+            if offline_disks > 0:
+                status = max(2, status)
+                infotext += ', %d offline disks found(!!)' % offline_disks
+
+            return (status, infotext, perfdata)
+
+    # In case of missing information we assume that the ASM instance is
+    # checked at a later time.
+    # This reduces false notifications for ASM instances that are not running
+    raise MKCounterWrapped("Diskgroup %s not found" % item)
+
+check_info["oracle_asm_diskgroup"] = {
+    'check_function'          : check_oracle_asm_diskgroup,
+    'inventory_function'      : inventory_oracle_asm_diskgroup,
+    'service_description'     : 'ASM Diskgroup %s',
+    'has_perfdata'            : True,
+    'group'                   : 'asm_diskgroup',
+    'default_levels_variable' : 'asm_diskgroup_default_levels',
+    "includes"                : [ "df.include" ],
+}
diff -Nru check-mk-1.2.2p3/oracle_crs_res check-mk-1.2.6p12/oracle_crs_res
--- check-mk-1.2.2p3/oracle_crs_res 1970-01-01 00:00:00.000000000 +0000
+++ check-mk-1.2.6p12/oracle_crs_res 2015-06-24 09:48:37.000000000 +0000
@@ -0,0 +1,141 @@
+#!/usr/bin/python
+# -*- encoding: utf-8; py-indent-offset: 4 -*-
+# +------------------------------------------------------------------+
+# |             ____ _               _        __  __ _  __           |
+# |            / ___| |__   ___  ___| | __   |  \/  | |/ /           |
+# |           | |   | '_ \ / _ \/ __| |/ /   | |\/| | ' /            |
+# |           | |___| | | |  __/ (__|   <    | |  | | . \            |
+# |            \____|_| |_|\___|\___|_|\_\___|_|  |_|_|\_\           |
+# |                                                                  |
+# | Copyright Mathias Kettner 2014             mk@mathias-kettner.de |
+# +------------------------------------------------------------------+
+#
+# This file is part of Check_MK.
+# The official homepage is at http://mathias-kettner.de/check_mk.
+#
+# check_mk is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by
+# the Free Software Foundation in version 2. check_mk is distributed
+# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with-
+# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE. See the GNU General Public License for more de-
+# tails. You should have received a copy of the GNU General Public
+# License along with GNU Make; see the file COPYING. If not, write
+# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
+# Boston, MA 02110-1301 USA.
+
+# Original version by Thorsten Bruhns from OPITZ CONSULTING Deutschland GmbH
+
+# <<<oracle_crs_res>>>
+# ezszds8c|NAME=ora.DG_CLUSTER.dg
+# ezszds8c|TYPE=ora.diskgroup.type
+# ezszds8c|STATE=ONLINE on ezszds8c
+# ezszds8c|TARGET=ONLINE
+# ezszds8c|NAME=ora.I31_ARCH.dg
+# ezszds8c|TYPE=ora.diskgroup.type
+# ezszds8c|STATE=ONLINE on ezszds8c
+# ezszds8c|TARGET=ONLINE
+# ezszds8c|NAME=ora.I31_DATA.dg
+# ezszds8c|TYPE=ora.diskgroup.type
+# ezszds8c|STATE=ONLINE on ezszds8c
+# ezszds8c|TARGET=ONLINE
+# ezszds8c|NAME=ora.I31_MLOG.dg
+# ezszds8c|TYPE=ora.diskgroup.type
+# ezszds8c|STATE=ONLINE on ezszds8c
+# ezszds8c|TARGET=ONLINE
+# ...and so on...
+
+# Parse output into a dict of dicts of dicts:
+# nodename -> resource name -> entry
+# Example:
+# { 'ezszds8c' :
+#     { 'ora.I31_ARCH.dg' : {
+#         'state': 'ONLINE on ezszds9c',
+#         'target': 'ONLINE',
+#         'type': 'ora.diskgroup.type'}
+#     }
+# }
+# Returns a pair of the CRS node name and the dict described above
+def parse_oracle_crs_res(info):
+    ressources = {}
+    for nodename, varsetting in info:
+        if nodename == "nodename":
+            crs_nodename = varsetting
+            continue
+
+        key, value = varsetting.split("=", 1)
+        if key == "NAME":
+            res_name = value
+            entry = {}
+            ressources.setdefault(res_name, {})
+            ressources[res_name][nodename] = entry
+        else:
+            entry[key.lower()] = value
+    return crs_nodename, ressources
+
+
+def inventory_oracle_crs_res(parsed):
+    return [ (name, None) for name in parsed[1] ]
+
+
+def get_oracle_crs_runninginfo(item, data):
+    infotext = ''
+    for line in data:
+        resname = line[0]['NAME']
+        resstate = line[0]['STATE'].split(' ', 1)[0]
+        resstatelong = line[0]['STATE']
+        nodename = line[1]
+
+        if item == resname and nodename not in ('nodename', 'csslocal', 'crslocal') \
+           and resstate == 'ONLINE':
+
+            # sometimes there is only 'ONLINE' instead of 'ONLINE on <nodename>';
+            # prevent duplicate entries
+            infotextshort = ' [online on %s]' % (nodename)
+            if infotextshort not in infotext:
+                infotext += infotextshort
+
+    return infotext
+
+
+def check_oracle_crs_res(item, _no_params, parsed):
+    crs_nodename, ressources = parsed
+
+    # In case of missing information we assume that the clusterware
+    # is not running and we simply skip the result
+    if item not in ressources:
+        if item == 'ora.cssd':
+            yield 2, "Clusterware not running"
+        elif item == 'ora.crsd':
+            yield 2, "Cluster Resource Service Daemon not running!"
+        else:
+            raise MKCounterWrapped("No resource details found for %s. Maybe the cssd/crsd is not running" % item)
+        return
+
+    for nodename, entry in ressources[item].items():
+        restype = entry["type"]
+        resstate = entry["state"].split(' ', 1)[0]
+        restarget = entry["target"]
+
+        if nodename == "csslocal":
+            infotext = "local: "
+        else:
+            infotext = "on " + nodename + ": "
+        infotext += resstate.lower()
+
+        if resstate != restarget:
+            state = 2
+            infotext += ", target state %s" % restarget.lower()
+        else:
+            state = 0
+        yield state, infotext
+
+
+
+check_info['oracle_crs_res'] = {
+    "parse_function"      : parse_oracle_crs_res,
+    "check_function"      : check_oracle_crs_res,
+    "inventory_function"  : inventory_oracle_crs_res,
+    "service_description" : "ORA-GI %s Resource",
+    "group"               : "oracle_crs_res",
+}
diff -Nru check-mk-1.2.2p3/oracle_crs_version check-mk-1.2.6p12/oracle_crs_version
--- check-mk-1.2.2p3/oracle_crs_version 1970-01-01 00:00:00.000000000 +0000
+++ check-mk-1.2.6p12/oracle_crs_version 2015-06-24 09:48:37.000000000 +0000
@@ -0,0 +1,44 @@
+#!/usr/bin/python
+# -*- encoding: utf-8; py-indent-offset: 4 -*-
+# +------------------------------------------------------------------+
+# |             ____ _               _        __  __ _  __           |
+# |            / ___| |__   ___  ___| | __   |  \/  | |/ /           |
+# |           | |   | '_ \ / _ \/ __| |/ /   | |\/| | ' /            |
+# |           | |___| | | |  __/ (__|   <    | |  | | . \            |
+# |            \____|_| |_|\___|\___|_|\_\___|_|  |_|_|\_\           |
+# |                                                                  |
+# | Copyright Mathias Kettner 2014             mk@mathias-kettner.de |
+# +------------------------------------------------------------------+
+#
+# This file is part of Check_MK.
+# The official homepage is at http://mathias-kettner.de/check_mk.
+#
+# check_mk is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by
+# the Free Software Foundation in version 2.
+# check_mk is distributed
+# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with-
+# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE. See the GNU General Public License for more de-
+# tails. You should have received a copy of the GNU General Public
+# License along with GNU Make; see the file COPYING. If not, write
+# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
+# Boston, MA 02110-1301 USA.
+
+
+def inventory_oracle_crs_version(info):
+    return [ ('', None) for line in info ]
+
+def check_oracle_crs_version(item, params, info):
+    for line in info:
+        return (0, line[0])
+
+    # In case of missing information we assume that the clusterware
+    # is not running and we simply skip the result
+    raise MKCounterWrapped("No version details found. Maybe the cssd is not running")
+
+check_info['oracle_crs_version'] = {
+    "check_function"      : check_oracle_crs_version,
+    "inventory_function"  : inventory_oracle_crs_version,
+    "service_description" : "ORA-GI Version",
+    "group"               : "oracle_crs_version",
+}
diff -Nru check-mk-1.2.2p3/oracle_crs_voting check-mk-1.2.6p12/oracle_crs_voting
--- check-mk-1.2.2p3/oracle_crs_voting 1970-01-01 00:00:00.000000000 +0000
+++ check-mk-1.2.6p12/oracle_crs_voting 2015-06-24 09:48:37.000000000 +0000
@@ -0,0 +1,78 @@
+#!/usr/bin/python
+# -*- encoding: utf-8; py-indent-offset: 4 -*-
+# +------------------------------------------------------------------+
+# |             ____ _               _        __  __ _  __           |
+# |            / ___| |__   ___  ___| | __   |  \/  | |/ /           |
+# |           | |   | '_ \ / _ \/ __| |/ /   | |\/| | ' /            |
+# |           | |___| | | |  __/ (__|   <    | |  | | . \            |
+# |            \____|_| |_|\___|\___|_|\_\___|_|  |_|_|\_\           |
+# |                                                                  |
+# | Copyright Mathias Kettner 2014             mk@mathias-kettner.de |
+# +------------------------------------------------------------------+
+#
+# This file is part of Check_MK.
+# The official homepage is at http://mathias-kettner.de/check_mk.
+#
+# check_mk is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by
+# the Free Software Foundation in version 2. check_mk is distributed
+# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with-
+# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE. See the GNU General Public License for more de-
+# tails. You should have received a copy of the GNU General Public
+# License along with GNU Make; see the file COPYING. If not, write
+# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
+# Boston, MA 02110-1301 USA.
+
+# Developed by Thorsten Bruhns from OPITZ CONSULTING Deutschland GmbH
+
+# <<<oracle_crs_voting>>>
+#  1. ONLINE 0a6884c063904f50bf7ef4516b728a2d (/dev/oracleasm/disks/DATA1) [DATA1]
+
+def inventory_oracle_crs_voting(info):
+    return [ ('', None) for line in info ]
+
+def check_oracle_crs_voting(item, params, info):
+    # state = -1 => no data for the service
+    state = -1
+    infotext = ''
+    votecount = 0
+    votedisk = ''
+    for line in info:
+        if line[1] == 'ONLINE':
+            votecount += 1
+            votedisk += '[%s] ' % line[3]
+        elif len(line) == 3:
+            # CRS 10.2 + 11.1 has only 3 entries
+            votecount += 1
+            votedisk += '[%s] ' % line[2]
+
+    if votecount in (1,3,5):
+        state = 0
+        infotext = '%d Voting Disks found. %s' % (votecount, votedisk)
+        return state, infotext
+    elif votecount == 0:
+        # the cssd cannot start without an existing voting disk!
+        raise MKCounterWrapped("No Voting Disk(s) found. Maybe the cssd/crsd is not running!")
+    else:
+        state = 2
+        infotext = 'missing Voting Disks (!!). %d Votes found %s' % (votecount, votedisk)
+        return state, infotext
+
+    # In case of missing information we assume that the clusterware
+    # is not running and we simply skip the result
+    raise MKCounterWrapped("No Voting Disk(s) found. Maybe the cssd/crsd is not running!")
+
+check_info['oracle_crs_voting'] = {
+    "check_function"      : check_oracle_crs_voting,
+    "inventory_function"  : inventory_oracle_crs_voting,
+    "service_description" : "ORA-GI Voting",
+    "group"               : "oracle_crs_voting",
+}
diff -Nru check-mk-1.2.2p3/oracle_dataguard_stats check-mk-1.2.6p12/oracle_dataguard_stats
--- check-mk-1.2.2p3/oracle_dataguard_stats 1970-01-01 00:00:00.000000000 +0000
+++ check-mk-1.2.6p12/oracle_dataguard_stats 2015-09-21 10:59:54.000000000 +0000
@@ -0,0 +1,136 @@
+#!/usr/bin/python
+# -*- encoding: utf-8; py-indent-offset: 4 -*-
+# +------------------------------------------------------------------+
+# |             ____ _               _        __  __ _  __           |
+# |            / ___| |__   ___  ___| | __   |  \/  | |/ /           |
+# |           | |   | '_ \ / _ \/ __| |/ /   | |\/| | ' /            |
+# |           | |___| | | |  __/ (__|   <    | |  | | . \            |
+# |            \____|_| |_|\___|\___|_|\_\___|_|  |_|_|\_\           |
+# |                                                                  |
+# | Copyright Mathias Kettner 2014             mk@mathias-kettner.de |
+# +------------------------------------------------------------------+
+#
+# This file is part of Check_MK.
+# The official homepage is at http://mathias-kettner.de/check_mk.
+#
+# check_mk is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by
+# the Free Software Foundation in version 2. check_mk is distributed
+# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with-
+# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE. See the GNU General Public License for more de-
+# tails. You should have received a copy of the GNU General Public
+# License along with GNU Make; see the file COPYING. If not, write
+# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
+# Boston, MA 02110-1301 USA.
+
+# In cooperation with Thorsten Bruhns from OPITZ Consulting
+
+# <<<oracle_dataguard_stats>>>
+# TUX12C|TUXSTDB|PHYSICAL STANDBY|transport lag|+00 00:00:00
+# TUX12C|TUXSTDB|PHYSICAL STANDBY|apply lag|+00 00:28:57
+# TUX12C|TUXSTDB|PHYSICAL STANDBY|apply finish time|+00 00:00:17.180
+# TUX12C|TUXSTDB|PHYSICAL STANDBY|estimated startup time|20
+
+def inventory_oracle_dataguard_stats(info):
+    inventory = []
+    for line in info:
+        inventory.append(("%s.%s" % (line[0], line[1]), {}))
+    return inventory
+
+def check_oracle_dataguard_stats(item, params, info):
+    def get_seconds(timestamp):
+        if str(timestamp)[0:1] == '+':
+            days = int(timestamp[1:3])
+            h = int(timestamp[4:6])
+            min = int(timestamp[7:9])
+            sec = int(timestamp[10:12])
+
+            seconds = int(sec + min*60 + h*3600 + days*24*3600)
+            return seconds
+        return int(-1)
+
+
+    state = 0
+
+    perfdata = []
+    infotext = ''
+
+    itemfound = False
+
+    for line in info:
+
+        if line[0] + '.' + line[1] == item:
+            db_name, db_unique_name, database_role, parameter, value = line
+
+            itemfound = True
+            if infotext == '':
+                infotext = 'Database Role %s' % (database_role.lower())
+
+            if parameter in ('transport lag', 'apply lag', 'apply finish time'):
+
+                if parameter == 'apply lag':
+                    params_value = 'apply_lag'
+
+                elif parameter == 'transport lag':
+                    params_value = 'transport_lag'
+
+                else:
+                    params_value = ''
+
+                state_marker = ''
+
+                seconds = int(get_seconds(value))
+
+                infotext += ' %s %s' % (parameter, get_age_human_readable(seconds))
+
+                if params.get(params_value):
+                    infotext += ' levels at ('
+
+                    if parameter == 'apply lag' and params.get('apply_lag_min'):
+
+                        # a minimum apply lag needs a configured apply lag rule!
+                        warn, crit = params.get('apply_lag_min')
+                        infotext += '%s/%s .. ' % (get_age_human_readable(warn), \
+                                                   get_age_human_readable(crit))
+
+                        # apply_lag_min is a MINIMUM value!
+                        if crit >= seconds:
+                            state = 2
+                            state_marker = '(!!)'
+                        elif warn >= seconds:
+                            state = max(state, 1)
+                            state_marker = '(!)'
+
+                    warn, crit = params.get(params_value)
+                    infotext += '%s/%s)' % (get_age_human_readable(warn), \
+                                            get_age_human_readable(crit))
+
+                    if crit <= seconds:
+                        state = 2
+                        state_marker = '(!!)'
+                    elif warn <= seconds:
+                        state = max(state, 1)
+                        state_marker = '(!)'
+
+                    infotext += state_marker
+
+                    perfdata.append([parameter.replace(' ', '_'), seconds, warn, crit])
+                else:
+                    perfdata.append([parameter.replace(' ', '_'), seconds])
+
+    if itemfound == True:
+        return state, infotext, perfdata
+
+    # In case of missing information we assume that the login into
+    # the database has failed and we simply skip this check. It won't
+    # switch to UNKNOWN, but will get stale.
+    raise MKCounterWrapped("Dataguard disabled or Instance not running")
+
+check_info['oracle_dataguard_stats'] = {
+    "check_function"      : check_oracle_dataguard_stats,
+    "inventory_function"  : inventory_oracle_dataguard_stats,
+    "service_description" : "ORA %s Dataguard-Stats",
+    "has_perfdata"        : True,
+    "group"               : "oracle_dataguard_stats",
+}
diff -Nru check-mk-1.2.2p3/oracle.include check-mk-1.2.6p12/oracle.include
--- check-mk-1.2.2p3/oracle.include 2013-11-05 09:23:08.000000000 +0000
+++ check-mk-1.2.6p12/oracle.include 2015-06-24 09:48:37.000000000 +0000
@@ -7,7 +7,7 @@
 # |            | |___| | | |  __/ (__|   <    | |  | | . \            |
 # |             \____|_| |_|\___|\___|_|\_\___|_|  |_|_|\_\           |
 # |                                                                   |
-# | Copyright Mathias Kettner 2013             mk@mathias-kettner.de |
+# | Copyright Mathias Kettner 2014             mk@mathias-kettner.de |
 # +------------------------------------------------------------------+
 #
 # This file is part of Check_MK.
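get_seconds() in the Dataguard check above decodes the '+DD HH:MM:SS' lag strings from the agent with fixed string offsets. The editorial sketch below re-implements that parsing standalone and verifies it against the '+00 00:28:57' apply-lag value from the agent example; the function name lag_seconds is invented, only the slicing logic comes from the check.

# Editorial sketch of the '+DD HH:MM:SS' parsing used by get_seconds().
def lag_seconds(timestamp):
    if not timestamp.startswith('+'):
        return -1                       # unparsable, same sentinel as the check
    days    = int(timestamp[1:3])
    hours   = int(timestamp[4:6])
    minutes = int(timestamp[7:9])
    seconds = int(timestamp[10:12])
    return ((days * 24 + hours) * 60 + minutes) * 60 + seconds

assert lag_seconds('+00 00:00:00') == 0
assert lag_seconds('+00 00:28:57') == 28 * 60 + 57   # 1737 seconds
assert lag_seconds('20') == -1   # the 'estimated startup time' row is not a lag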
@@ -40,7 +40,7 @@
     # Handle error output from new agent
     if line[1] == 'FAILURE':
         if len(line) >= 3 and line[2].startswith("ORA-"):
-            return (3, "UNKNOWN - %s" % " ".join(line[2:]))
+            return (3, "%s" % " ".join(line[2:]))
         else:
             return False # ignore other FAILURE lines
 
@@ -48,6 +48,6 @@
     if line[1] in [ 'select', '*', 'ERROR' ]:
         return False
     if line[1].startswith('ORA-'):
-        return (3, 'UNKNOWN - Found error in agent output "%s"' % ' '.join(line[1:]))
+        return (3, 'Found error in agent output "%s"' % ' '.join(line[1:]))
 
diff -Nru check-mk-1.2.2p3/oracle_instance check-mk-1.2.6p12/oracle_instance
--- check-mk-1.2.2p3/oracle_instance 1970-01-01 00:00:00.000000000 +0000
+++ check-mk-1.2.6p12/oracle_instance 2015-09-21 10:59:54.000000000 +0000
@@ -0,0 +1,153 @@
+#!/usr/bin/python
+# -*- encoding: utf-8; py-indent-offset: 4 -*-
+# +------------------------------------------------------------------+
+# |             ____ _               _        __  __ _  __           |
+# |            / ___| |__   ___  ___| | __   |  \/  | |/ /           |
+# |           | |   | '_ \ / _ \/ __| |/ /   | |\/| | ' /            |
+# |           | |___| | | |  __/ (__|   <    | |  | | . \            |
+# |            \____|_| |_|\___|\___|_|\_\___|_|  |_|_|\_\           |
+# |                                                                  |
+# | Copyright Mathias Kettner 2014             mk@mathias-kettner.de |
+# +------------------------------------------------------------------+
+#
+# This file is part of Check_MK.
+# The official homepage is at http://mathias-kettner.de/check_mk.
+#
+# check_mk is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by
+# the Free Software Foundation in version 2. check_mk is distributed
+# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with-
+# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE. See the GNU General Public License for more de-
+# tails. You should have received a copy of the GNU General Public
+# License along with GNU Make; see the file COPYING. If not, write
+# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
+# Boston, MA 02110-1301 USA.
+
+# In cooperation with Thorsten Bruhns
+
+# <<<oracle_instance>>>
+# TUX2|12.1.0.1.0|OPEN|ALLOWED|STARTED|6735|1297771692|ARCHIVELOG|PRIMARY|NO|TUX2
+# TUX5|12.1.0.1.1|MOUNTED|ALLOWED|STARTED|82883|1297771692|NOARCHIVELOG|PRIMARY|NO|0|TUX5
+
+factory_settings["oracle_instance_defaults"] = {
+    "logins"         : 2,
+    "noforcelogging" : 1,
+    "noarchivelog"   : 1,
+    "primarynotopen" : 2,
+}
+
+
+def inventory_oracle_instance(info):
+    # Skip ORA- error messages from the broken old oracle agent
+    # <<<oracle_instance>>>
+    # ORA-99999 tnsping failed for +ASM1
+    return [ (line[0], {} ) for line in info
+             if not (line[0].startswith('ORA-') and line[0][4].isdigit())
+             and len(line[0]) < 20 ]
+
+
+def check_oracle_instance(item, params, info):
+
+    def state_marker(state, infotext, param, column, data):
+        value = params.get(param)
+        if value != None and column == data:
+            state = max(state, value)
+            if value == 1:
+                infotext += '(!)'
+            elif value == 2:
+                infotext += '(!!)'
+        return state, infotext
+
+    state = 0
+    for line in info:
+        if line[0] == item:
+            # In case of a general error (e.g. authentication failed), the second
+            # column contains the word "FAILURE"
+            if line[1] == 'FAILURE':
+                return 2, " ".join(line[2:])
+
+            state = 0
+
+            # Be compatible with the old oracle agent plugin output
+            if len(line) == 6:
+                sid, version, openmode, logins, _unused, _unused2 = line
+                infotext = 'Status %s, Version %s, Logins %s' % (openmode, version, logins.lower())
+                state, infotext = state_marker(state, infotext, 'logins', logins, 'RESTRICTED')
+                return state, infotext
+
+            sid, version, openmode, logins, archiver, up_seconds, dbid, \
+            log_mode, database_role, force_logging, name = line
+
+            infotext = "Database Name %s, Status %s" % \
+                       (name, openmode)
+
+            # Check the state of a PRIMARY database. Normally it is always OPEN
+            if database_role == 'PRIMARY' and openmode != 'OPEN':
+                state = params.get('primarynotopen')
+                if state == 1:
+                    infotext += '(!)'
+                elif state == 2:
+                    infotext += '(!!)'
+                elif state == 0:
+                    infotext += ' (allowed by rule)'
+
+            # ORACLE is sick and cannot handle timezone changes >:-P
+            up_seconds = max(0, int(up_seconds))
+            infotext += ", Role %s, Version %s, " \
+                        "Up since %s (%s)" % (
+                        database_role, version, \
+                        time.strftime("%F %T", time.localtime(time.time() - up_seconds)), \
+                        get_age_human_readable(up_seconds))
+
+            if params.get('uptime_min'):
+                warn, crit = params.get('uptime_min')
+                warn = int(warn)
+                crit = int(crit)
+
+                infotext += ' (levels at %s/%s)' % (get_age_human_readable(warn), get_age_human_readable(crit))
+                if up_seconds <= crit:
+                    state = 2
+                    infotext += '(!!) not up long enough'
+                elif up_seconds <= warn:
+                    state = max(state, 1)
+                    infotext += '(!) not up long enough'
+
+            # ASM has no login and archivelog check
+            if database_role != 'ASM':
+
+                # logins are only possible when the database is open
+                if openmode == 'OPEN':
+                    infotext += ', Logins %s' % (logins.lower())
+                    state, infotext = state_marker(state, infotext, 'logins', logins, 'RESTRICTED')
+
+                # the new internal database _MGMTDB from 12.1.0.2 is always in NOARCHIVELOG mode
+                if name != '_MGMTDB' and sid != '-MGMTDB':
+                    infotext += ', Log Mode %s' % (log_mode.lower())
+                    state, infotext = state_marker(state, infotext, 'archivelog', log_mode, 'ARCHIVELOG')
+                    state, infotext = state_marker(state, infotext, 'noarchivelog', log_mode, 'NOARCHIVELOG')
+
+                    # force logging is only usable when archivelog is enabled
+                    if log_mode == 'ARCHIVELOG':
+                        if archiver != 'STARTED':
+                            infotext += '. Archiver %s(!!)' % (archiver.lower())
+                            state = 2
+
+                        infotext += ', Force Logging %s' % (force_logging.lower())
+                        state, infotext = state_marker(state, infotext, 'forcelogging', force_logging, 'YES')
+                        state, infotext = state_marker(state, infotext, 'noforcelogging', force_logging, 'NO')
+
+            perfdata = [('uptime', up_seconds)]
+            return state, infotext, perfdata
+
+    return 2, "Database not running or login failed"
+
+
+check_info['oracle_instance'] = {
+    "check_function"          : check_oracle_instance,
+    "inventory_function"      : inventory_oracle_instance,
+    "service_description"     : "ORA %s Instance",
+    "default_levels_variable" : "oracle_instance_defaults",
+    "group"                   : "oracle_instance",
+    "has_perfdata"            : True,
+}
diff -Nru check-mk-1.2.2p3/oracle_jobs check-mk-1.2.6p12/oracle_jobs
--- check-mk-1.2.2p3/oracle_jobs 1970-01-01 00:00:00.000000000 +0000
+++ check-mk-1.2.6p12/oracle_jobs 2015-09-21 10:59:54.000000000 +0000
@@ -0,0 +1,166 @@
+#!/usr/bin/python
+# -*- encoding: utf-8; py-indent-offset: 4 -*-
+# +------------------------------------------------------------------+
+# |             ____ _               _        __  __ _  __           |
+# |            / ___| |__   ___  ___| | __   |  \/  | |/ /           |
+# |           | |   | '_ \ / _ \/ __| |/ /   | |\/| | ' /            |
+# |           | |___| | | |  __/ (__|   <    | |  | | . \            |
+# |            \____|_| |_|\___|\___|_|\_\___|_|  |_|_|\_\           |
+# |                                                                  |
+# | Copyright Mathias Kettner 2014             mk@mathias-kettner.de |
+# +------------------------------------------------------------------+
+#
+# This file is part of Check_MK.
+# The official homepage is at http://mathias-kettner.de/check_mk.
+#
+# check_mk is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by
+# the Free Software Foundation in version 2. check_mk is distributed
+# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with-
+# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE. See the GNU General Public License for more de-
+# tails. You should have received a copy of the GNU General Public
+# License along with GNU Make; see the file COPYING. If not, write
+# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
+# Boston, MA 02110-1301 USA.
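The uptime_min parameter of oracle_instance above is a lower bound: the instance must have been up for at least warn/crit seconds, so the comparisons run inverted relative to the usual upper-bound levels. A small editorial sketch (function name and level values invented) isolates that logic:

# Editorial sketch: uptime_min in oracle_instance is a *minimum* threshold,
# so CRIT fires while the uptime is still below crit.
def check_min_uptime(up_seconds, levels):
    warn, crit = levels
    if up_seconds <= crit:
        return 2
    elif up_seconds <= warn:
        return 1
    return 0

assert check_min_uptime(30,   (3600, 600)) == 2   # just restarted -> CRIT
assert check_min_uptime(1800, (3600, 600)) == 1   # younger than warn -> WARN
assert check_min_uptime(7200, (3600, 600)) == 0   # up long enough -> OK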
+
+# <<<oracle_jobs>>>
+# IODBSZ1 SYS SM$CLEAN_AUTO_SPLIT_MERGE SCHEDULED 0 763 TRUE 24.04.13 00:00:00,600000 EUROPE/VIENNA - SUCCEEDED
+# IODBSZ1 SYS RSE$CLEAN_RECOVERABLE_SCRIPT SCHEDULED 0 763 TRUE 24.04.13 00:00:00,100000 EUROPE/VIENNA - SUCCEEDED
+# IODBSZ1 SYS BSLN_MAINTAIN_STATS_JOB SCHEDULED 0 110 TRUE 29.04.13 00:00:00,300000 +01:00 BSLN_MAINTAIN_STATS_SCHED SUCCEEDED
+# IODBSZ1 SYS DRA_REEVALUATE_OPEN_FAILURES SCHEDULED 0 97 TRUE 01.01.70 00:00:00,000000 +02:00 MAINTENANCE_WINDOW_GROUP SUCCEEDED
+# IODBSZ1 SYS ORA$AUTOTASK_CLEAN SCHEDULED 0 763 TRUE 24.04.13 03:00:00,900000 EUROPE/VIENNA DAILY_PURGE_SCHEDULE SUCCEEDED
+# IODBSZ1 SYS PURGE_LOG SCHEDULED 0 763 TRUE 24.04.13 03:00:00,800000 EUROPE/VIENNA DAILY_PURGE_SCHEDULE SUCCEEDED
+# IODBSZ1 ORACLE_OCM MGMT_CONFIG_JOB SCHEDULED 0 97 TRUE 01.01.70 00:00:00,000000 +02:00 MAINTENANCE_WINDOW_GROUP SUCCEEDED
+# IODBSZ1 ORACLE_OCM MGMT_STATS_CONFIG_JOB SCHEDULED 0 3 TRUE 01.05.13 01:01:01,000000 +01:00 - SUCCEEDED
+# IODBSZ1 EXFSYS RLM$SCHDNEGACTION SCHEDULED 0 18954 TRUE 23.04.13 14:51:57,000000 +02:00 - SUCCEEDED
+# IODBSZ1 EXFSYS RLM$EVTCLEANUP SCHEDULED 0 18202 TRUE 23.04.13 13:41:48,200000 +01:00 - SUCCEEDED
+
+
+factory_settings["oracle_jobs_defaults"] = {
+    "disabled": True,
+}
+
+def inventory_oracle_jobs(info):
+    for line in info:
+        if len(line) >= 3:
+            yield "%s.%s.%s" % ( line[0], line[1], line[2] ), {}
+
+def check_oracle_jobs(item, params, info):
+    if item.count('.') == 2:
+        sid, job_owner, job_name = item.split('.')
+    else:
+        # old format without job owner. Simply ignore the owner.
+        sid, job_name = item.split('.')
+        job_owner = None
+
+    data_found = False
+    for line in info:
+        service_found = False
+
+        if line[0] == sid:
+            data_found = True
+
+        if line[0] == sid and (line[1] == job_owner or job_owner == None) and line[2] == job_name:
+
+            service_found = True
+            param_disabled = params["disabled"]
+
+            if len(line) == 10:
+                # new agent output with '|' separator
+                sid, job_owner, job_name, job_state, job_runtime, job_run_count, \
+                job_enabled, job_nextrun, job_schedule, job_status = line
+
+            else:
+                # old agent format
+                # the extraction of data is complicated due to the missing field separator
+                job_name = line[2]
+                job_state = line[3]
+                job_runtime = line[4]
+                job_enabled = line[6]
+                job_nextrun = " ".join(line[7:-3])
+                job_schedule = line[-2]
+                job_status = line[-1]
+
+            break
+
+    if not data_found:
+        # In case of missing information we assume that the login into
+        # the database has failed and we simply skip this check. It won't
+        # switch to UNKNOWN, but will get stale.
+        raise MKCounterWrapped("Login not possible for check %s" % item)
+
+    if not service_found:
+        return (2, 'Job is missing')
+
+    state = 0
+    output = []
+    perfdata = []
+
+
+    txt = "Job-State: %s" % job_state
+    if job_state == "BROKEN":
+        txt += "(!!)"
+        state = max(state, 2)
+    output.append(txt)
+
+    txt = "Enabled: %s" % (job_enabled == "TRUE" and "Yes" or "No")
+    if job_enabled != "TRUE" and job_state != 'RUNNING':
+        if param_disabled:
+            txt += ' ignored'
+        else:
+            txt += "(!)"
+            state = max(state, 1)
+    output.append(txt)
+
+    if job_runtime == '' or job_runtime == 'SCHEDULED':
+        last_duration = 0
+    else:
+        last_duration = int(job_runtime.replace('.',',').split(',',1)[0])
+        # works around missing rounding of last_duration in the mk_oracle agent
+    output.append("Last Duration: %s" % (get_age_human_readable(last_duration)))
+
+    if "run_duration" in params:
+        warn, crit = params["run_duration"]
+
+        output.append(" levels at (%ds/%ds)" % (warn, crit))
+
+        if last_duration >= crit:
+            output.append("(!!)")
+            state = max(state, 2)
+        elif last_duration >= warn:
+            output.append("(!)")
+            state = max(state, 1)
+
+    perfdata.append(("duration", last_duration))
+
+    # 01.05.13 01:01:01,000000 +01:00
+    if job_nextrun.startswith("01.01.70 00:00:00"):
+        if job_schedule == "-":
+            job_nextrun = "not scheduled(!)"
+            state = max(state, 1)
+        else:
+            job_nextrun = job_schedule
+    output.append("Next Run: %s" % job_nextrun)
+
+    # A job that is running forever has no last run state and job_status is
+    # STOPPED
+    if job_state == "RUNNING" and job_runtime == '' and job_status == 'STOPPED':
+        txt = 'Job is running forever'
+    else:
+        txt = "Last Run Status: %s" % (job_status)
+        if job_status != "SUCCEEDED":
+            txt += "(!!)"
+            state = max(state, 2)
+    output.append(txt)
+
+    return (state, ", ".join(output), perfdata)
+
+check_info['oracle_jobs'] = {
+    "service_description"     : "ORA %s Job",
+    "group"                   : "oracle_jobs",
+    "check_function"          : check_oracle_jobs,
+    "inventory_function"      : inventory_oracle_jobs,
+    "has_perfdata"            : True,
+    "default_levels_variable" : "oracle_jobs_defaults",
+}
diff -Nru check-mk-1.2.2p3/oracle_locks check-mk-1.2.6p12/oracle_locks
--- check-mk-1.2.2p3/oracle_locks 1970-01-01 00:00:00.000000000 +0000
+++ check-mk-1.2.6p12/oracle_locks 2015-06-24 09:48:37.000000000 +0000
@@ -0,0 +1,90 @@
+#!/usr/bin/python
+# -*- encoding: utf-8; py-indent-offset: 4 -*-
+# +------------------------------------------------------------------+
+# |             ____ _               _        __  __ _  __           |
+# |            / ___| |__   ___  ___| | __   |  \/  | |/ /           |
+# |           | |   | '_ \ / _ \/ __| |/ /   | |\/| | ' /            |
+# |           | |___| | | |  __/ (__|   <    | |  | | . \            |
+# |            \____|_| |_|\___|\___|_|\_\___|_|  |_|_|\_\           |
+# |                                                                  |
+# | Copyright Mathias Kettner 2014             mk@mathias-kettner.de |
+# +------------------------------------------------------------------+
+#
+# This file is part of Check_MK.
+# The official homepage is at http://mathias-kettner.de/check_mk.
+#
+# check_mk is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by
+# the Free Software Foundation in version 2. check_mk is distributed
+# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with-
+# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE. See the GNU General Public License for more de-
+# tails. You should have received a copy of the GNU General Public
+# License along with GNU Make; see the file COPYING. If not, write
+# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
+# Boston, MA 02110-1301 USA.
+
+# <<<oracle_locks>>>
+# TUX12C|273|2985|ora12c.local|sqlplus@ora12c.local (TNS V1-V3)|46148|oracle|633|NULL|NULL
+
+# oracle_sid, sid#, serial#, machine, program, process, osuser, ctime, object_owner, object_name
+
+factory_settings["oracle_locks_defaults"] = {
+    "levels" : (1800, 3600),
+}
+
+def inventory_oracle_locks(info):
+    return [ (line[0], {}) for line in info ]
+
+def check_oracle_locks(item, params, info):
+    lockcount = 0
+    state = -1
+    infotext = ''
+
+    for line in info:
+        warn, crit = params["levels"]
+
+        if line[0] == item and line[1] != '':
+
+            sid, sidnr, serial, machine, program, process, osuser, ctime, \
+            object_owner, object_name = line
+
+            ctime = int(ctime)
+
+            if ctime >= crit:
+                state = 2
+                lockcount += 1
+                infotext += 'locktime %s (!!) Session (sid, serial, proc) %s,%s,%s machine %s osuser %s object: %s.%s ; ' \
+                            % (get_age_human_readable(ctime), sidnr, serial, process, machine, osuser, object_owner, object_name)
+
+            elif ctime >= warn:
+                state = max(1, state)
+                lockcount += 1
+                infotext += 'locktime %s (!) Session (sid, serial, proc) %s,%s,%s machine %s osuser %s object: %s.%s ; ' \
+                            % (get_age_human_readable(ctime), sidnr, serial, process, machine, osuser, object_owner, object_name)
+
+        if line[0] == item and line[1] == '':
+            state = max(0, state)
+
+    if infotext == '':
+        infotext = 'No locks existing'
+    elif lockcount > 10:
+        infotext = 'more than 10 locks existing!'
+
+    if state != -1:
+        return (state, infotext)
+
+    # In case of missing information we assume that the login into
+    # the database has failed and we simply skip this check. It won't
+    # switch to UNKNOWN, but will get stale.
+    raise MKCounterWrapped("Login into database failed")
+
+
+check_info["oracle_locks"] = {
+    'check_function':      check_oracle_locks,
+    'inventory_function':  inventory_oracle_locks,
+    'service_description': 'ORA %s Locks',
+    'has_perfdata':        False,
+    "default_levels_variable" : "oracle_locks_defaults",
+    'group':               'oracle_locks',
+}
diff -Nru check-mk-1.2.2p3/oracle_logswitches check-mk-1.2.6p12/oracle_logswitches
--- check-mk-1.2.2p3/oracle_logswitches 2013-11-05 09:23:08.000000000 +0000
+++ check-mk-1.2.6p12/oracle_logswitches 2015-09-21 10:59:54.000000000 +0000
@@ -7,7 +7,7 @@
 # |            | |___| | | |  __/ (__|   <    | |  | | . \            |
 # |             \____|_| |_|\___|\___|_|\_\___|_|  |_|_|\_\           |
 # |                                                                   |
-# | Copyright Mathias Kettner 2013             mk@mathias-kettner.de |
+# | Copyright Mathias Kettner 2014             mk@mathias-kettner.de |
 # +------------------------------------------------------------------+
 #
 # This file is part of Check_MK.
@@ -36,25 +36,29 @@
 def check_oracle_logswitches(item, params, info):
     for line in info:
         if line[0] == item:
-            err = oracle_handle_ora_errors(line)
-            if err == False:
-                continue
-            elif isinstance(err, tuple):
-                return err
 
             locrit, lowarn, warn, crit = params
             logswitches = int(line[1])
-            infotext = " - %d log switches in the last 60 minutes (levels at %d/%d .. %d/%d)" \
+            infotext = "%d log switches in the last 60 minutes (levels at %d/%d .. %d/%d)" \
                 % (logswitches, locrit, lowarn, warn, crit)
             perfdata = [("logswitches", logswitches, warn, crit)]
             if logswitches >= crit or logswitches <= locrit:
-                return (2, "CRIT" + infotext, perfdata)
+                return (2, infotext, perfdata)
             elif logswitches >= warn or logswitches <= lowarn:
-                return (1, "WARN" + infotext, perfdata)
+                return (1, infotext, perfdata)
             else:
-                return (0, "OK" + infotext, perfdata)
-    return (3, "UNKNOWN - Database not existing or not running")
+                return (0, infotext, perfdata)
 
-check_info['oracle_logswitches'] = (check_oracle_logswitches, "ORA %s Logswitches", 1, inventory_oracle_logswitches )
-checkgroup_of['oracle_logswitches'] = "oracle_logswitches"
-check_includes['oracle_logswitches'] = [ "oracle.include" ]
+    # In case of missing information we assume that the login into
+    # the database has failed and we simply skip this check. It won't
+    # switch to UNKNOWN, but will get stale.
+    raise MKCounterWrapped("Login into database failed")
+
+
+check_info["oracle_logswitches"] = {
+    'check_function':      check_oracle_logswitches,
+    'inventory_function':  inventory_oracle_logswitches,
+    'service_description': 'ORA %s Logswitches',
+    'has_perfdata':        True,
+    'group':               'oracle_logswitches',
+}
diff -Nru check-mk-1.2.2p3/oracle_longactivesessions check-mk-1.2.6p12/oracle_longactivesessions
--- check-mk-1.2.2p3/oracle_longactivesessions 1970-01-01 00:00:00.000000000 +0000
+++ check-mk-1.2.6p12/oracle_longactivesessions 2015-09-21 10:59:54.000000000 +0000
@@ -0,0 +1,97 @@
+#!/usr/bin/python
+# -*- encoding: utf-8; py-indent-offset: 4 -*-
+# +------------------------------------------------------------------+
+# |             ____ _               _        __  __ _  __           |
+# |            / ___| |__   ___  ___| | __   |  \/  | |/ /           |
+# |           | |   | '_ \ / _ \/ __| |/ /   | |\/| | ' /            |
+# |           | |___| | | |  __/ (__|   <    | |  | | . \            |
+# |            \____|_| |_|\___|\___|_|\_\___|_|  |_|_|\_\           |
+# |                                                                  |
+# | Copyright Mathias Kettner 2014             mk@mathias-kettner.de |
+# +------------------------------------------------------------------+
+#
+# This file is part of Check_MK.
+# The official homepage is at http://mathias-kettner.de/check_mk.
+#
+# check_mk is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by
+# the Free Software Foundation in version 2. check_mk is distributed
+# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with-
+# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE. See the GNU General Public License for more de-
+# tails. You should have received a copy of the GNU General Public
+# License along with GNU Make; see the file COPYING. If not, write
+# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
+# Boston, MA 02110-1301 USA.
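oracle_logswitches above keeps its four-value parameter tuple (locrit, lowarn, warn, crit): both too few and too many log switches per hour raise an alarm. An editorial sketch (function name and sample levels invented) isolates the two-sided comparison exactly as the hunk performs it:

# Editorial sketch: the two-sided level logic of oracle_logswitches.
def logswitch_state(logswitches, params):
    locrit, lowarn, warn, crit = params
    if logswitches >= crit or logswitches <= locrit:
        return 2
    elif logswitches >= warn or logswitches <= lowarn:
        return 1
    return 0

assert logswitch_state(0,   (-1, -1, 50, 100)) == 0   # lower levels disabled
assert logswitch_state(60,  (-1, -1, 50, 100)) == 1   # above warn
assert logswitch_state(120, (-1, -1, 50, 100)) == 2   # above crit
assert logswitch_state(0,   (0, 3, 50, 100))   == 2   # too few switches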
+
+# <<<oracle_longactivesessions>>>
+# TUX12C 0 4800 19 0
+
+# Columns:
+# ORACLE_SID serial# machine process osuser program last_call_el sql_id
+
+factory_settings["oracle_longactivesessions_defaults"] = {
+    "levels" : (500, 1000),
+}
+
+
+def inventory_oracle_longactivesessions(info):
+    return [ (line[0], {}) for line in info ]
+
+
+def check_oracle_longactivesessions(item, params, info):
+    sessioncount = 0
+    state = 3
+    itemfound = False
+
+    for line in info:
+
+        warn, crit = params["levels"]
+
+        if line[0] == item:
+            itemfound = True
+
+        if line[0] == item and line[1] != '':
+
+            sessioncount += 1
+            sid, sidnr, serial, machine, process, osuser, program, \
+            last_call_el, sql_id = line
+
+            longoutput = 'Session (sid,serial,proc) %s %s %s active for %s from %s osuser %s program %s sql_id %s ' \
+                         % (sidnr, serial, process, get_age_human_readable(int(last_call_el)), machine, osuser, program, sql_id)
+
+    if itemfound:
+        if sessioncount >= crit:
+            state = 2
+        elif sessioncount >= warn:
+            state = 1
+        else:
+            state = 0
+
+        if sessioncount == 0:
+            return 0, "%d long active sessions (levels at %d/%d)" \
+                      % (sessioncount, warn, crit), \
+                      [("count", sessioncount, warn, crit)]
+        elif sessioncount <= 10:
+            return state, "%d long active sessions (levels at %d/%d) %s" \
+                          % (sessioncount, warn, crit, longoutput), \
+                          [("count", sessioncount, warn, crit)]
+        elif sessioncount > 10:
+            return state, "%d long active sessions (levels at %d/%d)" \
+                          % (sessioncount, warn, crit), \
+                          [("count", sessioncount, warn, crit)]
+
+    # In case of missing information we assume that the login into
+    # the database has failed and we simply skip this check. It won't
+    # switch to UNKNOWN, but will get stale.
+    raise MKCounterWrapped("no info from database. Check ORA %s Instance" % item)
+
+
+check_info['oracle_longactivesessions'] = {
+    "check_function"          : check_oracle_longactivesessions,
+    "inventory_function"      : inventory_oracle_longactivesessions,
+    "service_description"     : "ORA %s Long Active Sessions",
+    "has_perfdata"            : True,
+    "default_levels_variable" : "oracle_longactivesessions_defaults",
+    "group"                   : "oracle_longactivesessions",
+}
diff -Nru check-mk-1.2.2p3/oracle_processes check-mk-1.2.6p12/oracle_processes
--- check-mk-1.2.2p3/oracle_processes 1970-01-01 00:00:00.000000000 +0000
+++ check-mk-1.2.6p12/oracle_processes 2015-09-21 10:59:54.000000000 +0000
@@ -0,0 +1,78 @@
+#!/usr/bin/python
+# -*- encoding: utf-8; py-indent-offset: 4 -*-
+# +------------------------------------------------------------------+
+# |             ____ _               _        __  __ _  __           |
+# |            / ___| |__   ___  ___| | __   |  \/  | |/ /           |
+# |           | |   | '_ \ / _ \/ __| |/ /   | |\/| | ' /            |
+# |           | |___| | | |  __/ (__|   <    | |  | | . \            |
+# |            \____|_| |_|\___|\___|_|\_\___|_|  |_|_|\_\           |
+# |                                                                  |
+# | Copyright Mathias Kettner 2014             mk@mathias-kettner.de |
+# +------------------------------------------------------------------+
+#
+# This file is part of Check_MK.
+# The official homepage is at http://mathias-kettner.de/check_mk.
+#
+# check_mk is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by
+# the Free Software Foundation in version 2. check_mk is distributed
+# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with-
+# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE. See the GNU General Public License for more de-
+# tails.
+# You should have received a copy of the GNU General Public
+# License along with GNU Make; see the file COPYING. If not, write
+# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
+# Boston, MA 02110-1301 USA.
+
+# In cooperation with Thorsten Bruhns from OPITZ Consulting
+
+# <<<oracle_processes>>>
+# TUX2 51 300
+# FOOBAR 11 4780
+
+# Columns: SID PROCESSES_COUNT PROCESSES_LIMIT
+
+factory_settings["oracle_processes_defaults"] = {
+    "levels" : (70.0, 90.0),
+}
+
+def inventory_oracle_processes(info):
+    return [ ( line[0], {} ) for line in info ]
+
+def check_oracle_processes(item, params, info):
+    for line in info:
+        if line[0] == item:
+
+            processes_num = int(line[1])
+            processes_max = int(line[2])
+            processes_pct = float(processes_num) / float(processes_max) * 100
+
+            warn, crit = params["levels"]
+            processes_warn = processes_max * warn / 100
+            processes_crit = processes_max * crit / 100
+
+            if processes_pct >= crit:
+                state = 2
+            elif processes_pct >= warn:
+                state = 1
+            else:
+                state = 0
+
+            return state, "%d of %d processes are used (%d%%, levels at %d%%/%d%%)" \
+                          % (processes_num, processes_max, processes_pct, warn, crit), \
+                          [("processes", processes_num, processes_warn, processes_crit)]
+
+    # In case of missing information we assume that the login into
+    # the database has failed and we simply skip this check. It won't
+    # switch to UNKNOWN, but will get stale.
+    raise MKCounterWrapped("Login into database failed")
+
+
+check_info['oracle_processes'] = {
+    "check_function"          : check_oracle_processes,
+    "inventory_function"      : inventory_oracle_processes,
+    "service_description"     : "ORA %s Processes",
+    "has_perfdata"            : True,
+    "default_levels_variable" : "oracle_processes_defaults",
+    "group"                   : "oracle_processes",
+}
diff -Nru check-mk-1.2.2p3/oracle_recovery_area check-mk-1.2.6p12/oracle_recovery_area
--- check-mk-1.2.2p3/oracle_recovery_area 1970-01-01 00:00:00.000000000 +0000
+++ check-mk-1.2.6p12/oracle_recovery_area 2015-09-21 10:59:54.000000000 +0000
@@ -0,0 +1,82 @@
+#!/usr/bin/python
+# -*- encoding: utf-8; py-indent-offset: 4 -*-
+# +------------------------------------------------------------------+
+# |             ____ _               _        __  __ _  __           |
+# |            / ___| |__   ___  ___| | __   |  \/  | |/ /           |
+# |           | |   | '_ \ / _ \/ __| |/ /   | |\/| | ' /            |
+# |           | |___| | | |  __/ (__|   <    | |  | | . \            |
+# |            \____|_| |_|\___|\___|_|\_\___|_|  |_|_|\_\           |
+# |                                                                  |
+# | Copyright Mathias Kettner 2014             mk@mathias-kettner.de |
+# +------------------------------------------------------------------+
+#
+# This file is part of Check_MK.
+# The official homepage is at http://mathias-kettner.de/check_mk.
+#
+# check_mk is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by
+# the Free Software Foundation in version 2. check_mk is distributed
+# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with-
+# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE. See the GNU General Public License for more de-
+# tails. You should have received a copy of the GNU General Public
+# License along with GNU Make; see the file COPYING. If not, write
+# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
+# Boston, MA 02110-1301 USA.
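oracle_processes above configures its levels as percentages of the session limit but exports absolute warn/crit values in the perfdata. The editorial sketch below (function name invented) reproduces that conversion using the 'TUX2 51 300' line from the agent example:

# Editorial sketch: percentage levels converted to absolute perfdata
# thresholds, as done by check_oracle_processes.
def processes_summary(num, maximum, levels=(70.0, 90.0)):
    warn_pct, crit_pct = levels
    pct = float(num) / maximum * 100
    warn_abs = maximum * warn_pct / 100
    crit_abs = maximum * crit_pct / 100
    state = 2 if pct >= crit_pct else 1 if pct >= warn_pct else 0
    return state, pct, warn_abs, crit_abs

state, pct, warn_abs, crit_abs = processes_summary(51, 300)
assert (state, warn_abs, crit_abs) == (0, 210.0, 270.0)
print("%.0f%% used" % pct)   # 17% used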
+
+# <<<oracle_recovery_area>>>
+# TUX12C 0 4800 19 0
+
+# Columns:
+# ORACLE_SID used_pct size used reclaimable
+
+factory_settings["oracle_recovery_area_defaults"] = {
+    "levels" : (70.0, 90.0),
+}
+
+
+def inventory_oracle_recovery_area(info):
+    return [ (line[0], {}) for line in info ]
+
+
+def check_oracle_recovery_area(item, params, info):
+    for line in info:
+        if line[0] == item:
+            size_mb, used_mb, reclaimable_mb = map(int, line[2:5])
+            if size_mb == 0:
+                perc_used = 0
+            else:
+                perc_used = float(used_mb - reclaimable_mb) / size_mb * 100
+
+            warn, crit = params["levels"]
+            warn_mb = size_mb * warn / 100
+            crit_mb = size_mb * crit / 100
+
+            if perc_used >= crit:
+                state = 2
+            elif perc_used >= warn:
+                state = 1
+            else:
+                state = 0
+
+            mb = 1024*1024
+            return state, "%s out of %s used (%.1f%%, levels at %s%%/%s%%), %s reclaimable" \
+                % (get_bytes_human_readable(used_mb*mb), get_bytes_human_readable(size_mb*mb), \
+                   perc_used, warn, crit, get_bytes_human_readable(reclaimable_mb*mb)), \
+                [('used', used_mb, warn_mb, crit_mb, 0, size_mb), ('reclaimable', reclaimable_mb)]
+
+    # In case of missing information we assume that the login into
+    # the database has failed and we simply skip this check. It won't
+    # switch to UNKNOWN, but will get stale.
+    raise MKCounterWrapped("Login into database failed")
+
+
+check_info['oracle_recovery_area'] = {
+    "check_function"          : check_oracle_recovery_area,
+    "inventory_function"      : inventory_oracle_recovery_area,
+    "service_description"     : "ORA %s Recovery Area",
+    "has_perfdata"            : True,
+    "default_levels_variable" : "oracle_recovery_area_defaults",
+    "group"                   : "oracle_recovery_area",
+}
+
diff -Nru check-mk-1.2.2p3/oracle_recovery_status check-mk-1.2.6p12/oracle_recovery_status
--- check-mk-1.2.2p3/oracle_recovery_status	1970-01-01 00:00:00.000000000 +0000
+++ check-mk-1.2.6p12/oracle_recovery_status	2015-09-21 10:59:54.000000000 +0000
@@ -0,0 +1,171 @@
+#!/usr/bin/python
+# -*- encoding: utf-8; py-indent-offset: 4 -*-
+# +------------------------------------------------------------------+
+# |             ____ _               _        __  __ _  __           |
+# |            / ___| |__   ___  ___| | __   |  \/  | |/ /           |
+# |           | |   | '_ \ / _ \/ __| |/ /   | |\/| | ' /            |
+# |           | |___| | | |  __/ (__|   <    | |  | | . \            |
+# |            \____|_| |_|\___|\___|_|\_\___|_|  |_|_|\_\           |
+# |                                                                  |
+# | Copyright Mathias Kettner 2014             mk@mathias-kettner.de |
+# +------------------------------------------------------------------+
+#
+# This file is part of Check_MK.
+# The official homepage is at http://mathias-kettner.de/check_mk.
+#
+# check_mk is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by
+# the Free Software Foundation in version 2. check_mk is distributed
+# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with-
+# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE. See the GNU General Public License for more de-
+# tails. You should have received a copy of the GNU General Public
+# License along with GNU Make; see the file COPYING. If not, write
+# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
+# Boston, MA 02110-1301 USA.
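# [Editor's note] In the oracle_recovery_area check above, reclaimable
# space counts as free: it is subtracted from the used figure before the
# percentage is compared against the levels. A small illustrative sketch
# (not part of this patch; the function name is hypothetical):

def recovery_area_used_pct(size_mb, used_mb, reclaimable_mb):
    # guard against a zero-sized recovery area, as the check does
    if size_mb == 0:
        return 0.0
    # reclaimable space is treated as free before computing the percentage
    return float(used_mb - reclaimable_mb) / size_mb * 100

# agent sample "TUX12C 0 4800 19 0": 19 MB used of 4800 MB, none reclaimable
assert round(recovery_area_used_pct(4800, 19, 0), 1) == 0.4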
+
+# In cooperation with Thorsten Bruhns from OPITZ Consulting
+
+# <<<oracle_recovery_status>>>
+# TUX2|tux2|PRIMARY|MOUNTED|1|1405456155|ONLINE||NO|2719061
+# TUX2|tux2|PRIMARY|MOUNTED|2|1405456155|ONLINE||NO|2719061
+# new format with backupmode
+# <<<oracle_recovery_status>>>
+# TUX2|tux2|PRIMARY|READ WRITE|1|1419771465|317|ONLINE|NO|YES|8149107|NOT ACTIVE|489
+# TUX2|tux2|PRIMARY|READ WRITE|2|1419771465|317|ONLINE|NO|YES|8149107|NOT ACTIVE|489
+
+# Databases seem to also report lines with some data missing:
+# PV|PV|PRIMARY|READ WRITE|397|1433251398|7297|ONLINE|NO|YES|10740614283
+# PV|PV|PRIMARY|READ WRITE|398|1433251398|7297|ONLINE|NO|YES|10740614283
+# PV|PV|PRIMARY|READ WRITE|399|||ONLINE|||0
+# PV|PV|PRIMARY|READ WRITE|400|||ONLINE|||0
+# PV|PV|PRIMARY|READ WRITE|401|||ONLINE|||0
+
+def inventory_oracle_recovery_status(info):
+    return [ ( line[0], {} ) for line in info ]
+
+def check_oracle_recovery_status(item, params, info):
+    state = 0
+    offlinecount = 0
+    oldest_checkpoint_age = -1
+
+    oldest_backup_age = -1
+    backup_count = 0
+
+    perfdata = []
+
+    itemfound = False
+    for line in info:
+        if line[0] == item:
+            itemfound = True
+
+            if len(line) == 11:
+                db_name, db_unique_name, database_role, open_mode, filenr, \
+                    checkpoint_time, checkpoint_age, datafilestatus, recovery, fuzzy, checkpoint_change = line
+
+                backupmode = 'not checked'
+                backup_state = 'unknown'
+
+            elif len(line) == 13:
+                db_name, db_unique_name, database_role, open_mode, filenr, \
+                    checkpoint_time, checkpoint_age, datafilestatus, recovery, \
+                    fuzzy, checkpoint_change, backup_state, backup_age = line
+
+            if params.get("levels"):
+                warn, crit = params["levels"]
+
+            if backup_state == 'ACTIVE':
+                backup_count += 1
+                oldest_backup_age = max(int(backup_age), oldest_backup_age)
+
+            if datafilestatus == 'ONLINE':
+                if checkpoint_age:
+                    checkpoint_age = int(checkpoint_age)
+                    oldest_checkpoint_age = max(oldest_checkpoint_age, checkpoint_age)
+            else:
+                offlinecount += 1
+
+    if itemfound:
+        infotext = "%s database" % (database_role.lower())
+
+        if oldest_checkpoint_age == -1:
+            infotext += ", no online datafiles found(!!)"
+            state = 2
+        else:
+            infotext += ", oldest Checkpoint %s ago" \
+                % (get_age_human_readable(int(oldest_checkpoint_age)))
+
+        if (database_role == 'PRIMARY' and db_name == '_MGMTDB' and db_unique_name == '_mgmtdb') \
+           or not params.get("levels"):
+            # We ignore the state of the check when no parameters are known.
+            # _mgmtdb is the new internal instance from 12.1.0.2 on Grid Infrastructure.
+            perfdata.append(['checkpoint_age', oldest_checkpoint_age])
+        else:
+            if database_role == 'PRIMARY':
+                # The checkpoint age should not be high on the primary either,
+                # but there is no CRIT for an old checkpoint age here, as this
+                # is mostly not a serious issue; otherwise the standby would
+                # produce a warning or crit as well.
+                if oldest_checkpoint_age >= warn:
+                    infotext += '(!)'
+                    state = max(1, state)
+
+                perfdata.append(['checkpoint_age', oldest_checkpoint_age, warn])
+            else:
+                perfdata.append(['checkpoint_age', oldest_checkpoint_age, warn, crit])
+
+                # check the checkpoint age on a non-primary database!
+                if oldest_checkpoint_age >= crit:
+                    infotext += '(!!)'
+                    state = 2
+                elif oldest_checkpoint_age >= warn:
+                    infotext += '(!)'
+                    state = max(1, state)
+
+            infotext += ' (levels at %s/%s )' % (get_age_human_readable(warn), get_age_human_readable(crit))
+
+        if offlinecount > 0:
+            infotext += " %i datafiles offline(!!)" \
+                % (offlinecount)
+            state = 2
+
+        if oldest_backup_age > 0:
+            infotext += " %i datafiles in backup mode oldest is %s" % ( \
+                backup_count, get_age_human_readable(oldest_backup_age))
+
+            if params.get("backup_age"):
+
+                warn, crit = params["backup_age"]
+                infotext += " levels at (%s/%s)" % (get_age_human_readable(warn), get_age_human_readable(crit))
+                perfdata.append(['backup_age', oldest_backup_age, warn, crit])
+
+                if oldest_backup_age >= crit:
+                    infotext += '(!!)'
+                    state = 2
+                elif oldest_backup_age >= warn:
+                    infotext += '(!)'
+                    state = max(1, state)
+            else:
+                perfdata.append(['backup_age', oldest_backup_age])
+        else:
+
+            # Create 'dummy' performance data with 0:
+            # the age from the plugin is only valid while a datafile is in backup mode!
+            perfdata.append(['backup_age', 0])
+
+
+        return state, infotext, perfdata
+
+    # In case of missing information we assume that the login into
+    # the database has failed and we simply skip this check. It won't
+    # switch to UNKNOWN, but will get stale.
+    raise MKCounterWrapped("Login into database failed")
+
+
+check_info['oracle_recovery_status'] = {
+    "check_function"          : check_oracle_recovery_status,
+    "inventory_function"      : inventory_oracle_recovery_status,
+    "service_description"     : "ORA %s Recovery Status",
+    "has_perfdata"            : True,
+    "group"                   : "oracle_recovery_status",
+}
diff -Nru check-mk-1.2.2p3/oracle_rman check-mk-1.2.6p12/oracle_rman
--- check-mk-1.2.2p3/oracle_rman	1970-01-01 00:00:00.000000000 +0000
+++ check-mk-1.2.6p12/oracle_rman	2015-09-21 10:59:54.000000000 +0000
@@ -0,0 +1,88 @@
+#!/usr/bin/python
+# -*- encoding: utf-8; py-indent-offset: 4 -*-
+# +------------------------------------------------------------------+
+# |             ____ _               _        __  __ _  __           |
+# |            / ___| |__   ___  ___| | __   |  \/  | |/ /           |
+# |           | |   | '_ \ / _ \/ __| |/ /   | |\/| | ' /            |
+# |           | |___| | | |  __/ (__|   <    | |  | | . \            |
+# |            \____|_| |_|\___|\___|_|\_\___|_|  |_|_|\_\           |
+# |                                                                  |
+# | Copyright Mathias Kettner 2014             mk@mathias-kettner.de |
+# +------------------------------------------------------------------+
+#
+# This file is part of Check_MK.
+# The official homepage is at http://mathias-kettner.de/check_mk.
+#
+# check_mk is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by
+# the Free Software Foundation in version 2. check_mk is distributed
+# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with-
+# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE. See the GNU General Public License for more de-
+# tails. You should have received a copy of the GNU General Public
+# License along with GNU Make; see the file COPYING. If not, write
+# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
+# Boston, MA 02110-1301 USA.
+
+# <<<oracle_rman>>>
+# TUX2 COMPLETED 2014-07-08_17:27:59 2014-07-08_17:29:35 DB_INCR 32
+# TUX2 COMPLETED 2014-07-08_17:30:02 2014-07-08_17:30:06 ARCHIVELOG 121
+
+# Columns: SID STATUS START END BACKUPTYPE BACKUPAGE
+
+def inventory_oracle_rman(info):
+    return [ (line[0] + "." + line[4], {}) for line in info
+             # only check full, incremental and archivelog backups
+             if line[4] in ('ARCHIVELOG', 'DB_FULL', 'DB_INCR')]
+
+
+def check_oracle_rman(item, params, info):
+    for sid, status, start, end, backuptype, backupage in info:
+        if item == sid + "." + backuptype:
+            if not backupage:
+                return 0, "Backup is currently running"
+            backupage = int(backupage)
+            infotext = "Last backup %s ago" % get_age_human_readable(backupage * 60)
+
+            perfdata = []
+            state = 2
+            infotext = "no COMPLETED backup found in last 14 days"
+
+            if status in ('COMPLETED', 'COMPLETED WITH WARNINGS'):
+                if not backupage:
+                    # This should no longer be possible since the last fix
+                    return 3, "Unknown backupage in check found"
+
+                # backupage is time in minutes from agent!
+                backupage = int(backupage)*60
+                infotext = "Last backup %s ago" % get_age_human_readable(backupage)
+
+                state = 0
+                if "levels" in params:
+                    warn, crit = params.get("levels")
+                    if backupage >= crit:
+                        state = 2
+                    elif backupage >= warn:
+                        state = 1
+                    infotext += " (levels at %s/%s)" % (
+                        get_age_human_readable(warn),
+                        get_age_human_readable(crit))
+
+                    perfdata = [ ("age", backupage, warn, crit) ]
+                else:
+                    perfdata = [ ("age", backupage, ) ]
+
+            return state, infotext, perfdata
+
+    # In case of missing information we assume that the login into
+    # the database has failed and we simply skip this check. It won't
+    # switch to UNKNOWN, but will get stale.
+    raise MKCounterWrapped("Login into database failed")
+
+check_info['oracle_rman'] = {
+    "check_function"          : check_oracle_rman,
+    "inventory_function"      : inventory_oracle_rman,
+    "service_description"     : "ORA %s RMAN Backup",
+    "has_perfdata"            : True,
+    "group"                   : "oracle_rman",
+}
diff -Nru check-mk-1.2.2p3/oracle_rman_backups check-mk-1.2.6p12/oracle_rman_backups
--- check-mk-1.2.2p3/oracle_rman_backups	1970-01-01 00:00:00.000000000 +0000
+++ check-mk-1.2.6p12/oracle_rman_backups	2015-06-24 09:48:37.000000000 +0000
@@ -0,0 +1,73 @@
+#!/usr/bin/python
+# -*- encoding: utf-8; py-indent-offset: 4 -*-
+# +------------------------------------------------------------------+
+# |             ____ _               _        __  __ _  __           |
+# |            / ___| |__   ___  ___| | __   |  \/  | |/ /           |
+# |           | |   | '_ \ / _ \/ __| |/ /   | |\/| | ' /            |
+# |           | |___| | | |  __/ (__|   <    | |  | | . \            |
+# |            \____|_| |_|\___|\___|_|\_\___|_|  |_|_|\_\           |
+# |                                                                  |
+# | Copyright Mathias Kettner 2014             mk@mathias-kettner.de |
+# +------------------------------------------------------------------+
+#
+# This file is part of Check_MK.
+# The official homepage is at http://mathias-kettner.de/check_mk.
+#
+# check_mk is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by
+# the Free Software Foundation in version 2. check_mk is distributed
+# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with-
+# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE. See the GNU General Public License for more de-
+# tails. You should have received a copy of the GNU General Public
+# License along with GNU Make; see the file COPYING. If not, write
+# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
+# Boston, MA 02110-1301 USA.
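# [Editor's note] The oracle_rman check above receives BACKUPAGE from the
# agent in minutes and converts it to seconds before applying the optional
# levels. An illustrative sketch of that comparison (not part of this
# patch; the function name and the level values are hypothetical):

def rman_backup_state(backupage_minutes, levels=None):
    # the agent reports the age in minutes; the check compares in seconds
    age = int(backupage_minutes) * 60
    if levels:
        warn, crit = levels
        if age >= crit:
            return 2
        elif age >= warn:
            return 1
    return 0

# agent sample "... ARCHIVELOG 121": 121 minutes against 2h/4h levels
assert rman_backup_state(121, (7200, 14400)) == 1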
+
+# <<<oracle_rman_backups>>>
+# IODBSZ1 2013-04-08T16:33:53 COMPLETED 2013-04-08_16:33:54 2013-04-08_17:56:42 DB FULL
+# IODBSZ1 2013-04-23T15:28:26 RUNNING 2013-04-23_15:28:28 2013-04-23_15:31:02 ARCHIVELOG
+
+def inventory_oracle_rman_backups(info):
+    inventory = []
+    for line in info:
+        if line[1] != "FAILURE" and " ".join(line[5:]) in ('ARCHIVELOG', 'DB FULL', 'DB INCR'):
+            inventory.append(("%s.%s" % (line[0], " ".join(line[5:])), {}))
+    return inventory
+
+def check_oracle_rman_backups(item, params, info):
+    try:
+        sid, jobname = item.split('.')
+    except ValueError:
+        return (3, 'Invalid check item given (must be <SID>.<job name>)')
+
+    data = None
+    for line in info:
+        if line[0] == sid and " ".join(line[5:]) == jobname:
+            data = line
+            break
+    if not data:
+        return (3, 'Unable to find the job')
+
+    state = 0
+    output = []
+
+    job_state = line[2]
+
+    txt = "State: %s" % job_state
+    if job_state not in [ "COMPLETED", "RUNNING" ]:
+        txt += " (!!)"
+        state = max(state, 2)
+    output.append(txt)
+
+    output.append("Start-Time: %s" % line[3])
+    if job_state != "RUNNING":
+        output.append("End-Time: %s" % line[4])
+
+    return (state, ", ".join(output))
+
+check_info['oracle_rman_backups'] = {
+    "service_description" : "ORA %s RMAN Backup",
+    "check_function"      : check_oracle_rman_backups,
+    "inventory_function"  : inventory_oracle_rman_backups,
+}
diff -Nru check-mk-1.2.2p3/oracle_sessions check-mk-1.2.6p12/oracle_sessions
--- check-mk-1.2.2p3/oracle_sessions	2013-11-05 09:23:08.000000000 +0000
+++ check-mk-1.2.6p12/oracle_sessions	2015-09-21 10:59:54.000000000 +0000
@@ -7,7 +7,7 @@
 # |           | |___| | | |  __/ (__|   <    | |  | | . \            |
 # |            \____|_| |_|\___|\___|_|\_\___|_|  |_|_|\_\           |
 # |                                                                  |
-# | Copyright Mathias Kettner 2013             mk@mathias-kettner.de |
+# | Copyright Mathias Kettner 2014             mk@mathias-kettner.de |
 # +------------------------------------------------------------------+
 #
 # This file is part of Check_MK.
@@ -36,24 +36,28 @@
 def check_oracle_sessions(item, params, info):
     for line in info:
         if line[0] == item:
-            err = oracle_handle_ora_errors(line)
-            if err == False:
-                continue
-            elif isinstance(err, tuple):
-                return err
 
             warn, crit = params
             sessions = int(line[1])
-            infotext = " - %d active sessions (levels at %d/%d)" % (sessions, warn, crit)
+            infotext = "%d active sessions (levels at %d/%d)" % (sessions, warn, crit)
             perfdata = [("sessions", sessions, warn, crit)]
             if sessions >= crit:
-                return (2, "CRIT" + infotext, perfdata)
+                return (2, infotext, perfdata)
             elif sessions >= warn:
-                return (1, "WARN" + infotext, perfdata)
+                return (1, infotext, perfdata)
             else:
-                return (0, "OK" + infotext, perfdata)
-    return (3, "UNKNOWN - Database not existing or not running")
+                return (0, infotext, perfdata)
 
-check_info['oracle_sessions'] = (check_oracle_sessions, "ORA %s Sessions", 1, inventory_oracle_sessions )
-check_includes['oracle_sessions'] = [ "oracle.include" ]
-checkgroup_of["oracle_sessions"] = "oracle_sessions"
+    # In case of missing information we assume that the login into
+    # the database has failed and we simply skip this check. It won't
+    # switch to UNKNOWN, but will get stale.
+    raise MKCounterWrapped("Login into database failed")
+
+
+check_info["oracle_sessions"] = {
+    'check_function': check_oracle_sessions,
+    'inventory_function': inventory_oracle_sessions,
+    'service_description': 'ORA %s Sessions',
+    'has_perfdata': True,
+    'group': 'oracle_sessions',
+}
diff -Nru check-mk-1.2.2p3/oracle_tablespaces check-mk-1.2.6p12/oracle_tablespaces
--- check-mk-1.2.2p3/oracle_tablespaces	2013-11-05 09:23:08.000000000 +0000
+++ check-mk-1.2.6p12/oracle_tablespaces	2015-09-21 10:59:54.000000000 +0000
@@ -7,7 +7,7 @@
 # |           | |___| | | |  __/ (__|   <    | |  | | . \            |
 # |            \____|_| |_|\___|\___|_|\_\___|_|  |_|_|\_\           |
 # |                                                                  |
-# | Copyright Mathias Kettner 2013             mk@mathias-kettner.de |
+# | Copyright Mathias Kettner 2014             mk@mathias-kettner.de |
 # +------------------------------------------------------------------+
 #
 # This file is part of Check_MK.
@@ -24,6 +24,12 @@
 # to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
 # Boston, MA 02110-1301 USA.
 
+# No used-space check for tablespaces with CONTENTS in ('TEMPORARY','UNDO').
+# It is impossible to check the used space in UNDO and TEMPORARY tablespaces;
+# these types of tablespaces are ignored in this plugin.
+# This restriction only works with newer agents, because we need an
+# additional parameter at the end of each datafile line.
+
 # <<<oracle_tablespaces>>>
 # pengt /database/pengt/daten155/dbf/system_01.dbf SYSTEM AVAILABLE YES 38400 4194302 38392 1280 SYSTEM 8192 ONLINE
 # pengt /database/pengt/daten155/dbf/undotbs_01.dbf UNDOTBS1 AVAILABLE YES 128000 4194302 127992 640 ONLINE 8192 ONLINE
@@ -46,7 +52,7 @@
 # 10. block size in bytes
 # 11. status of the table space
 # 12. free space in the datafile
-
+# 13. Tablespace-Type (PERMANENT, UNDO, TEMPORARY)
 # This definition needs to be removed at a later stage
 # A previous version of this check didn't write the parameter
@@ -58,20 +64,22 @@
 factory_settings["oracle_tablespaces_defaults"] = {
     "levels"         : (10.0, 5.0),
     "magic_normsize" : 1000,
-    "levels_low"     : (0.0, 0.0)
+    "magic_maxlevels": (60.0, 50.0),
+    "defaultincrement": True,
 }
 
 # Whether to check auto extend settings
 oracle_tablespaces_check_autoext = True
 
-# Whether to check default increment size
+# This parameter is deprecated and only kept for old configurations with
+# the parameter in main.mk. It is not used anymore!
 oracle_tablespaces_check_default_increment = True
 
 def inventory_oracle_tablespaces(info):
     tablespaces = set([])
     autoextensible = set([])
     for line in info:
-        if len(line) != 13:
+        if len(line) not in (13,14):
             continue
         ts = (line[0], line[2])
         if line[11] in [ "ONLINE", "READONLY" ]:
@@ -102,56 +110,46 @@
     else:
         # A list of levels. Choose the correct one depending on the
        # size of the current tablespace
-        found = False
         for to_size, this_levels in params.get("levels"):
             if size_bytes > to_size:
                 warn, crit = this_levels
-                found = True
-        if not found:
-            return (None, None)
-
-    if magic:
-        # convert warn/crit to percentage
-        if type(warn) != float:
-            warn = savefloat(warn * 1024 * 1024 / float(size_bytes)) * 100
-        if type(crit) != float:
-            crit = savefloat(crit * 1024 * 1024 / float(size_bytes)) * 100
-
-        normsize = params["magic_normsize"]
-        hbytes_size = size_bytes / (float(normsize) * 1024 * 1024)
-        felt_size = hbytes_size ** magic
-        scale = felt_size / hbytes_size
-        warn_scaled = 100 - (( 100 - warn ) * scale)
-        crit_scaled = 100 - (( 100 - crit ) * scale)
-
-        # Make sure, levels do never get too low due to magic factor
-        lowest_warning_level, lowest_critical_level = params["levels_low"]
-        if warn_scaled < lowest_warning_level:
-            warn_scaled = lowest_warning_level
-        if crit_scaled < lowest_critical_level:
-            crit_scaled = lowest_critical_level
-        warn_bytes = savefloat(size_bytes * warn_scaled / 100)
-        crit_bytes = savefloat(size_bytes * crit_scaled / 100)
-    else:
-        # warn/crit level are float => percentages of max size, otherwise MB
-        if type(warn) == float:
-            warn_bytes = warn / 100.0 * size_bytes
+                break
         else:
-            warn_bytes = warn * 1024 * 1024
+            return None, None, "", False
 
-        if type(crit) == float:
-            crit_bytes = crit / 100.0 * size_bytes
+    # warn/crit level are float => percentages of max size, otherwise MB
+    if type(warn) == float:
+        output_as_percentage = True
+        if magic:
+            normsize = params["magic_normsize"] * 1024 * 1024
+            hbytes_size = size_bytes / float(normsize)
+            felt_size = hbytes_size ** magic
+            scale = felt_size / hbytes_size
+            warn *= scale
+            crit *= scale
+            max_warning_level, max_critical_level = params["magic_maxlevels"]
+            warn = min(warn, max_warning_level)
+            crit = min(crit, max_critical_level)
+        levels_text = " (levels at %.1f%%/%.1f%%)" % (warn, crit)
+        warn_bytes = warn * size_bytes / 100
+        crit_bytes = crit * size_bytes / 100
+
+    # Absolute free space in MB
+    else:
+        output_as_percentage = False
+        warn_bytes = warn * 1024 * 1024
+        crit_bytes = crit * 1024 * 1024
+        levels_text = " (levels at %s/%s)" % (get_bytes_human_readable(warn_bytes), get_bytes_human_readable(crit_bytes))
-        else:
-            crit_bytes = crit * 1024 * 1024
 
-    return warn_bytes, crit_bytes
+
+    return warn_bytes, crit_bytes, levels_text, output_as_percentage
 
 def check_oracle_tablespaces(item, params, info):
     try:
         sid, tbsname = item.split('.')
     except ValueError:
-        return (3, 'UNKNOWN - Invalid check item given (must be <SID>.<tablespace>)')
+        return (3, 'Invalid check item given (must be <SID>.<tablespace>)')
 
     ts_status = None
     num_files = 0
@@ -163,6 +161,7 @@
     num_increments = 0
     increment_size = 0
     free_space = 0
+    ts_type = None
 
     # Conversion of old autochecks params
     if type(params) == tuple:
@@ -179,7 +178,13 @@
         elif isinstance(err, tuple):
             return err
 
-        if line[2] == tbsname and len(line) == 13:
+        if line[2] == tbsname and len(line) in (13,14):
+            # the ts_type is a new value from the agent
+            if len(line) == 14:
+                ts_type = line[13]
+            else:
+                # old behavior: all tablespaces are treated as PERMANENT
+                ts_type = 'PERMANENT'
             ts_status = line[11]
             blocksize = int(line[10])
             num_files += 1
@@ -196,10 +201,15 @@
                 incsize = int(line[8]) # size of next increment in blocks
                 if incsize == 1:
                     uses_default_increment = True
-                incs = free_bl / incsize
+
+                incs, rest = divmod(free_bl, incsize)
+                if rest:
+                    incs += 1
+
                 num_increments += incs
-                increment_size += blocksize * incsize * incs
-                free_space += blocksize * (incsize * incs + (int(line[12])))
+                increment_size += blocksize * incsize * incs - rest * blocksize
+                free_space += blocksize * (incsize * incs + (int(line[12]))) - rest * blocksize
+
             # not autoextensible: take current size as maximum
             else:
                 my_max_size = blocksize * int(line[5])
@@ -208,23 +218,33 @@
 
     if ts_status == None:
-        return (3, "UNKNOWN - Tablespace not found")
+        # In case of missing information we assume that the login into
+        # the database has failed and we simply skip this check. It won't
+        # switch to UNKNOWN, but will get stale.
+        raise MKCounterWrapped("Login into database failed")
 
-    infotext = " - %s, size %s, used %s" % \
+    warn, crit, levels_text, output_as_percentage = get_tablespace_levels_in_bytes(max_size, params)
+
+    infotext = "%s (%s), size: %s, free: " % \
         (ts_status,
-         get_bytes_human_readable(current_size),
-         get_bytes_human_readable(used))
+         ts_type,
+         get_bytes_human_readable(current_size))
+
+    if output_as_percentage:
+        infotext += "%.1f%%" % (100.0 * (max_size - used) / max_size)
+    else:
+        infotext += get_bytes_human_readable(max_size - used)
 
     if num_extensible > 0:
-        infotext += ", max %s" % get_bytes_human_readable(max_size)
+        infotext += ", maximum size: %s" % get_bytes_human_readable(max_size)
         infotext += " - %d increments (%s)" % \
             (num_increments, get_bytes_human_readable(increment_size))
 
     status = 0
 
     # Check increment size, should not be set to default (1)
-    if oracle_tablespaces_check_default_increment:
+    if params.get("defaultincrement"):
         if uses_default_increment:
             infotext += ", DEFAULT INCREMENT(!)"
             status = 1
@@ -232,39 +252,36 @@
     # Check autoextend status if parameter not set to None
     if autoext != None:
         if autoext and num_extensible == 0:
-            infotext += ", AUTOEXTEND(!!)"
+            infotext += ", NO AUTOEXTEND(!!)"
             status = 2
         elif not autoext and num_extensible > 0:
-            infotext += ", NO AUTOTEXTEND(!!)"
+            infotext += ", AUTOTEXTEND(!!)"
             status = 2
         elif num_extensible > 0:
             infotext += ", autoextend"
         else:
             infotext += ", no autoextend"
-
-    warn, crit = get_tablespace_levels_in_bytes(max_size, params)
-
     # Check free space, but only if status is not READONLY
-    if ts_status != "READONLY":
+    # and Tablespace-Type must be PERMANENT
+    if ts_status != "READONLY" and ts_type == 'PERMANENT':
         if (crit is not None and free_space <= crit) \
            or (warn is not None and free_space <= warn):
             infotext += ", only %s left" % get_bytes_human_readable(free_space)
-            infotext += " (levels at %s/%s)" % (
-                get_bytes_human_readable(warn), get_bytes_human_readable(crit))
+            infotext += levels_text
             if free_space <= crit:
                 status = 2
             else:
                 status = max(1, status)
 
-    perfdata = [ ("size", current_size, max_size - warn, max_size - crit),
+    perfdata = [ ("size", current_size, max_size - (warn or 0), max_size - (crit or 0)),
                  ("used", used),
                  ("max_size", max_size) ]
 
     if num_files != 1 or num_avail != 1 or num_extensible != 1:
         infotext += ", %d data files (%d avail, %d autoext)" % (num_files, num_avail, num_extensible)
 
-    return (status, nagios_state_names[status] + infotext, perfdata)
+    return status, infotext, perfdata
 
 check_info['oracle_tablespaces'] = {
     "service_description"     : "ORA %s Tablespace",
@@ -275,4 +292,3 @@
     "default_levels_variable" : "oracle_tablespaces_defaults",
     "includes"                : [ "oracle.include" ]
 }
-check_config_variables.append("oracle_tablespaces_check_default_increment")
diff -Nru check-mk-1.2.2p3/oracle_undostat check-mk-1.2.6p12/oracle_undostat
--- check-mk-1.2.2p3/oracle_undostat	1970-01-01 00:00:00.000000000 +0000
+++ check-mk-1.2.6p12/oracle_undostat	2015-09-21 10:59:54.000000000 +0000
@@ -0,0 +1,110 @@
+#!/usr/bin/python
+# -*- encoding: utf-8; py-indent-offset: 4 -*-
+# +------------------------------------------------------------------+
+# |             ____ _               _        __  __ _  __           |
+# |            / ___| |__   ___  ___| | __   |  \/  | |/ /           |
+# |           | |   | '_ \ / _ \/ __| |/ /   | |\/| | ' /            |
+# |           | |___| | | |  __/ (__|   <    | |  | | . \            |
+# |            \____|_| |_|\___|\___|_|\_\___|_|  |_|_|\_\           |
+# |                                                                  |
+# | Copyright Mathias Kettner 2014             mk@mathias-kettner.de |
+# +------------------------------------------------------------------+
+#
+# This file is part of Check_MK.
+# The official homepage is at http://mathias-kettner.de/check_mk.
+#
+# check_mk is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by
+# the Free Software Foundation in version 2. check_mk is distributed
+# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with-
+# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE. See the GNU General Public License for more de-
+# tails. You should have received a copy of the GNU General Public
+# License along with GNU Make; see the file COPYING. If not, write
+# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
+# Boston, MA 02110-1301 USA.
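# [Editor's note] The reworked get_tablespace_levels_in_bytes above scales
# percentage levels by the "magic factor": a level is effectively multiplied
# by (size/normsize)**(magic-1), so large tablespaces get damped free-space
# levels and small ones get boosted levels, capped by magic_maxlevels. A
# standalone sketch of that scaling (illustrative only, not part of this
# patch; the magic value 0.8 is a hypothetical example):

def scale_levels_with_magic(size_bytes, levels=(10.0, 5.0), magic=0.8,
                            normsize_mb=1000, maxlevels=(60.0, 50.0)):
    # free-space percentage levels; (10.0, 5.0) is the factory default
    warn, crit = levels
    hbytes_size = size_bytes / float(normsize_mb * 1024 * 1024)
    scale = hbytes_size ** magic / hbytes_size   # == hbytes_size ** (magic - 1)
    # cap the scaled levels at magic_maxlevels, as the check does
    return (min(warn * scale, maxlevels[0]),
            min(crit * scale, maxlevels[1]))

# a 100 GB tablespace: the 10%/5% free-space levels shrink to roughly 4%/2%
print(scale_levels_with_magic(100 * 1024 ** 3))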
+
+# In cooperation with Thorsten Bruhns from OPITZ Consulting
+
+# <<<oracle_undostat>>>
+# TUX2 160 0 1081 300 0
+
+factory_settings["oracle_undostat_defaults"] = {
+    "levels"              : (600, 300),
+    "nospaceerrcnt_state" : 2,
+}
+
+
+def inventory_oracle_undostat(info):
+    return [ (line[0], {}) for line in info ]
+
+
+def check_oracle_undostat(item, params, info):
+    for line in info:
+        if line[0] == item:
+            infotext = ''
+
+            activeblks, maxconcurrency, tuned_undoretention, maxquerylen, nospaceerrcnt = map(int, line[1:])
+            warn, crit = params["levels"]
+
+            if tuned_undoretention == -1:
+                state = 0
+            elif tuned_undoretention <= crit:
+                state = 2
+            elif tuned_undoretention <= warn:
+                state = 1
+            else:
+                state = 0
+
+            if nospaceerrcnt == 0:
+                nospaceerrcntpic = ""
+            else:
+
+                if params.get('nospaceerrcnt_state'):
+                    state_errcnt = params.get('nospaceerrcnt_state')
+                    state = max(state, state_errcnt)
+
+                    if state_errcnt == 1:
+                        nospaceerrcntpic = "(!)"
+                    elif state_errcnt == 2:
+                        nospaceerrcntpic = "(!!)"
+                    elif state_errcnt == 3:
+                        nospaceerrcntpic = "(?)"
+
+            if tuned_undoretention >= 0:
+                infotext = "%s Undoretention (levels at %s/%s), %d active undoblocks, " \
+                    % (get_age_human_readable(tuned_undoretention),
+                       get_age_human_readable(warn),
+                       get_age_human_readable(crit),
+                       activeblks)
+
+            infotext += "%d max concurrent transactions, %s max querylen, %d space errors%s" \
+                % (maxconcurrency,
+                   get_age_human_readable(maxquerylen),
+                   nospaceerrcnt, nospaceerrcntpic)
+
+
+            perfdata = [('activeblk', activeblks),
+                        ('transconcurrent', maxconcurrency),
+                        ('tunedretention', tuned_undoretention, warn, crit),
+                        ('querylen', maxquerylen),
+                        ('nonspaceerrcount', nospaceerrcnt),
+                       ]
+
+            return state, infotext, perfdata
+
+    # In case of missing information we assume that the login into
+    # the database has failed and we simply skip this check. It won't
+    # switch to UNKNOWN, but will get stale.
+    raise MKCounterWrapped("Login into database failed")
+
+
+check_info['oracle_undostat'] = {
+    "check_function"          : check_oracle_undostat,
+    "inventory_function"      : inventory_oracle_undostat,
+    "service_description"     : "ORA %s Undo Retention",
+    "has_perfdata"            : True,
+    "default_levels_variable" : "oracle_undostat_defaults",
+    "group"                   : "oracle_undostat",
+}
+
diff -Nru check-mk-1.2.2p3/oracle_version check-mk-1.2.6p12/oracle_version
--- check-mk-1.2.2p3/oracle_version	2013-11-05 09:23:08.000000000 +0000
+++ check-mk-1.2.6p12/oracle_version	2015-06-24 09:48:37.000000000 +0000
@@ -7,7 +7,7 @@
 # |           | |___| | | |  __/ (__|   <    | |  | | . \            |
 # |            \____|_| |_|\___|\___|_|\_\___|_|  |_|_|\_\           |
 # |                                                                  |
-# | Copyright Mathias Kettner 2013             mk@mathias-kettner.de |
+# | Copyright Mathias Kettner 2014             mk@mathias-kettner.de |
 # +------------------------------------------------------------------+
 #
 # This file is part of Check_MK.
@@ -39,8 +39,8 @@
     elif isinstance(err, tuple):
         return err
 
-    return (0, 'OK - Version: ' + " ".join(line[1:]))
-    return (3, 'UNKNOWN - no version information, database might be stopped')
+    return (0, 'Version: ' + " ".join(line[1:]))
+    return (3, 'no version information, database might be stopped')
 
 check_info['oracle_version'] = {
     "check_function"          : check_oracle_version,
diff -Nru check-mk-1.2.2p3/ovs_bonding check-mk-1.2.6p12/ovs_bonding
--- check-mk-1.2.2p3/ovs_bonding	2013-11-05 09:23:08.000000000 +0000
+++ check-mk-1.2.6p12/ovs_bonding	2015-06-24 09:48:37.000000000 +0000
@@ -7,7 +7,7 @@
 # |           | |___| | | |  __/ (__|   <    | |  | | . 
\ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. diff -Nru check-mk-1.2.2p3/package_info check-mk-1.2.6p12/package_info --- check-mk-1.2.2p3/package_info 2013-11-05 09:42:58.000000000 +0000 +++ check-mk-1.2.6p12/package_info 2015-09-21 11:01:35.000000000 +0000 @@ -1,8 +1,15 @@ {'author': 'Mathias Kettner', 'description': 'This package is automatically created during setup of Check_MK and contains files shipped with the official release of Check_MK.', 'download_url': 'http://mathias-kettner.de/check_mk.html', - 'files': {'agents': ['Makefile', - 'apache_status.cfg', + 'files': {'agents': ['asmcmd.sh', + 'cfg_examples/sqlnet.ora', + 'cfg_examples/logwatch.cfg', + 'cfg_examples/jolokia.cfg', + 'cfg_examples/nginx_status.cfg', + 'cfg_examples/apache_status.cfg', + 'cfg_examples/sqlplus.sh', + 'check-mk-agent_1.2.6p12-1_all.deb', + 'check-mk-agent-1.2.6p12-1.noarch.rpm', 'check_mk_agent.aix', 'check_mk_agent.freebsd', 'check_mk_agent.hpux', @@ -12,106 +19,263 @@ 'check_mk_agent.openbsd', 'check_mk_agent.openvms', 'check_mk_agent.solaris', + 'check-mk-agent.spec', 'check_mk_caching_agent.linux', - 'hpux/hpux_statgrab', - 'hpux/hpux_lunstats', - 'jolokia.cfg', - 'logwatch.cfg', - 'plugins/db2_mem.sh', - 'plugins/mk_mysql', - 'plugins/dmraid', - 'plugins/veritas/vxvm_enclosures', - 'plugins/veritas/vxvm_objstatus', - 'plugins/veritas/vxvm_multipath', - 'plugins/mailman_lists', - 'plugins/plesk_backups', - 'plugins/mk_oracle', + 'mk-job', + 'mk-job.solaris', + 'plugins/mk_inventory.solaris', + 'plugins/symantec_av', 'plugins/apache_status', - 'plugins/sylo', - 'plugins/j4p_performance', - 'plugins/dmi_sysinfo', - 'plugins/nfsexports', 'plugins/mk_jolokia', - 'plugins/plesk_domains', - 'plugins/mk_logwatch', + 'plugins/mk_oracle_asm', + 'plugins/mk_oracle.aix', + 'plugins/mk_logwatch_aix', + 'plugins/mk_oracle.solaris', + 'plugins/unitrends_replication', + 'plugins/hpux_statgrab', 'plugins/smart', - 'plugins/mk_zypper', + 'plugins/mk_mysql', + 'plugins/netstat.linux', + 'plugins/db2_mem', + 'plugins/netstat.aix', + 'plugins/hpux_lunstats', + 'plugins/unitrends_backup', + 'plugins/dnsclient', + 'plugins/lnx_quota', 'plugins/mk_postgres', + 'plugins/mk_zypper', + 'plugins/mk_logwatch', + 'plugins/nfsexports', + 'plugins/mk_inventory.linux', + 'plugins/mk_inventory.aix', + 'plugins/nginx_status', + 'plugins/websphere_mq', + 'plugins/mailman_lists', + 'plugins/mk_logins', + 'plugins/runas', + 'plugins/kaspersky_av', 'plugins/mk_tsm', - 'sqlplus.sh', + 'plugins/nfsexports.solaris', + 'plugins/jar_signature', + 'plugins/plesk_domains', + 'plugins/vxvm', + 'plugins/mk_sap', + 'plugins/mk_oracle', + 'plugins/README', + 'plugins/mk_oracle_crs', + 'plugins/plesk_backups', + 'sap/sap.cfg', + 'sap/sapnwrfc-0.19.tar.gz', + 'special/agent_allnet_ip_sensoric', + 'special/agent_ucs_bladecenter', + 'special/agent_vsphere.pysphere', + 'special/agent_innovaphone', + 'special/agent_ibmsvc', + 'special/agent_netapp', + 'special/agent_random', + 'special/agent_fritzbox', + 'special/agent_hivemanager', + 'special/agent_emcvnx', + 'special/agent_vsphere', + 'special/agent_activemq', 'waitmax', - 'waitmax.c', - 'windows/installer.ico', - 'windows/check_mk_agent.cc', - 'windows/crash.cc', - 'windows/endless.bat', - 'windows/installer.nsi', + 'windows/check_mk_agent.msi', 
'windows/mrpe/check_crit.bat', - 'windows/check_mk.example.ini', 'windows/install_agent.exe', - 'windows/plugins/windows_time.bat', - 'windows/plugins/dmi_sysinfo.bat', - 'windows/plugins/mssql.vbs', - 'windows/plugins/windows_updates.vbs', - 'windows/plugins/ad_replication.bat', + 'windows/check_mk.example.ini', + 'windows/check_mk_agent-64.exe', + 'windows/nowin.exe', + 'windows/check_mk_agent.exe', + 'windows/install_agent-64.exe', + 'windows/plugins/mk_oracle.bat', + 'windows/plugins/windows_broadcom_bonding.bat', + 'windows/plugins/windows_os_bonding.ps1', 'windows/plugins/megaraid.bat', - 'windows/plugins/wmicchecks.bat', - 'windows/plugins/win_dhcp_pools.bat', + 'windows/plugins/tsm_checks.bat', 'windows/plugins/windows_multipath.vbs', - 'windows/plugins/mk_oracle.bat', - 'windows/nowin.cc', - 'windows/Makefile', - 'windows/check_mk_agent.exe', - 'windows/nowin.exe', - 'windows/crash.exe', + 'windows/plugins/citrix_xenapp.ps1', + 'windows/plugins/hyperv_vms.ps1', + 'windows/plugins/win_dhcp_pools.bat', + 'windows/plugins/citrix_licenses.vbs', + 'windows/plugins/wmic_if.ps1', + 'windows/plugins/windows_tasks.ps1', + 'windows/plugins/mk_inventory.ps1', + 'windows/plugins/windows_updates.vbs', + 'windows/plugins/deprecated/citrix_serverload.ps1', + 'windows/plugins/deprecated/dmi_sysinfo.bat', + 'windows/plugins/deprecated/psperf.bat', + 'windows/plugins/deprecated/windows_time.bat', + 'windows/plugins/deprecated/wmicchecks.bat', + 'windows/plugins/deprecated/citrix_sessions.ps1', + 'windows/plugins/netstat_an.bat', + 'windows/plugins/veeam_backup_status.ps1_', + 'windows/plugins/win_dmidecode.bat', + 'windows/plugins/win_printers.ps1', + 'windows/plugins/mssql.vbs', + 'windows/plugins/wmic_if.bat', + 'windows/plugins/arcserve_backup.ps1', + 'windows/plugins/windows_intel_bonding.bat', + 'windows/plugins/ad_replication.bat', + 'windows/plugins/veeam_backup_status.bat', + 'xinetd_caching.conf', 'xinetd.conf', - 'xinetd_caching.conf'], + 'z_os/waitmax'], 'checkman': ['3ware_disks', '3ware_info', '3ware_units', 'ad_replication', + 'adva_fsp_current', + 'adva_fsp_if', + 'adva_fsp_temp', 'aironet_clients', 'aironet_errors', + 'aix_diskiod', 'aix_lvm', + 'aix_memory', 'aix_multipath', + 'akcp_daisy_temp', + 'akcp_sensor_drycontact', 'akcp_sensor_humidity', 'akcp_sensor_temp', + 'alcatel_power', + 'allnet_ip_sensoric.humidity', + 'allnet_ip_sensoric.pressure', + 'allnet_ip_sensoric.temp', + 'allnet_ip_sensoric.tension', 'apache_status', + 'apc_ats_output', + 'apc_ats_status', + 'apc_humidity', + 'apc_inputs', + 'apc_inrow_airflow', + 'apc_inrow_fanspeed', + 'apc_inrow_temp', + 'apc_mod_pdu_modules', + 'apc_powerswitch', 'apc_rackpdu_power', 'apc_symmetra', 'apc_symmetra_ext_temp', 'apc_symmetra_power', 'apc_symmetra_temp', + 'apc_symmetra_test', + 'arc_raid_status', + 'arcserve_backup', + 'arris_cmts_cpu', + 'arris_cmts_mem', + 'arris_cmts_temp', + 'bintec_brrp_status', 'bintec_info', + 'bintec_sensors.fan', + 'bintec_sensors.temp', + 'bintec_sensors.voltage', 'blade_bays', 'blade_blades', 'blade_blowers', + 'blade_bx_blades', + 'blade_bx_load', + 'blade_bx_powerfan', + 'blade_bx_powermod', + 'blade_bx_temp', 'blade_health', 'blade_mediatray', - 'blade_misc', 'blade_powerfan', 'blade_powermod', + 'bluecat_commandserver', + 'bluecat_dhcp', + 'bluecat_dns', + 'bluecat_dns_queries', + 'bluecat_ha', + 'bluecat_ntp', + 'bluecoat_diskcpu', + 'bluecoat_sensors', + 'brocade.fan', 'brocade_fcport', + 'brocade_info', + 'brocade_mlx_fan', + 'brocade_mlx.module_cpu', + 'brocade_mlx.module_mem', + 
'brocade_mlx.module_status', + 'brocade_mlx_power', + 'brocade_mlx_temp', + 'brocade.power', + 'brocade.temp', + 'brocade_tm', + 'brocade_vdx_status', 'canon_pages', + 'carel_sensors', 'carel_uniflair_cooling', + 'casa_cpu_mem', + 'casa_cpu_temp', + 'casa_cpu_util', + 'casa_fan', + 'casa_power', 'cbl_airlaser.hardware', 'cbl_airlaser.status', + 'check_disk_smb', + 'check_dns', + 'check_form_submit', + 'check_http', + 'check_icmp', + 'check_ldap', 'check_mk.only_from', + 'check_notify_count', + 'checkpoint_connections', + 'checkpoint_packets', + 'check_smtp', + 'check_sql', + 'check_ssh', + 'check_tcp', + 'check_traceroute', + 'check_uniserv', + 'chrony', + 'cifsmounts', 'cisco_asa_failover', + 'cisco_cpu', 'cisco_fan', + 'cisco_fantray', + 'cisco_fru_power', 'cisco_hsrp', 'cisco_mem', 'cisco_power', 'cisco_qos', + 'cisco_secure', + 'cisco_sys_mem', 'cisco_temp', + 'cisco_temperature', + 'cisco_temp_perf', 'cisco_temp_sensor', - 'cmctc.temp', + 'cisco_vpn_tunnel', + 'cisco_vss', + 'cisco_wlc', + 'cisco_wlc_clients', + 'citrix_licenses', + 'citrix_serverload', + 'citrix_sessions', + 'climaveneta_alarm', + 'climaveneta_fan', + 'climaveneta_temp', + 'cmciii.access', + 'cmciii.can_current', + 'cmciii.humidity', + 'cmciii.io', + 'cmciii_lcp_airin', + 'cmciii_lcp_airout', + 'cmciii_lcp_fans', + 'cmciii_lcp_waterflow', + 'cmciii_lcp_waterin', + 'cmciii_lcp_waterout', + 'cmciii.psm_current', + 'cmciii.psm_plugs', + 'cmciii.sensor', + 'cmciii.state', + 'cmciii.temp', 'cmctc_lcp.blower', 'cmctc_lcp.blowergrade', 'cmctc_lcp.flow', 'cmctc_lcp.regulator', 'cmctc_lcp.temp', 'cmctc_psm_m', + 'cmctc.temp', 'cpsecure_sessions', 'cpu.loads', 'cpu.threads', @@ -122,10 +286,32 @@ 'decru_perf', 'decru_power', 'decru_temps', + 'dell_chassis_fans', + 'dell_chassis_io', + 'dell_chassis_kvm', + 'dell_chassis_power', + 'dell_chassis_powersupplies', + 'dell_chassis_slots', + 'dell_chassis_status', + 'dell_chassis_temp', + 'dell_idrac_disks', + 'dell_om_disks', + 'dell_om_esmlog', + 'dell_om_mem', + 'dell_om_processors', + 'dell_om_sensors', 'dell_powerconnect_cpu', 'dell_powerconnect_fans', 'dell_powerconnect_psu', 'dell_powerconnect_temp', + 'dell_poweredge_amperage.current', + 'dell_poweredge_amperage.power', + 'dell_poweredge_cpu', + 'dell_poweredge_mem', + 'dell_poweredge_netdev', + 'dell_poweredge_pci', + 'dell_poweredge_status', + 'dell_poweredge_temp', 'df', 'df_netapp', 'df_netapp32', @@ -133,20 +319,86 @@ 'dmi_sysinfo', 'dmraid.ldisks', 'dmraid.pdisks', + 'docsis_channels_downstream', + 'docsis_cm_status', + 'domino_info', + 'domino_mailqueues', + 'domino_tasks', + 'domino_transactions', + 'domino_users', 'drbd', 'drbd.disk', 'drbd.net', 'drbd.stats', - 'dummy', + 'emc_datadomain_disks', + 'emc_datadomain_fans', + 'emc_datadomain_fs', + 'emc_datadomain_nvbat', + 'emc_datadomain_power', + 'emc_datadomain_temps', + 'emc_isilon.clusterhealth', + 'emc_isilon_diskstatus', + 'emc_isilon_iops', + 'emc_isilon.names', + 'emc_isilon.nodehealth', + 'emc_isilon.nodes', + 'emcvnx_disks', + 'emcvnx_hba', + 'emcvnx_hwstatus', + 'emcvnx_info', + 'emcvnx_raidgroups.capacity', + 'emcvnx_raidgroups.capacity_contiguous', + 'emcvnx_raidgroups.list_disks', + 'emcvnx_raidgroups.list_luns', + 'emerson_stat', + 'emerson_temp', + 'enterasys_cpu_util', + 'enterasys_fans', + 'enterasys_lsnat', + 'enterasys_powersupply', + 'enterasys_temp', + 'esx_vsphere_counters.diskio', + 'esx_vsphere_counters.if', + 'esx_vsphere_counters.ramdisk', + 'esx_vsphere_counters.uptime', + 'esx_vsphere_datastores', + 'esx_vsphere_hostsystem.cpu_usage', + 
'esx_vsphere_hostsystem.maintenance', + 'esx_vsphere_hostsystem.mem_usage', + 'esx_vsphere_hostsystem.mem_usage_cluster', + 'esx_vsphere_hostsystem.multipath', + 'esx_vsphere_hostsystem.state', + 'esx_vsphere_licenses', + 'esx_vsphere_objects', + 'esx_vsphere_objects.count', + 'esx_vsphere_sensors', + 'esx_vsphere_vm.cpu', + 'esx_vsphere_vm.datastores', + 'esx_vsphere_vm.guest_tools', + 'esx_vsphere_vm.heartbeat', + 'esx_vsphere_vm.mem_usage', + 'esx_vsphere_vm.name', + 'esx_vsphere_vm.running_on', + 'esx_vsphere_vm.snapshots', + 'etherbox.humidity', + 'etherbox.smoke', + 'etherbox.switch', + 'etherbox.temp', + 'f5_bigip_chassis_temp', 'f5_bigip_cluster', + 'f5_bigip_cluster_v11', + 'f5_bigip_conns', + 'f5_bigip_cpu_temp', 'f5_bigip_fans', 'f5_bigip_interfaces', 'f5_bigip_pool', 'f5_bigip_psu', - 'f5_bigip_temp', 'f5_bigip_vserver', - 'fc_brocade_port', - 'fc_brocade_port_detailed', + 'fast_lta_headunit.replication', + 'fast_lta_headunit.status', + 'fast_lta_silent_cubes.capacity', + 'fast_lta_volumes', + 'fc_port', 'fileinfo', 'fileinfo.groups', 'fjdarye100_cadaps', @@ -160,6 +412,15 @@ 'fjdarye100_rluns', 'fjdarye100_sum', 'fjdarye100_syscaps', + 'fjdarye101_cadaps', + 'fjdarye101_cmods', + 'fjdarye101_cmods_mem', + 'fjdarye101_conencs', + 'fjdarye101_disks', + 'fjdarye101_disks.summary', + 'fjdarye101_rluns', + 'fjdarye101_sum', + 'fjdarye101_syscaps', 'fjdarye60_cadaps', 'fjdarye60_cmods', 'fjdarye60_cmods_flash', @@ -178,16 +439,37 @@ 'fortigate_cpu', 'fortigate_memory', 'fortigate_sessions', + 'fritz.config', + 'fritz.conn', + 'fritz.link', + 'fritz.wan_if', 'fsc_fans', 'fsc_ipmi_mem_status', 'fsc_subsystems', 'fsc_temp', + 'genua_carp', + 'genua_fan', + 'genua_pfstate', + 'genua_state_correlation', 'h3c_lanswitch_cpu', 'h3c_lanswitch_sensors', 'heartbeat_crm', 'heartbeat_crm.resources', 'heartbeat_nodes', 'heartbeat_rscstatus', + 'hitachi_hnas_cifs', + 'hitachi_hnas_cpu', + 'hitachi_hnas_fan', + 'hitachi_hnas_fc_if', + 'hitachi_hnas_fpga', + 'hitachi_hnas_pnode', + 'hitachi_hnas_psu', + 'hitachi_hnas_quorumdevice', + 'hitachi_hnas_span', + 'hitachi_hnas_temp', + 'hitachi_hnas_vnode', + 'hitachi_hnas_volume', + 'hivemanager_devices', 'hp_blade', 'hp_blade_blades', 'hp_blade_fan', @@ -197,12 +479,15 @@ 'hp_procurve_cpu', 'hp_procurve_mem', 'hp_procurve_sensors', + 'hp_proliant', 'hp_proliant_cpu', 'hp_proliant_da_cntlr', 'hp_proliant_da_phydrv', 'hp_proliant_fans', 'hp_proliant_mem', + 'hp_proliant_power', 'hp_proliant_psu', + 'hp_proliant_raid', 'hp_proliant_temp', 'hp_sts_drvbox', 'hpux_cpu', @@ -214,21 +499,66 @@ 'hpux_serviceguard', 'hpux_snmp_cs.cpu', 'hpux_tunables', + 'hpux_tunables.maxfiles_lim', + 'hpux_tunables.nkthread', + 'hpux_tunables.nproc', + 'hpux_tunables.semmni', + 'hpux_tunables.semmns', + 'hpux_tunables.shmseg', 'hr_cpu', 'hr_fs', 'hr_mem', + 'hwg_temp', + 'hyperv_vms', + 'ibm_imm_health', + 'ibm_rsa_health', + 'ibm_svc_array', + 'ibm_svc_enclosure', + 'ibm_svc_enclosurestats.power', + 'ibm_svc_enclosurestats.temp', + 'ibm_svc_eventlog', + 'ibm_svc_host', + 'ibm_svc_license', + 'ibm_svc_mdisk', + 'ibm_svc_mdiskgrp', + 'ibm_svc_node', + 'ibm_svc_nodestats.cache', + 'ibm_svc_nodestats.cpu_util', + 'ibm_svc_nodestats.diskio', + 'ibm_svc_nodestats.disk_latency', + 'ibm_svc_nodestats.iops', + 'ibm_svc_portfc', + 'ibm_svc_system', + 'ibm_svc_systemstats.cache', + 'ibm_svc_systemstats.cpu_util', + 'ibm_svc_systemstats.diskio', + 'ibm_svc_systemstats.disk_latency', + 'ibm_svc_systemstats.iops', 'ibm_xraid_pdisks', 'if', 'if64', + 'if64_tplink', + 'if_brocade', 
'if_lancom', 'ifoperstatus', - 'imm_health', + 'innovaphone_channels', + 'innovaphone_cpu', + 'innovaphone_licenses', + 'innovaphone_mem', + 'innovaphone_priports_l1', + 'innovaphone_priports_l2', + 'innovaphone_temp', 'ipmi', 'ipmi_sensors', 'ironport_misc', + 'j4p_performance.app_sess', + 'j4p_performance.app_state', 'j4p_performance.mem', + 'j4p_performance.serv_req', 'j4p_performance.threads', 'j4p_performance.uptime', + 'jar_signature', + 'job', 'jolokia_info', 'jolokia_metrics.app_sess', 'jolokia_metrics.app_state', @@ -236,20 +566,60 @@ 'jolokia_metrics.bea_requests', 'jolokia_metrics.bea_sess', 'jolokia_metrics.bea_threads', + 'jolokia_metrics.gc', 'jolokia_metrics.mem', 'jolokia_metrics.requests', + 'jolokia_metrics.serv_req', 'jolokia_metrics.threads', + 'jolokia_metrics.tp', + 'jolokia_metrics.uptime', + 'juniper_bgp_state', + 'juniper_cpu', + 'juniper_screenos_cpu', + 'juniper_screenos_fan', + 'juniper_screenos_mem', + 'juniper_screenos_temp', + 'juniper_screenos_vpn', + 'juniper_trpz_aps', + 'juniper_trpz_cpu_util', + 'juniper_trpz_flash', + 'juniper_trpz_info', + 'juniper_trpz_mem', + 'juniper_trpz_power', + 'kaspersky_av_quarantine', + 'kaspersky_av_tasks', + 'kaspersky_av_updates', + 'kemp_loadmaster_ha', + 'kemp_loadmaster_realserver', + 'kemp_loadmaster_services', + 'kentix_humidity', + 'kentix_temp', 'kernel', 'kernel.util', + 'knuerr_rms_humidity', + 'knuerr_rms_temp', + 'knuerr_sensors', 'lgp_info', 'lgp_pdu_aux', 'lgp_pdu_info', + 'libelle_business_shadow.archive_dir', + 'libelle_business_shadow.info', + 'libelle_business_shadow.process', + 'libelle_business_shadow.status', + 'liebert_bat_temp', + 'liebert_chiller_status', 'livestatus_status', 'lnx_bonding', 'lnx_if', + 'lnx_quota', + 'lnx_thermal', 'local', + 'logins', 'logwatch', 'logwatch.ec', + 'logwatch.groups', + 'lparstat_aix', + 'lparstat_aix.cpu_util', 'lsi.array', 'lsi.disk', 'mailman_lists', @@ -264,7 +634,10 @@ 'mem.used', 'mem.vmalloc', 'mem.win', + 'mikrotik_signal', 'mounts', + 'moxa_iologik_register', + 'mq_queues', 'mrpe', 'mssql_backup', 'mssql_counters.cache_hits', @@ -274,67 +647,149 @@ 'mssql_tablespaces', 'mssql_versions', 'multipath', + 'mysql_capacity', + 'mysql.connections', 'mysql.innodb_io', 'mysql.sessions', - 'mysql_capacity', + 'mysql_slave', + 'netapp_api_aggr', + 'netapp_api_cluster', + 'netapp_api_cpu', + 'netapp_api_disk', + 'netapp_api_fan', + 'netapp_api_if', + 'netapp_api_protocol', + 'netapp_api_psu', + 'netapp_api_status', + 'netapp_api_temp', + 'netapp_api_version', + 'netapp_api_vf_stats.cpu_util', + 'netapp_api_vf_stats.traffic', + 'netapp_api_vf_status', + 'netapp_api_volumes', 'netapp_cluster', + 'netapp_cpu', + 'netapp_fcpio', 'netapp_vfiler', 'netapp_volumes', 'netctr.combined', 'netif.link', 'netif.params', + 'netstat', 'nfsexports', 'nfsmounts', + 'nginx_status', 'ntp', 'ntp.time', 'nvidia.errors', 'nvidia.temp', 'omd_status', + 'openvpn_clients', + 'oracle_asm_diskgroup', + 'oracle_crs_res', + 'oracle_crs_version', + 'oracle_crs_voting', + 'oracle_dataguard_stats', + 'oracle_instance', + 'oracle_jobs', 'oracle_logswitches', + 'oracle_recovery_status', + 'oracle_rman_backups', 'oracle_sessions', 'oracle_tablespaces', + 'oracle_undostat', 'oracle_version', 'ovs_bonding', 'pdu_gude_8301', 'pdu_gude_8310', + 'plesk_backups', 'plesk_domains', 'postfix_mailq', 'postgres_sessions', 'postgres_stat_database', 'postgres_stat_database.size', 'printer_alerts', + 'printer_input', + 'printer_output', 'printer_pages', 'printer_supply', 'printer_supply_ricoh', 'ps', 'ps.perf', + 
'qlogic_fcport', + 'qlogic_sanbox_fabric_element', + 'qlogic_sanbox.psu', + 'qlogic_sanbox.temp', + 'qmail_stats', + 'qnap_disks', + 'quantum_libsmall_door', + 'quantum_libsmall_status', 'raritan_emx', + 'raritan_pdu_inlet', + 'raritan_pdu_inlet_summary', + 'raritan_pdu_outletcount', + 'raritan_pdu_plugs', + 'rmon_stats', + 'rms200_temp', 'rsa_health', + 'sap.dialog', + 'sap.value', + 'sap.value_groups', + 'sensatronics_temp', + 'sentry_pdu', 'services', + 'services.summary', 'smart.stats', 'smart.temp', 'smbios_sel', 'sni_octopuse_cpu', 'sni_octopuse_status', 'sni_octopuse_trunks', - 'snia_sml', 'snmp_info', 'snmp_uptime', + 'solaris_mem', 'solaris_multipath', 'statgrab_cpu', + 'statgrab_disk', + 'statgrab_load', + 'statgrab_mem', + 'statgrab_net.ctr', + 'statgrab_net.link', + 'statgrab_net.params', 'steelhead_connections', + 'steelhead_peers', 'steelhead_status', 'strem1_sensors', + 'stulz_alerts', + 'stulz_humidity', + 'stulz_powerstate', + 'stulz_pump', + 'stulz_temp', 'superstack3_sensors', 'sylo', + 'symantec_av_progstate', + 'symantec_av_quarantine', + 'symantec_av_updates', 'sym_brightmail_queues', 'systemtime', 'tcp_conn_stats', + 'timemachine', 'tsm_drives', + 'tsm_paths', + 'tsm_scratch', + 'tsm_sessions', 'tsm_stagingpools', 'tsm_storagepools', 'ucd_cpu_load', 'ucd_cpu_util', + 'ucs_bladecenter_fans', + 'ucs_bladecenter_fans.temp', + 'ucs_bladecenter_if', + 'ucs_bladecenter_psu', + 'ucs_bladecenter_psu.chassis_temp', + 'ucs_bladecenter_psu.switch', + 'unitrends_backup', + 'unitrends_replication', 'ups_bat_temp', 'ups_capacity', 'ups_eaton_enviroment', @@ -343,31 +798,66 @@ 'ups_out_load', 'ups_out_voltage', 'ups_power', + 'ups_socomec_capacity', + 'ups_socomec_in_voltage', + 'ups_socomec_outphase', + 'ups_socomec_out_source', + 'ups_socomec_out_voltage', + 'ups_test', 'uptime', + 'users', 'vbox_guest', + 'veeam_client', + 'veeam_jobs', + 'viprinet_firmware', + 'viprinet_mem', + 'viprinet_power', + 'viprinet_router', + 'viprinet_serial', + 'viprinet_temp', 'vms_cpu', 'vms_diskstat.df', 'vms_if', 'vms_queuejobs', 'vms_system.ios', 'vms_system.procs', + 'vmstat_aix', 'vms_users', - 'vmware_state', 'vxvm_enclosures', 'vxvm_multipath', 'vxvm_objstatus', + 'wagner_titanus_topsense.airflow_deviation', + 'wagner_titanus_topsense.alarm', + 'wagner_titanus_topsense.chamber_deviation', + 'wagner_titanus_topsense.info', + 'wagner_titanus_topsense.overall_status', + 'wagner_titanus_topsense.smoke', + 'wagner_titanus_topsense.temp', + 'websphere_mq_channels', + 'websphere_mq_queues', 'win_dhcp_pools', 'win_dhcp_pools.stats', + 'windows_broadcom_bonding', + 'windows_intel_bonding', 'windows_multipath', + 'windows_os_bonding', + 'windows_tasks', 'windows_updates', + 'win_netstat', 'winperf.cpuusage', 'winperf.diskstat', + 'winperf_if', 'winperf_msx_queues', 'winperf_phydisk', 'winperf_processor.util', + 'winperf_tcp_conn', + 'winperf_ts_sessions', + 'win_printers', 'wmic_process', - 'wut_webio_io', + 'wut_webio_io.inputs', 'wut_webtherm', + 'zfs_arc_cache', + 'zfs_arc_cache.l2', 'zfsget', 'zpool_status', 'zypper'], @@ -375,20 +865,57 @@ '3ware_info', '3ware_units', 'ad_replication', + 'adva_fsp_current', + 'adva_fsp_if', + 'adva_fsp_temp', + 'agent_activemq', + 'agent_allnet_ip_sensoric', + 'agent_emcvnx', + 'agent_fritzbox', + 'agent_hivemanager', + 'agent_ibmsvc', + 'agent_innovaphone', + 'agent_netapp', + 'agent_random', + 'agent_ucs_bladecenter', + 'agent_vsphere', 'aironet_clients', 'aironet_errors', + 'aix_diskiod', 'aix_lvm', + 'aix_memory', 'aix_multipath', + 'akcp_daisy_temp', + 
'akcp_sensor_drycontact', 'akcp_sensor_humidity', 'akcp_sensor_temp', + 'alcatel_power', + 'allnet_ip_sensoric', 'apache_status', + 'apc_ats_output', + 'apc_ats_status', + 'apc_humidity', + 'apc_inputs', + 'apc_inrow_airflow', + 'apc_inrow_fanspeed', + 'apc_inrow_temp', + 'apc_mod_pdu_modules', 'apc_powerswitch', 'apc_rackpdu_power', 'apc_symmetra', 'apc_symmetra_ext_temp', 'apc_symmetra_power', 'apc_symmetra_temp', + 'apc_symmetra_test', + 'arc_raid_status', + 'arcserve_backup', + 'arris_cmts_cpu', + 'arris_cmts.include', + 'arris_cmts_mem', + 'arris_cmts_temp', + 'bintec_brrp_status', 'bintec_info', + 'bintec_sensors', 'blade_bays', 'blade_blades', 'blade_blowers', @@ -399,38 +926,101 @@ 'blade_bx_temp', 'blade_health', 'blade_mediatray', - 'blade_misc', 'blade_powerfan', 'blade_powermod', + 'bluecat_command_server', + 'bluecat_dhcp', + 'bluecat_dns', + 'bluecat_dns_queries', + 'bluecat_ha', + 'bluecat_ntp', + 'bluecat_threads', 'bluecoat_diskcpu', 'bluecoat_sensors', 'bonding.include', + 'brocade', 'brocade_fcport', + 'brocade_info', + 'brocade_mlx', + 'brocade_mlx_fan', + 'brocade_mlx_power', + 'brocade_mlx_temp', + 'brocade_tm', + 'brocade_vdx_status', 'canon_pages', + 'carel_sensors', 'carel_uniflair_cooling', + 'casa_cpu_mem', + 'casa_cpu_temp', + 'casa_cpu_util', + 'casa_fan', + 'casa_power', 'cbl_airlaser', + 'check_bi_aggr', + 'check_cmk_inv', + 'check_disk_smb', 'check_dns', + 'check_form_submit', + 'check_ftp', 'check_http', + 'check_icmp', 'check_ldap', + 'check_mail', + 'check_mail_loop', 'check_mk', + 'check_notify_count', + 'checkpoint_connections', + 'checkpoint.include', + 'checkpoint_packets', 'check_smtp', + 'check_sql', + 'check_ssh', 'check_tcp', + 'check_traceroute', + 'check_uniserv', + 'chrony', + 'cifsmounts', 'cisco_asa_failover', 'cisco_cpu', 'cisco_fan', + 'cisco_fantray', + 'cisco_fru_power', 'cisco_hsrp', - 'cisco_locif', 'cisco_mem', 'cisco_power', 'cisco_qos', + 'cisco_secure', + 'cisco_sensor_item.include', + 'cisco_sys_mem', 'cisco_temp', + 'cisco_temperature', 'cisco_temp_perf', 'cisco_temp_sensor', + 'cisco_vpn_tunnel', + 'cisco_vss', + 'cisco_wlc', + 'cisco_wlc_clients', + 'citrix_licenses', + 'citrix_serverload', + 'citrix_sessions', + 'climaveneta_alarm', + 'climaveneta_fan', + 'climaveneta_temp', + 'cmciii', + 'cmciii.include', + 'cmciii_lcp_airin', + 'cmciii_lcp_airout', + 'cmciii_lcp_fans', + 'cmciii_lcp_waterflow', + 'cmciii_lcp_waterin', + 'cmciii_lcp_waterout', 'cmctc', 'cmctc_lcp', 'cmctc_psm_m', 'cpsecure_sessions', 'cpu', + 'cpu_load.include', + 'cpu_util.include', 'cups_queues', 'db2_mem', 'decru_cpu', @@ -438,31 +1028,96 @@ 'decru_perf', 'decru_power', 'decru_temps', + 'dell_chassis_fans', + 'dell_chassis_io', + 'dell_chassis_kvm', + 'dell_chassis_power', + 'dell_chassis_powersupplies', + 'dell_chassis_slots', + 'dell_chassis_status', + 'dell_chassis_temp', + 'dell_idrac_disks', + 'dell_om_disks', + 'dell_om_esmlog', + 'dell_om.include', + 'dell_om_mem', + 'dell_om_processors', + 'dell_om_sensors', 'dell_powerconnect_cpu', 'dell_powerconnect_fans', 'dell_powerconnect_psu', 'dell_powerconnect_temp', + 'dell_poweredge_amperage', + 'dell_poweredge_cpu', + 'dell_poweredge_mem', + 'dell_poweredge_netdev', + 'dell_poweredge_pci', + 'dell_poweredge_status', + 'dell_poweredge_temp', 'df', 'df.include', 'df_netapp', - 'df_netapp.include', 'df_netapp32', + 'df_netapp.include', 'diskstat', 'diskstat.include', 'dmi_sysinfo', 'dmraid', + 'docsis_channels_downstream', + 'docsis_channels_upstream', + 'docsis_cm_status', + 'docsis.include', + 
'domino_info', + 'domino_mailqueues', + 'domino_tasks', + 'domino_transactions', + 'domino_users', 'drbd', + 'elphase.include', + 'emc_datadomain_disks', + 'emc_datadomain_fans', + 'emc_datadomain_fs', + 'emc_datadomain_nvbat', + 'emc_datadomain_power', + 'emc_datadomain_temps', + 'emc_isilon', + 'emc_isilon_diskstatus', + 'emc_isilon_iops', + 'emcvnx_disks', + 'emcvnx_hba', + 'emcvnx_hwstatus', + 'emcvnx_info', + 'emcvnx_raidgroups', + 'emerson_stat', + 'emerson_temp', + 'enterasys_cpu_util', + 'enterasys_fans', + 'enterasys_lsnat', + 'enterasys_powersupply', + 'enterasys_temp', + 'esx_vsphere_counters', + 'esx_vsphere_datastores', + 'esx_vsphere_hostsystem', + 'esx_vsphere_licenses', + 'esx_vsphere_objects', + 'esx_vsphere_sensors', + 'esx_vsphere_vm', + 'etherbox', + 'f5_bigip_chassis_temp', 'f5_bigip_cluster', + 'f5_bigip_cluster_v11', + 'f5_bigip_conns', + 'f5_bigip_cpu_temp', 'f5_bigip_fans', 'f5_bigip_interfaces', 'f5_bigip_pool', 'f5_bigip_psu', - 'f5_bigip_temp', 'f5_bigip_vserver', - 'fc_brocade_port', - 'fc_brocade_port_detailed', + 'fast_lta_headunit', + 'fast_lta_silent_cubes', + 'fast_lta_volumes', + 'fc_port', 'fileinfo', - 'fjdarye.include', 'fjdarye100_cadaps', 'fjdarye100_cmods', 'fjdarye100_cmods_mem', @@ -473,6 +1128,14 @@ 'fjdarye100_rluns', 'fjdarye100_sum', 'fjdarye100_syscaps', + 'fjdarye101_cadaps', + 'fjdarye101_cmods', + 'fjdarye101_cmods_mem', + 'fjdarye101_conencs', + 'fjdarye101_disks', + 'fjdarye101_rluns', + 'fjdarye101_sum', + 'fjdarye101_syscaps', 'fjdarye60_cadaps', 'fjdarye60_cmods', 'fjdarye60_cmods_flash', @@ -487,18 +1150,37 @@ 'fjdarye60_sum', 'fjdarye60_syscaps', 'fjdarye60_thmls', + 'fjdarye.include', 'fortigate_cpu', 'fortigate_memory', 'fortigate_sessions', + 'fritz', 'fsc_fans', 'fsc_ipmi_mem_status', 'fsc_subsystems', 'fsc_temp', + 'genua_carp', + 'genua_fan', + 'genua_pfstate', + 'genua_state_correlation', 'h3c_lanswitch_cpu', 'h3c_lanswitch_sensors', 'heartbeat_crm', 'heartbeat_nodes', 'heartbeat_rscstatus', + 'hitachi_hnas_cifs', + 'hitachi_hnas_cpu', + 'hitachi_hnas_fan', + 'hitachi_hnas_fc_if', + 'hitachi_hnas_fpga', + 'hitachi_hnas_pnode', + 'hitachi_hnas_psu', + 'hitachi_hnas_quorumdevice', + 'hitachi_hnas_span', + 'hitachi_hnas_temp', + 'hitachi_hnas_vnode', + 'hitachi_hnas_volume', + 'hivemanager_devices', 'hp_blade', 'hp_blade_blades', 'hp_blade_fan', @@ -514,7 +1196,9 @@ 'hp_proliant_da_phydrv', 'hp_proliant_fans', 'hp_proliant_mem', + 'hp_proliant_power', 'hp_proliant_psu', + 'hp_proliant_raid', 'hp_proliant_temp', 'hp_sts_drvbox', 'hpux_cpu', @@ -530,28 +1214,86 @@ 'hr_fs', 'hr_mem', 'hwg_temp', + 'hyperv_vms', 'ibm_imm_health', 'ibm_rsa_health', + 'ibm_svc_array', + 'ibm_svc_enclosure', + 'ibm_svc_enclosurestats', + 'ibm_svc_eventlog', + 'ibm_svc_host', + 'ibm_svc_license', + 'ibm_svc_mdisk', + 'ibm_svc_mdiskgrp', + 'ibm_svc_node', + 'ibm_svc_nodestats', + 'ibm_svc_portfc', + 'ibm_svc_system', + 'ibm_svc_systemstats', 'ibm_xraid_pdisks', 'if', - 'if.include', 'if64', + 'if64_tplink', + 'if_brocade', + 'if.include', 'if_lancom', 'ifoperstatus', + 'innovaphone_channels', + 'innovaphone_cpu', + 'innovaphone.include', + 'innovaphone_licenses', + 'innovaphone_mem', + 'innovaphone_priports_l1', + 'innovaphone_priports_l2', + 'innovaphone_temp', 'ipmi', 'ipmi_sensors', 'ironport_misc', 'j4p_performance', + 'jar_signature', + 'job', 'jolokia_info', 'jolokia_metrics', + 'juniper_bgp_state', + 'juniper_cpu', + 'juniper_mem.include', + 'juniper_screenos_cpu', + 'juniper_screenos_fan', + 'juniper_screenos_mem', + 'juniper_screenos_temp', + 
'juniper_screenos_vpn', + 'juniper_trpz_aps', + 'juniper_trpz_cpu_util', + 'juniper_trpz_flash', + 'juniper_trpz_info', + 'juniper_trpz_mem', + 'juniper_trpz_power', + 'kaspersky_av_quarantine', + 'kaspersky_av_tasks', + 'kaspersky_av_updates', + 'kemp_loadmaster_ha', + 'kemp_loadmaster_realserver', + 'kemp_loadmaster_services', + 'kentix_humidity', + 'kentix_temp', 'kernel', + 'knuerr_rms_humidity', + 'knuerr_rms_temp', + 'knuerr_sensors', 'lgp_info', 'lgp_pdu_aux', 'lgp_pdu_info', + 'libelle_business_shadow', + 'license.include', + 'liebert_bat_temp', + 'liebert_chiller_status', 'livestatus_status', 'lnx_bonding', 'lnx_if', + 'lnx_quota', + 'lnx_thermal', 'local', + 'logins', 'logwatch', 'lparstat_aix', 'lsi', @@ -565,7 +1307,10 @@ 'megaraid_pdisks', 'mem', 'mem.include', + 'mikrotik_signal', 'mounts', + 'moxa_iologik_register', + 'mq_queues', 'mrpe', 'mssql_backup', 'mssql_counters', @@ -574,6 +1319,22 @@ 'multipath', 'mysql', 'mysql_capacity', + 'mysql_slave', + 'netapp_api_aggr', + 'netapp_api_cluster', + 'netapp_api_cpu', + 'netapp_api_disk', + 'netapp_api_fan', + 'netapp_api_if', + 'netapp_api.include', + 'netapp_api_protocol', + 'netapp_api_psu', + 'netapp_api_status', + 'netapp_api_temp', + 'netapp_api_version', + 'netapp_api_vf_stats', + 'netapp_api_vf_status', + 'netapp_api_volumes', 'netapp_cluster', 'netapp_cpu', 'netapp_fcpio', @@ -581,41 +1342,81 @@ 'netapp_volumes', 'netctr', 'netif', + 'netstat', + 'netstat.include', + 'network_fs.include', 'nfsexports', 'nfsmounts', + 'nginx_status', 'ntp', 'nvidia', 'omd_status', - 'oracle.include', + 'openvpn_clients', 'oracle_asm_diskgroup', + 'oracle_crs_res', + 'oracle_crs_version', + 'oracle_crs_voting', + 'oracle_dataguard_stats', + 'oracle.include', + 'oracle_instance', + 'oracle_jobs', + 'oracle_locks', 'oracle_logswitches', + 'oracle_longactivesessions', + 'oracle_processes', + 'oracle_recovery_area', + 'oracle_recovery_status', + 'oracle_rman', + 'oracle_rman_backups', 'oracle_sessions', 'oracle_tablespaces', + 'oracle_undostat', 'oracle_version', 'ovs_bonding', - 'pdu_gude.include', 'pdu_gude_8301', 'pdu_gude_8310', + 'pdu_gude.include', 'plesk_backups', 'plesk_domains', 'postfix_mailq', 'postgres_sessions', 'postgres_stat_database', 'printer_alerts', + 'printer_input', + 'printer_io.include', + 'printer_output', 'printer_pages', 'printer_supply', 'printer_supply_ricoh', 'ps', + 'ps.include', + 'qlogic_fcport', + 'qlogic_sanbox', + 'qlogic_sanbox_fabric_element', + 'qmail_stats', + 'qnap_disks', + 'quantum_libsmall_door', + 'quantum_libsmall_status', 'raritan_emx', + 'raritan_pdu_inlet', + 'raritan_pdu_inlet.include', + 'raritan_pdu_inlet_summary', + 'raritan_pdu_outletcount', + 'raritan_pdu_plugs', + 'rmon_stats', + 'rms200_temp', + 'sap', + 'sensatronics_temp', + 'sentry_pdu', 'services', 'smart', 'smbios_sel', 'sni_octopuse_cpu', 'sni_octopuse_status', 'sni_octopuse_trunks', - 'snia_sml', 'snmp_info', 'snmp_uptime', + 'solaris_mem', 'solaris_multipath', 'statgrab_cpu', 'statgrab_disk', @@ -623,233 +1424,447 @@ 'statgrab_mem', 'statgrab_net', 'steelhead_connections', + 'steelhead_peers', 'steelhead_status', 'strem1_sensors', + 'stulz_alerts', + 'stulz_humidity', + 'stulz_powerstate', + 'stulz_pump', + 'stulz_temp', 'superstack3_sensors', 'sylo', + 'symantec_av_progstate', + 'symantec_av_quarantine', + 'symantec_av_updates', 'sym_brightmail_queues', 'systemtime', 'tcp_conn_stats', + 'temperature.include', + 'timemachine', 'tsm_drives', + 'tsm_paths', + 'tsm_scratch', + 'tsm_sessions', 'tsm_stagingpools', - 'tsm_stgpool', 
'tsm_storagepools', 'ucd_cpu_load', 'ucd_cpu_util', + 'ucs_bladecenter_fans', + 'ucs_bladecenter_if', + 'ucs_bladecenter.include', + 'ucs_bladecenter_psu', + 'unitrends_backup', + 'unitrends_replication', 'ups_bat_temp', 'ups_capacity', + 'ups_capacity.include', 'ups_eaton_enviroment', 'ups_in_freq', 'ups_in_voltage', + 'ups_in_voltage.include', 'ups_out_load', 'ups_out_voltage', + 'ups_out_voltage.include', 'ups_power', + 'ups_socomec_capacity', + 'ups_socomec_in_voltage', + 'ups_socomec_outphase', + 'ups_socomec_out_source', + 'ups_socomec_out_voltage', + 'ups_test', 'uptime', + 'uptime.include', 'vbox_guest', + 'veeam_client', + 'veeam_jobs', + 'viprinet_firmware', + 'viprinet_mem', + 'viprinet_power', + 'viprinet_router', + 'viprinet_serial', + 'viprinet_temp', 'vms_cpu', - 'vms_df', 'vms_diskstat', 'vms_if', - 'vms_md', - 'vms_netif', 'vms_queuejobs', - 'vms_sys', 'vms_system', - 'vms_users', 'vmstat_aix', - 'vmware_state', + 'vms_users', 'vxvm_enclosures', 'vxvm_multipath', 'vxvm_objstatus', + 'wagner_titanus_topsense', + 'websphere_mq_channels', + 'websphere_mq_queues', 'win_dhcp_pools', + 'windows_broadcom_bonding', + 'windows_intel_bonding', 'windows_multipath', + 'windows_os_bonding', + 'windows_tasks', 'windows_updates', + 'win_netstat', 'winperf', + 'winperf_if', 'winperf_msx_queues', 'winperf_phydisk', 'winperf_processor', + 'winperf_tcp_conn', + 'winperf_ts_sessions', + 'win_printers', 'wmic_process', 'wut_webio_io', 'wut_webtherm', + 'zfs_arc_cache', 'zfsget', 'zpool_status', 'zypper'], - 'doc': ['README', - 'README.i18n', - 'README.setup_in_omd', - 'README.sounds', - 'agents/README.HP-UX', - 'agents/README.OpenVMS', + 'doc': ['agents/README.OpenVMS', + 'agents/README.solaris', 'agents/README.FreeBSD', + 'agents/README.HP-UX', 'agents/README.AIX', 'agents/README.jolokia_on_weblogic', - 'agents/README.solaris', - 'benchmark/bench.cfg', 'benchmark/cmkbench.sh', - 'bi-example-oracle.mk', + 'benchmark/bench.cfg', 'bi-example.mk', + 'bi-example-oracle.mk', + 'checking.svg', 'check_mk.200.png', 'check_mk.png', 'check_mk.svg', 'check_mk.trans.200.gif', 'check_mk.trans.200.png', - 'checking.svg', - 'drafts/LIESMICH.cookieauth', - 'drafts/LIESMICH.inventur', - 'drafts/README.predictive', - 'drafts/LIESMICH.interval', + 'drafts/LIESMICH.checkgenerator', + 'drafts/README.cmk-register-host', + 'drafts/LIESMICH.windows_persisted', + 'drafts/LIESMICH.globalsettings', + 'drafts/LIESMICH.Check_MK_2.0', 'helpers/reindent.py', - 'helpers/listtar', - 'helpers/figheader', - 'helpers/headrify', 'helpers/validate_checks', 'helpers/df_magic_number.py', 'helpers/wato-migrate-1.2.0.sh', + 'helpers/listtar', + 'helpers/figheader', + 'helpers/headrify', 'jasperreports/livestatus.jar', - 'jasperreports/src/LivestatusFieldsProvider.java', 'jasperreports/src/LivestatusDatasource.java', - 'jasperreports/src/LivestatusQueryExecuter.java', 'jasperreports/src/LivestatusQueryExecuterFactory.java', + 'jasperreports/src/LivestatusQueryExecuter.java', + 'jasperreports/src/LivestatusFieldsProvider.java', + 'jasperreports/classes/livestatus/LivestatusFieldsProvider.class', + 'jasperreports/classes/livestatus/LivestatusQueryExecuter.class', 'jasperreports/classes/livestatus/LivestatusQueryExecuterFactory.class', 'jasperreports/classes/livestatus/LivestatusDatasource.class', - 'jasperreports/classes/livestatus/LivestatusQueryExecuter.class', - 'jasperreports/classes/livestatus/LivestatusFieldsProvider.class', 'jasperreports/README.jasperreports-livestatus', 'jasperreports/Makefile', - 
'livestatus/LQL-examples/6.lql', - 'livestatus/LQL-examples/4.lql', - 'livestatus/LQL-examples/5.lql', - 'livestatus/LQL-examples/7.lql', - 'livestatus/LQL-examples/1.lql', + 'livestatus/LQL-examples/2.lql', + 'livestatus/LQL-examples/8.lql', 'livestatus/LQL-examples/12.lql', + 'livestatus/LQL-examples/4.lql', + 'livestatus/LQL-examples/9.lql', 'livestatus/LQL-examples/11.lql', 'livestatus/LQL-examples/3.lql', + 'livestatus/LQL-examples/1.lql', 'livestatus/LQL-examples/10.lql', - 'livestatus/LQL-examples/8.lql', - 'livestatus/LQL-examples/9.lql', + 'livestatus/LQL-examples/7.lql', 'livestatus/LQL-examples/13.lql', - 'livestatus/LQL-examples/2.lql', + 'livestatus/LQL-examples/6.lql', + 'livestatus/LQL-examples/5.lql', + 'manpage.template', 'mkeventd/query_events', - 'treasures/check_bi_local.py', - 'treasures/get_rrd_cache_stats', - 'treasures/livedump', + 'Notifications.png', + 'Notifications.svg', + 'predictive/foo.agentplugin', + 'predictive/foo.check', + 'predictive/foo.wato', + 'predictive/README', + 'README', + 'README.i18n', + 'README.renaming_services', + 'README.setup_in_omd', + 'README.sounds', + 'skeleton_check', + 'treasures/workplace/screenrc', + 'treasures/workplace/vimrc', + 'treasures/mk_oracle.old', + 'treasures/wato_host_svc_groups.py', + 'treasures/cmk-plugin-statistics', + 'treasures/check_mk.bash_completion', + 'treasures/agent_wrapper.php', + 'treasures/host_to_ping_check.sh', + 'treasures/agent_ipmi', + 'treasures/wato_hook_cleanup_folders.py', + 'treasures/downtime', + 'treasures/find_piggy_orphans', + 'treasures/multisite_to_mrpe', + 'treasures/notification_report.sh', + 'treasures/incomplete_checks/db2/db2_logsizes', + 'treasures/incomplete_checks/db2/db2_sessions', + 'treasures/incomplete_checks/db2/mk_db2', + 'treasures/incomplete_checks/db2/db2_versions', + 'treasures/incomplete_checks/db2/db2_bp_hitratios', + 'treasures/incomplete_checks/db2/db2_counters', + 'treasures/incomplete_checks/db2/db2_tablespaces', 'treasures/check_imap_folder', - 'treasures/inventory_helper.sh', + 'treasures/zombies.mk', + 'treasures/migrate_cpu_load.sh', + 'treasures/wato_include_hosts', + 'treasures/check_livestatus', + 'treasures/win_if_check.bat', + 'treasures/localchecks/check_mount_rw', + 'treasures/localchecks/check_bi_local.py', + 'treasures/localchecks/zombies', + 'treasures/localchecks/check_fstab_mounts', + 'treasures/notifications/snmp_trap', + 'treasures/notifications/mobilant', + 'treasures/notifications/pushover', + 'treasures/notifications/multitech', + 'treasures/notifications/README', + 'treasures/nagvis_icon/nagvis_icon.mk', + 'treasures/nagvis_icon/nagvis_icon.py', + 'treasures/check_nagios_states', + 'treasures/livedump/livedump', + 'treasures/livedump/livedump-ssh-recv', + 'treasures/livedump/livedump-mail-fetch', + 'treasures/livedump/livestatus-to-nsca.sh', + 'treasures/livedump/README', + 'treasures/modbus/perfometer/modbus.py', + 'treasures/modbus/agent_modbus.cpp', + 'treasures/modbus/checkman/modbus_value', + 'treasures/modbus/wato/modbus.py', + 'treasures/modbus/modbus_value', + 'treasures/modbus/agent_modbus', + 'treasures/modbus/checks/agent_modbus', + 'treasures/deprecated/agents/plugins/dmi_sysinfo', + 'treasures/deprecated/agents/plugins/mrpe_include', + 'treasures/deprecated/agents/plugins/j4p_performance', + 'treasures/deprecated/agents/plugins/sylo', + 'treasures/deprecated/README', 'treasures/check_nagios_states.php', - 'treasures/README.livedump', + 'treasures/get_rrd_cache_stats', + 'treasures/liveproxy/liveproxyd', + 
'treasures/active_checks/check_form_submit', + 'treasures/active_checks/check_bi_aggr', + 'treasures/active_checks/check_sql', + 'treasures/active_checks/check_mail_loop', + 'treasures/active_checks/check_traceroute', + 'treasures/active_checks/check_uniserv', + 'treasures/active_checks/check_notify_count', + 'treasures/active_checks/check_mail', + 'treasures/windows_msi/build_msi.bat', + 'treasures/windows_msi/check_mk_agent.wxs', + 'treasures/windows_msi/check_mk_agent.wixobj', + 'treasures/windows_msi/cmk_InstallDirDlg.wxs', + 'treasures/windows_msi/cmk_WixUI_InstallDir.wixobj', + 'treasures/windows_msi/check_mk_agent.wixpdb', + 'treasures/windows_msi/cmk_InstallDirDlg.wixobj', + 'treasures/windows_msi/check_mk_agent_baked.wxs', + 'treasures/windows_msi/sources/GPL-V2.rtf', + 'treasures/windows_msi/sources/check_mk.ini', + 'treasures/windows_msi/sources/check_mk.example.ini', + 'treasures/windows_msi/sources/check_mk_agent-64.exe', + 'treasures/windows_msi/sources/gpl_v2.rtf', + 'treasures/windows_msi/sources/check_mk_agent.exe', + 'treasures/windows_msi/sources/plugins.cmkp', + 'treasures/windows_msi/README', + 'treasures/windows_msi/cmk_WixUI_InstallDir.wxs', + 'treasures/inventory/extract_inventory.py', + 'treasures/msexchange/winperf_msx_dumpster', + 'treasures/msexchange/winperf_msx_rpc_clientaccess', + 'treasures/msexchange/winperf_msx_dc_access', + 'treasures/msexchange/winperf_msx_db_reads_avg_latency', + 'treasures/msexchange/winperf_msx_activesync', + 'treasures/msexchange/winperf_msx_queued_mailbox', + 'treasures/msexchange/README', + 'treasures/inventory_helper.sh', + 'treasures/Event_Console/sl_notify_to_eventd.py', + 'treasures/Event_Console/get_event_status', 'treasures/Event_Console/snmptd_mkevent.py', 'treasures/Event_Console/mail_mkevent.py', + 'treasures/Event_Console/message_to_syslog.py', + 'treasures/Event_Console/nsca2mkeventd/nsca2mkeventd', + 'treasures/Event_Console/nsca2mkeventd/nsca2mkeventd.init', + 'treasures/Event_Console/nsca2mkeventd/LIESMICH', 'treasures/Event_Console/Eventconsole-Performance.py', - 'treasures/Event_Console/get_event_status', 'treasures/colorgrep', - 'treasures/multisite_to_mrpe', - 'treasures/notification_report.sh', - 'treasures/duallan_check', - 'treasures/fsc_ipmi_mem_status.sh', - 'treasures/check_livestatus', - 'treasures/solaris_cache_plugins.sh', - 'treasures/downtime', - 'treasures/wato_host_svc_groups.py', - 'treasures/notify_trap', - 'treasures/wato_import.py', + 'treasures/webapps/cmk_nagios_webapps-1.1.mkp', + 'treasures/webapps/README', + 'treasures/wiki_painter.py', 'treasures/check_curl', - 'treasures/SLAviews/reporting.py', - 'treasures/SLAviews/README.sla_view', - 'treasures/fsc-celsius-m470-sel-1.2.tar.gz', - 'treasures/check_servicegroup', - 'treasures/check_nagios_states', - 'treasures/wiki_snapin.py', - 'treasures/livestatus-to-nsca.sh', - 'treasures/migrate_cpu_load.sh', - 'treasures/win_if_check.bat', 'treasures/speedometer.xcf', - 'treasures/zombies.mk', - 'windows/counters.ini', - 'windows/README.windows', - 'windows/nslookup_local_check_example/nslookup_reverse.bat', + 'treasures/ds_random_bi.mk', + 'treasures/check_servicegroup', + 'treasures/wato_geo_fields.py', + 'treasures/mknotifyd', + 'treasures/fsc-celsius-m470-sel-1.2.tar.gz', + 'treasures/opcmsg', + 'treasures/unix_cache_plugins.sh', + 'treasures/wato_import.py', + 'treasures/fsc_ipmi_mem_status.sh', + 'treasures/check_flapping/wato_plugin.py', + 'treasures/check_flapping/cmk_check', + 'treasures/check_flapping/check_flapping', + 
'treasures/duallan_check', + 'treasures/cmk-delete-host', + 'treasures/config_snippets/exchange.rules.mk', + 'windows/README.lodctr', 'windows/nslookup_local_check_example/nslookup_forward.bat', 'windows/nslookup_local_check_example/nslookup.vbs', + 'windows/nslookup_local_check_example/nslookup_reverse.bat', + 'windows/counters.ini', 'windows/counter_help.txt', 'windows/counter_names.txt', - 'windows/README.lodctr', + 'windows/README.windows', 'COPYING', 'AUTHORS', 'ChangeLog', + 'livestatus/api/python/livestatus.py', + 'livestatus/api/python/example_multisite.py', + 'livestatus/api/python/example.py', + 'livestatus/api/python/README', + 'livestatus/api/python/make_nagvis_map.py', 'livestatus/api/c++/demo.cc', - 'livestatus/api/c++/Livestatus.cc', - 'livestatus/api/c++/Makefile', 'livestatus/api/c++/Livestatus.h', - 'livestatus/api/perl/MANIFEST', - 'livestatus/api/perl/examples/dump.pl', - 'livestatus/api/perl/examples/test.pl', + 'livestatus/api/c++/Makefile', + 'livestatus/api/c++/Livestatus.cc', 'livestatus/api/perl/Changes', + 'livestatus/api/perl/examples/test.pl', + 'livestatus/api/perl/examples/dump.pl', + 'livestatus/api/perl/inc/Module/AutoInstall.pm', + 'livestatus/api/perl/inc/Module/Install.pm', + 'livestatus/api/perl/inc/Module/Install/AutoInstall.pm', + 'livestatus/api/perl/inc/Module/Install/Base.pm', + 'livestatus/api/perl/inc/Module/Install/WriteAll.pm', + 'livestatus/api/perl/inc/Module/Install/Win32.pm', + 'livestatus/api/perl/inc/Module/Install/Metadata.pm', + 'livestatus/api/perl/inc/Module/Install/Fetch.pm', + 'livestatus/api/perl/inc/Module/Install/Makefile.pm', + 'livestatus/api/perl/inc/Module/Install/Can.pm', + 'livestatus/api/perl/inc/Module/Install/Include.pm', 'livestatus/api/perl/META.yml', - 'livestatus/api/perl/t/33-Monitoring-Livestatus-test_socket_timeout.t', - 'livestatus/api/perl/t/98-Pod-Coverage.t', + 'livestatus/api/perl/MANIFEST', + 'livestatus/api/perl/Makefile.PL', 'livestatus/api/perl/t/31-Monitoring-Livestatus-MULTI-live-test.t', - 'livestatus/api/perl/t/32-Monitoring-Livestatus-backend-test.t', - 'livestatus/api/perl/t/97-Pod.t', - 'livestatus/api/perl/t/02-Monitoring-Livestatus-internals.t', + 'livestatus/api/perl/t/22-Monitoring-Livestatus-UNIX.t', + 'livestatus/api/perl/t/99-Perl-Critic.t', + 'livestatus/api/perl/t/34-Monitoring-Livestatus-utf8_support.t', 'livestatus/api/perl/t/30-Monitoring-Livestatus-live-test.t', + 'livestatus/api/perl/t/01-Monitoring-Livestatus-basic_tests.t', 'livestatus/api/perl/t/35-Monitoring-Livestatus-callbacks_support.t', - 'livestatus/api/perl/t/99-Perl-Critic.t', + 'livestatus/api/perl/t/97-Pod.t', + 'livestatus/api/perl/t/33-Monitoring-Livestatus-test_socket_timeout.t', + 'livestatus/api/perl/t/02-Monitoring-Livestatus-internals.t', 'livestatus/api/perl/t/perlcriticrc', + 'livestatus/api/perl/t/20-Monitoring-Livestatus-test_socket.t', + 'livestatus/api/perl/t/32-Monitoring-Livestatus-backend-test.t', 'livestatus/api/perl/t/03-Monitoring-Livestatus-MULTI-internals.t', - 'livestatus/api/perl/t/22-Monitoring-Livestatus-UNIX.t', + 'livestatus/api/perl/t/98-Pod-Coverage.t', 'livestatus/api/perl/t/21-Monitoring-Livestatus-INET.t', - 'livestatus/api/perl/t/20-Monitoring-Livestatus-test_socket.t', - 'livestatus/api/perl/t/01-Monitoring-Livestatus-basic_tests.t', - 'livestatus/api/perl/t/34-Monitoring-Livestatus-utf8_support.t', 'livestatus/api/perl/README', - 'livestatus/api/perl/inc/Module/AutoInstall.pm', - 'livestatus/api/perl/inc/Module/Install/AutoInstall.pm', - 
'livestatus/api/perl/inc/Module/Install/Include.pm', - 'livestatus/api/perl/inc/Module/Install/Can.pm', - 'livestatus/api/perl/inc/Module/Install/WriteAll.pm', - 'livestatus/api/perl/inc/Module/Install/Metadata.pm', - 'livestatus/api/perl/inc/Module/Install/Win32.pm', - 'livestatus/api/perl/inc/Module/Install/Base.pm', - 'livestatus/api/perl/inc/Module/Install/Makefile.pm', - 'livestatus/api/perl/inc/Module/Install/Fetch.pm', - 'livestatus/api/perl/inc/Module/Install.pm', - 'livestatus/api/perl/Makefile.PL', - 'livestatus/api/perl/lib/Monitoring/Livestatus.pm', 'livestatus/api/perl/lib/Monitoring/Livestatus/UNIX.pm', - 'livestatus/api/perl/lib/Monitoring/Livestatus/INET.pm', 'livestatus/api/perl/lib/Monitoring/Livestatus/MULTI.pm', - 'livestatus/api/python/example.py', - 'livestatus/api/python/README', - 'livestatus/api/python/make_nagvis_map.py', - 'livestatus/api/python/example_multisite.py', - 'livestatus/api/python/livestatus.py'], - 'pnp-templates': ['check-mk-ping.php', - 'check-mk.php', + 'livestatus/api/perl/lib/Monitoring/Livestatus/INET.pm', + 'livestatus/api/perl/lib/Monitoring/Livestatus.pm'], + 'pnp-templates': ['check_mk_active-disk_smb.php', + 'check_mk_active-http.php', + 'check_mk_active-icmp.php', + 'check_mk_active-mail_loop.php', + 'check_mk_active-mail.php', + 'check_mk_active-notify_count.php', + 'check_mk_active-tcp.php', + 'check_mk-aix_diskiod.php', + 'check_mk-aix_memory.php', + 'check_mk-akcp_daisy_temp.php', 'check_mk-akcp_sensor_humidity.php', 'check_mk-akcp_sensor_temp.php', + 'check_mk-allnet_ip_sensoric.humidity.php', + 'check_mk-allnet_ip_sensoric.pressure.php', + 'check_mk-allnet_ip_sensoric.temp.php', + 'check_mk-allnet_ip_sensoric.tension.php', 'check_mk-apache_status.php', - 'check_mk-apc_symmetra.php', + 'check_mk-apc_humidity.php', + 'check_mk-apc_inrow_temp.php', 'check_mk-apc_symmetra_ext_temp.php', + 'check_mk-apc_symmetra.php', 'check_mk-apc_symmetra_power.php', 'check_mk-apc_symmetra_temp.php', + 'check_mk-arcserve_backup.php', + 'check_mk-arris_cmts_temp.php', + 'check_mk-bintec_sensors.fan.php', + 'check_mk-bintec_sensors.temp.php', + 'check_mk-bintec_sensors.voltage.php', + 'check_mk-blade_bx_load.php', 'check_mk-brocade_fcport.php', + 'check_mk-brocade_mlx.module_cpu.php', + 'check_mk-brocade_mlx.module_mem.php', + 'check_mk-brocade_mlx_temp.php', + 'check_mk-brocade.temp.php', + 'check_mk-canon_pages.php', + 'check_mk-carel_sensors.php', 'check_mk-cbl_airlaser.hardware.php', + 'check_mk-check_mk-cmctc.temp.php', + 'check_mk-chrony.php', + 'check_mk-cisco_cpu.php', 'check_mk-cisco_qos.php', 'check_mk-cisco_temp_sensor.php', + 'check_mk-cisco_wlc_clients.php', + 'check_mk-citrix_licenses.php', + 'check_mk-climaveneta_temp.php', 'check_mk-cmctc.temp.php', 'check_mk-cpu.load.php', 'check_mk-cpu.loads.php', 'check_mk-cpu.threads.php', 'check_mk-decru_cpu.php', + 'check_mk-decru_temps.php', + 'check_mk-dell_chassis_temp.php', + 'check_mk-dell_om_sensors.php', 'check_mk-dell_powerconnect_cpu.php', 'check_mk-dell_powerconnect_temp.php', - 'check_mk-df.php', - 'check_mk-df_netapp.php', 'check_mk-df_netapp32.php', + 'check_mk-df_netapp.php', + 'check_mk-df.php', 'check_mk-diskstat.php', + 'check_mk-emc_datadomain_temps.php', + 'check_mk-emc_isilon_iops.php', + 'check_mk-emcvnx_disks.php', + 'check_mk-emcvnx_hba.php', + 'check_mk-emcvnx_raidgroups.capacity_contiguous.php', + 'check_mk-emcvnx_raidgroups.capacity.php', + 'check_mk-emerson_temp.php', + 'check_mk-enterasys_cpu_util.php', + 'check_mk-enterasys_temp.php', + 
'check_mk-esx_vsphere_counters.diskio.php', + 'check_mk-esx_vsphere_counters.if.php', + 'check_mk-esx_vsphere_counters.ramdisk.php', + 'check_mk-esx_vsphere_counters.uptime.php', + 'check_mk-esx_vsphere_datastores.php', + 'check_mk-esx_vsphere_hostsystem.cpu_usage.php', + 'check_mk-esx_vsphere_hostsystem.mem_usage.php', + 'check_mk-etherbox.humidity.php', + 'check_mk-f5_bigip_chassis_temp.php', + 'check_mk-f5_bigip_cpu_temp.php', 'check_mk-f5_bigip_interfaces.php', - 'check_mk-f5_bigip_temp.php', - 'check_mk-fc_brocade_port_detailed.php', + 'check_mk-f5_bigip_vserver.php', + 'check_mk-fast_lta_silent_cubes.capacity.php', + 'check_mk-fast_lta_volumes.php', + 'check_mk-fc_port.php', + 'check_mk-fritz.uptime.php', + 'check_mk-fritz.wan_if.php', + 'check_mk-fsc_temp.php', 'check_mk-h3c_lanswitch_cpu.php', + 'check_mk-hitachi_hnas_cifs.php', + 'check_mk-hitachi_hnas_cpu.php', + 'check_mk-hitachi_hnas_fan.php', + 'check_mk-hitachi_hnas_fc_if.php', + 'check_mk-hitachi_hnas_fpga.php', + 'check_mk-hitachi_hnas_span.php', + 'check_mk-hitachi_hnas_temp.php', + 'check_mk-hitachi_hnas_volume.php', + 'check_mk-hivemanager_devices.php', + 'check-mk-host-ping.php', + 'check-mk-host-tcp.php', 'check_mk-hp_blade_psu.php', 'check_mk-hp_procurve_cpu.php', 'check_mk-hpux_cpu.php', @@ -866,14 +1881,45 @@ 'check_mk-hr_cpu.php', 'check_mk-hr_fs.php', 'check_mk-hr_mem.php', - 'check_mk-if.php', + 'check_mk-hwg_temp.php', + 'check_mk-ibm_svc_enclosurestats.power.php', + 'check_mk-ibm_svc_enclosurestats.temp.php', + 'check_mk-ibm_svc_host.php', + 'check_mk-ibm_svc_license.php', + 'check_mk-ibm_svc_mdiskgrp.php', + 'check_mk-ibm_svc_nodestats.cache.php', + 'check_mk-ibm_svc_nodestats.cpu_util.php', + 'check_mk-ibm_svc_nodestats.diskio.php', + 'check_mk-ibm_svc_nodestats.disk_latency.php', + 'check_mk-ibm_svc_nodestats.iops.php', + 'check_mk-ibm_svc_systemstats.cache.php', + 'check_mk-ibm_svc_systemstats.cpu_util.php', + 'check_mk-ibm_svc_systemstats.diskio.php', + 'check_mk-ibm_svc_systemstats.disk_latency.php', + 'check_mk-ibm_svc_systemstats.iops.php', 'check_mk-if64.php', + 'check_mk-if64_tplink.php', 'check_mk-if_lancom.php', + 'check_mk-if.php', + 'check_mk-innovaphone_cpu.php', + 'check_mk-innovaphone_temp.php', 'check_mk-ipmi.php', 'check_mk-ipmi_sensors.php', + 'check_mk-job.php', + 'check_mk-jolokia_metrics.gc.php', 'check_mk-jolokia_metrics.mem.php', + 'check_mk-jolokia_metrics.threads.php', + 'check_mk-jolokia_metrics.tp.php', + 'check_mk-jolokia_metrics.uptime.php', + 'check_mk-juniper_screenos_cpu.php', + 'check_mk-juniper_screenos_mem.php', + 'check_mk-juniper_screenos_temp.php', + 'check_mk-juniper_trpz_mem.php', 'check_mk-kernel.php', 'check_mk-kernel.util.php', + 'check_mk-knuerr_rms_humidity.php', + 'check_mk-knuerr_rms_temp.php', + 'check_mk-libelle_business_shadow.archive_dir.php', 'check_mk-livestatus_status.php', 'check_mk-lnx_if.php', 'check_mk-local.php', @@ -884,21 +1930,52 @@ 'check_mk-mem.vmalloc.php', 'check_mk-mem.win.php', 'check_mk-mssql_tablespaces.php', - 'check_mk-mysql.innodb_io.php', 'check_mk-mysql_capacity.php', + 'check_mk-mysql.innodb_io.php', + 'check_mk-mysql_slave.php', + 'check_mk-netapp_api_aggr.php', + 'check_mk-netapp_api_cpu.utilization.php', + 'check_mk-netapp_api_disk.summary.php', + 'check_mk-netapp_api_if.php', + 'check_mk-netapp_api_protocol.php', + 'check_mk-netapp_api_temp.php', + 'check_mk-netapp_api_vf_stats.cpu_util.php', + 'check_mk-netapp_api_vf_stats.traffic.php', + 'check_mk-netapp_api_volumes.php', + 'check_mk-netapp_cpu.php', 'check_mk-netapp_fcpio.php', 
'check_mk-netctr.combined.php', 'check_mk-netctr.php', + 'check_mk-nginx_status.php', 'check_mk-ntp.php', 'check_mk-ntp.time.php', 'check_mk-nvidia.temp.php', + 'check_mk-openvpn_clients.php', + 'check_mk-oracle_asm_diskgroup.php', + 'check_mk-oracle_dataguard_stats.php', + 'check_mk-oracle_instance.php', 'check_mk-oracle_logswitches.php', + 'check_mk-oracle_processes.php', + 'check_mk-oracle_recovery_status.php', 'check_mk-oracle_sessions.php', 'check_mk-oracle_tablespaces.php', + 'check-mk.php', + 'check-mk-ping.php', 'check_mk-postfix_mailq.php', + 'check_mk-printer_pages.php', 'check_mk-printer_supply.php', 'check_mk-ps.perf.php', + 'check_mk-ps.php', + 'check_mk-qlogic_fcport.php', + 'check_mk-qlogic_sanbox.temp.php', + 'check_mk-raritan_pdu_inlet.php', + 'check_mk-raritan_pdu_outletcount.php', + 'check_mk-rmon_stats.php', + 'check_mk-rms200_temp.php', + 'check_mk-sensatronics_temp.php', + 'check_mk-smart.stats.php', 'check_mk-smart.temp.php', + 'check_mk-sni_octopuse_cpu.php', 'check_mk-snmp_uptime.php', 'check_mk-statgrab_cpu.php', 'check_mk-statgrab_disk.php', @@ -906,855 +1983,1032 @@ 'check_mk-statgrab_mem.php', 'check_mk-statgrab_net.ctr.php', 'check_mk-steelhead_connections.php', + 'check_mk-stulz_humidity.php', + 'check_mk-stulz_temp.php', 'check_mk-sylo.php', 'check_mk-systemtime.php', 'check_mk-tcp_conn_stats.php', 'check_mk-tsm_stagingpools.php', 'check_mk-ucd_cpu_load.php', 'check_mk-ucd_cpu_util.php', + 'check_mk-ucs_bladecenter_fans.temp.php', + 'check_mk-ucs_bladecenter_if.php', + 'check_mk-ucs_bladecenter_psu.chassis_temp.php', + 'check_mk-ucs_bladecenter_psu.switch_power.php', + 'check_mk-ups_bat_temp.php', + 'check_mk-ups_outphase.php', + 'check_mk-ups_socomec_outphase.php', 'check_mk-uptime.php', + 'check_mk-veeam_client.php', + 'check_mk-viprinet_temp.php', 'check_mk-vms_cpu.php', 'check_mk-vms_df.php', 'check_mk-vms_diskstat.df.php', 'check_mk-vms_if.php', - 'check_mk-vms_sys.util.php', 'check_mk-vms_system.ios.php', 'check_mk-vms_system.procs.php', + 'check_mk-vms_sys.util.php', + 'check_mk-wagner_titanus_topsense.airflow_deviation.php', + 'check_mk-wagner_titanus_topsense.chamber_deviation.php', + 'check_mk-wagner_titanus_topsense.smoke.php', + 'check_mk-wagner_titanus_topsense.temp.php', + 'check_mk-win_dhcp_pools.php', 'check_mk-winperf.cpuusage.php', + 'check_mk-winperf_if.php', 'check_mk-winperf_msx_queues.php', 'check_mk-winperf_phydisk.php', 'check_mk-winperf_processor.util.php', 'check_mk-wut_webtherm.php', + 'check_mk-zfs_arc_cache.l2.php', + 'check_mk-zfs_arc_cache.php', 'check_mk-zfsget.php', - 'check_mk_active-http.php', - 'check_mk_active-tcp.php'], - 'web': ['htdocs/js/hover.js', + 'template-temperature.php'], + 'web': ['htdocs/main.py', + 'htdocs/userdb.py', + 'htdocs/crashed_check.py', + 'htdocs/bi.py', + 'htdocs/login.css', + 'htdocs/weblib.py', + 'htdocs/logwatch.css', + 'htdocs/js/hover.js', + 'htdocs/js/prediction.js', + 'htdocs/js/mobile.js', + 'htdocs/js/wato.js', 'htdocs/js/bi.js', 'htdocs/js/checkmk.js', - 'htdocs/js/wato.js', - 'htdocs/js/search.js', - 'htdocs/js/sidebar.js', 'htdocs/js/dashboard.js', - 'htdocs/js/mobile.js', + 'htdocs/js/sidebar.js', + 'htdocs/js/search.js', + 'htdocs/sidebar.py', + 'htdocs/prediction.css', + 'htdocs/views.css', 'htdocs/dashboard.css', - 'htdocs/logwatch.css', - 'htdocs/valuespec.py', - 'htdocs/multitar.py', - 'htdocs/sidebar.css', + 'htdocs/check_mk.css', + 'htdocs/hooks.py', + 'htdocs/mobile.py', + 'htdocs/livestatus.py', + 'htdocs/inventory.py', + 'htdocs/pages.css', + 'htdocs/wato.css', 
'htdocs/htmllib.py', - 'htdocs/pagefunctions.py', + 'htdocs/notify.py', 'htdocs/bi.css', - 'htdocs/views.py', - 'htdocs/wato.py', - 'htdocs/index.py', - 'htdocs/dashboard.py', + 'htdocs/md5crypt.py', 'htdocs/table.py', - 'htdocs/wato.css', - 'htdocs/hooks.py', + 'htdocs/visuals.py', + 'htdocs/default_permissions.py', + 'htdocs/logwatch.py', + 'htdocs/login.py', + 'htdocs/mobile.css', + 'htdocs/actions.py', 'htdocs/ie.css', - 'htdocs/views.css', - 'htdocs/images/button_back_hi.png', - 'htdocs/images/wato_mainmenu_button_hi.png', - 'htdocs/images/sidebar_profile_hi.png', - 'htdocs/images/button_timeperiods_lo.png', - 'htdocs/images/button_login_lo.png', - 'htdocs/images/login_spotlight.png', - 'htdocs/images/button_sitestatus_waiting_lo.png', - 'htdocs/images/icon_download.png', + 'htdocs/jquery/jquery.mobile-1.0.js', + 'htdocs/jquery/jquery-1.6.4.min.js', + 'htdocs/jquery/jquery.mobile-1.0.min.css', + 'htdocs/jquery/jquery-1.7.1.min.js', + 'htdocs/jquery/jquery.mobile-1.0.css', + 'htdocs/jquery/jquery.mobile-1.0.min.js', + 'htdocs/jquery/images/icons-18-black.png', + 'htdocs/jquery/images/ajax-loader.png', + 'htdocs/jquery/images/icons-36-black.png', + 'htdocs/jquery/images/icons-36-white.png', + 'htdocs/jquery/images/icons-18-white.png', + 'htdocs/jquery/jquery.mobile.structure-1.0.min.css', + 'htdocs/jquery/jquery.mobile.structure-1.0.css', + 'htdocs/sidebar.css', + 'htdocs/webapi.py', + 'htdocs/index.py', + 'htdocs/help.py', + 'htdocs/views.py', + 'htdocs/valuespec.py', + 'htdocs/css/README', + 'htdocs/prediction.py', + 'htdocs/wato.py', + 'htdocs/dashboard.py', + 'htdocs/html_mod_python.py', + 'htdocs/images/dial_refresh_off.png', + 'htdocs/images/icon_notes.png', + 'htdocs/images/button_sitestatus_dead_hi.png', + 'htdocs/images/button_insert_hi.png', + 'htdocs/images/icon_availability.png', + 'htdocs/images/button_url_lo.png', + 'htdocs/images/button_rulesets_hi.png', + 'htdocs/images/button_ignore_lo.png', + 'htdocs/images/button_download_hi.png', + 'htdocs/images/button_sitestatus_disabled_hi.png', + 'htdocs/images/icon_siteuptodate.png', + 'htdocs/images/dashlet_servicestats.png', + 'htdocs/images/button_history_hi.png', + 'htdocs/images/dashboard_grid.png', + 'htdocs/images/favicon.ico', + 'htdocs/images/tree_40.png', + 'htdocs/images/icon_disabled.png', + 'htdocs/images/button_reloadsnapin_lo_alt.png', + 'htdocs/images/button_commands_down_lo.png', 'htdocs/images/icon_reload_cmk.gif', - 'htdocs/images/icon_matrix.png', + 'htdocs/images/link_events.gif', + 'htdocs/images/icon_wato_changes.png', + 'htdocs/images/button_checkbox_hi.png', + 'htdocs/images/icon_download.png', 'htdocs/images/icon_configuration.png', - 'htdocs/images/link_folder.gif', - 'htdocs/images/button_snapin_greyswitch_on_hi.png', - 'htdocs/images/icon_aggr.gif', - 'htdocs/images/icons/xapp.png', - 'htdocs/images/icons/input_devices_settings.png', - 'htdocs/images/icons/clock.png', - 'htdocs/images/icons/evolution.png', - 'htdocs/images/icons/kwikdisk.png', - 'htdocs/images/icons/gnome_apps.png', - 'htdocs/images/icons/kcolorchooser.png', - 'htdocs/images/icons/remote.png', - 'htdocs/images/icons/3floppy_unmount.png', - 'htdocs/images/icons/kaboodle.png', - 'htdocs/images/icons/kcmmidi.png', - 'htdocs/images/icons/joystick.png', - 'htdocs/images/icons/printmgr.png', - 'htdocs/images/icons/korganizer_todo.png', - 'htdocs/images/icons/x.png', - 'htdocs/images/icons/kasteroids.png', - 'htdocs/images/icons/kword.png', - 'htdocs/images/icons/package_favorite.png', - 'htdocs/images/icons/kdmconfig.png', - 
'htdocs/images/icons/printer1.png', - 'htdocs/images/icons/klpq.png', - 'htdocs/images/icons/nfs_unmount.png', - 'htdocs/images/icons/usbpendrive_mount.png', - 'htdocs/images/icons/xcalc.png', - 'htdocs/images/icons/enhanced_browsing.png', - 'htdocs/images/icons/package_games_board.png', - 'htdocs/images/icons/tablet.png', - 'htdocs/images/icons/package_editors.png', - 'htdocs/images/icons/knode.png', - 'htdocs/images/icons/package_applications.png', - 'htdocs/images/icons/package_utilities.png', - 'htdocs/images/icons/kiten.png', - 'htdocs/images/icons/ooo_setup.png', - 'htdocs/images/icons/ktouch.png', - 'htdocs/images/icons/fsview.png', - 'htdocs/images/icons/indeximg.png', - 'htdocs/images/icons/kontact.png', - 'htdocs/images/icons/kmenu.png', - 'htdocs/images/icons/ktimer.png', - 'htdocs/images/icons/earth.png', - 'htdocs/images/icons/pda_blue.png', - 'htdocs/images/icons/mycomputer.png', - 'htdocs/images/icons/kfm_home.png', - 'htdocs/images/icons/looknfeel.png', - 'htdocs/images/icons/cdaudio_mount.png', - 'htdocs/images/icons/browser.png', - 'htdocs/images/icons/print_class.png', - 'htdocs/images/icons/gimp.png', - 'htdocs/images/icons/raid.png', - 'htdocs/images/icons/kmix.png', - 'htdocs/images/icons/artsbuilder.png', - 'htdocs/images/icons/kmplot.png', - 'htdocs/images/icons/window_list.png', - 'htdocs/images/icons/multimedia.png', - 'htdocs/images/icons/kblackbox.png', - 'htdocs/images/icons/kcmsystem.png', - 'htdocs/images/icons/acroread.png', - 'htdocs/images/icons/kalarm.png', - 'htdocs/images/icons/amor.png', - 'htdocs/images/icons/emacs.png', - 'htdocs/images/icons/ooo_gulls.png', - 'htdocs/images/icons/important.png', - 'htdocs/images/icons/kfloppy.png', - 'htdocs/images/icons/iconthemes.png', - 'htdocs/images/icons/kcmx.png', - 'htdocs/images/icons/arts.png', - 'htdocs/images/icons/kgpg.png', - 'htdocs/images/icons/hdd_mount.png', - 'htdocs/images/icons/kverbos.png', - 'htdocs/images/icons/kjots.png', - 'htdocs/images/icons/kteatime.png', - 'htdocs/images/icons/mplayer.png', - 'htdocs/images/icons/colors.png', - 'htdocs/images/icons/kcmdevices.png', - 'htdocs/images/icons/memory.png', - 'htdocs/images/icons/kfind.png', - 'htdocs/images/icons/modem.png', - 'htdocs/images/icons/gnome_apps2.png', - 'htdocs/images/icons/opera.png', - 'htdocs/images/icons/cdwriter_unmount.png', - 'htdocs/images/icons/blockdevice.png', - 'htdocs/images/icons/kcmfontinst.png', - 'htdocs/images/icons/kbounce.png', - 'htdocs/images/icons/kdict.png', - 'htdocs/images/icons/3floppy_mount.png', - 'htdocs/images/icons/karm.png', - 'htdocs/images/icons/package_wordprocessing.png', - 'htdocs/images/icons/digikam.png', - 'htdocs/images/icons/clanbomber.png', - 'htdocs/images/icons/package_graphics.png', - 'htdocs/images/icons/cdaudio_unmount.png', - 'htdocs/images/icons/ksig.png', - 'htdocs/images/icons/realplayer.png', - 'htdocs/images/icons/kedit.png', - 'htdocs/images/icons/kugar.png', - 'htdocs/images/icons/kpaint.png', - 'htdocs/images/icons/kbackgammon_engine.png', - 'htdocs/images/icons/korganizer.png', - 'htdocs/images/icons/kcmmemory.png', - 'htdocs/images/icons/usb.png', - 'htdocs/images/icons/kchart.png', - 'htdocs/images/icons/kjobviewer.png', - 'htdocs/images/icons/samba_mount.png', - 'htdocs/images/icons/cactus.png', - 'htdocs/images/icons/artscontrol.png', - 'htdocs/images/icons/core.png', - 'htdocs/images/icons/licq.png', - 'htdocs/images/icons/help_index.png', - 'htdocs/images/icons/kcmscsi.png', - 'htdocs/images/icons/kbrunch.png', - 'htdocs/images/icons/keybindings.png', - 
'htdocs/images/icons/mozilla.png', - 'htdocs/images/icons/kdat.png', - 'htdocs/images/icons/konquest.png', - 'htdocs/images/icons/processor.png', - 'htdocs/images/icons/khangman.png', - 'htdocs/images/icons/kooka.png', - 'htdocs/images/icons/terminal.png', - 'htdocs/images/icons/kworldclock.png', - 'htdocs/images/icons/kfig.png', - 'htdocs/images/icons/kcmprocessor.png', - 'htdocs/images/icons/download_manager.png', - 'htdocs/images/icons/konqueror.png', - 'htdocs/images/icons/shed.png', - 'htdocs/images/icons/kpdf.png', - 'htdocs/images/icons/katomic.png', - 'htdocs/images/icons/kpresenter.png', - 'htdocs/images/icons/cdwriter_mount.png', - 'htdocs/images/icons/kthememgr.png', - 'htdocs/images/icons/klettres.png', - 'htdocs/images/icons/kuickshow.png', - 'htdocs/images/icons/scanner.png', - 'htdocs/images/icons/gimp2.png', - 'htdocs/images/icons/khelpcenter.png', - 'htdocs/images/icons/edu_science.png', - 'htdocs/images/icons/5floppy_mount.png', - 'htdocs/images/icons/hdd_unmount.png', - 'htdocs/images/icons/winprops.png', - 'htdocs/images/icons/krita.png', - 'htdocs/images/icons/kruler.png', - 'htdocs/images/icons/tv.png', - 'htdocs/images/icons/kcoloredit.png', - 'htdocs/images/icons/package.png', - 'htdocs/images/icons/cdrom_unmount.png', - 'htdocs/images/icons/kreversi.png', - 'htdocs/images/icons/kpackage.png', - 'htdocs/images/icons/locale.png', - 'htdocs/images/icons/personal.png', - 'htdocs/images/icons/laptop_pcmcia.png', - 'htdocs/images/icons/knotify.png', - 'htdocs/images/icons/korn.png', - 'htdocs/images/icons/ksim_cpu.png', - 'htdocs/images/icons/kaddressbook.png', - 'htdocs/images/icons/kaudiocreator.png', - 'htdocs/images/icons/package_toys.png', - 'htdocs/images/icons/designer.png', - 'htdocs/images/icons/zip_unmount.png', - 'htdocs/images/icons/ksim.png', - 'htdocs/images/icons/access.png', - 'htdocs/images/icons/konsole.png', - 'htdocs/images/icons/zip_mount.png', - 'htdocs/images/icons/randr.png', - 'htdocs/images/icons/edu_miscellaneous.png', - 'htdocs/images/icons/kivio.png', - 'htdocs/images/icons/kmenuedit.png', - 'htdocs/images/icons/kcmkwm.png', - 'htdocs/images/icons/amarok.png', - 'htdocs/images/icons/kcmdf.png', - 'htdocs/images/icons/netscape.png', - 'htdocs/images/icons/pda.png', - 'htdocs/images/icons/package_network.png', - 'htdocs/images/icons/kcharselect.png', - 'htdocs/images/icons/ksnapshot.png', - 'htdocs/images/icons/kxkb.png', - 'htdocs/images/icons/kmag.png', - 'htdocs/images/icons/kwrite.png', - 'htdocs/images/icons/package_games_arcade.png', - 'htdocs/images/icons/kcmpci.png', - 'htdocs/images/icons/samba_unmount.png', - 'htdocs/images/icons/error.png', - 'htdocs/images/icons/style.png', - 'htdocs/images/icons/kate.png', - 'htdocs/images/icons/kmousetool.png', - 'htdocs/images/icons/usbpendrive_unmount.png', - 'htdocs/images/icons/email.png', - 'htdocs/images/icons/edu_mathematics.png', - 'htdocs/images/icons/dvd_mount.png', - 'htdocs/images/icons/stylesheet.png', - 'htdocs/images/icons/camera.png', - 'htdocs/images/icons/camera_unmount.png', - 'htdocs/images/icons/ksysv.png', - 'htdocs/images/icons/kmines.png', - 'htdocs/images/icons/ksirc.png', - 'htdocs/images/icons/networkdevice.png', - 'htdocs/images/icons/krfb.png', - 'htdocs/images/icons/missing.png', - 'htdocs/images/icons/laptop_battery.png', - 'htdocs/images/icons/icons.png', - 'htdocs/images/icons/noatun.png', - 'htdocs/images/icons/file-manager.png', - 'htdocs/images/icons/mozilla-thunderbird.png', - 'htdocs/images/icons/kweather.png', - 'htdocs/images/icons/date.png', - 
'htdocs/images/icons/fonts.png', - 'htdocs/images/icons/krdc.png', - 'htdocs/images/icons/kig.png', - 'htdocs/images/icons/klipper.png', - 'htdocs/images/icons/kshisen.png', - 'htdocs/images/icons/5floppy_unmount.png', - 'htdocs/images/icons/display.png', - 'htdocs/images/icons/krec.png', - 'htdocs/images/icons/aktion.png', - 'htdocs/images/icons/dvd_unmount.png', - 'htdocs/images/icons/kpager.png', - 'htdocs/images/icons/kscd.png', - 'htdocs/images/icons/khotkeys.png', - 'htdocs/images/icons/key_bindings.png', - 'htdocs/images/icons/galeon.png', - 'htdocs/images/icons/kmoon.png', - 'htdocs/images/icons/kopete.png', - 'htdocs/images/icons/agent.png', - 'htdocs/images/icons/network.png', - 'htdocs/images/icons/irkick.png', - 'htdocs/images/icons/package_games_strategy.png', - 'htdocs/images/icons/print_printer.png', - 'htdocs/images/icons/mozilla-firebird.png', - 'htdocs/images/icons/sodipodi.png', - 'htdocs/images/icons/gaim.png', - 'htdocs/images/icons/kwin.png', - 'htdocs/images/icons/package_games.png', - 'htdocs/images/icons/ksplash.png', - 'htdocs/images/icons/xfmail.png', - 'htdocs/images/icons/package_multimedia.png', - 'htdocs/images/icons/kmahjong.png', - 'htdocs/images/icons/kmail.png', - 'htdocs/images/icons/security.png', - 'htdocs/images/icons/kwin4.png', - 'htdocs/images/icons/cdrom_mount.png', - 'htdocs/images/icons/keditbookmarks.png', - 'htdocs/images/icons/kdisknav.png', - 'htdocs/images/icons/artsmidimanager.png', - 'htdocs/images/icons/knotes.png', - 'htdocs/images/icons/kcmdrkonqi.png', - 'htdocs/images/icons/password.png', - 'htdocs/images/icons/background.png', - 'htdocs/images/icons/kolf.png', - 'htdocs/images/icons/kcalc.png', - 'htdocs/images/icons/kolourpaint.png', - 'htdocs/images/icons/kcmpartitions.png', - 'htdocs/images/icons/pda_black.png', - 'htdocs/images/icons/go.png', - 'htdocs/images/icons/kview.png', - 'htdocs/images/icons/filetypes.png', - 'htdocs/images/icons/khexedit.png', - 'htdocs/images/icons/mo_mount.png', - 'htdocs/images/icons/xconfig.png', - 'htdocs/images/icons/applixware.png', - 'htdocs/images/icons/samba.png', - 'htdocs/images/icons/mouse.png', - 'htdocs/images/icons/bookcase.png', - 'htdocs/images/icons/kappfinder.png', - 'htdocs/images/icons/bug.png', - 'htdocs/images/icons/package_settings.png', - 'htdocs/images/icons/chat.png', - 'htdocs/images/icons/ark.png', - 'htdocs/images/icons/cache.png', - 'htdocs/images/icons/kghostview.png', - 'htdocs/images/icons/kfm.png', - 'htdocs/images/icons/cookie.png', - 'htdocs/images/icons/kuser.png', - 'htdocs/images/icons/keyboard_layout.png', - 'htdocs/images/icons/knewsticker.png', - 'htdocs/images/icons/xemacs.png', - 'htdocs/images/icons/nfs_mount.png', - 'htdocs/images/icons/package_development.png', - 'htdocs/images/icons/energy.png', - 'htdocs/images/icons/kmessedwords.png', - 'htdocs/images/icons/kdf.png', - 'htdocs/images/icons/mo_unmount.png', - 'htdocs/images/icons/kservices.png', - 'htdocs/images/icons/package_system.png', - 'htdocs/images/icons/atlantik.png', - 'htdocs/images/icons/edu_languages.png', - 'htdocs/images/icons/printer.png', - 'htdocs/images/icons/kppp.png', - 'htdocs/images/icons/ktip.png', - 'htdocs/images/icons/kpercentage.png', - 'htdocs/images/icons/xmag.png', - 'htdocs/images/icons/kget.png', - 'htdocs/images/icons/kbackgammon.png', - 'htdocs/images/icons/xclock.png', - 'htdocs/images/icons/keyboard.png', - 'htdocs/images/icons/kcontrol.png', - 'htdocs/images/icons/camera_mount.png', - 'htdocs/images/icons/home.png', - 'htdocs/images/icons/hwinfo.png', - 
'htdocs/images/icons/kcmdevice.png', - 'htdocs/images/icons/juk.png', - 'htdocs/images/icons/kalzium.png', - 'htdocs/images/icons/kpovmodeler.png', - 'htdocs/images/icons/kmid.png', - 'htdocs/images/icons/bell.png', - 'htdocs/images/icons/ipod.png', - 'htdocs/images/icons/konqsidebar_mediaplayer.png', - 'htdocs/images/icons/kpilot.png', - 'htdocs/images/icons/kscreensaver.png', - 'htdocs/images/icons/flashkard.png', - 'htdocs/images/icons/package_edutainment.png', - 'htdocs/images/button_snapin_switch_off_lo.png', - 'htdocs/images/breadcrumb_c_lo.png', - 'htdocs/images/icon_inventory.png', - 'htdocs/images/icon_comment.gif', - 'htdocs/images/button_forth_hi.png', - 'htdocs/images/button_timeperiods_hi.png', - 'htdocs/images/button_filters_lo.png', - 'htdocs/images/button_restore_lo.png', - 'htdocs/images/button_restore_hi.png', - 'htdocs/images/icon_comment.png', - 'htdocs/images/button_painteroptions_lo.png', + 'htdocs/images/button_snapin_switch_on_lo.png', + 'htdocs/images/dial_num_columns.png', + 'htdocs/images/button_random_lo.png', + 'htdocs/images/icon_dashboard.png', + 'htdocs/images/button_sitestatus_unreach_lo.png', + 'htdocs/images/icon_notifications.png', 'htdocs/images/trans.png', - 'htdocs/images/alert_unreach.png', - 'htdocs/images/icon_wato.png', - 'htdocs/images/white_60percent.png', - 'htdocs/images/contextlink_short_hi.png', - 'htdocs/images/button_filters_set_hi.png', - 'htdocs/images/icon_rulesets.png', - 'htdocs/images/icon_disabled.png', - 'htdocs/images/checkbox_hover_bg.png', - 'htdocs/images/breadcrumb_rm_lo.png', - 'htdocs/images/button_inventory_failed_lo.png', - 'htdocs/images/login_input_error.png', - 'htdocs/images/link_search.gif', - 'htdocs/images/repprogress.png', - 'htdocs/images/alert_ok.png', - 'htdocs/images/button_home_lo.png', - 'htdocs/images/icon_snapshot.png', - 'htdocs/images/globe.png', - 'htdocs/images/button_analyze_hi.png', - 'htdocs/images/dial_refresh.png', - 'htdocs/images/wato_mainmenu_button_lo.png', - 'htdocs/images/icon_delete.png', - 'htdocs/images/dashadow-n.png', - 'htdocs/images/icon_backup.png', + 'htdocs/images/icon_reloading_cmk.gif', + 'htdocs/images/button_properties_lo.png', + 'htdocs/images/button_sitestatus_down_hi.png', + 'htdocs/images/icon_auditlog.png', + 'htdocs/images/button_table_actions_off_hi.png', + 'htdocs/images/icon_factoryreset.png', + 'htdocs/images/columnswitcher.png', + 'htdocs/images/status_add_dashlet.png', + 'htdocs/images/contextlink_lo.png', + 'htdocs/images/quicksearch_field_bg.png', + 'htdocs/images/white_20percent.png', + 'htdocs/images/icon_apply.png', + 'htdocs/images/icon_commands.png', + 'htdocs/images/snapin_background.png', + 'htdocs/images/button_sitestatus_dead_lo.png', + 'htdocs/images/button_folderproperties_hi.png', + 'htdocs/images/icon_status.png', + 'htdocs/images/button_bottom_lo.png', + 'htdocs/images/button_down_hi.png', + 'htdocs/images/alert_start.png', + 'htdocs/images/alert_warn.png', + 'htdocs/images/icon_folder.png', + 'htdocs/images/icon_hostgroups.png', + 'htdocs/images/button_add_dashlet_hi.png', + 'htdocs/images/status_pageurl.png', + 'htdocs/images/icon_repl_25.png', + 'htdocs/images/button_reloadsnapin_hi.png', + 'htdocs/images/button_table_actions_on_lo.png', + 'htdocs/images/button_sites_lo.png', + 'htdocs/images/tree_30.png', + 'htdocs/images/button_snapin_switch_on_hi.png', + 'htdocs/images/button_auditlog_hi.png', + 'htdocs/images/icon_crash.png', 'htdocs/images/rule_no.png', - 'htdocs/images/button_sitestatus_online_lo.png', - 'htdocs/images/button_backup_lo.png', 
+ 'htdocs/images/breadcrumb_c_hi.png', + 'htdocs/images/dashlet_arrow_l_max.png', + 'htdocs/images/wato_mainmenu_button_hi.png', + 'htdocs/images/button_timeperiods_hi.png', + 'htdocs/images/button_users_hi.png', + 'htdocs/images/link_processes.gif', + 'htdocs/images/dashlet_nodata.png', + 'htdocs/images/button_folderproperties_lo.png', + 'htdocs/images/dashlet_anchor_hi.png', + 'htdocs/images/breadcrumb_r_hi.png', + 'htdocs/images/button_snapin_greyswitch_off_lo.png', + 'htdocs/images/button_snapin_greyswitch_off_hi.png', + 'htdocs/images/rule_yes_off.png', + 'htdocs/images/nform_background.png', + 'htdocs/images/icon_stale.png', + 'htdocs/images/form_background.png', 'htdocs/images/button_servicegroups_hi.png', 'htdocs/images/alert_down.png', - 'htdocs/images/icon_roles.png', - 'htdocs/images/button_commands_lo.png', - 'htdocs/images/button_filters_set_down_lo.png', - 'htdocs/images/dashadow-sw.png', - 'htdocs/images/button_contactgroups_hi.png', - 'htdocs/images/icon_wato_changes.png', - 'htdocs/images/breadcrumb_lm_lo.png', - 'htdocs/images/icon_pnp.gif', - 'htdocs/images/dashadow-nw.png', - 'htdocs/images/helpbackground.jpg', - 'htdocs/images/dial_num_columns.png', - 'htdocs/images/button_maxisnapin_hi.png', - 'htdocs/images/button_move_lo.png', - 'htdocs/images/breadcrumb_l_hi.png', - 'htdocs/images/breadcrumb_rm_hi.png', - 'htdocs/images/button_top_lo.png', - 'htdocs/images/tree_black_30.png', + 'htdocs/images/button_minisnapin_hi.png', + 'htdocs/images/button_availability_lo.png', + 'htdocs/images/button_svc_problems_lo.png', 'htdocs/images/button_checkbox_down_lo.png', - 'htdocs/images/tree_black_50.png', + 'htdocs/images/button_painteroptions_off.png', 'htdocs/images/button_checkbox_lo.png', - 'htdocs/images/icon_rulepmatch.png', - 'htdocs/images/icon_new.png', - 'htdocs/images/link_configuration.gif', - 'htdocs/images/button_sitestatus_dead_lo.png', - 'htdocs/images/sidebar_button_lo.png', - 'htdocs/images/icon_checkmkg.gif', - 'htdocs/images/button_download_hi.png', - 'htdocs/images/contextlink_hot_hi.png', - 'htdocs/images/button_hosttag_hi.png', - 'htdocs/images/contextlink_short.png', - 'htdocs/images/button_sidebar_addsnapin_hi.png', - 'htdocs/images/icon_reload.gif', - 'htdocs/images/status_pageurl.png', - 'htdocs/images/button_folderproperties_lo.png', - 'htdocs/images/button_closesnapin_lo.png', - 'htdocs/images/tree_black_80.png', - 'htdocs/images/snapin_background.png', - 'htdocs/images/sidebar_button_hi.png', + 'htdocs/images/button_timeline_hi.png', + 'htdocs/images/button_filters_set_off.png', + 'htdocs/images/icon_bulk_import.png', + 'htdocs/images/button_replay_hi.png', + 'htdocs/images/icon_showbi.png', + 'htdocs/images/icon_services.png', 'htdocs/images/contentframe_background.jpg', - 'htdocs/images/button_auditlog_hi.png', - 'htdocs/images/button_commands_off.png', - 'htdocs/images/alert_crit.png', - 'htdocs/images/button_clone_lo.png', - 'htdocs/images/button_sitestatus_down_hi.png', - 'htdocs/images/button_folderproperties_hi.png', - 'htdocs/images/button_painteroptions_down_hi.png', - 'htdocs/images/link_home.gif', - 'htdocs/images/metanav_40_hi.png', - 'htdocs/images/icon_reloading_cmk.gif', - 'htdocs/images/button_sitestatus_down_lo.png', - 'htdocs/images/button_sitestatus_disabled_lo.png', - 'htdocs/images/icon_need_restart.png', - 'htdocs/images/button_edit_lo.png', - 'htdocs/images/icon_downtime.gif', - 'htdocs/images/button_sitestatus_unreach_lo.png', - 'htdocs/images/button_users_hi.png', - 'htdocs/images/alert_downtime.png', - 
'htdocs/images/link_map.gif', - 'htdocs/images/login_dark_bg.png', - 'htdocs/images/button_down_lo.png', - 'htdocs/images/dashadow-e.png', - 'htdocs/images/button_reloadsnapin_hi.png', - 'htdocs/images/icon_restart.png', - 'htdocs/images/contextlink_hi.png', - 'htdocs/images/icon_contactgroups.png', - 'htdocs/images/icon_hosttag.png', - 'htdocs/images/button_filters_down_lo.png', - 'htdocs/images/icon_filter_set.png', - 'htdocs/images/button_top_hi.png', - 'htdocs/images/icon_outofnot.gif', - 'htdocs/images/link_info.gif', - 'htdocs/images/icon_apply.png', - 'htdocs/images/button_delete_hi.png', - 'htdocs/images/icon_edit.png', - 'htdocs/images/button_snapin_switch_off_hi.png', + 'htdocs/images/button_servicegroups_lo.png', + 'htdocs/images/button_sidebar_settings_hi.png', + 'htdocs/images/checkbox_hover_bg.png', + 'htdocs/images/breadcrumb_lm_hi.png', + 'htdocs/images/wato_mainmenu_button_lo.png', + 'htdocs/images/button_download_lo.png', 'htdocs/images/status_aggrcomp.png', - 'htdocs/images/icon_cluster.png', - 'htdocs/images/button_hostgroups_hi.png', - 'htdocs/images/button_url_lo.png', - 'htdocs/images/icon_status.png', - 'htdocs/images/icon_flapping.gif', - 'htdocs/images/dashlet_linkarrow.png', - 'htdocs/images/icon_analyze.png', - 'htdocs/images/button_contactgroups_lo.png', - 'htdocs/images/icon_new_cluster.png', - 'htdocs/images/button_properties_lo.png', - 'htdocs/images/button_sidebar_addsnapin_lo.png', + 'htdocs/images/icon_repl_75.png', + 'htdocs/images/icon_notify_create.png', + 'htdocs/images/ios_logo.png', + 'htdocs/images/link_downtime.gif', + 'htdocs/images/link_notifications.gif', + 'htdocs/images/sidebar_top.png', + 'htdocs/images/icon_aggr.png', + 'htdocs/images/icon_alert.png', + 'htdocs/images/button_folder_lo.png', + 'htdocs/images/icon_timeperiods.png', + 'htdocs/images/button_sidebar_messages_hi.png', + 'htdocs/images/icon_rulenmatch.png', + 'htdocs/images/icon_snapshot_nchecksum.png', + 'htdocs/images/button_top_lo.png', + 'htdocs/images/button_aggr_hi.png', + 'htdocs/images/button_showbi_lo.png', + 'htdocs/images/contextlink_hot.png', + 'htdocs/images/dashlet_arrow_t_grow.png', + 'htdocs/images/icon_failed.png', + 'htdocs/images/icon_crash_glow.png', + 'htdocs/images/dashlet_arrow_b_grow.png', + 'htdocs/images/button_movedown_lo.png', + 'htdocs/images/button_forth_hi.png', + 'htdocs/images/icon_back.png', + 'htdocs/images/icon_custom_attr.png', + 'htdocs/images/dashlet_arrow_r_grow.png', + 'htdocs/images/button_download_agents_hi.png', + 'htdocs/images/contextlink_hi.png', + 'htdocs/images/white_30percent.png', + 'htdocs/images/icon_warning.png', + 'htdocs/images/tree_black_60.png', + 'htdocs/images/helpbutton_passive.png', + 'htdocs/images/contextlink.png', + 'htdocs/images/icon_action.png', + 'htdocs/images/button_backup_lo.png', + 'htdocs/images/alert_crit.png', + 'htdocs/images/icon_users.png', + 'htdocs/images/icon_forth_off.png', + 'htdocs/images/button_sitestatus_unreach_hi.png', + 'htdocs/images/perfometer-bg.png', + 'htdocs/images/speedometer.png', + 'htdocs/images/breadcrumb_r_lo.png', + 'htdocs/images/button_static_checks_lo.png', + 'htdocs/images/button_sitestatus_unknown_hi.png', + 'htdocs/images/button_minisnapin_lo.png', + 'htdocs/images/white_50percent.png', + 'htdocs/images/icon_wato_nochanges.png', 'htdocs/images/icon_reloading.gif', - 'htdocs/images/button_forth_lo.png', - 'htdocs/images/button_movedown_hi.png', - 'htdocs/images/button_snapin_greyswitch_off_lo.png', - 'htdocs/images/icon_usedrulesets.png', - 
'htdocs/images/dial_num_columns_hi.png', - 'htdocs/images/button_configuration_lo.png', - 'htdocs/images/button_reloadsnapin_lo.png', - 'htdocs/images/snapin_header.png', - 'htdocs/images/button_home_hi.png', + 'htdocs/images/icon_foreign_changes.png', + 'htdocs/images/icon_ruleimatch.png', + 'htdocs/images/button_filters_hi.png', + 'htdocs/images/metanav_button_hi.png', + 'htdocs/images/icon_www.png', + 'htdocs/images/bg_flapping.png', + 'htdocs/images/metanav_30_hi.png', + 'htdocs/images/icon_logwatch.png', + 'htdocs/images/icon_ical.png', + 'htdocs/images/button_restore_hi.png', + 'htdocs/images/icon_cluster.png', + 'htdocs/images/button_inventory_failed_hi.png', + 'htdocs/images/button_annotation_hi.png', + 'htdocs/images/helpbutton_active_hi.png', + 'htdocs/images/icon_repl_pending.png', + 'htdocs/images/button_sidebar_addsnapin_lo.png', + 'htdocs/images/icon_email.png', + 'htdocs/images/rule_no_off.png', 'htdocs/images/button_sitestatus_online_hi.png', + 'htdocs/images/snapin_closed.png', + 'htdocs/images/button_replay_lo.png', + 'htdocs/images/sidebar_background.jpg', + 'htdocs/images/button_painteroptions_hi.png', 'htdocs/images/rule_yes.png', - 'htdocs/images/metanav_30_hi.png', - 'htdocs/images/button_sites_hi.png', - 'htdocs/images/link_performance.gif', - 'htdocs/images/tree_40.png', - 'htdocs/images/icon_random.png', - 'htdocs/images/alert_flapping.png', - 'htdocs/images/button_up_lo.png', - 'htdocs/images/button_edit_hi.png', - 'htdocs/images/icon_parentscan.png', - 'htdocs/images/columnswitcher_hi.png', + 'htdocs/images/metanav_40.png', + 'htdocs/images/white_60percent.png', + 'htdocs/images/icon_new.png', + 'htdocs/images/button_edit_lo.png', + 'htdocs/images/logo_mk_small.png', + 'htdocs/images/button_commands_lo.png', + 'htdocs/images/icon_backup.png', + 'htdocs/images/button_services_hi.png', + 'htdocs/images/button_end_lo.png', + 'htdocs/images/button_sidebar_logout_lo.png', + 'htdocs/images/button_download_agents_lo.png', + 'htdocs/images/button_up_hi.png', + 'htdocs/images/button_insertdate_lo.png', + 'htdocs/images/dashlet_pnpgraph.png', + 'htdocs/images/alert_unknown.png', 'htdocs/images/helpbutton_active.png', + 'htdocs/images/icon_online.png', + 'htdocs/images/button_diagnose_lo.png', + 'htdocs/images/status_ldap.png', + 'htdocs/images/tree_black_30.png', + 'htdocs/images/button_move_lo.png', + 'htdocs/images/login_input_normal.png', + 'htdocs/images/dashlet_edit_hi.png', + 'htdocs/images/button_configuration_hi.png', + 'htdocs/images/breadcrumb_l_hi.png', + 'htdocs/images/button_wikisearch_lo.png', + 'htdocs/images/button_snapin_switch_off_hi.png', + 'htdocs/images/dashlet_del_lo.png', + 'htdocs/images/dashlet_del_hi.png', + 'htdocs/images/globe.png', 'htdocs/images/button_sitestatus_unknown_lo.png', - 'htdocs/images/icon_ruleimatch.png', - 'htdocs/images/button_minisnapin_hi.png', - 'htdocs/images/check_mk.trans.120.png', - 'htdocs/images/button_insert_lo.png', + 'htdocs/images/button_sitestatus_disabled_lo.png', + 'htdocs/images/sidebar_profile_lo.png', + 'htdocs/images/button_hosttag_lo.png', + 'htdocs/images/button_timewarp_lo.png', + 'htdocs/images/button_notifications_hi.png', 'htdocs/images/sidebar_bottom.png', - 'htdocs/images/snapinfooter_einstellungen_down.png', - 'htdocs/images/folder_closed.png', - 'htdocs/images/button_url_hi.png', - 'htdocs/images/dashadow-s.png', - 'htdocs/images/contextlink_hot.png', + 'htdocs/images/button_closesnapin_hi.png', + 'htdocs/images/icon_need_restart.png', + 'htdocs/images/icon_nagios.png', + 
'htdocs/images/icon_rulematch.png', + 'htdocs/images/logo_mk.png', + 'htdocs/images/icon_ldap.png', + 'htdocs/images/button_backup_hi.png', + 'htdocs/images/white_10percent.png', + 'htdocs/images/icon_authok.png', + 'htdocs/images/icon_download_csv.png', + 'htdocs/images/button_maxisnapin_hi.png', + 'htdocs/images/button_home_lo.png', + 'htdocs/images/icon_need_replicate.png', + 'htdocs/images/button_random_hi.png', 'htdocs/images/button_quicksearch_hi.png', - 'htdocs/images/button_filters_hi.png', - 'htdocs/images/button_reloadsnapin_lo_alt.png', - 'htdocs/images/icon_pnp.png', - 'htdocs/images/link_folder_open.gif', - 'htdocs/images/link_processes.gif', - 'htdocs/images/image_button_l.png', - 'htdocs/images/quicksearch_field_bg.png', - 'htdocs/images/assume_3.png', - 'htdocs/images/snapin_closed.png', - 'htdocs/images/button_login_hi.png', - 'htdocs/images/assume_none.png', - 'htdocs/images/icon_properties.png', - 'htdocs/images/icon_servicegroups.png', - 'htdocs/images/icon_rulenmatch.png', - 'htdocs/images/icon_action.gif', - 'htdocs/images/assume_2.png', - 'htdocs/images/link_hosts.gif', - 'htdocs/images/mk_logo_126x72.gif', - 'htdocs/images/contextlink_down.png', - 'htdocs/images/button_random_lo.png', - 'htdocs/images/breadcrumb_c_hi.png', - 'htdocs/images/dashadow-ne.png', - 'htdocs/images/button_start_lo.png', + 'htdocs/images/dashlet_url.png', + 'htdocs/images/image_button_r.png', + 'htdocs/images/link_configuration.gif', + 'htdocs/images/breadcrumb_rm_lo.png', + 'htdocs/images/icon_replicate.png', + 'htdocs/images/icon_checkmkg.gif', + 'htdocs/images/icon_diagnose.png', + 'htdocs/images/dashboard_menuarrow.png', + 'htdocs/images/icon_autherr.png', 'htdocs/images/wato_mainmenu_button_clicked.png', - 'htdocs/images/icon_nagvis.png', - 'htdocs/images/icon_rulematch.png', - 'htdocs/images/button_start_hi.png', - 'htdocs/images/icon_filter.png', - 'htdocs/images/sidebar_top.png', - 'htdocs/images/tree_80.png', - 'htdocs/images/tree_30.png', - 'htdocs/images/icon_hostdowntime.png', - 'htdocs/images/button_analyze_lo.png', - 'htdocs/images/icon_reload_failed.gif', - 'htdocs/images/icon_trans.png', - 'htdocs/images/button_filters_set_off.png', + 'htdocs/images/contextlink_short_hi.png', + 'htdocs/images/icon_packages.png', + 'htdocs/images/alert_ack.png', 'htdocs/images/login_window.png', - 'htdocs/images/tree_black_60.png', - 'htdocs/images/button_checkbox_hi.png', - 'htdocs/images/tree_black_70.png', + 'htdocs/images/link_link.gif', + 'htdocs/images/icon_notif_disabled.png', + 'htdocs/images/tree_00.png', + 'htdocs/images/snapin_footer.png', + 'htdocs/images/button_restore_lo.png', + 'htdocs/images/tree_90.png', + 'htdocs/images/icon_prediction.png', + 'htdocs/images/button_auditlog_lo.png', + 'htdocs/images/login_input_error.png', + 'htdocs/images/columnswitcher_down.png', + 'htdocs/images/icon_notify_cancel.png', + 'htdocs/images/status_persist.png', + 'htdocs/images/icon_retry_disabled.gif', + 'htdocs/images/icon_flapping.png', + 'htdocs/images/button_history_lo.png', + 'htdocs/images/icon_inv.png', + 'htdocs/images/button_table_actions_on_hi.png', + 'htdocs/images/button_sidebar_logout_hi.png', + 'htdocs/images/button_hosttag_hi.png', + 'htdocs/images/button_configuration_lo.png', + 'htdocs/images/breadcrumb_l_lo.png', + 'htdocs/images/icon_abort.png', + 'htdocs/images/icon_matrix.png', + 'htdocs/images/icon_rulepmatch.png', + 'htdocs/images/button_insert_lo.png', + 'htdocs/images/icon_npassive.png', + 'htdocs/images/pluginurl.png', + 'htdocs/images/button_filters_lo.png', + 
'htdocs/images/icon_update.png', + 'htdocs/images/icon_passive.gif', + 'htdocs/images/contextlink_down.png', + 'htdocs/images/icon_inventory_failed.png', + 'htdocs/images/button_hostgroups_hi.png', + 'htdocs/images/button_contactgroups_hi.png', + 'htdocs/images/icon_analyze.png', + 'htdocs/images/tree_black_90.png', + 'htdocs/images/new.png', + 'htdocs/images/button_sitestatus_waiting_hi.png', + 'htdocs/images/contextlink_short.png', + 'htdocs/images/icon_success.png', 'htdocs/images/button_moveup_lo.png', - 'htdocs/images/icon_painteroptions.png', - 'htdocs/images/white_10percent.png', + 'htdocs/images/icon_host.png', + 'htdocs/images/button_snapin_switch_off_lo.png', + 'htdocs/images/button_start_hi.png', + 'htdocs/images/dashlet_arrow_r_max.png', + 'htdocs/images/button_quicksearch_lo.png', + 'htdocs/images/button_bottom_hi.png', + 'htdocs/images/link_queue.gif', + 'htdocs/images/side_fold.png', + 'htdocs/images/image_button.png', 'htdocs/images/helpbutton_passive_hi.png', - 'htdocs/images/columnswitcher.png', - 'htdocs/images/link_error.gif', + 'htdocs/images/ooservice.png', + 'htdocs/images/button_forth_lo.png', + 'htdocs/images/icon_history.png', + 'htdocs/images/breadcrumb_lm_lo.png', + 'htdocs/images/dial_num_columns_off.png', + 'htdocs/images/button_commands_off.png', + 'htdocs/images/icon_static_checks.png', + 'htdocs/images/login_spotlight.png', + 'htdocs/images/tree_80.png', + 'htdocs/images/button_move_hi.png', + 'htdocs/images/link_reporting.gif', + 'htdocs/images/login_dark_bg.png', + 'htdocs/images/link_folder_open.gif', + 'htdocs/images/snapin_header.png', + 'htdocs/images/button_delete_lo.png', + 'htdocs/images/button_back_hi.png', + 'htdocs/images/button_closesnapin_lo.png', + 'htdocs/images/link_home.gif', + 'htdocs/images/alert_ackstop.png', + 'htdocs/images/white_80percent.png', + 'htdocs/images/check_mk.trans.120.png', + 'htdocs/images/button_sitestatus_online_lo.png', + 'htdocs/images/alert_command.png', + 'htdocs/images/dashlet_hoststats.png', + 'htdocs/images/icon_trans.png', + 'htdocs/images/dashlet_anchor_off.png', + 'htdocs/images/button_sites_hi.png', + 'htdocs/images/button_dashboard_controls_hi.png', + 'htdocs/images/link_notes.gif', + 'htdocs/images/button_users_lo.png', + 'htdocs/images/folder_hi.png', + 'htdocs/images/dashlet_anchor_lo.png', 'htdocs/images/tree_20.png', - 'htdocs/images/button_sidebar_settings_hi.png', - 'htdocs/images/icon_www.png', - 'htdocs/images/metanav_40.png', - 'htdocs/images/button_end_hi.png', - 'htdocs/images/icon_nagios.gif', + 'htdocs/images/icon_notif_man_disabled.png', + 'htdocs/images/icon_timewarp.png', + 'htdocs/images/icon_reload_failed.gif', + 'htdocs/images/button_dashboard_controls_lo.png', + 'htdocs/images/metanav_30.png', + 'htdocs/images/icon_sites.png', + 'htdocs/images/button_commands_down_hi.png', + 'htdocs/images/button_diagnose_hi.png', + 'htdocs/images/icon_rename_host.png', + 'htdocs/images/button_home_hi.png', + 'htdocs/images/icon_pnp.gif', + 'htdocs/images/button_hostgroups_lo.png', + 'htdocs/images/wikisearch_field_bg.png', + 'htdocs/images/icon_offline.png', 'htdocs/images/snapinfooter_logout_down.png', - 'htdocs/images/alert_warn.png', - 'htdocs/images/button_bottom_lo.png', + 'htdocs/images/button_ignore_hi.png', + 'htdocs/images/button_checkbox_off.png', + 'htdocs/images/alert_stop.png', + 'htdocs/images/icon_user_locked.png', + 'htdocs/images/icon_ignore.png', + 'htdocs/images/dashlet_arrow_t_max.png', + 'htdocs/images/icon_hostdowntime.png', + 'htdocs/images/button_roles_hi.png', + 
'htdocs/images/icon_enabled.png', + 'htdocs/images/dial_refresh_hi.png', + 'htdocs/images/button_folder_hi.png', + 'htdocs/images/alert_downtime.png', + 'htdocs/images/button_login_hi.png', + 'htdocs/images/icon_detail.png', + 'htdocs/images/button_svc_problems_hi.png', + 'htdocs/images/button_showbi_hi.png', + 'htdocs/images/icon_filter.png', + 'htdocs/images/button_filters_off.png', + 'htdocs/images/sidebar_logout_lo.png', + 'htdocs/images/icon_search.png', + 'htdocs/images/alert_unreach.png', + 'htdocs/images/contextlink_hot_hi.png', + 'htdocs/images/icon_snapshot_pchecksum.png', + 'htdocs/images/icon_painteroptions.png', + 'htdocs/images/icon_repl_failed.png', + 'htdocs/images/dial_num_columns_hi.png', + 'htdocs/images/button_closetimewarp_hi.png', + 'htdocs/images/status_frameurl.png', + 'htdocs/images/icon_repl_locked.png', + 'htdocs/images/tree_black_10.png', + 'htdocs/images/dashlet_arrow_l.png', 'htdocs/images/icon_upload.png', - 'htdocs/images/button_moveup_hi.png', - 'htdocs/images/icon_disabled.gif', - 'htdocs/images/icon_commands.png', - 'htdocs/images/button_sitestatus_unknown_hi.png', - 'htdocs/images/icon_inventory_failed.png', - 'htdocs/images/button_commands_down_lo.png', - 'htdocs/images/folder_hi.png', - 'htdocs/images/icon_users.png', - 'htdocs/images/button_movedown_lo.png', - 'htdocs/images/icon_downtime.png', - 'htdocs/images/sidebar_background.jpg', - 'htdocs/images/someproblem.png', - 'htdocs/images/button_minisnapin_lo.png', - 'htdocs/images/helpbutton_active_hi.png', - 'htdocs/images/tree_10.png', - 'htdocs/images/link_notifications.gif', - 'htdocs/images/tree_50.png', - 'htdocs/images/metanav_button_hi.png', + 'htdocs/images/alert_notify.png', + 'htdocs/images/button_analyze_lo.png', + 'htdocs/images/icon_repl_50.png', + 'htdocs/images/icon_parentscan.png', + 'htdocs/images/button_inventory_failed_lo.png', + 'htdocs/images/button_painteroptions_down_hi.png', + 'htdocs/images/button_down_lo.png', + 'htdocs/images/button_reloadsnapin_lo.png', + 'htdocs/images/white_70percent.png', + 'htdocs/images/button_timeline_lo.png', + 'htdocs/images/icon_loading.gif', + 'htdocs/images/helpbackground.jpg', + 'htdocs/images/button_timeperiods_lo.png', + 'htdocs/images/alert_flapping.png', + 'htdocs/images/link_map.gif', + 'htdocs/images/button_edit_hi.png', + 'htdocs/images/icon_notification.png', + 'htdocs/images/button_contactgroups_lo.png', + 'htdocs/images/icon_notif_enabled.png', + 'htdocs/images/button_back_lo.png', + 'htdocs/images/icon_nagvis.png', + 'htdocs/images/button_analyze_hi.png', 'htdocs/images/icon_home.png', - 'htdocs/images/icon_back.png', - 'htdocs/images/icon_siteuptodate.png', - 'htdocs/images/alert_unknown.png', - 'htdocs/images/white_50percent.png', - 'htdocs/images/button_rulesets_lo.png', - 'htdocs/images/button_hosttag_lo.png', - 'htdocs/images/new.png', + 'htdocs/images/button_add_dashlet_lo.png', + 'htdocs/images/button_roles_lo.png', + 'htdocs/images/tree_60.png', + 'htdocs/images/button_aggr_lo.png', + 'htdocs/images/icon_restart.png', + 'htdocs/images/button_movedown_hi.png', + 'htdocs/images/folder_closed.png', + 'htdocs/images/button_top_hi.png', + 'htdocs/images/icon_ack.png', + 'htdocs/images/tree_black_00.png', + 'htdocs/images/assume_2.png', + 'htdocs/images/icon_hosttag.png', + 'htdocs/images/breadcrumb_c_lo.png', + 'htdocs/images/snapinfooter_einstellungen_down.png', + 'htdocs/images/assume_bg.png', + 'htdocs/images/button_up_lo.png', + 'htdocs/images/tableshadow.png', + 'htdocs/images/button_sidebar_messages_lo.png', + 
'htdocs/images/dashlet_arrow_l_grow.png', + 'htdocs/images/icon_roles.png', + 'htdocs/images/button_annotation_lo.png', + 'htdocs/images/alert_up.png', + 'htdocs/images/dashlet_linkarrow.png', + 'htdocs/images/button_start_lo.png', + 'htdocs/images/button_wikisearch_hi.png', + 'htdocs/images/icon_software.png', + 'htdocs/images/alert_ok.png', + 'htdocs/images/icon_back_off.png', + 'htdocs/images/link_folder.gif', + 'htdocs/images/button_filters_set_hi.png', + 'htdocs/images/icon_checkmk.gif', + 'htdocs/images/button_painteroptions_lo.png', + 'htdocs/images/link_info.gif', + 'htdocs/images/someproblem.png', + 'htdocs/images/tree_10.png', + 'htdocs/images/icon_inventory.png', + 'htdocs/images/button_insertdate_hi.png', + 'htdocs/images/button_check_parameters_hi.png', + 'htdocs/images/assume_3.png', + 'htdocs/images/icon_timeline.png', + 'htdocs/images/icon_new_cluster.png', + 'htdocs/images/button_sitestatus_waiting_lo.png', + 'htdocs/images/snapinfooter_addsnapin_down.png', + 'htdocs/images/icon_wato.png', + 'htdocs/images/button_sitestatus_down_lo.png', + 'htdocs/images/button_filters_set_down_hi.png', + 'htdocs/images/icon_starred.png', + 'htdocs/images/button_login_lo.png', + 'htdocs/images/login_error_message.png', + 'htdocs/images/button_sidebar_settings_lo.png', + 'htdocs/images/button_clone_hi.png', + 'htdocs/images/icon_retry.gif', + 'htdocs/images/button_commands_hi.png', + 'htdocs/images/button_end_hi.png', + 'htdocs/images/image_button_l.png', + 'htdocs/images/dashlet_arrow_b_max.png', + 'htdocs/images/icon_localrule.png', 'htdocs/images/sidebar_logout_hi.png', - 'htdocs/images/login_input_normal.png', + 'htdocs/images/button_services_lo.png', 'htdocs/images/README', + 'htdocs/images/button_filters_set_lo.png', + 'htdocs/images/button_snapin_greyswitch_on_hi.png', + 'htdocs/images/icon_repl_success.png', + 'htdocs/images/button_filters_set_down_lo.png', + 'htdocs/images/icon_empty.png', + 'htdocs/images/icon_svc_problems.png', + 'htdocs/images/link_performance.gif', + 'htdocs/images/columnswitcher_hi.png', + 'htdocs/images/tree_black_40.png', 'htdocs/images/button_painteroptions_down_lo.png', - 'htdocs/images/icon_checkmk.gif', - 'htdocs/images/dial_refresh_hi.png', - 'htdocs/images/icon_authok.png', - 'htdocs/images/icon_services.png', - 'htdocs/images/button_sitestatus_waiting_hi.png', - 'htdocs/images/image_button_r.png', - 'htdocs/images/logo_mk.png', - 'htdocs/images/breadcrumb_r_hi.png', - 'htdocs/images/button_roles_hi.png', + 'htdocs/images/icon_trash.png', + 'htdocs/images/button_sidebar_addsnapin_hi.png', + 'htdocs/images/button_table_actions_off_lo.png', + 'htdocs/images/sidebar_button_hi.png', + 'htdocs/images/icon_servicegroups.png', + 'htdocs/images/dashlet_view.png', + 'htdocs/images/icon_hardware.png', + 'htdocs/images/button_clone_lo.png', + 'htdocs/images/button_timewarp_hi.png', + 'htdocs/images/icon_random.png', + 'htdocs/images/icon_contactgroups.png', 'htdocs/images/icon_newfolder.png', - 'htdocs/images/button_random_hi.png', - 'htdocs/images/favicon.ico', - 'htdocs/images/dashadow-se.png', - 'htdocs/images/link_monitoring.gif', - 'htdocs/images/icon_need_replicate.png', - 'htdocs/images/button_filters_set_lo.png', - 'htdocs/images/button_users_lo.png', - 'htdocs/images/icon_aggr.png', - 'htdocs/images/dial_num_columns_off.png', - 'htdocs/images/button_backup_hi.png', - 'htdocs/images/form_background.png', - 'htdocs/images/columnswitcher_down.png', + 'htdocs/images/icon_comment.png', + 'htdocs/images/button_check_parameters_lo.png', + 
'htdocs/images/button_snapin_greyswitch_on_lo.png', + 'htdocs/images/icon_pnp.png', + 'htdocs/images/icon_reload.gif', + 'htdocs/images/icon_discard.png', + 'htdocs/images/icon_snapshot_checksum.png', + 'htdocs/images/icon_snapshot.png', + 'htdocs/images/metanav_40_hi.png', + 'htdocs/images/sidebar_profile_hi.png', + 'htdocs/images/icon_outofnot.png', + 'htdocs/images/button_filters_down_hi.png', + 'htdocs/images/dial_refresh.png', + 'htdocs/images/icon_view.png', + 'htdocs/images/tree_black_70.png', + 'htdocs/images/tree_50.png', + 'htdocs/images/icon_properties.png', 'htdocs/images/tree_70.png', - 'htdocs/images/button_hostgroups_lo.png', - 'htdocs/images/button_insert_hi.png', - 'htdocs/images/status_frameurl.png', - 'htdocs/images/snapinfooter_addsnapin_down.png', - 'htdocs/images/icon_ack.gif', - 'htdocs/images/icon_npassive.gif', - 'htdocs/images/button_configuration_hi.png', - 'htdocs/images/alert_downtimestop.png', 'htdocs/images/assume_0.png', - 'htdocs/images/white_80percent.png', - 'htdocs/images/button_sitestatus_dead_hi.png', - 'htdocs/images/button_inventory_failed_hi.png', - 'htdocs/images/nform_background.png', - 'htdocs/images/alert_up.png', - 'htdocs/images/tree_black_10.png', - 'htdocs/images/icon_timeperiods.png', - 'htdocs/images/icon_history.png', - 'htdocs/images/tree_black_00.png', - 'htdocs/images/helpbutton_passive.png', - 'htdocs/images/assume_1.png', - 'htdocs/images/button_folder_hi.png', - 'htdocs/images/icon_wato.gif', - 'htdocs/images/icon_sites.png', - 'htdocs/images/button_bottom_hi.png', - 'htdocs/images/icon_factoryreset.png', - 'htdocs/images/button_auditlog_lo.png', - 'htdocs/images/tree_black_20.png', - 'htdocs/images/button_snapin_greyswitch_on_lo.png', - 'htdocs/images/link_link.gif', - 'htdocs/images/link_notes.gif', - 'htdocs/images/sidebar_profile_lo.png', - 'htdocs/images/dashadow-w.png', - 'htdocs/images/button_delete_lo.png', - 'htdocs/images/button_filters_set_down_hi.png', - 'htdocs/images/button_sites_lo.png', - 'htdocs/images/button_clone_hi.png', - 'htdocs/images/contextlink.png', - 'htdocs/images/alert_stop.png', - 'htdocs/images/icon_folder.png', + 'htdocs/images/button_closetimewarp_lo.png', + 'htdocs/images/link_monitoring.gif', + 'htdocs/images/white_40percent.png', + 'htdocs/images/icon_annotation.png', + 'htdocs/images/assume_none.png', + 'htdocs/images/icon_usedrulesets.png', + 'htdocs/images/button_moveup_hi.png', + 'htdocs/images/button_url_hi.png', + 'htdocs/images/icon_bulk.png', + 'htdocs/images/tree_black_50.png', + 'htdocs/images/icon_edit.png', + 'htdocs/images/icons/kfind.png', + 'htdocs/images/icons/hwinfo.png', + 'htdocs/images/icons/knode.png', + 'htdocs/images/icons/ark.png', + 'htdocs/images/icons/tv.png', + 'htdocs/images/icons/access.png', + 'htdocs/images/icons/kshisen.png', + 'htdocs/images/icons/kfm.png', + 'htdocs/images/icons/konqsidebar_mediaplayer.png', + 'htdocs/images/icons/kget.png', + 'htdocs/images/icons/missing.png', + 'htdocs/images/icons/kcmdrkonqi.png', + 'htdocs/images/icons/gimp.png', + 'htdocs/images/icons/joystick.png', + 'htdocs/images/icons/core.png', + 'htdocs/images/icons/kreversi.png', + 'htdocs/images/icons/cache.png', + 'htdocs/images/icons/package_system.png', + 'htdocs/images/icons/kig.png', + 'htdocs/images/icons/winprops.png', + 'htdocs/images/icons/zip_unmount.png', + 'htdocs/images/icons/package_toys.png', + 'htdocs/images/icons/kwin4.png', + 'htdocs/images/icons/kdisknav.png', + 'htdocs/images/icons/package_games_arcade.png', + 'htdocs/images/icons/fsview.png', + 
'htdocs/images/icons/kcmpci.png', + 'htdocs/images/icons/scanner.png', + 'htdocs/images/icons/khexedit.png', + 'htdocs/images/icons/keyboard_layout.png', + 'htdocs/images/icons/artsbuilder.png', + 'htdocs/images/icons/kappfinder.png', + 'htdocs/images/icons/kpilot.png', + 'htdocs/images/icons/ooo_setup.png', + 'htdocs/images/icons/error.png', + 'htdocs/images/icons/dvd_mount.png', + 'htdocs/images/icons/kmenuedit.png', + 'htdocs/images/icons/package_games_board.png', + 'htdocs/images/icons/kruler.png', + 'htdocs/images/icons/bell.png', + 'htdocs/images/icons/edu_science.png', + 'htdocs/images/icons/kwikdisk.png', + 'htdocs/images/icons/printer1.png', + 'htdocs/images/icons/enhanced_browsing.png', + 'htdocs/images/icons/khangman.png', + 'htdocs/images/icons/galeon.png', + 'htdocs/images/icons/mplayer.png', + 'htdocs/images/icons/aktion.png', + 'htdocs/images/icons/gnome_apps2.png', + 'htdocs/images/icons/krita.png', + 'htdocs/images/icons/3floppy_mount.png', + 'htdocs/images/icons/package.png', + 'htdocs/images/icons/keditbookmarks.png', + 'htdocs/images/icons/kcmsystem.png', + 'htdocs/images/icons/fonts.png', + 'htdocs/images/icons/ksirc.png', + 'htdocs/images/icons/kblackbox.png', + 'htdocs/images/icons/cdrom_mount.png', + 'htdocs/images/icons/kverbos.png', + 'htdocs/images/icons/kmahjong.png', + 'htdocs/images/icons/usb.png', + 'htdocs/images/icons/iconthemes.png', + 'htdocs/images/icons/xconfig.png', + 'htdocs/images/icons/artsmidimanager.png', + 'htdocs/images/icons/amarok.png', + 'htdocs/images/icons/kwin.png', + 'htdocs/images/icons/5floppy_mount.png', + 'htdocs/images/icons/edu_languages.png', + 'htdocs/images/icons/kopete.png', + 'htdocs/images/icons/tablet.png', + 'htdocs/images/icons/zip_mount.png', + 'htdocs/images/icons/filetypes.png', + 'htdocs/images/icons/locale.png', + 'htdocs/images/icons/cdwriter_unmount.png', + 'htdocs/images/icons/kooka.png', + 'htdocs/images/icons/krec.png', + 'htdocs/images/icons/network.png', + 'htdocs/images/icons/kalzium.png', + 'htdocs/images/icons/noatun.png', + 'htdocs/images/icons/x.png', + 'htdocs/images/icons/package_graphics.png', + 'htdocs/images/icons/samba_unmount.png', + 'htdocs/images/icons/style.png', + 'htdocs/images/icons/kmoon.png', + 'htdocs/images/icons/cdwriter_mount.png', + 'htdocs/images/icons/kpager.png', + 'htdocs/images/icons/kmid.png', + 'htdocs/images/icons/kjots.png', + 'htdocs/images/icons/ktimer.png', + 'htdocs/images/icons/key_bindings.png', + 'htdocs/images/icons/kaddressbook.png', + 'htdocs/images/icons/korn.png', + 'htdocs/images/icons/kmplot.png', + 'htdocs/images/icons/email.png', + 'htdocs/images/icons/pda_blue.png', + 'htdocs/images/icons/nfs_unmount.png', + 'htdocs/images/icons/kcontrol.png', + 'htdocs/images/icons/clanbomber.png', + 'htdocs/images/icons/kalarm.png', + 'htdocs/images/icons/date.png', + 'htdocs/images/icons/kivio.png', + 'htdocs/images/icons/mozilla-firebird.png', + 'htdocs/images/icons/klpq.png', + 'htdocs/images/icons/juk.png', + 'htdocs/images/icons/konquest.png', + 'htdocs/images/icons/knotes.png', + 'htdocs/images/icons/go.png', + 'htdocs/images/icons/xmag.png', + 'htdocs/images/icons/katomic.png', + 'htdocs/images/icons/mozilla.png', + 'htdocs/images/icons/input_devices_settings.png', + 'htdocs/images/icons/samba.png', + 'htdocs/images/icons/kview.png', + 'htdocs/images/icons/print_printer.png', + 'htdocs/images/icons/package_applications.png', + 'htdocs/images/icons/ktouch.png', + 'htdocs/images/icons/atlantik.png', + 'htdocs/images/icons/download_manager.png', + 
'htdocs/images/icons/home.png', + 'htdocs/images/icons/colors.png', + 'htdocs/images/icons/kasteroids.png', + 'htdocs/images/icons/ksig.png', + 'htdocs/images/icons/konsole.png', + 'htdocs/images/icons/printmgr.png', + 'htdocs/images/icons/kcmdf.png', + 'htdocs/images/icons/kthememgr.png', + 'htdocs/images/icons/xapp.png', + 'htdocs/images/icons/evolution.png', + 'htdocs/images/icons/kgpg.png', + 'htdocs/images/icons/5floppy_unmount.png', + 'htdocs/images/icons/camera_unmount.png', + 'htdocs/images/icons/package_edutainment.png', + 'htdocs/images/icons/arts.png', + 'htdocs/images/icons/usbpendrive_mount.png', + 'htdocs/images/icons/package_network.png', + 'htdocs/images/icons/kfloppy.png', + 'htdocs/images/icons/designer.png', + 'htdocs/images/icons/background.png', + 'htdocs/images/icons/kmessedwords.png', + 'htdocs/images/icons/kcalc.png', + 'htdocs/images/icons/cookie.png', + 'htdocs/images/icons/bookcase.png', + 'htdocs/images/icons/multimedia.png', + 'htdocs/images/icons/mo_mount.png', + 'htdocs/images/icons/modem.png', + 'htdocs/images/icons/kfig.png', + 'htdocs/images/icons/package_favorite.png', + 'htdocs/images/icons/kmail.png', + 'htdocs/images/icons/keyboard.png', + 'htdocs/images/icons/xclock.png', + 'htdocs/images/icons/korganizer.png', + 'htdocs/images/icons/acroread.png', + 'htdocs/images/icons/chat.png', + 'htdocs/images/icons/ksplash.png', + 'htdocs/images/icons/kcolorchooser.png', + 'htdocs/images/icons/kugar.png', + 'htdocs/images/icons/kcmdevice.png', + 'htdocs/images/icons/ksim_cpu.png', + 'htdocs/images/icons/remote.png', + 'htdocs/images/icons/blockdevice.png', + 'htdocs/images/icons/kcmpartitions.png', + 'htdocs/images/icons/khotkeys.png', + 'htdocs/images/icons/camera_mount.png', + 'htdocs/images/icons/kservices.png', + 'htdocs/images/icons/security.png', + 'htdocs/images/icons/mo_unmount.png', + 'htdocs/images/icons/kxkb.png', + 'htdocs/images/icons/knewsticker.png', + 'htdocs/images/icons/window_list.png', + 'htdocs/images/icons/netscape.png', + 'htdocs/images/icons/camera.png', + 'htdocs/images/icons/kmousetool.png', + 'htdocs/images/icons/processor.png', + 'htdocs/images/icons/help_index.png', + 'htdocs/images/icons/kword.png', + 'htdocs/images/icons/kcmdevices.png', + 'htdocs/images/icons/kcmkwm.png', + 'htdocs/images/icons/printer.png', + 'htdocs/images/icons/networkdevice.png', + 'htdocs/images/icons/package_utilities.png', + 'htdocs/images/icons/kdat.png', + 'htdocs/images/icons/irkick.png', + 'htdocs/images/icons/kpresenter.png', + 'htdocs/images/icons/kcmmidi.png', + 'htdocs/images/icons/usbpendrive_unmount.png', + 'htdocs/images/icons/package_settings.png', + 'htdocs/images/icons/realplayer.png', + 'htdocs/images/icons/raid.png', + 'htdocs/images/icons/xemacs.png', + 'htdocs/images/icons/krdc.png', + 'htdocs/images/icons/indeximg.png', + 'htdocs/images/icons/memory.png', + 'htdocs/images/icons/gnome_apps.png', + 'htdocs/images/icons/kolourpaint.png', + 'htdocs/images/icons/icons.png', + 'htdocs/images/icons/kuser.png', + 'htdocs/images/icons/kaboodle.png', + 'htdocs/images/icons/korganizer_todo.png', + 'htdocs/images/icons/mouse.png', + 'htdocs/images/icons/clock.png', + 'htdocs/images/icons/ksnapshot.png', + 'htdocs/images/icons/kcmmemory.png', + 'htdocs/images/icons/sodipodi.png', + 'htdocs/images/icons/kpdf.png', + 'htdocs/images/icons/ipod.png', + 'htdocs/images/icons/hdd_mount.png', + 'htdocs/images/icons/cdrom_unmount.png', + 'htdocs/images/icons/file-manager.png', + 'htdocs/images/icons/randr.png', + 'htdocs/images/icons/kcmx.png', + 
'htdocs/images/icons/kdmconfig.png', + 'htdocs/images/icons/kmag.png', + 'htdocs/images/icons/ksim.png', + 'htdocs/images/icons/mycomputer.png', + 'htdocs/images/icons/dvd_unmount.png', + 'htdocs/images/icons/stylesheet.png', + 'htdocs/images/icons/kolf.png', + 'htdocs/images/icons/kaudiocreator.png', + 'htdocs/images/icons/digikam.png', + 'htdocs/images/icons/bug.png', + 'htdocs/images/icons/kate.png', + 'htdocs/images/icons/cdaudio_mount.png', + 'htdocs/images/icons/mozilla-thunderbird.png', + 'htdocs/images/icons/kpaint.png', + 'htdocs/images/icons/ooo_gulls.png', + 'htdocs/images/icons/artscontrol.png', + 'htdocs/images/icons/kdict.png', + 'htdocs/images/icons/terminal.png', + 'htdocs/images/icons/gimp2.png', + 'htdocs/images/icons/kpackage.png', + 'htdocs/images/icons/kcoloredit.png', + 'htdocs/images/icons/kbrunch.png', + 'htdocs/images/icons/shed.png', + 'htdocs/images/icons/kjobviewer.png', + 'htdocs/images/icons/kpercentage.png', + 'htdocs/images/icons/kscreensaver.png', + 'htdocs/images/icons/edu_mathematics.png', + 'htdocs/images/icons/xfmail.png', + 'htdocs/images/icons/pda.png', + 'htdocs/images/icons/cdaudio_unmount.png', + 'htdocs/images/icons/personal.png', + 'htdocs/images/icons/kedit.png', + 'htdocs/images/icons/konqueror.png', + 'htdocs/images/icons/kchart.png', + 'htdocs/images/icons/kiten.png', + 'htdocs/images/icons/package_games.png', + 'htdocs/images/icons/kpovmodeler.png', + 'htdocs/images/icons/applixware.png', + 'htdocs/images/icons/knotify.png', + 'htdocs/images/icons/laptop_battery.png', + 'htdocs/images/icons/kppp.png', + 'htdocs/images/icons/package_editors.png', + 'htdocs/images/icons/kweather.png', + 'htdocs/images/icons/kbackgammon.png', + 'htdocs/images/icons/flashkard.png', + 'htdocs/images/icons/krfb.png', + 'htdocs/images/icons/kcmscsi.png', + 'htdocs/images/icons/kworldclock.png', + 'htdocs/images/icons/looknfeel.png', + 'htdocs/images/icons/ktip.png', + 'htdocs/images/icons/edu_miscellaneous.png', + 'htdocs/images/icons/pda_black.png', + 'htdocs/images/icons/kcmfontinst.png', + 'htdocs/images/icons/password.png', + 'htdocs/images/icons/kwrite.png', + 'htdocs/images/icons/important.png', + 'htdocs/images/icons/khelpcenter.png', + 'htdocs/images/icons/energy.png', + 'htdocs/images/icons/kuickshow.png', + 'htdocs/images/icons/samba_mount.png', + 'htdocs/images/icons/package_multimedia.png', + 'htdocs/images/icons/package_games_strategy.png', + 'htdocs/images/icons/klettres.png', + 'htdocs/images/icons/laptop_pcmcia.png', + 'htdocs/images/icons/kontact.png', + 'htdocs/images/icons/kcmprocessor.png', + 'htdocs/images/icons/kbounce.png', + 'htdocs/images/icons/kmenu.png', + 'htdocs/images/icons/package_development.png', + 'htdocs/images/icons/kghostview.png', + 'htdocs/images/icons/ksysv.png', + 'htdocs/images/icons/cactus.png', + 'htdocs/images/icons/3floppy_unmount.png', + 'htdocs/images/icons/keybindings.png', + 'htdocs/images/icons/kmines.png', + 'htdocs/images/icons/agent.png', + 'htdocs/images/icons/amor.png', + 'htdocs/images/icons/gaim.png', + 'htdocs/images/icons/kmix.png', + 'htdocs/images/icons/xcalc.png', + 'htdocs/images/icons/kscd.png', + 'htdocs/images/icons/kbackgammon_engine.png', + 'htdocs/images/icons/hdd_unmount.png', + 'htdocs/images/icons/earth.png', + 'htdocs/images/icons/karm.png', + 'htdocs/images/icons/browser.png', + 'htdocs/images/icons/kfm_home.png', + 'htdocs/images/icons/kcharselect.png', + 'htdocs/images/icons/opera.png', + 'htdocs/images/icons/kdf.png', + 'htdocs/images/icons/print_class.png', + 
'htdocs/images/icons/kteatime.png', + 'htdocs/images/icons/nfs_mount.png', + 'htdocs/images/icons/package_wordprocessing.png', + 'htdocs/images/icons/emacs.png', + 'htdocs/images/icons/display.png', + 'htdocs/images/icons/licq.png', 'htdocs/images/white_90percent.png', - 'htdocs/images/tree_90.png', - 'htdocs/images/login_error_message.png', - 'htdocs/images/white_70percent.png', - 'htdocs/images/button_checkbox_off.png', - 'htdocs/images/icon_logwatch.png', - 'htdocs/images/image_button.png', - 'htdocs/images/button_painteroptions_off.png', - 'htdocs/images/white_40percent.png', - 'htdocs/images/button_up_hi.png', - 'htdocs/images/button_snapin_switch_on_lo.png', - 'htdocs/images/tableshadow.png', - 'htdocs/images/icon_detail.gif', - 'htdocs/images/button_commands_down_hi.png', - 'htdocs/images/icon_foreign_changes.png', - 'htdocs/images/link_reporting.gif', - 'htdocs/images/icon_host.png', - 'htdocs/images/link_queue.gif', - 'htdocs/images/button_commands_hi.png', - 'htdocs/images/alert_start.png', - 'htdocs/images/button_down_hi.png', - 'htdocs/images/tree_60.png', - 'htdocs/images/button_download_lo.png', - 'htdocs/images/button_filters_off.png', - 'htdocs/images/tree_00.png', - 'htdocs/images/button_sidebar_logout_hi.png', - 'htdocs/images/button_move_hi.png', - 'htdocs/images/icon_auditlog.png', - 'htdocs/images/icon_autherr.png', - 'htdocs/images/button_filters_down_hi.png', - 'htdocs/images/breadcrumb_r_lo.png', - 'htdocs/images/button_end_lo.png', - 'htdocs/images/white_30percent.png', - 'htdocs/images/sidebar_logout_lo.png', - 'htdocs/images/button_back_lo.png', - 'htdocs/images/status_persist.png', - 'htdocs/images/icon_notes.gif', - 'htdocs/images/button_checkbox_down_hi.png', - 'htdocs/images/button_sidebar_logout_lo.png', - 'htdocs/images/tree_black_40.png', - 'htdocs/images/button_services_hi.png', - 'htdocs/images/alert_restart.png', - 'htdocs/images/button_folder_lo.png', - 'htdocs/images/breadcrumb_l_lo.png', + 'htdocs/images/button_delete_hi.png', + 'htdocs/images/icon_downtime.png', + 'htdocs/images/button_filters_down_lo.png', + 'htdocs/images/link_search.gif', + 'htdocs/images/icon_rulesets.png', + 'htdocs/images/icon_timewarp_off.png', + 'htdocs/images/dashlet_edit_lo.png', + 'htdocs/images/icon_check_parameters.png', + 'htdocs/images/link_hosts.gif', + 'htdocs/images/status_download_csv.png', + 'htdocs/images/breadcrumb_rm_hi.png', + 'htdocs/images/icon_validation_error.png', + 'htdocs/images/button_static_checks_hi.png', 'htdocs/images/metanav_button.png', - 'htdocs/images/button_snapin_switch_on_hi.png', - 'htdocs/images/icon_localrule.png', - 'htdocs/images/icon_wato_nochanges.png', - 'htdocs/images/ios_logo.png', - 'htdocs/images/icon_replicate.png', + 'htdocs/images/button_checkbox_down_hi.png', + 'htdocs/images/button_availability_hi.png', + 'htdocs/images/assume_1.png', + 'htdocs/images/icon_download_agents.png', + 'htdocs/images/alert_downtimestop.png', + 'htdocs/images/repprogress.png', + 'htdocs/images/button_rulesets_lo.png', + 'htdocs/images/tree_black_80.png', + 'htdocs/images/icon_delete.png', + 'htdocs/images/sidebar_button_lo.png', + 'htdocs/images/link_error.gif', + 'htdocs/images/button_notifications_lo.png', 'htdocs/images/folder_open.png', - 'htdocs/images/perfometer-bg.png', - 'htdocs/images/white_20percent.png', - 'htdocs/images/alert_notify.png', - 'htdocs/images/button_snapin_greyswitch_off_hi.png', - 'htdocs/images/button_servicegroups_lo.png', - 'htdocs/images/snapin_footer.png', - 'htdocs/images/button_quicksearch_lo.png', - 
'htdocs/images/contextlink_lo.png', - 'htdocs/images/icon_hostgroups.png', - 'htdocs/images/metanav_30.png', - 'htdocs/images/icon_notification.png', - 'htdocs/images/mk_logo_small.gif', - 'htdocs/images/assume_bg.png', - 'htdocs/images/button_sitestatus_unreach_hi.png', - 'htdocs/images/icon_search.png', - 'htdocs/images/icon_validation_error.png', - 'htdocs/images/dial_refresh_off.png', - 'htdocs/images/icon_abort.png', - 'htdocs/images/icon_ndisabled.gif', - 'htdocs/images/button_services_lo.png', - 'htdocs/images/speedometer.png', - 'htdocs/images/button_closesnapin_hi.png', - 'htdocs/images/alert_command.png', - 'htdocs/images/button_painteroptions_hi.png', - 'htdocs/images/button_rulesets_hi.png', - 'htdocs/images/button_sitestatus_disabled_hi.png', - 'htdocs/images/icon_enabled.gif', - 'htdocs/images/icon_trash.png', - 'htdocs/images/tree_black_90.png', - 'htdocs/images/link_downtime.gif', - 'htdocs/images/link_events.gif', - 'htdocs/images/breadcrumb_lm_hi.png', + 'htdocs/images/icon_filter_set.png', + 'htdocs/images/alert_restart.png', 'htdocs/images/button_maxisnapin_lo.png', - 'htdocs/images/button_roles_lo.png', - 'htdocs/images/icon_passive.gif', - 'htdocs/images/button_sidebar_settings_lo.png', - 'htdocs/mobile.py', - 'htdocs/mobile.css', - 'htdocs/pages.css', - 'htdocs/actions.py', - 'htdocs/sidebar.py', + 'htdocs/images/tree_black_20.png', 'htdocs/status.css', - 'htdocs/default_permissions.py', - 'htdocs/config.py', - 'htdocs/help.py', - 'htdocs/login.py', - 'htdocs/weblib.py', - 'htdocs/forms.py', - 'htdocs/login.css', + 'htdocs/sounds/down.wav', 'htdocs/sounds/unknown.wav', 'htdocs/sounds/ok.wav', - 'htdocs/sounds/down.wav', 'htdocs/sounds/warning.wav', 'htdocs/sounds/critical.wav', - 'htdocs/userdb.py', - 'htdocs/main.py', - 'htdocs/livestatus.py', - 'htdocs/jquery/jquery-1.6.4.min.js', - 'htdocs/jquery/jquery.mobile-1.0.js', - 'htdocs/jquery/images/icons-18-white.png', - 'htdocs/jquery/images/icons-36-black.png', - 'htdocs/jquery/images/icons-36-white.png', - 'htdocs/jquery/images/icons-18-black.png', - 'htdocs/jquery/images/ajax-loader.png', - 'htdocs/jquery/jquery.mobile.structure-1.0.min.css', - 'htdocs/jquery/jquery-1.7.1.min.js', - 'htdocs/jquery/jquery.mobile-1.0.min.js', - 'htdocs/jquery/jquery.mobile-1.0.min.css', - 'htdocs/jquery/jquery.mobile.structure-1.0.css', - 'htdocs/jquery/jquery.mobile-1.0.css', - 'htdocs/check_mk.css', - 'htdocs/bi.py', - 'htdocs/logwatch.py', - 'htdocs/md5crypt.py', - 'htdocs/css/README', + 'htdocs/multitar.py', 'htdocs/lib.py', - 'plugins/icons/wato.py', - 'plugins/icons/builtin.py', - 'plugins/sidebar/wato.py', - 'plugins/sidebar/shipped.py', - 'plugins/sidebar/search.py', - 'plugins/sidebar/nagvis_maps.py', - 'plugins/sidebar/bi.py', + 'htdocs/cron.py', + 'htdocs/config.py', + 'htdocs/forms.py', 'plugins/perfometer/check_mk.py', 'plugins/perfometer/active_checks.py', - 'plugins/pages/wato.py', - 'plugins/pages/mobile.py', - 'plugins/pages/shipped.py', - 'plugins/pages/bi.py', - 'plugins/views/commands.py', - 'plugins/views/layouts.py', + 'plugins/config/bi.py', + 'plugins/config/wato.py', + 'plugins/config/builtin.py', + 'plugins/wato/builtin_modules.py', + 'plugins/wato/userdb.py', + 'plugins/wato/bi.py', + 'plugins/wato/check_mk_configuration.py', + 'plugins/wato/active_checks.py', + 'plugins/wato/inventory.py', + 'plugins/wato/globals_notification.py', + 'plugins/wato/user_attributes.py', + 'plugins/wato/agents.py', + 'plugins/wato/datasource_programs.py', + 'plugins/wato/auth.py', + 'plugins/wato/builtin_attributes.py', + 
'plugins/wato/backup_domains.py', + 'plugins/wato/nagvis_auth.py', + 'plugins/wato/notifications.py', + 'plugins/wato/check_parameters.py', + 'plugins/wato/mknotifyd.py', + 'plugins/webapi/webapi.py', + 'plugins/sidebar/bi.py', + 'plugins/sidebar/nagvis_maps.py', + 'plugins/sidebar/shipped.py', + 'plugins/sidebar/search.py', + 'plugins/sidebar/wato.py', + 'plugins/views/datasources.py', + 'plugins/views/bi.py', + 'plugins/views/painters.py', + 'plugins/views/mobile.py', 'plugins/views/webservice.py', - 'plugins/views/sorters.py', + 'plugins/views/inventory.py', 'plugins/views/filters.py', - 'plugins/views/wato.py', 'plugins/views/perfometer.py', + 'plugins/views/commands.py', + 'plugins/views/layouts.py', + 'plugins/views/availability.py', + 'plugins/views/wato.py', + 'plugins/views/sorters.py', 'plugins/views/dashboard.py', - 'plugins/views/painters.py', - 'plugins/views/mobile.py', - 'plugins/views/datasources.py', 'plugins/views/builtin.py', - 'plugins/views/bi.py', - 'plugins/wato/check_parameters.py', - 'plugins/wato/user_attributes.py', - 'plugins/wato/auth.py', - 'plugins/wato/active_checks.py', - 'plugins/wato/globals_notification.py', - 'plugins/wato/builtin_modules.py', - 'plugins/wato/userdb.py', - 'plugins/wato/check_mk_configuration.py', - 'plugins/wato/nagvis_auth.py', - 'plugins/wato/builtin_attributes.py', + 'plugins/dashboard/dashlets.py', + 'plugins/dashboard/builtin.py', + 'plugins/pages/bi.py', + 'plugins/pages/shipped.py', + 'plugins/pages/mobile.py', + 'plugins/pages/wato.py', + 'plugins/pages/cron.py', 'plugins/userdb/ldap.py', - 'plugins/userdb/htpasswd.py', 'plugins/userdb/hook_auth.py', - 'plugins/dashboard/builtin.py', + 'plugins/userdb/user_attributes.py', + 'plugins/userdb/htpasswd.py', + 'plugins/visuals/bi.py', + 'plugins/visuals/inventory.py', + 'plugins/visuals/filters.py', + 'plugins/visuals/infos.py', + 'plugins/visuals/wato.py', + 'plugins/icons/inventory.py', + 'plugins/icons/wato.py', + 'plugins/icons/builtin.py', 'htdocs/defaults.py']}, 'name': 'check_mk', 'title': 'Files shipped with Check_MK', - 'version': '1.2.2p3', - 'version.min_required': '1.2.2p3', - 'version.packaged': '1.2.2p3'} \ No newline at end of file + 'version': '1.2.6p12', + 'version.min_required': '1.2.6p12', + 'version.packaged': '1.2.6p12'} \ No newline at end of file diff -Nru check-mk-1.2.2p3/packaging.py check-mk-1.2.6p12/packaging.py --- check-mk-1.2.2p3/packaging.py 2013-11-05 09:23:07.000000000 +0000 +++ check-mk-1.2.6p12/packaging.py 2015-06-24 09:48:36.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. 
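The package meta data above ('name', 'title', 'version', 'version.min_required', 'version.packaged', plus the 'files' map keyed by package part) is a plain Python literal. A minimal sketch of reading such a file outside of Check_MK; the path below is hypothetical, and this is not the loader packaging.py itself uses:

import ast

def read_package_info(path):
    # The info file is a repr()'d dict without trailing newline (see the
    # "\ No newline at end of file" marker above), so ast.literal_eval()
    # can parse it without executing any code.
    return ast.literal_eval(open(path).read())

info = read_package_info("/var/lib/check_mk/packages/check_mk")  # hypothetical path
print info["version"], "min required:", info["version.min_required"]
print len(info["files"]["web"]), "files in part 'web'"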
@@ -44,14 +44,15 @@ pass # in case of local directories (OMD) use those instead -package_parts = [ (part, title, ldir and ldir or dir) for part, title, dir, ldir in [ - ( "checks", "Checks", checks_dir, local_checks_dir ), - ( "notifications", "Notification scripts", notifications_dir, local_notifications_dir ), - ( "checkman", "Checks' man pages", check_manpages_dir, local_check_manpages_dir ), - ( "agents", "Agents", agents_dir, local_agents_dir ), - ( "web", "Multisite extensions", web_dir, local_web_dir ), - ( "pnp-templates", "PNP4Nagios templates", pnp_templates_dir, local_pnp_templates_dir ), - ( "doc", "Documentation files", doc_dir, local_doc_dir ), +package_parts = [ (part, title, perm, ldir and ldir or dir) for part, title, perm, dir, ldir in [ + ( "checks", "Checks", 0644, checks_dir, local_checks_dir ), + ( "notifications", "Notification scripts", 0755, notifications_dir, local_notifications_dir ), + ( "inventory", "Inventory plugins", 0644, inventory_dir, local_inventory_dir ), + ( "checkman", "Checks' man pages", 0644, check_manpages_dir, local_check_manpages_dir ), + ( "agents", "Agents", 0755, agents_dir, local_agents_dir ), + ( "web", "Multisite extensions", 0644, web_dir, local_web_dir ), + ( "pnp-templates", "PNP4Nagios templates", 0644, pnp_templates_dir, local_pnp_templates_dir ), + ( "doc", "Documentation files", 0644, doc_dir, local_doc_dir ), ]] def packaging_usage(): @@ -165,14 +166,14 @@ else: if opt_verbose: sys.stdout.write("Files in package %s:\n" % name) - for part, title, dir in package_parts: + for part, title, perm, dir in package_parts: files = package["files"].get(part, []) if len(files) > 0: sys.stdout.write(" %s%s%s:\n" % (tty_bold, title, tty_normal)) for f in files: sys.stdout.write(" %s\n" % f) else: - for part, title, dir in package_parts: + for part, title, perm, dir in package_parts: for fn in package["files"].get(part, []): sys.stdout.write(dir + "/" + fn + "\n") @@ -199,7 +200,7 @@ "files" : filelists } num_files = 0 - for part, title, dir in package_parts: + for part, title, perm, dir in package_parts: files = unpackaged_files_in_dir(part, dir) filelists[part] = files num_files += len(files) @@ -215,7 +216,7 @@ def package_find(_no_args): first = True - for part, title, dir in package_parts: + for part, title, perm, dir in package_parts: files = unpackaged_files_in_dir(part, dir) if len(files) > 0: if first: @@ -242,7 +243,7 @@ os.unlink(pacpath) verbose("Releasing files of package %s into freedom...\n" % pacname) if opt_verbose: - for part, title, dir in package_parts: + for part, title, perm, dir in package_parts: filenames = package["files"].get(part, []) if len(filenames) > 0: verbose(" %s%s%s:\n" % (tty_bold, title, tty_normal)) @@ -255,7 +256,7 @@ # Make sure, user is not in data directories of Check_MK p = os.path.abspath(os.curdir) - for dir in [var_dir] + [ dir for x,y,dir in package_parts ]: + for dir in [var_dir] + [ dir for x,y,perm,dir in package_parts ]: if p == dir or p.startswith(dir + "/"): raise PackageException("You are in %s!\n" "Please leave the directories of Check_MK before creating\n" @@ -286,7 +287,7 @@ tar.addfile(info, info_file) # Now pack the actual files into sub tars - for part, title, dir in package_parts: + for part, title, perm, dir in package_parts: filenames = package["files"].get(part, []) if len(filenames) > 0: verbose(" %s%s%s:\n" % (tty_bold, title, tty_normal)) @@ -309,7 +310,7 @@ raise PackageException("No such package %s." 
% pacname) verbose("Removing package %s...\n" % pacname) - for part, title, dir in package_parts: + for part, title, perm, dir in package_parts: filenames = package["files"].get(part, []) if len(filenames) > 0: verbose(" %s%s%s\n" % (tty_bold, title, tty_normal)) @@ -344,7 +345,7 @@ # Before installing check for conflicts keep_files = {} - for part, title, dir in package_parts: + for part, title, perm, dir in package_parts: packaged = packaged_files_in_dir(part) keep = [] keep_files[part] = keep @@ -360,7 +361,7 @@ raise PackageException("File conflict: %s already existing." % path) # Now install files, but only unpack files explicitely listed - for part, title, dir in package_parts: + for part, title, perm, dir in package_parts: filenames = package["files"].get(part, []) if len(filenames) > 0: verbose(" %s%s%s:\n" % (tty_bold, title, tty_normal)) @@ -378,10 +379,20 @@ if not data: break tardest.write(data) + tardest.close() + + # Fix permissions of extracted files + for filename in filenames: + path = dir + "/" + filename + has_perm = os.stat(path).st_mode & 07777 + if has_perm != perm: + verbose(" Fixing permissions of %s: %04o -> %04o\n" % (path, has_perm, perm)) + os.chmod(path, perm) + # In case of an update remove files from old_package not present in new one if update: - for part, title, dir in package_parts: + for part, title, perm, dir in package_parts: filenames = old_package["files"].get(part, []) keep = keep_files.get(part, []) for fn in filenames: @@ -402,7 +413,7 @@ return [] # Handle case where one part-dir lies below another - taboo_dirs = [ d for p, t, d in package_parts if p != part ] + taboo_dirs = [ d for p, t, perm, d in package_parts if p != part ] if dir in taboo_dirs: return [] diff -Nru check-mk-1.2.2p3/pdu_gude_8301 check-mk-1.2.6p12/pdu_gude_8301 --- check-mk-1.2.2p3/pdu_gude_8301 2013-11-05 09:23:08.000000000 +0000 +++ check-mk-1.2.6p12/pdu_gude_8301 2015-06-24 09:48:37.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. diff -Nru check-mk-1.2.2p3/pdu_gude_8310 check-mk-1.2.6p12/pdu_gude_8310 --- check-mk-1.2.2p3/pdu_gude_8310 2013-11-05 09:23:08.000000000 +0000 +++ check-mk-1.2.6p12/pdu_gude_8310 2015-06-24 09:48:37.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. diff -Nru check-mk-1.2.2p3/pdu_gude.include check-mk-1.2.6p12/pdu_gude.include --- check-mk-1.2.2p3/pdu_gude.include 2013-11-05 09:23:08.000000000 +0000 +++ check-mk-1.2.6p12/pdu_gude.include 2015-06-24 09:48:37.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. @@ -25,55 +25,53 @@ # Boston, MA 02110-1301 USA. 
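Stepping back to the packaging.py hunks above: the new perm column in package_parts boils down to a chmod pass over each package part after unpacking. Condensed into a standalone helper (a sketch; the names and the 07777 mask follow the diff):

import os

def fix_permissions(dir, filenames, perm):
    # Force each extracted file to the mode of its package part,
    # e.g. 0644 for checks and web files, 0755 for agents and
    # notification scripts.
    for filename in filenames:
        path = dir + "/" + filename
        has_perm = os.stat(path).st_mode & 07777
        if has_perm != perm:
            os.chmod(path, perm)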
pdu_gude_default_levels = { - "V" : ( 220, 210 ), - "A" : ( 15, 16 ), - "W" : ( 3500, 3600 ), - } + "V" : ( 220, 210 ), # Volt + "A" : ( 15, 16 ), # Ampere + "W" : ( 3500, 3600 ), # Watt +} + def inventory_pdu_gude(info): if len(info) > 0: - return [(x + 1, "pdu_gude_default_levels") for x in xrange(len(info))] + return [(x + 1, "pdu_gude_default_levels") for x in range(len(info))] + def check_pdu_gude(item, params, info): try: values = info[item - 1] except ValueError: - return (3, "UKN - Item not found") + yield 3, "No phase %d found in agent output" % item + return units = { 0 : ("kWh", 1000), - 1 : ("W", False ), - 2 : ("A", 1000), - 3 : ("V", False), - 4 : ("VA",False), + 1 : ("W", False), + 2 : ("A", 1000), + 3 : ("V", False), + 4 : ("VA", False), } - message = [] - perf = [] - state = 0 - for pos, data in units.items(): - value = savefloat(values[pos]) - unit, div = data + for pos, (unit, div) in units.items(): + value = float(values[pos]) if div: value = value / div + infotext = "%.2f %s" % (value, unit) + warn, crit = params.get(unit, (None, None)) - perf.append((unit, value, warn, crit)) - label = "" + perfdata = [ (unit, value, warn, crit) ] + status = 0 + if warn > crit: if value < crit: - state = 2 - label = "(!!)" + status = 2 elif value < warn: - state = max(state, 1) - label = "(!)" + status = 1 + else: if crit != None and value > crit: - state = 2 - label = "(!!)" + status = 2 elif warn != None and value > warn: - state = max(state, 1) - label = "(!)" - message.append("%.2f%s%s" % (value, unit, label)) + status = 1 - return(state, nagios_state_names[0] + " - " + ", ".join(message), perf) + yield status, infotext, perfdata diff -Nru check-mk-1.2.2p3/plesk_backups check-mk-1.2.6p12/plesk_backups --- check-mk-1.2.2p3/plesk_backups 2013-11-05 09:23:08.000000000 +0000 +++ check-mk-1.2.6p12/plesk_backups 2015-06-24 09:48:37.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. 
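The rewritten check above encodes two threshold directions in a single params dict: for "V" the warn level lies above crit (too-low values are bad), while for "A" and "W" crit lies above warn (too-high values are bad). A condensed sketch of just that comparison, using the default levels from the diff:

pdu_gude_default_levels = { "V": (220, 210), "A": (15, 16), "W": (3500, 3600) }

def level_state(value, warn, crit):
    # Sketch of the state logic in check_pdu_gude above.
    if warn > crit:                        # lower-is-bad, e.g. "V"
        if value < crit:
            return 2                       # CRIT
        elif value < warn:
            return 1                       # WARN
    else:                                  # higher-is-bad, e.g. "A", "W"
        if crit != None and value > crit:
            return 2
        elif warn != None and value > warn:
            return 1
    return 0                               # OK

assert level_state(200.0, *pdu_gude_default_levels["V"]) == 2   # voltage below crit
assert level_state(15.5,  *pdu_gude_default_levels["A"]) == 1   # current above warn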
@@ -37,18 +37,18 @@ if len(line) != 5 or line[1] != '0': if line[1] == '2': - return (3, 'UNKNOWN - Error in agent (%s)' % ' '.join(line[1:])) + return (3, 'Error in agent (%s)' % ' '.join(line[1:])) elif line[1] == '4': state = params.get('no_backup_configured_state', 1) - return (state, '%s - No backup configured' % nagios_state_names[state]) + return (state, 'No backup configured') elif line[1] == '5': state = params.get('no_backup_found_state', 1) - return (state, '%s - No backup found' % nagios_state_names[state]) + return (state, 'No backup found') else: - return (3, 'UNKNOWN - Unexpected line %r' % line) + return (3, 'Unexpected line %r' % line) domain, rc, timestamp, size, total_size = line size = saveint(size) @@ -105,9 +105,9 @@ output.append('Total Size: %s%s' % (get_bytes_human_readable(total_size), status_txt)) perfdata.append(('total_size', total_size)) - return (status, 'OK - %s' % ', '.join(output), perfdata) + return (status, ', '.join(output), perfdata) - return (3, 'UNKNOWN - Domain not found') + return (3, 'Domain not found') check_info['plesk_backups'] = { diff -Nru check-mk-1.2.2p3/plesk_domains check-mk-1.2.6p12/plesk_domains --- check-mk-1.2.2p3/plesk_domains 2013-11-05 09:23:08.000000000 +0000 +++ check-mk-1.2.6p12/plesk_domains 2015-06-24 09:48:37.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. @@ -30,7 +30,12 @@ def check_plesk_domains(_no_item, _no_params, info): if not info: - return (1, 'WARN - No domains configured') + return (1, 'No domains configured') return (0, '%s' % ', '.join([ i[0] for i in info ]))
-check_info["plesk_domains"] = (check_plesk_domains, "Plesk Domains", 0, inventory_plesk_domains) + +check_info["plesk_domains"] = { + 'check_function': check_plesk_domains, + 'inventory_function': inventory_plesk_domains, + 'service_description': 'Plesk Domains', +} diff -Nru check-mk-1.2.2p3/plugins/apache_status check-mk-1.2.6p12/plugins/apache_status --- check-mk-1.2.2p3/plugins/apache_status 2013-11-05 09:22:37.000000000 +0000 +++ check-mk-1.2.6p12/plugins/apache_status 2015-06-24 09:48:39.000000000 +0000 @@ -1,5 +1,28 @@ #!/usr/bin/python -# encoding: utf-8 +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. # Check_MK-Agent-Plugin - Apache Server Status # @@ -18,13 +41,19 @@ # It is also possible to override or extend the ssl_ports variable to make the # check contact other ports than 443 with HTTPS requests. -import os, sys, urllib2 +import os, sys, urllib2, re, socket -config_dir = os.getenv("MK_CONFDIR", "/etc/check_mk") +config_dir = os.getenv("MK_CONFDIR", "/etc/check_mk") config_file = config_dir + "/apache_status.conf" + if not os.path.exists(config_file): config_file = config_dir + "/apache_status.cfg" +# We have to deal with socket timeouts. Python > 2.6 +# supports timeout parameter for the urllib2.urlopen method +# but we are on a python 2.5 system here which seem to use the +# default socket timeout. We are local here so set it to 1 second. +socket.setdefaulttimeout(5.0) # None or list of (proto, ipaddress, port) tuples. # proto is 'http' or 'https' @@ -43,9 +72,11 @@ if len(parts) < 7 or '/' not in parts[6]: continue - pid, proc = parts[6].split('/') + pid, proc = parts[6].split('/', 1) + to_replace = re.compile('^.*/') + proc = to_replace.sub('', proc) - procs = [ 'apache2', 'httpd', 'httpd2-prefork', 'httpd2-worker', 'httpd.worker' ] + procs = [ 'apache2', 'httpd', 'httpd2-prefork', 'httpd2-worker', 'httpd.worker', 'fcgi-pm' ] # the pid/proc field length is limited to 19 chars. Thus in case of # long PIDs, the process names are stripped of by that length. # Workaround this problem here
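The hunk below teaches the plugin a second, dict-based entry format for the servers variable. An example apache_status.cfg accepted after this change (host and port values are illustrative; the dict keys follow the hunk below):

# Plain tuples (proto, ipaddress, port) keep working:
servers = [
    ('http', '127.0.0.1', 80),
    # Dict entries may additionally name a non-default status page:
    { 'protocol' : 'https',
      'address'  : 'localhost',
      'port'     : 443,
      'page'     : 'server-status' },
]
# As mentioned in the plugin header above, ssl_ports can be overridden too:
ssl_ports = [ 443, 8443 ]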
@@ -82,16 +113,27 @@ if servers is None: servers = try_detect_servers() + if not servers: sys.exit(0) print '<<<apache_status>>>' -for proto, address, port in servers: + for server in servers: + if isinstance(server, tuple): + proto, address, port = server + page = 'server-status' + else: + proto = server['protocol'] + address = server['address'] + port = server['port'] + page = server.get('page', 'server-status') + try: - url = '%s://%s:%s/server-status?auto' % (proto, address, port) + url = '%s://%s:%s/%s?auto' % (proto, address, port, page) # Try to fetch the status page for each server try: - fd = urllib2.urlopen(url) + request = urllib2.Request(url, headers={"Accept" : "text/plain"}) + fd = urllib2.urlopen(request) except urllib2.URLError, e: if 'SSL23_GET_SERVER_HELLO:unknown protocol' in str(e): # HACK: workaround misconfigurations where port 443 is used for @@ -104,6 +146,9 @@ for line in fd.read().split('\n'): if not line.strip(): continue + if line.lstrip()[0] == '<': + # seems to be html output. Skip this server. + break print address, port, line except urllib2.HTTPError, e: sys.stderr.write('HTTP-Error (%s:%d): %s %s\n' % (address, port, e.code, e)) diff -Nru check-mk-1.2.2p3/plugins/config/bi.py check-mk-1.2.6p12/plugins/config/bi.py --- check-mk-1.2.2p3/plugins/config/bi.py 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/plugins/config/bi.py 2015-06-24 09:48:38.000000000 +0000 @@ -0,0 +1,31 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. + +aggregation_rules = {} +aggregations = [] +host_aggregations = [] +bi_compile_log = None +bi_precompile_on_demand = False diff -Nru check-mk-1.2.2p3/plugins/config/builtin.py check-mk-1.2.6p12/plugins/config/builtin.py --- check-mk-1.2.2p3/plugins/config/builtin.py 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/plugins/config/builtin.py 2015-09-21 10:59:54.000000000 +0000 @@ -0,0 +1,252 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . 
\ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. + +# ____ _ +# | _ \ ___ | | ___ ___ +# | |_) / _ \| |/ _ \/ __| +# | _ < (_) | | __/\__ \ +# |_| \_\___/|_|\___||___/ +# +roles = {} # User supplied roles + +# define default values for all settings +debug = False +screenshotmode = False +profile = False +users = [] +admin_users = [] +guest_users = [] +default_user_role = "user" +save_user_access_times = False +user_online_maxage = 30 # seconds + +# New style, used by WATO +multisite_users = {} + +# ____ _ _ _ +# / ___|(_) __| | ___| |__ __ _ _ __ +# \___ \| |/ _` |/ _ \ '_ \ / _` | '__| +# ___) | | (_| | __/ |_) | (_| | | +# |____/|_|\__,_|\___|_.__/ \__,_|_| +# + +sidebar = [ + ('tactical_overview', 'open'), + ('search', 'open'), + ('views', 'open'), + ('reports', 'closed'), # does not harm if not available + ('bookmarks', 'open'), + ('admin', 'open'), + ('master_control', 'closed') +] + +# Interval of snapin updates in seconds +sidebar_update_interval = 30.0 + +# It is possible (but ugly) to enable a scrollbar in the sidebar +sidebar_show_scrollbar = False + +# Enable regular checking for popup notifications +sidebar_notify_interval = None + +# _ _ _ _ +# | | (_)_ __ ___ (_) |_ ___ +# | | | | '_ ` _ \| | __/ __| +# | |___| | | | | | | | |_\__ \ +# |_____|_|_| |_| |_|_|\__|___/ +# + +soft_query_limit = 1000 +hard_query_limit = 5000 + +# ____ _ +# / ___| ___ _ _ _ __ __| |___ +# \___ \ / _ \| | | | '_ \ / _` / __| +# ___) | (_) | |_| | | | | (_| \__ \ +# |____/ \___/ \__,_|_| |_|\__,_|___/ +# + +sound_url = "sounds/" +enable_sounds = False +sounds = [ + ( "down", "down.wav" ), + ( "critical", "critical.wav" ), + ( "unknown", "unknown.wav" ), + ( "warning", "warning.wav" ), + # ( None, "ok.wav" ), +] + + +# __ ___ _ _ +# \ \ / (_) _____ __ ___ _ __ | |_(_) ___ _ __ ___ +# \ \ / /| |/ _ \ \ /\ / / / _ \| '_ \| __| |/ _ \| '_ \/ __| +# \ V / | | __/\ V V / | (_) | |_) | |_| | (_) | | | \__ \ +# \_/ |_|\___| \_/\_/ \___/| .__/ \__|_|\___/|_| |_|___/ +# |_| + +view_option_refreshes = [ 30, 60, 90, 0 ] +view_option_columns = [ 1, 2, 3, 4, 5, 6, 8 ] + +# MISC +doculink_urlformat = "http://mathias-kettner.de/checkmk_%s.html"; + + +# ____ _ _ _ _ +# / ___| _ ___| |_ ___ _ __ ___ | | (_)_ __ | | _____ +# | | | | | / __| __/ _ \| '_ ` _ \ | | | | '_ \| |/ / __| +# | |__| |_| \__ \ || (_) | | | | | | | |___| | | | | <\__ \ +# \____\__,_|___/\__\___/|_| |_| |_| |_____|_|_| |_|_|\_\___/ +# + +custom_links = {} + +# __ __ _ +# \ \ / /_ _ _ __(_) ___ _ _ ___ +# \ \ / / _` | '__| |/ _ \| | | / __| +# \ V / (_| | | | | (_) | |_| \__ \ +# \_/ \__,_|_| |_|\___/ \__,_|___/ +# + +debug_livestatus_queries = False + +# 
Show livestatus errors in multi site setup if some sites are +# not reachable. +show_livestatus_errors = True + +# Whether the livestatu proxy daemon is available +liveproxyd_enabled = False + +# Set this to a list in order to globally control which views are +# being displayed in the sidebar snapin "Views" +visible_views = None + +# Set this list in order to actively hide certain views +hidden_views = None + +# Custom user stylesheet to load (resides in htdocs/) +custom_style_sheet = None + +# URL for start page in main frame (welcome page) +start_url = "dashboard.py" + +# Page heading for main frame set +page_heading = "Check_MK %s" + +# Timeout for rescheduling of host- and servicechecks +reschedule_timeout = 10.0 + +# Number of columsn in "Filter" form +filter_columns = 2 + +# Default language for l10n +default_language = None + +# Hide these languages from user selection +hide_languages = [] + +# Default timestamp format to be used in multisite +default_ts_format = 'mixed' + +# Default authentication type. Can be changed to e.g. "cookie" for +# using the cookie auth +auth_type = 'basic' + +# Show only most used buttons, set to None if you want +# always all buttons to be shown +context_buttons_to_show = 5 + +# Buffering of HTML output stream +buffered_http_stream = True + +# Maximum livetime of unmodified selections +selection_livetime = 3600 + +# Configure HTTP header to read usernames from +auth_by_http_header = False + +# Number of rows to display by default in tables rendered with +# the table.py module +table_row_limit = 100 + +# Add an icon pointing to the WATO rule to each service +multisite_draw_ruleicon = True + +# Default downtime configuration +adhoc_downtime = {} + +# Display dashboard date +pagetitle_date_format = None + +# Value of the host_staleness/service_staleness field to make hosts/services +# appear in a stale state +staleness_threshold = 1.5 + +# Escape HTML in plugin output / log messages +escape_plugin_output = True + +# Virtual host trees for the "Virtual Host Trees" snapin +virtual_host_trees = [] + +# Target email address for "Crashed Check" page +crash_report_target = "feedback@check-mk.org" + + +# _ _ ____ ____ +# | | | |___ ___ _ __| _ \| __ ) +# | | | / __|/ _ \ '__| | | | _ \ +# | |_| \__ \ __/ | | |_| | |_) | +# \___/|___/\___|_| |____/|____/ +# + +user_connectors = ['htpasswd'] +userdb_automatic_sync = [ 'wato_users', 'page', 'wato_pre_activate_changes', 'wato_snapshot_pushed' ] +ldap_connection = { + 'type' : 'ad', + 'page_size' : 1000, +} +ldap_userspec = { + 'scope' : 'sub', + 'user_id_umlauts' : 'replace', +} +ldap_groupspec = { + 'scope' : 'sub', +} +ldap_active_plugins = {'email': {}, 'alias': {}, 'auth_expire': {}} +ldap_cache_livetime = 300 +ldap_debug_log = False +default_user_profile = { + 'roles': ['user'], +} +lock_on_logon_failures = False +password_policy = {} + +user_localizations = default_user_localizations + +# Write WATO folder permissions to auth.php file +export_folder_permissions = False + +# Name of the hostgroup to filter the network topology view by default +topology_default_filter_group = None diff -Nru check-mk-1.2.2p3/plugins/config/wato.py check-mk-1.2.6p12/plugins/config/wato.py --- check-mk-1.2.2p3/plugins/config/wato.py 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/plugins/config/wato.py 2015-06-24 09:48:38.000000000 +0000 @@ -0,0 +1,60 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | 
/ ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. + +wato_enabled = True +wato_host_tags = [] +wato_aux_tags = [] +wato_hide_filenames = True +wato_hide_hosttags = False +wato_upload_insecure_snapshots = False +wato_hide_varnames = True +wato_hide_help_in_lists = True +wato_max_snapshots = 50 +wato_num_hostspecs = 12 +wato_num_itemspecs = 15 +wato_activation_method = 'restart' +wato_write_nagvis_auth = False +wato_use_git = False +wato_hidden_users = [] +wato_user_attrs = [] +wato_legacy_eval = False + +def tag_alias(tag): + for entry in wato_host_tags: + id, title, tags = entry[:3] + for t in tags: + if t[0] == tag: + return t[1] + for id, alias in wato_aux_tags: + if id == tag: + return alias + +def tag_group_title(tag): + for entry in wato_host_tags: + id, title, tags = entry[:3] + for t in tags: + if t[0] == tag: + return title diff -Nru check-mk-1.2.2p3/plugins/dashboard/builtin.py check-mk-1.2.6p12/plugins/dashboard/builtin.py --- check-mk-1.2.2p3/plugins/dashboard/builtin.py 2013-11-05 09:23:09.000000000 +0000 +++ check-mk-1.2.6p12/plugins/dashboard/builtin.py 2015-06-24 09:48:38.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. @@ -25,68 +25,299 @@ # Boston, MA 02110-1301 USA. 
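
The tag_alias() and tag_group_title() helpers added above resolve a tag ID to its human-readable alias, first by searching the choices of every tag group in wato_host_tags and then by falling back to wato_aux_tags. A minimal, self-contained sketch of how a lookup plays out, assuming a hypothetical tag configuration (the group and tag IDs below are invented for illustration):

# Hypothetical configuration; real values come from WATO.
# Group entry format: (group_id, group_title, [(tag_id, tag_title, aux_tags), ...])
wato_host_tags = [
    ("criticality", "Criticality", [
        ("prod", "Productive system", []),
        ("test", "Test system",       []),
    ]),
]
# Auxiliary tags are flat (tag_id, alias) pairs.
wato_aux_tags = [("snmp", "Monitored via SNMP")]

def tag_alias(tag):
    # Search the choices of every tag group first ...
    for entry in wato_host_tags:
        group_id, title, tags = entry[:3]
        for t in tags:
            if t[0] == tag:
                return t[1]
    # ... then fall back to the auxiliary tags.
    for aux_id, alias in wato_aux_tags:
        if aux_id == tag:
            return alias

print tag_alias("prod")    # -> Productive system
print tag_alias("snmp")    # -> Monitored via SNMP
print tag_alias("unknown") # -> None (unknown IDs yield no alias)
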
builtin_dashboards["main"] = { - "title" : _("Main Overview"), + "single_infos": [], + "context" : {}, + "mtime" : 0, + "show_title" : True, + "title" : _("Main Overview"), + "topic" : _("Overview"), + "description" : _("This dashboard gives you a general overview on the state of your " + "monitored devices."), "dashlets" : [ -# { -# "url" : "dashlet_mk_logo.py", -# "position" : (1, 1), -# "size" : (4, 5), -# "shadow" : False, -# "background" : False, -# }, { - "title" : _("Host Statistics"), - "url" : "dashlet_hoststats.py", - "position" : (1, 1), - "size" : (30, 18), - "shadow" : True, - "background" : True, - "refresh" : 60, + "title" : _("Host Statistics"), + "type" : 'hoststats', + "position" : (1, 1), + "refresh" : 60, + "show_title" : True, + "context" : {}, + 'single_infos' : [], }, { "title" : _("Service Statistics"), - "url" : "dashlet_servicestats.py", + "type" : 'servicestats', "position" : (31, 1), - "size" : (30, 18), - "shadow" : True, - "background" : True, "refresh" : 60, + "show_title" : True, + "context" : {}, + 'single_infos' : [], }, { + "type" : "view", "title" : _("Host Problems (unhandled)"), "title_url" : "view.py?view_name=hostproblems&is_host_acknowledged=0", - "view" : "hostproblems_dash", # "view.py?view_name=hostproblems_dash&display_options=SIXHR&_body_class=dashlet", "position" : (-1, 1), "size" : (GROW, 18), + "show_title" : True, + "context" : {}, + + 'browser_reload': 30, + 'column_headers': 'pergroup', + 'datasource': 'hosts', + 'single_infos': [], + 'group_painters': [], + 'context': { + 'hoststate': { + 'hst0': '', + 'hst1': 'on', + 'hst2': 'on', + 'hstp': '', + }, + 'summary_host': {'is_summary_host': '0'}, + 'host_acknowledged': {'is_host_acknowledged': '0'}, + 'host_scheduled_downtime_depth': {'is_host_scheduled_downtime_depth': '0'}, + }, + 'hidden': True, + 'hidebutton': True, + 'layout': 'table', + 'mustsearch': False, + 'name': 'dashlet_2', + 'num_columns': 1, + 'owner': '', + 'painters': [ + ('host_state', None), + ('host', 'host'), + ('host_icons', None), + ('host_state_age', None), + ('host_plugin_output', None), + ], + 'public': True, + 'sorters': [('hoststate', True)], + 'topic': None, }, { + "type" : "view", "title" : _("Service Problems (unhandled)"), "title_url" : "view.py?view_name=svcproblems&is_service_acknowledged=0", - "view" : "svcproblems_dash", # "view.py?view_name=svcproblems_dash&display_options=SIXHR&_body_class=dashlet", "position" : (1, 19), "size" : (GROW, MAX), + "show_title" : True, + "context" : {}, + + 'browser_reload': 30, + 'column_headers': 'pergroup', + 'datasource': 'services', + 'single_infos': [], + 'group_painters': [], + 'context': { + 'service_acknowledged': {'is_service_acknowledged': '0'}, + 'summary_host': {'is_summary_host': '0'}, + 'in_downtime': {'is_in_downtime': '0'}, + 'hoststate': {'hst0': 'on', + 'hst1': '', + 'hst2': '', + 'hstp': 'on'}, + 'svcstate': { + 'st0': '', + 'st1': 'on', + 'st2': 'on', + 'st3': 'on', + 'stp': '', + } + }, + 'hidden': True, + 'layout': 'table', + 'mustsearch': False, + 'name': 'dashlet_3', + 'num_columns': 1, + 'owner': '', + 'painters': [('service_state', None), + ('host', 'host'), + ('service_description', 'service'), + ('service_icons', None), + ('svc_plugin_output', None), + ('svc_state_age', None), + ('svc_check_age', None), + ], + 'play_sounds': True, + 'public': True, + 'sorters': [('svcstate', True), + ('stateage', False), + ('svcdescr', False)], }, { + "type" : "view", "title" : _("Events of recent 4 hours"), "title_url" : "view.py?view_name=events_dash", - "view" 
: "events_dash", # "view.py?view_name=events_dash&display_options=SIXHR&_body_class=dashlet", "position" : (-1, -1), "size" : (GROW, GROW), + "show_title" : True, + "context" : {}, + + 'browser_reload': 90, + 'column_headers': 'pergroup', + 'datasource': 'log_events', + 'single_infos': [], + 'group_painters': [], + 'context': { + 'logtime': { + 'logtime_from_range': '3600', + 'logtime_from': '4', + }, + }, + 'hidden': True, + 'layout': 'table', + 'linktitle': 'Events', + 'mustsearch': False, + 'name': 'dashlet_4', + 'num_columns': 1, + 'owner': 'admin', + 'painters': [('log_icon', None), + ('log_time', None), + ('host', 'hostsvcevents'), + ('service_description', 'svcevents'), + ('log_plugin_output', None)], + + + 'play_sounds': False, + 'public': True, + 'sorters': [], + }, + ] +} + +#Only work in OMD installations +if defaults.omd_site: + def topology_url(): + return defaults.url_prefix + 'nagvis/frontend/nagvis-js/index.php?' + \ + 'mod=Map&header_template=on-demand-filter&header_menu=1&label_show=1' + \ + '&sources=automap&act=view&backend_id=' + defaults.omd_site + \ + '&render_mode=undirected&url_target=main&filter_group=' + \ + (config.topology_default_filter_group or '') + + builtin_dashboards["topology"] = { + "single_infos": [], + "context" : {}, + "mtime" : 0, + "show_title" : True, + "title" : _("Network Topology"), + "topic" : _("Overview"), + "description" : _("This dashboard uses the parent relationships of your hosts to display a " + "hierarchical map."), + "dashlets" : [ + { + "type" : "url", + "title" : "Topology of Site " + defaults.omd_site, + "urlfunc" : 'topology_url', + "reload_on_resize" : True, + "position" : (1, 1), + "size" : (GROW, GROW), + "context" : {}, + "single_infos" : [], + }, + ] + } + +builtin_dashboards["simple_problems"] = { + "single_infos": [], + "context" : {}, + "mtime" : 0, + "show_title" : True, + "title" : _("Host & Services Problems"), + "topic" : _("Overview"), + "description" : _("A compact dashboard which lists your unhandled host and service problems."), + "dashlets" : [ + { + "type" : "view", + "title" : _("Host Problems (unhandled)"), + "title_url" : "view.py?view_name=hostproblems&is_host_acknowledged=0", + "show_title" : True, + "position" : (1, 1), + "size" : (GROW, 18), + "context" : {}, + + 'browser_reload': 30, + 'column_headers': 'pergroup', + 'datasource': 'hosts', + 'single_infos': [], + 'group_painters': [], + 'context': { + 'host_acknowledged': {'is_host_acknowledged': '0'}, + 'host_scheduled_downtime_depth': {'is_host_scheduled_downtime_depth': '0'}, + 'summary_host': {'is_summary_host': '0'}, + 'hoststate': {'hst0': '', + 'hst1': 'on', + 'hst2': 'on', + 'hstp': ''}, + }, + 'hidden': True, + 'hidebutton': True, + 'layout': 'table', + 'mustsearch': False, + 'name': 'dashlet_0', + 'num_columns': 1, + 'owner': '', + 'painters': [ + ('host_state', None), + ('host', 'host'), + ('host_icons', None), + ('host_state_age', None), + ('host_plugin_output', None), + ], + 'public': True, + 'sorters': [('hoststate', True)], + 'topic': None, + }, + { + "type" : "view", + "title" : _("Service Problems (unhandled)"), + "title_url" : "view.py?view_name=svcproblems&is_service_acknowledged=0", + "show_title" : True, + "position" : (1, 19), + "size" : (GROW, MAX), + "context" : {}, + + 'browser_reload': 30, + 'column_headers': 'pergroup', + 'datasource': 'services', + 'single_infos': [], + 'group_painters': [], + 'context': { + 'service_acknowledged': {'is_service_acknowledged': '0'}, + 'summary_host': {'is_summary_host': '0'}, + 'in_downtime': 
{'is_in_downtime': '0'}, + 'hoststate': {'hst0': 'on', + 'hst1': '', + 'hst2': '', + 'hstp': 'on'}, + 'svcstate': { + 'st0': '', + 'st1': 'on', + 'st2': 'on', + 'st3': 'on', + 'stp': '', + } + }, + 'hidden': True, + 'layout': 'table', + 'mustsearch': False, + 'name': 'dashlet_1', + 'num_columns': 1, + 'owner': '', + 'painters': [('service_state', None), + ('host', 'host'), + ('service_description', 'service'), + ('service_icons', None), + ('svc_plugin_output', None), + ('svc_state_age', None), + ('svc_check_age', None), + ], + 'play_sounds': True, + 'public': True, + 'sorters': [('svcstate', True), + ('stateage', False), + ('svcdescr', False)], }, - # { - # "title" : "CPU load of Nagios", - # # "url" : "http://localhost/dk/pnp4nagios/index.php/image?host=DerNagiosSelbst&srv=fs__var&view=0", - # "url" : "http://localhost/dk/pnp4nagios/index.php/popup?host=localhost&srv=CPU_load&view=0&source=2", - # "position" : (1, -1), - # "size" : (11, 5), - # }, - # { - # "title" : "CPU utilization of Nagios", - # # "url" : "http://localhost/dk/pnp4nagios/index.php/image?host=DerNagiosSelbst&srv=fs__var&view=0", - # "url" : "http://localhost/dk/pnp4nagios/index.php/popup?host=localhost&srv=CPU_utilization&view=0&source=2", - # "position" : (12, -1), - # "size" : (11, 5), - # }, - # { ] } + diff -Nru check-mk-1.2.2p3/plugins/dashboard/dashlets.py check-mk-1.2.6p12/plugins/dashboard/dashlets.py --- check-mk-1.2.2p3/plugins/dashboard/dashlets.py 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/plugins/dashboard/dashlets.py 2015-09-21 10:59:54.000000000 +0000 @@ -0,0 +1,638 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. + +# .--Overview------------------------------------------------------------. +# | ___ _ | +# | / _ \__ _____ _ ____ _(_) _____ __ | +# | | | | \ \ / / _ \ '__\ \ / / |/ _ \ \ /\ / / | +# | | |_| |\ V / __/ | \ V /| | __/\ V V / | +# | \___/ \_/ \___|_| \_/ |_|\___| \_/\_/ | +# | | +# +----------------------------------------------------------------------+ +# | | +# '----------------------------------------------------------------------' + +def dashlet_overview(nr, dashlet): + html.write( + '' + '' + '' + ) + + html.write('
    ' + '' + '

    Check_MK Multisite

    ' + 'Welcome to Check_MK Multisite. If you want to learn more about Multisite, please visit ' + 'our online documentation. ' + 'Multisite is part of Check_MK - an Open Source ' + 'project by Mathias Kettner.' + '
    ') + +dashlet_types["overview"] = { + "title" : _("Overview / Introduction"), + "description" : _("Displays an introduction and Check_MK logo."), + "render" : dashlet_overview, + "allowed" : config.builtin_role_ids, + "selectable" : False, # can not be selected using the dashboard editor +} + +#. +# .--MK-Logo-------------------------------------------------------------. +# | __ __ _ __ _ | +# | | \/ | |/ / | | ___ __ _ ___ | +# | | |\/| | ' /_____| | / _ \ / _` |/ _ \ | +# | | | | | . \_____| |__| (_) | (_| | (_) | | +# | |_| |_|_|\_\ |_____\___/ \__, |\___/ | +# | |___/ | +# +----------------------------------------------------------------------+ +# | | +# '----------------------------------------------------------------------' + +def dashlet_mk_logo(nr, dashlet): + html.write('' + '') + +dashlet_types["mk_logo"] = { + "title" : _("Check_MK Logo"), + "description" : _("Shows the Check_MK logo."), + "render" : dashlet_mk_logo, + "allowed" : config.builtin_role_ids, + "selectable" : False, # can not be selected using the dashboard editor +} + +#. +# .--Globes/Stats--------------------------------------------------------. +# | ____ _ _ ______ _ _ | +# | / ___| | ___ | |__ ___ ___ / / ___|| |_ __ _| |_ ___ | +# | | | _| |/ _ \| '_ \ / _ \/ __| / /\___ \| __/ _` | __/ __| | +# | | |_| | | (_) | |_) | __/\__ \/ / ___) | || (_| | |_\__ \ | +# | \____|_|\___/|_.__/ \___||___/_/ |____/ \__\__,_|\__|___/ | +# | | +# +----------------------------------------------------------------------+ +# | | +# '----------------------------------------------------------------------' + +def dashlet_hoststats(nr, dashlet): + table = [ + ( _("Up"), "#0b3", + "searchhost&is_host_scheduled_downtime_depth=0&hst0=on", + "Stats: state = 0\n" \ + "Stats: scheduled_downtime_depth = 0\n" \ + "StatsAnd: 2\n"), + + ( _("Down"), "#f00", + "searchhost&is_host_scheduled_downtime_depth=0&hst1=on", + "Stats: state = 1\n" \ + "Stats: scheduled_downtime_depth = 0\n" \ + "StatsAnd: 2\n"), + + ( _("Unreachable"), "#f80", + "searchhost&is_host_scheduled_downtime_depth=0&hst2=on", + "Stats: state = 2\n" \ + "Stats: scheduled_downtime_depth = 0\n" \ + "StatsAnd: 2\n"), + + ( _("In Downtime"), "#0af", + "searchhost&search=1&is_host_scheduled_downtime_depth=1", + "Stats: scheduled_downtime_depth > 0\n" \ + ) + ] + filter = "Filter: custom_variable_names < _REALNAME\n" + + render_statistics('dashlet_%d' % nr, "hosts", table, filter, dashlet) + +dashlet_types["hoststats"] = { + "title" : _("Host Statistics"), + "sort_index" : 45, + "description" : _("Displays statistics about host states as globe and a table."), + "render" : dashlet_hoststats, + "refresh" : 60, + "allowed" : config.builtin_role_ids, + "size" : (30, 18), + "resizable" : False, +} + +def dashlet_servicestats(nr, dashlet): + table = [ + ( _("OK"), "#0b3", + "searchsvc&hst0=on&st0=on&is_in_downtime=0", + "Stats: state = 0\n" \ + "Stats: scheduled_downtime_depth = 0\n" \ + "Stats: host_scheduled_downtime_depth = 0\n" \ + "Stats: host_state = 0\n" \ + "Stats: host_has_been_checked = 1\n" \ + "StatsAnd: 5\n"), + + ( _("In Downtime"), "#0af", + "searchsvc&is_in_downtime=1", + "Stats: scheduled_downtime_depth > 0\n" \ + "Stats: host_scheduled_downtime_depth > 0\n" \ + "StatsOr: 2\n"), + + ( _("On Down host"), "#048", + "searchsvc&hst1=on&hst2=on&hstp=on&is_in_downtime=0", + "Stats: scheduled_downtime_depth = 0\n" \ + "Stats: host_scheduled_downtime_depth = 0\n" \ + "Stats: host_state != 0\n" \ + "StatsAnd: 3\n"), + + ( _("Warning"), "#ff0", + 
"searchsvc&hst0=on&st1=on&is_in_downtime=0", + "Stats: state = 1\n" \ + "Stats: scheduled_downtime_depth = 0\n" \ + "Stats: host_scheduled_downtime_depth = 0\n" \ + "Stats: host_state = 0\n" \ + "Stats: host_has_been_checked = 1\n" \ + "StatsAnd: 5\n"), + + ( _("Unknown"), "#f80", + "searchsvc&hst0=on&st3=on&is_in_downtime=0", + "Stats: state = 3\n" \ + "Stats: scheduled_downtime_depth = 0\n" \ + "Stats: host_scheduled_downtime_depth = 0\n" \ + "Stats: host_state = 0\n" \ + "Stats: host_has_been_checked = 1\n" \ + "StatsAnd: 5\n"), + + ( _("Critical"), "#f00", + "searchsvc&hst0=on&st2=on&is_in_downtime=0", + "Stats: state = 2\n" \ + "Stats: scheduled_downtime_depth = 0\n" \ + "Stats: host_scheduled_downtime_depth = 0\n" \ + "Stats: host_state = 0\n" \ + "Stats: host_has_been_checked = 1\n" \ + "StatsAnd: 5\n"), + ] + filter = "Filter: host_custom_variable_names < _REALNAME\n" + + render_statistics('dashlet_%d' % nr, "services", table, filter, dashlet) + + +dashlet_types["servicestats"] = { + "title" : _("Service Statistics"), + "sort_index" : 50, + "description" : _("Displays statistics about service states as globe and a table."), + "render" : dashlet_servicestats, + "refresh" : 60, + "allowed" : config.builtin_role_ids, + "size" : (30, 18), + "resizable" : False, +} + +def render_statistics(pie_id, what, table, filter, dashlet): + pie_diameter = 130 + pie_left_aspect = 0.5 + pie_right_aspect = 0.8 + + info = what == 'hosts' and 'host' or 'service' + use_filters = visuals.filters_of_visual(dashlet, [info]) + for filt in use_filters: + if filt.available() and not isinstance(filt, visuals.FilterSite): + filter += filt.filter(info) + + query = "GET %s\n" % what + for entry in table: + query += entry[3] + query += filter + + site = dashlet['context'].get('siteopt', {}).get('site') + if site: + html.live.set_only_sites([site]) + result = html.live.query_row(query) + html.live.set_only_sites() + else: + result = html.live.query_summed_stats(query) + + pies = zip(table, result) + total = sum([x[1] for x in pies]) + + html.write("
    ") + html.write('' % + (pie_diameter, pie_diameter, pie_id)) + html.write('') + + html.write('' % ( + len(pies) > 1 and " narrow" or "")) + table_entries = pies + while len(table_entries) < 6: + table_entries = table_entries + [ (("", "#95BBCD", "", ""), " ") ] + table_entries.append(((_("Total"), "", "all%s" % what, ""), total)) + for (name, color, viewurl, query), count in table_entries: + url = "view.py?view_name=" + viewurl + "&filled_in=filter&search=1&wato_folder=" + for filter_name, url_params in dashlet['context'].items(): + url += '&' + html.urlencode_vars(url_params.items()) + html.write('' % (url, name)) + style = '' + if color: + style = ' style="background-color: %s"' % color + html.write('' % (style, url, count)) + + html.write("
    %s' + '%s
    ") + + r = 0.0 + pie_parts = [] + if total > 0: + # Count number of non-empty classes + num_nonzero = 0 + for info, value in pies: + if value > 0: + num_nonzero += 1 + + # Each non-zero class gets at least a view pixels of visible thickness. + # We reserve that space right now. All computations are done in percent + # of the radius. + separator = 0.02 # 3% of radius + remaining_separatorspace = num_nonzero * separator # space for separators + remaining_radius = 1 - remaining_separatorspace # remaining space + remaining_part = 1.0 # keep track of remaining part, 1.0 = 100% + + # Loop over classes, begin with most outer sphere. Inner spheres show + # worse states and appear larger to the user (which is the reason we + # are doing all this stuff in the first place) + for (name, color, viewurl, q), value in pies[::1]: + if value > 0 and remaining_part > 0: # skip empty classes + + # compute radius of this sphere *including all inner spheres!* The first + # sphere always gets a radius of 1.0, of course. + radius = remaining_separatorspace + remaining_radius * (remaining_part ** (1/3.0)) + pie_parts.append('chart_pie("%s", %f, %f, %r, true);' % (pie_id, pie_right_aspect, radius, color)) + pie_parts.append('chart_pie("%s", %f, %f, %r, false);' % (pie_id, pie_left_aspect, radius, color)) + + # compute relative part of this class + part = float(value) / total # ranges from 0 to 1 + remaining_part -= part + remaining_separatorspace -= separator + + + html.write("
    ") + html.javascript(""" +function chart_pie(pie_id, x_scale, radius, color, right_side) { + var context = document.getElementById(pie_id + "_stats").getContext('2d'); + if (!context) + return; + var pie_x = %(x)f; + var pie_y = %(y)f; + var pie_d = %(d)f; + context.fillStyle = color; + context.save(); + context.translate(pie_x, pie_y); + context.scale(x_scale, 1); + context.beginPath(); + if(right_side) + context.arc(0, 0, (pie_d / 2) * radius, 1.5 * Math.PI, 0.5 * Math.PI, false); + else + context.arc(0, 0, (pie_d / 2) * radius, 0.5 * Math.PI, 1.5 * Math.PI, false); + context.closePath(); + context.fill(); + context.restore(); + context = null; +} + + +if (has_canvas_support()) { + %(p)s +} +""" % { "x" : pie_diameter / 2, "y": pie_diameter/2, "d" : pie_diameter, 'p': '\n'.join(pie_parts) }) + +#. +# .--PNP-Graph-----------------------------------------------------------. +# | ____ _ _ ____ ____ _ | +# | | _ \| \ | | _ \ / ___|_ __ __ _ _ __ | |__ | +# | | |_) | \| | |_) |____| | _| '__/ _` | '_ \| '_ \ | +# | | __/| |\ | __/_____| |_| | | | (_| | |_) | | | | | +# | |_| |_| \_|_| \____|_| \__,_| .__/|_| |_| | +# | |_| | +# +----------------------------------------------------------------------+ +# | Renders a single performance graph | +# '----------------------------------------------------------------------' + +def make_pnp_url(dashlet, what): + host = dashlet['context'].get('host') + if not host: + raise MKUserError('host', _('Missing needed host parameter.')) + + service = dashlet['context'].get('service') + if not service: + service = "_HOST_" + + # When the site is available via URL context, use it. Otherwise it is needed + # to check all sites for the requested host + if html.has_var('site'): + site = html.var('site') + else: + html.live.set_prepend_site(True) + query = "GET hosts\nFilter: name = %s\nColumns: name" % lqencode(host) + site = html.live.query_column(query)[0] + html.live.set_prepend_site(False) + + if not site: + base_url = defaults.url_prefix + else: + base_url = html.site_status[site]["site"]["url_prefix"] + + base_url += "pnp4nagios/index.php/" + var_part = "?host=%s&srv=%s&source=%d&view=%s&theme=multisite" % \ + (pnp_cleanup(dashlet['context']['host']), pnp_cleanup(service), + dashlet['source'], dashlet['timerange']) + return base_url + what + var_part + +def dashlet_pnpgraph(nr, dashlet): + html.write('' % (make_pnp_url(dashlet, 'graph'), nr)) + +dashlet_types["pnpgraph"] = { + "title" : _("Performance Graph"), + "sort_index" : 20, + "description" : _("Displays a performance graph of a host or service."), + "render" : dashlet_pnpgraph, + "refresh" : 60, + "size" : (60, 21), + "allowed" : config.builtin_role_ids, + "infos" : ["service", "host"], + "single_infos" : ["service", "host"], + "parameters" : [ + ("timerange", DropdownChoice( + title = _('Timerange'), + default_value = '1', + choices= [ + ("0", _("4 Hours")), ("1", _("25 Hours")), + ("2", _("One Week")), ("3", _("One Month")), + ("4", _("One Year")), + ], + )), + ("source", Integer( + title = _('Source (n\'th Graph)'), + default_value = 0, + )), + ], + "styles": """ +.dashlet.pnpgraph .dashlet_inner { + background-color: #fff; + color: #000; + text-align: center; +} +""", + "on_resize" : lambda nr, dashlet: 'dashboard_render_pnpgraph(%d, \'%s\');' % + (nr, make_pnp_url(dashlet, 'image')), + # execute this js handler instead of refreshing the dashlet by calling "render" again + "on_refresh" : lambda nr, dashlet: 'dashboard_render_pnpgraph(%d, \'%s\');' % + (nr, make_pnp_url(dashlet, 'image')), + 
"script": """ +var dashlet_offsets = {}; +function dashboard_render_pnpgraph(nr, img_url) +{ + // Get the target size for the graph from the inner dashlet container + var inner = document.getElementById('dashlet_inner_' + nr); + var c_w = inner.clientWidth; + var c_h = inner.clientHeight; + + var container = document.getElementById('dashlet_graph_' + nr); + var img = document.getElementById('dashlet_img_' + nr); + if (!img) { + var img = document.createElement('img'); + img.setAttribute('id', 'dashlet_img_' + nr); + container.appendChild(img); + } + + // This handler is called after loading the configured graph image to verify + // it fits into the inner dashlet container. + // One could think that it can simply be solved by requesting an image of the + // given size from PNP/rrdtool, but this is not the case. When we request an + // image of a specified size, this size is used for the graphing area. The + // resulting image has normally labels which are added to the requested size. + img.onload = function(nr, url, w, h) { + return function() { + var i_w = this.clientWidth; + var i_h = this.clientHeight; + + // difference between the requested size and the real size of the image + var x_diff = i_w - w; + var y_diff = i_h - h; + + if (Math.abs(x_diff) < 10 && Math.abs(y_diff) < 10) { + return; // Finished resizing + } + + // When the target height is smaller or equal to 81 pixels, PNP + // returns an image which has no labels, just the graph, which has + // exactly the requested height. In this situation no further resizing + // is needed. + if (h <= 81 || h - y_diff <= 81) { + this.style.width = '100%'; + this.style.height = '100%'; + return; + } + + // Save the sizing differences between the requested size and the + // resulting size. This is, in fact, the size of the graph labels. + // load_graph_img() uses these dimensions to try to get an image + // which really fits the requested dimensions. + if (typeof dashlet_offsets[nr] == 'undefined') { + dashlet_offsets[nr] = [x_diff, y_diff]; + } else if (dashlet_offsets[nr][0] != x_diff || dashlet_offsets[nr][1] != y_diff) { + // was not successful in getting a correctly sized image. Seems + // that PNP/rrdtool was not able to render this size. Terminate + // and automatically scale to 100%/100% + this.style.width = '100%'; + this.style.height = '100%'; + return; + } + + load_graph_img(nr, this, url, w, h); + }; + }(nr, img_url, c_w, c_h); + + img.style.width = 'auto'; + img.style.height = 'auto'; + load_graph_img(nr, img, img_url, c_w, c_h); +} + +function load_graph_img(nr, img, img_url, c_w, c_h) +{ + if (typeof dashlet_offsets[nr] == 'undefined' + || (c_h > 1 && c_h - dashlet_offsets[nr][1] < 81)) { + // use this on first load and later when the graph is less high than 81px + img_url += '&graph_width='+c_w+'&graph_height='+c_h; + } else { + img_url += '&graph_width='+(c_w - dashlet_offsets[nr][0]) + +'&graph_height='+(c_h - dashlet_offsets[nr][1]); + } + img_url += '&_t='+Math.floor(Date.parse(new Date()) / 1000); // prevent caching + img.src = img_url; +} +""" +} + +#. +# .--nodata--------------------------------------------------------------. +# | _ _ | +# | _ __ ___ __| | __ _| |_ __ _ | +# | | '_ \ / _ \ / _` |/ _` | __/ _` | | +# | | | | | (_) | (_| | (_| | || (_| | | +# | |_| |_|\___/ \__,_|\__,_|\__\__,_| | +# | | +# +----------------------------------------------------------------------+ +# | | +# '----------------------------------------------------------------------' + +def dashlet_nodata(nr, dashlet): + html.write("
    ") + html.write(dashlet.get("text")) + html.write("
    ") + +dashlet_types["nodata"] = { + "title" : _("Static text"), + "sort_index" : 100, + "description" : _("Displays a static text to the user."), + "render" : dashlet_nodata, + "allowed" : config.builtin_role_ids, + "parameters" : [ + ("text", TextUnicode( + title = _('Text'), + size = 50, + )), + ], + "styles" : """ +div.dashlet_inner div.nodata { + width: 100%; + height: 100%; +} + +div.dashlet_inner.background div.nodata div.msg { + color: #000; +} + +div.dashlet_inner div.nodata div.msg { + padding: 10px; +} + +}""", +} + +#. +# .--View----------------------------------------------------------------. +# | __ ___ | +# | \ \ / (_) _____ __ | +# | \ \ / /| |/ _ \ \ /\ / / | +# | \ V / | | __/\ V V / | +# | \_/ |_|\___| \_/\_/ | +# | | +# +----------------------------------------------------------------------+ +# | | +# '----------------------------------------------------------------------' + +def dashlet_view(nr, dashlet): + import bi # FIXME: Cleanup? + bi.reset_cache_status() # needed for status icon + + html.set_var('display_options', 'HRSIXL') + html.set_var('_display_options', 'HRSIXL') + html.add_body_css_class('dashlet') + + import views # FIXME: HACK, clean this up somehow + views.load_views() + views.show_view(dashlet, True, True, True) + +def dashlet_view_add_url(): + return 'create_view_dashlet.py?name=%s&back=%s' % \ + (html.urlencode(html.var('name')), html.urlencode(html.makeuri([('edit', '1')]))) + +def dashlet_view_parameters(): + return dashlet_view_render_input, dashlet_view_handle_input + +def dashlet_view_render_input(dashlet): + import views # FIXME: HACK, clean this up somehow + views.load_views() + views.transform_view_to_valuespec_value(dashlet) + return views.render_view_config(dashlet) + +def dashlet_view_handle_input(ident, dashlet): + dashlet['name'] = 'dashlet_%d' % ident + dashlet.setdefault('title', _('View')) + import views # FIXME: HACK, clean this up somehow + views.load_views() + return views.create_view_from_valuespec(dashlet, dashlet) + +dashlet_types["view"] = { + "title" : _("View"), + "sort_index" : 10, + "description" : _("Displays a the content of a Multisite view."), + "size" : (40, 20), + "iframe_render" : dashlet_view, + "allowed" : config.builtin_role_ids, + "add_urlfunc" : dashlet_view_add_url, + "parameters" : dashlet_view_parameters, +} + +#. +# .--Custom URL----------------------------------------------------------. 
+#   |            ____          _                  _   _ ____  _            |
+#   |           / ___|   _ ___| |_ ___  _ __ ___ | | | |  _ \| |           |
+#   |          | |  | | | / __| __/ _ \| '_ ` _ \| | | | |_) | |           |
+#   |          | |__| |_| \__ \ || (_) | | | | | | |_| |  _ <| |___        |
+#   |           \____\__,_|___/\__\___/|_| |_| |_|\___/|_| \_\_____|       |
+#   |                                                                      |
+#   +----------------------------------------------------------------------+
+#   |                                                                      |
+#   '----------------------------------------------------------------------'
+
+def dashlet_url(dashlet):
+    if dashlet.get('show_in_iframe', True):
+        return dashlet['url']
+
+def dashlet_url_validate(value, varprefix):
+    if 'url' not in value and 'urlfunc' not in value:
+        raise MKUserError(varprefix, _('You need to provide either a URL or '
+                                       'the name of a Python function to be used '
+                                       'for rendering the dashlet.'))
+
+dashlet_types["url"] = {
+    "title"          : _("Custom URL"),
+    "sort_index"     : 80,
+    "description"    : _("Displays the content of a custom website."),
+    "iframe_urlfunc" : dashlet_url,
+    "allowed"        : config.builtin_role_ids,
+    "size"           : (30, 10),
+    "parameters"     : [
+        ("url", TextAscii(
+            title = _('URL'),
+            size = 50,
+            allow_empty = False,
+        )),
+        ("urlfunc", TextAscii(
+            title = _('Dynamic URL rendering function'),
+            size = 50,
+            allow_empty = False,
+        )),
+        ("show_in_iframe", Checkbox(
+            title = _('Render in iframe'),
+            label = _('Render URL contents in own frame'),
+            default_value = True,
+        )),
+    ],
+    "opt_params"      : ['url', 'urlfunc'],
+    "validate_params" : dashlet_url_validate,
+}
diff -Nru check-mk-1.2.2p3/plugins/db2_mem check-mk-1.2.6p12/plugins/db2_mem
--- check-mk-1.2.2p3/plugins/db2_mem 1970-01-01 00:00:00.000000000 +0000
+++ check-mk-1.2.6p12/plugins/db2_mem 2015-06-24 09:48:39.000000000 +0000
@@ -0,0 +1,34 @@
+#!/bin/sh
+# +------------------------------------------------------------------+
+# |             ____ _               _        __  __ _  __           |
+# |            / ___| |__   ___  ___| | __   |  \/  | |/ /           |
+# |           | |   | '_ \ / _ \/ __| |/ /   | |\/| | ' /            |
+# |           | |___| | | |  __/ (__|   <    | |  | | . \            |
+# |            \____|_| |_|\___|\___|_|\_\___|_|  |_|_|\_\           |
+# |                                                                  |
+# | Copyright Mathias Kettner 2014             mk@mathias-kettner.de |
+# +------------------------------------------------------------------+
+#
+# This file is part of Check_MK.
+# The official homepage is at http://mathias-kettner.de/check_mk.
+#
+# check_mk is free software;  you can redistribute it and/or modify it
+# under the  terms of the  GNU General Public License  as published by
+# the Free Software Foundation in version 2.  check_mk is  distributed
+# in the hope that it will be useful, but WITHOUT ANY WARRANTY;  with-
+# out even the implied warranty of  MERCHANTABILITY  or  FITNESS FOR A
+# PARTICULAR PURPOSE. See the  GNU General Public License for more de-
+# ails.  You should have  received  a copy of the  GNU  General Public
+# License along with GNU Make; see the file  COPYING.  If  not,  write
+# to the Free Software Foundation, Inc., 51 Franklin St,  Fifth Floor,
+# Boston, MA 02110-1301 USA.
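
The Custom URL dashlet above shows the general shape of a dashlet_types registration: a dict with title and description, a render or iframe callback, an initial size, and optional parameters with validation. A sketch of a minimal custom dashlet following the same pattern; the "server_time" name, its render function and its "format" parameter are invented for illustration, while html, config, dashlet_types, _ and TextAscii are the framework globals already used by the plugin files above:

import time

def dashlet_server_time(nr, dashlet):
    # 'nr' is the number of the dashlet on the dashboard,
    # 'dashlet' is its configuration dict (including our parameters).
    fmt = dashlet.get("format", "%Y-%m-%d %H:%M:%S")
    html.write(time.strftime(fmt))

dashlet_types["server_time"] = {
    "title"       : _("Server Time"),
    "sort_index"  : 90,
    "description" : _("Displays the current time of the Multisite server."),
    "render"      : dashlet_server_time,   # paints inline, no iframe needed
    "refresh"     : 60,                    # re-render once per minute
    "size"        : (12, 4),               # initial size in raster units
    "allowed"     : config.builtin_role_ids,
    "parameters"  : [
        ("format", TextAscii(
            title = _('strftime() format'),
            default_value = "%Y-%m-%d %H:%M:%S",
        )),
    ],
}
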
+ +INSTANCES=$(ps -ef | grep db2sysc | awk '{print $1}' | sort -u | grep -v root) + +if [ "$INSTANCES" ] ; then + echo "<<>>" + for INSTANCE in $INSTANCES; do + echo "Instance $INSTANCE" + su - $INSTANCE -c "db2pd -dbptnmem " | egrep '(Memory Limit|HWM usage)' + done +fi diff -Nru check-mk-1.2.2p3/plugins/db2_mem.sh check-mk-1.2.6p12/plugins/db2_mem.sh --- check-mk-1.2.2p3/plugins/db2_mem.sh 2013-03-04 11:48:41.000000000 +0000 +++ check-mk-1.2.6p12/plugins/db2_mem.sh 1970-01-01 00:00:00.000000000 +0000 @@ -1,33 +0,0 @@ -#!/bin/bash -# +------------------------------------------------------------------+ -# | ____ _ _ __ __ _ __ | -# | / ___| |__ ___ ___| | __ | \/ | |/ / | -# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | -# | | |___| | | | __/ (__| < | | | | . \ | -# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | -# | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | -# +------------------------------------------------------------------+ -# -# This file is part of Check_MK. -# The official homepage is at http://mathias-kettner.de/check_mk. -# -# check_mk is free software; you can redistribute it and/or modify it -# under the terms of the GNU General Public License as published by -# the Free Software Foundation in version 2. check_mk is distributed -# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- -# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A -# PARTICULAR PURPOSE. See the GNU General Public License for more de- -# ails. You should have received a copy of the GNU General Public -# License along with GNU Make; see the file COPYING. If not, write -# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, -# Boston, MA 02110-1301 USA. - -INSTANCES=$(ps -ef | grep db2sysc | awk '{print $1}' | sort -u | grep -v root) - -for INSTANCE in $INSTANCES; do - echo "<<>>" - echo "Instance $INSTANCE" - su - $INSTANCE -c "db2pd -dbptnmem " | egrep '(Memory Limit|HWM usage)' -done - diff -Nru check-mk-1.2.2p3/plugins/dmi_sysinfo check-mk-1.2.6p12/plugins/dmi_sysinfo --- check-mk-1.2.2p3/plugins/dmi_sysinfo 2012-10-04 13:12:37.000000000 +0000 +++ check-mk-1.2.6p12/plugins/dmi_sysinfo 1970-01-01 00:00:00.000000000 +0000 @@ -1,6 +0,0 @@ -#/bin/sh - -if which dmidecode >/dev/null 2>&1; then - echo "<<>>" - dmidecode -t 1 -q -fi diff -Nru check-mk-1.2.2p3/plugins/dmraid check-mk-1.2.6p12/plugins/dmraid --- check-mk-1.2.2p3/plugins/dmraid 2012-10-04 13:12:37.000000000 +0000 +++ check-mk-1.2.6p12/plugins/dmraid 1970-01-01 00:00:00.000000000 +0000 @@ -1,19 +0,0 @@ -#!/bin/sh -echo '<<>>' - -STATUS=$(dmraid -r) -if [ $? != 0 ]; then - exit 1 -fi - -# Name und Status ausgeben -dmraid -s | grep -e ^name -e ^status - -# Diskname der Raidplatten ausgeben -DISKS=$(echo "$STATUS" | cut -f1 -d\:) - -for disk in $DISKS ; do - device=$(cat /sys/block/$(basename $disk)/device/model ) - status=$(echo "$STATUS" | grep ^${disk}) - echo "$status Model: $device" -done diff -Nru check-mk-1.2.2p3/plugins/dnsclient check-mk-1.2.6p12/plugins/dnsclient --- check-mk-1.2.2p3/plugins/dnsclient 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/plugins/dnsclient 2015-06-24 09:48:39.000000000 +0000 @@ -0,0 +1,48 @@ +#!/bin/sh +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . 
\ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. + +# This check can be used to test the name resolution of a given host +# address using the local resolver of the system this script is +# running on. + +HOSTADDRESSES=mathias-kettner.de + +if [ -e $MK_CONFDIR/dnsclient.cfg ] ; then + . $MK_CONFDIR/dnsclient.cfg +fi + +echo "<<>>" +for HOSTADDRESS in $HOSTADDRESSES +do + ADDRESSES=`nslookup $HOSTADDRESS | sed -n -e 1,3d -e '/^Address: *\(.*\)$/s//\1/p'` + if [ ! "$ADDRESSES" ] ; then + STATE=2 + OUTPUT="CRIT - $HOSTADDRESS could not be resolved" + else + STATE=0 + OUTPUT="OK - $HOSTADDRESS resolved into $ADDRESSES" + fi + echo Resolve_$HOSTADDRESS $STATE $OUTPUT +done diff -Nru check-mk-1.2.2p3/plugins/hpux_lunstats check-mk-1.2.6p12/plugins/hpux_lunstats --- check-mk-1.2.2p3/plugins/hpux_lunstats 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/plugins/hpux_lunstats 2015-06-24 09:48:39.000000000 +0000 @@ -0,0 +1,91 @@ +#!/usr/bin/ksh +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. + +# Put this file into /usr/lib/check_mk_agent/plugins. Then +# reinventorize your host. +# Actually querying these stats is quite slow since they freshly update +# on each call. If you have a few 1000 luns then this will not work. + +get_stats() +{ + scsimgr get_stat -D $LUN | tr '\=' ':' | grep -e 'STATISTICS FOR LUN' -e 'Bytes' -e 'Total I/Os processed' -e 'I/O failure' -e 'IO failures due +to' + return $? 
+}
+
+
+# Ex:
+#LUN PATH INFORMATION FOR LUN :  /dev/pt/pt2
+#World Wide Identifier(WWID)    =
+#LUN PATH INFORMATION FOR LUN :  /dev/rdisk/disk5
+#World Wide Identifier(WWID)    = 0x60a98000572d44745634645076556357
+#LUN PATH INFORMATION FOR LUN :  /dev/rdisk/disk6
+
+get_lun_map()
+{
+scsimgr lun_map | egrep '^[[:space:]]*(LUN PATH|World Wide Identifier)' | tr '\=' ':'
+}
+
+
+main()
+{
+get_lun_map | while read line ; do
+    descr=$(echo $line | awk -F: '{print $1}')
+    val=$( echo $line | awk -F: '{print $2}')
+    case $descr in
+        LUN*)
+            if echo $val | grep /dev/rdisk 1>/dev/null; then
+                DMP=yes
+                LUN=$val
+            else
+                DMP=no
+                unset LUN
+            fi
+            ;;
+        World*)
+            if [ $DMP = "yes" ]; then
+                echo "WWID: $val"
+                get_stats $LUN
+            fi
+            ;;
+        *)
+            echo "Error:"
+            echo $line
+            echo $descr
+            echo $val
+            sleep 1
+            ;;
+    esac
+done
+}
+
+
+
+# Verify the system is using the new multipath device model.
+if [ -d /dev/rdisk ] && [ -d /dev/disk ]; then
+    echo '<<<hpux_lunstats:sep(58)>>>'
+    main
+fi
+
diff -Nru check-mk-1.2.2p3/plugins/hpux_statgrab check-mk-1.2.6p12/plugins/hpux_statgrab
--- check-mk-1.2.2p3/plugins/hpux_statgrab 1970-01-01 00:00:00.000000000 +0000
+++ check-mk-1.2.6p12/plugins/hpux_statgrab 2015-06-24 09:48:39.000000000 +0000
@@ -0,0 +1,46 @@
+#!/bin/sh
+# +------------------------------------------------------------------+
+# |             ____ _               _        __  __ _  __           |
+# |            / ___| |__   ___  ___| | __   |  \/  | |/ /           |
+# |           | |   | '_ \ / _ \/ __| |/ /   | |\/| | ' /            |
+# |           | |___| | | |  __/ (__|   <    | |  | | . \            |
+# |            \____|_| |_|\___|\___|_|\_\___|_|  |_|_|\_\           |
+# |                                                                  |
+# | Copyright Mathias Kettner 2014             mk@mathias-kettner.de |
+# +------------------------------------------------------------------+
+#
+# This file is part of Check_MK.
+# The official homepage is at http://mathias-kettner.de/check_mk.
+#
+# check_mk is free software;  you can redistribute it and/or modify it
+# under the  terms of the  GNU General Public License  as published by
+# the Free Software Foundation in version 2.  check_mk is  distributed
+# in the hope that it will be useful, but WITHOUT ANY WARRANTY;  with-
+# out even the implied warranty of  MERCHANTABILITY  or  FITNESS FOR A
+# PARTICULAR PURPOSE. See the  GNU General Public License for more de-
+# ails.  You should have  received  a copy of the  GNU  General Public
+# License along with GNU Make; see the file  COPYING.  If  not,  write
+# to the Free Software Foundation, Inc., 51 Franklin St,  Fifth Floor,
+# Boston, MA 02110-1301 USA.
+
+# This is for users who compiled statgrab on HP-UX.
+# Note: you'll need a 0.18+ version, from their GitHub page at
+# https://github.com/i-scream/libstatgrab
+# Flags used for compiling: disable documentation, examples and set*id
+
+
+if which statgrab > /dev/null ; then
+    if statgrab const. cpu. general. mem. page. proc. swap. user. > /tmp/statgrab.$$ 2>/dev/null
+    then
+        for s in proc cpu page
+        do
+            echo "<<<statgrab_$s>>>"
+            cat /tmp/statgrab.$$ | grep "^$s\." | cut -d. -f2-99 | sed 's/ *= */ /'
+        done
+
+        echo '<<<statgrab_mem>>>'
+        cat /tmp/statgrab.$$ | egrep "^(swap|mem)\." | sed 's/ *= */ /'
+
+    fi
+    [ -f /tmp/statgrab.$$ ] && rm -f /tmp/statgrab.$$
+fi
diff -Nru check-mk-1.2.2p3/plugins/icons/builtin.py check-mk-1.2.6p12/plugins/icons/builtin.py
--- check-mk-1.2.2p3/plugins/icons/builtin.py 2013-11-05 09:23:09.000000000 +0000
+++ check-mk-1.2.6p12/plugins/icons/builtin.py 2015-09-21 10:59:54.000000000 +0000
@@ -7,7 +7,7 @@
 # | | |___| | | | __/ (__| < | | | | . 
\ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. @@ -68,7 +68,7 @@ def paint_icon_image(what, row, tags, custom_vars): if row[what + '_icon_image']: - return '' % row[what + '_icon_image'] + return html.render_icon("icons/" + row[what + '_icon_image']) multisite_icons.append({ 'columns': [ 'icon_image' ], @@ -94,7 +94,7 @@ servicedesc = '' wait_svc = '' icon = 'icon_reload' - txt = _('Reschedule an immediate check of this %s') % _(what) + txt = _('Reschedule an immediate check') if what == 'service': servicedesc = row['service_description'].replace("\\","\\\\") @@ -108,13 +108,34 @@ return '' \ - '' % \ - (row["site"], row["host_name"], htmllib.urlencode(servicedesc), htmllib.urlencode(wait_svc), txt, icon) + '' % \ + (row["site"], row["host_name"], html.urlencode(servicedesc), html.urlencode(wait_svc), txt, icon) multisite_icons.append({ 'columns': [ 'active_checks_enabled' ], 'paint': paint_reschedule, }) + + +def paint_rule_editor(what, row, tags, custom_vars): + if config.wato_enabled and config.may("wato.rulesets") and config.multisite_draw_ruleicon: + urlvars = [("mode", "object_parameters"), + ("host", row["host_name"])] + + if what == 'service': + urlvars.append(("service", row["service_description"])) + title = _("View and edit parameters for this service") + else: + title = _("View and edit parameters for this host") + + url = html.makeuri_contextless(urlvars, "wato.py") + return '%s' % (url, html.render_icon('rulesets', title)) + +multisite_icons.append({ + 'service_columns': [ 'description', 'check_command', "host_name" ], + 'paint': paint_rule_editor, +}) + # +----------------------------------------------------------------------+ # | _ _ _ _ | # | / \ ___| | ___ __ _____ _| | ___ __| | __ _ ___ | @@ -126,8 +147,7 @@ def paint_ack_image(what, row, tags, custom_vars): if row[what + "_acknowledged"]: - return '' + return html.render_icon('ack', _('This problem has been acknowledged')) multisite_icons.append({ 'columns': [ 'acknowledged' ], @@ -148,8 +168,7 @@ if "_REALNAME" in custom_vars: newrow = row.copy() newrow["host_name"] = custom_vars["_REALNAME"] - return link_to_view("", newrow, 'host') + return link_to_view(html.render_icon('detail', _("Detailed host infos")), newrow, 'host') multisite_icons.append({ 'paint': paint_realhost_link_image, @@ -175,14 +194,14 @@ site = html.site_status[sitename]["site"] if html.mobile: url = site["url_prefix"] + ("pnp4nagios/index.php?kohana_uri=/mobile/%s/%s/%s" % \ - (how, htmllib.urlencode(host), htmllib.urlencode(svc))) + (how, html.urlencode(host), html.urlencode(svc))) else: url = site["url_prefix"] + ("pnp4nagios/index.php/%s?host=%s&srv=%s" % \ - (how, htmllib.urlencode(host), htmllib.urlencode(svc))) + (how, html.urlencode(host), html.urlencode(svc))) if how == 'graph': url += "&theme=multisite&baseurl=%scheck_mk/" % \ - htmllib.urlencode(site["url_prefix"]) + html.urlencode(site["url_prefix"]) return url def pnp_popup_url(row, what): @@ -194,8 +213,7 @@ else: url = "" return '' % \ - (url, pnp_popup_url(row, what)) + 'onmouseout="hoverHide()">%s' % (url, pnp_popup_url(row, what), html.render_icon('pnp', '')) def paint_pnp_graph(what, row, tags, custom_vars): pnpgraph_present = row[what + "_pnpgraph_present"] @@ -208,6 +226,36 @@ }) # 
+----------------------------------------------------------------------+ +# | ____ _ _ _ _ | +# | | _ \ _ __ ___ __| (_) ___| |_(_) ___ _ __ | +# | | |_) | '__/ _ \/ _` | |/ __| __| |/ _ \| '_ \ | +# | | __/| | | __/ (_| | | (__| |_| | (_) | | | | | +# | |_| |_| \___|\__,_|_|\___|\__|_|\___/|_| |_| | +# | | +# +----------------------------------------------------------------------+ +def paint_prediction_icon(what, row, tags, custom_vars): + if what == "service": + parts = row[what + "_perf_data"].split() + for p in parts: + if p.startswith("predict_"): + varname, value = p.split("=") + dsname = varname[8:] + sitename = row["site"] + site = html.site_status[sitename]["site"] + url = site["url_prefix"] + "check_mk/prediction_graph.py?" + html.urlencode_vars([ + ( "host", row["host_name"] ), + ( "service", row["service_description"] ), + ( "dsname", dsname ) ]) + title = _("Analyse predictive monitoring for this service") + return '%s' % (url, html.render_icon('prediction', title)) + +multisite_icons.append({ + 'columns' : [ 'perf_data' ], + 'paint' : paint_prediction_icon, +}) + + +# +----------------------------------------------------------------------+ # | _ _ _ _ _ ____ _ | # | / \ ___| |_(_) ___ _ __ | | | | _ \| | | # | / _ \ / __| __| |/ _ \| '_ \ _____| | | | |_) | | | @@ -223,8 +271,7 @@ pnpgraph_present = row[what + "_pnpgraph_present"] if action_url \ and not ('/pnp4nagios/' in action_url and pnpgraph_present >= 0): - return '' % action_url + return '%s' % (action_url, html.render_icon('action', _('Custom Action'))) multisite_icons.append({ 'columns': [ 'action_url_expanded', 'pnpgraph_present' ], @@ -241,36 +288,40 @@ # | | # +----------------------------------------------------------------------+ -# Adds the url_prefix of the services site to the notes url configured in this site. -# It also adds the master_url which will be used to link back to the source site -# in multi site environments. -def logwatch_url(sitename, notes_url): - i = notes_url.index("check_mk/logwatch.py") +def logwatch_url(sitename, hostname, item): + host_item_url = "check_mk/logwatch.py?host=%s&file=%s" % (html.urlencode(hostname), html.urlencode(item)) site = html.site_status[sitename]["site"] - master_url = '' if config.is_multisite(): master_url = '&master_url=' + defaults.url_prefix + 'check_mk/' - return site["url_prefix"] + notes_url[i:] + master_url + return site["url_prefix"] + host_item_url + master_url + +def paint_logwatch(what, row, tags, custom_vars): + if what != "service": + return + if row[what + "_check_command"] in [ 'check_mk-logwatch', 'check_mk-logwatch.groups' ]: + return '%s' % (logwatch_url(row["site"], row['host_name'], row['service_description'][4:]), + html.render_icon('logwatch', _('Open Log'))) + +multisite_icons.append({ + 'service_columns': [ 'host_name', 'service_description', 'check_command' ], + 'paint': paint_logwatch, +}) + +# Adds the url_prefix of the services site to the notes url configured in this site. +# It also adds the master_url which will be used to link back to the source site +# in multi site environments. def paint_notes(what, row, tags, custom_vars): if 'X' in html.display_options: - # notes_url (only, if not a Check_MK logwatch check pointing to - # logwatch.py. 
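
Every icon painter registered in this file follows the same contract: a paint(what, row, tags, custom_vars) function that returns rendered icon HTML for matching rows and None otherwise, appended to multisite_icons together with any extra livestatus columns it needs. A sketch of a custom painter using that contract; the "snmp" icon name and the tag it checks are assumptions, while html.render_icon(), _ and multisite_icons are the helpers used throughout the painters above:

def paint_snmp_tag(what, row, tags, custom_vars):
    # 'tags' holds the host tags of the row; returning None paints no icon.
    if what == "host" and "snmp" in tags:
        return html.render_icon('snmp', _('This host is monitored via SNMP'))

multisite_icons.append({
    'columns': [],            # no additional livestatus columns needed
    'paint'  : paint_snmp_tag,
})
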
These is done by a special icon) notes_url = row[what + "_notes_url_expanded"] check_command = row[what + "_check_command"] + if check_command == 'check_mk-logwatch' and \ + "check_mk/logwatch.py?host" in notes_url: + return if notes_url: - # unmodified original logwatch link - # -> translate into more intelligent icon - if check_command == 'check_mk-logwatch' \ - and "/check_mk/logwatch.py" in notes_url: - return '' % \ - logwatch_url(row["site"], notes_url) - else: - return '' % notes_url + return '%s' % (notes_url, html.render_icon('notes', _('Custom Notes'))) multisite_icons.append({ 'columns': [ 'notes_url_expanded', 'check_command' ], @@ -294,13 +345,11 @@ icon = "hostdowntime" else: icon = "downtime" - return link_to_view('' % - (_("Currently in downtime"), icon), row, 'downtimes_of_' + what) + return link_to_view(html.render_icon(icon, _("Currently in downtime")), + row, 'downtimes_of_' + what) elif what == "service" and row["host_scheduled_downtime_depth"] > 0: - return link_to_view('' % - _("The host is currently in downtime"), row, 'downtimes_of_host') - - + return link_to_view(html.render_icon('hostdowntime', _("The host is currently in downtime")), + row, 'downtimes_of_host') multisite_icons.append({ 'host_columns': [ 'scheduled_downtime_depth' ], @@ -318,15 +367,14 @@ # +----------------------------------------------------------------------+ def paint_comments(what, row, tags, custom_vars): - comments = row[what+ "_comments_with_extra_info"] + comments = row[what + "_comments_with_extra_info"] if len(comments) > 0: text = "" for c in comments: id, author, comment, ty, timestamp = c + comment = comment.replace("\n", "
    ").replace("'","'") text += "%s %s: \"%s\" \n" % (paint_age(timestamp, True, 0, 'abs')[1], author, comment) - return link_to_view('' % - text, row, 'comments_of_' + what) + return link_to_view(html.render_icon('comment', text), row, 'comments_of_' + what) multisite_icons.append({ 'columns': [ 'comments_with_extra_info' ], @@ -344,12 +392,21 @@ def paint_notifications(what, row, tags, custom_vars): # Notifications disabled - if not row[what + "_notifications_enabled"]: - return '' % \ - _('Notifications are disabled for this %s') % what + enabled = row[what + "_notifications_enabled"] + modified = "notifications_enabled" in row[what + "_modified_attributes_list"] + if modified and enabled: + return html.render_icon('notif_enabled', + _('Notifications are manually enabled for this %s') % what) + elif modified and not enabled: + return html.render_icon('notif_man_disabled', + _('Notifications are manually disabled for this %s') % what) + elif not enabled: + return html.render_icon('notif_disabled', + _('Notifications are disabled for this %s') % what) + multisite_icons.append({ - 'columns': [ 'notifications_enabled' ], + 'columns': [ 'modified_attributes_list', 'notifications_enabled' ], 'paint': paint_notifications, }) @@ -368,13 +425,37 @@ title = _("This host is flapping") else: title = _("This service is flapping") - return '' % title + return html.render_icon('flapping', title) multisite_icons.append({ 'columns': [ 'is_flapping' ], 'paint': paint_flapping, }) +#. +# .--Staleness-----------------------------------------------------------. +# | ____ _ _ | +# | / ___|| |_ __ _| | ___ _ __ ___ ___ ___ | +# | \___ \| __/ _` | |/ _ \ '_ \ / _ \/ __/ __| | +# | ___) | || (_| | | __/ | | | __/\__ \__ \ | +# | |____/ \__\__,_|_|\___|_| |_|\___||___/___/ | +# | | +# +----------------------------------------------------------------------+ + +def paint_is_stale(what, row, tags, custom_vars): + if is_stale(row): + if what == "host": + title = _("This host is stale") + else: + title = _("This service is stale") + title += _(", no data has been received within the last %.1f check periods") % config.staleness_threshold + return html.render_icon('stale', title) + +multisite_icons.append({ + 'columns': [ 'staleness' ], + 'paint': paint_is_stale, +}) + # +----------------------------------------------------------------------+ # | _ _ _ ____ _ _ | # | / \ ___| |_(_)_ _____ / ___| |__ ___ ___| | _____ | @@ -388,12 +469,9 @@ # Setting of active checks modified by user if "active_checks_enabled" in row[what + "_modified_attributes_list"]: if row[what + "_active_checks_enabled"] == 0: - return '' % \ - _('Active checks have been manually disabled for this %s!') % what + return html.render_icon('disabled', _('Active checks have been manually disabled for this %s!') % what) else: - return '' % \ - _('Active checks have been manually enabled for this %s!') % what - + return html.render_icon('enabled', _('Active checks have been manually enabled for this %s!') % what) multisite_icons.append({ 'columns': [ 'modified_attributes_list', 'active_checks_enabled' ], @@ -413,9 +491,7 @@ # Passive checks disabled manually? 
if "passive_checks_enabled" in row[what + "_modified_attributes_list"]: if row[what + "_accept_passive_checks"] == 0: - return '' % \ - _('Passive checks have been manually disabled for this %s!') % what - + return html.render_icon('npassive', _('Passive checks have been manually disabled for this %s!') % what) multisite_icons.append({ 'columns': [ 'modified_attributes_list', 'accept_passive_checks' ], @@ -433,9 +509,7 @@ def paint_notification_periods(what, row, tags, custom_vars): if not row[what + "_in_notification_period"]: - return '' % \ - _('Out of notification period') - + return html.render_icon('outofnot', _('Out of notification period')) multisite_icons.append({ 'columns': [ 'in_notification_period' ], @@ -458,9 +532,101 @@ if config.bi_precompile_on_demand \ or bi.is_part_of_aggregation(what, row["site"], row["host_name"], row.get("service_description")): - return link_to_view('' % - _('Aggregations containing this %s') % what, row, 'aggr_' + what) + urivars = [ + ("view_name", "aggr_" + what), + ("aggr_%s_site" % what, row["site"]), + ("aggr_%s_host" % what, row["host_name"]), + ] + if what == "service": + urivars += [ + ( "aggr_service_service", row["service_description"]) + ] + url = html.makeuri_contextless(urivars) + return '%s' % (url, html.render_icon('aggr', + _("BI Aggregations containing this %s") % (what == "host" and _("Host") or _("Service")))) + multisite_icons.append({ 'paint': paint_aggregations, }) + +#. +# .--Stars *-------------------------------------------------------------. +# | ____ _ | +# | / ___|| |_ __ _ _ __ ___ __/\__ | +# | \___ \| __/ _` | '__/ __| \ / | +# | ___) | || (_| | | \__ \ /_ _\ | +# | |____/ \__\__,_|_| |___/ \/ | +# | | +# '----------------------------------------------------------------------' + + +def paint_stars(what, row, tags, custom_vars): + try: + stars = html.stars + except: + stars = set(config.load_user_file("favorites", [])) + html.stars = stars + + if what == "host": + starred = row["host_name"] in stars + else: + starred = (row["host_name"] + ";" + row["service_description"]) in stars + if starred: + return html.render_icon('starred', _("This %s is one of your favorites") % _(what)) + +multisite_icons.append({ + 'columns': [], + 'paint': paint_stars, +}) + +def paint_icon_check_bi_aggr(what, row, tags, custom_vars): + if what == "service" and row.get("service_check_command","").startswith("check_mk_active-bi_aggr!"): + args = row['service_check_command'] + start = args.find('-b \'') + 4 + end = args.find('\' ', start) + base_url = args[start:end] + base_url = base_url.replace('$HOSTADDRESS$', row['host_address']) + base_url = base_url.replace('$HOSTNAME$', row['host_name']) + + start = args.find('-a \'') + 4 + end = args.find('\' ', start) + aggr_name = args[start:end] + + url = "%s/check_mk/view.py?view_name=aggr_single&aggr_name=%s" % \ + (base_url, html.urlencode(aggr_name)) + + return '%s' % (html.attrencode(url), html.render_icon('aggr', _('Open this Aggregation'))) + + +multisite_icons.append({ + 'host_columns' : [ 'check_command', 'name', 'address' ], + 'paint' : paint_icon_check_bi_aggr, +}) + +#. +# .--Crashdump-----------------------------------------------------------. 
+#.
+#   .--Crashdump-----------------------------------------------------------.
+#   |          ____               _         _                              |
+#   |         / ___|_ __ __ _ ___| |__   __| |_   _ _ __ ___  _ __         |
+#   |        | |   | '__/ _` / __| '_ \ / _` | | | | '_ ` _ \| '_ \        |
+#   |        | |___| | | (_| \__ \ | | | (_| | |_| | | | | | | |_) |       |
+#   |         \____|_|  \__,_|___/_| |_|\__,_|\__,_|_| |_| |_| .__/        |
+#   |                                                        |_|           |
+#   +----------------------------------------------------------------------+
+#   | Icon for a crashed check with a link to the crash dump page.        |
+#   '----------------------------------------------------------------------'

+def paint_icon_crashed_check(what, row, tags, custom_vars):
+    if what == "service" \
+        and row["service_state"] == 3 \
+        and "check failed - please submit a crash report!" in row["service_plugin_output"] :
+        crashurl = html.makeuri([("site", row["site"]), ("host", row["host_name"]), ("service", row["service_description"])], filename="crashed_check.py")
+        return '<a href="%s">%s</a>' % (
+            crashurl, html.render_icon('crash',
+            _("This check crashed. Please click here for more information. You also can submit "
+              "a crash report to the development team if you like.")))
+
+multisite_icons.append({
+    'service_columns' : [ 'plugin_output', 'state', 'host_name' ],
+    'paint'           : paint_icon_crashed_check,
+})
diff -Nru check-mk-1.2.2p3/plugins/icons/inventory.py check-mk-1.2.6p12/plugins/icons/inventory.py
--- check-mk-1.2.2p3/plugins/icons/inventory.py	1970-01-01 00:00:00.000000000 +0000
+++ check-mk-1.2.6p12/plugins/icons/inventory.py	2015-09-21 10:59:54.000000000 +0000
@@ -0,0 +1,37 @@
+#!/usr/bin/python
+# -*- encoding: utf-8; py-indent-offset: 4 -*-
+# +------------------------------------------------------------------+
+# |             ____ _               _        __  __ _  __           |
+# |            / ___| |__   ___  ___| | __   |  \/  | |/ /           |
+# |           | |   | '_ \ / _ \/ __| |/ /   | |\/| | ' /            |
+# |           | |___| | | |  __/ (__|   <    | |  | | . \            |
+# |            \____|_| |_|\___|\___|_|\_\___|_|  |_|_|\_\           |
+# |                                                                  |
+# | Copyright Mathias Kettner 2014             mk@mathias-kettner.de |
+# +------------------------------------------------------------------+
+#
+# This file is part of Check_MK.
+# The official homepage is at http://mathias-kettner.de/check_mk.
+#
+# check_mk is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by
+# the Free Software Foundation in version 2. check_mk is distributed
+# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with-
+# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE. See the GNU General Public License for more de-
+# tails. You should have received a copy of the GNU General Public
+# License along with GNU Make; see the file COPYING. If not, write
+# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
+# Boston, MA 02110-1301 USA.
+
+def paint_icon_inventory(what, row, tags, custom_vars):
+    if (what == "host" or row.get("service_check_command","").startswith("check_mk_active-cmk_inv!")) \
+        and inventory.has_inventory(row["host_name"]):
+        return link_to_view(html.render_icon('inv', _("Show Hardware/Software-Inventory of this host")),
+                            row, 'inv_host' )
+
+multisite_icons.append({
+    'host_columns': [ "name" ],
+    'paint': paint_icon_inventory,
+})
+
diff -Nru check-mk-1.2.2p3/plugins/icons/wato.py check-mk-1.2.6p12/plugins/icons/wato.py
--- check-mk-1.2.2p3/plugins/icons/wato.py	2013-11-05 09:23:09.000000000 +0000
+++ check-mk-1.2.6p12/plugins/icons/wato.py	2015-09-21 10:59:54.000000000 +0000
@@ -7,7 +7,7 @@
 # |           | |___| | | |  __/ (__|   <    | |  | | . \            |
 # |            \____|_| |_|\___|\___|_|\_\___|_|  |_|_|\_\           |
 # |                                                                  |
-# | Copyright Mathias Kettner 2013             mk@mathias-kettner.de |
+# | Copyright Mathias Kettner 2014             mk@mathias-kettner.de |
 # +------------------------------------------------------------------+
 #
 # This file is part of Check_MK.
@@ -27,17 +27,21 @@
 import wato

 def wato_link(folder, site, hostname, where):
+    if not config.wato_enabled:
+        return ""
+
     if 'X' in html.display_options:
         url = "wato.py?folder=%s&host=%s" % \
-           (htmllib.urlencode(folder), htmllib.urlencode(hostname))
+           (html.urlencode(folder), html.urlencode(hostname))
         if where == "inventory":
             url += "&mode=inventory"
-            help = _("Edit services in WATO - the Check_MK Web Administration Tool")
+            help = _("Edit services")
+            icon = "services"
         else:
             url += "&mode=edithost"
-            help = _("Open this host in WATO - the Check_MK Web Administration Tool")
-        return '' % (url, help)
+            help = _("Edit this host")
+            icon = "wato"
+        return '<a href="%s">%s</a>' % (url, html.render_icon(icon, help))
     else:
         return ""

@@ -50,7 +54,7 @@
         wato_folder = filename[6:-8].rstrip("/")
         if what == "host":
             return wato_link(wato_folder, row["site"], row["host_name"], "edithost")
-        elif row["service_description"].lower() == "check_mk inventory":
+        elif row["service_description"] in [ "Check_MK inventory", "Check_MK Discovery" ]:
             return wato_link(wato_folder, row["site"], row["host_name"], "inventory")

 multisite_icons.append({
diff -Nru check-mk-1.2.2p3/plugins/j4p_performance check-mk-1.2.6p12/plugins/j4p_performance
--- check-mk-1.2.2p3/plugins/j4p_performance	2013-11-05 09:22:37.000000000 +0000
+++ check-mk-1.2.6p12/plugins/j4p_performance	1970-01-01 00:00:00.000000000 +0000
@@ -1,134 +0,0 @@
-#!/usr/bin/python
-
-import urllib2, sys, os
-
-server   = "localhost"
-port     = 8080
-user     = "monitoring"
-password = "foobar"
-mode     = "digest"
-suburi   = "jolokia"
-instance = None
-
-vars = [
-    ( "java.lang:type=Memory/NonHeapMemoryUsage/used", "NonHeapMemoryUsage" ),
-    ( "java.lang:type=Memory/HeapMemoryUsage/used", "HeapMemoryUsage" ),
-    ( "java.lang:type=Threading/ThreadCount", "ThreadCount" ),
-    ( "java.lang:type=Threading/DaemonThreadCount", "DeamonThreadCount" ),
-    ( "java.lang:type=Threading/PeakThreadCount", "PeakThreadCount" ),
-    ( "java.lang:type=Threading/TotalStartedThreadCount", "TotalStartedThreadCount" ),
-    ( "java.lang:type=Runtime/Uptime", "Uptime" ),
-]
-
-app_vars = [
-    ('*:j2eeType=WebModule,name=/--/localhost/-/%(app)s,*/state', 'Running'),
-    ('*:path=/-/%(app)s,type=Manager,*/activeSessions', 'Sessions'),
-]
-
-servlet_vars = [
-    ('*:j2eeType=Servlet,WebModule=/--/localhost/-/%(app)s,name=%(servlet)s,*/requestCount', 'Requests')
-]
-
-# The servlets dictionary keys represent an application (webmodule) which holds a
-# list of servlets which are available within this application
-servlets = {}
-
-conffile = os.getenv("MK_CONFDIR", "/etc/check_mk") + "/j4p.conf"
-
-if instance == None:
-    instance = str(port)
-
-if os.path.exists(conffile):
-    execfile(conffile)
-
-# We have to deal with socket timeouts. Python > 2.6
-# supports timeout parameter for the urllib2.urlopen method
-# but we are on a python 2.5 system here which seem to use the
-# default socket timeout. We are local here so set it to 1 second.
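
The removed plugin loaded its settings by running execfile() on $MK_CONFDIR/j4p.conf, so the configuration was plain Python that overrode the defaults above. A hedged sketch (all values invented; the variable names are the ones defined by the plugin):

    # j4p.conf, executed as Python by the plugin
    port = 8161
    user = "monitoring"
    password = "secret"
    # one webmodule holding two servlets, as the comment above describes:
    servlets = { "myapp": [ "default", "jsp" ] }
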
-import socket
-socket.setdefaulttimeout(1.0)
-
-# Convert servlet config definitions and append it to the vars definitions
-for app, servlets in servlets.iteritems():
-    # Add application specific vars
-    for var in app_vars:
-        vars.append(((app, None), var[0] % {'app': app}, var[1]))
-
-    # Add servlet specific checks
-    for servlet in servlets:
-        for var in servlet_vars:
-            vars.append(((app, servlet), var[0] % {'app': app, 'servlet': servlet}, var[1]))
-
-
-def init_auth():
-    if user and password:
-        passwdmngr = urllib2.HTTPPasswordMgrWithDefaultRealm()
-        passwdmngr.add_password(None, "http://%s:%d/" % (server, port), user, password)
-        if mode == 'digest':
-            authhandler = urllib2.HTTPDigestAuthHandler(passwdmngr)
-        else:
-            authhandler = urllib2.HTTPBasicAuthHandler(passwdmngr)
-        opener = urllib2.build_opener(authhandler)
-        urllib2.install_opener(opener)
-
-
-def fetch_var(server, port, path, suburi):
-    url = "http://%s:%d/%s/read/%s" % (server, port, suburi, path)
-    json = urllib2.urlopen(url).read()
-
-    try:
-        obj = eval(json)
-    except Exception, e:
-        sys.stderr.write('ERROR: Invalid json code (%s)\n' % e)
-        sys.stderr.write('       Response %s\n' % json)
-        return None
-
-    if obj.get('status', 200) == 404:
-        sys.stderr.write('ERROR: Invalid response when fetching url %s\n' % url)
-        sys.stderr.write('       Response: %s\n' % json)
-
-    # Only take the value of the object. If the value is an object
-    # take the first items first value.
-    # {'Catalina:host=localhost,path=\\/test,type=Manager': {'activeSessions': 0}}
-    val = obj.get('value', None)
-    if isinstance(val, dict):
-        item = val[val.keys()[0]]
-        return item[item.keys()[0]]
-    else:
-        return val
-
-
-init_auth()
-
-# Fetch the general information first
-first = True
-for var in vars:
-    app, servlet = None, None
-    if len(var) == 2:
-        path, title = var
-    else:
-        (app, servlet), path, title = var
-
-    if servlet:
-        item = [instance, app, servlet]
-    elif app:
-        item = [instance, app]
-    else:
-        item = [instance]
-
-    try:
-        value = fetch_var(server, port, path, suburi)
-    except IOError:
-        sys.exit(1)
-    except socket.timeout:
-        sys.exit(1)
-    except:
-        # Simply ignore exceptions. Need to be removed for debugging
-        continue
-
-    if first:
-        first = False
-        sys.stdout.write("<<>>\n")
-
-    sys.stdout.write("%s %s %s\n" % (','.join(item), title, value))
-sys.exit(0)
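
The deleted code parsed Jolokia's JSON replies with eval() because it targeted Python 2.5; eval() fails on the JSON literals true/false/null and will evaluate whatever expression the server returns. On Python 2.6+ the stdlib covers this safely; a sketch, not part of this patch:

    import json, urllib2

    def fetch_json(url):
        # json.loads copes with true/false/null, which eval() cannot
        return json.loads(urllib2.urlopen(url).read())
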
diff -Nru check-mk-1.2.2p3/plugins/jar_signature check-mk-1.2.6p12/plugins/jar_signature
--- check-mk-1.2.2p3/plugins/jar_signature	1970-01-01 00:00:00.000000000 +0000
+++ check-mk-1.2.6p12/plugins/jar_signature	2015-06-24 09:48:39.000000000 +0000
@@ -0,0 +1,52 @@
+#!/bin/bash
+# +------------------------------------------------------------------+
+# |             ____ _               _        __  __ _  __           |
+# |            / ___| |__   ___  ___| | __   |  \/  | |/ /           |
+# |           | |   | '_ \ / _ \/ __| |/ /   | |\/| | ' /            |
+# |           | |___| | | |  __/ (__|   <    | |  | | . \            |
+# |            \____|_| |_|\___|\___|_|\_\___|_|  |_|_|\_\           |
+# |                                                                  |
+# | Copyright Mathias Kettner 2014             mk@mathias-kettner.de |
+# +------------------------------------------------------------------+
+#
+# This file is part of Check_MK.
+# The official homepage is at http://mathias-kettner.de/check_mk.
+#
+# check_mk is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by
+# the Free Software Foundation in version 2. check_mk is distributed
+# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with-
+# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE. See the GNU General Public License for more de-
+# tails. You should have received a copy of the GNU General Public
+# License along with GNU Make; see the file COPYING. If not, write
+# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
+# Boston, MA 02110-1301 USA.
+
+# This agent uses the program "jarsigner" to read SSL certificate
+# information of jar files and outputs the information to stdout
+# for the Check_MK check.
+# We assume that all files in the jar archive are signed with the
+# same certificate. So we only deal with the last signed file here.
+
+JAVA_HOME=/home/oracle/bin/jdk_latest_version
+JAR_PATH=/home/oracle/fmw/11gR2/as_1/forms/java/*.jar
+
+# Let user override these defaults in a configuration file
+if [ -e $MK_CONFDIR/jar_signature.cfg ] ; then
+    . $MK_CONFDIR/jar_signature.cfg
+fi
+
+PATH=$JAVA_HOME/bin:$PATH
+
+echo "<<>>"
+for JAR in $JAR_PATH; do
+    if [ -e "$JAR" ] ; then # avoid entry for '*.jar'
+        echo "[[[${JAR##*/}]]]"
+        OUTPUT=$(jarsigner -verify -verbose -certs "$JAR")
+        LINE=$(echo "$OUTPUT" | grep -n ^s | tail -n1 | cut -d: -f1)
+        echo "$(echo "$OUTPUT" | tail -n +$LINE)"
+        echo
+    fi
+done
+
diff -Nru check-mk-1.2.2p3/plugins/kaspersky_av check-mk-1.2.6p12/plugins/kaspersky_av
--- check-mk-1.2.2p3/plugins/kaspersky_av	1970-01-01 00:00:00.000000000 +0000
+++ check-mk-1.2.6p12/plugins/kaspersky_av	2015-06-24 09:48:39.000000000 +0000
@@ -0,0 +1,38 @@
+#!/bin/sh
+# +------------------------------------------------------------------+
+# |             ____ _               _        __  __ _  __           |
+# |            / ___| |__   ___  ___| | __   |  \/  | |/ /           |
+# |           | |   | '_ \ / _ \/ __| |/ /   | |\/| | ' /            |
+# |           | |___| | | |  __/ (__|   <    | |  | | . \            |
+# |            \____|_| |_|\___|\___|_|\_\___|_|  |_|_|\_\           |
+# |                                                                  |
+# | Copyright Mathias Kettner 2014             mk@mathias-kettner.de |
+# +------------------------------------------------------------------+
+#
+# This file is part of Check_MK.
+# The official homepage is at http://mathias-kettner.de/check_mk.
+#
+# check_mk is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by
+# the Free Software Foundation in version 2. check_mk is distributed
+# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with-
+# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE. See the GNU General Public License for more de-
+# tails. You should have received a copy of the GNU General Public
+# License along with GNU Make; see the file COPYING. If not, write
+# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
+# Boston, MA 02110-1301 USA.
+
+if [ -f /opt/kaspersky/kav4fs/bin/kav4fs-control ]
+then
+    echo "<<>>"
+    /opt/kaspersky/kav4fs/bin/kav4fs-control --get-stat Update
+
+    echo "<<>>"
+    /opt/kaspersky/kav4fs/bin/kav4fs-control -Q --get-stat
+
+    echo "<<>>"
+    /opt/kaspersky/kav4fs/bin/kav4fs-control --get-task-list
+
+fi
+
diff -Nru check-mk-1.2.2p3/plugins/lnx_quota check-mk-1.2.6p12/plugins/lnx_quota
--- check-mk-1.2.2p3/plugins/lnx_quota	1970-01-01 00:00:00.000000000 +0000
+++ check-mk-1.2.6p12/plugins/lnx_quota	2015-09-21 10:59:53.000000000 +0000
@@ -0,0 +1,32 @@
+#!/bin/bash
+# +------------------------------------------------------------------+
+# |             ____ _               _        __  __ _  __           |
+# |            / ___| |__   ___  ___| | __   |  \/  | |/ /           |
+# |           | |   | '_ \ / _ \/ __| |/ /   | |\/| | ' /            |
+# |           | |___| | | |  __/ (__|   <    | |  | | . \            |
+# |            \____|_| |_|\___|\___|_|\_\___|_|  |_|_|\_\           |
+# |                                                                  |
+# | Copyright Mathias Kettner 2014             mk@mathias-kettner.de |
+# +------------------------------------------------------------------+
+#
+# This file is part of Check_MK.
+# The official homepage is at http://mathias-kettner.de/check_mk.
+#
+# check_mk is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by
+# the Free Software Foundation in version 2. check_mk is distributed
+# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with-
+# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE. See the GNU General Public License for more de-
+# tails. You should have received a copy of the GNU General Public
+# License along with GNU Make; see the file COPYING. If not, write
+# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
+# Boston, MA 02110-1301 USA.
+
+if type repquota >/dev/null ; then
+    echo "<<>>"
+    for VOL in $(grep usrjquota /etc/fstab | cut -d' ' -f2); do
+        echo "[[[$VOL]]]"
+        repquota -up $VOL
+    done
+fi
diff -Nru check-mk-1.2.2p3/plugins/mailman_lists check-mk-1.2.6p12/plugins/mailman_lists
--- check-mk-1.2.2p3/plugins/mailman_lists	2013-03-04 11:48:41.000000000 +0000
+++ check-mk-1.2.6p12/plugins/mailman_lists	2015-06-24 09:48:39.000000000 +0000
@@ -1,5 +1,29 @@
 #!/usr/bin/python
-# encoding: utf-8
+# -*- encoding: utf-8; py-indent-offset: 4 -*-
+# +------------------------------------------------------------------+
+# |             ____ _               _        __  __ _  __           |
+# |            / ___| |__   ___  ___| | __   |  \/  | |/ /           |
+# |           | |   | '_ \ / _ \/ __| |/ /   | |\/| | ' /            |
+# |           | |___| | | |  __/ (__|   <    | |  | | . \            |
+# |            \____|_| |_|\___|\___|_|\_\___|_|  |_|_|\_\           |
+# |                                                                  |
+# | Copyright Mathias Kettner 2014             mk@mathias-kettner.de |
+# +------------------------------------------------------------------+
+#
+# This file is part of Check_MK.
+# The official homepage is at http://mathias-kettner.de/check_mk.
+#
+# check_mk is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by
+# the Free Software Foundation in version 2. check_mk is distributed
+# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with-
+# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE. See the GNU General Public License for more de-
+# tails. You should have received a copy of the GNU General Public
+# License along with GNU Make; see the file COPYING. If not, write
+# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
+# Boston, MA 02110-1301 USA.
+
 # This Check_MK-Agent plugin gathers information about mailinglists hosted
 # by the local mailman instance.
diff -Nru check-mk-1.2.2p3/plugins/mk_inventory.aix check-mk-1.2.6p12/plugins/mk_inventory.aix
--- check-mk-1.2.2p3/plugins/mk_inventory.aix	1970-01-01 00:00:00.000000000 +0000
+++ check-mk-1.2.6p12/plugins/mk_inventory.aix	2015-06-24 09:48:39.000000000 +0000
@@ -0,0 +1,69 @@
+#!/bin/bash
+# +------------------------------------------------------------------+
+# |             ____ _               _        __  __ _  __           |
+# |            / ___| |__   ___  ___| | __   |  \/  | |/ /           |
+# |           | |   | '_ \ / _ \/ __| |/ /   | |\/| | ' /            |
+# |           | |___| | | |  __/ (__|   <    | |  | | . \            |
+# |            \____|_| |_|\___|\___|_|\_\___|_|  |_|_|\_\           |
+# |                                                                  |
+# | Copyright Mathias Kettner 2014             mk@mathias-kettner.de |
+# +------------------------------------------------------------------+
+#
+# This file is part of Check_MK.
+# The official homepage is at http://mathias-kettner.de/check_mk.
+#
+# check_mk is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by
+# the Free Software Foundation in version 2. check_mk is distributed
+# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with-
+# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE. See the GNU General Public License for more de-
+# tails. You should have received a copy of the GNU General Public
+# License along with GNU Make; see the file COPYING. If not, write
+# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
+# Boston, MA 02110-1301 USA.
+
+# Run and *send* only once every 4 hours
+INTERVAL=14400
+
+FLAGFILE=$MK_CONFDIR/mk_inventory.last.$REMOTE
+NOW=$(date +%s)
+UNTIL=$((NOW + INTERVAL + 600))
+
+# check if flagfile exists
+if [ -e "$FLAGFILE" ]; then
+    LAST_RUN=$(cat $FLAGFILE)
+else
+    # First run of the script
+    LAST_RUN=0
+fi
+
+if [ $(( NOW - LAST_RUN )) -ge $INTERVAL ]
+then
+    echo $NOW > $FLAGFILE
+
+    # List of installed AIX packages
+    if type lslpp >/dev/null; then
+        echo "<<>>"
+        lslpp -c -L
+    fi
+
+    if type oslevel > /dev/null; then
+        # base level of the system
+        echo "<<>>"
+        oslevel
+
+        # list the known service packs on a system
+        echo "<<>>"
+        oslevel -s
+    fi
+
+    # If you run the prtconf command without any flags, it displays the system model, machine serial,
+    # processor type, number of processors, processor clock speed, cpu type, total memory size, network information, filesystem
+    # information, paging space information, and devices information.
+    if type prtconf >/dev/null ; then
+        echo "<<>>"
+        prtconf
+    fi
+fi
+
diff -Nru check-mk-1.2.2p3/plugins/mk_inventory.linux check-mk-1.2.6p12/plugins/mk_inventory.linux
--- check-mk-1.2.2p3/plugins/mk_inventory.linux	1970-01-01 00:00:00.000000000 +0000
+++ check-mk-1.2.6p12/plugins/mk_inventory.linux	2015-09-21 10:59:53.000000000 +0000
@@ -0,0 +1,85 @@
+#!/bin/bash
+# +------------------------------------------------------------------+
+# |             ____ _               _        __  __ _  __           |
+# |            / ___| |__   ___  ___| | __   |  \/  | |/ /           |
+# |           | |   | '_ \ / _ \/ __| |/ /   | |\/| | ' /            |
+# |           | |___| | | |  __/ (__|   <    | |  | | . \            |
+# |            \____|_| |_|\___|\___|_|\_\___|_|  |_|_|\_\           |
+# |                                                                  |
+# | Copyright Mathias Kettner 2014             mk@mathias-kettner.de |
+# +------------------------------------------------------------------+
+#
+# This file is part of Check_MK.
+# The official homepage is at http://mathias-kettner.de/check_mk.
+#
+# check_mk is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by
+# the Free Software Foundation in version 2. check_mk is distributed
+# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with-
+# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE. See the GNU General Public License for more de-
+# tails. You should have received a copy of the GNU General Public
+# License along with GNU Make; see the file COPYING. If not, write
+# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
+# Boston, MA 02110-1301 USA.
+
+# Run and *send* only once every __ seconds
+. $MK_CONFDIR/mk_inventory.cfg 2>/dev/null || true
+
+# Default to four hours
+INTERVAL=${INVENTORY_INTERVAL:-14400}
+
+FLAGFILE=$MK_VARDIR/mk_inventory.last.$REMOTE
+LAST_RUN=$(stat -c %Y $FLAGFILE)
+NOW=$(date +%s)
+UNTIL=$((NOW + INTERVAL + 600))
+
+if [ $(( NOW - LAST_RUN )) -ge $INTERVAL ]
+then
+    touch $FLAGFILE
+
+    # List of DEB packages
+    if type dpkg-query >/dev/null; then
+        echo "<<>>"
+        dpkg-query --show --showformat='${Package}|${Version}|${Architecture}|deb|${Summary}|${Status}\n'
+    fi
+
+    # List of RPM packages in same format
+    if type rpm >/dev/null; then
+        echo "<<>>"
+        rpm -qa --qf '%{NAME}\t%{VERSION}\t%{ARCH}\trpm\t%{SUMMARY}\t-\n'
+    fi
+
+    # Information about distribution
+    echo "<<>>"
+    for f in /etc/{debian_version,lsb-release,redhat-release,SuSE-release} ; do
+        if [ -e $f ] ; then
+            echo -n "$f|" ; tr \\n \| < $f | sed 's/|$//' ; echo
+        fi
+    done
+
+    # CPU Information. We need just the first one
+    if [ -e /proc/cpuinfo ] ; then
+        echo "<<>>"
+        sed 's/[[:space:]]*:[[:space:]]*/:/' < /proc/cpuinfo
+    fi
+
+    # Information about main board, memory, etc.
+    if type dmidecode >/dev/null ; then
+        echo "<<>>"
+        dmidecode -q | sed 's/\t/:/g'
+    fi
+
+    # Information about kernel architecture
+    if type uname >/dev/null ; then
+        echo "<<>>"
+        uname -m
+        uname -r
+    fi
+    if type lspci > /dev/null ; then
+        echo "<<>>"
+        lspci -v -s $(lspci | grep VGA | cut -d" " -f 1)
+    fi
+
+fi
+
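The rewritten Linux plugin sources $MK_CONFDIR/mk_inventory.cfg before computing the interval, so the four-hour default can be overridden per host. A minimal sketch; INVENTORY_INTERVAL is the variable the script actually reads, the value is an example:

    # mk_inventory.cfg
    INVENTORY_INTERVAL=7200    # send the inventory sections every two hours
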
diff -Nru check-mk-1.2.2p3/plugins/mk_inventory.solaris check-mk-1.2.6p12/plugins/mk_inventory.solaris
--- check-mk-1.2.2p3/plugins/mk_inventory.solaris	1970-01-01 00:00:00.000000000 +0000
+++ check-mk-1.2.6p12/plugins/mk_inventory.solaris	2015-06-24 09:48:39.000000000 +0000
@@ -0,0 +1,71 @@
+#!/bin/bash
+# +------------------------------------------------------------------+
+# |             ____ _               _        __  __ _  __           |
+# |            / ___| |__   ___  ___| | __   |  \/  | |/ /           |
+# |           | |   | '_ \ / _ \/ __| |/ /   | |\/| | ' /            |
+# |           | |___| | | |  __/ (__|   <    | |  | | . \            |
+# |            \____|_| |_|\___|\___|_|\_\___|_|  |_|_|\_\           |
+# |                                                                  |
+# | Copyright Mathias Kettner 2014             mk@mathias-kettner.de |
+# +------------------------------------------------------------------+
+#
+# This file is part of Check_MK.
+# The official homepage is at http://mathias-kettner.de/check_mk.
+#
+# check_mk is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by
+# the Free Software Foundation in version 2. check_mk is distributed
+# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with-
+# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE. See the GNU General Public License for more de-
+# tails. You should have received a copy of the GNU General Public
+# License along with GNU Make; see the file COPYING. If not, write
+# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
+# Boston, MA 02110-1301 USA.
+
+
+# Run and *send* only once every 4 hours
+INTERVAL=14400
+
+FLAGFILE=$MK_VARDIR/mk_inventory.last.$REMOTE
+NOW=$(truss /usr/bin/date 2>&1 | grep ^time | awk -F"= " '{print $2}')
+UNTIL=$((NOW + INTERVAL + 600))
+
+# check if flagfile exists
+if [ -e "$FLAGFILE" ]; then
+    LAST_RUN=$(cat $FLAGFILE)
+else
+    # First run of the script
+    LAST_RUN=0
+fi
+
+if [ $(( NOW - LAST_RUN )) -ge $INTERVAL ]
+then
+    echo $NOW > $FLAGFILE
+
+    echo "<<>>"
+    uname -X
+
+    if type prtdiag > /dev/null; then
+        echo "<<>>"
+        serial=`sneep -t serial`;echo "SerialNumber: $serial"
+        prtdiag -v
+    fi
+
+    if type psrinfo > /dev/null; then
+        echo "<<>>"
+        psrinfo -p -v
+    fi
+
+    if type prtpicl > /dev/null; then
+        echo "<<>>"
+        prtpicl -v
+    fi
+
+
+    if type pkginfo >/dev/null ; then
+        echo "<<>>"
+        pkginfo -l
+    fi
+fi
+
diff -Nru check-mk-1.2.2p3/plugins/mk_jolokia check-mk-1.2.6p12/plugins/mk_jolokia
--- check-mk-1.2.2p3/plugins/mk_jolokia	2013-05-06 09:58:46.000000000 +0000
+++ check-mk-1.2.6p12/plugins/mk_jolokia	2015-09-21 10:59:53.000000000 +0000
@@ -1,4 +1,28 @@
 #!/usr/bin/python
+# -*- encoding: utf-8; py-indent-offset: 4 -*-
+# +------------------------------------------------------------------+
+# |             ____ _               _        __  __ _  __           |
+# |            / ___| |__   ___  ___| | __   |  \/  | |/ /           |
+# |           | |   | '_ \ / _ \/ __| |/ /   | |\/| | ' /            |
+# |           | |___| | | |  __/ (__|   <    | |  | | . \            |
+# |            \____|_| |_|\___|\___|_|\_\___|_|  |_|_|\_\           |
+# |                                                                  |
+# | Copyright Mathias Kettner 2014             mk@mathias-kettner.de |
+# +------------------------------------------------------------------+
+#
+# This file is part of Check_MK.
+# The official homepage is at http://mathias-kettner.de/check_mk.
+#
+# check_mk is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by
+# the Free Software Foundation in version 2. check_mk is distributed
+# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with-
+# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE. See the GNU General Public License for more de-
+# tails. You should have received a copy of the GNU General Public
+# License along with GNU Make; see the file COPYING. If not, write
+# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
+# Boston, MA 02110-1301 USA.

 import urllib2, sys, os, socket, pprint
@@ -31,7 +55,7 @@
     # Only take the value of the object. If the value is an object
     # take the first items first value.
     # {'Catalina:host=localhost,path=\\/test,type=Manager': {'activeSessions': 0}}
-    if 'value' not in obj: 
+    if 'value' not in obj:
         if opt_debug:
             sys.stderr.write("ERROR: not found: %s\n" % path)
         return []
@@ -103,11 +127,14 @@

     # Determine type of server
     server_info = fetch_var(inst["server"], inst["port"], "", inst["suburi"], "")
+    sys.stdout.write('<<>>\n')
     if server_info:
         d = dict(server_info)
         version      = d.get(('info', 'version'), "unknown")
         product      = d.get(('info', 'product'), "unknown")
+        if inst.has_key("product"):
+            product = inst["product"]
         agentversion = d.get(('agent',), "unknown")
         sys.stdout.write("%s %s %s %s\n" % (inst["instance"], product, version, agentversion))
     else:
@@ -136,6 +163,9 @@
                 print "INTERNAL ERROR: %s" % value
                 continue

+            if "threadStatus" in subinstance or "threadParam" in subinstance:
+                continue
+
             if len(subinstance) > 1:
                 item = ",".join((inst["instance"],) + subinstance[:-1])
             else:
@@ -170,6 +200,8 @@
     ( "java.lang:type=Threading/PeakThreadCount", "PeakThreadCount", [] ),
     ( "java.lang:type=Threading/TotalStartedThreadCount", "TotalStartedThreadCount", [] ),
     ( "java.lang:type=Runtime/Uptime", "Uptime", [] ),
+    ( "java.lang:type=GarbageCollector,name=*/CollectionCount", "", [] ),
+    ( "java.lang:type=GarbageCollector,name=*/CollectionTime", "", [] ),
 ]

@@ -189,7 +221,9 @@
     ( "*:j2eeType=Servlet,name=default,*/stateName", None, [ "WebModule" ] ),
     # Check not yet working
     ( "*:j2eeType=Servlet,name=default,*/requestCount", None, [ "WebModule" ]),
-
+    ( "*:name=*,type=ThreadPool/maxThreads", None, []),
+    ( "*:name=*,type=ThreadPool/currentThreadCount", None, []),
+    ( "*:name=*,type=ThreadPool/currentThreadsBusy", None, []),
     # too wide location for addressing the right info
     # ( "*:j2eeType=Servlet,*/requestCount", None, [ "WebModule" ] ),
@@ -229,12 +263,12 @@
 # of dicts).
 for inst in instances:
     for varname, value in [
-        ( "server", server ),
-        ( "port", port ),
-        ( "user", user ),
+        ( "server",   server ),
+        ( "port",     port ),
+        ( "user",     user ),
         ( "password", password ),
-        ( "mode", mode ),
-        ( "suburi", suburi ),
+        ( "mode",     mode ),
+        ( "suburi",   suburi ),
         ( "instance", instance )]:
         if varname not in inst:
             inst[varname] = value
@@ -243,6 +277,3 @@
     inst["instance"] = inst["instance"].replace(" ", "_")

     query_instance(inst)
-
-
-
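
mk_jolokia copies the server/port/user/password/mode/suburi/instance defaults into every entry of an instances list, and this version additionally lets a single instance override the reported product. A hedged sketch of such a configuration; the jolokia.cfg file name is an assumption, and all values are examples:

    # jolokia.cfg, read as Python by the plugin
    instances = [
        { "port": 8080, "instance": "app1", "product": "tomcat" },
        { "port": 8081, "instance": "app2" },
    ]
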
diff -Nru check-mk-1.2.2p3/plugins/mk_logins check-mk-1.2.6p12/plugins/mk_logins
--- check-mk-1.2.2p3/plugins/mk_logins	1970-01-01 00:00:00.000000000 +0000
+++ check-mk-1.2.6p12/plugins/mk_logins	2015-06-24 09:48:39.000000000 +0000
@@ -0,0 +1,29 @@
+#!/bin/bash
+# +------------------------------------------------------------------+
+# |             ____ _               _        __  __ _  __           |
+# |            / ___| |__   ___  ___| | __   |  \/  | |/ /           |
+# |           | |   | '_ \ / _ \/ __| |/ /   | |\/| | ' /            |
+# |           | |___| | | |  __/ (__|   <    | |  | | . \            |
+# |            \____|_| |_|\___|\___|_|\_\___|_|  |_|_|\_\           |
+# |                                                                  |
+# | Copyright Mathias Kettner 2014             mk@mathias-kettner.de |
+# +------------------------------------------------------------------+
+#
+# This file is part of Check_MK.
+# The official homepage is at http://mathias-kettner.de/check_mk.
+#
+# check_mk is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by
+# the Free Software Foundation in version 2. check_mk is distributed
+# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with-
+# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE. See the GNU General Public License for more de-
+# tails. You should have received a copy of the GNU General Public
+# License along with GNU Make; see the file COPYING. If not, write
+# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
+# Boston, MA 02110-1301 USA.
+
+if type who >/dev/null; then
+    echo "<<>>"
+    who | wc -l
+fi
diff -Nru check-mk-1.2.2p3/plugins/mk_logwatch check-mk-1.2.6p12/plugins/mk_logwatch
--- check-mk-1.2.2p3/plugins/mk_logwatch	2013-11-05 09:22:37.000000000 +0000
+++ check-mk-1.2.6p12/plugins/mk_logwatch	2015-09-21 10:59:53.000000000 +0000
@@ -7,7 +7,7 @@
 # |           | |___| | | |  __/ (__|   <    | |  | | . \            |
 # |            \____|_| |_|\___|\___|_|\_\___|_|  |_|_|\_\           |
 # |                                                                  |
-# | Copyright Mathias Kettner 2010             mk@mathias-kettner.de |
+# | Copyright Mathias Kettner 2014             mk@mathias-kettner.de |
 # +------------------------------------------------------------------+
 #
 # This file is part of Check_MK.
@@ -26,9 +26,9 @@

 # Call with -d for debug mode: colored output, no saving of status

-import sys, os, re, time
+import sys, os, re, time, glob

-if '-d' in sys.argv[1:]:
+if '-d' in sys.argv[1:] or '--debug' in sys.argv[1:]:
     tty_red     = '\033[1;31m'
     tty_green   = '\033[1;32m'
     tty_yellow  = '\033[1;33m'
@@ -41,7 +41,7 @@
     tty_yellow  = ''
     tty_blue    = ''
     tty_normal  = ''
-    debug = False
+    debug       = False

 # The configuration file and status file are searched
 # in the directory named by the environment variable
@@ -49,15 +49,18 @@
 # If that is not set either, the current directory is
 # used.
 logwatch_dir = os.getenv("LOGWATCH_DIR")
-if not logwatch_dir:
-    logwatch_dir = os.getenv("MK_CONFDIR")
-    if not logwatch_dir:
-        logwatch_dir = "."
-
+if logwatch_dir:
+    mk_confdir = logwatch_dir
+    mk_vardir  = logwatch_dir
+else:
+    mk_confdir = os.getenv("MK_CONFDIR") or "."
+    mk_vardir  = os.getenv("MK_VARDIR") or "."
+
 print "<<>>"

-config_filename = logwatch_dir + "/logwatch.cfg"
-status_filename = logwatch_dir + "/logwatch.state"
+config_filename = mk_confdir + "/logwatch.cfg"
+config_dir      = mk_confdir + "/logwatch.d/*.cfg"
+status_filename = mk_vardir + "/logwatch.state"

 def is_not_comment(line):
     if line.lstrip().startswith('#') or \
@@ -68,43 +71,68 @@
 def parse_filenames(line):
     return line.split()

-def parse_pattern(line):
-    level, pattern = line.split(None, 1)
+def parse_pattern(level, pattern):
+    if level not in [ 'C', 'W', 'I', 'O' ]:
+        raise(Exception("Invalid pattern line '%s'" % line))
     try:
         compiled = re.compile(pattern)
     except:
         raise(Exception("Invalid regular expression in line '%s'" % line))
-    if level not in [ 'C', 'W', 'I', 'O' ]:
-        raise(Exception("Invalid pattern line '%s'" % line))
     return (level, compiled)

 def read_config():
     config_lines = [ line.rstrip() for line in filter(is_not_comment, file(config_filename).readlines()) ]
+    # Add config from a logwatch.d folder
+    for config_file in glob.glob(config_dir):
+        config_lines += [ line.rstrip() for line in filter(is_not_comment, file(config_file).readlines()) ]

     have_filenames = False
     config = []
-    # Line starts with whitespace -> pattern line
-    # otherwise                   -> file name line
+
     for line in config_lines:
+        rewrite = False
         if line[0].isspace(): # pattern line
             if not have_filenames:
                 raise Exception("Missing logfile names")
-            patterns.append(parse_pattern(line))
-        else:
+            level, pattern = line.split(None, 1)
+            if level == 'A':
+                cont_list.append(parse_cont_pattern(pattern))
+            elif level == 'R':
+                rewrite_list.append(pattern)
+            else:
+                level, compiled = parse_pattern(level, pattern)
+                cont_list = []    # List of continuation patterns
+                rewrite_list = [] # List of rewrite patterns
+                patterns.append((level, compiled, cont_list, rewrite_list))
+        else: # filename line
             patterns = []
             config.append((parse_filenames(line), patterns))
             have_filenames = True
     return config

+def parse_cont_pattern(pattern):
+    try:
+        return int(pattern)
+    except:
+        try:
+            return re.compile(pattern)
+        except:
+            if debug:
+                raise
+            raise Exception("Invalid regular expression in line '%s'" % pattern)
+
 # structure of statusfile
 #
 # LOGFILE         OFFSET    INODE
 # /var/log/messages|7767698|32455445
 # /var/test/x12134.log|12345|32444355
 def read_status():
+    if debug:
+        return {}
+
     status = {}
     for line in file(status_filename):
         # TODO: Remove variants with spaces. rsplit is
-        # not portale. split fails if logfilename contains
+        # not portable. split fails if logfilename contains
         # spaces
         inode = -1
         try:
@@ -127,24 +155,43 @@
     for filename, (offset, inode) in status.items():
         f.write("%s|%d|%d\n" % (filename, offset, inode))

+pushed_back_line = None
+def next_line(file_handle):
+    global pushed_back_line
+    if pushed_back_line != None:
+        line = pushed_back_line
+        pushed_back_line = None
+        return line
+    else:
+        try:
+            line = file_handle.next()
+            return line
+        except:
+            return None
+
+
 def process_logfile(logfile, patterns):
+    global pushed_back_line
+
     # Look at which file offset we have finished scanning
     # the logfile last time. If we have never seen this file
     # before, we set the offset to -1
     offset, prev_inode = status.get(logfile, (-1, -1))
     try:
-        fl = os.open(logfile, os.O_RDONLY)
-        inode = os.fstat(fl)[1] # 1 = st_ino
+        file_desc = os.open(logfile, os.O_RDONLY)
+        inode = os.fstat(file_desc)[1] # 1 = st_ino
     except:
+        if debug:
+            raise
         print "[[[%s:cannotopen]]]" % logfile
         return

     print "[[[%s]]]" % logfile

     # Seek to the current end in order to determine file size
-    current_end = os.lseek(fl, 0, 2) # os.SEEK_END not available in Python 2.4
+    current_end = os.lseek(file_desc, 0, 2) # os.SEEK_END not available in Python 2.4
     status[logfile] = current_end, inode
-
+
     # If we have never seen this file before, we just set the
     # current pointer to the file end. We do not want to make
     # a fuss about ancient log messages...
@@ -153,7 +200,7 @@
         return
     else:
         offset = 0
-
+

     # If the inode of the logfile has changed it has apparently
     # been started from new (logfile rotation). At least we must
@@ -175,21 +222,29 @@
         offset = 0

     # now seek to offset where interesting data begins
-    os.lseek(fl, offset, 0) # os.SEEK_SET not available in Python 2.4
-    f = os.fdopen(fl)
+    os.lseek(file_desc, offset, 0) # os.SEEK_SET not available in Python 2.4
+    file_handle = os.fdopen(file_desc)
     worst = -1
     outputtxt = ""
     lines_parsed = 0
     start_time = time.time()

-    for line in f:
+    while True:
+        line = next_line(file_handle)
+        if line == None:
+            break # End of file
+
+        # Handle option maxlinesize
+        if opt_maxlinesize != None and len(line) > opt_maxlinesize:
+            line = line[:opt_maxlinesize] + "[TRUNCATED]\n"
+
         lines_parsed += 1
         # Check if maximum number of new log messages is exceeded
         if opt_maxlines != None and lines_parsed > opt_maxlines:
             outputtxt += "%s Maximum number (%d) of new log messages exceeded.\n" % (
                opt_overflow, opt_maxlines)
             worst = max(worst, opt_overflow_level)
-            os.lseek(fl, 0, 2) # Seek to end of file, skip all other messages
+            os.lseek(file_desc, 0, 2) # Seek to end of file, skip all other messages
             break

         # Check if maximum processing time (per file) is exceeded. Check only
Check only @@ -199,20 +254,55 @@ outputtxt += "%s Maximum parsing time (%.1f sec) of this log file exceeded.\n" % ( opt_overflow, opt_maxtime) worst = max(worst, opt_overflow_level) - os.lseek(fl, 0, 2) # Seek to end of file, skip all other messages + os.lseek(file_desc, 0, 2) # Seek to end of file, skip all other messages break level = "." - for lev, pattern in patterns: - if pattern.search(line[:-1]): + for lev, pattern, cont_patterns, replacements in patterns: + matches = pattern.search(line[:-1]) + if matches: level = lev levelint = {'C': 2, 'W': 1, 'O': 0, 'I': -1, '.': -1}[lev] worst = max(levelint, worst) - break + + # Check for continuation lines + for cont_pattern in cont_patterns: + if type(cont_pattern) == int: # add that many lines + for x in range(cont_pattern): + cont_line = next_line(file_handle) + if cont_line == None: # end of file + break + line = line[:-1] + "\1" + cont_line + + else: # pattern is regex + while True: + cont_line = next_line(file_handle) + if cont_line == None: # end of file + break + elif cont_pattern.search(cont_line[:-1]): + line = line[:-1] + "\1" + cont_line + else: + pushed_back_line = cont_line # sorry for stealing this line + break + + # Replacement + for replace in replacements: + line = replace.replace('\\0', line.rstrip()) + "\n" + for nr, group in enumerate(matches.groups()): + line = line.replace('\\%d' % (nr+1), group) + + break # matching rule found and executed + color = {'C': tty_red, 'W': tty_yellow, 'O': tty_green, 'I': tty_blue, '.': ''}[level] + if debug: + line = line.replace("\1", "\nCONT:") + if level == "I": + level = "." + if opt_nocontext and level == '.': + continue outputtxt += "%s%s %s%s\n" % (color, level, line[:-1], tty_normal) - new_offset = os.lseek(fl, 0, 1) # os.SEEK_CUR not available in Python 2.4 + new_offset = os.lseek(file_desc, 0, 1) # os.SEEK_CUR not available in Python 2.4 status[logfile] = new_offset, inode # output all lines if at least one warning, error or ok has been found @@ -220,9 +310,16 @@ sys.stdout.write(outputtxt) sys.stdout.flush() + # Handle option maxfilesize, regardless of warning or errors that have happened + if opt_maxfilesize != None and offset <= opt_maxfilesize and new_offset > opt_maxfilesize: + sys.stdout.write("%sW Maximum allowed logfile size (%d bytes) exceeded.%s\n" % (tty_yellow, opt_maxfilesize, tty_normal)) + + try: config = read_config() except Exception, e: + if debug: + raise print "CANNOT READ CONFIG FILE: %s" % e sys.exit(1) @@ -240,9 +337,12 @@ # Initialize options with default values opt_maxlines = None opt_maxtime = None + opt_maxlinesize = None + opt_maxfilesize = None opt_regex = None opt_overflow = 'C' opt_overflow_level = 2 + opt_nocontext = False try: options = [ o.split('=', 1) for o in filenames if '=' in o ] for key, value in options: @@ -250,6 +350,10 @@ opt_maxlines = int(value) elif key == 'maxtime': opt_maxtime = float(value) + elif key == 'maxlinesize': + opt_maxlinesize = int(value) + elif key == 'maxfilesize': + opt_maxfilesize = int(value) elif key == 'overflow': if value not in [ 'C', 'I', 'W', 'O' ]: raise Exception("Invalid value %s for overflow. 
diff -Nru check-mk-1.2.2p3/plugins/mk_logwatch_aix check-mk-1.2.6p12/plugins/mk_logwatch_aix
--- check-mk-1.2.2p3/plugins/mk_logwatch_aix	1970-01-01 00:00:00.000000000 +0000
+++ check-mk-1.2.6p12/plugins/mk_logwatch_aix	2015-09-21 10:59:53.000000000 +0000
@@ -0,0 +1,13 @@
+#!/usr/bin/ksh
+# Beware: This plugin clears the errors after each run,
+# but it creates a detailed backup in /var/log/errpt_TIMESTAMP.log
+
+echo "<<>>"
+echo "[[[errorlog]]]"
+OUT=$(errpt | awk 'NR>1 { printf "C %s\n", $0 }')
+if [[ $OUT != '' ]];then
+    echo $OUT
+    errpt -a > /var/log/errpt_$(date +%s).log
+    errclear 0
+fi
+
diff -Nru check-mk-1.2.2p3/plugins/mk_mysql check-mk-1.2.6p12/plugins/mk_mysql
--- check-mk-1.2.2p3/plugins/mk_mysql	2013-03-04 11:48:41.000000000 +0000
+++ check-mk-1.2.6p12/plugins/mk_mysql	2015-09-21 10:59:53.000000000 +0000
@@ -1,5 +1,4 @@
 #!/bin/bash
-# -*- encoding: utf-8; py-indent-offset: 4 -*-
 # +------------------------------------------------------------------+
 # |             ____ _               _        __  __ _  __           |
 # |            / ___| |__   ___  ___| | __   |  \/  | |/ /           |
@@ -7,7 +6,7 @@
 # |           | |___| | | |  __/ (__|   <    | |  | | . \            |
 # |            \____|_| |_|\___|\___|_|\_\___|_|  |_|_|\_\           |
 # |                                                                  |
-# | Copyright Mathias Kettner 2012             mk@mathias-kettner.de |
+# | Copyright Mathias Kettner 2014             mk@mathias-kettner.de |
 # +------------------------------------------------------------------+
 #
 # This file is part of Check_MK.
@@ -24,12 +23,20 @@
 # to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
 # Boston, MA 02110-1301 USA.

-if which mysqladmin > /dev/null
+if which mysqladmin >/dev/null
 then
     # Check if mysqld is running and root password setup
     echo "<<>>"
     mysqladmin --defaults-extra-file=$MK_CONFDIR/mysql.cfg ping 2>&1

     mysql --defaults-extra-file=$MK_CONFDIR/mysql.cfg -sN \
-        -e "select '<<>>' ; show global status; show global variables; select '<<>>' ; SELECT table_schema, sum( data_length + index_length ), sum( data_free ) FROM information_schema.TABLES GROUP BY table_schema;"
+        -e "select '<<>>' ;
+            show global status ; show global variables ;
+
+            select '<<>>' ;
+            SELECT table_schema, sum(data_length + index_length), sum(data_free)
+            FROM information_schema.TABLES GROUP BY table_schema"
+
+    mysql --defaults-extra-file=$MK_CONFDIR/mysql.cfg -s \
+        -e "SELECT '<<>>' ; show slave status\G"
 fi
diff -Nru check-mk-1.2.2p3/plugins/mk_oracle check-mk-1.2.6p12/plugins/mk_oracle
--- check-mk-1.2.2p3/plugins/mk_oracle	2013-11-05 09:22:37.000000000 +0000
+++ check-mk-1.2.6p12/plugins/mk_oracle	2015-09-21 10:59:53.000000000 +0000
@@ -6,7 +6,7 @@
 # |           | |___| | | |  __/ (__|   <    | |  | | . \            |
 # |            \____|_| |_|\___|\___|_|\_\___|_|  |_|_|\_\           |
 # |                                                                  |
-# | Copyright Mathias Kettner 2010             mk@mathias-kettner.de |
+# | Copyright Mathias Kettner 2014             mk@mathias-kettner.de |
 # +------------------------------------------------------------------+
 #
 # This file is part of Check_MK.
@@ -24,38 +24,71 @@
 # Boston, MA 02110-1301 USA.

 # Check_MK agent plugin for monitoring ORACLE databases
-
-# Get list of all running databases
-SIDS=$(UNIX95=true ps ax -o args | sed -n '/^\(ora\|xe\)_pmon_\([^ ]*\)/s//\2/p')
-if [ -z "$SIDS" ] ; then
-    # If on this system we've already found a database
-    if [ -e "$MK_CONFDIR/mk_oracle.found" ] ; then
-        echo '<<>>'
-        echo '<<>>'
-        echo '<<>>'
-        echo '<<>>'
+# This plugin is a result of the common work of Thorsten Bruhns
+# and Mathias Kettner. Thorsten is responsible for the ORACLE
+# stuff, Mathias for the shell hacking...
+
+# Example for mk_oracle.cfg
+# DBUSER=::::
+# ASMUSER=::::
+#
+# SYSDBA or SYSASM is optional but needed for a mounted instance
+# HOSTNAME is optional - Default is localhost
+# PORT is optional - Default is 1521
+
+while test $# -gt 0
+do
+    if [ "${1}" = '-d' ] ; then
+        set -xv ; DEBUG=1
+    elif [ "${1}" = '-t' ] ; then
+        DEBUGCONNECT=1
     fi
-    exit 0
+    shift
+done
+
+if [ ! "$MK_CONFDIR" ] ; then
+    echo "MK_CONFDIR not set!" >&2
+    exit 1
 fi

-touch $MK_CONFDIR/mk_oracle.found
+if [ ! "$MK_VARDIR" ] ; then
+    export MK_VARDIR=$MK_CONFDIR
+fi

-# Recreate data if cachefile is older than 120 seconds.
-# If you set this to 0, then the cache file will be created
-# as often as possible. If the database queries last longer
-# than your check interval, caching will be active nevertheless.
-CACHE_MAXAGE=120

-# Source the optional configuration file for this agent plugin
-if [ -e "$MK_CONFDIR/mk_oracle.cfg" ]
-then
-    . $MK_CONFDIR/mk_oracle.cfg
-fi
+# .--Config--------------------------------------------------------------.
+# |                     ____             __ _                            |
+# |                    / ___|___  _ __  / _(_) __ _                      |
+# |                   | |   / _ \| '_ \| |_| |/ _` |                     |
+# |                   | |__| (_) | | | |  _| | (_| |                     |
+# |                    \____\___/|_| |_|_| |_|\__, |                     |
+# |                                           |___/                      |
+# +----------------------------------------------------------------------+
+# | The user can override and set variables in mk_oracle.cfg             |
+# '----------------------------------------------------------------------'
+
+# Sections that run fast and do no caching
+SYNC_SECTIONS="instance sessions logswitches undostat recovery_area processes recovery_status longactivesessions dataguard_stats performance"
+
+# Sections that are run in the background and at a larger interval.
+# Note: sections not listed in SYNC_SECTIONS or ASYNC_SECTIONS will not be
+# executed at all!
+ASYNC_SECTIONS="tablespaces rman jobs ts_quotas resumable locks"
+
+# Sections that are run in the background and at a larger interval.
+# Note: _ASM_ sections are only executed when SID starts with '+'
+# sections listed in SYNC_SECTIONS or ASYNC_SECTIONS are not
+# executed for ASM.
+SYNC_ASM_SECTIONS="instance"
+ASYNC_ASM_SECTIONS="asm_diskgroup"
+
+# Interval for running async checks (in seconds)
+CACHE_MAXAGE=600

 # You can specify a list of SIDs to monitor. Those databases will
-# only be handled, if they are found running, though!
+# only be handled, if they are found running, though!
 #
-# ONLY_SIDS="XE HIRN SEPP"
+# ONLY_SIDS="XE ORCL FOO BAR"
 #
 # It is possible to filter SIDS negatively. Just add the following to
 # the mk_oracle.cfg file:
@@ -73,38 +106,879 @@
 #
 #   EXCLUDE_mysid="sessions logswitches"
 #
-#
-# This check uses a cache file to prevent problems with long running
-# SQL queries. It starts building a cache when
-#    a) no cache is present or the cache is too old and
-#    b) the cache is not currently being built
-# The cache is used for $CACHE_MAXAGE seconds. The CACHE_MAXAGE
-# option is pre-set to 120 seconds but can be changed in mk_oracle.cfg.
+# Source the optional configuration file for this agent plugin
+if [ -e "$MK_CONFDIR/mk_oracle.cfg" ]
+then
+    . $MK_CONFDIR/mk_oracle.cfg
+fi
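
A hedged sketch of an mk_oracle.cfg using the knobs documented above. The DBUSER field order (user:password:role:host:port) is inferred from the surrounding comments and is an assumption; every value is a placeholder:

    # mk_oracle.cfg, sourced as shell by the plugin
    DBUSER="checkmk:secret:::"          # empty role/host/port keep the defaults
    ONLY_SIDS="XE ORCL"
    EXCLUDE_ORCL="tablespaces rman"
    CACHE_MAXAGE=300
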
+
+#.
+# .--SQL Queries---------------------------------------------------------.
+# |        ____   ___  _        ___                  _                   |
+# |       / ___| / _ \| |      / _ \ _   _  ___ _ __(_) ___  ___         |
+# |       \___ \| | | | |     | | | | | | |/ _ \ '__| |/ _ \/ __|        |
+# |        ___) | |_| | |___  | |_| | |_| |  __/ |  | |  __/\__ \        |
+# |       |____/ \__\_\_____|  \__\_\\__,_|\___|_|  |_|\___||___/        |
+# |                                                                      |
+# +----------------------------------------------------------------------+
+# | The following functions create SQL queries for ORACLE and output     |
+# | them to stdout. All queries output the database name or the instance |
+# | name as first column.                                                |
+# '----------------------------------------------------------------------'
+
+sql_performance()
+{
+    if [ "$AT_LEAST_ORACLE_101" = 'yes' ] ; then
+        echo 'PROMPT <<>>'
+        echo "select upper(i.INSTANCE_NAME)
+                     ||'|'|| 'sys_time_model'
+                     ||'|'|| S.STAT_NAME
+                     ||'|'|| Round(s.value/1000000)
+              from v\$instance i,
+                   v\$sys_time_model s
+              where s.stat_name in('DB time', 'DB CPU')
+              order by s.stat_name;
+              select upper(i.INSTANCE_NAME)
+                     ||'|'|| 'buffer_pool_statistics'
+                     ||'|'|| b.name
+                     ||'|'|| b.db_block_gets
+                     ||'|'|| b.db_block_change
+                     ||'|'|| b.consistent_gets
+                     ||'|'|| b.physical_reads
+                     ||'|'|| b.physical_writes
+                     ||'|'|| b.FREE_BUFFER_WAIT
+                     ||'|'|| b.BUFFER_BUSY_WAIT
+              from v\$instance i, V\$BUFFER_POOL_STATISTICS b;
+              select upper(i.INSTANCE_NAME)
+                     ||'|'|| 'librarycache'
+                     ||'|'|| b.namespace
+                     ||'|'|| b.gets
+                     ||'|'|| b.gethits
+                     ||'|'|| b.pins
+                     ||'|'|| b.pinhits
+                     ||'|'|| b.reloads
+                     ||'|'|| b.invalidations
+              from v\$instance i, V\$librarycache b;"
+    fi
+}
+
+sql_tablespaces()
+{
+    echo 'PROMPT <<>>'
+    if [ "$AT_LEAST_ORACLE_102" = 'yes' ] ; then
+
+        echo "select upper(d.NAME) || '|' || file_name ||'|'|| tablespace_name ||'|'|| fstatus ||'|'|| AUTOEXTENSIBLE
+                  ||'|'|| blocks ||'|'|| maxblocks ||'|'|| USER_BLOCKS ||'|'|| INCREMENT_BY
+                  ||'|'|| ONLINE_STATUS ||'|'|| BLOCK_SIZE
+                  ||'|'|| decode(tstatus,'READ ONLY', 'READONLY', tstatus) || '|' || free_blocks
+                  ||'|'|| contents
+              from v\$database d , (
+                  select f.file_name, f.tablespace_name, f.status fstatus, f.AUTOEXTENSIBLE,
+                         f.blocks, f.maxblocks, f.USER_BLOCKS, f.INCREMENT_BY,
+                         f.ONLINE_STATUS, t.BLOCK_SIZE, t.status tstatus, nvl(sum(fs.blocks),0) free_blocks, t.contents
+                  from dba_data_files f, dba_tablespaces t, dba_free_space fs
+                  where f.tablespace_name = t.tablespace_name
+                        and f.file_id = fs.file_id(+)
+                  group by f.file_name, f.tablespace_name, f.status, f.autoextensible,
+                           f.blocks, f.maxblocks, f.user_blocks, f.increment_by, f.online_status,
+                           t.block_size, t.status, t.contents
+                  UNION
+                  select f.file_name, f.tablespace_name, f.status, f.AUTOEXTENSIBLE,
+                         f.blocks, f.maxblocks, f.USER_BLOCKS, f.INCREMENT_BY, 'TEMP',
+                         t.BLOCK_SIZE, t.status, sum(sh.blocks_free) free_blocks, 'TEMPORARY'
+                  from v\$thread th, dba_temp_files f, dba_tablespaces t, v\$temp_space_header sh
+                  WHERE f.tablespace_name = t.tablespace_name and f.file_id = sh.file_id
+                  GROUP BY th.instance, f.file_name, f.tablespace_name, f.status,
+                           f.autoextensible, f.blocks, f.maxblocks, f.user_blocks, f.increment_by,
+                           'TEMP', t.block_size, t.status);
+             "
"$AT_LEAST_ORACLE_92" = 'yes' ] ; then + + echo "select upper(d.NAME) || '|' || file_name ||'|'|| tablespace_name ||'|'|| fstatus ||'|'|| AUTOEXTENSIBLE + ||'|'|| blocks ||'|'|| maxblocks ||'|'|| USER_BLOCKS ||'|'|| INCREMENT_BY + ||'|'|| ONLINE_STATUS ||'|'|| BLOCK_SIZE + ||'|'|| decode(tstatus,'READ ONLY', 'READONLY', tstatus) || '|' || free_blocks + ||'|'|| contents + from v\$database d , ( + select f.file_name, f.tablespace_name, f.status fstatus, f.AUTOEXTENSIBLE, + f.blocks, f.maxblocks, f.USER_BLOCKS, f.INCREMENT_BY, + 'ONLINE' ONLINE_STATUS, t.BLOCK_SIZE, t.status tstatus, nvl(sum(fs.blocks),0) free_blocks, t.contents + from dba_data_files f, dba_tablespaces t, dba_free_space fs + where f.tablespace_name = t.tablespace_name + and f.file_id = fs.file_id(+) + group by f.file_name, f.tablespace_name, f.status, f.autoextensible, + f.blocks, f.maxblocks, f.user_blocks, f.increment_by, 'ONLINE', + t.block_size, t.status, t.contents + UNION + select f.file_name, f.tablespace_name, 'ONLINE' status, f.AUTOEXTENSIBLE, + f.blocks, f.maxblocks, f.USER_BLOCKS, f.INCREMENT_BY, 'TEMP', + t.BLOCK_SIZE, 'TEMP' status, sum(sh.blocks_free) free_blocks, 'TEMPORARY' + from v\$thread th, dba_temp_files f, dba_tablespaces t, v\$temp_space_header sh + WHERE f.tablespace_name = t.tablespace_name and f.file_id = sh.file_id + GROUP BY th.instance, f.file_name, f.tablespace_name, 'ONLINE', + f.autoextensible, f.blocks, f.maxblocks, f.user_blocks, f.increment_by, + 'TEMP', t.block_size, t.status); + " + fi +} + +sql_dataguard_stats() +{ + if [ "$AT_LEAST_ORACLE_102" = 'yes' ] ; then + echo 'PROMPT <<>>' + echo "SELECT upper(d.NAME) + ||'|'|| upper(d.DB_UNIQUE_NAME) + ||'|'|| d.DATABASE_ROLE + ||'|'|| ds.name + ||'|'|| ds.value + FROM v\$database d + JOIN v\$parameter vp on 1=1 + left outer join V\$dataguard_stats ds on 1=1 + WHERE vp.name = 'log_archive_config' + AND vp.value is not null + ORDER BY 1; + " + fi +} + +sql_recovery_status() +{ + echo 'PROMPT <<>>' + if [ "$AT_LEAST_ORACLE_101" = 'yes' ] ; then + echo "SELECT upper(d.NAME) + ||'|'|| d.DB_UNIQUE_NAME + ||'|'|| d.DATABASE_ROLE + ||'|'|| d.open_mode + ||'|'|| dh.file# + ||'|'|| round((dh.CHECKPOINT_TIME-to_date('01.01.1970','dd.mm.yyyy'))*24*60*60) + ||'|'|| round((sysdate-dh.CHECKPOINT_TIME)*24*60*60) + ||'|'|| dh.STATUS + ||'|'|| dh.RECOVER + ||'|'|| dh.FUZZY + ||'|'|| dh.CHECKPOINT_CHANGE# + FROM V\$datafile_header dh, v\$database d, v\$instance i + ORDER BY dh.file#; + " + elif [ "$AT_LEAST_ORACLE_92" = 'yes' ] ; then + echo "SELECT upper(d.NAME) + ||'|'|| d.NAME + ||'|'|| d.DATABASE_ROLE + ||'|'|| d.open_mode + ||'|'|| dh.file# + ||'|'|| round((dh.CHECKPOINT_TIME-to_date('01.01.1970','dd.mm.yyyy'))*24*60*60) + ||'|'|| round((sysdate-dh.CHECKPOINT_TIME)*24*60*60) + ||'|'|| dh.STATUS + ||'|'|| dh.RECOVER + ||'|'|| dh.FUZZY + ||'|'|| dh.CHECKPOINT_CHANGE# + FROM V\$datafile_header dh, v\$database d, v\$instance i + ORDER BY dh.file#; + " + fi +} + +sql_rman() +{ + if [ "$AT_LEAST_ORACLE_102" = 'yes' ] ; then + echo 'PROMPT <<>>' + echo "SELECT upper(d.NAME) + ||'|'|| a.STATUS + ||'|'|| to_char(a.START_TIME, 'YYYY-mm-dd_HH24:MI:SS') + ||'|'|| to_char(a.END_TIME, 'YYYY-mm-dd_HH24:MI:SS') + ||'|'|| replace(b.INPUT_TYPE, ' ', '_') + ||'|'|| round(((sysdate - END_TIME) * 24 * 60),0) + FROM V\$RMAN_BACKUP_JOB_DETAILS a, v\$database d, + (SELECT input_type, max(command_id) as command_id + FROM V\$RMAN_BACKUP_JOB_DETAILS + WHERE START_TIME > sysdate-14 + and input_type != 'ARCHIVELOG' + and STATUS<>'RUNNING' GROUP BY input_type) b + WHERE a.COMMAND_ID = 
+              UNION ALL
+              select name
+                     || '|COMPLETED'
+                     || '|'|| to_char(sysdate, 'YYYY-mm-dd_HH24:MI:SS')
+                     || '|'|| to_char(completed, 'YYYY-mm-dd_HH24:MI:SS')
+                     || '|ARCHIVELOG|'
+                     || round((sysdate - completed)*24*60,0)
+              from (
+                    select d.name
+                         , max(a.completion_time) completed
+                         , case when a.backup_count > 0 then 1 else 0 end
+                    from v\$archived_log a, v\$database d
+                    where a.backup_count > 0
+                      and a.dest_id in
+                          (select b.dest_id
+                           from v\$archive_dest b
+                           where b.target = 'PRIMARY'
+                             and b.SCHEDULE = 'ACTIVE'
+                          )
+                    group by d.name, case when a.backup_count > 0 then 1 else 0 end);"
+    fi
+}
+
+sql_recovery_area()
+{
+    if [ "$AT_LEAST_ORACLE_102" = 'yes' ] ; then
+        echo 'PROMPT <<>>'
+        echo "select upper(d.NAME)
+                     ||' '|| round((SPACE_USED-SPACE_RECLAIMABLE)/
+                                   (CASE NVL(SPACE_LIMIT,1) WHEN 0 THEN 1 ELSE SPACE_LIMIT END)*100)
+                     ||' '|| round(SPACE_LIMIT/1024/1024)
+                     ||' '|| round(SPACE_USED/1024/1024)
+                     ||' '|| round(SPACE_RECLAIMABLE/1024/1024)
+              from V\$RECOVERY_FILE_DEST, v\$database d;
+             "
+    fi
+}
+
+sql_undostat()
+{
+    echo 'PROMPT <<>>'
+    if [ "$AT_LEAST_ORACLE_102" = 'yes' ] ; then
+        echo "select upper(i.INSTANCE_NAME)
+                     ||'|'|| ACTIVEBLKS
+                     ||'|'|| MAXCONCURRENCY
+                     ||'|'|| TUNED_UNDORETENTION
+                     ||'|'|| maxquerylen
+                     ||'|'|| NOSPACEERRCNT
+              from v\$instance i,
+                   (select * from (select *
+                                   from v\$undostat order by end_time desc
+                                  )
+                    where rownum = 1
+                      and TUNED_UNDORETENTION > 0
+                   );
+             "
+    elif [ "$AT_LEAST_ORACLE_92" = 'yes' ] ; then
+        # TUNED_UNDORETENTION and ACTIVEBLKS are not available in Oracle <=9.2!
+        # we send a -1 for filtering in check_undostat
+        echo "select upper(i.INSTANCE_NAME)
+                     ||'|-1'
+                     ||'|'|| MAXCONCURRENCY
+                     ||'|-1'
+                     ||'|'|| maxquerylen
+                     ||'|'|| NOSPACEERRCNT
+              from v\$instance i,
+                   (select * from (select *
+                                   from v\$undostat order by end_time desc
+                                  )
+                    where rownum = 1
+                   );
+             "
+    fi
+}
+
+sql_resumable()
+{
+    echo 'PROMPT <<>>'
+    echo "select upper(i.INSTANCE_NAME)
+                 ||'|'|| u.username
+                 ||'|'|| a.SESSION_ID
+                 ||'|'|| a.status
+                 ||'|'|| a.TIMEOUT
+                 ||'|'|| round((sysdate-to_date(a.SUSPEND_TIME,'mm/dd/yy hh24:mi:ss'))*24*60*60)
+                 ||'|'|| a.ERROR_NUMBER
+                 ||'|'|| to_char(to_date(a.SUSPEND_TIME, 'mm/dd/yy hh24:mi:ss'),'mm/dd/yy_hh24:mi:ss')
+                 ||'|'|| a.RESUME_TIME
+                 ||'|'|| a.ERROR_MSG
+          from dba_resumable a, v\$instance i, dba_users u
+          where a.INSTANCE_ID = i.INSTANCE_NUMBER
+            and u.user_id = a.user_id
+            and a.SUSPEND_TIME is not null
+          union all
+          select upper(i.INSTANCE_NAME)
+                 || '|||||||||'
+          from v\$instance i
+;
+         "
+}
+
+sql_jobs()
+{
+    if [ "$AT_LEAST_ORACLE_102" = 'yes' ] ; then
+        echo 'PROMPT <<>>'
+        echo "SELECT upper(d.NAME)
+                     ||'|'|| j.OWNER
+                     ||'|'|| j.JOB_NAME
+                     ||'|'|| j.STATE
+                     ||'|'|| ROUND((TRUNC(sysdate) + j.LAST_RUN_DURATION - TRUNC(sysdate)) * 86400)
+                     ||'|'|| j.RUN_COUNT
+                     ||'|'|| j.ENABLED
+                     ||'|'|| NVL(j.NEXT_RUN_DATE, to_date('1970-01-01', 'YYYY-mm-dd'))
+                     ||'|'|| NVL(j.SCHEDULE_NAME, '-')
+                     ||'|'|| d.STATUS
+              FROM dba_scheduler_jobs j, dba_scheduler_job_run_details d, v\$database d
+              WHERE d.owner=j.OWNER AND d.JOB_NAME=j.JOB_NAME
+                AND d.LOG_ID=(SELECT max(LOG_ID) FROM dba_scheduler_job_run_details dd
+                              WHERE dd.owner=j.OWNER and dd.JOB_NAME=j.JOB_NAME
+                             );
+             "
+    fi
+}
+
+sql_ts_quotas()
+{
+    echo 'PROMPT <<>>'
+    echo "select upper(d.NAME)
+                 ||'|'|| Q.USERNAME
+                 ||'|'|| Q.TABLESPACE_NAME
+                 ||'|'|| Q.BYTES
+                 ||'|'|| Q.MAX_BYTES
+          from dba_ts_quotas Q, v\$database d
+          where max_bytes > 0
+          union all
+          select upper(d.NAME)
+                 ||'|||'
+          from v\$database d
+          order by 1;
+         "
+}
+
+sql_version()
+{
+    echo 'PROMPT <<>>'
"select upper(i.INSTANCE_NAME) + || ' ' || banner + from v\$version, v\$instance i + where banner like 'Oracle%';" +} + +sql_instance() +{ + echo 'prompt <<>>' + if [ ${ORACLE_SID:0:1} = '+' ] ; then + # ASM + echo "select upper(i.instance_name) + || '|' || i.VERSION + || '|' || i.STATUS + || '|' || i.LOGINS + || '|' || i.ARCHIVER + || '|' || round((sysdate - i.startup_time) * 24*60*60) + || '|' || '0' + || '|' || 'NO' + || '|' || 'ASM' + || '|' || 'NO' + || '|' || i.instance_name + from v\$instance i; + " + else + # normal Instance + echo "select upper(i.instance_name) + || '|' || i.VERSION + || '|' || i.STATUS + || '|' || i.LOGINS + || '|' || i.ARCHIVER + || '|' || round((sysdate - i.startup_time) * 24*60*60) + || '|' || DBID + || '|' || LOG_MODE + || '|' || DATABASE_ROLE + || '|' || FORCE_LOGGING + || '|' || d.name + from v\$instance i, v\$database d; + " + fi +} + +sql_sessions() +{ + echo 'prompt <<>>' + echo "select upper(i.instance_name) + || ' ' || CURRENT_UTILIZATION + from v\$resource_limit, v\$instance i + where RESOURCE_NAME = 'sessions'; + " +} + +sql_processes() +{ + echo 'prompt <<>>' + echo "select upper(i.instance_name) + || ' ' || CURRENT_UTILIZATION + || ' ' || ltrim(rtrim(LIMIT_VALUE)) + from v\$resource_limit, v\$instance i + where RESOURCE_NAME = 'processes'; + " +} + +sql_logswitches() +{ + echo 'prompt <<>>' + echo "select upper(i.instance_name) + || ' ' || logswitches + from v\$instance i , + (select count(1) logswitches + from v\$loghist + where first_time > sysdate - 1/24 + ); + " +} + +sql_locks() +{ + if [ "$AT_LEAST_ORACLE_101" = 'yes' ] ; then + echo 'prompt <<>>' + echo "SET SERVEROUTPUT ON feedback off +DECLARE + type x is table of varchar2(20000) index by pls_integer; + xx x; +begin + begin + execute immediate 'select upper(i.instance_name) + || ''|'' || a.sid + || ''|'' || b.serial# + || ''|'' || b.machine + || ''|'' || b.program + || ''|'' || b.process + || ''|'' || b.osuser + || ''|'' || a.ctime + || ''|'' || decode(c.owner,NULL,''NULL'',c.owner) + || ''|'' || decode(c.object_name,NULL,''NULL'',c.object_name) + from V\$LOCK a, v\$session b, dba_objects c, v\$instance i + where (a.id1, a.id2, a.type) + IN (SELECT id1, id2, type + FROM GV\$LOCK + WHERE request>0 + ) + and request=0 + and a.sid = b.sid + and a.id1 = c.object_id (+) + union all + select upper(i.instance_name) || ''|||||||||'' + from v\$instance i' + bulk collect into xx; + if xx.count >= 1 then + for i in 1 .. 
+
+sql_locks()
+{
+    if [ "$AT_LEAST_ORACLE_101" = 'yes' ] ; then
+        echo 'prompt <<<oracle_locks>>>'
+        echo "SET SERVEROUTPUT ON feedback off
+DECLARE
+    type x is table of varchar2(20000) index by pls_integer;
+    xx x;
+begin
+    begin
+        execute immediate 'select upper(i.instance_name)
+                                  || ''|'' || a.sid
+                                  || ''|'' || b.serial#
+                                  || ''|'' || b.machine
+                                  || ''|'' || b.program
+                                  || ''|'' || b.process
+                                  || ''|'' || b.osuser
+                                  || ''|'' || a.ctime
+                                  || ''|'' || decode(c.owner,NULL,''NULL'',c.owner)
+                                  || ''|'' || decode(c.object_name,NULL,''NULL'',c.object_name)
+                           from V\$LOCK a, v\$session b, dba_objects c, v\$instance i
+                           where (a.id1, a.id2, a.type)
+                                 IN (SELECT id1, id2, type
+                                     FROM GV\$LOCK
+                                     WHERE request>0
+                                    )
+                                 and request=0
+                                 and a.sid = b.sid
+                                 and a.id1 = c.object_id (+)
+                           union all
+                           select upper(i.instance_name) || ''|||||||||''
+                           from v\$instance i'
+        bulk collect into xx;
+        if xx.count >= 1 then
+            for i in 1 .. xx.count loop
+                dbms_output.put_line(xx(i));
+            end loop;
+        end if;
+    exception
+        when others then
+            for cur1 in (select upper(i.instance_name) instance_name from v\$instance i) loop
+                dbms_output.put_line(cur1.instance_name || '|||||||||'||sqlerrm);
+            end loop;
+    end;
+END;
+/
+set serverout off
+"
+    fi
+}
+
+sql_longactivesessions()
+{
+    if [ "$AT_LEAST_ORACLE_101" = 'yes' ] ; then
+        echo 'prompt <<<oracle_longactivesessions>>>'
+        echo "select upper(i.instance_name)
+                     || '|' || s.sid
+                     || '|' || s.serial#
+                     || '|' || s.machine
+                     || '|' || s.process
+                     || '|' || s.osuser
+                     || '|' || s.program
+                     || '|' || s.last_call_et
+                     || '|' || s.sql_id
+              from v\$session s, v\$instance i
+              where s.status = 'ACTIVE'
+                    and type != 'BACKGROUND'
+                    and s.username is not null
+                    and s.username not in('PUBLIC')
+                    and s.last_call_et > 60*60
+              union all
+              select upper(i.instance_name)
+                     || '||||||||'
+              from v\$instance i;
+             "
+    fi
+}
+
+sql_asm_diskgroup()
+{
+    echo 'prompt <<<oracle_asm_diskgroup>>>'
+    if [ "$AT_LEAST_ORACLE_112" = 'yes' ] ; then
+        echo "select STATE
+                     || ' ' || TYPE
+                     || ' ' || 'N'
+                     || ' ' || sector_size
+                     || ' ' || block_size
+                     || ' ' || allocation_unit_size
+                     || ' ' || total_mb
+                     || ' ' || free_mb
+                     || ' ' || required_mirror_free_mb
+                     || ' ' || usable_file_mb
+                     || ' ' || offline_disks
+                     || ' ' || voting_files
+                     || ' ' || name || '/'
+              from v\$asm_diskgroup;
+             "
+    elif [ "$AT_LEAST_ORACLE_102" = 'yes' ] ; then
+        echo "select STATE
+                     || ' ' || TYPE
+                     || ' ' || 'N'
+                     || ' ' || sector_size
+                     || ' ' || block_size
+                     || ' ' || allocation_unit_size
+                     || ' ' || total_mb
+                     || ' ' || free_mb
+                     || ' ' || required_mirror_free_mb
+                     || ' ' || usable_file_mb
+                     || ' ' || offline_disks
+                     || ' ' || 'N'
+                     || ' ' || name || '/'
+              from v\$asm_diskgroup;
+             "
+    fi
+}
+
+#.
+# .--oraenv--------------------------------------------------------------.
+# |                                                                      |
+# |    ___  _ __ __ _  ___ _ ____   __                                   |
+# |   / _ \| '__/ _` |/ _ \ '_ \ \ / /                                   |
+# |  | (_) | | | (_| |  __/ | | \ V /                                    |
+# |   \___/|_|  \__,_|\___|_| |_|\_/                                     |
+# |                                                                      |
+# +----------------------------------------------------------------------+
+# | Functions for getting the Oracle environment                         |
+# '----------------------------------------------------------------------'
+
+function set_oraenv () {
+    ORACLE_SID=${1}
+
+    test -f /etc/oratab && ORATAB=/etc/oratab
+    # /var/opt/oracle/oratab is needed for Oracle Solaris
+    test -f /var/opt/oracle/oratab && ORATAB=/var/opt/oracle/oratab
+    test -f ${ORATAB:-""} || echo "ORA-99999 oratab not found"
+    test -f ${ORATAB:-""} || exit 1
+
+    ORACLE_HOME=$(cat ${ORATAB} | grep "^"${ORACLE_SID}":" | cut -d":" -f2)
+    if [ -z $ORACLE_HOME ] ; then
+        # cut last number from SID for Oracle RAC to find entry in oratab
+        ORACLE_SID_SHORT=$(echo $ORACLE_SID | sed "s/[0-9]$//")
+        ORACLE_HOME=$(cat ${ORATAB} | grep "^"${ORACLE_SID_SHORT}":" | cut -d":" -f2)
+    fi
+
+    if [ ! -d ${ORACLE_HOME:-"not_found"} ] ; then
+        echo "ORA-99999 ORACLE_HOME for ORACLE_SID="$ORACLE_SID" not found or not existing!"
+        exit 1
+    fi
+
+    TNS_ADMIN=${TNS_ADMIN:-$MK_CONFDIR}
+
+    test -f ${TNS_ADMIN}/sqlnet.ora || ( echo "ORA-99998 Couldn't find "${TNS_ADMIN}/sqlnet.ora ; exit 1)
+
+    export ORACLE_HOME TNS_ADMIN ORACLE_SID
+}
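+
+# Illustration (hypothetical oratab entry, not from the original source):
+# a line like
+#   ORCL:/u01/app/oracle/product/11.2.0/dbhome_1:N
+# yields the ORACLE_HOME for SID "ORCL". For RAC, an instance "ORCL1"
+# usually has no own oratab line, so the trailing digit is stripped and
+# the lookup is retried with "ORCL".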
+
+function get_oraversion () {
+    set_oraenv ${1}
+    ORACLE_VERSION=$($ORACLE_HOME/bin/sqlplus -V | grep ^SQL | cut -d" " -f3 | cut -d"." -f-2)
+
+    # remove possibly existing variables
+    unset AT_LEAST_ORACLE_121 AT_LEAST_ORACLE_112 AT_LEAST_ORACLE_111 AT_LEAST_ORACLE_102 AT_LEAST_ORACLE_101 AT_LEAST_ORACLE_92
+
+    if [ "$ORACLE_VERSION" = '12.1' ] ; then
+        AT_LEAST_ORACLE_121=yes
+    fi
+
+    if [ "$ORACLE_VERSION" = '12.1' -o "$ORACLE_VERSION" = '11.2' ] ; then
+        AT_LEAST_ORACLE_112=yes
+    fi
+
+    if [ "$ORACLE_VERSION" = '12.1' -o "$ORACLE_VERSION" = '11.2' \
+         -o "$ORACLE_VERSION" = '11.1' ] ; then
+        AT_LEAST_ORACLE_111=yes
+    fi
+
+    if [ "$ORACLE_VERSION" = '12.1' -o "$ORACLE_VERSION" = '11.2' \
+         -o "$ORACLE_VERSION" = '11.1' -o "$ORACLE_VERSION" = '10.2' ] ; then
+        AT_LEAST_ORACLE_102=yes
+    fi
+
+    if [ "$ORACLE_VERSION" = '12.1' -o "$ORACLE_VERSION" = '11.2' \
+         -o "$ORACLE_VERSION" = '11.1' -o "$ORACLE_VERSION" = '10.2' \
+         -o "$ORACLE_VERSION" = '10.1' ] ; then
+        AT_LEAST_ORACLE_101=yes
+    fi
+
+    if [ "$ORACLE_VERSION" = '12.1' -o "$ORACLE_VERSION" = '11.2' \
+         -o "$ORACLE_VERSION" = '11.1' -o "$ORACLE_VERSION" = '10.2' \
+         -o "$ORACLE_VERSION" = '10.1' -o "$ORACLE_VERSION" = '9.2' ] ; then
+        AT_LEAST_ORACLE_92=yes
+    fi
+}
+
+#.
+# .--Functions-----------------------------------------------------------.
+# |             _____                 _   _                               |
+# |            |  ___|   _ _ __   ___| |_(_) ___  _ __  ___               |
+# |            | |_ | | | | '_ \ / __| __| |/ _ \| '_ \/ __|              |
+# |            |  _|| |_| | | | | (__| |_| | (_) | | | \__ \              |
+# |            |_|   \__,_|_| |_|\___|\__|_|\___/|_| |_|___/              |
+# |                                                                      |
+# +----------------------------------------------------------------------+
+# | Helper functions                                                     |
+# '----------------------------------------------------------------------'
+
+function sqlplus_internal() {
+    loc_stdin=$(cat)
+    set_oraenv $SID
+
+    # reload mk_oracle.cfg for run_cached. Otherwise some variables are missing
+    if [ -e "$MK_CONFDIR/mk_oracle.cfg" ]
+    then
+        . $MK_CONFDIR/mk_oracle.cfg
+    fi
+
+    # mk_oracle_dbuser.conf is only kept for compatibility. Do not use it anymore
+    ORACLE_USERCONF=${MK_CONFDIR}/mk_oracle_dbuser.conf
+
+    TNSPINGOK=no
+    if [ -f ${TNS_ADMIN}/tnsnames.ora ] ; then
+        if "${ORACLE_HOME}"/bin/tnsping "${ORACLE_SID}" >/dev/null 2>&1 ; then
+            TNSALIAS=$ORACLE_SID
+            TNSPINGOK=yes
+        fi
+    fi
+
+    ORADBUSER=""
+    DBPASSWORD=""
+
+    # ASM uses '+' as the first character of the SID!
+    if [ ${ORACLE_SID:0:1} = '+' ] ; then
+        ORACFGLINE=${ASMUSER}
+    else
+        # use an individual user or the default DBUSER from mk_oracle.cfg
+        dummy="DBUSER_"${ORACLE_SID}
+        ORACFGLINE=${!dummy}
+        if [ "$ORACFGLINE" = '' ] ; then
+            ORACFGLINE=${DBUSER}
+        fi
+    fi
+
+    if [ -f ${ORACLE_USERCONF} -a "${ORACFGLINE}" = '' ] ; then
+        # mk_oracle_dbuser.conf
+        ORACFGLINE=$(cat ${ORACLE_USERCONF} | grep "^"${ORACLE_SID}":")
+        # mk_oracle_dbuser.conf has ORACLE_SID as its first field, so we need an offset for all values
+        offset=1
+    else
+        # mk_oracle.cfg
+        offset=0
+    fi
+
+    ORADBUSER=$(echo ${ORACFGLINE} | cut -d":" -f$[1+offset])
+    DBPASSWORD=$(echo ${ORACFGLINE} | cut -d":" -f$[2+offset])
+    DBSYSCONNECT=$(echo ${ORACFGLINE} | cut -d":" -f$[3+offset])
+    DBHOST=$(echo ${ORACFGLINE} | cut -d":" -f$[4+offset])
+    DBPORT=$(echo ${ORACFGLINE} | cut -d":" -f$[5+offset])
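+
+    # Parsing sketch (assumed example values, not upstream code): a
+    # configuration line DBUSER="checkmk:secret:SYSDBA:dbhost:1521"
+    # yields ORADBUSER=checkmk, DBPASSWORD=secret, DBSYSCONNECT=SYSDBA,
+    # DBHOST=dbhost and DBPORT=1521. With mk_oracle_dbuser.conf the SID
+    # is prepended as an extra first field, hence offset=1 above.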
+
+    if [ ! "${ORACFGLINE}" ] ; then
+        # no configuration found
+        # => use the wallet with tnsnames.ora or EZCONNECT
+        TNSALIAS=${TNSALIAS:-"localhost:1521/${ORACLE_SID}"}
+    else
+        if [ ${DBSYSCONNECT} ] ; then
+            assysdbaconnect=" as "${DBSYSCONNECT}
+        fi
+        TNSALIAS=${TNSALIAS:-"(DESCRIPTION=(ADDRESS=(PROTOCOL=TCP)(HOST=${DBHOST:-"localhost"})(PORT=${DBPORT:-1521}))(CONNECT_DATA=(SID=${ORACLE_SID})(SERVER=DEDICATED)(UR=A)))"}
+
+        # ORADBUSER = '/'? => ignore DBPASSWORD and use the wallet
+        if [ "${ORADBUSER}" = '/' ] ; then
+            # connect with / and wallet
+            ORADBUSER=""
+            DBPASSWORD=""
+            if [ "$TNSPINGOK" = 'no' ] ; then
+                # create an EZCONNECT string when no tnsnames.ora is usable
+                # defaults to localhost:1521/<SID>
+                TNSALIAS="${DBHOST:-"localhost"}:${DBPORT:-1521}/${ORACLE_SID}"
+            fi
+        fi
+    fi
+
+    DBCONNECT="${ORADBUSER}/${DBPASSWORD}@${TNSALIAS}${assysdbaconnect}"
+
+    SQLPLUS=${ORACLE_HOME}/bin/sqlplus
+    if [ ! -x ${SQLPLUS} ] ; then
+        echo "sqlplus not found or ORACLE_HOME wrong! "
+        echo "SQLPLUS="${SQLPLUS}
+        return 1
+    fi
+
+    echo "$loc_stdin" | ${SQLPLUS} -L -s ${DBCONNECT}
+    if [ $? -ne 0 ] ; then
+        if [ "$DEBUGCONNECT" ] ; then
+            echo "Login details: ${DBCONNECT}" >&2
+        fi
+        return 1
+    fi
+}
+
+# Helper function that calls an SQL statement with a clean output
+# Usage: echo "..." | sqlplus SID
 function sqlplus ()
 {
-    if OUTPUT=$({ echo 'set pages 0' ; echo 'whenever sqlerror exit 1'; echo 'set lines 8000' ; echo 'set feedback off'; cat ; } | $MK_CONFDIR/sqlplus.sh $1)
+    local SID=$1
+    loc_stdin=$(cat)
+
+    # use sqlplus_internal when no sqlplus.sh is found
+    SQLPLUS="$MK_CONFDIR"/sqlplus.sh
+    test -f "$SQLPLUS" || SQLPLUS=sqlplus_internal
+
+    if OUTPUT=$({ echo 'set pages 0 trimspool on feedback off lines 8000' ; echo 'whenever sqlerror exit 1'; echo "$loc_stdin" ; } | "$SQLPLUS" $SID)
     then
-        echo "${OUTPUT}" | sed -e 's/[[:space:]]\+/ /g' -e '/^[[:space:]]*$/d' -e "s/^/$1 /"
+        echo "$OUTPUT"
     else
-        echo "${OUTPUT}" | sed "s/^/$1 FAILURE /"
+        echo '<<<oracle_instance>>>'
+        local SID_UPPER=$(echo "$SID" | tr '[:lower:]' '[:upper:]')
+        echo "$OUTPUT" | grep -v "^ERROR at line" | tr '\n' ' ' | sed "s/^/$SID_UPPER|FAILURE|/" ; echo
+        return 1
+    fi
+}
+
+function remove_excluded_sections ()
+{
+    local sections="$1"
+    local excluded="$2"
+    local result=""
+    for section in $sections
+    do
+        local skip=
+        for exclude in $excluded
+        do
+            if [ "$exclude" = "$section" ] ; then
+                local skip=yes
+                break
+            fi
+        done
+        if [ "$skip" != yes ] ; then
+            result="$result $section"
+        fi
+    done
+    echo "$result"
+}
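+
+# Usage sketch (hypothetical values, not upstream code): with
+# EXCLUDE_mysid="sessions",
+#   remove_excluded_sections "instance sessions logswitches" "sessions"
+# prints " instance logswitches", i.e. the section list minus the excluded
+# entries, which is then fed to do_sync_checks below.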
+
+
+# Create one SQL statement for several sections and run
+# it with sqlplus. The exit code is preserved.
+function do_sync_checks ()
+{
+    local SID=$1
+    local SECTIONS="$2"
+    for section in $SECTIONS
+    do
+        eval "sql_$section"
+    done | sqlplus $SID
+}
+
+function do_async_checks ()
+{
+    local SID=$1
+    echo "$ASYNC_SQL" | sqlplus $SID
+}
+
+# Make sure that the new shell that is being run by run_cached inherits
+# our functions
+export -f sqlplus
+export -f sqlplus_internal
+export -f do_async_checks
+export -f set_oraenv
+
+function run_cached_local () {
+    local section=
+    if [ "$1" = -s ] ; then local section="echo '<<<$2>>>' ; " ; shift ; fi
+    local NAME=$1
+    local MAXAGE=$2
+    shift 2
+    local CMDLINE="$section$@"
+
+    if [ ! -d $MK_VARDIR/cache ]; then mkdir -p $MK_VARDIR/cache ; fi
+    CACHEFILE="$MK_VARDIR/cache/$NAME.cache"
+
+    # Check if the creation of the cache takes suspiciously long and return
+    # nothing if the age (access time) of $CACHEFILE.new is twice the MAXAGE
+    local NOW=$(date +%s)
+    if [ -e "$CACHEFILE.new" ] ; then
+        local CF_ATIME=$(stat -c %X "$CACHEFILE.new")
+        if [ $((NOW - CF_ATIME)) -ge $((MAXAGE * 2)) ] ; then
+            return
+        fi
+    fi
+
+    # Check if cache file exists and is recent enough
+    if [ -s "$CACHEFILE" ] ; then
+        local MTIME=$(stat -c %Y "$CACHEFILE")
+        if [ $((NOW - MTIME)) -le $MAXAGE ] ; then local USE_CACHEFILE=1 ; fi
+        # Output the file in any case, even if it is
+        # outdated. The new file will not yet be available
+        cat "$CACHEFILE"
+    fi
+
+    # Cache file outdated and new job not yet running? Start it
+    if [ -z "$USE_CACHEFILE" -a ! -e "$CACHEFILE.new" ] ; then
+        if [ "$DEBUG" ] ; then
+            echo "set -o noclobber ; exec > \"$CACHEFILE.new\" || exit 1 ; $CMDLINE && mv \"$CACHEFILE.new\" \"$CACHEFILE\" || rm -f \"$CACHEFILE\" \"$CACHEFILE.new\"" | bash
+        else
+            echo "set -o noclobber ; exec > \"$CACHEFILE.new\" || exit 1 ; $CMDLINE && mv \"$CACHEFILE.new\" \"$CACHEFILE\" || rm -f \"$CACHEFILE\" \"$CACHEFILE.new\"" | nohup bash 2>/dev/null &
+        fi
+    fi
+}
+
+#.
+# .--Main----------------------------------------------------------------.
+# |                        __  __       _                                 |
+# |                       |  \/  | __ _(_)_ __                            |
+# |                       | |\/| |/ _` | | '_ \                           |
+# |                       | |  | | (_| | | | | |                          |
+# |                       |_|  |_|\__,_|_|_| |_|                          |
+# |                                                                      |
+# +----------------------------------------------------------------------+
+# | Iterate over all instances and execute sync and async sections.      |
+# '----------------------------------------------------------------------'
+
+# Get list of all running databases
+# ASM instances (SID beginning with '+') are picked up as well; they are
+# handled with the separate *_ASM_SECTIONS lists below.
+SIDS=$(UNIX95=true ps -ef | awk '{print $NF}' | grep -E '^asm_pmon_|^ora_pmon_|^xe_pmon_XE' | cut -d"_" -f3-)
+
+# If we have not found any running database instance, then either
+# no ORACLE is present on this system or it's just currently not running.
+# In the latter case we output empty agent sections so that Check_MK will be
+# happy and execute the actual check functions.
+if [ -z "$SIDS" -a ! -e "$MK_VARDIR/mk_oracle.found" ] ; then
+    exit
+fi
+
+# From now on we expect databases on this system (forever)
+touch $MK_VARDIR/mk_oracle.found
+
+# Make sure that always all sections are present, even
+# in case of an error. Note: the <<<oracle_instance>>>
+# section shows the general state of a database instance. If
+# that section fails for an instance then all other sections
+# do not contain valid data anyway.
+for section in $SYNC_SECTIONS $ASYNC_SECTIONS $SYNC_ASM_SECTIONS $ASYNC_ASM_SECTIONS
+do
+    echo "<<<oracle_$section>>>"
+done
+
+for SID in $SIDS
+do
+    # We need the SID in uppercase later
+    SID_UPPER=$(echo $SID | tr '[:lower:]' '[:upper:]')
-for SID in $SIDS; do
     # Check if SID is listed in ONLY_SIDS if this is used
     if [ "$ONLY_SIDS" ] ; then
         SKIP=yes
         for S in $ONLY_SIDS ; do
             if [ "$S" = "$SID" ] ; then
-              SKIP=
-              break
-            fi
+                SKIP=
+                break
+            fi
         done
-      if [ "$SKIP" ] ; then continue ; fi
+        if [ "$SKIP" ] ; then continue ; fi
     fi
-
+
+    # Handle explicit exclusion of instances
     EXCLUDE=EXCLUDE_$SID
     EXCLUDE=${!EXCLUDE}
     # SID filtered totally?
@@ -112,85 +986,39 @@
         continue
     fi
 
-    # Do Version-Check (use as a general login check) without caching
-    if [ "$EXCLUDE" = "${EXCLUDE/version/}" ]; then
-        echo '<<<oracle_version>>>'
-        echo "select banner from v\$version where banner like 'Oracle%';" | sqlplus "$SID"
-    fi
-
-    CACHE_FILE=$MK_CONFDIR/oracle_$SID.cache
-
-    # Check if file exists and recent enough
-    CACHE_FILE_UPTODATE=
-    if [ -s $CACHE_FILE ]; then
-        NOW=$(date +%s)
-        MTIME=$(stat -c %Y $CACHE_FILE)
-        if [ $(($NOW - $MTIME)) -le $CACHE_MAXAGE ]; then
-            CACHE_FILE_UPTODATE=1
-        fi
+    if [ ${SID:0:1} = '+' ] ; then
+        # ASM instance: switch to the ASM section lists
+        DO_ASYNC_SECTIONS=${ASYNC_ASM_SECTIONS}
+        DO_SYNC_SECTIONS=${SYNC_ASM_SECTIONS}
+    else
+        # normal instance
+        DO_SYNC_SECTIONS=${SYNC_SECTIONS}
+        DO_ASYNC_SECTIONS=${ASYNC_SECTIONS}
     fi
 
-    # If the cache file exists, output it, regardless of its age. If it's outdated
-    # then it will be recreated *asynchronously*. It's new contents will not
-    # be available here anyway.
-    if [ -s "$CACHE_FILE" ] ; then cat "$CACHE_FILE" ; fi
-
-    # When the cache file is not valid, we recreated it, but only if there is not
-    # yet a background process from a previous check still doing this! We see this
-    # because of the existance of the .new file
-    # When the cache is old and there is no *new file present, then start a query
-    # to update the information for this instance.
-    if [ -z "$CACHE_FILE_UPTODATE" -a ! -e "$CACHE_FILE.new" ]
-    then
-        setsid bash -c "
-            set -o noclobber
-            function sqlplus ()
-            {
-                if OUTPUT=\$({ echo 'set pages 0' ; echo 'whenever sqlerror exit 1'; echo 'set lines 8000' ; echo 'set feedback off'; cat ; } | $MK_CONFDIR/sqlplus.sh \$1)
-                then
-                    echo \"\${OUTPUT}\" | sed -e 's/[[:space:]]\+/ /g' -e '/^[[:space:]]*$/d' -e \"s/^/\$1 /\"
-                else
-                    echo \"\${OUTPUT}\" | sed \"s/^/\$1 FAILURE /\"
-                fi
-            }
-
-            {
-                # Only execute checks when not filtered
-                if [ '$EXCLUDE' = '${EXCLUDE/sessions/}' ]; then
-                    echo '<<<oracle_sessions>>>'
-                    echo \"select count(1) from v\\\$session where status = 'ACTIVE';\" | sqlplus \"$SID\"
-                fi
+    get_oraversion $SID
 
-                if [ '$EXCLUDE' = '${EXCLUDE/logswitches/}' ]; then
-                    echo '<<<oracle_logswitches>>>'
-                    echo \"select count(1) from v\\\$loghist where first_time > sysdate - 1/24;\" | sqlplus \"$SID\"
-                fi
-
-                if [ '$EXCLUDE' = '${EXCLUDE/tablespaces/}' ]; then
-                    echo '<<<oracle_tablespaces>>>'
-                    sqlplus "$SID" < $CACHE_FILE.new && mv $CACHE_FILE.new $CACHE_FILE || rm -f $CACHE_FILE*
-        "
+    # Do sync checks
+    EXCLUDED=$(eval 'echo $EXCLUDE'"_$SID")
+    SECTIONS=$(remove_excluded_sections "$DO_SYNC_SECTIONS" "$EXCLUDED")
+
+    # Do async checks
+    ASECTIONS=$(remove_excluded_sections "$DO_ASYNC_SECTIONS" "$EXCLUDED")
+    ASYNC_SQL=$(for section in $ASECTIONS ; do eval "sql_$section" ; done)
+    export ASYNC_SQL
+
+    if [ "$DEBUGCONNECT" ] ; then
+        echo "-----------------------------------------------"
+        echo "Login check to instance: "$SID" Version: "$ORACLE_VERSION
+        echo "select 'Login ok User: ' || user || ' on ' || host_name
+              from v\$instance;" | sqlplus $SID
+        echo "SYNC_SECTIONS=$SECTIONS"
+        echo "ASYNC_SECTIONS=$ASECTIONS"
+        # do not execute any check
+        continue
     fi
+
+    do_sync_checks $SID "$SECTIONS"
+
+    run_cached_local oracle_$SID $CACHE_MAXAGE do_async_checks $SID
+
+done
diff -Nru check-mk-1.2.2p3/plugins/mk_oracle.aix check-mk-1.2.6p12/plugins/mk_oracle.aix
--- check-mk-1.2.2p3/plugins/mk_oracle.aix 1970-01-01 00:00:00.000000000 +0000
+++ check-mk-1.2.6p12/plugins/mk_oracle.aix 2015-09-21 10:59:53.000000000 +0000
@@ -0,0 +1,1026 @@
+#!/bin/bash
+# +------------------------------------------------------------------+
+# |             ____ _               _        __  __ _  __           |
+# |            / ___| |__   ___  ___| | __   |  \/  | |/ /           |
+# |           | |   | '_ \ / _ \/ __| |/ /   | |\/| | ' /            |
+# |           | |___| | | |  __/ (__|   <    | |  | | . \            |
+# |            \____|_| |_|\___|\___|_|\_\___|_|  |_|_|\_\           |
+# |                                                                  |
+# | Copyright Mathias Kettner 2014             mk@mathias-kettner.de |
+# +------------------------------------------------------------------+
+#
+# This file is part of Check_MK.
+# The official homepage is at http://mathias-kettner.de/check_mk.
+#
+# check_mk is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by
+# the Free Software Foundation in version 2. check_mk is distributed
+# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with-
+# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE. See the GNU General Public License for more de-
+# tails. You should have received a copy of the GNU General Public
+# License along with GNU Make; see the file COPYING. If not, write
+# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
+# Boston, MA 02110-1301 USA.
+
+# Check_MK agent plugin for monitoring ORACLE databases
+# This plugin is a result of the common work of Thorsten Bruhns
+# and Mathias Kettner. Thorsten is responsible for the ORACLE
+# stuff, Mathias for the shell hacking...
+
+# Example for mk_oracle.cfg
+# DBUSER=<username>:<password>:<SYSDBA|SYSASM>:<hostname>:<port>
+# ASMUSER=<username>:<password>:<SYSDBA|SYSASM>:<hostname>:<port>
+#
+# SYSDBA or SYSASM is optional but needed for a mounted instance
+# HOSTNAME is optional - Default is localhost
+# PORT is optional - Default is 1521
+
+while test $# -gt 0
+do
+    if [ "${1}" = '-d' ] ; then
+        set -xv ; DEBUG=1
+    elif [ "${1}" = '-t' ] ; then
+        DEBUGCONNECT=1
+    fi
+    shift
+done
+
+if [ ! "$MK_CONFDIR" ] ; then
+    echo "MK_CONFDIR not set!" >&2
+    exit 1
+fi
+
+if [ ! "$MK_VARDIR" ] ; then
+    export MK_VARDIR=$MK_CONFDIR
+fi
+
+
+# .--Config--------------------------------------------------------------.
+# |                     ____             __ _                             |
+# |                    / ___|___  _ __  / _(_) __ _                       |
+# |                   | |   / _ \| '_ \| |_| |/ _` |                      |
+# |                   | |__| (_) | | | |  _| | (_| |                      |
+# |                    \____\___/|_| |_|_| |_|\__, |                      |
+# |                                           |___/                       |
+# +----------------------------------------------------------------------+
+# | The user can override and set variables in mk_oracle.cfg             |
+# '----------------------------------------------------------------------'
+
+# Sections that run fast and are not cached
+SYNC_SECTIONS="instance sessions logswitches undostat recovery_area processes recovery_status longactivesessions dataguard_stats performance"
+
+# Sections that are run in the background and at a larger interval.
+# Note: sections not listed in SYNC_SECTIONS or ASYNC_SECTIONS will not be
+# executed at all!
+ASYNC_SECTIONS="tablespaces rman jobs ts_quotas resumable locks"
+
+# Sections that are run in the background and at a larger interval.
+# Note: _ASM_ sections are only executed when the SID starts with '+'.
+# Sections listed in SYNC_SECTIONS or ASYNC_SECTIONS are not
+# executed for ASM.
+SYNC_ASM_SECTIONS="instance"
+ASYNC_ASM_SECTIONS="asm_diskgroup"
+
+# Interval for running async checks (in seconds)
+CACHE_MAXAGE=600
+
+# You can specify a list of SIDs to monitor. Those databases will
+# only be handled, if they are found running, though!
+#
+#   ONLY_SIDS="XE ORCL FOO BAR"
+#
+# It is possible to filter SIDS negatively. Just add the following to
+# the mk_oracle.cfg file:
+#
+#   EXCLUDE_<sid>="ALL"
+#
+# Another option is to filter single checks for SIDS. Just add
+# lines as follows to the mk_oracle.cfg file. One service per
+# line:
+#
+#   EXCLUDE_<sid>="<section> <section> ..."
+#
+# For example skip oracle_sessions and oracle_logswitches checks
+# for the instance "mysid".
+#
+#   EXCLUDE_mysid="sessions logswitches"
+#
+
+# Source the optional configuration file for this agent plugin
+if [ -e "$MK_CONFDIR/mk_oracle.cfg" ]
+then
+    . $MK_CONFDIR/mk_oracle.cfg
+fi
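+
+# A minimal mk_oracle.cfg could therefore look like this (illustrative
+# values only, not part of the original plugin):
+#
+#   DBUSER="checkmk:secret"
+#   CACHE_MAXAGE=300
+#   EXCLUDE_ORCL="tablespaces"
+#
+# Every default set above can be overridden there, since the file is
+# sourced after the defaults are assigned.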
+
+#.
+# .--SQL Queries---------------------------------------------------------.
+# |        ____   ___  _        ___              _                        |
+# |       / ___| / _ \| |      / _ \ _   _  ___ _ __(_) ___  ___          |
+# |       \___ \| | | | |     | | | | | | |/ _ \ '__| |/ _ \/ __|         |
+# |        ___) | |_| | |___  | |_| | |_| |  __/ |  | |  __/\__ \         |
+# |       |____/ \__\_\_____|  \__\_\\__,_|\___|_|  |_|\___||___/         |
+# |                                                                      |
+# +----------------------------------------------------------------------+
+# | The following functions create SQL queries for ORACLE and output     |
+# | them to stdout. All queries output the database name or the instance |
+# | name as first column.                                                |
+# '----------------------------------------------------------------------'
+
+sql_performance()
+{
+    if [ "$AT_LEAST_ORACLE_101" = 'yes' ] ; then
+        echo 'PROMPT <<<oracle_performance>>>'
+        echo "select upper(i.INSTANCE_NAME)
+                     ||'|'|| 'sys_time_model'
+                     ||'|'|| S.STAT_NAME
+                     ||'|'|| Round(s.value/1000000)
+              from v\$instance i,
+                   v\$sys_time_model s
+              where s.stat_name in('DB time', 'DB CPU')
+              order by s.stat_name;
+              select upper(i.INSTANCE_NAME)
+                     ||'|'|| 'buffer_pool_statistics'
+                     ||'|'|| b.name
+                     ||'|'|| b.db_block_gets
+                     ||'|'|| b.db_block_change
+                     ||'|'|| b.consistent_gets
+                     ||'|'|| b.physical_reads
+                     ||'|'|| b.physical_writes
+                     ||'|'|| b.FREE_BUFFER_WAIT
+                     ||'|'|| b.BUFFER_BUSY_WAIT
+              from v\$instance i, V\$BUFFER_POOL_STATISTICS b;
+              select upper(i.INSTANCE_NAME)
+                     ||'|'|| 'librarycache'
+                     ||'|'|| b.namespace
+                     ||'|'|| b.gets
+                     ||'|'|| b.gethits
+                     ||'|'|| b.pins
+                     ||'|'|| b.pinhits
+                     ||'|'|| b.reloads
+                     ||'|'|| b.invalidations
+              from v\$instance i, V\$librarycache b;"
+    fi
+}
+
+sql_tablespaces()
+{
+    echo 'PROMPT <<<oracle_tablespaces>>>'
+    if [ "$AT_LEAST_ORACLE_102" = 'yes' ] ; then
+
+        echo "select upper(d.NAME) || '|' || file_name ||'|'|| tablespace_name ||'|'|| fstatus ||'|'|| AUTOEXTENSIBLE
+              ||'|'|| blocks ||'|'|| maxblocks ||'|'|| USER_BLOCKS ||'|'|| INCREMENT_BY
+              ||'|'|| ONLINE_STATUS ||'|'|| BLOCK_SIZE
+              ||'|'|| decode(tstatus,'READ ONLY', 'READONLY', tstatus) || '|' || free_blocks
+              ||'|'|| contents
+              from v\$database d , (
+                  select f.file_name, f.tablespace_name, f.status fstatus, f.AUTOEXTENSIBLE,
+                         f.blocks, f.maxblocks, f.USER_BLOCKS, f.INCREMENT_BY,
+                         f.ONLINE_STATUS, t.BLOCK_SIZE, t.status tstatus, nvl(sum(fs.blocks),0) free_blocks, t.contents
+                  from dba_data_files f, dba_tablespaces t, dba_free_space fs
+                  where f.tablespace_name = t.tablespace_name
+                        and f.file_id = fs.file_id(+)
+                  group by f.file_name, f.tablespace_name, f.status, f.autoextensible,
+                           f.blocks, f.maxblocks, f.user_blocks, f.increment_by, f.online_status,
+                           t.block_size, t.status, t.contents
+                  UNION
+                  select f.file_name, f.tablespace_name, f.status, f.AUTOEXTENSIBLE,
+                         f.blocks, f.maxblocks, f.USER_BLOCKS, f.INCREMENT_BY, 'TEMP',
+                         t.BLOCK_SIZE, t.status, sum(sh.blocks_free) free_blocks, 'TEMPORARY'
+                  from v\$thread th, dba_temp_files f, dba_tablespaces t, v\$temp_space_header sh
+                  WHERE f.tablespace_name = t.tablespace_name and f.file_id = sh.file_id
+                  GROUP BY th.instance, f.file_name, f.tablespace_name, f.status,
+                           f.autoextensible, f.blocks, f.maxblocks, f.user_blocks, f.increment_by,
+                           'TEMP', t.block_size, t.status);
+             "
+    elif [ "$AT_LEAST_ORACLE_92" = 'yes' ] ; then
+
+        echo "select upper(d.NAME) || '|' || file_name ||'|'|| tablespace_name ||'|'|| fstatus ||'|'|| AUTOEXTENSIBLE
+              ||'|'|| blocks ||'|'|| maxblocks ||'|'|| USER_BLOCKS ||'|'|| INCREMENT_BY
+              ||'|'|| ONLINE_STATUS ||'|'|| BLOCK_SIZE
+              ||'|'|| decode(tstatus,'READ ONLY', 'READONLY', tstatus) || '|' || free_blocks
+              ||'|'|| contents
+              from v\$database d , (
+                  select f.file_name, f.tablespace_name, f.status fstatus, f.AUTOEXTENSIBLE,
+                         f.blocks, f.maxblocks, f.USER_BLOCKS, f.INCREMENT_BY,
+                         'ONLINE' ONLINE_STATUS, t.BLOCK_SIZE, t.status tstatus, nvl(sum(fs.blocks),0) free_blocks, t.contents
+                  from dba_data_files f, dba_tablespaces t, dba_free_space fs
+                  where f.tablespace_name = t.tablespace_name
+                        and f.file_id = fs.file_id(+)
+                  group by f.file_name, f.tablespace_name, f.status, f.autoextensible,
+                           f.blocks, f.maxblocks, f.user_blocks, f.increment_by, 'ONLINE',
+                           t.block_size, t.status, t.contents
+                  UNION
+                  select f.file_name, f.tablespace_name, 'ONLINE' status, f.AUTOEXTENSIBLE,
+                         f.blocks, f.maxblocks, f.USER_BLOCKS, f.INCREMENT_BY, 'TEMP',
+                         t.BLOCK_SIZE, 'TEMP' status, sum(sh.blocks_free) free_blocks, 'TEMPORARY'
+                  from v\$thread th, dba_temp_files f, dba_tablespaces t, v\$temp_space_header sh
+                  WHERE f.tablespace_name = t.tablespace_name and f.file_id = sh.file_id
+                  GROUP BY th.instance, f.file_name, f.tablespace_name, 'ONLINE',
+                           f.autoextensible, f.blocks, f.maxblocks, f.user_blocks, f.increment_by,
+                           'TEMP', t.block_size, t.status);
+             "
+    fi
+}
+
+sql_dataguard_stats()
+{
+    if [ "$AT_LEAST_ORACLE_102" = 'yes' ] ; then
+        echo 'PROMPT <<<oracle_dataguard_stats>>>'
+        echo "SELECT upper(d.NAME)
+                     ||'|'|| upper(d.DB_UNIQUE_NAME)
+                     ||'|'|| d.DATABASE_ROLE
+                     ||'|'|| ds.name
+                     ||'|'|| ds.value
+              FROM v\$database d
+              JOIN v\$parameter vp on 1=1
+              left outer join V\$dataguard_stats ds on 1=1
+              WHERE vp.name = 'log_archive_config'
+                    AND vp.value is not null
+              ORDER BY 1;
+             "
+    fi
+}
+
+sql_recovery_status()
+{
+    echo 'PROMPT <<<oracle_recovery_status>>>'
+    if [ "$AT_LEAST_ORACLE_101" = 'yes' ] ; then
+        echo "SELECT upper(d.NAME)
+                     ||'|'|| d.DB_UNIQUE_NAME
+                     ||'|'|| d.DATABASE_ROLE
+                     ||'|'|| d.open_mode
+                     ||'|'|| dh.file#
+                     ||'|'|| round((dh.CHECKPOINT_TIME-to_date('01.01.1970','dd.mm.yyyy'))*24*60*60)
+                     ||'|'|| round((sysdate-dh.CHECKPOINT_TIME)*24*60*60)
+                     ||'|'|| dh.STATUS
+                     ||'|'|| dh.RECOVER
+                     ||'|'|| dh.FUZZY
+                     ||'|'|| dh.CHECKPOINT_CHANGE#
+              FROM V\$datafile_header dh, v\$database d, v\$instance i
+              ORDER BY dh.file#;
+             "
+    elif [ "$AT_LEAST_ORACLE_92" = 'yes' ] ; then
+        echo "SELECT upper(d.NAME)
+                     ||'|'|| d.NAME
+                     ||'|'|| d.DATABASE_ROLE
+                     ||'|'|| d.open_mode
+                     ||'|'|| dh.file#
+                     ||'|'|| round((dh.CHECKPOINT_TIME-to_date('01.01.1970','dd.mm.yyyy'))*24*60*60)
+                     ||'|'|| round((sysdate-dh.CHECKPOINT_TIME)*24*60*60)
+                     ||'|'|| dh.STATUS
+                     ||'|'|| dh.RECOVER
+                     ||'|'|| dh.FUZZY
+                     ||'|'|| dh.CHECKPOINT_CHANGE#
+              FROM V\$datafile_header dh, v\$database d, v\$instance i
+              ORDER BY dh.file#;
+             "
+    fi
+}
+
+sql_rman()
+{
+    if [ "$AT_LEAST_ORACLE_102" = 'yes' ] ; then
+        echo 'PROMPT <<<oracle_rman>>>'
+        echo "SELECT upper(d.NAME)
+                     ||'|'|| a.STATUS
+                     ||'|'|| to_char(a.START_TIME, 'YYYY-mm-dd_HH24:MI:SS')
+                     ||'|'|| to_char(a.END_TIME, 'YYYY-mm-dd_HH24:MI:SS')
+                     ||'|'|| replace(b.INPUT_TYPE, ' ', '_')
+                     ||'|'|| round(((sysdate - END_TIME) * 24 * 60),0)
+              FROM V\$RMAN_BACKUP_JOB_DETAILS a, v\$database d,
+                   (SELECT input_type, max(command_id) as command_id
+                    FROM V\$RMAN_BACKUP_JOB_DETAILS
+                    WHERE START_TIME > sysdate-14
+                          and input_type != 'ARCHIVELOG'
+                          and STATUS<>'RUNNING' GROUP BY input_type) b
+              WHERE a.COMMAND_ID = b.COMMAND_ID
+              UNION ALL
+              select name
+                     || '|COMPLETED'
+                     || '|'|| to_char(sysdate, 'YYYY-mm-dd_HH24:MI:SS')
+                     || '|'|| to_char(completed, 'YYYY-mm-dd_HH24:MI:SS')
+                     || '|ARCHIVELOG|'
+                     || round((sysdate - completed)*24*60,0)
+              from (
+                    select d.name
+                         , max(a.completion_time) completed
+                         , case when a.backup_count > 0 then 1 else 0 end
+                    from v\$archived_log a, v\$database d
+                    where a.backup_count > 0
+                          and a.dest_id in
+                              (select b.dest_id
+                               from v\$archive_dest b
+                               where b.target = 'PRIMARY'
+                                     and b.SCHEDULE = 'ACTIVE'
+                              )
+                    group by d.name, case when a.backup_count > 0 then 1 else 0 end);"
+    fi
+}
+
+sql_recovery_area()
+{
+    if [ "$AT_LEAST_ORACLE_102" = 'yes' ] ; then
+        echo 'PROMPT <<<oracle_recovery_area>>>'
+        echo "select upper(d.NAME)
+                     ||' '|| round((SPACE_USED-SPACE_RECLAIMABLE)/
+                                   (CASE NVL(SPACE_LIMIT,1) WHEN 0 THEN 1 ELSE SPACE_LIMIT END)*100)
+                     ||' '|| round(SPACE_LIMIT/1024/1024)
+                     ||' '|| round(SPACE_USED/1024/1024)
+                     ||' '|| round(SPACE_RECLAIMABLE/1024/1024)
+              from V\$RECOVERY_FILE_DEST, v\$database d;
+             "
+    fi
+}
+
+sql_undostat()
+{
+    echo 'PROMPT <<<oracle_undostat>>>'
+    if [ "$AT_LEAST_ORACLE_102" = 'yes' ] ; then
+        echo "select upper(i.INSTANCE_NAME)
+                     ||'|'|| ACTIVEBLKS
+                     ||'|'|| MAXCONCURRENCY
+                     ||'|'|| TUNED_UNDORETENTION
+                     ||'|'|| maxquerylen
+                     ||'|'|| NOSPACEERRCNT
+              from v\$instance i,
+                   (select * from (select *
+                                   from v\$undostat order by end_time desc
+                                  )
+                    where rownum = 1
+                          and TUNED_UNDORETENTION > 0
+                   );
+             "
+    elif [ "$AT_LEAST_ORACLE_92" = 'yes' ] ; then
+        # TUNED_UNDORETENTION and ACTIVEBLKS are not available in Oracle <= 9.2!
+        # We send a -1 for filtering in check_undostat.
+        echo "select upper(i.INSTANCE_NAME)
+                     ||'|-1'
+                     ||'|'|| MAXCONCURRENCY
+                     ||'|-1'
+                     ||'|'|| maxquerylen
+                     ||'|'|| NOSPACEERRCNT
+              from v\$instance i,
+                   (select * from (select *
+                                   from v\$undostat order by end_time desc
+                                  )
+                    where rownum = 1
+                   );
+             "
+    fi
+}
+
+sql_resumable()
+{
+    echo 'PROMPT <<<oracle_resumable>>>'
+    echo "select upper(i.INSTANCE_NAME)
+                 ||'|'|| u.username
+                 ||'|'|| a.SESSION_ID
+                 ||'|'|| a.status
+                 ||'|'|| a.TIMEOUT
+                 ||'|'|| round((sysdate-to_date(a.SUSPEND_TIME,'mm/dd/yy hh24:mi:ss'))*24*60*60)
+                 ||'|'|| a.ERROR_NUMBER
+                 ||'|'|| to_char(to_date(a.SUSPEND_TIME, 'mm/dd/yy hh24:mi:ss'),'mm/dd/yy_hh24:mi:ss')
+                 ||'|'|| a.RESUME_TIME
+                 ||'|'|| a.ERROR_MSG
+          from dba_resumable a, v\$instance i, dba_users u
+          where a.INSTANCE_ID = i.INSTANCE_NUMBER
+                and u.user_id = a.user_id
+                and a.SUSPEND_TIME is not null
+          union all
+          select upper(i.INSTANCE_NAME)
+                 || '|||||||||'
+          from v\$instance i
+;
+         "
+}
+
+sql_jobs()
+{
+    if [ "$AT_LEAST_ORACLE_102" = 'yes' ] ; then
+        echo 'PROMPT <<<oracle_jobs>>>'
+        echo "SELECT upper(vd.NAME)
+                     ||'|'|| j.OWNER
+                     ||'|'|| j.JOB_NAME
+                     ||'|'|| j.STATE
+                     ||'|'|| ROUND((TRUNC(sysdate) + j.LAST_RUN_DURATION - TRUNC(sysdate)) * 86400)
+                     ||'|'|| j.RUN_COUNT
+                     ||'|'|| j.ENABLED
+                     ||'|'|| NVL(j.NEXT_RUN_DATE, to_date('1970-01-01', 'YYYY-mm-dd'))
+                     ||'|'|| NVL(j.SCHEDULE_NAME, '-')
+                     ||'|'|| d.STATUS
+              FROM dba_scheduler_jobs j, dba_scheduler_job_run_details d, v\$database vd
+              WHERE d.owner=j.OWNER AND d.JOB_NAME=j.JOB_NAME
+                    AND d.LOG_ID=(SELECT max(LOG_ID) FROM dba_scheduler_job_run_details dd
+                                  WHERE dd.owner=j.OWNER and dd.JOB_NAME=j.JOB_NAME
+                                 );
+             "
+    fi
+}
+
+sql_ts_quotas()
+{
+    echo 'PROMPT <<<oracle_ts_quotas>>>'
+    echo "select upper(d.NAME)
+                 ||'|'|| Q.USERNAME
+                 ||'|'|| Q.TABLESPACE_NAME
+                 ||'|'|| Q.BYTES
+                 ||'|'|| Q.MAX_BYTES
+          from dba_ts_quotas Q, v\$database d
+          where max_bytes > 0
+          union all
+          select upper(d.NAME)
+                 ||'|||'
+          from v\$database d
+          order by 1;
+         "
+}
+
+sql_version()
+{
+    echo 'PROMPT <<<oracle_version>>>'
+    echo "select upper(i.INSTANCE_NAME)
+                 || ' ' || banner
+          from v\$version, v\$instance i
+          where banner like 'Oracle%';"
+}
+
+sql_instance()
+{
+    echo 'prompt <<<oracle_instance>>>'
+    if [ ${ORACLE_SID:0:1} = '+' ] ; then
+        # ASM
+        echo "select upper(i.instance_name)
+                     || '|' || i.VERSION
+                     || '|' || i.STATUS
+                     || '|' || i.LOGINS
+                     || '|' || i.ARCHIVER
+                     || '|' || round((sysdate - i.startup_time) * 24*60*60)
+                     || '|' || '0'
+                     || '|' || 'NO'
+                     || '|' || 'ASM'
+                     || '|' || 'NO'
+                     || '|' || i.instance_name
+              from v\$instance i;
+             "
+    else
+        # normal instance
+        echo "select upper(i.instance_name)
+                     || '|' || i.VERSION
+                     || '|' || i.STATUS
+                     || '|' || i.LOGINS
+                     || '|' || i.ARCHIVER
+                     || '|' || round((sysdate - i.startup_time) * 24*60*60)
+                     || '|' || DBID
+                     || '|' || LOG_MODE
+                     || '|' || DATABASE_ROLE
+                     || '|' || FORCE_LOGGING
+                     || '|' || d.name
+              from v\$instance i, v\$database d;
+             "
+    fi
+}
+
+sql_sessions()
+{
+    echo 'prompt <<<oracle_sessions>>>'
+    echo "select upper(i.instance_name)
+                 || ' ' || CURRENT_UTILIZATION
+          from v\$resource_limit, v\$instance i
+          where RESOURCE_NAME = 'sessions';
+         "
+}
+
+sql_processes()
+{
+    echo 'prompt <<<oracle_processes>>>'
+    echo "select upper(i.instance_name)
+                 || ' ' || CURRENT_UTILIZATION
+                 || ' ' || ltrim(rtrim(LIMIT_VALUE))
+          from v\$resource_limit, v\$instance i
+          where RESOURCE_NAME = 'processes';
+         "
+}
+
+sql_logswitches()
+{
+    echo 'prompt <<<oracle_logswitches>>>'
+    echo "select upper(i.instance_name)
+                 || ' ' || logswitches
+          from v\$instance i ,
+               (select count(1) logswitches
+                from v\$loghist
+                where first_time > sysdate - 1/24
+               );
+         "
+}
+
+sql_locks()
+{
+    if [ "$AT_LEAST_ORACLE_101" = 'yes' ] ; then
+        echo 'prompt <<<oracle_locks>>>'
+        echo "SET SERVEROUTPUT ON feedback off
+DECLARE
+    type x is table of varchar2(20000) index by pls_integer;
+    xx x;
+begin
+    begin
+        execute immediate 'select upper(i.instance_name)
+                                  || ''|'' || a.sid
+                                  || ''|'' || b.serial#
+                                  || ''|'' || b.machine
+                                  || ''|'' || b.program
+                                  || ''|'' || b.process
+                                  || ''|'' || b.osuser
+                                  || ''|'' || a.ctime
+                                  || ''|'' || decode(c.owner,NULL,''NULL'',c.owner)
+                                  || ''|'' || decode(c.object_name,NULL,''NULL'',c.object_name)
+                           from V\$LOCK a, v\$session b, dba_objects c, v\$instance i
+                           where (a.id1, a.id2, a.type)
+                                 IN (SELECT id1, id2, type
+                                     FROM GV\$LOCK
+                                     WHERE request>0
+                                    )
+                                 and request=0
+                                 and a.sid = b.sid
+                                 and a.id1 = c.object_id (+)
+                           union all
+                           select upper(i.instance_name) || ''|||||||||''
+                           from v\$instance i'
+        bulk collect into xx;
+        if xx.count >= 1 then
+            for i in 1 .. xx.count loop
+                dbms_output.put_line(xx(i));
+            end loop;
+        end if;
+    exception
+        when others then
+            for cur1 in (select upper(i.instance_name) instance_name from v\$instance i) loop
+                dbms_output.put_line(cur1.instance_name || '|||||||||'||sqlerrm);
+            end loop;
+    end;
+END;
+/
+set serverout off
+"
+    fi
+}
+
+sql_longactivesessions()
+{
+    if [ "$AT_LEAST_ORACLE_101" = 'yes' ] ; then
+        echo 'prompt <<<oracle_longactivesessions>>>'
+        echo "select upper(i.instance_name)
+                     || '|' || s.sid
+                     || '|' || s.serial#
+                     || '|' || s.machine
+                     || '|' || s.process
+                     || '|' || s.osuser
+                     || '|' || s.program
+                     || '|' || s.last_call_et
+                     || '|' || s.sql_id
+              from v\$session s, v\$instance i
+              where s.status = 'ACTIVE'
+                    and type != 'BACKGROUND'
+                    and s.username is not null
+                    and s.username not in('PUBLIC')
+                    and s.last_call_et > 60*60
+              union all
+              select upper(i.instance_name)
+                     || '||||||||'
+              from v\$instance i;
+             "
+    fi
+}
+
+sql_asm_diskgroup()
+{
+    echo 'prompt <<<oracle_asm_diskgroup>>>'
+    if [ "$AT_LEAST_ORACLE_112" = 'yes' ] ; then
+        echo "select STATE
+                     || ' ' || TYPE
+                     || ' ' || 'N'
+                     || ' ' || sector_size
+                     || ' ' || block_size
+                     || ' ' || allocation_unit_size
+                     || ' ' || total_mb
+                     || ' ' || free_mb
+                     || ' ' || required_mirror_free_mb
+                     || ' ' || usable_file_mb
+                     || ' ' || offline_disks
+                     || ' ' || voting_files
+                     || ' ' || name || '/'
+              from v\$asm_diskgroup;
+             "
+    elif [ "$AT_LEAST_ORACLE_102" = 'yes' ] ; then
+        echo "select STATE
+                     || ' ' || TYPE
+                     || ' ' || 'N'
+                     || ' ' || sector_size
+                     || ' ' || block_size
+                     || ' ' || allocation_unit_size
+                     || ' ' || total_mb
+                     || ' ' || free_mb
+                     || ' ' || required_mirror_free_mb
+                     || ' ' || usable_file_mb
+                     || ' ' || offline_disks
+                     || ' ' || 'N'
+                     || ' ' || name || '/'
+              from v\$asm_diskgroup;
+             "
+    fi
+}
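+
+# Note (explanatory, not upstream code): the literal 'N' in the 10.2
+# branch stands in for the voting_files flag, which only exists from
+# Oracle 11.2 on; it keeps the field positions of both branches identical
+# for the asm_diskgroup check.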
+
+# .--oraenv--------------------------------------------------------------.
+# |                                                                      |
+# |    ___  _ __ __ _  ___ _ ____   __                                   |
+# |   / _ \| '__/ _` |/ _ \ '_ \ \ / /                                   |
+# |  | (_) | | | (_| |  __/ | | \ V /                                    |
+# |   \___/|_|  \__,_|\___|_| |_|\_/                                     |
+# |                                                                      |
+# +----------------------------------------------------------------------+
+# | Functions for getting the Oracle environment                         |
+# '----------------------------------------------------------------------'
+
+function set_oraenv () {
+    ORACLE_SID=${1}
+
+    test -f /etc/oratab && ORATAB=/etc/oratab
+    # /var/opt/oracle/oratab is needed for Oracle Solaris
+    test -f /var/opt/oracle/oratab && ORATAB=/var/opt/oracle/oratab
+    test -f ${ORATAB:-""} || echo "ORA-99999 oratab not found"
+    test -f ${ORATAB:-""} || exit 1
+
+    ORACLE_HOME=$(cat ${ORATAB} | grep "^"${ORACLE_SID}":" | cut -d":" -f2)
+    if [ -z $ORACLE_HOME ] ; then
+        # cut last number from SID for Oracle RAC to find entry in oratab
+        ORACLE_SID_SHORT=$(echo $ORACLE_SID | sed "s/[0-9]$//")
+        ORACLE_HOME=$(cat ${ORATAB} | grep "^"${ORACLE_SID_SHORT}":" | cut -d":" -f2)
+    fi
+
+    if [ ! -d ${ORACLE_HOME:-"not_found"} ] ; then
+        echo "ORA-99999 ORACLE_HOME for ORACLE_SID="$ORACLE_SID" not found or not existing!"
+        exit 1
+    fi
+
+    TNS_ADMIN=${TNS_ADMIN:-$MK_CONFDIR}
+
+    test -f ${TNS_ADMIN}/sqlnet.ora || ( echo "ORA-99998 Couldn't find "${TNS_ADMIN}/sqlnet.ora ; exit 1)
+
+    export ORACLE_HOME TNS_ADMIN ORACLE_SID
+}
+
+function get_oraversion () {
+    set_oraenv ${1}
+    ORACLE_VERSION=$($ORACLE_HOME/bin/sqlplus -V | grep ^SQL | cut -d" " -f3 | cut -d"." -f-2)
+
+    # remove possibly existing variables
+    unset AT_LEAST_ORACLE_121 AT_LEAST_ORACLE_112 AT_LEAST_ORACLE_111 AT_LEAST_ORACLE_102 AT_LEAST_ORACLE_101 AT_LEAST_ORACLE_92
+
+    if [ "$ORACLE_VERSION" = '12.1' ] ; then
+        AT_LEAST_ORACLE_121=yes
+    fi
+
+    if [ "$ORACLE_VERSION" = '12.1' -o "$ORACLE_VERSION" = '11.2' ] ; then
+        AT_LEAST_ORACLE_112=yes
+    fi
+
+    if [ "$ORACLE_VERSION" = '12.1' -o "$ORACLE_VERSION" = '11.2' \
+         -o "$ORACLE_VERSION" = '11.1' ] ; then
+        AT_LEAST_ORACLE_111=yes
+    fi
+
+    if [ "$ORACLE_VERSION" = '12.1' -o "$ORACLE_VERSION" = '11.2' \
+         -o "$ORACLE_VERSION" = '11.1' -o "$ORACLE_VERSION" = '10.2' ] ; then
+        AT_LEAST_ORACLE_102=yes
+    fi
+
+    if [ "$ORACLE_VERSION" = '12.1' -o "$ORACLE_VERSION" = '11.2' \
+         -o "$ORACLE_VERSION" = '11.1' -o "$ORACLE_VERSION" = '10.2' \
+         -o "$ORACLE_VERSION" = '10.1' ] ; then
+        AT_LEAST_ORACLE_101=yes
+    fi
+
+    if [ "$ORACLE_VERSION" = '12.1' -o "$ORACLE_VERSION" = '11.2' \
+         -o "$ORACLE_VERSION" = '11.1' -o "$ORACLE_VERSION" = '10.2' \
+         -o "$ORACLE_VERSION" = '10.1' -o "$ORACLE_VERSION" = '9.2' ] ; then
+        AT_LEAST_ORACLE_92=yes
+    fi
+}
+
+#
+# .--Functions-----------------------------------------------------------.
+# |             _____                 _   _                               |
+# |            |  ___|   _ _ __   ___| |_(_) ___  _ __  ___               |
+# |            | |_ | | | | '_ \ / __| __| |/ _ \| '_ \/ __|              |
+# |            |  _|| |_| | | | | (__| |_| | (_) | | | \__ \              |
+# |            |_|   \__,_|_| |_|\___|\__|_|\___/|_| |_|___/              |
+# |                                                                      |
+# +----------------------------------------------------------------------+
+# | Helper functions                                                     |
+# '----------------------------------------------------------------------'
+
+function sqlplus_internal() {
+    loc_stdin=$(cat)
+    set_oraenv $SID
+
+    # reload mk_oracle.cfg for run_cached. Otherwise some variables are missing
+    if [ -e "$MK_CONFDIR/mk_oracle.cfg" ]
+    then
+        . $MK_CONFDIR/mk_oracle.cfg
+    fi
+
+    # mk_oracle_dbuser.conf is only kept for compatibility. Do not use it anymore
+    ORACLE_USERCONF=${MK_CONFDIR}/mk_oracle_dbuser.conf
+
+    TNSPINGOK=no
+    if [ -f ${TNS_ADMIN}/tnsnames.ora ] ; then
+        if "${ORACLE_HOME}"/bin/tnsping "${ORACLE_SID}" >/dev/null 2>&1 ; then
+            TNSALIAS=$ORACLE_SID
+            TNSPINGOK=yes
+        fi
+    fi
+
+    ORADBUSER=""
+    DBPASSWORD=""
+
+    # ASM uses '+' as the first character of the SID!
+    if [ ${ORACLE_SID:0:1} = '+' ] ; then
+        ORACFGLINE=${ASMUSER}
+    else
+        # use an individual user or the default DBUSER from mk_oracle.cfg
+        dummy="DBUSER_"${ORACLE_SID}
+        ORACFGLINE=${!dummy}
+        if [ "$ORACFGLINE" = '' ] ; then
+            ORACFGLINE=${DBUSER}
+        fi
+    fi
+
+    if [ -f ${ORACLE_USERCONF} -a "${ORACFGLINE}" = '' ] ; then
+        # mk_oracle_dbuser.conf
+        ORACFGLINE=$(cat ${ORACLE_USERCONF} | grep "^"${ORACLE_SID}":")
+        # mk_oracle_dbuser.conf has ORACLE_SID as its first field, so we need an offset for all values
+        offset=1
+    else
+        # mk_oracle.cfg
+        offset=0
+    fi
+
+    ORADBUSER=$(echo ${ORACFGLINE} | cut -d":" -f$[1+offset])
+    DBPASSWORD=$(echo ${ORACFGLINE} | cut -d":" -f$[2+offset])
+    DBSYSCONNECT=$(echo ${ORACFGLINE} | cut -d":" -f$[3+offset])
+    DBHOST=$(echo ${ORACFGLINE} | cut -d":" -f$[4+offset])
+    DBPORT=$(echo ${ORACFGLINE} | cut -d":" -f$[5+offset])
+
+    if [ ! "${ORACFGLINE}" ] ; then
+        # no configuration found
+        # => use the wallet with tnsnames.ora or EZCONNECT
+        TNSALIAS=${TNSALIAS:-"localhost:1521/${ORACLE_SID}"}
+    else
+        if [ ${DBSYSCONNECT} ] ; then
+            assysdbaconnect=" as "${DBSYSCONNECT}
+        fi
+        TNSALIAS=${TNSALIAS:-"(DESCRIPTION=(ADDRESS=(PROTOCOL=TCP)(HOST=${DBHOST:-"localhost"})(PORT=${DBPORT:-1521}))(CONNECT_DATA=(SID=${ORACLE_SID})(SERVER=DEDICATED)(UR=A)))"}
+
+        # ORADBUSER = '/'? => ignore DBPASSWORD and use the wallet
+        if [ "${ORADBUSER}" = '/' ] ; then
+            # connect with / and wallet
+            ORADBUSER=""
+            DBPASSWORD=""
+            if [ "$TNSPINGOK" = 'no' ] ; then
+                # create an EZCONNECT string when no tnsnames.ora is usable
+                # defaults to localhost:1521/<SID>
+                TNSALIAS="${DBHOST:-"localhost"}:${DBPORT:-1521}/${ORACLE_SID}"
+            fi
+        fi
+    fi
+
+    DBCONNECT="${ORADBUSER}/${DBPASSWORD}@${TNSALIAS}${assysdbaconnect}"
+
+    SQLPLUS=${ORACLE_HOME}/bin/sqlplus
+    if [ ! -x ${SQLPLUS} ] ; then
+        echo "sqlplus not found or ORACLE_HOME wrong! "
+        echo "SQLPLUS="${SQLPLUS}
+        return 1
+    fi
+
+    echo "$loc_stdin" | ${SQLPLUS} -L -s ${DBCONNECT}
+    if [ $? -ne 0 ] ; then
+        if [ "$DEBUGCONNECT" ] ; then
+            echo "Login details: ${DBCONNECT}" >&2
+        fi
+        return 1
+    fi
+}
+
+# Helper function that calls an SQL statement with a clean output
+# Usage: echo "..." | sqlplus SID
+function sqlplus ()
+{
+    local SID=$1
+    loc_stdin=$(cat)
+
+    # use sqlplus_internal when no sqlplus.sh is found
+    SQLPLUS="$MK_CONFDIR"/sqlplus.sh
+    test -f "$SQLPLUS" || SQLPLUS=sqlplus_internal
+
+    if OUTPUT=$({ echo 'set pages 0 trimspool on feedback off lines 8000' ; echo 'whenever sqlerror exit 1'; echo "$loc_stdin" ; } | "$SQLPLUS" $SID)
+    then
+        echo "$OUTPUT"
+    else
+        echo '<<<oracle_instance>>>'
+        local SID_UPPER=$(echo "$SID" | tr '[:lower:]' '[:upper:]')
+        echo "$OUTPUT" | grep -v "^ERROR at line" | tr '\n' ' ' | sed "s/^/$SID_UPPER|FAILURE|/" ; echo
+        return 1
+    fi
+}
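+
+# On a failed login the wrapper therefore emits, for example (hypothetical
+# message text, not real output of this system):
+#   <<<oracle_instance>>>
+#   ORCL|FAILURE|ORA-01017: invalid username/password; logon denied
+# so the oracle_instance check can report the broken instance instead of
+# the section silently disappearing.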
+
+function remove_excluded_sections ()
+{
+    local sections="$1"
+    local excluded="$2"
+    local result=""
+    for section in $sections
+    do
+        local skip=
+        for exclude in $excluded
+        do
+            if [ "$exclude" = "$section" ] ; then
+                local skip=yes
+                break
+            fi
+        done
+        if [ "$skip" != yes ] ; then
+            result="$result $section"
+        fi
+    done
+    echo "$result"
+}
+
+
+# Create one SQL statement for several sections and run
+# it with sqlplus. The exit code is preserved.
+function do_sync_checks ()
+{
+    local SID=$1
+    local SECTIONS="$2"
+    for section in $SECTIONS
+    do
+        eval "sql_$section"
+    done | sqlplus $SID
+}
+
+function do_async_checks ()
+{
+    local SID=$1
+    echo "$ASYNC_SQL" | sqlplus $SID
+}
+
+# Make sure that the new shell that is being run by run_cached inherits
+# our functions
+export -f sqlplus
+export -f sqlplus_internal
+export -f do_async_checks
+export -f set_oraenv
+
+function file_age() {
+    /usr/bin/perl -e 'if (! -f $ARGV[0]){die "0000000"};$mtime=(stat($ARGV[0]))[9];print ($^T-$mtime);' "$1"
+}
+
+function run_cached_local () {
+    local section=
+    if [ "$1" = -s ] ; then local section="echo '<<<$2>>>' ; " ; shift ; fi
+    local NAME=$1
+    local MAXAGE=$2
+    shift 2
+    local CMDLINE="$section$@"
+
+    if [ ! -d $MK_VARDIR/cache ]; then mkdir -p $MK_VARDIR/cache ; fi
+    CACHEFILE="$MK_VARDIR/cache/$NAME.cache"
+
+    # Check if the creation of the cache takes suspiciously long and return
+    # nothing if the age (access time) of $CACHEFILE.new is twice the MAXAGE
+    if [ -e "$CACHEFILE.new" ] ; then
+        AGE=$(file_age "$CACHEFILE.new")
+        if [ $AGE -ge $((MAXAGE * 2)) ] ; then
+            return
+        fi
+    fi
+
+    # Check if cache file exists and is recent enough
+    if [ -s "$CACHEFILE" ] ; then
+        AGE=$(file_age "$CACHEFILE")
+        if [ $AGE -le $MAXAGE ] ; then local USE_CACHEFILE=1 ; fi
+        # Output the file in any case, even if it is
+        # outdated. The new file will not yet be available
+        cat "$CACHEFILE"
+    fi
+
+    # Cache file outdated and new job not yet running? Start it
+    if [ -z "$USE_CACHEFILE" -a ! -e "$CACHEFILE.new" ] ; then
+        if [ "$DEBUG" ] ; then
+            echo "set -o noclobber ; exec > \"$CACHEFILE.new\" || exit 1 ; $CMDLINE && mv \"$CACHEFILE.new\" \"$CACHEFILE\" || rm -f \"$CACHEFILE\" \"$CACHEFILE.new\"" | bash
+        else
+            echo "set -o noclobber ; exec > \"$CACHEFILE.new\" || exit 1 ; $CMDLINE && mv \"$CACHEFILE.new\" \"$CACHEFILE\" || rm -f \"$CACHEFILE\" \"$CACHEFILE.new\"" | nohup bash 2>/dev/null &
+        fi
+    fi
+}
+
+#.
+# .--Main----------------------------------------------------------------.
+# |                        __  __       _                                 |
+# |                       |  \/  | __ _(_)_ __                            |
+# |                       | |\/| |/ _` | | '_ \                           |
+# |                       | |  | | (_| | | | | |                          |
+# |                       |_|  |_|\__,_|_|_| |_|                          |
+# |                                                                      |
+# +----------------------------------------------------------------------+
+# | Iterate over all instances and execute sync and async sections.      |
+# '----------------------------------------------------------------------'
+
+# Get list of all running databases
+# ASM instances (SID beginning with '+') are picked up as well; they are
+# handled with the separate *_ASM_SECTIONS lists below.
+SIDS=$(UNIX95=true ps -ef | awk '{print $NF}' | grep -E '^asm_pmon_|^ora_pmon_|^xe_pmon_XE' | cut -d"_" -f3-)
+
+# If we have not found any running database instance, then either
+# no ORACLE is present on this system or it's just currently not running.
+# In the latter case we output empty agent sections so that Check_MK will be
+# happy and execute the actual check functions.
+if [ -z "$SIDS" -a ! -e "$MK_CONFDIR/mk_oracle.found" ] ; then
+    exit
+fi
+
+# From now on we expect databases on this system (forever)
+touch $MK_CONFDIR/mk_oracle.found
+
+# Make sure that always all sections are present, even
+# in case of an error. Note: the <<<oracle_instance>>>
+# section shows the general state of a database instance. If
+# that section fails for an instance then all other sections
+# do not contain valid data anyway.
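+# With the default section lists above, the loop below prints e.g.
+# "<<<oracle_instance>>>", "<<<oracle_sessions>>>", ... - one empty
+# section per configured check - so existing services stay defined even
+# when an instance query fails later on.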
+for section in $SYNC_SECTIONS $ASYNC_SECTIONS $SYNC_ASM_SECTIONS $ASYNC_ASM_SECTIONS
+do
+    echo "<<<oracle_$section>>>"
+done
+
+for SID in $SIDS
+do
+    # We need the SID in uppercase later
+    SID_UPPER=$(echo $SID | tr '[:lower:]' '[:upper:]')
+
+    # Check if SID is listed in ONLY_SIDS if this is used
+    if [ "$ONLY_SIDS" ] ; then
+        SKIP=yes
+        for S in $ONLY_SIDS ; do
+            if [ "$S" = "$SID" ] ; then
+                SKIP=
+                break
+            fi
+        done
+        if [ "$SKIP" ] ; then continue ; fi
+    fi
+
+    # Handle explicit exclusion of instances
+    EXCLUDE=EXCLUDE_$SID
+    EXCLUDE=${!EXCLUDE}
+    # SID filtered totally?
+    if [ "$EXCLUDE" = "ALL" ]; then
+        continue
+    fi
+
+    if [ ${SID:0:1} = '+' ] ; then
+        # ASM instance: switch to the ASM section lists
+        DO_ASYNC_SECTIONS=${ASYNC_ASM_SECTIONS}
+        DO_SYNC_SECTIONS=${SYNC_ASM_SECTIONS}
+    else
+        # normal instance
+        DO_SYNC_SECTIONS=${SYNC_SECTIONS}
+        DO_ASYNC_SECTIONS=${ASYNC_SECTIONS}
+    fi
+
+    get_oraversion $SID
+
+    # Do sync checks
+    EXCLUDED=$(eval 'echo $EXCLUDE'"_$SID")
+    SECTIONS=$(remove_excluded_sections "$DO_SYNC_SECTIONS" "$EXCLUDED")
+
+    # Do async checks
+    ASECTIONS=$(remove_excluded_sections "$DO_ASYNC_SECTIONS" "$EXCLUDED")
+    ASYNC_SQL=$(for section in $ASECTIONS ; do eval "sql_$section" ; done)
+    export ASYNC_SQL
+
+    if [ "$DEBUGCONNECT" ] ; then
+        echo "-----------------------------------------------"
+        echo "Login check to instance: "$SID" Version: "$ORACLE_VERSION
+        echo "select 'Login ok User: ' || user || ' on ' || host_name
+              from v\$instance;" | sqlplus $SID
+        echo "SYNC_SECTIONS=$SECTIONS"
+        echo "ASYNC_SECTIONS=$ASECTIONS"
+        # do not execute any check
+        continue
+    fi
+
+    do_sync_checks $SID "$SECTIONS"
+
+    run_cached_local oracle_$SID $CACHE_MAXAGE do_async_checks $SID
+
+done
diff -Nru check-mk-1.2.2p3/plugins/mk_oracle_asm check-mk-1.2.6p12/plugins/mk_oracle_asm
--- check-mk-1.2.2p3/plugins/mk_oracle_asm 1970-01-01 00:00:00.000000000 +0000
+++ check-mk-1.2.6p12/plugins/mk_oracle_asm 2015-06-24 09:48:39.000000000 +0000
@@ -0,0 +1,38 @@
+#!/bin/bash
+# +------------------------------------------------------------------+
+# |             ____ _               _        __  __ _  __           |
+# |            / ___| |__   ___  ___| | __   |  \/  | |/ /           |
+# |           | |   | '_ \ / _ \/ __| |/ /   | |\/| | ' /            |
+# |           | |___| | | |  __/ (__|   <    | |  | | . \            |
+# |            \____|_| |_|\___|\___|_|\_\___|_|  |_|_|\_\           |
+# |                                                                  |
+# | Copyright Mathias Kettner 2014             mk@mathias-kettner.de |
+# +------------------------------------------------------------------+
+#
+# This file is part of Check_MK.
+# The official homepage is at http://mathias-kettner.de/check_mk.
+#
+# check_mk is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by
+# the Free Software Foundation in version 2. check_mk is distributed
+# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with-
+# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE. See the GNU General Public License for more de-
+# tails. You should have received a copy of the GNU General Public
+# License along with GNU Make; see the file COPYING. If not, write
+# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
+# Boston, MA 02110-1301 USA.
+
+# Check_MK agent plugin for monitoring ORACLE ASM
+
+if [ ! -e $MK_CONFDIR/asmcmd.sh ]; then
+    echo "$MK_CONFDIR/asmcmd.sh does not exist." >&2
+    exit 1
+fi
+
+function asmcmd () {
+    $MK_CONFDIR/asmcmd.sh $@
+}
+
+echo "<<<oracle_asm_diskgroup>>>"
+asmcmd lsdg | grep -v State | grep -v "The Oracle"
diff -Nru check-mk-1.2.2p3/plugins/mk_oracle_crs check-mk-1.2.6p12/plugins/mk_oracle_crs
--- check-mk-1.2.2p3/plugins/mk_oracle_crs 1970-01-01 00:00:00.000000000 +0000
+++ check-mk-1.2.6p12/plugins/mk_oracle_crs 2015-06-24 09:48:39.000000000 +0000
@@ -0,0 +1,127 @@
+#!/bin/bash
+# +------------------------------------------------------------------+
+# |             ____ _               _        __  __ _  __           |
+# |            / ___| |__   ___  ___| | __   |  \/  | |/ /           |
+# |           | |   | '_ \ / _ \/ __| |/ /   | |\/| | ' /            |
+# |           | |___| | | |  __/ (__|   <    | |  | | . \            |
+# |            \____|_| |_|\___|\___|_|\_\___|_|  |_|_|\_\           |
+# |                                                                  |
+# | Copyright Mathias Kettner 2014             mk@mathias-kettner.de |
+# +------------------------------------------------------------------+
+#
+# This file is part of Check_MK.
+# The official homepage is at http://mathias-kettner.de/check_mk.
+#
+# check_mk is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by
+# the Free Software Foundation in version 2. check_mk is distributed
+# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with-
+# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE. See the GNU General Public License for more de-
+# tails. You should have received a copy of the GNU General Public
+# License along with GNU Make; see the file COPYING. If not, write
+# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
+# Boston, MA 02110-1301 USA.
+
+# Developed by Thorsten Bruhns from OPITZ CONSULTING Deutschland GmbH
+
+set -f
+
+ocrcfgfile=/etc/oracle/ocr.loc
+olrcfgfile=/etc/oracle/olr.loc
+resourcefilter="^NAME=|^TYPE=|^STATE=|^TARGET=|^ENABLED="
+
+# .--Functions-----------------------------------------------------------.
+# |             _____                 _   _                               |
+# |            |  ___|   _ _ __   ___| |_(_) ___  _ __  ___               |
+# |            | |_ | | | | '_ \ / __| __| |/ _ \| '_ \/ __|              |
+# |            |  _|| |_| | | | | (__| |_| | (_) | | | \__ \              |
+# |            |_|   \__,_|_| |_|\___|\__|_|\___/|_| |_|___/              |
+# |                                                                      |
+# +----------------------------------------------------------------------+
+# |                                                                      |
+# '----------------------------------------------------------------------'
+
+function set_has_env(){
+    test -f ${ocrcfgfile} || exit 0
+    local_has_type=$(cat $ocrcfgfile | grep "^local_only=" | cut -d"=" -f2 | tr '[:lower:]' '[:upper:]')
+    local_has_type=${local_has_type:-"FALSE"}
+
+    if [ -f ${olrcfgfile} ] ; then
+        has_ORACLE_HOME=$(cat $olrcfgfile | grep "^crs_home=" | cut -d"=" -f2)
+    else
+        # There is no olr.cfg in 10.2 and 11.1
+        # we try to get the ORA_CRS_HOME from /etc/init.d/init.cssd
+        local_has_type=FALSE
+        INITCSSD=/etc/init.d/init.cssd
+        if [ ! -f ${INITCSSD} ] ; then
+            exit 0
+        else
+            has_ORACLE_HOME=$(grep "^ORA_CRS_HOME=" ${INITCSSD} | cut -d"=" -f2-)
+        fi
+    fi
+
+    CRSCTL=${has_ORACLE_HOME}/bin/crsctl
+    OLSNODES=${has_ORACLE_HOME}/bin/olsnodes
+    CRS_STAT=${has_ORACLE_HOME}/bin/crs_stat
+}
+
+function printhasdata() {
+    ps -e | grep cssd.bin > /dev/null || exit 0
+
+    echo "<<<oracle_crs_version>>>"
+    $CRSCTL query has releaseversion
+
+    echo "<<<oracle_crs_res>>>"
+    $CRSCTL stat res -f | grep -E $resourcefilter
+}
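+
+# Note (explanatory, not upstream code): "crsctl stat res -f" prints one
+# KEY=VALUE attribute per line; the grep -E on $resourcefilter keeps only
+# the NAME=, TYPE=, STATE=, TARGET= and ENABLED= lines, e.g.
+# "STATE=ONLINE on rac1" (sample value, not real output of this system).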
+
+function printcrsdata() {
+    ps -e | grep -e ohasd.bin -e crsd.bin > /dev/null || exit 0
+
+    echo "<<<oracle_crs_version>>>"
+    crs_version=$($CRSCTL query crs releaseversion)
+    crs_version_short=$(echo $crs_version | cut -d"[" -f2- | cut -d"." -f-2)
+    echo $crs_version
+
+    echo "<<<oracle_crs_votedisk>>>"
+    $CRSCTL query css votedisk | grep "^ [0-9]"
+
+    ps -e | grep crsd.bin > /dev/null || exit 0
+    echo "<<<oracle_crs_res>>>"
+    OLS_NODENAME=$($OLSNODES -l)
+
+    echo "nodename|"$OLS_NODENAME
+
+    if [ $crs_version_short = '11.2' ] ; then
+        $CRSCTL stat res -v -n $OLS_NODENAME -init | grep -E $resourcefilter | sed "s/^/csslocal\|/"
+        for nodelist in $($OLSNODES)
+        do
+            $CRSCTL stat res -v -n $nodelist | grep -E $resourcefilter | sed "s/^/$nodelist\|/"
+        done
+    else
+        $CRS_STAT -f -c $OLS_NODENAME | grep -E $resourcefilter | sed "s/^/$OLS_NODENAME\|/"
+    fi
+}
+
+# .--Main----------------------------------------------------------------.
+# |                        __  __       _                                 |
+# |                       |  \/  | __ _(_)_ __                            |
+# |                       | |\/| |/ _` | | '_ \                           |
+# |                       | |  | | (_| | | | | |                          |
+# |                       |_|  |_|\__,_|_|_| |_|                          |
+# |                                                                      |
+# +----------------------------------------------------------------------+
+# |                                                                      |
+# '----------------------------------------------------------------------'
+
+set_has_env
+echo "<<<oracle_crs_version>>>"
+echo "<<<oracle_crs_res>>>"
+echo "<<<oracle_crs_votedisk>>>"
+if [ $local_has_type = 'FALSE' ] ; then
+    printcrsdata
+else
+    printhasdata
+fi
+
diff -Nru check-mk-1.2.2p3/plugins/mk_oracle.solaris check-mk-1.2.6p12/plugins/mk_oracle.solaris
--- check-mk-1.2.2p3/plugins/mk_oracle.solaris 1970-01-01 00:00:00.000000000 +0000
+++ check-mk-1.2.6p12/plugins/mk_oracle.solaris 2015-06-24 09:48:39.000000000 +0000
@@ -0,0 +1,200 @@
+#!/bin/bash
+# +------------------------------------------------------------------+
+# |             ____ _               _        __  __ _  __           |
+# |            / ___| |__   ___  ___| | __   |  \/  | |/ /           |
+# |           | |   | '_ \ / _ \/ __| |/ /   | |\/| | ' /            |
+# |           | |___| | | |  __/ (__|   <    | |  | | . \            |
+# |            \____|_| |_|\___|\___|_|\_\___|_|  |_|_|\_\           |
+# |                                                                  |
+# | Copyright Mathias Kettner 2014             mk@mathias-kettner.de |
+# +------------------------------------------------------------------+
+#
+# This file is part of Check_MK.
+# The official homepage is at http://mathias-kettner.de/check_mk.
+#
+# check_mk is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by
+# the Free Software Foundation in version 2. check_mk is distributed
+# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with-
+# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE. See the GNU General Public License for more de-
+# tails. You should have received a copy of the GNU General Public
+# License along with GNU Make; see the file COPYING. If not, write
+# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
+# Boston, MA 02110-1301 USA.
+
+# Check_MK agent plugin for monitoring ORACLE databases
+
+# This plugin is a port of the linux agent plugin. It has been
+# tested with Solaris 10.
+
+# Get list of all running databases
+SIDS=$(UNIX95=true ps -ef -o args | sed -n '/^ora_pmon_/p;/^xe_pmon_/p' | sed -n '/^[a-z]*_pmon_\([^ ]*\)/s//\1/p')
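+# The pipeline above turns pmon process names into SIDs (sketch with
+# made-up process names): "ora_pmon_ORCL" and "xe_pmon_XE" become "ORCL"
+# and "XE"; everything else is dropped by the two sed filters.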
$MK_CONFDIR/mk_oracle.cfg
+fi
+
+# You can specify a list of SIDs to monitor. Those databases will
+# only be handled if they are found running, though!
+#
+# ONLY_SIDS="XE HIRN SEPP"
+#
+# It is possible to filter SIDS negatively. Just add the following to
+# the mk_oracle.cfg file:
+#
+# EXCLUDE_<sid>="ALL"
+#
+# Another option is to filter single checks for SIDS. Just add
+# lines as follows to the mk_oracle.cfg file. One service per
+# line:
+#
+# EXCLUDE_<sid>="<service>"
+#
+# For example, skip the oracle_sessions and oracle_logswitches checks
+# for the instance "mysid":
+#
+# EXCLUDE_mysid="sessions logswitches"
+#
+#
+# This check uses a cache file to prevent problems with long running
+# SQL queries. It starts building a cache when
+# a) no cache is present or the cache is too old and
+# b) the cache is not currently being built
+# The cache is used for $CACHE_MAXAGE seconds. The CACHE_MAXAGE
+# option is pre-set to 120 seconds but can be changed in mk_oracle.cfg.
+
+function sqlplus ()
+{
+    if OUTPUT=$({ echo 'set pages 0' ; echo 'whenever sqlerror exit 1'; echo 'set lines 8000' ; echo 'set feedback off'; cat ; } | $MK_CONFDIR/sqlplus.sh $1)
+    then
+        echo "${OUTPUT}" | sed -e 's/[[:space:]]\+/ /g' -e '/^[[:space:]]*$/d' -e "s/^/$1 /"
+    else
+        echo "${OUTPUT}" | sed "s/^/$1 FAILURE /"
+    fi
+}
+
+
+for SID in $SIDS; do
+    # Check if SID is listed in ONLY_SIDS if this is used
+    if [ "$ONLY_SIDS" ] ; then
+        SKIP=yes
+        for S in $ONLY_SIDS ; do
+            if [ "$S" = "$SID" ] ; then
+                SKIP=
+                break
+            fi
+        done
+        if [ "$SKIP" ] ; then continue ; fi
+    fi
+
+    EXCLUDE=EXCLUDE_$SID
+    EXCLUDE=${!EXCLUDE}
+    # SID filtered totally?
+    if [ "$EXCLUDE" = "ALL" ]; then
+        continue
+    fi
+
+    # Do Version-Check (use as a general login check) without caching
+    if [ "$EXCLUDE" = "${EXCLUDE/version/}" ]; then
+        echo '<<>>'
+        echo "select banner from v\$version where banner like 'Oracle%';" | sqlplus "$SID"
+    fi
+
+    CACHE_FILE=$MK_VARDIR/oracle_$SID.cache
+
+    # Check if the cache file exists and is recent enough
+    CACHE_FILE_UPTODATE=
+    if [ -s $CACHE_FILE ]; then
+        NOW=$(perl -le "print time()")
+
+        MTIME=$(perl -MPOSIX -le 'print mktime(localtime((lstat($ARGV[0]))[9]))' $CACHE_FILE)
+        if [ $(($NOW - $MTIME)) -le $CACHE_MAXAGE ]; then
+            CACHE_FILE_UPTODATE=1
+        fi
+    fi
+
+    # If the cache file exists, output it, regardless of its age. If it's outdated
+    # then it will be recreated *asynchronously*. Its new contents will not
+    # be available here anyway.
+    if [ -s "$CACHE_FILE" ] ; then cat "$CACHE_FILE" ; fi
+
+    # When the cache file is not valid, we recreate it, but only if there is not
+    # yet a background process from a previous check still doing this! We see this
+    # because of the existence of the .new file.
+    # When the cache is old and there is no .new file present, then start a query
+    # to update the information for this instance.
+    if [ -z "$CACHE_FILE_UPTODATE" -a !
-e "$CACHE_FILE.new" ] + then + nohup bash -c " + set -o noclobber + function sqlplus () + { + if OUTPUT=\$({ echo 'set pages 0' ; echo 'whenever sqlerror exit 1'; echo 'set lines 8000' ; echo 'set feedback off'; cat ; } | $MK_CONFDIR/sqlplus.sh \$1) + then + echo \"\${OUTPUT}\" | sed -e 's/[[:space:]]\+/ /g' -e '/^[[:space:]]*$/d' -e \"s/^/\$1 /\" + else + echo \"\${OUTPUT}\" | sed \"s/^/\$1 FAILURE /\" + fi + } + + { + # Only execute checks when not filtered + if [ '$EXCLUDE' = '${EXCLUDE/sessions/}' ]; then + echo '<<>>' + echo \"select count(1) from v\\\$session where status = 'ACTIVE';\" | sqlplus \"$SID\" + fi + + if [ '$EXCLUDE' = '${EXCLUDE/logswitches/}' ]; then + echo '<<>>' + echo \"select count(1) from v\\\$loghist where first_time > sysdate - 1/24;\" | sqlplus \"$SID\" + fi + + if [ '$EXCLUDE' = '${EXCLUDE/tablespaces/}' ]; then + echo '<<>>' + sqlplus "$SID" < $CACHE_FILE.new && mv $CACHE_FILE.new $CACHE_FILE || rm -f $CACHE_FILE* + " + fi +done diff -Nru check-mk-1.2.2p3/plugins/mk_postgres check-mk-1.2.6p12/plugins/mk_postgres --- check-mk-1.2.2p3/plugins/mk_postgres 2013-11-05 09:22:37.000000000 +0000 +++ check-mk-1.2.6p12/plugins/mk_postgres 2015-09-21 10:59:53.000000000 +0000 @@ -1,4 +1,27 @@ -#!/bin/sh +#!/bin/bash +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. # Try to detect the postgres user if id pgsql >/dev/null 2>&1; then @@ -10,8 +33,17 @@ fi echo '<<>>' -echo "select current_query = '', count(*) from pg_stat_activity group by (current_query = '');" | su - $USER -c "psql -d postgres -A -t -F' '" +# Postgres 9.2 uses 'query' instead of 'current_query' +QNAME="$(echo "select column_name from information_schema.columns where table_name='pg_stat_activity' and column_name in ('query','current_query');" |\ + su - $USER -c "psql -d postgres -t -A -F';'")" +OUTPUT="$(echo "select $QNAME = '', count(*) from pg_stat_activity group by ($QNAME = '');" |\ + su - $USER -c "psql --variable ON_ERROR_STOP=1 -d postgres -A -t -F' '" 2>/dev/null)" + +echo "$OUTPUT" +# line with number of idle sessions is sometimes missing on Postgre 8.x. This can lead +# to an altogether empty section and thus the check disappearing. 
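+# With psql's unaligned output (-A -t -F' ') the query above yields at most
+# two lines, one per group; the values here are purely illustrative:
+#
+#   f 23    <- sessions with a non-empty query text (active)
+#   t 5     <- sessions with an empty query text (idle)
+#
+# The fallback below re-adds the idle line when it is missing.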
+echo "$OUTPUT" | grep -q '^t ' || echo "t 0" echo '<<>>' -echo 'select *, pg_database_size(datname) as "datsize" from pg_stat_database;' \ +echo 'select datid, datname, numbackends, xact_commit, xact_rollback, blks_read, blks_hit, tup_returned, tup_fetched, tup_inserted, tup_updated, tup_deleted, pg_database_size(datname) "datsize" from pg_stat_database;' \ | su - $USER -c "psql -d postgres -A -F';'" | sed '$d' diff -Nru check-mk-1.2.2p3/plugins/mk_sap check-mk-1.2.6p12/plugins/mk_sap --- check-mk-1.2.2p3/plugins/mk_sap 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/plugins/mk_sap 2015-06-24 09:48:39.000000000 +0000 @@ -0,0 +1,499 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. + +# This agent plugin has been built to collect information from SAP R/3 systems +# using RFC calls. It needs the python module sapnwrfc (available in Check_MK +# git at agents/sap/sapnwrfc) and the nwrfcsdk (can be downloaded from SAP +# download portal) installed to be working. You can configure the agent plugin +# using the configuration file /etc/check_mk/sap.cfg (a sample file can be +# found in Check_MK git at agents/sap/sap.cfg) to tell it how to connect to +# your SAP instance and which values you want to fetch from your system to be +# forwarded to and checked by Check_MK. +# +# This current agent has been developed and tested with: +# python-sapnwrfc-0.19 +# +# During development the "CCMS_Doku.pdf" was really helpful. + +import os, sys, fcntl +import time, datetime + +# sapnwrfc needs to know where the libs are located. During +# development the import failed, since the module did not +# find the libraries. So we preload the library to have it +# already loaded. +try: + import sapnwrfc +except ImportError, e: + if 'sapnwrfc.so' in str(e): + sys.stderr.write( + 'Unable to find the library sapnwrfc.so. Maybe you need to put a file pointing to\n' + 'the sapnwrfc library directory into the /etc/ld.so.conf.d directory. 
For example\n' + 'create the file /etc/ld.so.conf.d/sapnwrfc.conf containing the path\n' + '"/usr/sap/nwrfcsdk/lib" and run "ldconfig" afterwards.\n' + ) + sys.exit(1) + elif 'No module named sapnwrfc' in str(e): + sys.stderr.write("Missing the Python module sapnwfrc.\n") + sys.exit(1) + else: + raise + +# ############################################################################# + +# This sign is used to separate the path parts given in the config +SEPARATOR = '/' + +# This are the different classes of monitoring objects which +# can be found in the tree. +# +# Summarizs information from several subnodes +MTE_SUMMARY = '050' +# A monitoring object which has several subnodes which lead to the status +# of this object. For example it is the "CPU" object on a host +MTE_MON_OBJ = '070' +# Contains performance information (which can be used to create graphs from) +MTE_PERFORMANCE = '100' +# Might contain several messages +MTE_MSG_CONTAINER = '101' +# Contains a single status message +MTE_SINGLE_MSG = '102' +# This is a long text label without status +MTE_LONG_TXT = '110' +# This is a short text label without status +MTE_SHORT_TXT = '111' +# Is a "folder" which has no own state, just computed by its childs +MTE_VIRTUAL = '199' + +# This map converts between the SAP color codes (key values) and the +# nagios state codes and strings +STATE_VALUE_MAP = { + 0: (0, 'OK'), # GRAY (inactive or no current info available) -> OK + 1: (0, 'OK'), # GREEN -> OK + 2: (1, 'WARN'), # YELLOW -> WARNING + 3: (2, 'CRIT'), # RED -> CRITICAL +} + +STATE_LOGWATCH_MAP = [ 'O', 'O', 'W', 'C' ] + +# Monitoring objects of these classes are skipped during processing +SKIP_MTCLASSES = [ + MTE_VIRTUAL, + MTE_SUMMARY, + MTE_MON_OBJ, + MTE_SHORT_TXT, + MTE_LONG_TXT, +] + +MK_CONFDIR = os.getenv("MK_CONFDIR") or "/etc/check_mk" +MK_VARDIR = os.getenv("MK_VARDIR") or "/var/lib/check_mk_agent" + +STATE_FILE = MK_VARDIR + '/sap.state' +state_file_changed = False + +# ############################################################################# + +# Settings to be used to connect to the SAP R/3 host. +local_cfg = { + 'ashost': 'localhost', + 'sysnr': '00', + 'client': '100', + 'user': '', + 'passwd': '', + 'trace': '3', + 'loglevel': 'warn', + #'lang': 'EN', +} + +# A list of strings, while the string must match the full path to one or +# several monitor objects. We use unix shell patterns during matching, so +# you can use several chars as placeholders: +# +# * matches everything +# ? matches any single character +# [seq] matches any character in seq +# [!seq] matches any character not in seq +# +# The * matches the whole following string and does not end on next "/". +# For examples, take a look at the default config file (/etc/check_mk/sap.cfg). +monitor_paths = [ + 'SAP CCMS Monitor Templates/Dialog Overview/*', +] +monitor_types = [] +config_file = MK_CONFDIR + '/sap.cfg' + +cfg = {} +if os.path.exists(config_file): + execfile(config_file) + if type(cfg) == dict: + cfg = [ cfg ] +else: + cfg = [ local_cfg ] + +# Load the state file into memory +try: + states = eval(file(STATE_FILE).read()) +except IOError: + states = {} + +# index of all logfiles which have been found in a run. This is used to +# remove logfiles which are not available anymore from the states dict. 
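+# A rough sketch of both bookkeeping structures (keys and values purely
+# illustrative): each key is a (hostname, logfile) pair, where the hostname
+# is the SAP SID and the logfile name is built from the monitoring context
+# and the object path, e.g.
+#
+#   states   = { ('C11', 'CCMS/Syslog'): datetime.datetime(2015, 6, 24, 9, 48, 0) }
+#   logfiles = [ ('C11', 'CCMS/Syslog') ]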
+logfiles = [] + +# ############################################################################# + +# +# HELPERS +# + +import fnmatch + +def to_be_monitored(path, toplevel_match = False): + for rule in monitor_paths: + if toplevel_match and rule.count('/') > 1: + rule = '/'.join(rule.split('/')[:2]) + + if fnmatch.fnmatch(path, rule): + return True + return False + +def node_path(tree, node, path = ''): + if path: + path = node['MTNAMESHRT'].rstrip() + SEPARATOR + path + else: + path = node['MTNAMESHRT'].rstrip() + + if node['ALPARINTRE'] > 0: + parent_node = tree[node['ALPARINTRE'] - 1] + return node_path(tree, parent_node, path) + return path + +# +# API ACCESS FUNCTIONS +# + +def query(what, params, debug = False): + fd = conn.discover(what) + + if debug: + print "Name: %s Params: %s" % (fd.name, fd.handle.parameters) + print "Given-Params: %s" % params + + f = fd.create_function_call() + for key, val in params.items(): + getattr(f, key)(val) + f.invoke() + + ret = f.RETURN.value + if ret['TYPE'] == 'E': + sys.stderr.write("ERROR: %s\n" % ret['MESSAGE'].strip()) + + return f + +def login(): + f = query('BAPI_XMI_LOGON', { + 'EXTCOMPANY': 'Mathias Kettner GmbH', + 'EXTPRODUCT': 'Check_MK SAP Agent', + 'INTERFACE': 'XAL', + 'VERSION': '1.0', + }) + #print f.RETURN + return f.SESSIONID.value + +def logout(): + query('BAPI_XMI_LOGOFF', { + 'INTERFACE': 'XAL', + }) + +def mon_list(cfg): + f = query("BAPI_SYSTEM_MON_GETLIST", { + 'EXTERNAL_USER_NAME': cfg['user'], + }) + l = [] + for mon in f.MONITOR_NAMES.value: + l.append((mon["MS_NAME"].rstrip(), mon["MONI_NAME"].rstrip())) + return l + +#def ms_list( cfg ): +# f = query("BAPI_SYSTEM_MS_GETLIST", { +# 'EXTERNAL_USER_NAME': cfg['user'], +# }) +# l = [] +# for ms in f.MONITOR_SETS.value: +# l.append(ms['NAME'].rstrip()) +# return l + +def mon_tree(cfg, ms_name, mon_name): + f = query("BAPI_SYSTEM_MON_GETTREE", { + 'EXTERNAL_USER_NAME': cfg['user'], + 'MONITOR_NAME': {"MS_NAME": ms_name, "MONI_NAME": mon_name}, + }) + tree = f.TREE_NODES.value + for node in tree: + node['PATH'] = ms_name + SEPARATOR + node_path(tree, node) + return tree + +def tid(node): + return { + 'MTSYSID': node['MTSYSID'].strip(), + 'MTMCNAME': node['MTMCNAME'].strip(), + 'MTNUMRANGE': node['MTNUMRANGE'].strip(), + 'MTUID': node['MTUID'].strip(), + 'MTCLASS': node['MTCLASS'].strip(), + 'MTINDEX': node['MTINDEX'].strip(), + 'EXTINDEX': node['EXTINDEX'].strip(), + } + +def mon_perfdata(cfg, node): + f = query('BAPI_SYSTEM_MTE_GETPERFCURVAL', { + 'EXTERNAL_USER_NAME': cfg['user'], + 'TID': tid(node), + }) + value = f.CURRENT_VALUE.value['LASTPERVAL'] + + f = query('BAPI_SYSTEM_MTE_GETPERFPROP', { + 'EXTERNAL_USER_NAME': cfg['user'], + 'TID': tid(node), + }) + if f.PROPERTIES.value['DECIMALS'] != 0: + value = (value + 0.0) / 10**f.PROPERTIES.value['DECIMALS'] + uom = f.PROPERTIES.value['VALUNIT'].strip() + + return value, uom + +def mon_msg(cfg, node): + f = query('BAPI_SYSTEM_MTE_GETSMVALUE', { + 'EXTERNAL_USER_NAME': cfg['user'], + 'TID': tid(node), + }) + data = f.VALUE.value + dt = parse_dt(data['SMSGDATE'], data['SMSGTIME']) + return (dt, data['MSG'].strip()) + +def parse_dt(d, t): + d = d.strip() + t = t.strip() + if not d or not t: + return None + else: + return datetime.datetime(*time.strptime(d + t, '%Y%m%d%H%M%S')[:6]) + +def mon_alerts(cfg, node): + f = query('BAPI_SYSTEM_MTE_GETALERTS', { + 'EXTERNAL_USER_NAME': cfg['user'], + 'TID': tid(node), + }) + return f.ALERTS.value + +def aid(alert): + return { + "ALSYSID": alert["ALSYSID"], + "MSEGNAME": 
alert["MSEGNAME"], + "ALUNIQNUM": alert["ALUNIQNUM"], + "ALINDEX": alert["ALINDEX"], + "ALERTDATE": alert["ALERTDATE"], + "ALERTTIME": alert["ALERTTIME"], + } + +def alert_details(cfg, alert): + f = query('BAPI_SYSTEM_ALERT_GETDETAILS', { + 'EXTERNAL_USER_NAME': cfg['user'], + 'AID': aid(alert), + }) + prop = f.PROPERTIES.value + state = f.VALUE.value + msg = f.XMI_EXT_MSG.value['MSG'].strip() + return state, msg + +def process_alerts(cfg, logs, ms_name, mon_name, node, alerts): + global state_file_changed + + sid = node["MTSYSID"].strip() or 'Other' + context = node["MTMCNAME"].strip() or 'Other' + path = node["PATH"] + + # Use the sid as hostname for the logs + hostname = sid + logfile = context + "/" + path + + logfiles.append((hostname, logfile)) + + logs.setdefault(sid, {}) + logs[hostname][logfile] = [] + newest_log_dt = None + for alert in alerts: + dt = parse_dt(alert['ALERTDATE'], alert['ALERTTIME']) + + if (hostname, logfile) in states and states[(hostname, logfile)] >= dt: + continue # skip log messages which are older than the last cached date + + if not newest_log_dt or dt > newest_log_dt: + newest_log_dt = dt # store the newest log of this run + + alert_state, alert_msg = alert_details(cfg, alert) + # Format lines to "logwatch" format + logs[hostname][logfile].append('%s %s %s' % (STATE_LOGWATCH_MAP[alert_state['VALUE']], + dt.strftime("%Y-%m-%d %H:%M:%S"), alert_msg)) + + if newest_log_dt: + # Write newest log age to cache to prevent double processing of logs + states[(hostname, logfile)] = newest_log_dt + state_file_changed = True + return logs + + + +def check(cfg): + global conn + conn = sapnwrfc.base.rfc_connect(cfg) + login() + + logs = {} + sap_data = {} + + # This loop is used to collect all information from SAP + for ms_name, mon_name in mon_list(cfg): + path = ms_name + SEPARATOR + mon_name + if not to_be_monitored(path, True): + continue + + tree = mon_tree(cfg, ms_name, mon_name) + for node in tree: + if not to_be_monitored(node['PATH']): + continue + #print node["PATH"] + + status_details = '' + perfvalue = '-' + uom = '-' + + # Use precalculated states + state = { + 'VALUE': node['ACTUALVAL'], + 'SEVERITY': node['ACTUALSEV'], + } + + if state['VALUE'] not in STATE_VALUE_MAP: + print 'UNHANDLED STATE VALUE' + sys.exit(1) + + # + # Handle different object classes individually + # to get details about them + # + + if monitor_types and node['MTCLASS'] not in monitor_types: + continue # Skip unwanted classes if class filtering is enabled + + if node['MTCLASS'] == MTE_PERFORMANCE: + perfvalue, this_uom = mon_perfdata(cfg, node) + uom = this_uom and this_uom or uom + + elif node['MTCLASS'] == MTE_SINGLE_MSG: + status_details = "%s: %s" % mon_msg(cfg, node) + + elif node['MTCLASS'] == MTE_MSG_CONTAINER: + + alerts = mon_alerts(cfg, node) + logs = process_alerts(cfg, logs, ms_name, mon_name, node, alerts) + if len(alerts) > 0: + last_alert = alerts[-1] + dt = parse_dt(last_alert["ALERTDATE"], last_alert["ALERTTIME"]) + alert_state, alert_msg = alert_details(cfg, last_alert) + last_msg = '%s: %s - %s' % (dt, STATE_VALUE_MAP[alert_state['VALUE']][1], alert_msg) + + status_details = '%d Messages, Last: %s' % (len(alerts), last_msg) + else: + status_details = 'The log is empty' + + elif node['MTCLASS'] not in SKIP_MTCLASSES: + # Add an error to output on unhandled classes + status_details = "UNHANDLED MTCLASS", node['MTCLASS'] + + if node['MTCLASS'] not in SKIP_MTCLASSES: + sid = node["MTSYSID"].strip() or 'Other' + context = node["MTMCNAME"].strip() or 'Other' + path = 
node["PATH"] + + sap_data.setdefault(sid, []) + sap_data[sid].append("%s\t%d\t%3d\t%s\t%s\t%s\t%s" % (context, state['VALUE'], + state['SEVERITY'], path, perfvalue, uom, status_details)) + + + for host, host_sap in sap_data.items(): + sys.stdout.write('<<<<%s>>>>\n' % host) + sys.stdout.write('<<>>\n') + print '\n'.join(host_sap) + sys.stdout.write('<<<<>>>>\n') + + for host, host_logs in logs.items(): + sys.stdout.write('<<<<%s>>>>\n' % host) + sys.stdout.write('<<>>\n') + for log, lines in host_logs.items(): + sys.stdout.write('[[[%s]]]\n' % log) + if lines: + sys.stdout.write('\n'.join(lines) + '\n') + sys.stdout.write('<<<<>>>>\n') + + logout() + conn.close() + +# It is possible to configure multiple SAP instances to monitor. Loop them all, but +# do not terminate when one connection failed +processed_all = True +try: + for entry in cfg: + try: + check(entry) + except sapnwrfc.RFCCommunicationError, e: + sys.stderr.write('ERROR: Unable to connect (%s)\n' % e) + processed_all = False + except Exception, e: + sys.stderr.write('ERROR: Unhandled exception (%s)\n' % e) + processed_all = False + + # Now check whether or not an old logfile needs to be removed. This can only + # be done this way, when all hosts have been reached. Otherwise the cleanup + # is skipped. + if processed_all: + for key in states.keys(): + if key not in logfiles: + state_file_changed = True + del states[key] + + # Only write the state file once per run. And only when it has been changed + if state_file_changed: + new_file = STATE_FILE + '.new' + fd = os.open(new_file, os.O_WRONLY | os.O_CREAT) + fcntl.flock(fd, fcntl.LOCK_EX) + os.write(fd, repr(states)) + os.close(fd) + os.rename(STATE_FILE+'.new', STATE_FILE) + +except Exception, e: + sys.stderr.write('ERROR: Unhandled exception (%s)\n' % e) + +sys.exit(0) diff -Nru check-mk-1.2.2p3/plugins/mk_tsm check-mk-1.2.6p12/plugins/mk_tsm --- check-mk-1.2.2p3/plugins/mk_tsm 2012-10-04 13:12:37.000000000 +0000 +++ check-mk-1.2.6p12/plugins/mk_tsm 2015-06-24 09:48:39.000000000 +0000 @@ -1,4 +1,27 @@ #!/usr/bin/ksh +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. # Agent for Linux/UNIX for Tivoli Storage Manager (TSM) @@ -20,7 +43,7 @@ do_tsm_checks () { - INST=${DSMSERV_DIR##*/} + INST=${DSMSERV_DIR##*/} # If we have no instance name, we take 'default' if [ -z "$INST" ] ; then INST=default ; fi @@ -44,11 +67,11 @@ $dsmcmd < /dev/null - { $ZYPPER pchk || [ $? 
= 100 -o $? = 101 ] && $ZYPPER lu ; } \ - | egrep '(patches needed|\|)' | egrep -v '^(#|Repository |Catalog )' + REFRESH=`$ZYPPER refresh 2>&1` + if [ "$REFRESH" ] + then + echo "ERROR: $REFRESH" + else + { $ZYPPER pchk || [ $? = 100 -o $? = 101 ] && $ZYPPER lu ; } \ + | egrep '(patches needed|\|)' | egrep -v '^(#|Repository |Catalog )' + fi else ZYPPER='waitmax 10 zypper --no-gpg-checks --non-interactive --quiet' - $ZYPPER refresh > /dev/null - { { $ZYPPER pchk || [ $? = 100 -o $? = 101 ] && $ZYPPER lp ; } ; $ZYPPER ll ; } \ - | egrep '(patches needed|\|)' | egrep -v '^(#|Repository)' + REFRESH=`$ZYPPER refresh 2>&1` + if [ "$REFRESH" ] + then + echo "ERROR: $REFRESH" + else + { { $ZYPPER pchk || [ $? = 100 -o $? = 101 ] && $ZYPPER lp ; } ; $ZYPPER ll ; } \ + | egrep '(patches needed|\|)' | egrep -v '^(#|Repository)' + fi fi fi diff -Nru check-mk-1.2.2p3/plugins/netstat.aix check-mk-1.2.6p12/plugins/netstat.aix --- check-mk-1.2.2p3/plugins/netstat.aix 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/plugins/netstat.aix 2015-06-24 09:48:39.000000000 +0000 @@ -0,0 +1,31 @@ +#!/bin/sh +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. + +# This is not part of the standard agent since it can produce much +# output data of the table is large. This plugin is just needed for +# checking if certain known TCP connections are established. + +echo '<<>>' +netstat -n -f inet | fgrep -v '*.*' | egrep '^(tcp|udp)' diff -Nru check-mk-1.2.2p3/plugins/netstat.linux check-mk-1.2.6p12/plugins/netstat.linux --- check-mk-1.2.2p3/plugins/netstat.linux 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/plugins/netstat.linux 2015-06-24 09:48:39.000000000 +0000 @@ -0,0 +1,31 @@ +#!/bin/sh +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. 
+# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. + +# This is not part of the standard agent since it can take very +# long to run if your TCP/UDP table is large. Netstat seems to +# have an execution time complexity of at least O(n^2) on Linux. + +echo '<<>>' +netstat -ntu | egrep '^(tcp|udp)' diff -Nru check-mk-1.2.2p3/plugins/nfsexports check-mk-1.2.6p12/plugins/nfsexports --- check-mk-1.2.2p3/plugins/nfsexports 2013-11-05 09:22:37.000000000 +0000 +++ check-mk-1.2.6p12/plugins/nfsexports 2015-06-24 09:48:39.000000000 +0000 @@ -1,18 +1,38 @@ -#!/bin/sh +#!/bin/bash +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. # this check will only run if we have a working nfs environment or SHOULD have one. # not tested for nfs3 # verify if there are exports defined in your local /etc/exports -if [ -r /etc/exports ]; then +if [ -r /etc/exports ]; then EXPORTS=$(grep -v -e ^# -e ^$ /etc/exports) fi -pgrep portmap 1>/dev/null && pgrep rpc.mountd 1>/dev/null && DAEMONS="ok" -# any exports or have running daemons? 
then look for registered exports -if [[ $EXPORTS ]]; then +if [ "$EXPORTS" ] && pgrep '(portmap|rpcbind)' >/dev/null && pgrep rpc.mountd >/dev/null +then echo "<<>>" - if [[ $DAEMONS ]]; then - waitmax 3 showmount --no-headers -e - fi -fi + waitmax 3 showmount --no-headers -e +fi diff -Nru check-mk-1.2.2p3/plugins/nfsexports.solaris check-mk-1.2.6p12/plugins/nfsexports.solaris --- check-mk-1.2.2p3/plugins/nfsexports.solaris 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/plugins/nfsexports.solaris 2015-06-24 09:48:39.000000000 +0000 @@ -0,0 +1,57 @@ +#!/usr/bin/bash +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. + +# Check_MK agent plugin for monitoring nfsexports on Solaris. This plugin +# has been tested with solaris 10 in a standalone and cluster setting. + +clusterconfigdir="/etc/cluster/ccr/global/directory" +if [ -r $clusterconfigdir ]; then + # is a clustered nfs server + nfsconfig=/etc/cluster/ccr/global/`grep rgm $clusterconfigdir | grep nfs | grep rg_` + if [ -r $nsconfig ]; then + Pathprefix=`grep Pathprefix $nfsconfig | awk {'print $2'}`/SUNW.nfs + dfstabfile=$Pathprefix/dfstab.`grep -v FilesystemMountPoints $nfsconfig | grep SUNW.nfs | \ + awk {'print $1'} | sed -e 's/RS_//'` + if [ -r $dfstabfile ]; then + EXPORTS=`grep -v ^# $dfstabfile | grep -v ^$` + ps -aef | grep nfsd | grep $Pathprefix >/dev/null && DAEMONS="ok" + fi + fi +else + # is a standalone nfs server + dfstabfile="/etc/dfs/dfstab" + if [ -r $dfstabfile ]; then + EXPORTS=`grep -v ^# $dfstabfile | grep -v ^$` + svcs -a | grep nfs/server | grep ^online >/dev/null && DAEMONS="ok" + fi +fi + +# any exports or have running daemons? then look for registered exports +if [ "$EXPORTS" ]; then + echo "<<>>" + if [ "$DAEMONS" ]; then + showmount -e | grep ^/ + fi +fi diff -Nru check-mk-1.2.2p3/plugins/nginx_status check-mk-1.2.6p12/plugins/nginx_status --- check-mk-1.2.2p3/plugins/nginx_status 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/plugins/nginx_status 2015-06-24 09:48:39.000000000 +0000 @@ -0,0 +1,146 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . 
\ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. + +# Check_MK-Agent-Plugin - Nginx Server Status +# +# Fetches the stub nginx_status page from detected or configured nginx +# processes to gather status information about this process. +# +# Take a look at the check man page for details on how to configure this +# plugin and check. +# +# By default this plugin tries to detect all locally running processes +# and to monitor them. If this is not good for your environment you might +# create an nginx_status.cfg file in MK_CONFDIR and populate the servers +# list to prevent executing the detection mechanism. + +import os, sys, urllib2, re + +# tell urllib2 not to honour "http(s)_proxy" env variables +urllib2.getproxies = lambda: {} + +config_dir = os.getenv("MK_CONFDIR", "/etc/check_mk") +config_file = config_dir + "/nginx_status.cfg" + +# None or list of (proto, ipaddress, port) tuples. +# proto is 'http' or 'https' +servers = None +ssl_ports = [ 443, ] + +if os.path.exists(config_file): + execfile(config_file) + +def try_detect_servers(): + pids = [] + results = [] + for line in os.popen('netstat -tlnp 2>/dev/null').readlines(): + parts = line.split() + # Skip lines with wrong format + if len(parts) < 7 or '/' not in parts[6]: + continue + + pid, proc = parts[6].split('/', 1) + to_replace = re.compile('^.*/') + proc = to_replace.sub('', proc) + + procs = [ 'nginx', 'nginx:' ] + # the pid/proc field length is limited to 19 chars. Thus in case of + # long PIDs, the process names are stripped of by that length. + # Workaround this problem here + procs = [ p[:19 - len(pid) - 1] for p in procs ] + + # Skip unwanted processes + if proc not in procs: + continue + + # Add only the first found port of a single server process + if pid in pids: + continue + pids.append(pid) + + proto = 'http' + address, port = parts[3].rsplit(':', 1) + port = int(port) + + # Use localhost when listening globally + if address == '0.0.0.0': + address = '127.0.0.1' + elif address == '::': + address = '::1' + + # Switch protocol if port is SSL port. 
In case you use SSL on another + # port you would have to change/extend the ssl_port list + if port in ssl_ports: + proto = 'https' + + results.append((proto, address, port)) + + return results + +if servers is None: + servers = try_detect_servers() + +if not servers: + sys.exit(0) + +print '<<>>' +for server in servers: + if isinstance(server, tuple): + proto, address, port = server + page = 'nginx_status' + else: + proto = server['protocol'] + address = server['address'] + port = server['port'] + page = server.get('page', 'nginx_status') + + try: + url = '%s://%s:%s/%s' % (proto, address, port, page) + # Try to fetch the status page for each server + try: + request = urllib2.Request(url, headers={"Accept" : "text/plain"}) + fd = urllib2.urlopen(request) + except urllib2.URLError, e: + if 'SSL23_GET_SERVER_HELLO:unknown protocol' in str(e): + # HACK: workaround misconfigurations where port 443 is used for + # serving non ssl secured http + url = 'http://%s:%s/%s' % (address, port, page) + fd = urllib2.urlopen(url) + else: + raise + + for line in fd.read().split('\n'): + if not line.strip(): + continue + if line.lstrip()[0] == '<': + # seems to be html output. Skip this server. + break + print address, port, line + except urllib2.HTTPError, e: + sys.stderr.write('HTTP-Error (%s:%d): %s %s\n' % (address, port, e.code, e)) + + except Exception, e: + sys.stderr.write('Exception (%s:%d): %s\n' % (address, port, e)) diff -Nru check-mk-1.2.2p3/plugins/pages/bi.py check-mk-1.2.6p12/plugins/pages/bi.py --- check-mk-1.2.2p3/plugins/pages/bi.py 2013-11-05 09:23:09.000000000 +0000 +++ check-mk-1.2.6p12/plugins/pages/bi.py 2015-06-24 09:48:38.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. diff -Nru check-mk-1.2.2p3/plugins/pages/cron.py check-mk-1.2.6p12/plugins/pages/cron.py --- check-mk-1.2.2p3/plugins/pages/cron.py 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/plugins/pages/cron.py 2015-09-21 10:59:54.000000000 +0000 @@ -0,0 +1,31 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. 
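+
+# pagehandlers maps a page name, which the GUI derives from the requested
+# URL (run_cron.py -> "run_cron"), to the function that renders that page.
+# A minimal sketch of the lookup performed with this table (the dispatch
+# code below is illustrative, not part of this file):
+#
+#   handler = pagehandlers.get(page_name)
+#   if handler:
+#       handler()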
+ +import cron + +pagehandlers.update({ + "run_cron" : cron.page_run_cron, +}) diff -Nru check-mk-1.2.2p3/plugins/pages/mobile.py check-mk-1.2.6p12/plugins/pages/mobile.py --- check-mk-1.2.2p3/plugins/pages/mobile.py 2013-11-05 09:23:09.000000000 +0000 +++ check-mk-1.2.6p12/plugins/pages/mobile.py 2015-06-24 09:48:38.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. @@ -24,8 +24,6 @@ # to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, # Boston, MA 02110-1301 USA. -#!/usr/bin/python - import mobile pagehandlers.update({ diff -Nru check-mk-1.2.2p3/plugins/pages/shipped.py check-mk-1.2.6p12/plugins/pages/shipped.py --- check-mk-1.2.2p3/plugins/pages/shipped.py 2013-11-05 09:23:09.000000000 +0000 +++ check-mk-1.2.6p12/plugins/pages/shipped.py 2015-09-21 10:59:54.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. @@ -30,12 +30,19 @@ import main import logwatch import views +import prediction import sidebar import actions import weblib import dashboard import login import help +import bi +import userdb +import notify +import webapi +import visuals +import crashed_check # map URLs to page rendering functions @@ -46,33 +53,56 @@ "ajax_switch_help" : help.ajax_switch_help, "switch_site" : main.ajax_switch_site, "edit_views" : views.page_edit_views, + "create_view" : views.page_create_view, + "create_view_infos" : views.page_create_view_infos, "edit_view" : views.page_edit_view, - "get_edit_column" : views.ajax_get_edit_column, "count_context_button" : views.ajax_count_button, "export_views" : views.ajax_export, "ajax_set_viewoption" : views.ajax_set_viewoption, "ajax_set_rowselection" : weblib.ajax_set_rowselection, "view" : views.page_view, + "prediction_graph" : prediction.page_graph, "logwatch" : logwatch.page_show, "side" : sidebar.page_side, "sidebar_add_snapin" : sidebar.page_add_snapin, "sidebar_snapin" : sidebar.ajax_snapin, + "sidebar_fold" : sidebar.ajax_fold, "sidebar_openclose" : sidebar.ajax_openclose, "sidebar_move_snapin" : sidebar.move_snapin, "sidebar_ajax_speedometer" : sidebar.ajax_speedometer, + "sidebar_ajax_tag_tree" : sidebar.ajax_tag_tree, + "sidebar_ajax_tag_tree_enter": sidebar.ajax_tag_tree_enter, + "sidebar_get_messages" : sidebar.ajax_get_messages, + "sidebar_message_read" : sidebar.ajax_message_read, + "ajax_search" : sidebar.ajax_search, + "search_open" : sidebar.search_open, "switch_master_state" : sidebar.ajax_switch_masterstate, "add_bookmark" : sidebar.ajax_add_bookmark, "del_bookmark" : sidebar.ajax_del_bookmark, "tree_openclose" : weblib.ajax_tree_openclose, "edit_bookmark" : sidebar.page_edit_bookmark, "nagios_action" : actions.ajax_action, + "dashboard" : dashboard.page_dashboard, - "dashboard_resize" : dashboard.ajax_resize, - "dashlet_overview" : dashboard.dashlet_overview, - "dashlet_mk_logo" : dashboard.dashlet_mk_logo, - "dashlet_hoststats" : dashboard.dashlet_hoststats, - "dashlet_servicestats" : dashboard.dashlet_servicestats, - 
"dashlet_pnpgraph" : dashboard.dashlet_pnpgraph, - "dashlet_nodata" : dashboard.dashlet_nodata, + "dashboard_dashlet" : dashboard.ajax_dashlet, + "edit_dashboards" : dashboard.page_edit_dashboards, + "create_dashboard" : dashboard.page_create_dashboard, + "edit_dashboard" : dashboard.page_edit_dashboard, + "edit_dashlet" : dashboard.page_edit_dashlet, + "delete_dashlet" : dashboard.page_delete_dashlet, + "create_view_dashlet" : dashboard.page_create_view_dashlet, + "create_view_dashlet_infos": dashboard.page_create_view_dashlet_infos, + "ajax_dashlet_pos" : dashboard.ajax_dashlet_pos, + + "ajax_popup_add_visual" : visuals.ajax_popup_add, + "ajax_add_visual" : visuals.ajax_add_visual, + + "ajax_userdb_sync" : userdb.ajax_sync, + "notify" : notify.page_notify, + "ajax_inv_render_tree" : views.ajax_inv_render_tree, + + "webapi" : webapi.page_api, + + "crashed_check" : crashed_check.page_crashed_check, }) diff -Nru check-mk-1.2.2p3/plugins/pages/wato.py check-mk-1.2.6p12/plugins/pages/wato.py --- check-mk-1.2.2p3/plugins/pages/wato.py 2013-11-05 09:23:09.000000000 +0000 +++ check-mk-1.2.6p12/plugins/pages/wato.py 2015-09-21 10:59:54.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. @@ -27,11 +27,15 @@ import wato pagehandlers.update({ - "wato" : wato.page_handler, - "wato_ajax_replication" : wato.ajax_replication, - "wato_ajax_activation" : wato.ajax_activation, - "automation_login" : wato.page_automation_login, - "automation" : wato.page_automation, - "user_profile" : wato.page_user_profile, - "ajax_set_foldertree" : wato.ajax_set_foldertree, + "wato" : wato.page_handler, + "wato_ajax_replication" : wato.ajax_replication, + "wato_ajax_activation" : wato.ajax_activation, + "automation_login" : wato.page_automation_login, + "automation" : wato.page_automation, + "user_profile" : wato.page_user_profile, + "user_change_pw" : lambda: wato.page_user_profile(change_pw=True), + "ajax_set_foldertree" : wato.ajax_set_foldertree, + "wato_ajax_diag_host" : wato.ajax_diag_host, + "wato_ajax_profile_repl" : wato.ajax_profile_repl, + "wato_ajax_execute_check" : wato.ajax_execute_check, }) diff -Nru check-mk-1.2.2p3/plugins/perfometer/active_checks.py check-mk-1.2.6p12/plugins/perfometer/active_checks.py --- check-mk-1.2.2p3/plugins/perfometer/active_checks.py 2013-11-05 09:23:09.000000000 +0000 +++ check-mk-1.2.6p12/plugins/perfometer/active_checks.py 2015-06-24 09:48:38.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. 
@@ -35,7 +35,10 @@
 perfometers["check_mk_active-tcp"] = perfometer_check_tcp
 
 def perfometer_check_http(row, check_command, perfdata):
-    time_ms = float(perfdata[0][1]) * 1000.0
+    try:
+        time_ms = float(perfdata[0][1]) * 1000.0
+    except:
+        time_ms = 0
     return "%.1f ms" % time_ms, \
         perfometer_logarithmic(time_ms, 1000, 10, "#66ccff")
 
diff -Nru check-mk-1.2.2p3/plugins/perfometer/check_mk.py check-mk-1.2.6p12/plugins/perfometer/check_mk.py
--- check-mk-1.2.2p3/plugins/perfometer/check_mk.py 2013-11-05 09:23:09.000000000 +0000
+++ check-mk-1.2.6p12/plugins/perfometer/check_mk.py 2015-09-21 10:59:54.000000000 +0000
@@ -7,7 +7,7 @@
 # |           | |___| | | |  __/ (__|   <    | |  | | . \            |
 # |            \____|_| |_|\___|\___|_|\_\___|_|  |_|_|\_\           |
 # |                                                                  |
-# | Copyright Mathias Kettner 2013             mk@mathias-kettner.de |
+# | Copyright Mathias Kettner 2014             mk@mathias-kettner.de |
 # +------------------------------------------------------------------+
 #
 # This file is part of Check_MK.
@@ -47,19 +47,59 @@
     else:
         color = "#f44"
-    return "%.1fs" % exectime, perfometer_linear(perc, color)
+    return "%.1f s" % exectime, perfometer_linear(perc, color)
 
 perfometers["check-mk"] = perfometer_check_mk
 
 def perfometer_check_mk_df(row, check_command, perf_data):
-    h = '<table><tr>'
     varname, value, unit, warn, crit, minn, maxx = perf_data[0]
+
+    hours_left = None
+    for data in perf_data:
+        if data[0] == "trend_hoursleft":
+            hours_left = float(data[1])
+            break
+
     perc_used = 100 * (float(value) / float(maxx))
     perc_free = 100 - float(perc_used)
-    color = { 0: "#0f8", 1: "#ff2", 2: "#f22", 3: "#fa2" }[row["service_state"]]
-    h += perfometer_td(perc_used, color)
-    h += perfometer_td(perc_free, "white")
-    h += "</tr></table>"
-    return "%d%%" % perc_used, h
+    if hours_left or hours_left == 0:
+        h = '<table><tr>'
+        h += perfometer_td(perc_used, "#00ffc6")
+        h += perfometer_td(perc_free, "white")
+        h += "</tr></table><table><tr>"
+
+        if hours_left == -1.0:
+            h += perfometer_td(100, "#39c456")
+            h += '</tr></table>'
+            return "%0.1f%% / not growing" % (perc_used), h
+
+        days_left = hours_left / 24
+        if days_left > 30:
+            color = "#39c456" # OK
+        elif days_left < 7:
+            color = "#d94747" # CRIT
+        else:
+            color = "#d7d139" # WARN
+
+        half = math.log(30.0, 2) # value to be displayed at 50%
+        pos = 50 + 10.0 * (math.log(days_left, 2) - half)
+        if pos < 2:
+            pos = 2
+        if pos > 98:
+            pos = 98
+        h += perfometer_td(100 - pos, color)
+        h += perfometer_td(pos, "white")
+        h += '</tr></table>'
+        if days_left > 365:
+            days_left = " >365"
+        else:
+            days_left = "%0.1f" % days_left
+        return "%0.1f%%/%s days left" % (perc_used, days_left), h
+    else:
+        h = '<table><tr>'
+        h += perfometer_td(perc_used, "#00ffc6")
+        h += perfometer_td(perc_free, "white")
+        h += "</tr></table>"
+        return "%0.2f %%" % perc_used, h
 
 perfometers["check_mk-df"] = perfometer_check_mk_df
 perfometers["check_mk-vms_df"] = perfometer_check_mk_df
@@ -71,6 +111,56 @@
 perfometers["check_mk-hr_fs"] = perfometer_check_mk_df
 perfometers["check_mk-oracle_asm_diskgroup"] = perfometer_check_mk_df
 perfometers["check_mk-mysql_capacity"] = perfometer_check_mk_df
+perfometers["check_mk-esx_vsphere_counters.ramdisk"] = perfometer_check_mk_df
+perfometers["check_mk-hitachi_hnas_span"] = perfometer_check_mk_df
+perfometers["check_mk-hitachi_hnas_volume"] = perfometer_check_mk_df
+perfometers["check_mk-emcvnx_raidgroups.capacity"] = perfometer_check_mk_df
+perfometers["check_mk-emcvnx_raidgroups.capacity_contiguous"] = perfometer_check_mk_df
+perfometers["check_mk-ibm_svc_mdiskgrp"] = perfometer_check_mk_df
+perfometers["check_mk-fast_lta_silent_cubes.capacity"] = perfometer_check_mk_df
+perfometers["check_mk-fast_lta_volumes"] = perfometer_check_mk_df
+perfometers["check_mk-libelle_business_shadow.archive_dir"] = perfometer_check_mk_df
+
+def perfometer_esx_vsphere_datastores(row, check_command, perf_data):
+    used_mb = perf_data[0][1]
+    maxx = perf_data[0][-1]
+    # perf data might be incomplete, if trending perfdata is off...
+    uncommitted_mb = 0
+    for entry in perf_data:
+        if entry[0] == "uncommitted":
+            uncommitted_mb = entry[1]
+            break
+
+    perc_used = 100 * (float(used_mb) / float(maxx))
+    perc_uncommitted = 100 * (float(uncommitted_mb) / float(maxx))
+    perc_totally_free = 100 - perc_used - perc_uncommitted
+
+    h = '<table><tr>'
+    if perc_used + perc_uncommitted <= 100:
+        # Regular handling, no overcommit
+        h += perfometer_td(perc_used, "#00ffc6")
+        h += perfometer_td(perc_uncommitted, "#eeccff")
+        h += perfometer_td(perc_totally_free, "white")
+    else:
+        # Visualize overcommitted space by scaling to the total overcommitment value
+        # and drawing the capacity as a red line in the perfometer
+        total = perc_used + perc_uncommitted
+        perc_used_bar = perc_used * 100 / total
+        perc_uncommitted_bar = perc_uncommitted * 100 / total
+        perc_free = (100 - perc_used) * 100 / total
+
+        h += perfometer_td(perc_used_bar, "#00ffc6")
+        h += perfometer_td(perc_free, "#eeccff")
+        h += perfometer_td(1, "red") # This line visualizes the capacity
+        h += perfometer_td(perc_uncommitted - perc_free, "#eeccff")
+    h += "</tr></table>"
+
+    legend = "%0.2f%%" % perc_used
+    if uncommitted_mb:
+        legend += " (+%0.2f%%)" % perc_uncommitted
+    return legend, h
+
+perfometers["check_mk-esx_vsphere_datastores"] = perfometer_esx_vsphere_datastores
 
 def perfometer_check_mk_kernel_util(row, check_command, perf_data):
@@ -101,11 +191,7 @@
     state = row["service_state"]
     # paint used ram and swap
-    ram_color, swap_color = {
-        0:("#80ff40", "#008030"),
-        1:("#ff2", "#dd0"),
-        2:("#f44", "#d00"),
-        3:("#fa2", "#d80") }[state]
+    ram_color, swap_color = "#80ff40", "#008030"
 
     h += perfometer_td(100 * ram_used / virt_total, ram_color)
     h += perfometer_td(100 * swap_used / virt_total, swap_color)
@@ -120,13 +206,13 @@
     return "%d%%" % (100 * (virt_used / ram_total)), h
 
 perfometers["check_mk-mem.used"] = perfometer_check_mk_mem_used
+perfometers["check_mk-aix_memory"] = perfometer_check_mk_mem_used
 perfometers["check_mk-hr_mem"] = perfometer_check_mk_mem_used
 
 def perfometer_check_mk_mem_win(row, check_command, perf_data):
     # only show mem usage, omit the page file
-    base_colors = ("#20d060", "#3040d0")
     state = row["service_state"]
-    color = { 0: "#20d060", 1: "#ff2", 2: "#f44", 3: "#fa2",}[state]
+    color = "#5090c0"
     ram_total = float(perf_data[0][6])
     ram_used = float(perf_data[0][1])
     perc = ram_used / ram_total * 100.0
@@ -152,9 +238,12 @@
     load = float(perf_data[0][1])
     return "%.1f" % load, perfometer_logarithmic(load, 4, 2, color)
 
-
 perfometers["check_mk-cpu.loads"] = perfometer_check_mk_cpu_loads
 perfometers["check_mk-ucd_cpu_load"] = perfometer_check_mk_cpu_loads
+perfometers["check_mk-statgrab_load"] = perfometer_check_mk_cpu_loads
+perfometers["check_mk-hpux_cpu"] = perfometer_check_mk_cpu_loads
+perfometers["check_mk-blade_bx_load"] = perfometer_check_mk_cpu_loads
+
 
 def perfometer_check_mk_ntp(row, check_command, perf_data, unit = "ms"):
     offset = float(perf_data[0][1])
@@ -179,15 +268,16 @@
     h += perfometer_td(50, "#fff")
     h += '</tr></table>'
-    return "%.1f %s" % (offset, unit), h
+    return "%.2f %s" % (offset, unit), h
 
 perfometers["check_mk-ntp"] = perfometer_check_mk_ntp
 perfometers["check_mk-ntp.time"] = perfometer_check_mk_ntp
+perfometers["check_mk-chrony"] = perfometer_check_mk_ntp
 perfometers["check_mk-systemtime"] = lambda r, c, p: perfometer_check_mk_ntp(r, c, p, "s")
 
 def perfometer_ipmi_sensors(row, check_command, perf_data):
     state = row["service_state"]
-    color = { 0: "#39f", 1: "#ff2", 2: "#f22", 3: "#fa2" }[state]
+    color = "#39f"
     value = float(perf_data[0][1])
     crit = savefloat(perf_data[0][4])
     if not crit:
@@ -218,10 +308,9 @@
 
 def perfometer_temperature(row, check_command, perf_data):
     state = row["service_state"]
-    color = { 0: "#39f", 1: "#ff2", 2: "#f22", 3: "#fa2" }[state]
+    color = "#39f"
     value = float(perf_data[0][1])
-    crit = savefloat(perf_data[0][4])
-    return u"%d°C" % int(value), perfometer_logarithmic(value, 40, 1.2, color)
+    return u"%d °C" % int(value), perfometer_logarithmic(value, 40, 1.2, color)
 
 perfometers["check_mk-nvidia.temp"] = perfometer_temperature
 perfometers["check_mk-cisco_temp_sensor"] = perfometer_temperature
@@ -229,10 +318,97 @@
 perfometers["check_mk-cmctc_lcp.temp"] = perfometer_temperature
 perfometers["check_mk-cmctc.temp"] = perfometer_temperature
 perfometers["check_mk-smart.temp"] = perfometer_temperature
-perfometers["check_mk-f5_bigip_temp"] = perfometer_temperature
+perfometers["check_mk-f5_bigip_chassis_temp"] = perfometer_temperature
+perfometers["check_mk-f5_bigip_cpu_temp"] = perfometer_temperature
 perfometers["check_mk-hp_proliant_temp"] = perfometer_temperature
 perfometers["check_mk-akcp_sensor_temp"] = perfometer_temperature
+perfometers["check_mk-akcp_daisy_temp"] = perfometer_temperature
 perfometers["check_mk-fsc_temp"] = perfometer_temperature
+perfometers["check_mk-viprinet_temp"] = perfometer_temperature
+perfometers["check_mk-hwg_temp"] = perfometer_temperature
+perfometers["check_mk-sensatronics_temp"] = perfometer_temperature
+perfometers["check_mk-apc_inrow_temperature"] = perfometer_temperature
+perfometers["check_mk-hitachi_hnas_temp"] = perfometer_temperature
+perfometers["check_mk-dell_poweredge_temp"] = perfometer_temperature
+perfometers["check_mk-dell_chassis_temp"] = perfometer_temperature
+perfometers["check_mk-dell_om_sensors"] = perfometer_temperature
+perfometers["check_mk-innovaphone_temp"] = perfometer_temperature
+perfometers["check_mk-cmciii.temp"] = perfometer_temperature
+perfometers["check_mk-ibm_svc_enclosurestats.temp"] = perfometer_temperature
+perfometers["check_mk-wagner_titanus_topsense.temp"] = perfometer_temperature
+perfometers["check_mk-enterasys_temp"] = perfometer_temperature
+perfometers["check_mk-adva_fsp_temp"] = perfometer_temperature
+perfometers["check_mk-allnet_ip_sensoric.temp"] = perfometer_temperature
+perfometers["check_mk-qlogic_sanbox.temp"] = perfometer_temperature
+perfometers["check_mk-bintec_sensors.temp"] = perfometer_temperature
+perfometers["check_mk-knuerr_rms_temp"] = perfometer_temperature
+perfometers["check_mk-arris_cmts_temp"] = perfometer_temperature
+perfometers["check_mk-casa_cpu_temp"] = perfometer_temperature
+perfometers["check_mk-rms200_temp"] = perfometer_temperature
+perfometers["check_mk-juniper_screenos_temp"] = perfometer_temperature
+perfometers["check_mk-lnx_thermal"] = perfometer_temperature
+perfometers["check_mk-climaveneta_temp"] = perfometer_temperature
+perfometers["check_mk-carel_sensors"] = perfometer_temperature
+perfometers["check_mk-ucs_bladecenter_fans.temp"] = perfometer_temperature
+perfometers["check_mk-ucs_bladecenter_psu.chassis_temp"] = perfometer_temperature
+perfometers["check_mk-cisco_temperature"] = perfometer_temperature
+
+def perfometer_temperature_multi(row, check_command, perf_data):
+    display_value = -1
+    display_color = "#60f020"
+
+    for sensor, value, uom, warn, crit, min, max in perf_data:
+        value=saveint(value)
+        if value > display_value:
+            display_value=value
+
+    if display_value > saveint(warn):
+        display_color = "#FFC840"
+    if display_value > saveint(crit):
+        display_color = "#FF0000"
+
+    display_string = "%s °C" % display_value
+    return display_string, perfometer_linear(display_value, display_color)
+
+perfometers["check_mk-brocade_mlx_temp"] = perfometer_temperature_multi
+
+def perfometer_power(row, check_command, perf_data):
+    display_color = "#60f020"
+
+    value=savefloat(perf_data[0][1])
+    crit=savefloat(perf_data[0][4])
+    warn=savefloat(perf_data[0][3])
+    power_perc = value/crit*90 # critical is at 90% to allow for more than crit
+
+    if value > warn:
+        display_color = "#FFC840"
+    if value > crit:
+        display_color = "#FF0000"
+
+    display_string = "%.1f Watt" % value
+    return display_string, perfometer_linear(power_perc, display_color)
+
+perfometers["check_mk-dell_poweredge_amperage.power"] = perfometer_power
+perfometers["check_mk-dell_chassis_power"] = perfometer_power
+perfometers["check_mk-dell_chassis_powersupplies"] = perfometer_power
+perfometers["check_mk-hp-proliant_power"] = perfometer_power
+
+def perfometer_power_simple(row, check_command, perf_data):
+    watt = int(perf_data[0][1])
+    text = "%s Watt" % watt
+    return text, perfometer_logarithmic(watt, 150, 2, "#60f020")
+
+perfometers["check_mk-ibm_svc_enclosurestats.power"] = perfometer_power_simple
+perfometers["check_mk-sentry_pdu"] = perfometer_power_simple
+
+def perfometer_users(row, check_command, perf_data):
+    state = row["service_state"]
+    color = "#39f"
+    value = float(perf_data[0][1])
+    crit = savefloat(perf_data[0][4])
+    return u"%d users" % int(value), perfometer_logarithmic(value, 50, 2, color)
+
+perfometers["check_mk-hitachi_hnas_cifs"] = perfometer_users
 
 def perfometer_blower(row, check_command, perf_data):
     rpm = saveint(perf_data[0][1])
@@ -292,6 +468,31 @@
         unit = unit
     )
 
+perfometers["check_mk-if"] = perfometer_check_mk_if
+perfometers["check_mk-if64"] = perfometer_check_mk_if
+perfometers["check_mk-if64_tplink"] = perfometer_check_mk_if
+perfometers["check_mk-winperf_if"] = perfometer_check_mk_if
+perfometers["check_mk-vms_if"] = perfometer_check_mk_if
+perfometers["check_mk-if_lancom"] = perfometer_check_mk_if
+perfometers["check_mk-lnx_if"] = perfometer_check_mk_if
+perfometers["check_mk-hpux_if"] = perfometer_check_mk_if
+perfometers["check_mk-mcdata_fcport"] = perfometer_check_mk_if
+perfometers["check_mk-esx_vsphere_counters.if"] = perfometer_check_mk_if
+perfometers["check_mk-hitachi_hnas_fc_if"] = perfometer_check_mk_if
+perfometers["check_mk-ucs_bladecenter_if"] = perfometer_check_mk_if
+
+def perfometer_check_mk_fc_port(row, check_command, perf_data):
+    unit = "B"
+    return perfometer_bandwidth(
+        in_traffic = savefloat(perf_data[0][1]),
+        out_traffic = savefloat(perf_data[1][1]),
+        in_bw = savefloat(perf_data[0][6]),
+        out_bw = savefloat(perf_data[1][6]),
+        unit = unit
+    )
+perfometers["check_mk-fc_port"] = perfometer_check_mk_fc_port
+
+
 def perfometer_check_mk_brocade_fcport(row, check_command, perf_data):
     return perfometer_bandwidth(
         in_traffic = savefloat(perf_data[0][1]),
@@ -300,14 +501,21 @@
         out_bw = savefloat(perf_data[1][6]),
     )
 
-perfometers["check_mk-if"] = perfometer_check_mk_if
-perfometers["check_mk-if64"] = perfometer_check_mk_if
-perfometers["check_mk-vms_if"] = perfometer_check_mk_if
-perfometers["check_mk-if_lancom"] = perfometer_check_mk_if
-perfometers["check_mk-lnx_if"] = perfometer_check_mk_if
-perfometers["check_mk-hpux_if"] = perfometer_check_mk_if
-perfometers["check_mk-mcdata_fcport"] = perfometer_check_mk_if
 perfometers["check_mk-brocade_fcport"] = perfometer_check_mk_brocade_fcport
+perfometers["check_mk-qlogic_fcport"] = perfometer_check_mk_brocade_fcport
+
+def perfometer_check_mk_cisco_qos(row, check_command, perf_data):
+    unit = "Bit/s" in row["service_plugin_output"] and "Bit" or "B"
+    return perfometer_bandwidth(
+        in_traffic = savefloat(perf_data[0][1]),
+        out_traffic = savefloat(perf_data[1][1]),
+        in_bw = savefloat(perf_data[0][5]) ,
+        out_bw = savefloat(perf_data[1][5]) ,
+        unit = unit
+    )
+
+perfometers["check_mk-cisco_qos"] = perfometer_check_mk_cisco_qos
+
 
 def perfometer_oracle_tablespaces(row, check_command, perf_data):
     current = float(perf_data[0][1])
@@ -324,9 +532,34 @@
 
 perfometers["check_mk-oracle_tablespaces"] = perfometer_oracle_tablespaces
 
+def perfometer_check_oracle_dataguard_stats(row, check_command, perf_data):
+    perfdata_found = False
+    perfdata1 = ''
+
+    for data in perf_data:
+        if data[0] == "apply_lag":
+            color = '#80F000'
+
+            perfdata_found = True
+            days, rest = divmod(int(data[1]), 60*60*24)
+            hours, rest = divmod(rest, 60*60)
+            minutes, seconds = divmod(rest, 60)
+            perfdata1 = data[1]
+
+
+    if perfdata_found == False:
+        days = 0
+        hours = 0
+        minutes = 0
+        color = "#008f48";
+
+    return "%02dd %02dh %02dm" % (days, hours, minutes), perfometer_logarithmic(perfdata1, 2592000, 2, color)
+
+perfometers["check_mk-oracle_dataguard_stats"] = perfometer_check_oracle_dataguard_stats
+
 def perfometer_oracle_sessions(row, check_command, perf_data):
-    if check_command == "check_mk-oracle_sessions":
-        color = "#00ff48";
+    if check_command != "check_mk-oracle_sessions":
+        color = "#008f48";
         unit = "";
     else:
         color = "#4800ff";
@@ -336,23 +569,43 @@
 
 perfometers["check_mk-oracle_sessions"] = perfometer_oracle_sessions
 perfometers["check_mk-oracle_logswitches"] = perfometer_oracle_sessions
+perfometers["check_mk-oracle_processes"] = perfometer_oracle_sessions
 
 def perfometer_cpu_utilization(row, check_command, perf_data):
     util = float(perf_data[0][1]) # is already percentage
-    color = "#cf2"
-    if perf_data[0][3]:
-        warn = float(perf_data[0][3])
-        crit = float(perf_data[0][4])
-        if util < warn:
-            color = "#6f2"
-        elif util < crit:
-            color = "#9f2"
-
-    return "%.0f%%" % util, perfometer_linear(util, color)
+    color = "#60c080"
+    return "%.0f %%" % util, perfometer_linear(util, color) #perfometer_linear(perc, color)
 
 perfometers["check_mk-h3c_lanswitch_cpu"] = perfometer_cpu_utilization
 perfometers["check_mk-winperf_processor.util"] = perfometer_cpu_utilization
+perfometers["check_mk-netapp_cpu"] = perfometer_cpu_utilization
+perfometers["check_mk-cisco_cpu"] = perfometer_cpu_utilization
+perfometers["check_mk-juniper_cpu"] = perfometer_cpu_utilization
+perfometers["check_mk-brocade_mlx.module_cpu"] = perfometer_cpu_utilization
+perfometers["check_mk-hitachi_hnas_cpu"] = perfometer_cpu_utilization
+perfometers["check_mk-hitachi_hnas_fpga"] = perfometer_cpu_utilization
+perfometers["check_mk-hr_cpu"] = perfometer_cpu_utilization
+perfometers["check_mk-innovaphone_cpu"] = perfometer_cpu_utilization
+perfometers["check_mk-enterasys_cpu_util"] = perfometer_cpu_utilization
+perfometers["check_mk-juniper_trpz_cpu_util"] = perfometer_cpu_utilization
+perfometers["check_mk-ibm_svc_nodestats.cpu_util"] = perfometer_cpu_utilization
+perfometers["check_mk-ibm_svc_systemstats.cpu_util"] = perfometer_cpu_utilization
+perfometers["check_mk-sni_octopuse_cpu"] = perfometer_cpu_utilization
+perfometers["check_mk-casa_cpu_util"] = perfometer_cpu_utilization
+perfometers["check_mk-juniper_screenos_cpu"] = perfometer_cpu_utilization
+
+def perfometer_ps_perf(row, check_command, perf_data):
+    perf_dict = dict([(p[0], float(p[1])) for p in perf_data])
+    try:
+        perc = perf_dict["pcpu"]
+        return "%.1f%%" % perc, perfometer_linear(perc, "#30ff80")
+    except:
+        return "", ""
+
+perfometers["check_mk-ps"] = perfometer_ps_perf
+perfometers["check_mk-ps.perf"] = perfometer_ps_perf
+
 
 def perfometer_hpux_snmp_cs_cpu(row, check_command, perf_data):
     h = '<table><tr>'
@@ -368,7 +621,7 @@
 
 
 def perfometer_check_mk_uptime(row, check_command, perf_data):
-    days, rest = divmod(int(perf_data[0][1]), 60*60*24)
+    days, rest = divmod(int(float(perf_data[0][1])), 60*60*24)
     hours, rest = divmod(rest, 60*60)
     minutes, seconds = divmod(rest, 60)
 
@@ -376,6 +629,8 @@
 
 perfometers["check_mk-uptime"] = perfometer_check_mk_uptime
 perfometers["check_mk-snmp_uptime"] = perfometer_check_mk_uptime
+perfometers["check_mk-esx_vsphere_counters.uptime"] = perfometer_check_mk_uptime
+perfometers["check_mk-oracle_instance"] = perfometer_check_mk_uptime
 
 
 def perfometer_check_mk_diskstat(row, check_command, perf_data):
@@ -386,7 +641,7 @@
     read_bytes = float(perf_data[0][1])
     write_bytes = float(perf_data[1][1])
 
-    text = "%-.2fM/s %-.2fM/s" % \
+    text = 
"%-.2f M/s %-.2f M/s" % \ (read_bytes / (1024*1024.0), write_bytes / (1024*1024.0)) return text, perfometer_logarithmic_dual( @@ -395,8 +650,62 @@ perfometers["check_mk-diskstat"] = perfometer_check_mk_diskstat perfometers["check_mk-winperf_phydisk"] = perfometer_check_mk_diskstat perfometers["check_mk-hpux_lunstats"] = perfometer_check_mk_diskstat +perfometers["check_mk-aix_diskiod"] = perfometer_check_mk_diskstat perfometers["check_mk-mysql.innodb_io"] = perfometer_check_mk_diskstat +perfometers["check_mk-esx_vsphere_counters.diskio"] = perfometer_check_mk_diskstat +perfometers["check_mk-emcvnx_disks"] = perfometer_check_mk_diskstat +perfometers["check_mk-ibm_svc_nodestats.diskio"] = perfometer_check_mk_diskstat +perfometers["check_mk-ibm_svc_systemstats.diskio"] = perfometer_check_mk_diskstat + +def perfometer_check_mk_iops_r_w(row, check_command, perf_data): + iops_r = float(perf_data[0][1]) + iops_w = float(perf_data[1][1]) + text = "%.0f IO/s %.0f IO/s" % (iops_r, iops_w) + return text, perfometer_logarithmic_dual( + iops_r, "#60e0a0", iops_w, "#60a0e0", 100000, 10) +perfometers["check_mk-ibm_svc_nodestats.iops"] = perfometer_check_mk_iops_r_w +perfometers["check_mk-ibm_svc_systemstats.iops"] = perfometer_check_mk_iops_r_w + +def perfometer_check_mk_disk_latency_r_w(row, check_command, perf_data): + latency_r = float(perf_data[0][1]) + latency_w = float(perf_data[1][1]) + text = "%.1f ms %.1f ms" % (latency_r, latency_w) + + return text, perfometer_logarithmic_dual( + latency_r, "#60e0a0", latency_w, "#60a0e0", 20, 10) +perfometers["check_mk-ibm_svc_nodestats.disk_latency"] = perfometer_check_mk_disk_latency_r_w +perfometers["check_mk-ibm_svc_systemstats.disk_latency"] = perfometer_check_mk_disk_latency_r_w + +def perfometer_in_out_mb_per_sec(row, check_command, perf_data): + read_mbit = float(perf_data[0][1]) / 131072 + write_mbit = float(perf_data[1][1]) / 131072 + + text = "%-.2fMb/s %-.2fMb/s" % (read_mbit, write_mbit) + + return text, perfometer_logarithmic_dual( + read_mbit, "#30d050", write_mbit, "#0060c0", 100, 10) +perfometers["check_mk-openvpn_clients"] = perfometer_in_out_mb_per_sec + +def perfometer_check_mk_hba(row, check_command, perf_data): + if len(perf_data) < 2: + return "", "" + + read_blocks = int(perf_data[0][1]) + write_blocks = int(perf_data[1][1]) + + text = "%d/s %d/s" % (read_blocks, write_blocks) + + return text, perfometer_logarithmic_dual( + read_blocks, "#30d050", write_blocks, "#0060c0", 100000, 2) +perfometers["check_mk-emcvnx_hba"] = perfometer_check_mk_hba + +def perfometer_check_mk_iops(row, check_command, perf_data): + iops = int(perf_data[0][1]) + text = "%d/s" % iops + + return text, perfometer_logarithmic(iops, 100000, 2, "#30d050") +perfometers["check_mk-emc_isilon_iops"] = perfometer_check_mk_iops def perfometer_check_mk_printer_supply(row, check_command, perf_data): left = savefloat(perf_data[0][1]) @@ -414,18 +723,18 @@ s = row['service_description'].lower() fg_color = '#000000' - if 'black' in s or s[-1] == 'k': + if 'black' in s or ("ink" not in s and s[-1] == 'k'): colors = [ '#000000', '#6E6F00', '#6F0000' ] if left >= 60: - fg_color = '#ffffff' + fg_color = '#FFFFFF' elif 'magenta' in s or s[-1] == 'm': - colors = [ '#fc00ff', '#FC7FFF', '#FEDFFF' ] + colors = [ '#FC00FF', '#FC7FFF', '#FEDFFF' ] elif 'yellow' in s or s[-1] == 'y': - colors = [ '#ffff00', '#FEFF7F', '#FFFFCF' ] + colors = [ '#FFFF00', '#FEFF7F', '#FFFFCF' ] elif 'cyan' in s or s[-1] == 'c': - colors = [ '#00ffff', '#7FFFFF', '#DFFFFF' ] + colors = [ '#00FFFF', '#7FFFFF', 
'#DFFFFF' ] else: - colors = [ '#cccccc', '#ffff00', '#ff0000' ] + colors = [ '#CCCCCC', '#ffff00', '#ff0000' ] st = min(2, row['service_state']) color = colors[st] @@ -433,7 +742,14 @@ return "%.0f%%" % (fg_color, left), perfometer_linear(left, color) perfometers["check_mk-printer_supply"] = perfometer_check_mk_printer_supply -perfometers["check_mk-printer_supply_ricon"] = perfometer_check_mk_printer_supply +perfometers["check_mk-printer_supply_ricoh"] = perfometer_check_mk_printer_supply + +def perfometer_printer_pages(row, check_command, perf_data): + color = "#909090" + return "%d" % int(perf_data[0][1]), perfometer_logarithmic(perf_data[0][1], 50000, 6, color) + +perfometers["check_mk-printer_pages"] = perfometer_printer_pages +perfometers["check_mk-canon_pages"] = perfometer_printer_pages def perfometer_msx_queues(row, check_command, perf_data): length = int(perf_data[0][1]) @@ -590,17 +906,24 @@ perfometers["check_mk-cmc_lcp"] = perfometer_cmc_lcp -def perfometer_carel_uniflair_cooling(row, check_command, perf_data): +def perfometer_humidity(row, check_command, perf_data): humidity = float(perf_data[0][1]) - return "%3.1f%%" % humidity, perfometer_linear(humidity, '#6f2') + return "%3.1f% %" % humidity, perfometer_linear(humidity, '#6f2') -perfometers['check_mk-carel_uniflair_cooling'] = perfometer_carel_uniflair_cooling +perfometers['check_mk-carel_uniflair_cooling'] = perfometer_humidity +perfometers['check_mk-cmciii.humidity'] = perfometer_humidity +perfometers['check_mk-allnet_ip_sensoric.humidity'] = perfometer_humidity +perfometers['check_mk-knuerr_rms_humidity'] = perfometer_humidity def perfometer_eaton(row, command, perf): return u"%s°C" % str(perf[0][1]), perfometer_linear(float(perf[0][1]), 'silver') perfometers['check_mk-ups_eaton_enviroment'] = perfometer_eaton +def perfometer_battery(row, command, perf): + return u"%s%%" % str(perf[0][1]), perfometer_linear(float(perf[0][1]), '#C98D5C') + +perfometers['check_mk-emc_datadomain_nvbat'] = perfometer_battery def perfometer_ups_capacity(row, command, perf): return "%0.2f%%" % float(perf[1][1]), perfometer_linear(float(perf[1][1]), '#B2FF7F') @@ -613,10 +936,289 @@ perfometers['check_mk-genu_pfstate'] = perfometer_genu_screen -def perfometer_db2_mem(row, command, perf): +def perfometer_simple_mem_usage(row, command, perf): maxw = float(perf[0][6]) used_level = float(perf[0][1]) used_perc = (100.0 / maxw) * used_level - return "%d%% used" % used_perc , perfometer_linear(used_perc, "#0000FF") + return "%d%%" % used_perc , perfometer_linear(used_perc, "#20cf80") + +perfometers['check_mk-db2_mem'] = perfometer_simple_mem_usage +perfometers['check_mk-esx_vsphere_hostsystem.mem_usage'] = perfometer_simple_mem_usage +perfometers['check_mk-brocade_mlx.module_mem'] = perfometer_simple_mem_usage +perfometers['check_mk-innovaphone_mem'] = perfometer_simple_mem_usage +perfometers['check_mk-juniper_screenos_mem'] = perfometer_simple_mem_usage +perfometers['check_mk-arris_cmts_mem'] = perfometer_simple_mem_usage +perfometers["check_mk-juniper_trpz_mem"] = perfometer_simple_mem_usage + +def perfometer_vmguest_mem_usage(row, command, perf): + used = float(perf[0][1]) + return number_human_readable(used), perfometer_logarithmic(used, 1024*1024*2000, 2, "#20cf80") + +perfometers['check_mk-esx_vsphere_vm.mem_usage'] = perfometer_vmguest_mem_usage + +def perfometer_esx_vsphere_hostsystem_cpu(row, command, perf): + used_perc = float(perf[0][1]) + return "%d%%" % used_perc, perfometer_linear(used_perc, "#60f020") + 
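[Editor's note] Several of the memory perfometers above read the perf data tuple positionally: index 1 is the current value and index 6 the maximum, i.e. a (name, value, unit, warn, crit, min, max) layout. A hypothetical defensive variant of perfometer_simple_mem_usage(), not part of this patch, would guard against a missing maximum before dividing; it reuses savefloat() and perfometer_linear() as used elsewhere in this file:

    def perfometer_generic_pct(row, command, perf):
        # (name, value, unit, warn, crit, min, max) -- the layout
        # implied by the perf[0][1] / perf[0][6] accesses above
        maxw = savefloat(perf[0][6])
        if not maxw:
            return "", ""    # no maximum reported: render nothing
        used_perc = 100.0 * savefloat(perf[0][1]) / maxw
        return "%d%%" % used_perc, perfometer_linear(used_perc, "#20cf80")

The patch resumes below with the registration of perfometer_esx_vsphere_hostsystem_cpu.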
+perfometers['check_mk-esx_vsphere_hostsystem.cpu_usage'] = perfometer_esx_vsphere_hostsystem_cpu + +def perfometer_mq_queues(row, command, perf): + size = int(perf[0][1]) + return "%s Messages" % size, perfometer_logarithmic(size, 1, 2, "#701141") + +perfometers['check_mk-mq_queues'] = perfometer_mq_queues +perfometers['check_mk-websphere_mq_channels'] = perfometer_mq_queues +perfometers['check_mk-websphere_mq_queues'] = perfometer_mq_queues + +def perfometer_apc_mod_pdu_modules(row, check_command, perf_data): + value = int(savefloat(perf_data[0][1]) * 100) + return "%skw" % perf_data[0][1], perfometer_logarithmic(value, 500, 2, "#3366CC") + +perfometers["check_mk-apc_mod_pdu_modules"] = perfometer_apc_mod_pdu_modules + +# Aiflow in l/s +def perfometer_airflow_ls(row, check_command, perf_data): + value = int(float(perf_data[0][1])*100) + return "%sl/s" % perf_data[0][1], perfometer_logarithmic(value, 1000, 2, '#3366cc') + +perfometers["check_mk-apc_inrow_airflow"] = perfometer_airflow_ls + +# Aiflow Deviation in Percent +def perfometer_airflow_deviation(row, check_command, perf_data): + value = float(perf_data[0][1]) + return "%0.2f%%" % value, perfometer_linear(abs(value), "silver") + +perfometers["check_mk-wagner_titanus_topsense.airflow_deviation"] = perfometer_airflow_deviation + +def perfometer_fanspeed(row, check_command, perf_data): + value = float(perf_data[0][1]) + return "%.2f%%" % value, perfometer_linear(value, "silver") + +perfometers["check_mk-apc_inrow_fanspeed"] = perfometer_fanspeed + +def perfometer_fanspeed_logarithmic(row, check_command, perf_data): + value = float(perf_data[0][1]) + return "%d rpm" % value, perfometer_logarithmic(value, 5000, 2, "silver") + +perfometers["check_mk-hitachi_hnas_fan"] = perfometer_fanspeed_logarithmic +perfometers["check_mk-bintec_sensors.fan"] = perfometer_fanspeed_logarithmic + +def perfometer_check_mk_arcserve_backup(row, check_command, perf_data): + bytes = int(perf_data[2][1]) + text = number_human_readable(bytes) + + return text, perfometer_logarithmic(bytes, 1000 * 1024 * 1024 * 1024, 2, "#BDC6DE") + +perfometers["check_mk-arcserve_backup"] = perfometer_check_mk_arcserve_backup + +def perfometer_check_mk_ibm_svc_host(row, check_command, perf_data): + h = '
    ' + active = int(perf_data[0][1]) + inactive = int(perf_data[1][1]) + degraded = int(perf_data[2][1]) + offline = int(perf_data[3][1]) + other = int(perf_data[4][1]) + total = active + inactive + degraded + offline + other + if active > 0: + perc_active = 100 * active / total + h += perfometer_td(perc_active, "#008000") + if inactive > 0: + perc_inactive = 100 * inactive / total + h += perfometer_td(perc_inactive, "#0000FF") + if degraded > 0: + perc_degraded = 100 * degraded / total + h += perfometer_td(perc_degraded, "#F84") + if offline > 0: + perc_offline = 100 * offline / total + h += perfometer_td(perc_offline, "#FF0000") + if other > 0: + perc_other = 100 * other / total + h += perfometer_td(perc_other, "#000000") + if total == 0: + h += perfometer_td(100, "white") + h += "
    " + return "%d active" % active, h + +perfometers["check_mk-ibm_svc_host"] = perfometer_check_mk_ibm_svc_host + +def perfometer_check_mk_ibm_svc_license(row, check_command, perf_data): + licensed = float(perf_data[0][1]) + used = float(perf_data[1][1]) + if used == 0 and licensed == 0: + return "0 of 0 used", perfometer_linear(100, "white") + elif licensed == 0: + return "completely unlicensed", perfometer_linear(100, "silver") + else: + perc_used = 100 * used / licensed + return "%0.2f %% used" % perc_used, perfometer_linear(perc_used, "silver") + +perfometers["check_mk-ibm_svc_license"] = perfometer_check_mk_ibm_svc_license + +def perfometer_check_mk_ibm_svc_cache(row, check_command, perf_data): + h = '' + write_cache_pc = int(perf_data[0][1]) + total_cache_pc = int(perf_data[1][1]) + read_cache_pc = total_cache_pc - write_cache_pc + free_cache_pc = 100 - total_cache_pc + h += perfometer_td(write_cache_pc, "#60e0a0") + h += perfometer_td(read_cache_pc, "#60a0e0") + h += perfometer_td(free_cache_pc, "white") + h += "
    " + return "%d %% write, %d %% read" % (write_cache_pc, read_cache_pc), h +perfometers["check_mk-ibm_svc_nodestats.cache"] = perfometer_check_mk_ibm_svc_cache +perfometers["check_mk-ibm_svc_systemstats.cache"] = perfometer_check_mk_ibm_svc_cache + + +def perfometer_licenses_percent(row, check_command, perf_data): + licenses = float(perf_data[0][1]) + max_avail = float(perf_data[0][6]) + used_perc = 100.0 * licenses / max_avail + return "%.0f%% used" % used_perc, perfometer_linear( used_perc, 'orange' ) + +perfometers['check_mk-innovaphone_licenses'] = perfometer_licenses_percent +perfometers['check_mk-citrix_licenses'] = perfometer_licenses_percent + +def perfometer_smoke_percent(row, command, perf): + used_perc = float(perf[0][1]) + return "%0.6f%%" % used_perc, perfometer_linear(used_perc, "#404040") + +perfometers['check_mk-wagner_titanus_topsense.smoke'] = perfometer_smoke_percent + +def perfometer_chamber_deviation(row, command, perf): + chamber_dev = float(perf[0][1]) + return "%0.6f%%" % chamber_dev, perfometer_linear(chamber_dev, "#000080") + +perfometers['check_mk-wagner_titanus_topsense.chamber_deviation'] = perfometer_chamber_deviation + +def perfometer_cache_hit_ratio(row, check_command, perf_data): + hit_ratio = float(perf_data[0][1]) # is already percentage + color = "#60f020" + return "%.2f %% hits" % hit_ratio, perfometer_linear(hit_ratio, color) + +perfometers["check_mk-zfs_arc_cache"] = perfometer_cache_hit_ratio +perfometers["check_mk-zfs_arc_cache.l2"] = perfometer_cache_hit_ratio + +def perfometer_current(row, check_command, perf_data): + display_color = "#50f020" + + value=savefloat(perf_data[0][1]) + crit=savefloat(perf_data[0][4]) + warn=savefloat(perf_data[0][3]) + current_perc = value/crit*90 # critical is at 90% to allow for more than crit + + if value > warn: + display_color = "#FDC840" + if value > crit: + display_color = "#FF0000" + + display_string = "%.1f Ampere" % value + return display_string, perfometer_linear(current_perc, display_color) + +perfometers["check_mk-adva_fsp_current"] = perfometer_current + +def perfometer_raritan_pdu_inlet(row, check_command, perf_data): + display_color = "#50f020" + cap = perf_data[0][0].split('-')[-1] + value = float(perf_data[0][1]) + unit = perf_data[0][2] + display_str = perf_data[0][1] + " " + unit + + if cap.startswith("rmsCurrent"): + return display_str, perfometer_logarithmic(value, 1, 2, display_color) + elif cap.startswith("unbalancedCurrent"): + return display_str, perfometer_linear(value, display_color) + elif cap.startswith("rmsVoltage"): + return display_str, perfometer_logarithmic(value, 500, 2, display_color) + elif cap.startswith("activePower"): + return display_str, perfometer_logarithmic(value, 20, 2, display_color) + elif cap.startswith("apparentPower"): + return display_str, perfometer_logarithmic(value, 20, 2, display_color) + elif cap.startswith("powerFactor"): + return display_str, perfometer_linear(value * 100, display_color) + elif cap.startswith("activeEnergy"): + return display_str, perfometer_logarithmic(value, 100000, 2, display_color) + elif cap.startswith("apparentEnergy"): + return display_str, perfometer_logarithmic(value, 100000, 2, display_color) + + return "unimplemented" , perfometer_linear(0, "#ffffff") + +perfometers["check_mk-raritan_pdu_inlet"] = perfometer_raritan_pdu_inlet +perfometers["check_mk-raritan_pdu_inlet_summary"] = perfometer_raritan_pdu_inlet + + +def perfometer_raritan_pdu_outletcount(row, check_command, perf_data): + outletcount = float(perf_data[0][1]) + return 
"%d" % outletcount, perfometer_logarithmic(outletcount, 20, 2, "#da6") + +perfometers["check_mk-raritan_pdu_outletcount"] = perfometer_raritan_pdu_outletcount + +def perfometer_allnet_ip_sensoric_tension(row, check_command, perf_data): + display_color = "#50f020" + value = float(perf_data[0][1]) + return value, perfometer_linear(value, display_color) + +perfometers["check_mk-allnet_ip_sensoric.tension"] = perfometer_allnet_ip_sensoric_tension + +def perfometer_pressure(row, check_command, perf_data): + pressure = float(perf_data[0][1]) + return "%0.5f bars" % pressure, perfometer_logarithmic(pressure, 1, 2, "#da6") + +perfometers['check_mk-allnet_ip_sensoric.pressure'] = perfometer_pressure + +def perfometer_voltage(row, check_command, perf_data): + color = "#808000" + value = float(perf_data[0][1]) + return "%0.3f V" % value, perfometer_logarithmic(value, 12, 2, color) + +perfometers["check_mk-bintec_sensors.voltage"] = perfometer_voltage + +def perfometer_dbmv(row, check_command, perf_data): + dbmv = float(perf_data[0][1]) + return "%.1f dBmV" % dbmv, perfometer_logarithmic(dbmv, 50, 2, "#da6") + +perfometers["check_mk-docsis_channels_downstream"] = perfometer_dbmv +perfometers["check_mk-docsis_cm_status"] = perfometer_dbmv + +def perfometer_docsis_snr(row, check_command, perf_data): + dbmv = float(perf_data[0][1]) + return "%.1f dB" % dbmv, perfometer_logarithmic(dbmv, 50, 2, "#ad6") + +perfometers["check_mk-docsis_channels_upstream"] = perfometer_docsis_snr + +def perfometer_veeam_client(row, check_command, perf_data): + for graph in perf_data: + if graph[0] == "avgspeed": + avgspeed_bytes = int(graph[1]) + if graph[0] == "duration": + duration_secs = int(graph[1]) + h = perfometer_logarithmic_dual_independent(avgspeed_bytes, '#54b948', 10000000, 2, duration_secs, '#2098cb', 500, 2) + + avgspeed = bytes_human_readable(avgspeed_bytes) + # Return Value always as minutes + duration = age_human_readable(duration_secs, True) + + return "%s/s   %s" % (avgspeed, duration), h + +perfometers["check_mk-veeam_client"] = perfometer_veeam_client + +def perfometer_ups_outphase(row, check_command, perf_data): + load = saveint(perf_data[2][1]) + return "%d%%" % load, perfometer_linear(load, "#8050ff") + +perfometers["check_mk-ups_socomec_outphase"] = perfometer_ups_outphase + +def perfometer_el_inphase(row, check_command, perf_data): + for data in perf_data: + if data[0] == "power": + power = savefloat(data[1]) + return "%.0f W" % power, perfometer_linear(power, "#8050ff") + +perfometers["check_mk-raritan_pdu_inlet"] = perfometer_el_inphase +perfometers["check_mk-raritan_pdu_inlet_summary"] = perfometer_el_inphase +perfometers["check_mk-ucs_bladecenter_psu.switch_power"] = perfometer_el_inphase + +def perfometer_f5_bigip_vserver(row, check_command, perf_data): + connections = int(perf_data[0][1]) + return str(connections), perfometer_logarithmic(connections, 100, 2, "#46a") -perfometers['check_mk-db2_mem'] = perfometer_db2_mem +perfometers["check_mk-f5_bigip_vserver"] = perfometer_f5_bigip_vserver diff -Nru check-mk-1.2.2p3/plugins/plesk_backups check-mk-1.2.6p12/plugins/plesk_backups --- check-mk-1.2.2p3/plugins/plesk_backups 2013-03-04 11:48:41.000000000 +0000 +++ check-mk-1.2.6p12/plugins/plesk_backups 2015-06-24 09:48:39.000000000 +0000 @@ -1,26 +1,38 @@ #!/usr/bin/python -# encoding: utf-8 +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ 
\/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ # +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. + # Monitors FTP backup spaces of plesk domains. # Data format # # <<>> # -# Cache the result for 1 hour by default -cache_seconds = 60 * 60 -# The cache file to use -cache_file = '/tmp/plesk_backups.cache' - import MySQLdb, sys, datetime, time, os from ftplib import FTP -# use the cache? -if os.path.exists(cache_file) \ - and os.stat(cache_file).st_mtime > time.time() - cache_seconds: - print file(cache_file).read() - sys.exit(0) - def connect(): try: return MySQLdb.connect( @@ -46,7 +58,7 @@ 'WHERE id = %d AND type = \'domain\'' % domain_id) params = dict(cursor2.fetchall()) domains[domain] = params - + cursor2.close() cursor.close() return domains @@ -126,12 +138,11 @@ size += int(l.split()[-5]) return size total_size = get_size('') - + output.append('%s 0 %s %d %d' % (domain, last_backup[1].strftime('%s'), last_backup[2], total_size)) - + except Exception, e: output.append('%s 2 %s' % (domain, e)) # Write cache and output -file(cache_file, 'w').write('\n'.join(output)) print '\n'.join(output) diff -Nru check-mk-1.2.2p3/plugins/plesk_domains check-mk-1.2.6p12/plugins/plesk_domains --- check-mk-1.2.2p3/plugins/plesk_domains 2013-03-04 11:48:41.000000000 +0000 +++ check-mk-1.2.6p12/plugins/plesk_domains 2015-06-24 09:48:39.000000000 +0000 @@ -1,6 +1,29 @@ #!/usr/bin/python -# encoding: utf-8 +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ # +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. 
If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. + # Lists all domains configured in plesk # # <<>> diff -Nru check-mk-1.2.2p3/plugins/README check-mk-1.2.6p12/plugins/README --- check-mk-1.2.2p3/plugins/README 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/plugins/README 2015-06-24 09:48:39.000000000 +0000 @@ -0,0 +1,17 @@ +These plugins can be installed in the plugins directory of the Linux agent +in /usr/lib/check_mk_agent/plugins/. Please only install the plugins that +you really need. + +If you want a plugin to be run asynchronously and also in +a larger interval then the normal check interval, then you can +copy it to a subdirectory named after a number of *minutes*, +e.g.: + +/usr/lib/check_mk_agent/plugins/60/mk_zypper + +In that case the agent will: + + - Run this plugin in the background and wait not for it to finish. + - Store the result of the plugin in a cache file below /etc/check_mk/cache. + - Use that file for one hour before running the script again. + diff -Nru check-mk-1.2.2p3/plugins/runas check-mk-1.2.6p12/plugins/runas --- check-mk-1.2.2p3/plugins/runas 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/plugins/runas 2015-06-24 09:48:39.000000000 +0000 @@ -0,0 +1,84 @@ +#!/bin/bash +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. 
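[Editor's note] The runas agent plugin added below reads $MK_CONFDIR/runas.cfg. A hypothetical configuration exercising all three supported script types (users and paths are illustrative only):

    # type   user  file or directory
    mrpe     ab    /home/ab/mrpe_commands.cfg
    plugin   -     /usr/lib/check_mk_agent/plugins_extra
    local    www   /var/www/local_checks

For any user other than '-' the script wraps each command as su <user> -c "<cmdline>"; a '-' runs it under the agent's own user, as documented in the header comments that follow.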
+
+# This plugin allows executing mrpe, local and plugin scripts with a different user context
+# It is configured in the file $MK_CONFDIR/runas.cfg
+#
+# Syntax:
+# [Script type] [User context] [File / Directory]
+#
+# Example configuration
+# # Execute mrpe commands in given files under a specific user
+# # A '-' means no user context switch
+# mrpe ab /home/ab/mrpe_commands.cfg
+# mrpe lm /home/lm/mrpe_commands.cfg
+# mrpe - /root/mrpe/extra_commands.cfg
+#
+# Execute -executable- files in the target directories under a specific user context
+# plugin ab /var/ab/plugins
+# local ab /var/ab/local
+#
+
+grep -Ev '^[[:space:]]*($|#)' "$MK_CONFDIR/runas.cfg" | \
+while read type user include
+do
+    if [ -d $include -o \( "$type" == "mrpe" -a -f $include \) ] ; then
+        PREFIX=""
+        if [ "$user" != "-" ] ; then
+            PREFIX="su $user -c "
+        fi
+
+        # mrpe includes
+        if [ "$type" == "mrpe" ] ; then
+            echo "<<<mrpe>>>"
+            grep -Ev '^[[:space:]]*($|#)' "$include" | \
+            while read descr cmdline
+            do
+                PLUGIN=${cmdline%% *}
+                if [ -n "$PREFIX" ] ; then
+                    cmdline="$PREFIX\"$cmdline\""
+                fi
+                OUTPUT=$(eval "$cmdline")
+                echo -n "(${PLUGIN##*/}) $descr $? $OUTPUT" | tr \\n \\1
+                echo
+            done
+        # local and plugin includes
+        elif [ "$type" == "local" -o "$type" == "plugin" ] ; then
+            if [ "$type" == "local" ] ; then
+                echo "<<<local>>>"
+            fi
+            find $include -executable -type f | \
+            while read filename
+            do
+                if [ -n "$PREFIX" ] ; then
+                    cmdline="$PREFIX\"$filename\""
+                else
+                    cmdline=$filename
+                fi
+                $cmdline
+            done
+        fi
+    fi
+done
diff -Nru check-mk-1.2.2p3/plugins/sidebar/bi.py check-mk-1.2.6p12/plugins/sidebar/bi.py
--- check-mk-1.2.2p3/plugins/sidebar/bi.py 2013-11-05 09:23:09.000000000 +0000
+++ check-mk-1.2.6p12/plugins/sidebar/bi.py 2015-06-24 09:48:38.000000000 +0000
@@ -7,7 +7,7 @@
 # | | |___| | | | __/ (__| < | | | | . \ |
 # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ |
 # | |
-# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de |
+# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de |
 # +------------------------------------------------------------------+
 #
 # This file is part of Check_MK.
@@ -37,7 +37,7 @@
     html.write("<ul>")
     for group in bi.aggregation_groups():
         bulletlink(group, "view.py?view_name=aggr_group&aggr_group=%s" %
-                            htmllib.urlencode(group))
+                            html.urlencode(group))
     html.write("</ul>
    ") sidebar_snapins["biaggr_groups"] = { diff -Nru check-mk-1.2.2p3/plugins/sidebar/nagvis_maps.py check-mk-1.2.6p12/plugins/sidebar/nagvis_maps.py --- check-mk-1.2.2p3/plugins/sidebar/nagvis_maps.py 2013-11-05 09:23:09.000000000 +0000 +++ check-mk-1.2.6p12/plugins/sidebar/nagvis_maps.py 2015-06-24 09:48:38.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. @@ -44,5 +44,24 @@ "render": render_nagvis_maps, "allowed": [ "user", "admin", "guest" ], "refresh": True, - "styles": "" + "styles": """ +div.state1.statea { + border-color: #ff0; +} +div.state2.statea { + border-color: #f00; +} +div.statea { + background-color: #0b3; +} +div.state1.stated { + border-color: #ff0; +} +div.state2.stated { + border-color: #f00; +} +div.stated { + background-color: #0b3; +} +""" } diff -Nru check-mk-1.2.2p3/plugins/sidebar/search.py check-mk-1.2.6p12/plugins/sidebar/search.py --- check-mk-1.2.2p3/plugins/sidebar/search.py 2013-11-05 09:23:09.000000000 +0000 +++ check-mk-1.2.6p12/plugins/sidebar/search.py 2015-06-24 09:48:38.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. @@ -24,76 +24,18 @@ # to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, # Boston, MA 02110-1301 USA. -# +------------------------------------------------------------------+ -# | This file has been contributed and is copyrighted by: | -# | | -# | Lars Michelsen Copyright 2010 | -# +------------------------------------------------------------------+ - -import views, defaults - -# Python 2.3 does not have 'set' in normal namespace. -# But it can be imported from 'sets' -try: - set() -except NameError: - from sets import Set as set - def render_searchform(): - try: - limit = config.quicksearch_dropdown_limit - except: - limit = 80 - - html.write('\n
    \n') html.write("\n") - html.write("\n") sidebar_snapins["search"] = { "title": _("Quicksearch"), - "description": _("Interactive search field for direct access to hosts"), + "description": _("Interactive search field for direct access to hosts and services"), "render": render_searchform, - "restart": True, + "restart": False, "allowed": [ "user", "admin", "guest" ], "styles": """ @@ -111,7 +53,8 @@ height: 26px; margin-top: -25px; left: 196px; - float:right; + float: left; + position: relative; z-index:100; } @@ -153,3 +96,205 @@ """ } + +#. +# .--Search Plugins------------------------------------------------------. +# | ____ _ ____ _ _ | +# | / ___| ___ __ _ _ __ ___| |__ | _ \| |_ _ __ _(_)_ __ ___ | +# | \___ \ / _ \/ _` | '__/ __| '_ \ | |_) | | | | |/ _` | | '_ \/ __| | +# | ___) | __/ (_| | | | (__| | | | | __/| | |_| | (_| | | | | \__ \ | +# | |____/ \___|\__,_|_| \___|_| |_| |_| |_|\__,_|\__, |_|_| |_|___/ | +# | |___/ | +# +----------------------------------------------------------------------+ +# | Realize the search mechanism to find objects via livestatus | +# '----------------------------------------------------------------------' + +def search_filter_name(used_filters, column = 'name'): + return 'Filter: %s ~~ %s\n' % (column, lqencode(used_filters[0][1])) + +search_plugins.append({ + 'id' : 'hosts', + "lq_columns" : ["host_name"], + 'filter_func' : search_filter_name, +}) + +search_plugins.append({ + 'id' : 'services', + "lq_columns" : ["service_description"], + 'filter_func' : lambda q: search_filter_name(q, 'description'), +}) + +search_plugins.append({ + 'id' : 'hostgroups', + "lq_columns" : ["name"], + 'filter_func' : search_filter_name, +}) + +search_plugins.append({ + 'id' : 'servicegroups', + "lq_columns" : ["name"], + 'filter_func' : search_filter_name, +}) + +def search_filter_ipaddress(used_filters): + q = used_filters[0][1] + if is_ipaddress(q): + return 'Filter: address ~~ %s\n' % lqencode(q) + +search_plugins.append({ + 'id' : 'host_address', + "dftl_url_tmpl" : "hosts", + "lq_table" : "hosts", + "lq_columns" : ["host_address"], + 'filter_func' : search_filter_ipaddress, + 'search_url_tmpl': 'view.py?view_name=searchhost&host_address=%(search)s&filled_in=filter&host_address_prefix=yes', + 'match_url_tmpl' : 'view.py?view_name=searchhost&host_address=%(search)s&filled_in=filter' +}) + +def search_filter_alias(used_filters): + return 'Filter: alias ~~ %s\n' % lqencode(used_filters[0][1]) + +search_plugins.append({ + 'id' : 'host_alias', + "dftl_url_tmpl" : "hosts", + 'lq_table' : "hosts", + 'qs_show' : 'host_alias', + 'lq_columns' : ['host_name', 'host_alias'], + 'filter_func' : search_filter_alias, + 'search_url_tmpl' : 'view.py?view_name=searchhost&hostalias=%(search)s&filled_in=filter', + 'match_url_tmpl' : 'view.py?view_name=searchhost&hostalias=%(search)s&filled_in=filter' +}) + +def search_hosts_filter(filters, host_is_ip = False): + lq_filter = "" + filter_template = host_is_ip and "Filter: host_address ~~ %s\n" or "Filter: host_name ~~ %s\n" + for name, value in filters: + lq_filter += filter_template % lqencode(value) + if len(filters) > 1: + lq_filter += 'Or: %d\n' % len(filters) + + return lq_filter + +def search_hosts_url_tmpl(used_filters, data, host_is_ip = False): + filter_field = host_is_ip and "host_address=(%s)" or "host_regex=(%s)" % "|".join(map(lambda x: x[1], used_filters)) + return 'view.py?view_name=searchhost&filled_in=filter&' + filter_field + +def search_host_service_filter(filters, host_is_ip = False): + def 
get_filters(filter_type): + result = [] + for entry in filters: + if entry[0] == filter_type: + result.append(entry[1]) + return result + + services = get_filters("services") + hosts = get_filters("hosts") + hostgroups = get_filters("hostgroups") + servicegroups = get_filters("servicegroups") + + lq_filter = "" + group_count = 0 + for filter_name, entries, optr in [ (host_is_ip and "host_address" or "host_name", hosts, "~~"), + ("service_description", services, "~~"), + ("host_groups", hostgroups, ">="), + ("groups", servicegroups, ">=") ]: + if entries: + group_count += 1 + for entry in entries: + lq_filter += 'Filter: %s %s %s\n' % (filter_name, optr, lqencode(entry)) + if len(entries) > 1: + lq_filter += 'Or: %d\n' % len(entries) + + if group_count > 1: + lq_filter += "And: %d\n" % group_count + + return lq_filter + +def match_host_service_url_tmpl(used_filters, row_dict, host_is_ip = False): + tmpl = 'view.py?view_name=searchsvc&filled_in=filter' + # Sorry, no support for multiple host- or servicegroups filters in match templates (yet) + for ty, entry in [ ("hostgroup", "host_groups"), ("servicegroup", "service_groups")]: + if row_dict.get(entry): + if type(row_dict[entry]) == list: + row_dict[entry] = row_dict[entry][0] + + for param, key in [ ("service_regex", "service_description"), + (host_is_ip and "host_address" or "host_regex", "host_name"), + ("opthost_group", "host_groups"), + ("optservice_group", "service_groups"), + ("site", "site")]: + if row_dict.get(key): + tmpl_pre = "&%s=%%(%s)s" % (param, key) + tmpl += tmpl_pre % row_dict + return tmpl + +def search_host_service_url_tmpl(used_filters, data, host_is_ip = False): + # We combine all used_filters of the same type with (abcd|dfdf) + filters_combined = {"hosts": [], "services": [], "hostgroups": [], "servicegroups": []} + + for entry in filters_combined.keys(): + for filt in used_filters: + if filt[0] == entry: + filters_combined.setdefault(entry, []).append(filt[1].strip()) + for key, value in filters_combined.items(): + if len(value) > 1: + filters_combined[key] = "(%s)" % "|".join(value) + elif len(value) == 1: + filters_combined[key] = value[0] + + tmpl = 'view.py?view_name=searchsvc&filled_in=filter' + for url_param, qs_name in [ ("service_regex", "services" ), + host_is_ip and ("host_address", "host" )\ + or ("host_regex", "hosts" ), + ("opthost_group", "hostgroups" ), + ("optservice_group", "servicegroups")]: + if filters_combined.get(qs_name): + tmpl_pre = "&%s=%%(%s)s" % (url_param, qs_name) + tmpl += tmpl_pre % filters_combined + return tmpl + +search_plugins.append({ + "id" : "service_multi", + "required_types" : ["services"], + "optional_types" : ["hosts", "hostgroups", "servicegroups"], + "qs_show" : "service_description", + "lq_table" : "services", + "lq_columns" : ["service_description", "host_name", "host_groups", "service_groups"], + "filter_func" : lambda x: search_host_service_filter(x), + "match_url_tmpl_func" : lambda x,y: match_host_service_url_tmpl(x, y), + "search_url_tmpl_func" : lambda x,y: search_host_service_url_tmpl(x, y), +}) + +search_plugins.append({ + "id" : "service_multi_address", + "qs_show" : "service_description", + "required_types" : ["services"], + "optional_types" : ["hosts", "hostgroups", "servicegroups"], + "lq_table" : "services", + "lq_columns" : ["service_description", "host_name", "host_groups", "service_groups", "host_address"], + "filter_func" : lambda x: search_host_service_filter(x, host_is_ip = True), + "match_url_tmpl_func" : lambda x,y: match_host_service_url_tmpl(x, y, 
host_is_ip = True), + "search_url_tmpl_func" : lambda x,y: search_host_service_url_tmpl(x, y, host_is_ip = True), +}) + +search_plugins.append({ + "id" : "host_multi", + "qs_show" : "host_name", + "required_types" : ["hosts"], + "lq_table" : "hosts", + "lq_columns" : ["host_name", "host_address"], + "filter_func" : lambda x: search_hosts_filter(x), + "match_url_tmpl_func" : lambda x,y: "view.py?view_name=host&host=%(host_name)s&site=%(site)s" % y, + "search_url_tmpl_func" : lambda x,y: search_hosts_url_tmpl(x, y), +}) + +search_plugins.append({ + "id" : "host_multi_address", + "qs_show" : "host_address", + "required_types" : ["hosts"], + "lq_table" : "hosts", + "lq_columns" : ["host_address", "host_name"], + "filter_func" : lambda x: search_hosts_filter(x, True), + "match_url_tmpl_func" : lambda x,y: "view.py?view_name=host&host_address=%(host_address)s&site=%(site)s" % y, + "search_url_tmpl_func" : lambda x,y: search_hosts_url_tmpl(x, y, True), +}) diff -Nru check-mk-1.2.2p3/plugins/sidebar/shipped.py check-mk-1.2.6p12/plugins/sidebar/shipped.py --- check-mk-1.2.2p3/plugins/sidebar/shipped.py 2013-11-05 09:23:09.000000000 +0000 +++ check-mk-1.2.6p12/plugins/sidebar/shipped.py 2015-09-21 10:59:54.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. @@ -25,7 +25,6 @@ # Boston, MA 02110-1301 USA. import views, time, defaults, dashboard -import weblib from lib import * # Python 2.3 does not have 'set' in normal namespace. @@ -67,31 +66,22 @@ # \_/ |_|\___| \_/\_/ |___/ # # -------------------------------------------------------------- -visible_views = [ "allhosts", "searchsvc" ] -def views_by_topic(): - s = [ (view.get("topic", _("Other")), view["title"], name) - for name, view - in html.available_views.items() - if not view["hidden"] and not view.get("mobile")] - - # Add all the dashboards to the views list - s += [ (_('Dashboards'), d['title'] and d['title'] or d_name, d_name) - for d_name, d - in dashboard.dashboards.items() - ] +def visuals_by_topic(permitted_visuals, + default_order = [ _("Overview"), _("Hosts"), _("Host Groups"), _("Services"), _("Service Groups"), + _("Business Intelligence"), _("Problems"), _("Addons") ]): + s = [ (_u(visual.get("topic") or _("Other")), _u(visual.get("title")), name, 'painters' in visual) + for name, visual + in permitted_visuals + if not visual["hidden"] and not visual.get("mobile")] s.sort() - # Enforce a certain order on the topics - known_topics = [ _('Dashboards'), _("Hosts"), _("Hostgroups"), _("Services"), _("Servicegroups"), - _("Business Intelligence"), _("Problems"), _("Addons") ] - result = [] - for topic in known_topics: + for topic in default_order: result.append((topic, s)) - rest = list(set([ t for (t, _t, _v) in s if t not in known_topics ])) + rest = list(set([ t for (t, _t, _v, _i) in s if t not in default_order ])) rest.sort() for topic in rest: if topic: @@ -100,25 +90,28 @@ return result def render_views(): + views.load_views() + dashboard.load_dashboards() + def render_topic(topic, s): first = True - for t, title, name in s: - if config.visible_views and name not in config.visible_views: + for t, title, name, is_view in s: + if is_view and config.visible_views and name not in config.visible_views: continue - if config.hidden_views 
and name in config.hidden_views: + if is_view and config.hidden_views and name in config.hidden_views: continue if t == topic: if first: html.begin_foldable_container("views", topic, False, topic, indent=True) first = False - if topic == _('Dashboards'): - bulletlink(title, 'dashboard.py?name=%s' % name) + if is_view: + bulletlink(title, "view.py?view_name=%s" % name, onclick = "return wato_views_clicked(this)") else: - bulletlink(title, "view.py?view_name=%s" % name) + bulletlink(title, 'dashboard.py?name=%s' % name, onclick = "return wato_views_clicked(this)") if not first: # at least one item rendered html.end_foldable_container() - for topic, s in views_by_topic(): + for topic, s in visuals_by_topic(views.permitted_views().items() + dashboard.permitted_dashboards().items()): render_topic(topic, s) links = [] @@ -130,11 +123,67 @@ sidebar_snapins["views"] = { "title" : _("Views"), - "description" : _("Links to all views"), + "description" : _("Links to global views and dashboards"), "render" : render_views, "allowed" : [ "user", "admin", "guest" ], } +# .--Dashboards----------------------------------------------------------. +# | ____ _ _ _ | +# | | _ \ __ _ ___| |__ | |__ ___ __ _ _ __ __| |___ | +# | | | | |/ _` / __| '_ \| '_ \ / _ \ / _` | '__/ _` / __| | +# | | |_| | (_| \__ \ | | | |_) | (_) | (_| | | | (_| \__ \ | +# | |____/ \__,_|___/_| |_|_.__/ \___/ \__,_|_| \__,_|___/ | +# | | +# +----------------------------------------------------------------------+ +# | | +# '----------------------------------------------------------------------' + +def render_dashboards(): + dashboard.load_dashboards() + + def render_topic(topic, s, foldable = True): + first = True + for t, title, name, is_view in s: + if t == topic: + if first: + if foldable: + html.begin_foldable_container("dashboards", topic, False, topic, indent=True) + else: + html.write('
      ') + first = False + bulletlink(title, 'dashboard.py?name=%s' % name, onclick = "return wato_views_clicked(this)") + + if not first: # at least one item rendered + if foldable: + html.end_foldable_container() + else: + html.write('
        ') + + by_topic = visuals_by_topic(dashboard.permitted_dashboards().items(), default_order = [ _('Overview') ]) + topics = [ topic for topic, entry in by_topic ] + + if len(topics) < 2: + render_topic(by_topic[0][0], by_topic[0][1], foldable = False) + + else: + for topic, s in by_topic: + render_topic(topic, s) + + links = [] + if config.may("general.edit_dashboards"): + if config.debug: + links.append((_("EXPORT"), "export_dashboards.py")) + links.append((_("EDIT"), "edit_dashboards.py")) + footnotelinks(links) + +sidebar_snapins["dashboards"] = { + "title" : _("Dashboards"), + "description" : _("Links to all dashboards"), + "render" : render_dashboards, + "allowed" : [ "user", "admin", "guest" ], +} + # -------------------------------------------------------------- # ____ _ __ # / ___| ___ _ ____ _(_) ___ ___ / / @@ -156,7 +205,7 @@ groups.sort() # sort by Alias in lowercase html.write('
          ') for alias_lower, alias, name in groups: - url = "view.py?view_name=%sgroup&%sgroup=%s" % (what, what, htmllib.urlencode(name)) + url = "view.py?view_name=%sgroup&%sgroup=%s" % (what, what, html.urlencode(name)) bulletlink(alias or name, url) html.write('
        ') @@ -194,8 +243,17 @@ query += "Filter: custom_variable_names < _REALNAME\n" if mode == "problems": - query += "Filter: state > 0\nFilter: worst_service_state > 0\nOr: 2\n" view = "problemsofhost" + # Exclude hosts and services in downtime + svc_query = "GET services\nColumns: host_name\n"\ + "Filter: state > 0\nFilter: scheduled_downtime_depth = 0\n"\ + "Filter: host_scheduled_downtime_depth = 0\nAnd: 3" + problem_hosts = set(map(lambda x: x[1], html.live.query(svc_query))) + + query += "Filter: state > 0\nFilter: scheduled_downtime_depth = 0\nAnd: 2\n" + for host in problem_hosts: + query += "Filter: name = %s\n" % host + query += "Or: %d\n" % (len(problem_hosts) + 1) hosts = html.live.query(query) html.live.set_prepend_site(False) @@ -227,7 +285,7 @@ else: statecolor = 0 html.write('
<div class="statebullet state%d">&nbsp;</div>
        ' % statecolor) - html.write(link(host, target + ("&host=%s&site=%s" % (htmllib.urlencode(host), htmllib.urlencode(site))))) + html.write(link(host, target + ("&host=%s&site=%s" % (html.urlencode(host), html.urlencode(site))))) html.write("") if col == num_columns: html.write("\n") @@ -264,7 +322,7 @@ sidebar_snapins["problem_hosts"] = { "title" : _("Problem hosts"), - "description" : _("A summary state of all hosts that have problem, with links to problems of those hosts"), + "description" : _("A summary state of all hosts that have a problem, with links to problems of those hosts"), "render" : lambda: render_hosts("problems"), "allowed" : [ "user", "admin", "guest" ], "refresh" : True, @@ -334,7 +392,7 @@ s = 1 else: s = 0 - url = "view.py?view_name=host&site=%s&host=%s" % (htmllib.urlencode(site), htmllib.urlencode(host)) + url = "view.py?view_name=host&site=%s&host=%s" % (html.urlencode(site), html.urlencode(host)) html.write('' % (s, url, host, cell_size, cell_size)) if col == n or (row == rows and n == lastcols): @@ -374,13 +432,7 @@ if config.is_multisite(): html.write("") - # Sort the list of sitenames by sitealias - sitenames = [] - for sitename, site in config.allsites().iteritems(): - sitenames.append((sitename, site['alias'])) - sitenames = sorted(sitenames, key=lambda k: k[1], cmp = lambda a,b: cmp(a.lower(), b.lower())) - - for sitename, sitealias in sitenames: + for sitename, sitealias in config.sorted_sites(): site = config.site(sitename) if sitename not in html.site_status or "state" not in html.site_status[sitename]: state = "missing" @@ -581,7 +633,6 @@ data = html.live.query("GET status\nColumns: service_checks_rate host_checks_rate " "external_commands_rate connections_rate forks_rate " - "livechecks_rate livecheck_overflows_rate " "log_messages_rate cached_log_messages\n") for what, col, format in \ [("Service checks", 0, "%.2f/s"), @@ -589,10 +640,8 @@ ("External commands", 2, "%.2f/s"), ("Livestatus-conn.", 3, "%.2f/s"), ("Process creations", 4, "%.2f/s"), - ("Livechecks", 5, "%.2f/s"), - ("Livecheck overflows", 6, "%.2f/s"), - ("New log messages", 7, "%.2f/s"), - ("Cached log messages", 8, "%d")]: + ("New log messages", 5, "%.2f/s"), + ("Cached log messages", 6, "%d")]: write_line(what + ":", format % sum([row[col] for row in data])) if len(config.allsites()) == 1: @@ -682,7 +731,7 @@ context.shadowOffsetX = 2; context.shadowOffsetY = 2; context.shadowBlur = 2; - context.stroStyle = "#000000"; + context.strokeStyle = "#000000"; context.stroke(); context = null; } @@ -744,11 +793,11 @@ sidebar_snapins["speedometer"] = { - "title" : _("Speed-O-Meter"), - "description" : _("A gadget that shows your current check rate in relation to " + "title" : _("Service Speed-O-Meter"), + "description" : _("A gadget that shows your current service check rate in relation to " "the scheduled check rate. 
If the Speed-O-Meter shows a speed " - "of 100 percent, then all checks are being executed in exactly " - "the rate that is configured (via check_interval)"), + "of 100 percent, all service checks are being executed in exactly " + "the rate that is desired."), "render" : render_speedometer, "allowed" : [ "admin", ], "styles" : """ @@ -888,6 +937,7 @@ ( "enable_notifications", _("Notifications" )), ( "execute_service_checks", _("Service checks" )), ( "execute_host_checks", _("Host checks" )), + ( "enable_flap_detection", _("Flap Detection" )), ( "enable_event_handlers", _("Event handlers" )), ( "process_performance_data", _("Performance data" )), ] @@ -900,8 +950,13 @@ if siteid: sitealias = html.site_status[siteid]["site"]["alias"] html.begin_foldable_container("master_control", siteid, True, sitealias) + is_cmc = html.site_status[siteid]["program_version"].startswith("Check_MK ") html.write("
        \n") for i, (colname, title) in enumerate(items): + # Do not show event handlers on Check_MK Micro Core + if is_cmc and colname == 'enable_event_handlers': + continue + colvalue = siteline[i + 1] url = defaults.url_prefix + ("check_mk/switch_master_state.py?site=%s&switch=%s&state=%d" % (siteid, colname, 1 - colvalue)) onclick = "get_url('%s', updateContents, 'snapin_master_control')" % url @@ -972,8 +1027,8 @@ n = 0 for title, href in bookmarks: html.write("
        " % n) - iconbutton(_("delete"), "del_bookmark.py?num=%d" % n, "side", "updateContents", 'snapin_bookmarks', css_class = 'bookmark') - iconbutton(_("edit"), "edit_bookmark.py?num=%d" % n, "main", css_class = 'bookmark') + iconbutton("delete", "del_bookmark.py?num=%d" % n, "side", "updateContents", 'snapin_bookmarks', css_class = 'bookmark') + iconbutton("edit", "edit_bookmark.py?num=%d" % n, "main", css_class = 'bookmark') html.write(link(title, href)) html.write("
        ") n += 1 @@ -1018,7 +1073,7 @@ return def render_list(ids, links): - states = weblib.get_tree_states('customlinks') + states = html.get_tree_states('customlinks') n = 0 for entry in links: n += 1 @@ -1058,3 +1113,469 @@ } """ } + + + +#Example Sidebar: +#Heading1: +# * [[link1]] +# * [[link2]] +# +#---- +# +#Heading2: +# * [[link3]] +# * [[link4]] + +def render_wiki(): + filename = defaults.omd_root + '/var/dokuwiki/data/pages/sidebar.txt' + html.javascript(""" + function wiki_search() + { + var oInput = document.getElementById('wiki_search_field'); + top.frames["main"].location.href = + "/%s/wiki/doku.php?do=search&id=" + escape(oInput.value); + } + """ % defaults.omd_site) + + html.write('') + html.write('\n') + html.icon_button("#", _("Search"), "wikisearch", onclick="wiki_search();") + html.write('') + html.write('
        ') + + start_ul = True + ul_started = False + try: + title = None + for line in file(filename).readlines(): + line = line.strip() + if line == "": + if ul_started == True: + html.end_foldable_container() + start_ul = True + ul_started = False + elif line.endswith(":"): + title = line[:-1] + elif line == "----": + pass + # html.write("
        ") + + elif line.startswith("*"): + if start_ul == True: + if title: + html.begin_foldable_container("wikisnapin", title, True, title, indent=True) + else: + html.write('
          ') + start_ul = False + ul_started = True + + erg = re.findall('\[\[(.*)\]\]', line) + if len(erg) == 0: + continue + erg = erg[0].split('|') + if len(erg) > 1: + link = erg[0] + name = erg[1] + else: + link = erg[0] + name = erg[0] + + if link.startswith("http://") or link.startswith("https://"): + simplelink(name, link, "_blank") + else: + erg = name.split(':') + if len(erg) > 0: + name = erg[-1] + else: + name = erg[0] + bulletlink(name, "/%s/wiki/doku.php?id=%s" % (defaults.omd_site, link)) + + else: + html.write(line) + + if ul_started == True: + html.write("
        ") + except IOError: + html.write("

        To get a navigation menu, you have to create a sidebar in your wiki first.

        " % (defaults.omd_site, _("sidebar"))) + +if defaults.omd_root: + sidebar_snapins["wiki"] = { + "title" : _("Wiki"), + "description" : _("Shows the Wiki Navigation of the OMD Site"), + "render" : render_wiki, + "allowed" : [ "admin", "user", "guest" ], + "styles" : """ + #snapin_container_wiki div.content { + font-weight: bold; + color: white; + } + + #snapin_container_wiki div.content p { + font-weight: normal; + } + + #wiki_navigation { + text-align: left; + } + + #wiki_search { + width: 232px; + padding: 0; + } + + #wiki_side_clear { + clear: both; + } + + #wiki_search img.iconbutton { + width: 33px; + height: 26px; + margin-top: -25px; + left: 196px; + float: left; + position: relative; + z-index:100; + } + + #wiki_search input { + margin: 0; + padding: 0px 5px; + font-size: 8pt; + width: 194px; + height: 25px; + background-image: url("images/quicksearch_field_bg.png"); + background-repeat: no-repeat; + -moz-border-radius: 0px; + border-style: none; + float: left; + } + """ + } + +# .--Virt. Host Tree-----------------------------------------------------. +# | __ ___ _ _ _ _ _____ | +# | \ \ / (_)_ __| |_ | | | | ___ ___| |_ |_ _| __ ___ ___ | +# | \ \ / /| | '__| __| | |_| |/ _ \/ __| __| | || '__/ _ \/ _ \ | +# | \ V / | | | | |_ _ | _ | (_) \__ \ |_ | || | | __/ __/ | +# | \_/ |_|_| \__(_) |_| |_|\___/|___/\__| |_||_| \___|\___| | +# | | +# '----------------------------------------------------------------------' + +def compute_tag_tree(taglist): + html.live.set_prepend_site(True) + query = "GET hosts\n" \ + "Columns: host_name state num_services_ok num_services_warn num_services_crit num_services_unknown custom_variables" + hosts = html.live.query(query) + html.live.set_prepend_site(False) + hosts.sort() + + def get_tag_group_value(groupentries, tags): + for entry in groupentries: + if entry[0] in tags: + return entry[0], entry[1] # tag, title + # Not found -> try empty entry + for entry in groupentries: + if entry[0] == None: + return None, entry[1] + + # No empty entry found -> get default (i.e. first entry) + return groupentries[0][:2] + + # Prepare list of host tag groups and topics + taggroups = {} + topics = {} + for entry in config.wato_host_tags: + grouptitle = entry[1] + if '/' in grouptitle: + topic, grouptitle = grouptitle.split("/", 1) + topics.setdefault(topic, []).append(entry) + + groupname = entry[0] + group = entry[2] + taggroups[groupname] = group + + tree = {} + for site, host_name, state, num_ok, num_warn, num_crit, num_unknown, custom_variables in hosts: + # make state reflect the state of the services + host + have_svc_problems = False + if state: + state += 1 # shift 1->2 (DOWN->CRIT) and 2->3 (UNREACH->UNKNOWN) + if num_crit: + state = 2 + have_svc_problems = True + elif num_unknown: + if state != 2: + state = 3 + have_svc_problems = True + elif num_warn: + if not state: + state = 1 + have_svc_problems = True + + tags = custom_variables.get("TAGS", []).split() + + tree_entry = tree # Start at top node + + # Now go through the levels of the tree. Each level may either be + # - a tag group id, or + # - "topic:" plus the name of a tag topic. That topic should only contain + # checkbox tags. + # The problem with the "topic" entries is, that a host may appear several + # times! 
+ + current_branches = [ tree ] + + for tag in taglist: + new_current_branches = [] + for tree_entry in current_branches: + if tag.startswith("topic:"): + topic = tag[6:] + if topic in topics: # Could have vanished + # Iterate over all host tag groups with that topic + for entry in topics[topic]: + grouptitle = entry[1].split("/", 1)[1] + group = entry[2] + for tagentry in group: + tag_value, tag_title = tagentry[:2] + if tag_value in tags: + new_current_branches.append(tree_entry.setdefault((tag_title, tag_value), {})) + + else: + if tag not in taggroups: + continue # Configuration error. User deleted tag group after configuring his tree + tag_value, tag_title = get_tag_group_value(taggroups[tag], tags) + new_current_branches.append(tree_entry.setdefault((tag_title, tag_value), {})) + + current_branches = new_current_branches + + for tree_entry in new_current_branches: + if not tree_entry: + tree_entry.update({ + "_num_hosts" : 0, + "_state" : 0, + }) + tree_entry["_num_hosts"] += 1 + tree_entry["_svc_problems"] = tree_entry.get("_svc_problems", False) or have_svc_problems + if state == 2 or tree_entry["_state"] == 2: + tree_entry["_state"] = 2 + else: + tree_entry["_state"] = max(state, tree_entry["_state"]) + + return tree + +def tag_tree_worst_state(tree): + if "_state" in tree: + return tree["_state"] + else: + states = map(tag_tree_worst_state, tree.values()) + for x in states: + if x == 2: + return 2 + return max(states) + +def tag_tree_has_svc_problems(tree): + if "_svc_problems" in tree: + return tree["_svc_problems"] + else: + for x in tree.values(): + if tag_tree_has_svc_problems(x): + return True + return False + +def tag_tree_url(taggroups, taglist, viewname): + urlvars = [("view_name", viewname), ("filled_in", "filter")] + if viewname == "svcproblems": + urlvars += [ ("st1", "on"), ("st2", "on"), ("st3", "on") ] + + for nr, (group, tag) in enumerate(zip(taggroups, taglist)): + if group.startswith("topic:"): + # Find correct tag group for this tag + for entry in config.wato_host_tags: + for tagentry in entry[2]: + if tagentry[0] == tag: # Found our tag + taggroup = entry[0] + urlvars.append(("host_tag_%d_grp" % nr, taggroup)) + urlvars.append(("host_tag_%d_op" % nr, "is")) + urlvars.append(("host_tag_%d_val" % nr, tag)) + break + else: + urlvars.append(("host_tag_%d_grp" % nr, group)) + urlvars.append(("host_tag_%d_op" % nr, "is")) + urlvars.append(("host_tag_%d_val" % nr, tag or "")) + return html.makeuri_contextless(urlvars, "view.py") + +def tag_tree_bullet(state, path, leaf): + code = '
         
        ' % ((leaf and "leaf " or ""), state) + if not leaf: + code = '' % \ + (_("Display the tree only below this node"), "|".join(path)) + code + "" + return code + " " + + +def is_tag_subdir(path, cwd): + if not cwd: + return True + elif not path: + return False + elif path[0] != cwd[0]: + return False + else: + return is_tag_subdir(path[1:], cwd[1:]) + +def render_tag_tree_level(taggroups, path, cwd, title, tree): + if not is_tag_subdir(path, cwd) and not is_tag_subdir(cwd, path): + return + + if path != cwd and is_tag_subdir(path, cwd): + bullet = tag_tree_bullet(tag_tree_worst_state(tree), path, False) + if tag_tree_has_svc_problems(tree): + # We cannot use html.plug() here, since this is not (yet) + # reentrant and it is used by the sidebar snapin updater. + # So we need to duplicate the code of icon_button here: + bullet += ('' + '' % ( + tag_tree_url(taggroups, path, "svcproblems"), + _("Show the service problems contained in this branch"))) + + + if path: + html.begin_foldable_container("tag-tree", ".".join(map(str, path)), False, bullet + title) + + items = tree.items() + items.sort() + + for nr, ((title, tag), subtree) in enumerate(items): + subpath = path + [tag or ""] + url = tag_tree_url(taggroups, subpath, "allhosts") + if "_num_hosts" in subtree: + title += " (%d)" % subtree["_num_hosts"] + href = '%s' % (url, html.attrencode(title)) + if "_num_hosts" in subtree: + + if is_tag_subdir(path, cwd): + html.write(tag_tree_bullet(subtree["_state"], subpath, True)) + if subtree.get("_svc_problems"): + url = tag_tree_url(taggroups, subpath, "svcproblems") + html.icon_button(url, _("Show the service problems contained in this branch"), + "svc_problems", target="main") + html.write(href) + html.write("
        ") + else: + render_tag_tree_level(taggroups, subpath, cwd, href, subtree) + + if path and path != cwd and is_tag_subdir(path, cwd): + html.end_foldable_container() + +virtual_host_tree_js = """ +function virtual_host_tree_changed(field) +{ + var tree_conf = field.value; + // Then send the info to python code via ajax call for persistance + get_url_sync('sidebar_ajax_tag_tree.py?conf=' + escape(tree_conf)); + refresh_single_snapin("tag_tree"); +} + +function virtual_host_tree_enter(path) +{ + get_url_sync('sidebar_ajax_tag_tree_enter.py?path=' + escape(path)); + refresh_single_snapin("tag_tree"); +} +""" + +def render_tag_tree(): + if not config.virtual_host_trees: + url = 'wato.py?varname=virtual_host_trees&mode=edit_configvar' + html.write(_('You have not defined any virtual host trees. You can ' + 'do this in the global settings for Multisite.') % url) + return + + tree_conf = config.load_user_file("virtual_host_tree", {"tree": 0, "cwd": {}}) + if type(tree_conf) == int: + tree_conf = {"tree": tree_conf, "cwd":{}} # convert from old style + + + choices = [ (str(i), v[0]) for i, v in enumerate(config.virtual_host_trees)] + html.begin_form("vtree") + + # Give chance to change one level up, if we are in a subtree + cwd = tree_conf["cwd"].get(tree_conf["tree"]) + if cwd: + upurl = "javascript:virtual_host_tree_enter(%r)" % "|".join(cwd[:-1]) + html.icon_button(upurl, _("Go up one tree level"), "back") + + html.select("vtree", choices, str(tree_conf["tree"]), onchange = 'virtual_host_tree_changed(this)') + html.write("
        ") + html.end_form() + html.final_javascript(virtual_host_tree_js) + + title, taggroups = config.virtual_host_trees[tree_conf["tree"]] + + tree = compute_tag_tree(taggroups) + render_tag_tree_level(taggroups, [], cwd, _("Virtual Host Tree"), tree) + +sidebar_snapins["tag_tree"] = { + "title" : _("Virtual Host Tree"), + "description" : _("This snapin shows tree views of your hosts based on their tag classifications. You " + "can configure which tags to use in your global settings of Multisite."), + "render" : render_tag_tree, + "refresh" : True, + "allowed" : [ "admin", "user", "guest" ], + "styles" : """ + +#snapin_tag_tree img.iconbutton { +} + +#snapin_tag_tree select { + background-color: #6DA1B8; + border-color: #123A4A; + color: #FFFFFF; + font-size: 8pt; + height: 19px; + margin-bottom: 2px; + margin-top: -2px; + padding: 0; + width: 230px; +} + +#snapin_tag_tree div.statebullet { + position: relative; + top: 3px; + left: 1px; + float: none; + display: inline-block; + width: 8px; + height: 8px; + margin-right: 0px; + box-shadow: 0px 0px 0.7px #284850; +} + +#snapin_tag_tree ul > div.statebullet.leaf { + margin-left: 16px; +} +#snapin_tag_tree b { + font-weight: normal; +} + +#snapin_tag_tree { + position: relative; + top: 0px; + left: 0px; +} +#snapin_tag_tree form img.iconbutton { + width: 16px; + height: 16px; + float: none; + display: inline-box; + position: absolute; + top: 9px; + left: 14px; +} +#snapin_tag_tree select { + width: 198px; + margin-left: 17px; +} +""" +} diff -Nru check-mk-1.2.2p3/plugins/sidebar/wato.py check-mk-1.2.6p12/plugins/sidebar/wato.py --- check-mk-1.2.2p3/plugins/sidebar/wato.py 2013-11-05 09:23:09.000000000 +0000 +++ check-mk-1.2.6p12/plugins/sidebar/wato.py 2015-09-21 10:59:54.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. @@ -24,7 +24,7 @@ # to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, # Boston, MA 02110-1301 USA. -import config, wato +import config, wato, views, dashboard # +----------------------------------------------------------------------+ # | __ ___ _____ ___ | @@ -36,7 +36,8 @@ # +----------------------------------------------------------------------+ def render_wato(mini): if not config.wato_enabled: - html.write(_("WATO is disabled in multisite.mk.")) + html.write(_("WATO is disabled.")) + return False elif not config.may("wato.use"): html.write(_("You are not allowed to use Check_MK's web configuration GUI.")) return False @@ -55,9 +56,10 @@ else: iconlink(title, url, icon) - num_pending = wato.api.num_pending_changes() + num_pending = wato.num_pending_changes() if num_pending: footnotelinks([(_("%d changes") % num_pending, "wato.py?mode=changelog")]) + html.write('
        ') sidebar_snapins["admin"] = { @@ -77,6 +79,7 @@ "styles": """ #snapin_admin_mini { padding-top: 6px; + clear: right; } #snapin_admin_mini img { margin-right: 3.9px; @@ -89,33 +92,12 @@ } #snapin_admin_mini div.footnotelink { - margin-top: -14px; + float: right; } -""", +#snapin_admin_mini div.clear { + clear: right; } - - -# +----------------------------------------------------------------------+ -# | _____ _ _ _____ | -# | | ___|__ | | __| | ___ _ __ |_ _| __ ___ ___ | -# | | |_ / _ \| |/ _` |/ _ \ '__|____| || '__/ _ \/ _ \ | -# | | _| (_) | | (_| | __/ | |_____| || | | __/ __/ | -# | |_| \___/|_|\__,_|\___|_| |_||_| \___|\___| | -# | | -# +----------------------------------------------------------------------+ - -def render_wato_folders(): - if not config.wato_enabled: - html.write(_("WATO is disabled in multisite.mk.")) - else: - html.write(_('This snapin is deprecated. Please use the WATO foldertree snapin instead.')) - -sidebar_snapins["wato"] = { - "title" : _("Hosts"), - "description" : _("A foldable tree showing all your WATO folders and files - " - "allowing you to navigate in the tree while using views or being in WATO"), - "render" : render_wato_folders, - "allowed" : [ "admin", "user", "guest" ], +""", } # .----------------------------------------------------------------------. @@ -129,13 +111,7 @@ # | | # '----------------------------------------------------------------------' -def render_wato_foldertree(): - if not config.wato_enabled: - html.write(_("WATO is disabled in multisite.mk.")) - else: - render_wato_foldertree() - -def render_wato_foldertree(): +def compute_foldertree(): html.live.set_prepend_site(True) query = "GET hosts\n" \ "Stats: state >= 0\n" \ @@ -156,6 +132,7 @@ '.folders': {}, } + # After the query we have a list of lists where each # row is a folder with the number of hosts on this level. # @@ -190,24 +167,83 @@ del user_folders[folder_path] # + # Now reduce the tree by e.g. removing top-level parts which the user is not + # permitted to see directly. Example: + # Locations + # -> Hamburg: Permitted to see all hosts + # -> Munich: Permitted to see no host + # In this case, where only a single child with hosts is available, remove the + # top level + def reduce_tree(folders): + for folder_path, folder in folders.items(): + if len(folder['.folders']) == 1 and folder['.num_hosts'] == 0: + child_path, child_folder = folder['.folders'].items()[0] + folders[child_path] = child_folder + del folders[folder_path] + + reduce_tree(folders) + + reduce_tree(user_folders) + return user_folders + + +def render_tree_folder(f, js_func): + subfolders = f.get(".folders", {}) + is_leaf = len(subfolders) == 0 + + # Suppress indentation for non-emtpy root folder + if f['.path'] == '' and is_leaf: + html.write("
          ") # empty root folder + elif f and f['.path'] != '': + html.write("
            ") + + title = '%s (%d)' % ( + js_func, f[".path"], f["title"], f[".num_hosts"]) + + if not is_leaf: + html.begin_foldable_container('wato-hosts', "/" + f[".path"], False, title) + for sf in wato.sort_by_title(subfolders.values()): + render_tree_folder(sf, js_func) + html.end_foldable_container() + else: + html.write("
          • " + title + "
          • ") + + html.write("
          ") + + +def render_wato_foldertree(): + is_slave_site = not wato.is_distributed() and os.path.exists(defaults.check_mk_configdir + "/distributed_wato.mk") + if not is_slave_site: + if not config.wato_enabled: + html.write(_("WATO is disabled.")) + return False + elif not config.may("wato.use"): + html.write(_("You are not allowed to use Check_MK's web configuration GUI.")) + return False + + user_folders = compute_foldertree() + + # # Render link target selection # selected_topic, selected_target = config.load_user_file("foldertree", (_('Hosts'), 'allhosts')) - topic_views = views_by_topic() + views.load_views() + dashboard.load_dashboards() + topic_views = visuals_by_topic(views.permitted_views().items() + dashboard.permitted_dashboards().items()) topics = [ (t, t) for t, s in topic_views ] html.select("topic", topics, selected_topic, onchange = 'wato_tree_topic_changed(this)') html.write('%s' % _('Topic:')) - for topic, views in topic_views: + for topic, view_list in topic_views: targets = [] - for t, title, name in views: + for t, title, name, is_view in view_list: if config.visible_views and name not in config.visible_views: continue if config.hidden_views and name in config.hidden_views: continue if t == topic: - if topic == _('Dashboards'): + if not is_view: name = 'dashboard|' + name targets.append((name, title)) @@ -224,34 +260,12 @@ # Now render the whole tree if user_folders: - render_tree_folder(user_folders['']) + render_tree_folder(user_folders.values()[0], 'wato_tree_click') -def render_tree_folder(f): - subfolders = f.get(".folders", {}) - is_leaf = len(subfolders) == 0 - - # Suppress indentation for non-emtpy root folder - if f['.path'] == '' and is_leaf: - html.write("
            ") # empty root folder - elif f and f['.path'] != '': - html.write("
              ") - - title = '%s (%d)' % ( - f[".path"], f["title"], f[".num_hosts"]) - - if not is_leaf: - html.begin_foldable_container('wato-hosts', "/" + f[".path"], False, title) - for sf in wato.api.sort_by_title(subfolders.values()): - render_tree_folder(sf) - html.end_foldable_container() - else: - html.write("
            • " + title + "
            • ") - - html.write("
            ") sidebar_snapins['wato_foldertree'] = { - 'title' : _('Foldertree'), - 'description' : _('This snapin shows the folders defined in WATO. It can be used to open views filtered by the WATO folder.'), + 'title' : _('Tree of Folders'), + 'description' : _('This snapin shows the folders defined in WATO. It can be used to open views filtered by the WATO folder. It works standalone, without interaction with any other snapin.'), 'render' : render_wato_foldertree, 'allowed' : [ 'admin', 'user', 'guest' ], 'styles' : """ @@ -277,3 +291,19 @@ } """ } + +def render_wato_folders(): + user_folders = compute_foldertree() + + if user_folders: + render_tree_folder(user_folders.values()[0], 'wato_folders_clicked') + +sidebar_snapins['wato_folders'] = { + 'title' : _('Folders'), + 'description' : _('This snapin shows the folders defined in WATO. It can ' + 'be used to open views filtered by the WATO folder. This ' + 'snapin interacts with the "Views" snapin, when both are ' + 'enabled.'), + 'render' : render_wato_folders, + 'allowed' : [ 'admin', 'user', 'guest' ], +} diff -Nru check-mk-1.2.2p3/plugins/smart check-mk-1.2.6p12/plugins/smart --- check-mk-1.2.2p3/plugins/smart 2013-11-05 09:22:37.000000000 +0000 +++ check-mk-1.2.6p12/plugins/smart 2015-06-24 09:48:39.000000000 +0000 @@ -1,4 +1,72 @@ #!/bin/bash +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. + + + + +# This will be called on LSI based raidcontrollers and accesses +# the SMART data of SATA disks attached to a SAS Raid HBA via +# SCSI protocol interface. +megaraid_info() +{ + + PDINFO=$(MegaCli -PDlist -a0) + + echo "$PDINFO" | \ + while read line ; do + case "$line" in + # FIRST LINE + "Enclosure Device ID"*) #Enclosure Device ID: 252 + ENC=$( echo "$line" | awk '{print $4}') + unset SLOT LOG_DEV_ID VEND MODEL + ;; + "Slot Number"*) #Slot Number: 7 + SLOT=$( echo "$line" | awk '{print $3}') + ;; + # Identify the logical device ID. smartctl needs it to access the disk. + "Device Id"*) #Device Id: 19 + LOG_DEV_ID=$( echo "$line" | awk '{print $3}') + ;; + "PD Type"*) #PD Type: SATA + VEND=$( echo "$line" | awk '{print $3}') + ;; + # This is the last value, generate output here + "Inquiry Data"*) + #Inquiry Data: WD-WCC1T1035197WDC WD20EZRX-00DC0B0 80.00A80 + # $4 seems to be better for some vendors... wont be possible to get this perfect. 
+ MODEL=$( echo "$line" | awk '{print $3}') + + # /dev/sdc ATA SAMSUNG_SSD_830 5 Reallocated_Sector_Ct 0x0033 100 100 010 Pre-fail Always - + smartctl -d megaraid,${LOG_DEV_ID} -v 9,raw48 -A /dev/sg0 | \ + grep Always | egrep -v '^190(.*)Temperature(.*)' | \ + sed "s|^|Enc${ENC}/Slot${SLOT} $VEND $MODEL |" + ;; + esac + done +} + # Only handle always updated values, add device path and vendor/model if which smartctl > /dev/null 2>&1 ; then @@ -9,7 +77,7 @@ if which tw_cli > /dev/null 2>&1 ; then # support for only one controller at the moment TWAC=$(tw_cli show | awk 'NR < 4 { next } { print $1 }' | head -n 1) - + # - add a trailing zero to handle case of unused slot # trailing zeros are part of the device links in /dev/disk/by-id/... anyway # - only the last 9 chars seem to be relevant @@ -23,7 +91,10 @@ fi echo '<<>>' - for D in /dev/disk/by-id/scsi-*; do + SEEN= + for D in /dev/disk/by-id/{scsi,ata}-*; do + [ "$D" != "${D%scsi-\*}" ] && continue + [ "$D" != "${D%ata-\*}" ] && continue [ "$D" != "${D%-part*}" ] && continue N=$(readlink $D) N=${N##*/} @@ -39,12 +110,19 @@ MODEL=$(smartctl -a $D | grep -i "device model" | sed -e "s/.*:[ ]*//g" -e "s/\ /_/g") fi # Excluded disk models for SAN arrays or certain RAID luns that are also not usable.. - if [ "$MODEL" = "iSCSI_Disk" ] || ["$MODEL" = "LOGICAL_VOLUME" ]; then + if [ "$MODEL" = "iSCSI_Disk" -o "$MODEL" = "LOGICAL_VOLUME" ]; then continue fi - # strip device name for final output - DNAME=${D#/dev/disk/by-id/scsi-} + # Avoid duplicate entries for same device + if [ "${SEEN//.$N./}" != "$SEEN" ] ; then + continue + fi + SEEN="$SEEN.$N." + + # strip device name for final output + DNAME=${D#/dev/disk/by-id/scsi-} + DNAME=${DNAME#/dev/disk/by-id/ata-} # 2012-01-25 Stefan Kaerst CDJ - special option in case vendor is AMCC CMD= if [ "$VEND" == "AMCC" -a -n "$TWAC" ]; then @@ -70,7 +148,14 @@ else CMD="smartctl -d ata -v 9,raw48 -A $D" fi - + [ -n "$CMD" ] && $CMD | grep Always | egrep -v '^190(.*)Temperature(.*)' | sed "s|^|$DNAME $VEND $MODEL |" done 2>/dev/null + + + # Call MegaRaid submodule if conditions are met + if which MegaCli > /dev/null && [ -c /dev/sg0 ] ; then + megaraid_info + fi fi + diff -Nru check-mk-1.2.2p3/plugins/sylo check-mk-1.2.6p12/plugins/sylo --- check-mk-1.2.2p3/plugins/sylo 2012-10-04 13:12:37.000000000 +0000 +++ check-mk-1.2.6p12/plugins/sylo 1970-01-01 00:00:00.000000000 +0000 @@ -1,33 +0,0 @@ -#!/bin/bash -# +------------------------------------------------------------------+ -# | ____ _ _ __ __ _ __ | -# | / ___| |__ ___ ___| | __ | \/ | |/ / | -# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | -# | | |___| | | | __/ (__| < | | | | . \ | -# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | -# | | -# | Copyright Mathias Kettner 2010 mk@mathias-kettner.de | -# +------------------------------------------------------------------+ -# -# This file is part of Check_MK. -# The official homepage is at http://mathias-kettner.de/check_mk. -# -# check_mk is free software; you can redistribute it and/or modify it -# under the terms of the GNU General Public License as published by -# the Free Software Foundation in version 2. check_mk is distributed -# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- -# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A -# PARTICULAR PURPOSE. See the GNU General Public License for more de- -# ails. You should have received a copy of the GNU General Public -# License along with GNU Make; see the file COPYING. 
If not, write -# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, -# Boston, MA 02110-1301 USA. - -BASE_PATH=/var/spool/sylo - -if [ -f "$BASE_PATH/silo.hint" ]; then - echo "<<>>" - stat --printf "%Y " "$BASE_PATH/silo.hint" - cat "$BASE_PATH/silo.hint" - echo -fi diff -Nru check-mk-1.2.2p3/plugins/symantec_av check-mk-1.2.6p12/plugins/symantec_av --- check-mk-1.2.2p3/plugins/symantec_av 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/plugins/symantec_av 2015-06-24 09:48:39.000000000 +0000 @@ -0,0 +1,38 @@ +#!/bin/sh +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. + +if [ -f /opt/Symantec/symantec_antivirus/sav ] +then + echo "<<>>" + /opt/Symantec/symantec_antivirus/sav info -d + + echo "<<>>" + /opt/Symantec/symantec_antivirus/sav info -a + + echo "<<>>" + /opt/Symantec/symantec_antivirus/sav quarantine -l +fi + diff -Nru check-mk-1.2.2p3/plugins/unitrends_backup check-mk-1.2.6p12/plugins/unitrends_backup --- check-mk-1.2.2p3/plugins/unitrends_backup 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/plugins/unitrends_backup 2015-06-24 09:48:39.000000000 +0000 @@ -0,0 +1,88 @@ +#!/usr/bin/pnp +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. 
+
+<?php
+print "<<<unitrends_backup:sep(124)>>>\n";
+$conn = "port=5432 dbname=bpdb user=postgres";
+$db = pg_connect($conn);
+
+$query = "SELECT
+            schedule_id, a.type AS app_type
+          FROM
+            bp.schedules AS s
+          JOIN
+            bp.application_lookup AS a USING(app_id)
+          WHERE
+            enabled=true AND email_report=true
+          ORDER BY s.name";
+$res = pg_query($db, $query);
+
+$start = time() - (24 * 3600);
+$in = array("start_time" => $start);
+bp_bypass_cookie(3, 'schedule_report');
+
+while ($obj = pg_fetch_object($res)) {
+    if ($obj->app_type == "Archive")
+        continue;
+
+    $in["schedule_id"] = (int)$obj->schedule_id;
+    $ret = bp_get_schedule_history($in);
+    if (empty($ret[0]["backups"]))
+        continue;
+
+    print "HEADER|".
+          $ret[0]["schedule_name"]."|".
+          $ret[0]["application_name"]."|".
+          $ret[0]["schedule_description"]."|".
+          $ret[0]["failures"]."\n";
+
+    foreach($ret[0]["backups"] as $trash => $backup) {
+        foreach($backup as $row) {
+
+            $name = $row["primary_name"];
+            switch($ret[0]["app_type"]){
+                case "SQL Server":
+                    $name .= "/".$row["secondary_name"];
+                    break;
+
+                case "VMware":
+                    $name .= ", VM ".$row["secondary_name"];
+                    break;
+            }
+
+            $backup_type = $row["type"];
+
+            if (!isset($name))
+                $name = $backup_type;
+
+            $backup_no = (isset($row["backup_id"])) ? $row["backup_id"] : "N/A";
+
+            print "$name|$backup_no|$backup_type|".$row['description']."\n";
+        }
+    }
+}
+pg_free_result($res);
+bp_destroy_cookie();
+?>
diff -Nru check-mk-1.2.2p3/plugins/unitrends_replication check-mk-1.2.6p12/plugins/unitrends_replication
--- check-mk-1.2.2p3/plugins/unitrends_replication 1970-01-01 00:00:00.000000000 +0000
+++ check-mk-1.2.6p12/plugins/unitrends_replication 2015-06-24 09:48:39.000000000 +0000
@@ -0,0 +1,49 @@
+#!/usr/bin/python
+# -*- encoding: utf-8; py-indent-offset: 4 -*-
+# +------------------------------------------------------------------+
+# |             ____ _               _        __  __ _  __           |
+# |            / ___| |__   ___  ___| | __   |  \/  | |/ /           |
+# |           | |   | '_ \ / _ \/ __| |/ /   | |\/| | ' /            |
+# |           | |___| | | |  __/ (__|   <    | |  | | . \            |
+# |            \____|_| |_|\___|\___|_|\_\___|_|  |_|_|\_\           |
+# |                                                                  |
+# | Copyright Mathias Kettner 2014             mk@mathias-kettner.de |
+# +------------------------------------------------------------------+
+#
+# This file is part of Check_MK.
+# The official homepage is at http://mathias-kettner.de/check_mk.
+#
+# check_mk is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by
+# the Free Software Foundation in version 2. check_mk is distributed
+# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with-
+# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE. See the GNU General Public License for more de-
+# tails. You should have received a copy of the GNU General Public
+# License along with GNU Make; see the file COPYING. If not, write
+# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
+# Boston, MA 02110-1301 USA.
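+# Sketch of the data flow below (derived from this code, not from separate
+# documentation): syncstatus.php on the local recovery console is queried
+# for the replication jobs of the last 24 hours, and one pipe separated
+# line per SecureSyncStatus element is printed for the check to parse;
+# dpu = 1 is passed as the sid parameter of that request.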
+ +import sys, time, urllib +from xml.dom import minidom +now = int(time.time()) +start = now - 24 * 60 * 60 +end = now +dpu = 1 + +url = "http://localhost/recoveryconsole/bpl/syncstatus.php?type=replicate&arguments=start:%s,end:%s&sid=%s&auth=1:" % ( start, end, dpu ) +xml = urllib.urlopen(url) + +print "<<>>" +dom = minidom.parse(xml) +for item in dom.getElementsByTagName('SecureSyncStatus'): + application = item.getElementsByTagName('Application') + if application: + application = application[0].attributes['Name'].value + else: + application = "N/A" + result = item.getElementsByTagName('Result')[0].firstChild.data + completed = item.getElementsByTagName('Complete')[0].firstChild.data + targetname = item.getElementsByTagName('TargetName')[0].firstChild.data + instancename = item.getElementsByTagName('InstanceName')[0].firstChild.data + print "%s|%s|%s|%s|%s" % (application, result, completed, targetname, instancename) diff -Nru check-mk-1.2.2p3/plugins/userdb/hook_auth.py check-mk-1.2.6p12/plugins/userdb/hook_auth.py --- check-mk-1.2.2p3/plugins/userdb/hook_auth.py 2013-11-05 09:23:09.000000000 +0000 +++ check-mk-1.2.6p12/plugins/userdb/hook_auth.py 2015-06-24 09:48:38.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. @@ -40,6 +40,9 @@ # user_roles() # Returns an array of rolenames of the user # +# user_groups() +# Returns an array of names of contactgroups of the user +# # user_permissions() # Returns an array of all permissions of the user # @@ -85,16 +88,30 @@ return s -def create_php_file(callee, users, role_permissions, folder_permissions): +def create_php_file(callee, users, role_permissions, groups, folder_permissions): + # Do not change WATO internal objects + nagvis_users = copy.deepcopy(users) + # Set a language for all users - for username in users: - users[username].setdefault('language', config.default_language) + for user in nagvis_users.values(): + user.setdefault('language', config.default_language) - file(g_auth_base_dir + '/auth.php', 'w').write(''' $perms) { + if (!isset($perms['read'])) + $perms['read'] = false; + elseif (!isset($perms['write'])) + $perms['write'] = false; + } + return $permissions; } } @@ -119,6 +143,14 @@ return $mk_users[$username]['roles']; } +function user_groups($username) { + global $mk_users; + if(!isset($mk_users[$username]) || !isset($mk_users[$username]['contactgroups'])) + return array(); + else + return $mk_users[$username]['contactgroups']; +} + function user_permissions($username) { global $mk_roles; $permissions = array(); @@ -181,14 +213,46 @@ return false; } +function permitted_maps($username) { + global $mk_groups; + $maps = array(); + foreach (user_groups($username) AS $groupname) { + if (isset($mk_groups[$groupname])) { + foreach ($mk_groups[$groupname] AS $mapname) { + $maps[$mapname] = null; + } + } + } + return array_keys($maps); +} + ?> -''' % (callee, format_php(users), format_php(role_permissions), format_php(folder_permissions))) +''' % (callee, format_php(nagvis_users), format_php(role_permissions), + format_php(groups), format_php(folder_permissions))) + + # Now really replace the file + os.rename(tempfile, g_auth_base_dir + '/auth.php') + + release_lock(lockfile) def create_auth_file(callee, users): 
make_nagios_directory(g_auth_base_dir) - import wato # HACK: cleanup! - create_php_file(callee, users, config.get_role_permissions(), wato.get_folder_permissions_of_users(users)) -hooks.register('users-saved', lambda users: create_auth_file("users-saved", users)) -hooks.register('roles-saved', lambda x: create_auth_file("roles-saved", load_users())) -hooks.register('activate-changes', lambda x: create_auth_file("activate-changes", load_users())) + if config.export_folder_permissions: + import wato # HACK: cleanup! + folder_permissions = wato.get_folder_permissions_of_users(users) + else: + folder_permissions = {} + + contactgroups = load_group_information().get('contact', {}) + groups = {} + for gid, group in contactgroups.items(): + if 'nagvis_maps' in group and group['nagvis_maps']: + groups[gid] = group['nagvis_maps'] + + create_php_file(callee, users, config.get_role_permissions(), groups, folder_permissions) + +hooks.register('users-saved', lambda users: create_auth_file("users-saved", users)) +hooks.register('roles-saved', lambda x: create_auth_file("roles-saved", load_users())) +hooks.register('contactgroups-saved', lambda x: create_auth_file("contactgroups-saved", load_users())) +hooks.register('activate-changes', lambda x: create_auth_file("activate-changes", load_users())) diff -Nru check-mk-1.2.2p3/plugins/userdb/htpasswd.py check-mk-1.2.6p12/plugins/userdb/htpasswd.py --- check-mk-1.2.2p3/plugins/userdb/htpasswd.py 2013-11-05 09:23:09.000000000 +0000 +++ check-mk-1.2.6p12/plugins/userdb/htpasswd.py 2015-09-21 10:59:54.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. @@ -107,7 +107,14 @@ # users from htpasswd are lost. If you start managing users with # WATO, you should continue to do so or stop doing to for ever... # Locked accounts get a '!' before their password. This disable it. - out = create_user_file(defaults.htpasswd_file, "w") + filename = defaults.htpasswd_file + '.new' + rename_file = True + try: + out = create_user_file(filename, "w") + except: + rename_file = False + out = create_user_file(defaults.htpasswd_file, "w") + for id, user in users.items(): # only process users which are handled by htpasswd connector if user.get('connector', 'htpasswd') != 'htpasswd': @@ -119,6 +126,9 @@ else: locksym = "" out.write("%s:%s%s\n" % (id, locksym, user["password"])) + out.close() + if rename_file: + os.rename(filename, filename[:-4]) multisite_user_connectors.append({ 'id': 'htpasswd', diff -Nru check-mk-1.2.2p3/plugins/userdb/ldap.py check-mk-1.2.6p12/plugins/userdb/ldap.py --- check-mk-1.2.2p3/plugins/userdb/ldap.py 2013-11-05 09:23:09.000000000 +0000 +++ check-mk-1.2.6p12/plugins/userdb/ldap.py 2015-09-21 10:59:54.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. @@ -25,12 +25,21 @@ # Boston, MA 02110-1301 USA. 
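The htpasswd.py hunk above switches to a write-then-rename scheme: the new content goes to a sibling ".new" file, which is then renamed over the original, so concurrent readers never see a half-written htpasswd file. A minimal standalone sketch of that pattern (function and file names are hypothetical, not from the Check_MK source):

    import os

    def write_atomically(path, content):
        # Write everything to a sibling temp file first; readers keep
        # seeing the old file until the rename below happens.
        temp_path = path + '.new'
        f = open(temp_path, 'w')
        try:
            f.write(content)
        finally:
            f.close()
        # On POSIX, os.rename() replaces the target in one atomic step as
        # long as source and target live on the same filesystem.
        os.rename(temp_path, path)
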
import config, defaults -import time, copy +import time, copy, traceback try: # docs: http://www.python-ldap.org/doc/html/index.html import ldap import ldap.filter + from ldap.controls import SimplePagedResultsControl + + # be compatible to both python-ldap below 2.4 and above + try: + LDAP_CONTROL_PAGED_RESULTS = ldap.LDAP_CONTROL_PAGE_OID + ldap_compat = False + except: + LDAP_CONTROL_PAGED_RESULTS = ldap.CONTROL_PAGEDRESULTS + ldap_compat = True except: pass from lib import * @@ -40,6 +49,8 @@ # File for storing the time of the last success event g_ldap_sync_time_file = defaults.var_dir + '/web/ldap_sync_time.mk' +# Exists when last ldap sync failed, contains exception text +g_ldap_sync_fail_file = defaults.var_dir + '/web/ldap_sync_fail.mk' # LDAP attributes are case insensitive, we only use lower case! # Please note: This are only default values. The user might override this @@ -55,6 +66,12 @@ # group attributes 'member': 'uniquemember', }, + '389directoryserver': { + 'user_id': 'uid', + 'pw_changed': 'krbPasswordExpiration', + # group attributes + 'member': 'uniquemember', + }, } # LDAP attributes are case insensitive, we only use lower case! @@ -69,9 +86,34 @@ 'users': '(objectclass=person)', 'groups': '(objectclass=groupOfUniqueNames)', }, + '389directoryserver': { + 'users': '(objectclass=person)', + 'groups': '(objectclass=groupOfUniqueNames)', + }, } -# .----------------------------------------------------------------------. +# All these characters are replaced from user ids by default. Check_MK +# currently does not support special characters in user ids, so users +# not matching this specification are cleaned up with this map. When the +# user accounts still do not match the specification, they are skipped. +ldap_umlaut_translation = { + ord(u'ü'): u'ue', + ord(u'ö'): u'oe', + ord(u'ä'): u'ae', + ord(u'ß'): u'ss', + ord(u'Ü'): u'UE', + ord(u'Ö'): u'OE', + ord(u'Ä'): u'AE', + ord(u'å'): u'aa', + ord(u'Å'): u'Aa', + ord(u'Ø'): u'Oe', + ord(u'ø'): u'oe', + ord(u'Æ'): u'Ae', + ord(u'æ'): u'ae', +} + +#. +# .-General LDAP code----------------------------------------------------. # | _ ____ _ ____ | # | | | | _ \ / \ | _ \ | # | | | | | | |/ _ \ | |_) | | @@ -82,96 +124,144 @@ # | General LDAP handling code | # '----------------------------------------------------------------------' +def make_utf8(x): + if type(x) == unicode: + return x.encode('utf-8') + else: + return x + def ldap_log(s): - if config.ldap_debug_log is not None: - file(ldap_replace_macros(config.ldap_debug_log), "a").write('%s\n' % s) + if config.ldap_debug_log: + log_file = defaults.log_dir + '/ldap.log' + file(log_file, "a").write('%s %s\n' % (time.strftime('%Y-%m-%d %H:%M:%S'), make_utf8(s))) class MKLDAPException(MKGeneralException): pass ldap_connection = None +ldap_connection_options = None -def ldap_uri(): +def ldap_uri(server): if 'use_ssl' in config.ldap_connection: uri = 'ldaps://' else: uri = 'ldap://' - return uri + '%s:%d' % (config.ldap_connection['server'], config.ldap_connection['port']) - -def ldap_connect(): - global ldap_connection, ldap_connection_options - - if ldap_connection and config.ldap_connection == ldap_connection_options: - return # Use existing connections (if connection settings have not changed) + return uri + '%s:%d' % (server, config.ldap_connection['port']) +def ldap_test_module(): try: ldap except: raise MKLDAPException(_("The python module python-ldap seems to be missing. 
You need to " "install this extension to make the LDAP user connector work.")) +def ldap_servers(): + servers = [ config.ldap_connection['server'] ] + if config.ldap_connection.get('failover_servers'): + servers += config.ldap_connection.get('failover_servers') + return servers + +def ldap_connect_server(server): + try: + uri = ldap_uri(server) + conn = ldap.ldapobject.ReconnectLDAPObject(uri) + conn.protocol_version = config.ldap_connection['version'] + conn.network_timeout = config.ldap_connection.get('connect_timeout', 2.0) + conn.retry_delay = 0.5 + + # When using the domain top level as base-dn, the subtree search stumbles with referral objects. + # whatever. We simply disable them here when using active directory. Hope this fixes all problems. + if config.ldap_connection['type'] == 'ad': + conn.set_option(ldap.OPT_REFERRALS, 0) + + ldap_default_bind(conn) + return conn, None + except (ldap.SERVER_DOWN, ldap.TIMEOUT, ldap.LOCAL_ERROR, ldap.LDAPError), e: + return None, '%s: %s' % (uri, e[0].get('info', e[0].get('desc', ''))) + except MKLDAPException, e: + return None, '%s' % e + +def ldap_disconnect(): + global ldap_connection, ldap_connection_options + ldap_connection = None + ldap_connection_options = None + +def ldap_connect(enforce_new = False, enforce_server = None): + global ldap_connection, ldap_connection_options + + if not enforce_new \ + and not "no_persistent" in config.ldap_connection \ + and ldap_connection \ + and config.ldap_connection == ldap_connection_options: + ldap_log('LDAP CONNECT - Using existing connecting') + return # Use existing connections (if connection settings have not changed) + else: + ldap_log('LDAP CONNECT - Connecting...') + + ldap_test_module() + # Some major config var validations if not config.ldap_connection.get('server'): raise MKLDAPException(_('The LDAP connector is enabled in global settings, but the ' 'LDAP server to connect to is not configured. Please fix this in the ' - 'LDAP ' + 'LDAP ' 'connection settings.')) if not config.ldap_userspec.get('dn'): raise MKLDAPException(_('The distinguished name of the container object, which holds ' 'the user objects to be authenticated, is not configured. Please ' - 'fix this in the ' + 'fix this in the ' 'LDAP User Settings.')) try: - ldap_connection = ldap.ldapobject.ReconnectLDAPObject(ldap_uri()) - ldap_connection.protocol_version = config.ldap_connection['version'] - ldap_connection.network_timeout = config.ldap_connection.get('connect_timeout', 2.0) - - # When using the domain top level as base-dn, the subtree search stumbles with referral objects. - # whatever. We simply disable them here when using active directory. Hope this fixes all problems. - if config.ldap_connection['type'] == 'ad': - ldap_connection.set_option(ldap.OPT_REFERRALS, 0) + errors = [] + if enforce_server: + servers = [ enforce_server ] + else: + servers = ldap_servers() - ldap_default_bind() + for server in servers: + ldap_connection, error_msg = ldap_connect_server(server) + if ldap_connection: + break # got a connection! 
+ else: + errors.append(error_msg) + + # Got no connection to any server + if ldap_connection is None: + raise MKLDAPException(_('LDAP connection failed:\n%s') % + ('\n'.join(errors))) # on success, store the connection options the connection has been made with ldap_connection_options = config.ldap_connection - except ldap.SERVER_DOWN, e: - msg = e[0].get('info', e[0].get('desc', '')) - ldap_connection = None # Invalidate connection on failure - raise MKLDAPException(_('The LDAP connector is unable to connect to the LDAP server (%s).') % msg) - - except ldap.LDAPError, e: - html.write(repr(e)) - ldap_connection = None # Invalidate connection on failure - raise MKLDAPException(e) - except Exception: - ldap_connection = None # Invalidate connection on failure + # Invalidate connection on failure + ldap_connection = None + ldap_connection_options = None raise # Bind with the default credentials -def ldap_default_bind(): +def ldap_default_bind(conn): try: if 'bind' in config.ldap_connection: ldap_bind(ldap_replace_macros(config.ldap_connection['bind'][0]), - config.ldap_connection['bind'][1], catch = False) + config.ldap_connection['bind'][1], catch = False, conn = conn) else: - ldap_bind('', '', catch = False) # anonymous bind + ldap_bind('', '', catch = False, conn = conn) # anonymous bind except (ldap.INVALID_CREDENTIALS, ldap.INAPPROPRIATE_AUTH): raise MKLDAPException(_('Unable to connect to LDAP server with the configured bind credentials. ' 'Please fix this in the ' - 'LDAP ' - 'connection settings.')) + 'LDAP connection settings.')) -def ldap_bind(username, password, catch = True): - ldap_log('LDAP_BIND %s' % username) +def ldap_bind(user_dn, password, catch = True, conn = None): + if conn is None: + conn = ldap_connection + ldap_log('LDAP_BIND %s' % user_dn) try: - ldap_connection.simple_bind_s(username, password) + conn.simple_bind_s(user_dn, password) ldap_log(' SUCCESS') except ldap.LDAPError, e: ldap_log(' FAILED (%s)' % e) @@ -180,6 +270,68 @@ else: raise +def ldap_async_search(base, scope, filt, columns): + ldap_log(' ASYNC SEARCH') + # issue the ldap search command (async) + msgid = ldap_connection.search_ext(base, scope, filt, columns) + + results = [] + while True: + restype, resdata = ldap_connection.result(msgid = msgid, + timeout = config.ldap_connection.get('response_timeout', 5)) + + results.extend(resdata) + if restype == ldap.RES_SEARCH_RESULT or not resdata: + break + + # no limit at the moment + #if sizelimit and len(users) >= sizelimit: + # ldap_connection.abandon_ext(msgid) + # break + time.sleep(0.1) + + return results + +def ldap_paged_async_search(base, scope, filt, columns): + ldap_log(' PAGED ASYNC SEARCH') + page_size = config.ldap_connection.get('page_size', 100) + + if ldap_compat: + lc = SimplePagedResultsControl(size = page_size, cookie = '') + else: + lc = SimplePagedResultsControl( + LDAP_CONTROL_PAGED_RESULTS, True, (page_size, '') + ) + + results = [] + while True: + # issue the ldap search command (async) + msgid = ldap_connection.search_ext(base, scope, filt, columns, serverctrls = [lc]) + + unused_code, response, unused_msgid, serverctrls = ldap_connection.result3( + msgid = msgid, timeout = config.ldap_connection.get('response_timeout', 5) + ) + + for result in response: + results.append(result) + + # Mark current position in pagination control for next loop + cookie = None + for serverctrl in serverctrls: + if serverctrl.controlType == LDAP_CONTROL_PAGED_RESULTS: + if ldap_compat: + cookie = serverctrl.cookie + if cookie: + lc.cookie = cookie + 
else: + cookie = serverctrl.controlValue[1] + if cookie: + lc.controlValue = (page_size, cookie) + break + if not cookie: + break + return results + def ldap_search(base, filt = '(objectclass=*)', columns = [], scope = None): if scope: config_scope = scope @@ -196,34 +348,64 @@ ldap_log('LDAP_SEARCH "%s" "%s" "%s" "%r"' % (base, scope, filt, columns)) start_time = time.time() - # Convert all keys to lower case! - result = [] - try: - for dn, obj in ldap_connection.search_s(base, scope, filt, columns): - if dn is None: - continue # skip unwanted answers - new_obj = {} - for key, val in obj.iteritems(): - new_obj[key.lower().decode('utf-8')] = [ i.decode('utf-8') for i in val ] - result.append((dn, new_obj)) - except ldap.NO_SUCH_OBJECT, e: - raise MKLDAPException(_('The given base object "%s" does not exist in LDAP (%s))') % (base, e)) - - except ldap.FILTER_ERROR, e: - raise MKLDAPException(_('The given ldap filter "%s" is invalid (%s)') % (filt, e)) - - except ldap.SIZELIMIT_EXCEEDED: - raise MKLDAPException(_('The response reached a size limit. This could be due to ' - 'a sizelimit configuration on the LDAP server.
            Throwing away the ' - 'incomplete results. You should change the scope of operation ' - 'within the ldap or adapt the limit settings of the LDAP server.')) + # In some environments, the connection to the LDAP server does not seem to + # be as stable as it is needed. So we try to repeat the query for three times. + tries_left = 2 + success = False + last_exc = None + while not success: + tries_left -= 1 + try: + ldap_connect() + result = [] + try: + search_func = config.ldap_connection.get('page_size') \ + and ldap_paged_async_search or ldap_async_search + for dn, obj in search_func(make_utf8(base), scope, make_utf8(filt), columns): + if dn is None: + continue # skip unwanted answers + new_obj = {} + for key, val in obj.iteritems(): + # Convert all keys to lower case! + new_obj[key.lower().decode('utf-8')] = [ i.decode('utf-8') for i in val ] + result.append((dn.lower(), new_obj)) + success = True + except ldap.NO_SUCH_OBJECT, e: + raise MKLDAPException(_('The given base object "%s" does not exist in LDAP (%s))') % (base, e)) + + except ldap.FILTER_ERROR, e: + raise MKLDAPException(_('The given ldap filter "%s" is invalid (%s)') % (filt, e)) + + except ldap.SIZELIMIT_EXCEEDED: + raise MKLDAPException(_('The response reached a size limit. This could be due to ' + 'a sizelimit configuration on the LDAP server.
            Throwing away the ' + 'incomplete results. You should change the scope of operation ' + 'within the ldap or adapt the limit settings of the LDAP server.')) + except (ldap.SERVER_DOWN, ldap.TIMEOUT, MKLDAPException), e: + last_exc = e + if tries_left: + ldap_log(' Received %r. Retrying with clean connection...' % e) + ldap_disconnect() + time.sleep(0.5) + else: + ldap_log(' Giving up.') + break duration = time.time() - start_time + + if not success: + ldap_log(' FAILED') + if config.debug: + raise MKLDAPException(_('Unable to successfully perform the LDAP search ' + '(Base: %s, Scope: %s, Filter: %s, Columns: %s): %s') % + (html.attrencode(base), html.attrencode(scope), + html.attrencode(filt), html.attrencode(','.join(columns)), + last_exc)) + else: + raise MKLDAPException(_('Unable to successfully perform the LDAP search (%s)') % last_exc) + ldap_log(' RESULT length: %d, duration: %0.3f' % (len(result), duration)) return result - #return ldap_connection.search_s(base, scope, filter, columns) - #for dn, obj in ldap_connection.search_s(base, scope, filter, columns): - # html.log(repr(dn) + ' ' + repr(obj)) # Returns the ldap filter depending on the configured ldap directory type def ldap_filter(key, handle_config = True): @@ -237,8 +419,9 @@ # Returns the ldap attribute name depending on the configured ldap directory type # If a key is not present in the map, the assumption is, that the key matches 1:1 +# Always use lower case here, just to prevent confusions. def ldap_attr(key): - return ldap_attr_map[config.ldap_connection['type']].get(key, key) + return ldap_attr_map[config.ldap_connection['type']].get(key, key).lower() # Returns the given distinguished name template with replaced vars def ldap_replace_macros(tmpl): @@ -252,32 +435,73 @@ return dn +def ldap_rewrite_user_id(user_id): + if config.ldap_userspec.get('lower_user_ids', False): + user_id = user_id.lower() + + umlauts = config.ldap_userspec.get('user_id_umlauts', 'replace') + new_user_id = user_id.translate(ldap_umlaut_translation) + + if umlauts == 'replace': + user_id = new_user_id + elif umlauts == 'skip' and user_id != new_user_id: + return None # This makes the user being skipped + + # Now check whether or not the user id matches our specification + try: + str(user_id) + except UnicodeEncodeError: + # Skipping this user: not all "bad" characters were replaced before + ldap_log('Skipped user: %s (contains not allowed special characters)' % user_id) + return None + + return user_id + def ldap_user_id_attr(): return config.ldap_userspec.get('user_id', ldap_attr('user_id')) def ldap_member_attr(): return config.ldap_groupspec.get('member', ldap_attr('member')) +def ldap_bind_credentials_configured(): + return config.ldap_connection.get('bind', ('', ''))[0] != '' + +def ldap_user_base_dn_configured(): + return config.ldap_userspec.get('dn', '') != '' + +def ldap_group_base_dn_configured(): + return config.ldap_groupspec.get('dn', '') != '' + +def ldap_user_base_dn_exists(): + try: + result = ldap_search(ldap_replace_macros(config.ldap_userspec['dn']), columns = ['dn'], scope = 'base') + except Exception, e: + return False + if not result: + return False + else: + return len(result) == 1 + def ldap_get_user(username, no_escape = False): if username in g_ldap_user_cache: return g_ldap_user_cache[username] - # Check wether or not the user exists in the directory - # It's only ok when exactly one entry is found. - # Returns the DN and user_id as tuple in this case. 
+ # Check wether or not the user exists in the directory matching the username AND + # the user search filter configured in the "LDAP User Settings". + # It's only ok when exactly one entry is found. Returns the DN and user_id + # as tuple in this case. result = ldap_search( ldap_replace_macros(config.ldap_userspec['dn']), - '(%s=%s)' % (ldap_user_id_attr(), ldap.filter.escape_filter_chars(username)), + '(&(%s=%s)%s)' % (ldap_user_id_attr(), ldap.filter.escape_filter_chars(username), + config.ldap_userspec.get('filter', '')), [ldap_user_id_attr()], ) if result: dn = result[0][0] - user_id = result[0][1][ldap_user_id_attr()][0] - - if config.ldap_userspec.get('lower_user_ids', False): - user_id = user_id.lower() - + user_id = ldap_rewrite_user_id(result[0][1][ldap_user_id_attr()][0]) + if user_id is None: + return None g_ldap_user_cache[username] = (dn, user_id) if no_escape: @@ -285,12 +509,43 @@ else: return (dn.replace('\\', '\\\\'), user_id) -def ldap_get_users(add_filter = None): +def ldap_get_users(add_filter = ''): columns = [ ldap_user_id_attr(), # needed in all cases as uniq id ] + ldap_needed_attributes() filt = ldap_filter('users') + + # Create filter by the optional filter_group + filter_group_dn = config.ldap_userspec.get('filter_group', None) + member_filter = '' + if filter_group_dn: + member_attr = ldap_member_attr().lower() + # posixGroup objects use the memberUid attribute to specify the group memberships. + # This is the username instead of the users DN. So the username needs to be used + # for filtering here. + user_cmp_attr = member_attr == 'memberuid' and ldap_user_id_attr() or 'distinguishedname' + + # Apply configured group ldap filter + try: + group = ldap_search(ldap_replace_macros(filter_group_dn), + columns = [member_attr], + scope = 'base') + except MKLDAPException: + group = None + + if not group: + raise MKLDAPException(_('The configured ldap user filter group could not be found. ' + 'Please check your configuration.') % + 'wato.py?mode=ldap_config&varname=ldap_userspec') + + members = group[0][1].values()[0] + + member_filter_items = [] + for member in members: + member_filter_items.append('(%s=%s)' % (user_cmp_attr, member)) + add_filter += '(|%s)' % ''.join(member_filter_items) + if add_filter: filt = '(&%s%s)' % (filt, add_filter) @@ -300,46 +555,110 @@ if ldap_user_id_attr() not in ldap_user: raise MKLDAPException(_('The configured User-ID attribute "%s" does not ' 'exist for the user "%s"') % (ldap_user_id_attr(), dn)) - user_id = ldap_user[ldap_user_id_attr()][0] + user_id = ldap_rewrite_user_id(ldap_user[ldap_user_id_attr()][0]) + if user_id: + ldap_user['dn'] = dn # also add the DN + result[user_id] = ldap_user - if config.ldap_userspec.get('lower_user_ids', False): - user_id = user_id.lower() + return result - result[user_id] = ldap_user +def ldap_group_base_dn_exists(): + group_base_dn = ldap_replace_macros(config.ldap_groupspec['dn']) + if not group_base_dn: + return False - return result + try: + result = ldap_search(group_base_dn, columns = ['dn'], scope = 'base') + except Exception, e: + return False -def ldap_user_groups(username, attr = 'cn'): - # The given username might be wrong case. The ldap search is case insensitive, - # so the username read from ldap might differ. Fix it here. 
- user_dn, username = ldap_get_user(username, True) - - if username in g_ldap_group_cache: - if attr == 'cn': - return g_ldap_group_cache[username][0] - else: - return g_ldap_group_cache[username][1] - - # Apply configured group ldap filter and only reply with groups - # having the current user as member - filt = '(&%s(%s=%s))' % (ldap_filter('groups'), ldap_member_attr(), - ldap.filter.escape_filter_chars(user_dn)) - # First get all groups - groups_cn = [] - groups_dn = [] - for dn, group in ldap_search(ldap_replace_macros(config.ldap_groupspec['dn']), - filt, ['cn']): - groups_cn.append(group['cn'][0]) - groups_dn.append(dn) + if not result: + return False + else: + return len(result) == 1 - g_ldap_group_cache.setdefault(username, (groups_cn, groups_dn)) +def ldap_get_groups(specific_dn = None): + filt = ldap_filter('groups') + dn = ldap_replace_macros(config.ldap_groupspec['dn']) + + if specific_dn: + # When using AD, the groups can be filtered by the DN attribute. With + # e.g. OpenLDAP this is not possible. In that case, change the DN. + if config.ldap_connection['type'] == 'ad': + filt = '(&%s(distinguishedName=%s))' % (filt, specific_dn) + else: + dn = specific_dn + + return ldap_search(dn, filt, ['cn']) + +def ldap_group_members(filters, filt_attr = 'cn', nested = False): + cache_key = '%s-%s-%s' % (filters, nested and 'n' or 'f', filt_attr) + if cache_key in g_ldap_group_cache: + return g_ldap_group_cache[cache_key] + + # When not searching for nested memberships, it is easy when using the an AD base LDAP. + # The group objects can be queried using the attribute distinguishedname. Therefor we + # create an alternating match filter to match that attribute when searching by DNs. + # In OpenLDAP the distinguishedname is no user attribute, therefor it can not be used + # as filter expression. We have to do one ldap query per group. Maybe, in the future, + # we change the role sync plugin parameters to snapins to make this part a little easier. + if not nested: + groups = {} + filt = ldap_filter('groups') + member_attr = ldap_member_attr().lower() + + if config.ldap_connection['type'] == 'ad' or filt_attr != 'distinguishedname': + if filters: + add_filt = '(|%s)' % ''.join([ '(%s=%s)' % (filt_attr, f) for f in filters ]) + filt = '(&%s%s)' % (filt, add_filt) + + for dn, obj in ldap_search(ldap_replace_macros(config.ldap_groupspec['dn']), filt, ['cn', member_attr]): + groups[dn] = { + 'cn' : obj['cn'][0], + 'members' : [ m.encode('utf-8').lower() for m in obj.get(member_attr,[]) ], + } + else: + # Special handling for OpenLDAP when searching for groups by DN + for f_dn in filters: + for dn, obj in ldap_search(ldap_replace_macros(f_dn), filt, ['cn', member_attr]): + groups[f_dn] = { + 'cn' : obj['cn'][0], + 'members' : [ m.encode('utf-8').lower() for m in obj.get(member_attr,[]) ], + } - if attr == 'cn': - return groups_cn else: - return groups_dn + # Nested querying is more complicated. We have no option to simply do a query for group objects + # to make them resolve the memberships here. So we need to query all users with the nested + # memberof filter to get all group memberships of that group. We need one query for each group. 
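Aside, for the nested branch that follows: the transitive lookup builds on Active Directory's LDAP_MATCHING_RULE_IN_CHAIN matching rule (OID 1.2.840.113556.1.4.1941), the same construct used in the code below. A reduced sketch of how such a filter is composed:

```python
LDAP_MATCHING_RULE_IN_CHAIN = '1.2.840.113556.1.4.1941'

def nested_member_filter(user_filter, group_dn):
    # Matches users that are direct or indirect members of group_dn; the
    # server expands the member chain, so one query per group is enough.
    return '(&%s(memberOf:%s:=%s))' % (
        user_filter, LDAP_MATCHING_RULE_IN_CHAIN, group_dn)

# nested_member_filter('(objectclass=user)', 'CN=admins,DC=example,DC=com')
# -> '(&(objectclass=user)(memberOf:1.2.840.113556.1.4.1941:=CN=admins,DC=example,DC=com))'
```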
+ groups = {} + for filter_val in filters: + if filt_attr == 'cn': + result = ldap_search(ldap_replace_macros(config.ldap_groupspec['dn']), + '(&%s(cn=%s))' % (ldap_filter('groups'), filter_val), + columns = ['dn']) + if not result: + continue # Skip groups which can not be found + dn = result[0][0] + cn = filter_val + else: + dn = filter_val + # in case of asking with DNs in nested mode, the resulting objects have the + # cn set to None for all objects. We do not need it in that case. + cn = None + + filt = '(&%s(memberOf:1.2.840.113556.1.4.1941:=%s))' % (ldap_filter('users'), dn) + groups[dn] = { + 'members' : [], + 'cn' : cn, + } + for user_dn, obj in ldap_search(ldap_replace_macros(config.ldap_userspec['dn']), filt, columns = ['dn']): + groups[dn]['members'].append(user_dn.lower()) -# .----------------------------------------------------------------------. + g_ldap_group_cache[cache_key] = groups + return groups + +#. +# .-Attributes-----------------------------------------------------------. # | _ _ _ _ _ _ | # | / \ | |_| |_ _ __(_) |__ _ _| |_ ___ ___ | # | / _ \| __| __| '__| | '_ \| | | | __/ _ \/ __| | @@ -369,7 +688,7 @@ elements.append((key, FixedValue( title = plugin['title'], help = plugin['help'], - value = None, + value = {}, totext = 'no_param_txt' in plugin and plugin['no_param_txt'] \ or _('This synchronization plugin has no parameters.'), ))) @@ -387,7 +706,7 @@ for key, params in config.ldap_active_plugins.items(): plugin = ldap_attribute_plugins[key] if 'needed_attributes' in plugin: - attrs.update(plugin['needed_attributes'](params)) + attrs.update(plugin['needed_attributes'](params or {})) return list(attrs) def ldap_convert_simple(user_id, ldap_user, user, user_attr, attr): @@ -396,10 +715,12 @@ else: return {} -def ldap_convert_mail(params, user_id, ldap_user, user): +def ldap_convert_mail(plugin, params, user_id, ldap_user, user): mail = '' - if ldap_user.get(ldap_attr('mail')): - mail = ldap_user[ldap_attr('mail')][0].lower() + mail_attr = params.get('attr', ldap_attr('mail')).lower() + if ldap_user.get(mail_attr): + mail = ldap_user[mail_attr][0].lower() + if mail: return {'email': mail} else: @@ -409,23 +730,29 @@ 'title': _('Email address'), 'help': _('Synchronizes the email of the LDAP user account into Check_MK.'), # Attributes which must be fetched from ldap - 'needed_attributes': lambda params: [ ldap_attr('mail') ], + 'needed_attributes': lambda params: [ params.get('attr', ldap_attr('mail')).lower() ], # Calculating the value of the attribute based on the configuration and the values # gathered from ldap 'convert': ldap_convert_mail, # User-Attributes to be written by this plugin and will be locked in WATO 'lock_attributes': [ 'email' ], - 'no_param_txt': _('Synchronize the "mail" attribute of LDAP users into Check_MK.'), + 'parameters': [ + ("attr", TextAscii( + title = _("LDAP attribute to sync"), + help = _("The LDAP attribute containing the mail address of the user."), + default_value = lambda: ldap_attr('mail'), + )), + ], } ldap_attribute_plugins['alias'] = { 'title': _('Alias'), 'help': _('Populates the alias attribute of the WATO user by syncrhonizing an attribute ' - 'from the LDAP user account. By default the LDAP attribute "cn" is used.'), - 'needed_attributes': lambda params: [ params.get('attr', ldap_attr('cn')) ], - 'convert': lambda params, user_id, ldap_user, user: \ + 'from the LDAP user account. 
By default the LDAP attribute cn is used.'), + 'needed_attributes': lambda params: [ params.get('attr', ldap_attr('cn')).lower() ], + 'convert': lambda plugin, params, user_id, ldap_user, user: \ ldap_convert_simple(user_id, ldap_user, user, 'alias', - params.get('attr', ldap_attr('cn'))), + params.get('attr', ldap_attr('cn')).lower()), 'lock_attributes': [ 'alias' ], 'parameters': [ ("attr", TextAscii( @@ -439,8 +766,17 @@ # Checks wether or not the user auth must be invalidated (increasing the serial). # In first instance, it must parse the pw-changed field, then check wether or not # a date has been stored in the user before and then maybe increase the serial. -def ldap_convert_auth_expire(params, user_id, ldap_user, user): - changed_attr = params.get('attr', ldap_attr('pw_changed')) +def ldap_convert_auth_expire(plugin, params, user_id, ldap_user, user): + # Special handling for active directory: Is the user enabled / disabled? + if config.ldap_connection['type'] == 'ad' and ldap_user.get('useraccountcontrol'): + # see http://www.selfadsi.de/ads-attributes/user-userAccountControl.htm for details + if saveint(ldap_user['useraccountcontrol'][0]) & 2 and not user.get("locked", False): + return { + 'locked': True, + 'serial': user.get('serial', 0) + 1, + } + + changed_attr = params.get('attr', ldap_attr('pw_changed')).lower() if not changed_attr in ldap_user: raise MKLDAPException(_('The "Authentication Expiration" attribute (%s) could not be fetched ' 'from the LDAP server for user %s.') % (changed_attr, ldap_user)) @@ -461,12 +797,20 @@ return {} +def ldap_attrs_auth_expire(params): + attrs = [ params.get('attr', ldap_attr('pw_changed')).lower() ] + + # Fetch user account flags to check locking + if config.ldap_connection['type'] == 'ad': + attrs.append('useraccountcontrol') + return attrs + ldap_attribute_plugins['auth_expire'] = { 'title': _('Authentication Expiration'), 'help': _('This plugin fetches all information which are needed to check wether or ' 'not an already authenticated user should be deauthenticated, e.g. because ' 'the password has changed in LDAP or the account has been locked.'), - 'needed_attributes': lambda params: [ params.get('attr', ldap_attr('pw_changed')) ], + 'needed_attributes': ldap_attrs_auth_expire, 'convert': ldap_convert_auth_expire, # When a plugin introduces new user attributes, it should declare the output target for # this attribute. It can either be written to the multisites users.mk or the check_mk @@ -479,7 +823,7 @@ title = _("LDAP attribute to be used as indicator"), help = _("When the value of this attribute changes for a user account, all " "current authenticated sessions of the user are invalidated and the " - "user must login again. By default this field uses the fields whcih " + "user must login again. By default this field uses the fields which " "hold the time of the last password change of the user."), default_value = lambda: ldap_attr('pw_changed'), )), @@ -488,13 +832,13 @@ ldap_attribute_plugins['pager'] = { 'title': _('Pager'), - 'help': _('This plugin synchronizes a field of the users ldap account to the pager attribute ' - 'of the WATO user accounts, which is then forwarded to Nagios and can be used' - 'for notifications. 
By default the LDAP attribute "mobile" is used.'), - 'needed_attributes': lambda params: [ params.get('attr', ldap_attr('mobile')) ], - 'convert': lambda params, user_id, ldap_user, user: \ + 'help': _('This plugin synchronizes a field of the users LDAP account to the pager attribute ' + 'of the WATO user accounts, which is then forwarded to the monitoring core and can be used' + 'for notifications. By default the LDAP attribute mobile is used.'), + 'needed_attributes': lambda params: [ params.get('attr', ldap_attr('mobile')).lower() ], + 'convert': lambda plugin, params, user_id, ldap_user, user: \ ldap_convert_simple(user_id, ldap_user, user, 'pager', - params.get('attr', ldap_attr('mobile'))), + params.get('attr', ldap_attr('mobile')).lower()), 'lock_attributes': ['pager'], 'parameters': [ ('attr', TextAscii( @@ -505,16 +849,44 @@ ], } -def ldap_convert_groups_to_contactgroups(params, user_id, ldap_user, user): - groups = [] - # 1. Fetch CNs of all LDAP groups of the user (use group_dn, group_filter) - ldap_groups = ldap_user_groups(user_id) +# Register sync plugins for all custom user attributes (assuming simple data types) +def register_user_attribute_sync_plugins(): + # Remove old user attribute plugins + for attr_name in ldap_attribute_plugins.keys(): + if attr_name not in ldap_builtin_attribute_plugin_names: + del ldap_attribute_plugins[attr_name] + + for attr, val in get_user_attributes(): + ldap_attribute_plugins[attr] = { + 'title': val['valuespec'].title(), + 'help': val['valuespec'].help(), + 'needed_attributes': lambda params: [ params.get('attr', ldap_attr(attr)).lower() ], + 'convert': lambda plugin, params, user_id, ldap_user, user: \ + ldap_convert_simple(user_id, ldap_user, user, plugin, + params.get('attr', ldap_attr(plugin)).lower()), + 'lock_attributes': [ attr ], + 'parameters': [ + ('attr', TextAscii( + title = _("LDAP attribute to sync"), + help = _("The LDAP attribute whose contents shall be synced into this custom attribute."), + default_value = lambda: ldap_attr(attr), + )), + ], + } + +def ldap_convert_groups_to_contactgroups(plugin, params, user_id, ldap_user, user): + # 0. Figure out how to check group membership. + user_cmp_val = ldap_member_attr().lower() == 'memberuid' and user_id or ldap_user['dn'] - # 2. Fetch all existing group names in WATO + # 1. Fetch all existing group names in WATO cg_names = load_group_information().get("contact", {}).keys() - # Only add groups which are already contactgroups in wato - return {'contactgroups': [ g for g in ldap_groups if g in cg_names]} + # 2. Load all LDAP groups which have a CN matching one contact + # group which exists in WATO + ldap_groups = ldap_group_members(cg_names, nested = params.get('nested', False)) + + # 3. 
Only add groups which the user is member of + return {'contactgroups': [ g['cn'] for dn, g in ldap_groups.items() if user_cmp_val in g['members']]} ldap_attribute_plugins['groups_to_contactgroups'] = { 'title': _('Contactgroup Membership'), @@ -523,47 +895,91 @@ 'contactgroup must match the common name (cn) of the LDAP group.'), 'convert': ldap_convert_groups_to_contactgroups, 'lock_attributes': ['contactgroups'], - 'no_param_txt': _('Add user to all contactgroups where the common name matches the group name.'), + 'parameters': [ + ('nested', FixedValue( + title = _('Handle nested group memberships (Active Directory only at the moment)'), + help = _('Once you enable this option, this plugin will not only handle direct ' + 'group memberships, instead it will also dig into nested groups and treat ' + 'the members of those groups as contact group members as well. Please mind ' + 'that this feature might increase the execution time of your LDAP sync.'), + value = True, + totext = _('Nested group memberships are resolved'), + ) + ) + ], } -def ldap_convert_groups_to_roles(params, user_id, ldap_user, user): - groups = [] - # 1. Fetch DNs of all LDAP groups of the user - ldap_groups = [ g.lower() for g in ldap_user_groups(user_id, 'dn') ] +def ldap_convert_groups_to_roles(plugin, params, user_id, ldap_user, user): + # Load the needed LDAP groups, which match the DNs mentioned in the role sync plugin config + ldap_groups = dict(ldap_group_members([ dn.lower() for role_id, dn in params.items() + if type(dn) in [ str, unicode ] ], + filt_attr = 'distinguishedname', nested = params.get('nested', False))) + + # posixGroup objects use the memberUid attribute to specify the group + # memberships. This is the username instead of the users DN. So the + # username needs to be used for filtering here. + user_cmp_val = ldap_member_attr().lower() == 'memberuid' and user_id or ldap_user['dn'] - # 2. Load default roles from default user profile - roles = config.default_user_profile['roles'][:] + roles = set([]) - # 3. Loop all roles mentioned in params (configured to be synchronized) + # Loop all roles mentioned in params (configured to be synchronized) for role_id, dn in params.items(): - if dn.lower() in ldap_groups and role_id not in roles: - roles.append(role_id) + if type(dn) not in [ str, unicode ]: + continue # skip non configured ones + dn = dn.lower() # lower case matching for DNs! + + # if group could be found and user is a member, add the role + if dn in ldap_groups and user_cmp_val in ldap_groups[dn]['members']: + roles.add(role_id) + + # Load default roles from default user profile when the user got no role + # by the role sync plugin + if not roles: + roles = config.default_user_profile['roles'][:] - return {'roles': roles} + return {'roles': list(roles)} def ldap_list_roles_with_group_dn(): elements = [] for role_id, role in load_roles().items(): elements.append((role_id, LDAPDistinguishedName( title = role['alias'] + ' - ' + _("Specify the Group DN"), - help = _("Distinguished Name of the LDAP group to add users this role. This group must " - "be defined within the scope of the " - "LDAP Group Settings."), + help = _("Distinguished Name of the LDAP group to add users this role. " + "e. g. CN=cmk-users,OU=groups,DC=example,DC=com
            " + "This group must be defined within the scope of the " + "LDAP Group Settings."), size = 80, enforce_suffix = ldap_replace_macros(config.ldap_groupspec.get('dn', '')), ))) + + elements.append( + ('nested', FixedValue( + title = _('Handle nested group memberships (Active Directory only at the moment)'), + help = _('Once you enable this option, this plugin will not only handle direct ' + 'group memberships, instead it will also dig into nested groups and treat ' + 'the members of those groups as contact group members as well. Please mind ' + 'that this feature might increase the execution time of your LDAP sync.'), + value = True, + totext = _('Nested group memberships are resolved'), + ) + ) + ) return elements ldap_attribute_plugins['groups_to_roles'] = { 'title': _('Roles'), 'help': _('Configures the roles of the user depending on its group memberships ' - 'in LDAP.'), + 'in LDAP.
            ' + 'Please note: Additionally the user is assigned to the ' + 'Default Roles. ' + 'Deactivate them if unwanted.'), 'convert': ldap_convert_groups_to_roles, 'lock_attributes': ['roles'], 'parameters': ldap_list_roles_with_group_dn, } -# .----------------------------------------------------------------------. +#. +# .-Hooks----------------------------------------------------------------. # | _ _ _ | # | | | | | ___ ___ | | _____ | # | | |_| |/ _ \ / _ \| |/ / __| | @@ -594,7 +1010,7 @@ except: result = False - ldap_default_bind() + ldap_default_bind(ldap_connection) return result def ldap_sync(add_to_changelog, only_username): @@ -602,6 +1018,9 @@ # requests to e.g. the page hook would cause duplicate calculations file(g_ldap_sync_time_file, 'w').write('%s\n' % time.time()) + if not config.ldap_connection or not ldap_user_base_dn_configured(): + return # silently skip sync without configuration + # Flush ldap related before each sync to have a caching only for the # current sync process global g_ldap_user_cache, g_ldap_group_cache @@ -610,23 +1029,25 @@ start_time = time.time() - ldap_connect() + ldap_log(' SYNC PLUGINS: %s' % ', '.join(config.ldap_active_plugins.keys())) # Unused at the moment, always sync all users #filt = None #if only_username: # filt = '(%s=%s)' % (ldap_user_id_attr(), only_username) - import wato - users = load_users() ldap_users = ldap_get_users() + import wato + users = load_users(lock = True) + # Remove users which are controlled by this connector but can not be found in # LDAP anymore for user_id, user in users.items(): if user.get('connector') == 'ldap' and user_id not in ldap_users: del users[user_id] # remove the user - wato.log_pending(wato.SYNCRESTART, None, "edit-users", _("LDAP Connector: Removed user %s" % user_id)) + wato.log_pending(wato.SYNCRESTART, None, "edit-users", + _("LDAP Connector: Removed user %s" % user_id), user_id = '') for user_id, ldap_user in ldap_users.items(): if user_id in users: @@ -642,7 +1063,7 @@ # Gather config from convert functions of plugins for key, params in config.ldap_active_plugins.items(): - user.update(ldap_attribute_plugins[key]['convert'](params, user_id, ldap_user, user)) + user.update(ldap_attribute_plugins[key]['convert'](key, params or {}, user_id, ldap_user, user)) if not mode_create and user == users[user_id]: continue # no modification. Skip this user. @@ -659,15 +1080,37 @@ if mode_create: wato.log_pending(wato.SYNCRESTART, None, "edit-users", - _("LDAP Connector: Created user %s" % user_id)) + _("LDAP Connector: Created user %s" % user_id), user_id = '') else: - wato.log_pending(wato.SYNCRESTART, None, "edit-users", - _("LDAP Connector: Modified user %s (Added: %s, Removed: %s, Changed: %s)" % - (user_id, ', '.join(added), ', '.join(removed), ', '.join(changed)))) + details = [] + if added: + details.append(_('Added: %s') % ', '.join(added)) + if removed: + details.append(_('Removed: %s') % ', '.join(removed)) + + # Ignore password changes from ldap - do not log them. For now. 
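Aside: the changelog entries assembled below report which user attributes a sync run added, removed or changed; the comparison itself happens outside this excerpt. A hedged sketch of such a key-level diff (the helper name is hypothetical):

```python
def diff_profile(old, new):
    # Key level comparison of two user dictionaries, matching the
    # "Added/Removed/Changed" wording of the log messages below.
    added   = [ k for k in new if k not in old ]
    removed = [ k for k in old if k not in new ]
    changed = [ k for k in new if k in old and new[k] != old[k] ]
    return added, removed, changed
```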
+ if 'ldap_pw_last_changed' in changed: + changed.remove('ldap_pw_last_changed') + if 'serial' in changed: + changed.remove('serial') + + if changed: + details.append(('Changed: %s') % ', '.join(changed)) + + if details: + wato.log_pending(wato.SYNCRESTART, None, "edit-users", + _("LDAP Connector: Modified user %s (%s)") % (user_id, ', '.join(details)), + user_id = '') duration = time.time() - start_time ldap_log('SYNC FINISHED - Duration: %0.3f sec' % duration) + # delete the fail flag file after successful sync + try: + os.unlink(g_ldap_sync_fail_file) + except OSError: + pass + save_users(users) # Calculates the attributes of the users which are locked for users managed @@ -675,7 +1118,7 @@ def ldap_locked_attributes(): locked = set([ 'password' ]) # This attributes are locked in all cases! for key in config.ldap_active_plugins.keys(): - locked.update(ldap_attribute_plugins[key].get('lock_attributes', [])) + locked.update(ldap_attribute_plugins.get(key, {}).get('lock_attributes', [])) return list(locked) # Calculates the attributes added in this connector which shal be written to @@ -683,7 +1126,7 @@ def ldap_multisite_attributes(): attrs = set([]) for key in config.ldap_active_plugins.keys(): - attrs.update(ldap_attribute_plugins[key].get('multisite_attributes', [])) + attrs.update(ldap_attribute_plugins.get(key, {}).get('multisite_attributes', [])) return list(attrs) # Calculates the attributes added in this connector which shal NOT be written to @@ -691,9 +1134,20 @@ def ldap_non_contact_attributes(): attrs = set([]) for key in config.ldap_active_plugins.keys(): - attrs.update(ldap_attribute_plugins[key].get('non_contact_attributes', [])) + attrs.update(ldap_attribute_plugins.get(key, {}).get('non_contact_attributes', [])) return list(attrs) +ldap_builtin_attribute_plugin_names = [] + +# Is called during load_plugins() phase of userdb. +def ldap_load(): + # Save the builtin attribute names (to be able to delete removed user attributes) + global ldap_builtin_attribute_plugin_names + if not ldap_builtin_attribute_plugin_names: + ldap_builtin_attribute_plugin_names = ldap_attribute_plugins.keys() + + register_user_attribute_sync_plugins() + # Is called on every multisite http request def ldap_page(): try: @@ -701,11 +1155,25 @@ except: last_sync_time = 0 - if last_sync_time + config.ldap_cache_livetime > time.time(): + # in case of sync problems, synchronize all 20 seconds, instead of the configured + # regular cache livetime + if os.path.exists(g_ldap_sync_fail_file): + cache_livetime = 20 + else: + cache_livetime = config.ldap_cache_livetime + + if last_sync_time + cache_livetime > time.time(): return # No action needed, cache is recent enough # ok, cache is too old. Act! - ldap_sync(False, None) + try: + ldap_sync(False, None) + except: + # Do not let the exception through to the user. Instead write last + # error in a state file which is then visualized for the admin and + # will be deleted upon next successful sync. + file(g_ldap_sync_fail_file, 'w').write('%s\n%s' % (time.strftime('%Y-%m-%d %H:%M:%S'), + traceback.format_exc())) multisite_user_connectors.append({ 'id': 'ldap', @@ -715,6 +1183,7 @@ 'login': ldap_login, 'sync': ldap_sync, 'page': ldap_page, + 'load': ldap_load, 'locked': user_locked, # no ldap check, just check the WATO attribute. 
# This handles setups where the locked attribute is not # synchronized and the user is enabled in LDAP and disabled diff -Nru check-mk-1.2.2p3/plugins/userdb/user_attributes.py check-mk-1.2.6p12/plugins/userdb/user_attributes.py --- check-mk-1.2.2p3/plugins/userdb/user_attributes.py 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/plugins/userdb/user_attributes.py 2015-06-24 09:48:38.000000000 +0000 @@ -0,0 +1,77 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. + + +declare_user_attribute( + "force_authuser", + Checkbox( + title = _("Visibility of Hosts/Services"), + label = _("Only show hosts and services the user is a contact for"), + help = _("When this option is checked, then the status GUI will only " + "display hosts and services that the user is a contact for - " + "even if he has the permission for seeing all objects."), + ), + permission = "general.see_all" +) + +declare_user_attribute( + "force_authuser_webservice", + Checkbox( + title = _("Visibility of Hosts/Services (Webservice)"), + label = _("Export only hosts and services the user is a contact for"), + help = _("When this option is checked, then the Multisite webservice " + "will only export hosts and services that the user is a contact for - " + "even if he has the permission for seeing all objects."), + ), + permission = "general.see_all" +) + + +declare_user_attribute( + "disable_notifications", + Checkbox( + title = _("Disable Notifications"), + label = _("Temporarily disable all notifications!"), + help = _("When this option is active the you will not get any " + "alerts or other notifications via email, SMS or similar. " + "This overrides all other notification settings or rules, so make " + "sure that you know what you do."), + ), + permission = "general.disable_notifications", + domain = "check_mk", +) + +declare_user_attribute( + "start_url", + TextAscii(title = _("Start-URL to display in main frame"), + help = _("When you point your browser to the Multisite GUI, usually the dashboard " + "is shown in the main (right) frame. 
You can replace this with any other " + "URL you like here."), + size = 80, + default_value = "dashboard.py", + attrencode = True), + domain = "multisite") + diff -Nru check-mk-1.2.2p3/plugins/veritas/vxvm_enclosures check-mk-1.2.6p12/plugins/veritas/vxvm_enclosures --- check-mk-1.2.2p3/plugins/veritas/vxvm_enclosures 2012-10-04 13:12:37.000000000 +0000 +++ check-mk-1.2.6p12/plugins/veritas/vxvm_enclosures 1970-01-01 00:00:00.000000000 +0000 @@ -1,15 +0,0 @@ -#!/bin/sh - -# Original Output -#ENCLR_NAME ENCLR_TYPE ENCLR_SNO STATUS ARRAY_TYPE LUN_COUNT -#=================================================================================== -#disk Disk DISKS CONNECTED Disk 2 -#emc1 EMC 0002XXXXXXXX CONNECTED A/A 512 -#emc_clariion0 EMC_CLARiiON CK2000XXXXXXXX CONNECTED CLR-A/P 1 - -# Disk: local disks, or maybe JBOD (then remove the -e ^disk) - -if type vxdmpadm > /dev/null ; then - echo '<<>>' - vxdmpadm listenclosure all | grep -v -w -e ^[dD]isk -e ^other_disks -e ^ENCLR_NAME -e \^= -fi diff -Nru check-mk-1.2.2p3/plugins/veritas/vxvm_multipath check-mk-1.2.6p12/plugins/veritas/vxvm_multipath --- check-mk-1.2.2p3/plugins/veritas/vxvm_multipath 2012-10-04 13:12:37.000000000 +0000 +++ check-mk-1.2.6p12/plugins/veritas/vxvm_multipath 1970-01-01 00:00:00.000000000 +0000 @@ -1,15 +0,0 @@ -#!/bin/sh - -# DMPNODENAME ENCLR-NAME STATE[A] PATH-TYPE[M] -# disk2057 emc1 ENABLED(A) - -# disk3119 emc_clariion0 ENABLED SECONDARY - - -if type vxdmpadm >/dev/null 2>&1 ; then - echo '<<>>' - ENCS=$( vxdmpadm listenclosure all | grep -v -w -e ^[dD]isk -e ENCLR_NAME -e \^= | awk '{print $1}') - - echo "$ENCS" | while read enc ; do - vxdmpadm getdmpnode enclosure=$enc | grep -v -e \^= -e NAME - done -fi diff -Nru check-mk-1.2.2p3/plugins/veritas/vxvm_objstatus check-mk-1.2.6p12/plugins/veritas/vxvm_objstatus --- check-mk-1.2.2p3/plugins/veritas/vxvm_objstatus 2012-10-04 13:12:37.000000000 +0000 +++ check-mk-1.2.6p12/plugins/veritas/vxvm_objstatus 1970-01-01 00:00:00.000000000 +0000 @@ -1,30 +0,0 @@ -#!/bin/sh - - -if type vxdg > /dev/null; then - echo '<<>>' - # Get a list of the in-use disk groups. - DGS=$(vxdg list | grep enabled | awk '{print $1}') - # Deported or otherwise inactive needs no performance monitoring - if [ "X${DGS}" != "X" ]; then - for DG in $DGS ; do - vxprint -g $DG -v -q -Q -F "%type %dgname %name %admin_state %kstate" - done - fi -fi - - - -# Output examples: -# A stopped volume -#v datadg lalavol CLEAN DISABLED -# An active volume -#v datadg oravol ACTIVE ENABLE -# v2 layered volumes on lower level that we might or might not need. -#v datadg oravol-L01 ACTIVE ENABLED -#v datadg oravol-L02 ACTIVE ENABLED - - -# Man page - -# https://sort.symantec.com/public/documents/sfha/5.1sp1/solaris/manualpages/html/man/volume_manager/html/man4/vxmake.4.html diff -Nru check-mk-1.2.2p3/plugins/views/availability.py check-mk-1.2.6p12/plugins/views/availability.py --- check-mk-1.2.2p3/plugins/views/availability.py 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/plugins/views/availability.py 2015-09-21 10:59:54.000000000 +0000 @@ -0,0 +1,2017 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . 
\ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. + +# Hints: +# There are several modes for displaying data +# 1. Availability table +# 2. Timeline view with chronological events of one object +# There are two types of data sources +# a. Hosts/Services (identified by site, host and service) +# b. BI aggregates (identified by aggr_groups and aggr_name) +# The code flow for these four combinations is different +# +# 1a) availability of hosts/services +# Here the logic of show_view is used for creating the +# filter headers. But these are being reused for the statehist +# table instead of the original hosts/services table! This is +# done in get_availability_data(). +# +# - htdocs/views.py:show_view() +# - plugins/views/availability.py:render_availability() +# - plugins/views/availability.py:get_availability_data() +# - plugins/views/availability.py:do_render_availability() +# - plugins/views/availability.py:render_availability_table() +# +# 2a) timeline of hosts/services +# It is much the same as for 1a), just that in get_availability_data() +# an additional filter is being added for selecting just one host/serivce. +# +# - htdocs/views.py:show_view() +# - plugins/views/availability.py:render_availability() +# - plugins/views/availability.py:get_availability_data() +# - plugins/views/availability.py:do_render_availability() +# - plugins/views/availability.py:render_timeline() +# +# 1b) availability of bi aggregates +# In order to use the filter logic of the aggr datasource, we +# also start in show_view(). But this time we let the actual +# rows being computed - just we make sure that only the two +# columns aggr_name, aggr_group and aggr_tree are being fetched. The +# other columns won't be displayed. We just need the correct +# result set. With that we fork into render_bi_availability(). +# This computes the historic states of the aggregate by using +# data from hosts/services from state_hist. +# +# - htdocs/views.py:show_view() +# - plugins/views/availability.py:render_bi_availability() +# - plugins/views/availability.py:get_bi_timeline() +# - plugins/views/availability.py:do_render_availability() +# - plugins/views/availability.py:render_availability_table() +# +# 2b) timeline of bi aggregates +# In this case we do not need any logic from the view, since +# we just diplay one element - which is identified by aggr_group +# and aggr_name. 
We immediately fork to page_timeline() +# +# - htdocs/views.py:show_view() (jumps immediately to page_timeline) +# - htdocs/bi.py:page_timeline() +# - plugins/views/availability.py:render_bi_availability() +# - plugins/views/availability.py:do_render_availability() +# - plugins/views/availability.py:render_timeline() + + +import table +from valuespec import * + +# Function building the availability view +def render_availability(view, datasource, filterheaders, display_options, + only_sites, limit): + + if handle_edit_annotations(): + return + + timeline = not not html.var("timeline") + if timeline: + tl_site = html.var("timeline_site") + tl_host = html.var("timeline_host") + tl_service = html.var("timeline_service") + tl_aggr = html.var("timeline_aggr") + if tl_aggr: + title = _("Timeline of") + " " + tl_aggr + timeline = (tl_aggr, None, None) + else: + title = _("Timeline of") + " " + tl_host + if tl_service: + title += ", " + tl_service + timeline = (tl_site, tl_host, tl_service) + + else: + title = _("Availability: ") + view_title(view) + html.add_status_icon("download_csv", _("Export as CSV"), html.makeuri([("output_format", "csv_export")])) + + if timeline and tl_aggr: + what = "bi" + else: + what = "service" in datasource["infos"] and "service" or "host" + + avoptions = get_availability_options_from_url(what) + range, range_title = avoptions["range"] + + title += " - " + range_title + + if html.output_format == "csv_export": + do_csv = True + av_output_csv_mimetype(title) + else: + do_csv = False + + + if 'H' in display_options: + html.body_start(title, stylesheets=["pages","views","status"], force=True) + if 'T' in display_options: + html.top_heading(title) + + handle_delete_annotations() + + # Remove variables for editing annotations, otherwise they will make it into the uris + html.del_all_vars("editanno_") + html.del_all_vars("anno_") + if html.var("filled_in") == "editanno": + html.del_var("filled_in") + + if 'B' in display_options: + html.begin_context_buttons() + togglebutton("avoptions", html.has_user_errors(), "painteroptions", _("Configure details of the report")) + html.context_button(_("Status View"), html.makeuri([("mode", "status")]), "status") + if config.reporting_available(): + html.context_button(_("Export as PDF"), html.makeuri([], filename="report_instant.py"), "report") + if timeline: + html.context_button(_("Availability"), html.makeuri([("timeline", "")]), "availability") + history_url = history_url_of(tl_site, tl_host, tl_service, range[0], range[1]) + if not tl_aggr: # No history for BI aggregate timeline + html.context_button(_("History"), history_url, "history") + html.end_context_buttons() + + if not do_csv: + # Render the avoptions again to get the HTML code, because the HTML vars have changed + # above (anno_ and editanno_ has been removed, which must not be part of the form + avoptions = render_availability_options(what) + + if not html.has_user_errors(): + if timeline and tl_aggr: + if not html.has_var("aggr_group"): + raise MKGeneralException("Missing GET variable aggr_group") + aggr_group = html.var("aggr_group") + tree = bi.get_bi_tree(aggr_group, tl_aggr) + rows = [{ "aggr_tree" : tree , "aggr_group" : aggr_group}] + else: + rows = get_availability_data(datasource, filterheaders, range, only_sites, + limit, timeline, timeline or avoptions["show_timeline"], avoptions) + do_render_availability(rows, what, avoptions, timeline, "") + + if 'Z' in display_options: + html.bottom_footer() + if 'H' in display_options: + html.body_end() + +def 
av_output_csv_mimetype(title): + html.req.content_type = "text/csv; charset=UTF-8" + filename = '%s-%s.csv' % (title, time.strftime('%Y-%m-%d_%H-%M-%S', time.localtime(time.time()))) + if type(filename) == unicode: + filename = filename.encode("utf-8") + html.req.headers_out['Content-Disposition'] = 'Attachment; filename="%s"' % filename + + +# Options for availability computation and rendering. These are four-tuple +# with the columns: +# 1. variable name +# 2. show in single or double height box +# 3. use this in reporting +# 4. the valuespec +def get_avoption_entries(what): + if what == "bi": + grouping_choices = [ + ( None, _("Do not group") ), + ( "host", _("By Aggregation Group") ), + ] + else: + grouping_choices = [ + ( None, _("Do not group") ), + ( "host", _("By Host") ), + ( "host_groups", _("By Host group") ), + ( "service_groups", _("By Service group") ), + ] + + return [ + # Time range selection + ( "rangespec", + "double", + False, + Timerange( + title = _("Time Range"), + default_value = 'd0', + ) + ), + + # Labelling and Texts + ( "labelling", + "double", + True, + ListChoice( + title = _("Labelling Options"), + choices = [ + ( "omit_headers", _("Do not display column headers")), + ( "omit_host", _("Do not display the host name")), + ( "use_display_name", _("Use alternative display name for services")), + ( "omit_buttons", _("Do not display icons for history and timeline")), + ( "display_timeline_legend", _("Display legend for timeline")), + ] + ) + ), + + # How to deal with downtimes + ( "downtimes", + "double", + True, + Dictionary( + title = _("Scheduled Downtimes"), + columns = 2, + elements = [ + ( "include", + DropdownChoice( + choices = [ + ( "honor", _("Honor scheduled downtimes") ), + ( "ignore", _("Ignore scheduled downtimes") ), + ( "exclude", _("Exclude scheduled downtimes" ) ), + ], + default_value = "honor", + ) + ), + ( "exclude_ok", + Checkbox(label = _("Treat phases of UP/OK as non-downtime")) + ), + ], + optional_keys = False, + ) + ), + + # How to deal with downtimes, etc. 
+ ( "consider", + "double", + True, + Dictionary( + title = _("Status Classification"), + columns = 2, + elements = [ + ( "flapping", + Checkbox( + label = _("Consider periods of flapping states"), + default_value = True), + ), + ( "host_down", + Checkbox( + label = _("Consider times where the host is down"), + default_value = True), + ), + ( "unmonitored", + Checkbox( + label = _("Include unmonitored time"), + default_value = True), + ), + ], + optional_keys = False, + ), + ), + + # Optionally group some states together + ( "state_grouping", + "double", + True, + Dictionary( + title = _("Status Grouping"), + columns = 2, + elements = [ + ( "warn", + DropdownChoice( + label = _("Treat Warning as: "), + choices = [ + ( "ok", _("OK") ), + ( "warn", _("WARN") ), + ( "crit", _("CRIT") ), + ( "unknown", _("UNKNOWN") ), + ], + default_value = "warn", + ), + ), + ( "unknown", + DropdownChoice( + label = _("Treat Unknown as: "), + choices = [ + ( "ok", _("OK") ), + ( "warn", _("WARN") ), + ( "crit", _("CRIT") ), + ( "unknown", _("UNKNOWN") ), + ], + default_value = "unknown", + ), + ), + ( "host_down", + DropdownChoice( + label = _("Treat Host Down as: "), + choices = [ + ( "ok", _("OK") ), + ( "warn", _("WARN") ), + ( "crit", _("CRIT") ), + ( "unknown", _("UNKNOWN") ), + ( "host_down", _("Host Down") ), + ], + default_value = "host_down", + ), + ), + ], + optional_keys = False, + ), + ), + + # Visual levels for the availability + ( "av_levels", + "double", + False, + Optional( + Tuple( + elements = [ + Percentage(title = _("Warning below"), default_value = 99, display_format="%.3f", size=7), + Percentage(title = _("Critical below"), default_value = 95, display_format="%.3f", size=7), + ] + ), + title = _("Visual levels for the availability (OK percentage)"), + ) + ), + + + # Show colummns for min, max, avg duration and count + ( "outage_statistics", + "double", + True, + Tuple( + title = _("Outage statistics"), + orientation = "horizontal", + elements = [ + ListChoice( + title = _("Aggregations"), + choices = [ + ( "min", _("minimum duration" )), + ( "max", _("maximum duration" )), + ( "avg", _("average duration" )), + ( "cnt", _("count" )), + ] + ), + ListChoice( + title = _("For these states:"), + columns = 2, + choices = [ + ( "ok", _("OK/Up") ), + ( "warn", _("Warn") ), + ( "crit", _("Crit/Down") ), + ( "unknown", _("Unknown/Unreach") ), + ( "flapping", _("Flapping") ), + ( "host_down", _("Host Down") ), + ( "in_downtime", _("Downtime") ), + ( "outof_notification_period", _("OO/Notif") ), + ] + ) + ] + ) + ), + + # Omit all non-OK columns + ( "av_mode", + "single", + True, + Checkbox( + title = _("Availability"), + label = _("Just show the availability (i.e. OK/UP)"), + ), + ), + + # How to deal with the service periods + ( "service_period", + "single", + True, + DropdownChoice( + title = _("Service Time"), + choices = [ + ( "honor", _("Base report only on service times") ), + ( "ignore", _("Include both service and non-service times" ) ), + ( "exclude", _("Base report only on non-service times" ) ), + ], + default_value = "honor", + ) + ), + + # How to deal with times out of the notification period + ( "notification_period", + "single", + True, + DropdownChoice( + title = _("Notification Period"), + choices = [ + ( "honor", _("Distinguish times in and out of notification period") ), + ( "exclude", _("Exclude times out of notification period" ) ), + ( "ignore", _("Ignore notification period") ), + ], + default_value = "ignore", + ) + ), + + # Group by Host, Hostgroup or Servicegroup? 
+ ( "grouping", + "single", + True, + DropdownChoice( + title = _("Grouping"), + choices = grouping_choices, + default_value = None, + ) + ), + + # Format of numbers + ( "dateformat", + "single", + True, + DropdownChoice( + title = _("Format time stamps as"), + choices = [ + ("yyyy-mm-dd hh:mm:ss", _("YYYY-MM-DD HH:MM:SS") ), + ("epoch", _("Unix Timestamp (Epoch)") ), + ], + default_value = "yyyy-mm-dd hh:mm:ss", + ) + ), + ( "timeformat", + "single", + True, + DropdownChoice( + title = _("Format time ranges as"), + choices = [ + ("percentage_0", _("Percentage - XX %") ), + ("percentage_1", _("Percentage - XX.X %") ), + ("percentage_2", _("Percentage - XX.XX %") ), + ("percentage_3", _("Percentage - XX.XXX %") ), + ("seconds", _("Seconds") ), + ("minutes", _("Minutes") ), + ("hours", _("Hours") ), + ("hhmmss", _("HH:MM:SS") ), + ], + default_value = "percentage_2", + ) + ), + + # Short time intervals + ( "short_intervals", + "single", + True, + Integer( + title = _("Short Time Intervals"), + label = _("Ignore intervals shorter or equal"), + minvalue = 0, + unit = _("sec"), + default_value = 0, + ), + ), + + # Merging + ( "dont_merge", + "single", + True, + Checkbox( + title = _("Phase Merging"), + label = _("Do not merge consecutive phases with equal state")), + ), + + # Summary line + ( "summary", + "single", + True, + DropdownChoice( + title = _("Summary line"), + choices = [ + ( None, _("Do not show a summary line") ), + ( "sum", _("Display total sum (for % the average)") ), + ( "average", _("Display average") ), + ], + default_value = "sum", + ) + ), + + # Timeline + ( "show_timeline", + "single", + True, + Checkbox( + title = _("Timeline"), + label = _("Show timeline of each object directly in table")), + ), + + # Timelimit + ( "timelimit", + "single", + False, + Age( + title = _("Query Time Limit"), + help = _("Limit the execution time of the query, in order to " + "avoid a hanging system."), + unit = _("sec"), + default_value = 30, + ), + ) +] + +# Get availability options without rendering the valuespecs +def get_availability_options_from_url(what): + html.plug() + avoptions = render_availability_options(what) + html.drain() + html.unplug() + return avoptions + +def get_default_avoptions(): + return { + "range" : (time.time() - 86400, time.time()), + "rangespec" : "d0", + "labelling" : [], + "downtimes" : { + "include" : "honor", + "exclude_ok" : False, + }, + "consider" : { + "flapping" : True, + "host_down" : True, + "unmonitored" : True, + }, + "state_grouping" : { + "warn" : "warn", + "unknown" : "unknown", + "host_down" : "host_down", + }, + "av_levels" : None, + "outage_statistics" : ([],[]), + "av_mode" : False, + "service_period" : "honor", + "notification_period" : "ignore", + "grouping" : None, + "dateformat" : "yyyy-mm-dd hh:mm:ss", + "timeformat" : "percentage_2", + "short_intervals" : 0, + "dont_merge" : False, + "summary" : "sum", + "show_timeline" : False, + "timelimit" : 30, + } + +def render_availability_options(what): + if html.var("_reset") and html.check_transaction(): + config.save_user_file("avoptions", {}) + for varname in html.vars.keys(): + if varname.startswith("avo_"): + html.del_var(varname) + html.del_var("avoptions") + + avoptions = get_default_avoptions() + + # Users of older versions might not have all keys set. The following + # trick will merge their options with our default options. 
+ avoptions.update(config.load_user_file("avoptions", {})) + + is_open = False + html.begin_form("avoptions") + html.hidden_field("avoptions", "set") + avoption_entries = get_avoption_entries(what) + if html.var("avoptions") == "set": + for name, height, show_in_reporting, vs in avoption_entries: + try: + avoptions[name] = vs.from_html_vars("avo_" + name) + except MKUserError, e: + html.add_user_error(e.varname, e) + is_open = True + + range_vs = None + for name, height, show_in_reporting, vs in avoption_entries: + if name == 'rangespec': + range_vs = vs + + try: + range, range_title = range_vs.compute_range(avoptions["rangespec"]) + avoptions["range"] = range, range_title + except MKUserError, e: + html.add_user_error(e.varname, e) + + if html.has_user_errors(): + html.show_user_errors() + + html.write('
<div class="view_form" id="avoptions" %s>'
+            % (not is_open and 'style="display: none"' or '') )
+    html.write("<table class=form>")
+
+    html.write("<tr><td>")
+
+    for name, height, show_in_reporting, vs in avoption_entries:
+        html.write('<div class="floatfilter %s %s">' % (height, name))
+        html.write('<div class=legend>%s</div>' % vs.title())
+        html.write('<div class=content>')
+        vs.render_input("avo_" + name, avoptions.get(name))
+        html.write("</div>")
+        html.write("</div>")
+
+    html.write("</td></tr><tr><td>")
+    html.button("apply", _("Apply"), "submit")
+    html.button("_reset", _("Reset to defaults"), "submit")
+    html.write("</td></tr></table>")
+    html.write("</div>
    ") + + html.hidden_fields() + html.end_form() + + if html.form_submitted(): + config.save_user_file("avoptions", avoptions) + + # Convert outage-options from service to host + states = avoptions["outage_statistics"][1] + for os, oh in [ ("ok","up"), ("crit","down"), ("unknown", "unreach") ]: + if os in states: + states.append(oh) + + return avoptions + +def get_availability_data(datasource, filterheaders, range, only_sites, limit, single_object, include_output, avoptions): + has_service = "service" in datasource["infos"] + av_filter = "Filter: time >= %d\nFilter: time < %d\n" % range + if single_object: + tl_site, tl_host, tl_service = single_object + av_filter += "Filter: host_name = %s\nFilter: service_description = %s\n" % ( + tl_host, tl_service) + only_sites = [ tl_site ] + elif has_service: + av_filter += "Filter: service_description !=\n" + else: + av_filter += "Filter: service_description =\n" + + query = "GET statehist\n" + av_filter + query += "Timelimit: %d\n" % avoptions["timelimit"] + + # Add Columns needed for object identification + columns = [ "host_name", "service_description" ] + + # Columns for availability + columns += [ + "duration", "from", "until", "state", "host_down", "in_downtime", + "in_host_downtime", "in_notification_period", "in_service_period", "is_flapping", ] + if include_output: + columns.append("log_output") + if "use_display_name" in avoptions["labelling"]: + columns.append("service_display_name") + + # If we group by host/service group then make sure that that information is available + if avoptions["grouping"] not in [ None, "host" ]: + columns.append(avoptions["grouping"]) + + add_columns = datasource.get("add_columns", []) + rows = do_query_data(query, columns, add_columns, None, filterheaders, only_sites, limit = None) + return rows + + +host_availability_columns = [ + ( "up", "state0", _("UP"), None ), + ( "down", "state2", _("DOWN"), None ), + ( "unreach", "state3", _("UNREACH"), None ), + ( "flapping", "flapping", _("Flapping"), None ), + ( "in_downtime", "downtime", _("Downtime"), _("The host was in a scheduled downtime") ), + ( "outof_notification_period", "", _("OO/Notif"), _("Out of Notification Period") ), + ( "outof_service_period", "ooservice", _("OO/Service"), _("Out of Service Period") ), + ( "unmonitored", "unmonitored", _("N/A"), _("During this time period no monitoring data is available") ), +] + +service_availability_columns = [ + ( "ok", "state0", _("OK"), None ), + ( "warn", "state1", _("WARN"), None ), + ( "crit", "state2", _("CRIT"), None ), + ( "unknown", "state3", _("UNKNOWN"), None ), + ( "flapping", "flapping", _("Flapping"), None ), + ( "host_down", "hostdown", _("H.Down"), _("The host was down") ), + ( "in_downtime", "downtime", _("Downtime"), _("The host or service was in a scheduled downtime") ), + ( "outof_notification_period", "", _("OO/Notif"), _("Out of Notification Period") ), + ( "outof_service_period", "ooservice", _("OO/Service"), _("Out of Service Period") ), + ( "unmonitored", "unmonitored", _("N/A"), _("During this time period no monitoring data is available") ), +] + +bi_availability_columns = [ + ( "ok", "state0", _("OK"), None ), + ( "warn", "state1", _("WARN"), None ), + ( "crit", "state2", _("CRIT"), None ), + ( "unknown", "state3", _("UNKNOWN"), None ), + ( "in_downtime", "downtime", _("Downtime"), _("The aggregate was in a scheduled downtime") ), + ( "unmonitored", "unmonitored", _("N/A"), _("During this time period no monitoring data is available") ), +] + + +# Fetch = true: return av table as 
Python data, do render nothing +def do_render_availability(rows, what, avoptions, timeline, timewarpcode, fetch=False): + # Sort by site/host and service, while keeping native order + by_host = {} + for row in rows: + site_host = row["site"], row["host_name"] + service = row["service_description"] + by_host.setdefault(site_host, {}) + by_host[site_host].setdefault(service, []).append(row) + + # Load annotations + annotations = load_annotations() + + # Now compute availability table. We have the following possible states: + # 1. "unmonitored" + # 2. "monitored" + # 2.1 "outof_notification_period" + # 2.2 "in_notification_period" + # 2.2.1 "in_downtime" (also in_host_downtime) + # 2.2.2 "not_in_downtime" + # 2.2.2.1 "host_down" + # 2.2.2.2 "host not down" + # 2.2.2.2.1 "ok" + # 2.2.2.2.2 "warn" + # 2.2.2.2.3 "crit" + # 2.2.2.2.4 "unknown" + availability = [] + os_aggrs, os_states = avoptions.get("outage_statistics", ([],[])) + need_statistics = os_aggrs and os_states + show_timeline = avoptions["show_timeline"] or timeline + grouping = avoptions["grouping"] + timeline_rows = [] # Need this as a global variable if just one service is affected + total_duration = 0 + considered_duration = 0 + + # Note: in case of timeline, we have data from exacly one host/service + for site_host, site_host_entry in by_host.iteritems(): + for service, service_entry in site_host_entry.iteritems(): + + if grouping == "host": + group_ids = [site_host] + elif grouping: + group_ids = set([]) + else: + group_ids = None + + # First compute timeline + timeline_rows = [] + total_duration = 0 + considered_duration = 0 + for span in service_entry: + # Information about host/service groups are in the actual entries + if grouping and grouping != "host" and what != "bi": + group_ids.update(span[grouping]) # List of host/service groups + + display_name = span.get("service_display_name", service) + state = span["state"] + consider = True + + if state == -1: + s = "unmonitored" + if not avoptions["consider"]["unmonitored"]: + consider = False + + elif avoptions["service_period"] != "ignore" and \ + (( span["in_service_period"] and avoptions["service_period"] != "honor" ) + or \ + ( not span["in_service_period"] and avoptions["service_period"] == "honor" )): + s = "outof_service_period" + consider = False + + elif span["in_notification_period"] == 0 and avoptions["notification_period"] == "exclude": + consider = False + + elif span["in_notification_period"] == 0 and avoptions["notification_period"] == "honor": + s = "outof_notification_period" + + elif (span["in_downtime"] or span["in_host_downtime"]) and not \ + (avoptions["downtimes"]["exclude_ok"] and state == 0) and not \ + avoptions["downtimes"]["include"] == "ignore": + if avoptions["downtimes"]["include"] == "exclude": + consider = False + else: + s = "in_downtime" + elif what != "host" and span["host_down"] and avoptions["consider"]["host_down"]: + s = "host_down" + elif span["is_flapping"] and avoptions["consider"]["flapping"]: + s = "flapping" + else: + if what in [ "service", "bi" ]: + s = { 0: "ok", 1:"warn", 2:"crit", 3:"unknown" }.get(state, "unmonitored") + else: + s = { 0: "up", 1:"down", 2:"unreach" }.get(state, "unmonitored") + if s == "warn": + s = avoptions["state_grouping"]["warn"] + elif s == "unknown": + s = avoptions["state_grouping"]["unknown"] + elif s == "host_down": + s = avoptions["state_grouping"]["host_down"] + + total_duration += span["duration"] + if consider: + timeline_rows.append((span, s)) + considered_duration += span["duration"] + + # Now 
merge consecutive rows with identical state + if not avoptions["dont_merge"]: + merge_timeline(timeline_rows) + + # Melt down short intervals + if avoptions["short_intervals"]: + melt_short_intervals(timeline_rows, avoptions["short_intervals"], avoptions["dont_merge"]) + + # Condense into availability + states = {} + statistics = {} + for span, s in timeline_rows: + states.setdefault(s, 0) + duration = span["duration"] + states[s] += duration + if need_statistics: + entry = statistics.get(s) + if entry: + entry[0] += 1 + entry[1] = min(entry[1], duration) + entry[2] = max(entry[2], duration) + else: + statistics[s] = [ 1, duration, duration ] # count, min, max + + if not show_timeline: + timeline_rows = None + + availability.append([site_host[0], site_host[1], service, display_name, states, + considered_duration, total_duration, statistics, timeline_rows, group_ids]) + + # Prepare number format function + range, range_title = avoptions["range"] + from_time, until_time = range + duration = until_time - from_time + render_number = render_number_function(avoptions) + + fetch_data = {} + + if timeline: + if not fetch: # Timeline does not support fetch + render_timeline(timeline_rows, from_time, until_time, total_duration, + timeline, range_title, render_number, what, timewarpcode, avoptions, False, style="standalone") + else: + fetch_data["table"] = render_availability_table(availability, from_time, until_time, range_title, + what, avoptions, render_number, fetch) + + if not fetch: + render_annotations(annotations, from_time, until_time, by_host, what, avoptions, omit_service = timeline) + + return fetch_data + + +# Creates a function for rendering time values according to +# the avoptions of the report. +def render_number_function(avoptions): + timeformat = avoptions["timeformat"] + if timeformat.startswith("percentage_"): + def render_number(n, d): + if not d: + return _("n/a") + else: + return ("%." + timeformat[11:] + "f%%") % ( float(n) / float(d) * 100.0) + elif timeformat == "seconds": + def render_number(n, d): + return "%d s" % n + elif timeformat == "minutes": + def render_number(n, d): + return "%d min" % (n / 60) + elif timeformat == "hours": + def render_number(n, d): + return "%d h" % (n / 3600) + else: + def render_number(n, d): + minn, sec = divmod(n, 60) + hours, minn = divmod(minn, 60) + return "%02d:%02d:%02d" % (hours, minn, sec) + + return render_number + +# style is either inline (just the timeline bar) or "standalone" (the complete page) +def render_timeline(timeline_rows, from_time, until_time, considered_duration, + timeline, range_title, render_number, what, timewarpcode, avoptions, fetch, style): + + if not timeline_rows: + if fetch: + return [] + else: + html.write('
    %s
    ' % _("No information available")) + return + + # Timeformat: show date only if the displayed time range spans over + # more than one day. + format = "%H:%M:%S" + if time.localtime(from_time)[:3] != time.localtime(until_time-1)[:3]: + format = "%Y-%m-%d " + format + def render_date(ts): + if avoptions["dateformat"] == "epoch": + return str(int(ts)) + else: + return time.strftime(format, time.localtime(ts)) + + if type(timeline) == tuple: + tl_site, tl_host, tl_service = timeline + if tl_service: + availability_columns = service_availability_columns + else: + availability_columns = host_availability_columns + else: + availability_columns = bi_availability_columns + + # Render graphical representation + # Make sure that each cell is visible, if possible + min_percentage = min(100.0 / len(timeline_rows), style == "inline" and 0.0 or 0.5) + rest_percentage = 100 - len(timeline_rows) * min_percentage + if not fetch: + html.write('
<div class="timelinerange %s">' % style)
+    if style == "standalone":
+        html.write('
<div class=from>%s</div>
<div class=until>%s</div>
    ' % ( + render_date(from_time), render_date(until_time))) + + if not fetch: + html.write('' % style) + html.write('') + chaos_begin = None + chaos_end = None + chaos_count = 0 + chaos_width = 0 + + def output_chaos_period(chaos_begin, chaos_end, chaos_count, chaos_width): + if fetch: + html.write("|chaos:%s" % chaos_width) + else: + title = _("%d chaotic state changes from %s until %s (%s)") % ( + chaos_count, + render_date(chaos_begin), render_date(chaos_end), + render_number(chaos_end - chaos_begin, considered_duration)) + html.write('' % ( + max(0.2, chaos_width), html.attrencode(title))) + + for row_nr, (row, state_id) in enumerate(timeline_rows): + for sid, css, sname, help in availability_columns: + if sid == state_id: + title = _("From %s until %s (%s) %s") % ( + render_date(row["from"]), render_date(row["until"]), + render_number(row["duration"], considered_duration), + help and help or sname) + if "log_output" in row and row["log_output"]: + title += " - " + row["log_output"] + width = rest_percentage * row["duration"] / considered_duration + + # If the width is very small then we group several phases into + # one single "chaos period". + if style == "inline" and width < 0.05: + if not chaos_begin: + chaos_begin = row["from"] + chaos_width += width + chaos_count += 1 + chaos_end = row["until"] + continue + + # Chaos period has ended? One not-small phase: + elif chaos_begin: + # Only output chaos phases with a certain length + if chaos_count >= 4: + output_chaos_period(chaos_begin, chaos_end, chaos_count, chaos_width) + + chaos_begin = None + chaos_count = 0 + chaos_width = 0 + + width += min_percentage + if fetch: + html.write("|%s:%s" % (css, width)) + else: + html.write('' % ( + row_nr, row_nr, width, html.attrencode(title), css)) + + if chaos_count > 1: + output_chaos_period(chaos_begin, chaos_end, chaos_count, chaos_width) + if not fetch: + html.write('
    ') + + if style == "inline": + if not fetch: + render_timeline_choords(from_time, until_time, width=500) + return + + # Render timewarped BI aggregate (might be empty) + html.write(timewarpcode) + + # Render Table + table.begin("av_timeline", "", css="timelineevents") + for row_nr, (row, state_id) in enumerate(timeline_rows): + table.row() + table.cell(_("Links"), css="buttons") + if what == "bi": + url = html.makeuri([("timewarp", str(int(row["from"])))]) + if html.var("timewarp") and int(html.var("timewarp")) == int(row["from"]): + html.disabled_icon_button("timewarp_off") + else: + html.icon_button(url, _("Time warp - show BI aggregate during this time period"), "timewarp") + else: + url = html.makeuri([("anno_site", tl_site), + ("anno_host", tl_host), + ("anno_service", tl_service), + ("anno_from", row["from"]), + ("anno_until", row["until"])]) + html.icon_button(url, _("Create an annotation for this period"), "annotation") + + table.cell(_("From"), render_date(row["from"]), css="nobr narrow") + table.cell(_("Until"), render_date(row["until"]), css="nobr narrow") + table.cell(_("Duration"), render_number(row["duration"], considered_duration), css="narrow number") + for sid, css, sname, help in availability_columns: + if sid == state_id: + table.cell(_("State"), sname, css=css + " state narrow") + break + else: + table.cell(_("State"), "(%s/%s)" % (sid,sname)) + table.cell(_("Last Known Plugin Output"), row["log_output"]) + + table.end() + + # Legend for timeline + if "display_timeline_legend" in avoptions["labelling"]: + render_timeline_legend(what) + + +def render_timeline_choords(from_time, until_time, width): + duration = until_time - from_time + def render_choord(t, title): + pixel = width * (t - from_time) / float(duration) + html.write('
    ' % (title, pixel)) + + # Now comes the difficult part: decide automatically, whether to use + # hours, days, weeks or months. Days and weeks needs to take local time + # into account. Months are irregular. + hours = duration / 3600 + if hours < 12: + scale = "hours" + elif hours < 24: + scale = "2hours" + elif hours < 48: + scale = "6hours" + elif hours < 24 * 14: + scale = "days" + elif hours < 24 * 60: + scale = "weeks" + else: + scale = "months" + + broken = list(time.localtime(from_time)) + while True: + next_choord, title = find_next_choord(broken, scale) + if next_choord >= until_time: + break + render_choord(next_choord, title) + +# Elements in broken: +# 0: year +# 1: month (1 = January) +# 2: day of month +# 3: hour +# 4: minute +# 5: second +# 6: day of week (0 = monday) +# 7: day of year +# 8: isdst (0 or 1) +def find_next_choord(broken, scale): + broken[4:6] = [0, 0] # always set min/sec to 00:00 + old_dst = broken[8] + + if scale == "hours": + epoch = time.mktime(broken) + epoch += 3600 + broken[:] = list(time.localtime(epoch)) + title = time.strftime("%H:%M", broken) + + elif scale == "2hours": + broken[3] = broken[3] / 2 * 2 + epoch = time.mktime(broken) + epoch += 2 * 3600 + broken[:] = list(time.localtime(epoch)) + title = valuespec.weekdays[broken[6]] + time.strftime(" %H:%M", broken) + + elif scale == "6hours": + broken[3] = broken[3] / 6 * 6 + epoch = time.mktime(broken) + epoch += 6 * 3600 + broken[:] = list(time.localtime(epoch)) + title = valuespec.weekdays[broken[6]] + time.strftime(" %H:%M", broken) + + elif scale == "days": + broken[3] = 0 + epoch = time.mktime(broken) + epoch += 24 * 3600 + broken[:] = list(time.localtime(epoch)) + title = valuespec.weekdays[broken[6]] + time.strftime(", %d.%m. 00:00", broken) + + elif scale == "weeks": + broken[3] = 0 + at_00 = int(time.mktime(broken)) + at_monday = at_00 - 86400 * broken[6] + epoch = at_monday + 7 * 86400 + broken[:] = list(time.localtime(epoch)) + title = valuespec.weekdays[broken[6]] + time.strftime(", %d.%m.", broken) + + else: # scale == "months": + broken[3] = 0 + broken[2] = 0 + broken[1] += 1 + if broken[1] > 12: + broken[1] = 1 + broken[0] += 1 + epoch = time.mktime(broken) + title = "%s %d" % (valuespec.month_names[broken[1]-1], broken[0]) + + dst = broken[8] + if old_dst == 1 and dst == 0: + epoch += 3600 + elif old_dst == 0 and dst == 1: + epoch -= 3600 + return epoch, title + + + + + +# Merge consecutive rows with same state +def merge_timeline(entries): + n = 1 + while n < len(entries): + if entries[n][1] == entries[n-1][1]: + entries[n-1][0]["duration"] += entries[n][0]["duration"] + entries[n-1][0]["until"] = entries[n][0]["until"] + del entries[n] + else: + n += 1 + +def melt_short_intervals(entries, duration, dont_merge): + n = 1 + need_merge = False + while n < len(entries) - 1: + if entries[n][0]["duration"] <= duration and \ + entries[n-1][1] == entries[n+1][1]: + entries[n] = (entries[n][0], entries[n-1][1]) + need_merge = True + n += 1 + + # Due to melting, we need to merge again + if need_merge and not dont_merge: + merge_timeline(entries) + melt_short_intervals(entries, duration, dont_merge) + +def history_url_of(site, host, service, from_time, until_time): + history_url_vars = [ + ("site", site), + ("host", host), + ("logtime_from_range", "unix"), # absolute timestamp + ("logtime_until_range", "unix"), # absolute timestamp + ("logtime_from", str(int(from_time))), + ("logtime_until", str(int(until_time)))] + if service: + history_url_vars += [ + ("service", service), + ("view_name", 
"svcevents"), + ] + else: + history_url_vars += [ + ("view_name", "hostevents"), + ] + + return "view.py?" + html.urlencode_vars(history_url_vars) + +statistics_headers = { + "min" : _("Shortest"), + "max" : _("Longest"), + "avg" : _("Average"), + "cnt" : _("Count"), +} + +def render_availability_table(availability, from_time, until_time, range_title, what, avoptions, render_number, fetch): + do_csv = html.output_format == "csv_export" + no_html = do_csv or fetch + + if not availability: + if not no_html: + html.message(_("No matching hosts/services.")) + return [] # No objects + + grouping = avoptions["grouping"] + fetch_data = [] + + if not grouping: + fetch_data.append((None, + render_availability_group(range_title, range_title, None, availability, from_time, + until_time, what, avoptions, render_number, fetch))) + + else: + # Grouping is one of host/hostgroup/servicegroup + # 1. Get complete list of all groups + all_group_ids = get_av_groups(availability, grouping) + + # 2. Compute Names for the groups and sort according to these names + if grouping != "host": + group_titles = dict(visuals.all_groups(grouping[:-7])) + + titled_groups = [] + for group_id in all_group_ids: + if grouping == "host": + titled_groups.append((group_id[1], group_id)) # omit the site name + else: + if group_id == (): + title = _("Not contained in any group") + else: + title = group_titles.get(group_id, group_id) + titled_groups.append((title, group_id)) ## ACHTUNG + titled_groups.sort(cmp = lambda a,b: cmp(a[1], b[1])) + + # 3. Loop over all groups and render them + for title, group_id in titled_groups: + fetch_data.append((title, + render_availability_group(title, range_title, group_id, availability, + from_time, until_time, what, avoptions, render_number, fetch) + )) + + # Legend for Availability levels + av_levels = avoptions["av_levels"] + if av_levels and not no_html: + warn, crit = av_levels + html.write('
<div class="avlegend levels">')
+        html.write('<h3>%s</h3>' % _("Availability levels"))
+        html.write('<div class="state state0">%s</div><div class=level>≥ %.3f%%</div>' % (_("OK"), warn))
+        html.write('<div class="state state1">%s</div><div class=level>≥ %.3f%%</div>' % (_("WARN"), crit))
+        html.write('<div class="state state2">%s</div><div class=level>< %.3f%%</div>' % (_("CRIT"), crit))
+        html.write('</div>')
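+        # (av_levels is the (warn, crit) pair of percentages taken from the
+        # report options; with e.g. (99.0, 95.0) the legend above renders as
+        # "≥ 99.000%" / "≥ 95.000%" / "< 95.000%". The numbers are only an
+        # illustration, not values from the source.)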
+
+    # Legend for timeline
+    if "display_timeline_legend" in avoptions["labelling"] and avoptions["show_timeline"] and not no_html:
+        render_timeline_legend(what)
+
+    return fetch_data
+
+
+def render_timeline_legend(what):
+    html.write('
<div class="avlegend timeline">')
+    html.write('<h3>%s</h3>' % _('Timeline colors'))
+    html.write('<div class="state state0">%s</div>' % (what == "host" and _("UP") or _("OK")))
+    if what != "host":
+        html.write('<div class="state state1">%s</div>' % _("WARN"))
+    html.write('<div class="state state2">%s</div>' % (what == "host" and _("DOWN") or _("CRIT")))
+    html.write('<div class="state state3">%s</div>' % (what == "host" and _("UNREACH") or _("UNKNOWN")))
+    html.write('<div class="state flapping">%s</div>' % _("Flapping"))
+    if what != "host":
+        html.write('<div class="state hostdown">%s</div>' % _("H.Down"))
+    html.write('<div class="state downtime">%s</div>' % _("Downtime"))
+    html.write('<div class="state ooservice">%s</div>' % _("OO/Service"))
+    html.write('<div class="state unmonitored">%s</div>' % _("unmonitored"))
+    html.write('</div>
    ') + + +def get_av_groups(availability, grouping): + all_group_ids = set([]) + for site, host, service, display_name, states, considered_duration, total_duration, statistics, timeline_rows, group_ids in availability: + all_group_ids.update(group_ids) + if len(group_ids) == 0: + all_group_ids.add(()) # null-tuple denotes ungrouped objects + return all_group_ids + + +# When grouping is enabled, this function is called once for each group +def render_availability_group(group_title, range_title, group_id, availability, + from_time, until_time, what, avoptions, render_number, fetch): + + # Filter out groups that we want to show this time + group_availability = [] + for entry in availability: + group_ids = entry[-1] + if group_id == () and group_ids: + continue # This is not an angrouped object + elif group_id and group_id not in group_ids: + continue # Not this group + group_availability.append(entry) + + # Some columns might be unneeded due to state treatment options + sg = avoptions["state_grouping"] + state_groups = [ sg["warn"], sg["unknown"], sg["host_down"] ] + + show_timeline = avoptions["show_timeline"] + labelling = avoptions["labelling"] + av_levels = avoptions["av_levels"] + + # Helper function, needed in row and in summary line + def cell_active(sid): + if sid not in [ "up", "ok" ] and avoptions["av_mode"]: + return False + if sid == "outof_notification_period" and avoptions["notification_period"] != "honor": + return False + elif sid == "outof_service_period": # Never show this as a column + return False + elif sid == "in_downtime" and avoptions["downtimes"]["include"] != "honor": + return False + elif sid == "unmonitored" and not avoptions["consider"]["unmonitored"]: + return False + elif sid == "flapping" and not avoptions["consider"]["flapping"]: + return False + elif sid == "host_down" and not avoptions["consider"]["host_down"]: + return False + elif sid in [ "warn", "unknown", "host_down" ] and sid not in state_groups: + return False + else: + return True + + # Render the stuff + do_csv = html.output_format == "csv_export" + no_html = do_csv or fetch + + # Sort according to host and service. 
First after site, then + # host (natural sort), then service + def cmp_av_entry(a, b): + return cmp(a[0], b[0]) or \ + cmp(num_split(a[1]) + (a[1],), num_split(b[1]) + (b[1],)) or \ + cmp(cmp_service_name_equiv(a[2]), cmp_service_name_equiv(b[2])) or \ + cmp(a[2], b[2]) + + group_availability.sort(cmp = cmp_av_entry) + show_summary = avoptions.get("summary") + summary = {} + summary_counts = {} + table.begin("av_items", group_title, css="availability", + searchable = False, limit = None, output_format = do_csv and "csv" or (fetch and "fetch" or "html"), + omit_headers = "omit_headers" in avoptions["labelling"]) + for site, host, service, display_name, states, considered_duration, total_duration, statistics, timeline_rows, group_ids in group_availability: + table.row() + + if what != "bi": + timeline_url = html.makeuri([ + ("timeline", "yes"), + ("timeline_site", site), + ("timeline_host", host), + ("timeline_service", service)]) + else: + timeline_url = html.makeuri([("timeline", "yes"), ("av_aggr_name", service), ("av_aggr_group", host)]) + + + if not "omit_buttons" in labelling and not no_html: + table.cell("", css="buttons") + if what != "bi": + history_url = history_url_of(site, host, service, from_time, until_time) + html.icon_button(history_url, _("Event History"), "history") + html.icon_button(timeline_url, _("Timeline"), "timeline") + else: + html.icon_button(timeline_url, _("Timeline"), "timeline") + + host_url = "view.py?" + html.urlencode_vars([("view_name", "hoststatus"), ("site", site), ("host", host)]) + if what == "bi": + table.cell(_("Aggregate")) + if no_html: + html.write(service) + else: + bi_url = "view.py?" + html.urlencode_vars([("view_name", "aggr_single"), ("aggr_group", host), ("aggr_name", service)]) + html.write('%s' % (bi_url, service)) + availability_columns = bi_availability_columns + else: + if not "omit_host" in labelling: + table.cell(_("Host")) + if no_html: + html.write(host) + else: + html.write('%s' % (host_url, host)) + if what == "service": + if "use_display_name" in labelling: + service_name = display_name + else: + service_name = service + + table.cell(_("Service")) + if no_html: + html.write(service_name) + else: + service_url = "view.py?" 
+ html.urlencode_vars([("view_name", "service"), ("site", site), ("host", host), ("service", service)]) + html.write('%s' % (service_url, service_name)) + + availability_columns = service_availability_columns + else: + availability_columns = host_availability_columns + + if show_timeline: + table.cell(_("Timeline"), css="timeline") + if not no_html: + html.write('' % timeline_url) + render_timeline(timeline_rows, from_time, until_time, total_duration, (site, host, service), range_title, render_number, what, "", avoptions, fetch, style="inline") + if not no_html: + html.write('') + + for sid, css, sname, help in availability_columns: + if not cell_active(sid): + continue + if avoptions["av_mode"]: + sname = _("Avail.") + + number = states.get(sid, 0) + if not number: + css = "unused" + elif show_summary: + summary.setdefault(sid, 0.0) + if avoptions["timeformat"].startswith("percentage"): + if considered_duration > 0: + summary[sid] += float(number) / considered_duration + else: + summary[sid] += number + + # Apply visual availability levels (render OK in yellow/red, if too low) + if number and av_levels and sid in [ "ok", "up" ]: + css = "state%d" % check_av_levels(number, av_levels, considered_duration) + table.cell(sname, render_number(number, considered_duration), css="narrow number " + css, help=help) + + # Statistics? + x_cnt, x_min, x_max = statistics.get(sid, (None, None, None)) + os_aggrs, os_states = avoptions.get("outage_statistics", ([],[])) + if sid in os_states: + for aggr in os_aggrs: + title = statistics_headers[aggr] + if x_cnt != None: + if aggr == "avg": + r = render_number(number / x_cnt, considered_duration) + elif aggr == "min": + r = render_number(x_min, considered_duration) + elif aggr == "max": + r = render_number(x_max, considered_duration) + else: + r = str(x_cnt) + summary_counts.setdefault(sid, 0) + summary_counts[sid] += x_cnt + table.cell(title, r, css="number stats " + css) + else: + table.cell(title, "") + + + + if show_summary: + table.row(css="summary") + if not "omit_buttons" in labelling and not no_html: + table.cell("") + if not "omit_host" in labelling or what == "bi": + table.cell("", _("Summary"), css="heading") + if what == "service": + table.cell("", "") + + if show_timeline and not do_csv: + table.cell("") + + for sid, css, sname, help in availability_columns: + if not cell_active(sid): + continue + number = summary.get(sid, 0) + if show_summary == "average" or avoptions["timeformat"].startswith("percentage"): + number /= len(group_availability) + if avoptions["timeformat"].startswith("percentage"): + number *= considered_duration + if not number: + css = "unused" + + if number and av_levels and sid in [ "ok", "up" ]: + css = "state%d" % check_av_levels(number, av_levels, considered_duration) + table.cell(sname, render_number(number, considered_duration), css="heading number " + css, help=help) + os_aggrs, os_states = avoptions.get("outage_statistics", ([],[])) + if sid in os_states: + for aggr in os_aggrs: + title = statistics_headers[aggr] + if aggr == "cnt": + count = summary_counts.get(sid, 0) + if show_summary == "average": + count = float(count) / len(group_availability) + text = "%.2f" % count + else: + text = str(count) + table.cell(sname, text, css="number stats " + css, help=help) + else: + table.cell(title, "") + + return table.end() # returns Table data if fetch == True + +def check_av_levels(number, av_levels, considered_duration): + if considered_duration == 0: + return 0 + + perc = 100 * float(number) / float(considered_duration) + 
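+    # Illustrative example (values not from the source): with av_levels set to
+    # (99.0, 95.0), perc = 99.5 returns 0 (OK), perc = 97.0 returns 1 (WARN)
+    # and perc = 90.0 returns 2 (CRIT).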
warn, crit = av_levels + if perc < crit: + return 2 + elif perc < warn: + return 1 + else: + return 0 + + +def compute_bi_availability(avoptions, aggr_rows): + rows = [] + for aggr_row in aggr_rows: + these_rows, tree_state = get_bi_timeline(aggr_row["aggr_tree"], aggr_row["aggr_group"], avoptions, False) + rows += these_rows + return do_render_availability(rows, "bi", avoptions, timeline=False, timewarpcode=None, fetch=True) + + +# Render availability of a BI aggregate. This is currently +# no view and does not support display options +def render_bi_availability(title, aggr_rows): + html.add_status_icon("download_csv", _("Export as CSV"), html.makeuri([("output_format", "csv_export")])) + + timeline = html.var("timeline") + if timeline: + title = _("Timeline of ") + title + else: + title = _("Availability of ") + title + if html.output_format != "csv_export": + html.body_start(title, stylesheets=["pages","views","status", "bi"], javascripts=['bi']) + html.top_heading(title) + html.begin_context_buttons() + togglebutton("avoptions", False, "painteroptions", _("Configure details of the report")) + html.context_button(_("Status View"), html.makeuri([("mode", "status")]), "status") + if timeline: + html.context_button(_("Availability"), html.makeuri([("timeline", "")]), "availability") + elif len(aggr_rows) == 1: + aggr_name = aggr_rows[0]["aggr_name"] + aggr_group = aggr_rows[0]["aggr_group"] + timeline_url = html.makeuri([("timeline", "1"), ("av_aggr_name", aggr_name), ("av_aggr_group", aggr_group)]) + html.context_button(_("Timeline"), timeline_url, "timeline") + html.end_context_buttons() + + html.plug() + avoptions = render_availability_options("bi") + range, range_title = avoptions["range"] + avoptions_html = html.drain() + html.unplug() + if html.output_format == "csv_export": + av_output_csv_mimetype(title) + else: + html.write(avoptions_html) + + timewarpcode = "" + + if not html.has_user_errors(): + rows = [] + for aggr_row in aggr_rows: + tree = aggr_row["aggr_tree"] + reqhosts = tree["reqhosts"] + try: + timewarp = int(html.var("timewarp")) + except: + timewarp = None + these_rows, tree_state = get_bi_timeline(tree, aggr_row["aggr_group"], avoptions, timewarp) + rows += these_rows + if timewarp and tree_state: + state, assumed_state, node, subtrees = tree_state + eff_state = state + if assumed_state != None: + eff_state = assumed_state + row = { + "aggr_tree" : tree, + "aggr_treestate" : tree_state, + "aggr_state" : state, # state disregarding assumptions + "aggr_assumed_state" : assumed_state, # is None, if there are no assumptions + "aggr_effective_state" : eff_state, # is assumed_state, if there are assumptions, else real state + "aggr_name" : node["title"], + "aggr_output" : eff_state["output"], + "aggr_hosts" : node["reqhosts"], + "aggr_function" : node["func"], + "aggr_group" : html.var("aggr_group"), + } + tdclass, htmlcode = bi.render_tree_foldable(row, boxes=False, omit_root=False, + expansion_level=bi.load_ex_level(), only_problems=False, lazy=False) + html.plug() + html.write('

    ') + + # render icons for back and forth + if int(these_rows[0]["from"]) == timewarp: + html.disabled_icon_button("back_off") + have_forth = False + previous_row = None + for row in these_rows: + if int(row["from"]) == timewarp and previous_row != None: + html.icon_button(html.makeuri([("timewarp", str(int(previous_row["from"])))]), _("Jump one phase back"), "back") + elif previous_row and int(previous_row["from"]) == timewarp and row != these_rows[-1]: + html.icon_button(html.makeuri([("timewarp", str(int(row["from"])))]), _("Jump one phase forth"), "forth") + have_forth = True + previous_row = row + if not have_forth: + html.disabled_icon_button("forth_off") + + html.write("   ") + html.icon_button(html.makeuri([("timewarp", "")]), _("Close Timewarp"), "closetimewarp") + timewarpcode = html.drain() + html.unplug() + timewarpcode += '%s %s

    ' % (_("Timewarp to "), time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(timewarp))) + \ + '
    ' % tdclass + \ + htmlcode + \ + '
    ' + else: + timewarpcode = "" + + do_render_availability(rows, "bi", avoptions, timeline, timewarpcode) + + if html.output_format != "csv_export": + html.bottom_footer() + html.body_end() + +def get_bi_timeline(tree, aggr_group, avoptions, timewarp): + range, range_title = avoptions["range"] + # Get state history of all hosts and services contained in the tree. + # In order to simplify the query, we always fetch the information for + # all hosts of the aggregates. + only_sites = set([]) + hosts = [] + for site, host in tree["reqhosts"]: + only_sites.add(site) + hosts.append(host) + + columns = [ "host_name", "service_description", "from", "log_output", "state", "in_downtime" ] + html.live.set_only_sites(list(only_sites)) + html.live.set_prepend_site(True) + html.live.set_limit() # removes limit + query = "GET statehist\n" + \ + "Columns: " + " ".join(columns) + "\n" +\ + "Filter: time >= %d\nFilter: time < %d\n" % range + + # Create a specific filter. We really only want the services and hosts + # of the aggregation in question. That prevents status changes + # irrelevant services from introducing new phases. + by_host = {} + for site, host, service in bi.find_all_leaves(tree): + by_host.setdefault(host, set([])).add(service) + + for host, services in by_host.items(): + query += "Filter: host_name = %s\n" % host + query += "Filter: service_description = \n" + for service in services: + query += "Filter: service_description = %s\n" % service + query += "Or: %d\nAnd: 2\n" % (len(services) + 1) + if len(hosts) != 1: + query += "Or: %d\n" % len(hosts) + + data = html.live.query(query) + if not data: + return [], None + # raise MKGeneralException(_("No historical data available for this aggregation. Query was:
<pre>%s</pre>
    ") % query) + + html.live.set_prepend_site(False) + html.live.set_only_sites(None) + columns = ["site"] + columns + rows = [ dict(zip(columns, row)) for row in data ] + + # Now comes the tricky part: recompute the state of the aggregate + # for each step in the state history and construct a timeline from + # it. As a first step we need the start state for each of the + # hosts/services. They will always be the first consecute rows + # in the statehist table + + # First partition the rows into sequences with equal start time + phases = {} + for row in rows: + from_time = row["from"] + phases.setdefault(from_time, []).append(row) + + # Convert phases to sorted list + sorted_times = phases.keys() + sorted_times.sort() + phases_list = [] + for from_time in sorted_times: + phases_list.append((from_time, phases[from_time])) + + states = {} + def update_states(phase_entries): + for row in phase_entries: + service = row["service_description"] + key = row["site"], row["host_name"], service + states[key] = row["state"], row["log_output"], row["in_downtime"] + + + update_states(phases_list[0][1]) + # states does now reflect the host/services states at the beginning + # of the query range. + tree_state = compute_tree_state(tree, states) + tree_time = range[0] + if timewarp == int(tree_time): + timewarp_state = tree_state + else: + timewarp_state = None + + timeline = [] + def append_to_timeline(from_time, until_time, tree_state): + timeline.append({ + "state" : tree_state[0]['state'], + "log_output" : tree_state[0]['output'], + "from" : from_time, + "until" : until_time, + "site" : "", + "host_name" : aggr_group, + "service_description" : tree['title'], + "in_notification_period" : 1, + "in_service_period" : 1, + "in_downtime" : tree_state[0]['in_downtime'], + "in_host_downtime" : 0, + "host_down" : 0, + "is_flapping" : 0, + "duration" : until_time - from_time, + }) + + + for from_time, phase in phases_list[1:]: + update_states(phase) + next_tree_state = compute_tree_state(tree, states) + duration = from_time - tree_time + append_to_timeline(tree_time, from_time, tree_state) + tree_state = next_tree_state + tree_time = from_time + if timewarp == tree_time: + timewarp_state = tree_state + + # Add one last entry - for the state until the end of the interval + append_to_timeline(tree_time, range[1], tree_state) + + return timeline, timewarp_state + +def compute_tree_state(tree, status): + # Convert our status format into that needed by BI + services_by_host = {} + hosts = {} + for site_host_service, state_output in status.items(): + site_host = site_host_service[:2] + service = site_host_service[2] + if service: + services_by_host.setdefault(site_host, []).append(( + service, # service description + state_output[0], # state + 1, # has_been_checked + state_output[1], # output + state_output[0], # hard state (we use the soft state here) + 1, # attempt + 1, # max_attempts (not relevant) + state_output[2], # in_downtime + False, # acknowledged + )) + else: + hosts[site_host] = state_output + + status_info = {} + for site_host, state_output in hosts.items(): + status_info[site_host] = [ + state_output[0], + state_output[0], # host hard state + state_output[1], + state_output[2], # in_downtime + False, # acknowledged + services_by_host.get(site_host,[]) + ] + + + # Finally we can execute the tree + bi.load_assumptions() + tree_state = bi.execute_tree(tree, status_info) + return tree_state + +#. +# .--Annotations---------------------------------------------------------. 
+# | _ _ _ _ | +# | / \ _ __ _ __ ___ | |_ __ _| |_(_) ___ _ __ ___ | +# | / _ \ | '_ \| '_ \ / _ \| __/ _` | __| |/ _ \| '_ \/ __| | +# | / ___ \| | | | | | | (_) | || (_| | |_| | (_) | | | \__ \ | +# | /_/ \_\_| |_|_| |_|\___/ \__\__,_|\__|_|\___/|_| |_|___/ | +# | | +# +----------------------------------------------------------------------+ +# | This code deals with retrospective annotations and downtimes. | +# '----------------------------------------------------------------------' + +# Example for annotations: +# { +# ( "mysite", "foohost", "myservice" ) : # service might be None +# [ +# { +# "from" : 1238288548, +# "until" : 1238292845, +# "text" : u"Das ist ein Text über mehrere Zeilen, oder was weiß ich", +# "downtime" : True, # Treat as scheduled Downtime, +# "date" : 12348854885, # Time of entry +# "author" : "mk", +# }, +# # ... further entries +# ] +# } + + +def save_annotations(annotations): + file(defaults.var_dir + "/web/statehist_annotations.mk", "w").write(repr(annotations) + "\n") + +def load_annotations(lock = False): + path = defaults.var_dir + "/web/statehist_annotations.mk" + if os.path.exists(path): + if lock: + aquire_lock(path) + return eval(file(path).read()) + else: + return {} + +def update_annotations(site_host_svc, annotation): + annotations = load_annotations(lock = True) + entries = annotations.get(site_host_svc, []) + new_entries = [] + for entry in entries: + if entry["from"] == annotation["from"] \ + and entry["until"] == annotation["until"]: + continue # Skip existing entries with same identity + new_entries.append(entry) + new_entries.append(annotation) + annotations[site_host_svc] = new_entries + save_annotations(annotations) + + +def find_annotation(annotations, site_host_svc, fromtime, untiltime): + entries = annotations.get(site_host_svc) + if not entries: + return None + for annotation in entries: + if annotation["from"] == fromtime \ + and annotation["until"] == untiltime: + return annotation + return None + +def delete_annotation(annotations, site_host_svc, fromtime, untiltime): + entries = annotations.get(site_host_svc) + if not entries: + return + found = None + for nr, annotation in enumerate(entries): + if annotation["from"] == fromtime \ + and annotation["until"] == untiltime: + found = nr + break + if found != None: + del entries[nr] + + +def render_annotations(annotations, from_time, until_time, by_host, what, avoptions, omit_service): + format = "%H:%M:%S" + if time.localtime(from_time)[:3] != time.localtime(until_time-1)[:3]: + format = "%Y-%m-%d " + format + def render_date(ts): + return time.strftime(format, time.localtime(ts)) + + annos_to_render = [] + for site_host, avail_entries in by_host.iteritems(): + for service in avail_entries.keys(): + site_host_svc = site_host[0], site_host[1], (service or None) + for annotation in annotations.get(site_host_svc, []): + if (annotation["from"] >= from_time and annotation["from"] <= until_time) or \ + (annotation["until"] >= from_time and annotation["until"] <= until_time): + annos_to_render.append((site_host_svc, annotation)) + + annos_to_render.sort(cmp=lambda a,b: cmp(a[1]["from"], b[1]["from"]) or cmp(a[0], b[0])) + + labelling = avoptions["labelling"] + + table.begin(title = _("Annotations"), omit_if_empty = True) + for (site_id, host, service), annotation in annos_to_render: + table.row() + table.cell("", css="buttons") + anno_vars = [ + ( "anno_site", site_id ), + ( "anno_host", host ), + ( "anno_service", service or "" ), + ( "anno_from", int(annotation["from"]) ), + ( "anno_until", 
int(annotation["until"]) ), + ] + edit_url = html.makeuri(anno_vars) + html.icon_button(edit_url, _("Edit this annotation"), "edit") + delete_url = html.makeactionuri([("_delete_annotation", "1")] + anno_vars) + html.icon_button(delete_url, _("Delete this annotation"), "delete") + + if not omit_service: + if not "omit_host" in labelling: + host_url = "view.py?" + html.urlencode_vars([("view_name", "hoststatus"), ("site", site_id), ("host", host)]) + table.cell(_("Host"), '%s' % (host_url, host)) + + if service: + service_url = "view.py?" + html.urlencode_vars([("view_name", "service"), ("site", site_id), ("host", host), ("service", service)]) + # TODO: honor use_display_name. But we have no display names here... + service_name = service + table.cell(_("Service"), '%s' % (service_url, service_name)) + + table.cell(_("From"), render_date(annotation["from"]), css="nobr narrow") + table.cell(_("Until"), render_date(annotation["until"]), css="nobr narrow") + table.cell(_("Annotation"), html.attrencode(annotation["text"])) + table.cell(_("Author"), annotation["author"]) + table.cell(_("Entry"), render_date(annotation["date"]), css="nobr narrow") + table.end() + + + +def edit_annotation(): + site_id = html.var("anno_site") or "" + hostname = html.var("anno_host") + service = html.var("anno_service") or None + fromtime = float(html.var("anno_from")) + untiltime = float(html.var("anno_until")) + site_host_svc = (site_id, hostname, service) + + # Find existing annotation with this specification + annotations = load_annotations() + annotation = find_annotation(annotations, site_host_svc, fromtime, untiltime) + if not annotation: + annotation = { + "from" : fromtime, + "until" : untiltime, + "text" : "", + } + annotation["host"] = hostname + annotation["service"] = service + annotation["site"] = site_id + + html.plug() + + title = _("Edit annotation of ") + hostname + if service: + title += "/" + service + html.body_start(title, stylesheets=["pages","views","status"]) + html.top_heading(title) + + html.begin_context_buttons() + html.context_button(_("Abort"), html.makeuri([("anno_host", "")]), "abort") + html.end_context_buttons() + + value = forms.edit_dictionary([ + ( "site", TextAscii(title = _("Site")) ), + ( "host", TextUnicode(title = _("Hostname")) ), + ( "service", Optional(TextUnicode(allow_empty=False), sameline = True, title = _("Service")) ), + ( "from", AbsoluteDate(title = _("Start-Time"), include_time = True) ), + ( "until", AbsoluteDate(title = _("End-Time"), include_time = True) ), + ( "text", TextAreaUnicode(title = _("Annotation"), allow_empty = False) ), ], + annotation, + varprefix = "editanno_", + formname = "editanno", + focus = "text") + + if value: + site_host_svc = value["site"], value["host"], value["service"] + del value["site"] + del value["host"] + value["date"] = time.time() + value["author"] = config.user_id + update_annotations(site_host_svc, value) + html.drain() # omit previous HTML code, not needed + html.unplug() + html.del_all_vars(prefix = "editanno_") + html.del_var("filled_in") + return False + + html.unplug() # show HTML code + + html.bottom_footer() + html.body_end() + return True + + +# Called at the beginning of every availability page +def handle_delete_annotations(): + if html.var("_delete_annotation"): + site_id = html.var("anno_site") or "" + hostname = html.var("anno_host") + service = html.var("anno_service") or None + fromtime = float(html.var("anno_from")) + untiltime = float(html.var("anno_until")) + site_host_svc = (site_id, hostname, service) + + 
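+        # An annotation is identified by its (site, host, service) triple plus
+        # the exact from/until timestamps; if find_annotation() finds no match
+        # below, the deletion request is silently ignored.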
annotations = load_annotations() + annotation = find_annotation(annotations, site_host_svc, fromtime, untiltime) + if not annotation: + return + + if not html.confirm(_("Are you sure that you want to delete the annotation '%s'?" % annotation["text"])): + return + + delete_annotation(annotations, site_host_svc, fromtime, untiltime) + save_annotations(annotations) + +def handle_edit_annotations(): + if html.var("anno_host") and not html.var("_delete_annotation"): + finished = edit_annotation() + else: + finished = False + + return finished + + diff -Nru check-mk-1.2.2p3/plugins/views/bi.py check-mk-1.2.6p12/plugins/views/bi.py --- check-mk-1.2.2p3/plugins/views/bi.py 2013-11-05 09:23:09.000000000 +0000 +++ check-mk-1.2.6p12/plugins/views/bi.py 2015-09-21 10:59:54.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. @@ -37,7 +37,7 @@ multisite_datasources["bi_aggregations"] = { "title" : _("BI Aggregations"), "table" : bi.table, - "infos" : [ "aggr" ], + "infos" : [ "aggr", "aggr_group", ], "keys" : [], "idkeys" : [ 'aggr_name' ], } @@ -45,7 +45,7 @@ multisite_datasources["bi_host_aggregations"] = { "title" : _("BI Aggregations affected by one host"), "table" : bi.host_table, - "infos" : [ "host", "aggr" ], + "infos" : [ "host", "aggr", "aggr_group" ], "keys" : [], "idkeys" : [ 'aggr_name' ], } @@ -55,7 +55,16 @@ multisite_datasources["bi_hostname_aggregations"] = { "title" : _("BI Hostname Aggregations"), "table" : bi.hostname_table, - "infos" : [ "host", "aggr" ], + "infos" : [ "host", "aggr", "aggr_group" ], + "keys" : [], + "idkeys" : [ 'aggr_name' ], +} + +# The same but with group information +multisite_datasources["bi_hostnamebygroup_aggregations"] = { + "title" : _("BI Aggregations for Hosts by Hostgroups"), + "table" : bi.hostname_by_group_table, + "infos" : [ "host", "aggr", "hostgroup", "aggr_group" ], "keys" : [], "idkeys" : [ 'aggr_name' ], } @@ -68,6 +77,30 @@ # |_| \__,_|_|_| |_|\__\___|_| |___/ # +def paint_bi_icons(row): + html.plug() + single_url = "view.py?" 
+ html.urlencode_vars([ + ("view_name", "aggr_single"), + ("aggr_name", row["aggr_name"])]) + html.icon_button(single_url, _("Show only this aggregation"), "showbi") + avail_url = single_url + "&mode=availability" + html.icon_button(avail_url, _("Analyse availability of this aggregation"), "availability") + if row["aggr_effective_state"]["in_downtime"]: + html.icon(_("This aggregation is currently in a scheduled downtime"), "downtime") + if row["aggr_effective_state"]["acknowledged"]: + html.icon(_("The critical problems that make this aggregation non-OK have been acknowledged"), "ack") + code = html.drain() + html.unplug() + return "buttons", code + +multisite_painters["aggr_icons"] = { + "title" : _("Links"), + "columns" : [ "aggr_group", "aggr_name", "aggr_effective_state" ], + "printable" : False, + "paint" : paint_bi_icons, +} + + def paint_aggr_state_short(state, assumed = False): if state == None: return "", "" @@ -150,38 +183,55 @@ } multisite_painter_options["aggr_expand"] = { - "title" : _("Initial expansion of aggregations"), - "default" : "0", - "values" : [ ("0", "collapsed"), ("1", "first level"), ("2", "two levels"), ("3", "three levels"), ("999", "complete")] + 'valuespec' : DropdownChoice( + title = _("Initial expansion of aggregations"), + default_value = "0", + choices = [ + ("0", _("collapsed")), + ("1", _("first level")), + ("2", _("two levels")), + ("3", _("three levels")), + ("999", _("complete")) + ] + ) } multisite_painter_options["aggr_onlyproblems"] = { - "title" : _("Show only problems"), - "default" : "0", - "values" : [ ("0", "show all"), ("1", "show only problems")] + 'valuespec' : DropdownChoice( + title = _("Show only problems"), + default_value = "0", + choices = [ + ("0", _("show all")), + ("1", _("show only problems")) + ], + ) } multisite_painter_options["aggr_treetype"] = { - "title" : _("Type of tree layout"), - "default" : "foldable", - "values" : [ - ("foldable", _("foldable")), - ("boxes", _("boxes")), - ("boxes-omit-root", _("boxes (omit root)")), - ("bottom-up", _("bottom up")), - ("top-down", _("top down"))] + 'valuespec' : DropdownChoice( + title = _("Type of tree layout"), + default_value = "foldable", + choices = [ + ("foldable", _("foldable")), + ("boxes", _("boxes")), + ("boxes-omit-root", _("boxes (omit root)")), + ("bottom-up", _("bottom up")), + ("top-down", _("top down")), + ], + ) } multisite_painter_options["aggr_wrap"] = { - "title" : _("Handling of too long texts"), - "default" : "wrap", - "values" : [ ("wrap", "wrap"), ("nowrap", "don't wrap")] + 'valuespec' : DropdownChoice( + title = _("Handling of too long texts"), + default_value = "wrap", + choices = [ + ("wrap", _("wrap")), + ("nowrap", _("don't wrap")), + ], + ) } - - - - def paint_aggr_tree_ltr(row, mirror): wrap = get_painter_option("aggr_wrap") @@ -265,185 +315,3 @@ "paint" : lambda row: bi.render_tree_foldable(row, boxes=True, omit_root=True, expansion_level=bi.load_ex_level(), only_problems=False, lazy=True), } - -# _____ _ _ _ -# | ___(_) | |_ ___ _ __ ___ -# | |_ | | | __/ _ \ '__/ __| -# | _| | | | || __/ | \__ \ -# |_| |_|_|\__\___|_| |___/ -# - -class BIGroupFilter(Filter): - def __init__(self): - self.column = "aggr_group" - Filter.__init__(self, self.column, _("Aggregation group"), "aggr", [self.column], [self.column]) - - def variable_settings(self, row): - return [ (self.htmlvars[0], row[self.column]) ] - - def display(self): - htmlvar = self.htmlvars[0] - html.select(htmlvar, [ ("", "") ] + [(g, g) for g in bi.aggregation_groups()]) - - def 
selected_group(self): - return html.var(self.htmlvars[0]) - - def filter_table(self, rows): - group = self.selected_group() - if not group: - return rows - else: - return [ row for row in rows if row[self.column] == group ] - - def heading_info(self, infoname): - return html.var(self.htmlvars[0]) - -declare_filter( 90, BIGroupFilter()) - -class BITextFilter(Filter): - def __init__(self, what): - self.column = "aggr_" + what - label = '' - if what == 'name': - label = _('Aggregation name') - elif what == 'output': - label = _('Aggregation output') - Filter.__init__(self, self.column, label, "aggr", [self.column], [self.column]) - - def variable_settings(self, row): - return [ (self.htmlvars[0], row[self.column]) ] - - def display(self): - html.text_input(self.htmlvars[0]) - - def heading_info(self, infoname): - return html.var(self.htmlvars[0]) - - def filter_table(self, rows): - val = html.var(self.htmlvars[0]) - if not val: - return rows - reg = re.compile(val.lower()) - return [ row for row in rows if reg.search(row[self.column].lower()) ] - -declare_filter(120, BITextFilter("name")) -declare_filter(121, BITextFilter("output")) - -class BIHostFilter(Filter): - def __init__(self): - self.column = "aggr_hosts" - Filter.__init__(self, self.column, _("Affected hosts contain"), "aggr", ["site", "host"], []) - - def display(self): - html.text_input(self.htmlvars[1]) - - def heading_info(self, infoname): - return html.var(self.htmlvars[1]) - - def find_host(self, host, hostlist): - for s, h in hostlist: - if h == host: - return True - return False - - # Used for linking - def variable_settings(self, row): - return [ ("host", row["host_name"]), ("site", row["site"]) ] - - def filter_table(self, rows): - val = html.var(self.htmlvars[1]) - if not val: - return rows - return [ row for row in rows if self.find_host(val, row["aggr_hosts"]) ] - -declare_filter(130, BIHostFilter(), _("Filter for all aggregations that base on status information of that host. 
Exact match (no regular expression)")) - -class BIServiceFilter(Filter): - def __init__(self): - Filter.__init__(self, "aggr_service", _("Affected by service"), "aggr", ["site", "host", "service"], []) - - def double_height(self): - return True - - def display(self): - html.write(_("Host") + ": ") - html.text_input("host") - html.write(_("Service") + ": ") - html.text_input("service") - - def heading_info(self, infoname): - return html.var_utf8("host") + " / " + html.var_utf8("service") - - def service_spec(self): - return html.var_utf8("site"), html.var_utf8("host"), html.var_utf8("service") - - # Used for linking - def variable_settings(self, row): - return [ ("site", row["site"]), ("host", row["host_name"]), ("service", row["service_description"]) ] - -declare_filter(131, BIServiceFilter(), _("Filter for all aggregations that are affected by one specific service on a specific host (no regular expression)")) - -class BIStatusFilter(Filter): - def __init__(self, what): - title = (what.replace("_", " ") + " state").title() - self.column = "aggr_" + what + "state" - if what == "": - self.code = 'r' - else: - self.code = what[0] - self.prefix = "bi%ss" % self.code - vars = [ self.prefix + str(x) for x in [ -1, 0, 1, 2, 3 ] ] - if self.code == 'a': - vars.append(self.prefix + "n") - Filter.__init__(self, self.column, title, "aggr", vars, []) - - def filter(self, tablename): - return "" - - def double_height(self): - return self.column == "aggr_assumed_state" - - def display(self): - if html.var("filled_in"): - defval = "" - else: - defval = "on" - for varend, text in [('0', 'OK'), ('1', 'WARN'), ('2', 'CRIT'), - ('3', 'UNKN'), ('-1', 'PENDING'), ('n', _('no assumed state set'))]: - if self.code != 'a' and varend == 'n': - continue # no unset for read and effective state - if varend == 'n': - html.write("
    ") - var = self.prefix + varend - html.checkbox(var, defval, label = text) - - def filter_table(self, rows): - jeaders = [] - if html.var("filled_in"): - defval = "" - else: - defval = "on" - - allowed_states = [] - for i in ['0','1','2','3','-1','n']: - if html.var(self.prefix + i, defval) == "on": - if i == 'n': - s = None - else: - s = int(i) - allowed_states.append(s) - newrows = [] - for row in rows: - if row[self.column] != None: - s = row[self.column]["state"] - else: - s = None - if s in allowed_states: - newrows.append(row) - return newrows - -declare_filter(150, BIStatusFilter("")) -declare_filter(151, BIStatusFilter("effective_")) -declare_filter(152, BIStatusFilter("assumed_")) - - diff -Nru check-mk-1.2.2p3/plugins/views/builtin.py check-mk-1.2.6p12/plugins/views/builtin.py --- check-mk-1.2.2p3/plugins/views/builtin.py 2013-11-05 09:23:09.000000000 +0000 +++ check-mk-1.2.6p12/plugins/views/builtin.py 2015-09-21 10:59:54.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. @@ -24,11 +24,26 @@ # to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, # Boston, MA 02110-1301 USA. +# Painters used in list of services views +service_view_painters = [ + ('service_state', None), + ('service_description', 'service'), + ('service_icons', None), + ('svc_plugin_output', None), + ('svc_state_age', None), + ('svc_check_age', None), + ('perfometer', None), +] + +# Same as list of services, but extended by the hostname +host_service_view_painters = service_view_painters[:] +host_service_view_painters.insert(1, ('host', 'host')) + multisite_builtin_views.update({ 'allhosts': {'browser_reload': 30, 'column_headers': 'pergroup', 'datasource': 'hosts', - 'description': 'Overall state of allhosts, with counts of services in the various states.', + 'description': _('Overall state of all hosts, with counts of services in the various states.'), 'group_painters': [('sitealias', None)], 'hard_filters': ['summary_host'], 'hard_filtervars': [('is_summary_host', '0')], @@ -56,16 +71,60 @@ 'host_acknowledged', 'hostregex', 'host_notifications_enabled', + 'hostgroups', 'opthostgroup', 'host_check_command', - 'opthost_contactgroup'], + 'opthost_contactgroup', + 'hostalias', + 'host_tags', + ], 'sorters': [('site', False), ('site_host', False)], 'title': _('All hosts'), 'topic': _('Hosts')}, + 'starred_hosts': {'browser_reload': 30, + 'column_headers': 'pergroup', + 'datasource': 'hosts', + 'description': _('Overall state of your favorite hosts'), + 'group_painters': [('sitealias', None)], + 'hard_filters': ['summary_host'], + 'hard_filtervars': [('is_summary_host', '0'), ('is_host_favorites', '1')], + 'hidden': False, + 'hide_filters': [], + 'layout': 'table', + 'mustsearch': False, + 'name': 'allhosts', + 'num_columns': 3, + 'owner': '', + 'painters': [('host_state', None), + ('host', 'host'), + ('host_icons', None), + ('num_services_ok', 'host_ok'), + ('num_services_warn', 'host_warn'), + ('num_services_unknown', 'host_unknown'), + ('num_services_crit', 'host_crit'), + ('num_services_pending', 'host_pending')], + 'play_sounds': False, + 'public': True, + 'show_filters': ['host_scheduled_downtime_depth', + 'host_in_notification_period', + 'hoststate', + 'siteopt', + 'host_acknowledged', + 
'hostregex', + 'host_notifications_enabled', + 'hostgroups', + 'opthostgroup', + 'host_check_command', + 'opthost_contactgroup', + 'hostalias', + 'host_favorites'], + 'sorters': [('site', False), ('site_host', False)], + 'title': _('Favorite hosts'), + 'topic': _('Hosts')}, 'allhosts_mini': {'browser_reload': 30, 'column_headers': 'pergroup', 'datasource': 'hosts', - 'description': '', + 'description': _('Showing all hosts in a compact layout.'), 'group_painters': [('sitealias', None)], 'hard_filters': ['summary_host',], 'hard_filtervars': [('site', ''), @@ -100,7 +159,66 @@ 'allservices': {'browser_reload': 90, 'column_headers': 'pergroup', 'datasource': 'services', - 'description': 'All services grouped\r\nby hosts.', + 'description': _('All services grouped by hosts.'), + 'group_painters': [('sitealias', 'sitehosts'), + ('host_with_state', 'host')], + 'hard_filters': ['summary_host'], + 'hard_filtervars': [('is_service_in_notification_period', '-1'), + ('optservicegroup', ''), + ('is_service_notifications_enabled', '-1'), + ('is_host_in_notification_period', '-1'), + ('is_in_downtime', '-1'), + ('is_service_scheduled_downtime_depth', '-1'), + ('is_service_acknowledged', '-1'), + ('host', ''), + ('is_service_active_checks_enabled', '-1'), + ('is_summary_host', '0'), + ('service', ''), + ('check_command', ''), + ('st0', 'on'), + ('st1', 'on'), + ('st2', 'on'), + ('st3', 'on'), + ('stp', 'on'), + ('opthostgroup', ''), + ('service_output', ''), + ('is_service_is_flapping', '-1')], + 'hidden': False, + 'hide_filters': [], + 'layout': 'table', + 'mustsearch': False, + 'name': 'allservices', + 'num_columns': 1, + 'owner': '', + 'painters': service_view_painters, + 'play_sounds': False, + 'public': True, + 'show_filters': ['service_in_notification_period', + 'optservicegroup', + 'service_notifications_enabled', + 'host_in_notification_period', + 'in_downtime', + 'service_scheduled_downtime_depth', + 'service_acknowledged', + 'hostregex', + 'service_active_checks_enabled', + 'summary_host', + 'serviceregex', + 'check_command', + 'svcstate', + 'opthostgroup', + 'output', + 'service_is_flapping', + 'siteopt'], + 'sorters': [('site', False), + ('site_host', False), + ('svcdescr', False)], + 'title': _('All services'), + 'topic': _('Services')}, + 'starred_services': {'browser_reload': 90, + 'column_headers': 'pergroup', + 'datasource': 'services', + 'description': _('All of your favorites services by hosts.'), 'group_painters': [('sitealias', 'sitehosts'), ('host_with_state', 'host')], 'hard_filters': ['summary_host'], @@ -128,21 +246,16 @@ ('stp', 'on'), ('opthostgroup', ''), ('service_output', ''), - ('is_service_is_flapping', '-1')], + ('is_service_is_flapping', '-1'), + ('is_service_favorites', '1')], 'hidden': False, 'hide_filters': [], 'layout': 'table', 'mustsearch': False, - 'name': 'allservices', + 'name': 'starred_services', 'num_columns': 1, 'owner': '', - 'painters': [('service_state', None), - ('service_description', 'service'), - ('svc_plugin_output', None), - ('service_icons', None), - ('svc_state_age', None), - ('svc_check_age', None), - ('perfometer', None)], + 'painters': service_view_painters, 'play_sounds': False, 'public': True, 'show_filters': ['service_in_notification_period', @@ -161,15 +274,17 @@ 'opthostgroup', 'output', 'service_is_flapping', - 'siteopt'], + 'siteopt', + 'host_favorites', + 'service_favorites'], 'sorters': [('site', False), ('site_host', False), ('svcdescr', False)], - 'title': _('All services'), + 'title': _('Favorite services'), 'topic': _('Services')}, 
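+# Note: 'starred_hosts' and 'starred_services' above are clones of 'allhosts'
+# and 'allservices'; the functional difference is the added hard filter
+# variable is_host_favorites / is_service_favorites = '1' plus the matching
+# *_favorites entries in show_filters.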
'comments': {'column_headers': 'pergroup', 'datasource': 'comments', - 'description': 'All host- and service comments', + 'description': _('All host- and service comments'), 'group_painters': [('comment_what', None)], 'hard_filters': [], 'hard_filtervars': [('host', ''), ('service', '')], @@ -190,18 +305,18 @@ ('service_description', 'service'), ('comment_id', None)], 'public': True, - 'show_filters': ['hostregex', 'serviceregex'], + 'show_filters': ['hostregex', 'comment_entry_time', 'serviceregex'], 'sorters': [('comment_type', False), ('comment_author', False)], 'title': _('Comments') }, 'comments_of_host': {'column_headers': 'pergroup', 'datasource': 'comments', - 'description': 'Linkable view showing\r\nall comments of a specific host', + 'description': _('Linkable view showing all comments of a specific host'), 'group_painters': [], 'hard_filters': ['service'], 'hard_filtervars': [('service', '')], 'hidden': True, - 'hide_filters': ['site', 'host'], + 'hide_filters': ['siteopt', 'host'], 'icon' : 'comment', 'layout': 'table', 'mustsearch': False, @@ -211,6 +326,7 @@ 'painters': [('comment_author', None), ('comment_comment', None), ('comment_time', None), + ('comment_expires', None), ('comment_entry_type', None)], 'public': True, 'show_filters': [], @@ -221,12 +337,12 @@ 'comments_of_service': {'column_headers': 'pergroup', 'datasource': 'comments', - 'description': 'Linkable view showing\r\nall comments of a specific service', + 'description': _('Linkable view showing all comments of a specific service'), 'group_painters': [], 'hard_filters': [], 'hard_filtervars': [], 'hidden': True, - 'hide_filters': ['site', 'host', 'service'], + 'hide_filters': ['siteopt', 'host', 'service'], 'icon' : 'comment', 'layout': 'table', 'mustsearch': False, @@ -236,6 +352,7 @@ 'painters': [('comment_author', None), ('comment_comment', None), ('comment_time', None), + ('comment_expires', None), ('comment_entry_type', None)], 'public': True, 'show_filters': [], @@ -245,7 +362,7 @@ }, 'downtimes': {'column_headers': 'pergroup', 'datasource': 'downtimes', - 'description': 'All host- and service-downtimes', + 'description': _('All host- and service-downtimes'), 'group_painters': [('downtime_what', None)], 'hard_filters': [], 'hard_filtervars': [('is_service_scheduled_downtime_depth', @@ -273,18 +390,91 @@ 'public': True, 'show_filters': ['service_scheduled_downtime_depth', 'hostregex', + 'downtime_entry_time', 'serviceregex'], 'sorters': [('downtime_what', False), ('downtime_start_time', False)], 'title': _('Downtimes')}, + +'downtime_history': {'browser_reload': 0, + 'column_headers': 'pergroup', + 'datasource': 'log_events', + 'description': _('All historic scheduled downtimes of hosts and services'), + 'group_painters': [('log_what', None)], + 'hard_filters': [ 'log_type' ], + 'hard_filtervars': [('logtime_from_range', '86400'), + ('logtime_from', '60'), + ('log_type', 'DOWNTIME ALERT'), + ], + 'hidden': False, + 'hide_filters': [], + 'icon' : 'downtime', + 'layout': 'table', + 'linktitle': _('Host Dt-History'), + 'mustsearch': False, + 'num_columns': 1, + 'painters': [('log_icon', None), + ('log_time', None), + ('host', 'host_dt_hist'), + ('service_description', 'svc_dt_hist'), + ('log_state_type', None), + ('log_plugin_output', None), + ], + 'play_sounds': False, + 'public': True, + 'show_filters': ['logtime', 'hostregex', 'serviceregex', 'log_state_type' ], + 'sorters': [('log_what', True), ('log_time', True), ('log_lineno', True), ], + 'title': _('History of scheduled downtimes'), + 'topic': 
_('Other'), +}, + + 'api_downtimes': {'column_headers': 'pergroup', + 'datasource': 'downtimes', + 'description': _('All host- and service-downtimes (including ids)'), + 'group_painters': [('downtime_what', None)], + 'hard_filters': [], + 'hard_filtervars': [('is_service_scheduled_downtime_depth', + '-1'), + ('host', ''), + ('service', '')], + 'hidden': True, + 'hide_filters': [], + 'icon' : 'downtime', + 'layout': 'table', + 'mustsearch': False, + 'name': 'downtimes', + 'num_columns': 1, + 'owner': '', + 'painters': [ + ('host', 'host'), + ('service_description', 'service'), + ('downtime_author', None), + ('downtime_entry_time', None), + ('downtime_start_time', None), + ('downtime_end_time', None), + ('downtime_fixed', None), + ('downtime_duration', None), + ('downtime_comment', None), + ('downtime_id', None), + ], + 'public': True, + 'show_filters': [ + 'service_scheduled_downtime_depth', + 'hostregex', + 'serviceregex', + 'downtime_id', + ], + 'sorters': [('downtime_what', False), + ('downtime_start_time', False)], + 'title': _('Downtimes')}, 'downtimes_of_host': {'column_headers': 'pergroup', 'datasource': 'downtimes', - 'description': '', + 'description': _('Lists all host downtimes.'), 'group_painters': [], 'hard_filters': [], 'hard_filtervars': [], 'hidden': True, - 'hide_filters': ['site', 'host'], + 'hide_filters': ['siteopt', 'host'], 'icon' : 'downtime', 'layout': 'table', 'mustsearch': False, @@ -305,12 +495,12 @@ 'title': _('Downtimes of host')}, 'downtimes_of_service': {'column_headers': 'pergroup', 'datasource': 'downtimes', - 'description': '', + 'description': _('Lists all downtimes for services.'), 'group_painters': [], 'hard_filters': [], 'hard_filtervars': [], 'hidden': True, - 'hide_filters': ['site', 'service', 'host'], + 'hide_filters': ['siteopt', 'service', 'host'], 'icon' : 'downtime', 'layout': 'table', 'mustsearch': False, @@ -332,7 +522,7 @@ 'host': {'browser_reload': 30, 'column_headers': 'pergroup', 'datasource': 'services', - 'description': 'All services of a given host. The host and site must be set via HTML variables.', + 'description': _('All services of a given host. The host and site must be set via HTML variables.'), 'group_painters': [('host_with_state', 'hoststatus')], 'hard_filters': [], 'hard_filtervars': [('st0', 'on'), @@ -341,30 +531,26 @@ ('st3', 'on'), ('stp', 'on')], 'hidden': True, - 'hide_filters': ['site', 'host'], - 'icon': 'services', + 'hide_filters': ['siteopt', 'host'], + 'icon': 'status', 'layout': 'boxed', 'mustsearch': False, 'name': 'host', 'num_columns': 2, 'owner': '', - 'painters': [('service_state', None), - ('service_description', 'service'), - ('service_icons', None), - ('svc_plugin_output', None), - ('svc_state_age', None), - ('svc_check_age', None), - ('perfometer', None)], + 'painters': service_view_painters, 'play_sounds': False, 'public': True, 'show_filters': ['svcstate', 'serviceregex'], - 'sorters': [('svcdescr', False)], + 'sorters': [('site', False), + ('site_host', False), + ('svcdescr', False)], 'linktitle': _('Services'), 'title': _('Services of Host')}, 'host_export': {'browser_reload': 30, 'column_headers': 'pergroup', 'datasource': 'services', - 'description': 'All services of a given host. The host and site must be set via HTML variables.', + 'description': _('All services of a given host. 
The host and site must be set via HTTP variables.'), 'group_painters': [('host_with_state', 'hoststatus')], 'hard_filters': [], 'hard_filtervars': [('st0', 'on'), @@ -374,7 +560,7 @@ ('stp', 'on')], 'hidden': True, 'hidebutton' : True, - 'hide_filters': ['site', 'host'], + 'hide_filters': ['siteopt', 'host'], 'icon': 'services', 'layout': 'boxed', 'mustsearch': False, @@ -393,7 +579,7 @@ 'hosts': {'browser_reload': 30, 'column_headers': 'off', 'datasource': 'services', - 'description': 'All services of of hosts which match a name', + 'description': _('All services of hosts which match a name'), 'group_painters': [('sitealias', 'sitehosts'), ('host', 'host')], 'hard_filters': [], 'hard_filtervars': [('host', ''), @@ -410,13 +596,7 @@ 'name': 'hosts', 'num_columns': 1, 'owner': '', - 'painters': [('service_state', None), - ('service_description', 'service'), - ('svc_plugin_output', None), - ('svc_state_age', None), - ('svc_check_age', None), - ('service_icons', None), - ('perfometer', None)], + 'painters': service_view_painters, 'play_sounds': False, 'public': True, 'show_filters': ['hostregex', 'svcstate','siteopt'], @@ -428,7 +608,7 @@ 'host_ok': {'browser_reload': 30, 'column_headers': 'pergroup', 'datasource': 'services', - 'description': 'All services of a given host that are in state OK', + 'description': _('All services of a given host that are in state OK'), 'group_painters': [('host_with_state', 'hoststatus')], 'hard_filters': ['svcstate'], 'hard_filtervars': [('st0', 'on'), @@ -437,20 +617,14 @@ ('st3', ''), ('stp', '')], 'hidden': True, - 'hide_filters': ['site', 'host'], + 'hide_filters': ['siteopt', 'host'], 'hidebutton' : True, 'layout': 'boxed', 'mustsearch': False, 'name': 'host_lk', 'num_columns': 2, 'owner': '', - 'painters': [('service_state', None), - ('service_description', 'service'), - ('svc_plugin_output', None), - ('svc_state_age', None), - ('svc_check_age', None), - ('service_icons', None), - ('perfometer', None)], + 'painters': service_view_painters, 'play_sounds': False, 'public': True, 'show_filters': ['svcstate'], @@ -460,7 +634,7 @@ 'host_warn': {'browser_reload': 30, 'column_headers': 'pergroup', 'datasource': 'services', - 'description': 'All services of a given host that are in state WARN', + 'description': _('All services of a given host that are in state WARN'), 'group_painters': [('host_with_state', 'hoststatus')], 'hard_filters': ['svcstate'], 'hard_filtervars': [('st0', ''), @@ -469,20 +643,14 @@ ('st3', ''), ('stp', '')], 'hidden': True, - 'hide_filters': ['site', 'host'], + 'hide_filters': ['siteopt', 'host'], 'hidebutton' : True, 'layout': 'boxed', 'mustsearch': False, 'name': 'host_warn', 'num_columns': 2, 'owner': '', - 'painters': [('service_state', None), - ('service_description', 'service'), - ('svc_plugin_output', None), - ('svc_state_age', None), - ('svc_check_age', None), - ('service_icons', None), - ('perfometer', None)], + 'painters': service_view_painters, 'play_sounds': False, 'public': True, 'show_filters': ['svcstate'], @@ -492,7 +660,7 @@ 'host_crit': {'browser_reload': 30, 'column_headers': 'pergroup', 'datasource': 'services', - 'description': 'All services of a given host that are in state CRIT', + 'description': _('All services of a given host that are in state CRIT'), 'group_painters': [('host_with_state', 'hoststatus')], 'hard_filters': ['svcstate'], 'hard_filtervars': [('st0', ''), @@ -501,20 +669,14 @@ ('st3', ''), ('stp', '')], 'hidden': True, - 'hide_filters': ['site', 'host'], + 'hide_filters': ['siteopt', 'host'], 
'hidebutton' : True, 'layout': 'boxed', 'mustsearch': False, 'name': 'host_crit', 'num_columns': 2, 'owner': '', - 'painters': [('service_state', None), - ('service_description', 'service'), - ('svc_plugin_output', None), - ('svc_state_age', None), - ('svc_check_age', None), - ('service_icons', None), - ('perfometer', None)], + 'painters': service_view_painters, 'play_sounds': False, 'public': True, 'show_filters': ['svcstate'], @@ -524,7 +686,7 @@ 'host_unknown': {'browser_reload': 30, 'column_headers': 'pergroup', 'datasource': 'services', - 'description': 'All services of a given host that are in state UNKNOWN', + 'description': _('All services of a given host that are in state UNKNOWN'), 'group_painters': [('host_with_state', 'hoststatus')], 'hard_filters': ['svcstate'], 'hard_filtervars': [('st0', ''), @@ -533,20 +695,14 @@ ('st3', 'on'), ('stp', '')], 'hidden': True, - 'hide_filters': ['site', 'host'], + 'hide_filters': ['siteopt', 'host'], 'hidebutton' : True, 'layout': 'boxed', 'mustsearch': False, 'name': 'host_unknown', 'num_columns': 2, 'owner': '', - 'painters': [('service_state', None), - ('service_description', 'service'), - ('svc_plugin_output', None), - ('svc_state_age', None), - ('svc_check_age', None), - ('service_icons', None), - ('perfometer', None)], + 'painters': service_view_painters, 'play_sounds': False, 'public': True, 'show_filters': ['svcstate'], @@ -556,7 +712,7 @@ 'host_pending': {'browser_reload': 30, 'column_headers': 'pergroup', 'datasource': 'services', - 'description': 'All services of a given host that are PENDING', + 'description': _('All services of a given host that are PENDING'), 'group_painters': [('host_with_state', 'hoststatus')], 'hard_filters': ['svcstate'], 'hard_filtervars': [('st0', ''), @@ -565,20 +721,14 @@ ('st3', ''), ('stp', 'on')], 'hidden': True, - 'hide_filters': ['site', 'host'], + 'hide_filters': ['siteopt', 'host'], 'hidebutton' : True, 'layout': 'boxed', 'mustsearch': False, 'name': 'host_pending', 'num_columns': 2, 'owner': '', - 'painters': [('service_state', None), - ('service_description', 'service'), - ('svc_plugin_output', None), - ('svc_state_age', None), - ('svc_check_age', None), - ('service_icons', None), - ('perfometer', None)], + 'painters': service_view_painters, 'play_sounds': False, 'public': True, 'show_filters': ['svcstate'], @@ -588,7 +738,7 @@ 'problemsofhost': {'browser_reload': 30, 'column_headers': 'pergroup', 'datasource': 'services', - 'description': 'All problem services of a given host. The host and site must be set via HTML variables.', + 'description': _('All problem services of a given host. 
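
The host, host_ok, host_warn, host_crit, host_unknown and host_pending views in these hunks differ only in which of the st0..st3/stp variables are preset ('on' preselects the corresponding state checkbox) and now all render through the shared service_view_painters list, presumably defined earlier in this file. A minimal sketch of the state preset, not part of the patch:

    import urllib

    def state_preset(on_states):
        # st0=OK, st1=WARN, st2=CRIT, st3=UNKNOWN, stp=PENDING; an empty
        # value leaves the checkbox unchecked, 'on' preselects it.
        return [ (var, var in on_states and 'on' or '')
                 for var in ('st0', 'st1', 'st2', 'st3', 'stp') ]

    # hard_filtervars preset of the 'host_crit' view, as URL variables:
    print urllib.urlencode(state_preset(['st2']))   # st0=&st1=&st2=on&st3=&stp=
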
The host and site must be set via HTTP variables.'), 'group_painters': [('host_with_state', 'hoststatus')], 'hard_filters': ['svcstate'], 'hard_filtervars': [('st0', ''), @@ -598,19 +748,13 @@ ('stp', '')], 'hidden': True, 'hidebutton' : True, - 'hide_filters': ['site', 'host'], + 'hide_filters': ['siteopt', 'host'], 'layout': 'boxed', 'mustsearch': False, 'name': 'problemsofhost', 'num_columns': 2, 'owner': '', - 'painters': [('service_state', None), - ('service_description', 'service'), - ('svc_plugin_output', None), - ('svc_state_age', None), - ('svc_check_age', None), - ('service_icons', None), - ('perfometer', None)], + 'painters': service_view_painters, 'play_sounds': False, 'public': True, 'show_filters': ['svcstate'], @@ -618,9 +762,9 @@ 'linktitle' : _('Host Problems'), 'title': _('Problems of host')}, 'hostgroup': {'browser_reload': 30, - 'column_headers': 'off', + 'column_headers': 'pergroup', 'datasource': 'hosts', - 'description': '', + 'description': _('Lists members of a host group with the number of services in the different states.'), 'group_painters': [('site_icon', None), ('sitealias', 'sitehosts')], 'hard_filters': [], @@ -628,7 +772,7 @@ 'hidden': True, 'hide_filters': ['hostgroup'], 'layout': 'boxed', - 'linktitle': _('Hostgroup Overview'), + 'linktitle': _('Host Group Overview'), 'mustsearch': False, 'name': 'hostgroup', 'num_columns': 2, @@ -646,12 +790,12 @@ 'public': True, 'show_filters': [], 'sorters': [('site', False), ('site_host', False)], - 'title': _('Hostgroup'), + 'title': _('Host Group'), 'topic': _('hidden')}, 'hostgroupservices': {'browser_reload': 90, 'column_headers': 'off', 'datasource': 'services', - 'description': 'All services of a certain hostgroup', + 'description': _('All services of a certain hostgroup'), 'group_painters': [('sitealias', 'sitehosts'), ('host_with_state', 'host')], 'hard_filters': [], @@ -717,7 +861,7 @@ 'hostgroupgrid': {'browser_reload': 30, 'column_headers': 'pergroup', 'datasource': 'hostsbygroup', - 'description': 'Hosts grouped by hostgroups, with a brief list of all services', + 'description': _('Hosts grouped by hostgroups, with a brief list of all services'), 'group_painters': [('sitealias', 'sitehosts'), ('hg_alias', 'hostgroup')], 'hard_filters': [], @@ -746,12 +890,12 @@ 'sorters': [('site', False), ('hostgroup', False), ('site_host', False)], - 'title': _('Hostgroups (Grid)'), - 'topic': _('Hostgroups')}, + 'title': _('Host Groups (Grid)'), + 'topic': _('Host Groups')}, 'hostgroups': {'browser_reload': 30, 'column_headers': 'pergroup', 'datasource': 'hostgroups', - 'description': 'A short overview over all host groups, without an explicity listing of the actual hosts', + 'description': _('A short overview over all host groups, without an explicity listing of the actual hosts'), 'group_painters': [('sitealias', 'sitehosts')], 'hard_filters': [], 'hard_filtervars': [], @@ -774,15 +918,15 @@ ('hg_num_services_unknown', None), ('hg_num_services_pending', None)], 'public': True, - 'show_filters': [], + 'show_filters': ['hostgroupnameregex', 'hostgroupvisibility'], 'sorters': [], - 'title': _('Hostgroups (Summary)'), - 'topic': _('Hostgroups') }, + 'title': _('Host Groups (Summary)'), + 'topic': _('Host Groups') }, 'hostproblems': {'browser_reload': 30, 'column_headers': 'pergroup', 'datasource': 'hosts', - 'description': 'A complete list of all host problems with a search form for selecting handled and unhandled', + 'description': _('A complete list of all host problems with a search form for selecting handled and 
unhandled'), 'group_painters': [('host_state', None)], 'hard_filters': ['host_scheduled_downtime_depth', 'summary_host'], 'hard_filtervars': [('is_host_scheduled_downtime_depth', @@ -829,7 +973,7 @@ 'hostsbygroup': {'browser_reload': 30, 'column_headers': 'pergroup', 'datasource': 'hostsbygroup', - 'description': 'A complete listing of\r\nall host groups and each of their hosts', + 'description': _('A complete listing of all host groups and each of their hosts'), 'group_painters': [('sitealias', 'sitehosts'), ('hg_alias', 'hostgroup')], 'hard_filters': [], @@ -863,17 +1007,17 @@ 'sorters': [('site', False), ('hostgroup', False), ('site_host', False)], - 'title': _('Hostgroups'), - 'topic': _('Hostgroups')}, + 'title': _('Host Groups'), + 'topic': _('Host Groups')}, 'hoststatus': {'browser_reload': 30, 'column_headers': 'pergroup', 'datasource': 'hosts', - 'description': '', + 'description': _('Shows details of a host.'), 'group_painters': [], 'hard_filters': [], 'hard_filtervars': [], 'hidden': True, - 'hide_filters': ['site', 'host'], + 'hide_filters': ['siteopt', 'host'], 'icon' : 'status', 'layout': 'dataset', 'mustsearch': False, @@ -888,11 +1032,13 @@ ('host_group_memberlist', None), ('host_parents', None), ('host_childs', None), + ('host_servicelevel', None), ('host_contact_groups', None), ('host_contacts', None), ('host_plugin_output', None), ('host_perf_data', None), ('host_attempt', None), + ('host_notification_number', None), ('host_check_interval', None), ('host_check_type', None), ('host_state_age', None), @@ -905,6 +1051,7 @@ ('host_in_downtime', None), ('host_in_notifper', None), ('host_notifper', None), + ('host_custom_vars', None), ('num_services', None), ('num_services_ok', 'host_ok'), ('num_services_warn', 'host_warn'), @@ -922,7 +1069,7 @@ 'pendingsvc': {'browser_reload': 30, 'column_headers': 'off', 'datasource': 'services', - 'description': '', + 'description': _('Lists all services in state PENDING.'), 'group_painters': [('host', 'host')], 'hard_filters': ['summary_host', 'svcstate'], 'hard_filtervars': [('is_summary_host', '0'), @@ -933,7 +1080,7 @@ ('stp', 'on')], 'hidden': False, 'hide_filters': [], - 'layout': 'boxed', + 'layout': 'table', 'linktitle': _('Pending Services'), 'mustsearch': False, 'name': 'pendingsvc', @@ -949,7 +1096,7 @@ 'searchhost': {'browser_reload': 60, 'column_headers': 'pergroup', 'datasource': 'hosts', - 'description': 'A form for search hosts after a couple of criteria.', + 'description': _('A form for searching hosts using flexible filters'), 'group_painters': [('sitealias', None)], 'hard_filters': [], 'hard_filtervars': [('is_host_scheduled_downtime_depth', @@ -982,24 +1129,32 @@ ('num_services_pending', 'host_pending')], 'play_sounds': False, 'public': True, - 'show_filters': ['host_scheduled_downtime_depth', - 'host_in_notification_period', - 'hoststate', - 'siteopt', - 'hostregex', - 'summary_host', - 'opthostgroup', - 'opthost_contactgroup', - 'host_check_command', - 'host_address', - ], + 'show_filters': [ + 'host_scheduled_downtime_depth', + 'host_in_notification_period', + 'hoststate', + 'siteopt', + 'hostregex', + 'summary_host', + 'hostgroups', + 'opthostgroup', + 'opthost_contactgroup', + 'host_check_command', + 'host_address', + 'host_notif_number', + 'host_staleness', + 'host_tags', + 'hostalias', + 'host_favorites', + 'host_num_services', + ], 'sorters': [], 'title': _('Host search'), 'topic': _('Hosts')}, 'searchsvc': {'browser_reload': 60, 'column_headers': 'pergroup', 'datasource': 'services', - 'description': 'Almost all 
available filters, used for searching services and maybe doing actions', + 'description': _('Almost all available filters, used for searching services and maybe doing actions'), 'group_painters': [('sitealias', 'sitehosts'), ('host', 'host')], 'hard_filters': [], @@ -1034,18 +1189,14 @@ 'name': 'searchsvc', 'num_columns': 1, 'owner': '', - 'painters': [('service_state', None), - ('service_description', 'service'), - ('svc_plugin_output', None), - ('service_icons', None), - ('svc_state_age', None), - ('svc_check_age', None), - ('perfometer', None)], + 'painters': service_view_painters, 'play_sounds': False, 'public': True, 'show_filters': ['service_in_notification_period', 'optservicegroup', 'optservice_contactgroup', + 'hostgroups', + 'servicegroups', 'service_notifications_enabled', 'host_in_notification_period', 'in_downtime', @@ -1056,6 +1207,7 @@ 'service_active_checks_enabled', 'summary_host', 'serviceregex', + 'service_display_name', 'check_command', 'hoststate', 'svcstate', @@ -1067,7 +1219,14 @@ 'svc_last_state_change', 'svc_last_check', 'siteopt', - 'aggr_service_used'], + 'aggr_service_used', + 'svc_notif_number', + 'service_staleness', + 'host_tags', + 'hostalias', + 'host_favorites', + 'service_favorites', + ], 'sorters': [('site', False), ('site_host', False), ('svcdescr', False)], @@ -1076,12 +1235,12 @@ 'service': {'browser_reload': 30, 'column_headers': 'pergroup', 'datasource': 'services', - 'description': 'Status of a single service, to be used for linking', + 'description': _('Status of a single service, to be used for linking'), 'group_painters': [], 'hard_filters': [], 'hard_filtervars': [], 'hidden': True, - 'hide_filters': ['site', 'service', 'host'], + 'hide_filters': ['siteopt', 'service', 'host'], 'layout': 'dataset', 'mustsearch': False, 'name': 'service', @@ -1093,6 +1252,7 @@ ('service_icons', None), ('service_state', None), ('svc_group_memberlist', None), + ('svc_servicelevel', None), ('svc_contact_groups', None), ('svc_contacts', None), ('svc_plugin_output', None), @@ -1102,8 +1262,10 @@ ('svc_check_command', None), ('svc_check_interval', None), ('svc_attempt', None), + ('svc_notification_number', None), ('svc_check_type', None), ('svc_state_age', None), + ('svc_last_time_ok', None), ('svc_check_age', None), ('svc_next_check', None), ('svc_next_notification', None), @@ -1113,6 +1275,8 @@ ('svc_in_downtime', None), ('svc_in_notifper', None), ('svc_notifper', None), + ('service_display_name', None), + ('svc_custom_vars', None), ('check_manpage', None), ('svc_custom_notes', None), ('svc_pnpgraph', None), @@ -1123,10 +1287,10 @@ 'linktitle': _('Details'), 'title': _('Service')}, 'servicedesc': {'browser_reload': 30, - 'column_headers': 'off', + 'column_headers': 'pergroup', 'datasource': 'services', - 'description': 'All Services with\r\na certain description\r\n', - 'group_painters': [('service_description', 'servicedesc')], + 'description': _('All Services with a certain description'), + 'group_painters': [], 'hard_filters': [], 'hard_filtervars': [('host', ''), ('st0', 'on'), @@ -1144,19 +1308,21 @@ 'num_columns': 2, 'owner': '', 'painters': [('service_state', None), - ('host', 'host'), + ('service_icons', None), + ('host', 'service'), ('svc_plugin_output', None), ('perfometer', None)], 'public': True, 'show_filters': ['hostregex', 'svcstate', 'opthostgroup'], 'sorters': [('site', False), ('site_host', False)], + 'user_sortable' : 'on', 'linktitle': _('Service globally'), 'title': _('All Services with this description:')}, 'servicedescpnp': {'browser_reload': 
90, 'column_headers': 'off', 'datasource': 'services', - 'description': 'PNP graphs for all Services with\r\na certain description\r\n', + 'description': _('PNP graphs for all Services with a certain description'), 'group_painters': [('host', 'hostpnp')], 'hard_filters': [], 'hard_filtervars': [('host', ''), @@ -1188,7 +1354,7 @@ 'servicegroup': {'browser_reload': 30, 'column_headers': 'pergroup', 'datasource': 'services', - 'description': '', + 'description': _('Services of a service group'), 'group_painters': [('sitealias', 'sitehosts'), ('host', 'host')], 'hard_filters': [], @@ -1196,26 +1362,24 @@ 'hidden': True, 'hide_filters': ['servicegroup'], 'layout': 'table', - 'linktitle': _('Servicegroup'), + 'linktitle': _('Service Group'), 'mustsearch': False, 'name': 'servicegroup', 'num_columns': 1, 'owner': '', - 'painters': [('service_state', None), - ('svc_state_age', None), - ('service_description', 'service'), - ('svc_plugin_output', None), - ('perfometer', None)], + 'painters': service_view_painters, 'play_sounds': False, 'public': True, 'show_filters': [], - 'sorters': [], - 'title': _('Servicegroup'), + 'sorters': [('site', False), + ('site_host', False), + ('svcdescr', False)], + 'title': _('Service Group'), 'topic': _('Other')}, 'sitehosts': {'browser_reload': 30, 'column_headers': 'off', 'datasource': 'hosts', - 'description': 'Link view showing all\r\nhosts of one site', + 'description': _('Link view showing all hosts of one site'), 'group_painters': [('site_icon', None), ('sitealias', 'sitesvcs')], 'hard_filters': ['summary_host'], 'hard_filtervars': [ @@ -1245,7 +1409,7 @@ 'svcbygroups': {'browser_reload': 30, 'column_headers': 'pergroup', 'datasource': 'servicesbygroup', - 'description': 'Service grouped by service groups. Services not member of a group are not displayed. Services being in more groups, are displayed once for each group', + 'description': _('Service grouped by service groups. Services not member of a group are not displayed. Services being in more groups, are displayed once for each group'), 'group_painters': [('sg_alias', 'servicegroup')], 'hard_filters': [], 'hard_filtervars': [], @@ -1269,11 +1433,11 @@ ('site_host', False), ('svcdescr', False)], 'title': _('Services by group'), - 'topic': _('Servicegroups')}, + 'topic': _('Service Groups')}, 'svcbyhgroups': {'browser_reload': 30, 'column_headers': 'pergroup', 'datasource': 'servicesbyhostgroup', - 'description': 'Service grouped by host groups. Services not member of a host group are not displayed. Services being in more groups, are displayed once for each group', + 'description': _('Service grouped by host groups. Services not member of a host group are not displayed. 
Services being in more groups, are displayed once for each group'), 'group_painters': [('hg_alias', 'hostgroup')], 'hard_filters': [], 'hard_filtervars': [], @@ -1284,12 +1448,7 @@ 'name': 'svcbyhgroups', 'num_columns': 2, 'owner': '', - 'painters': [('host', 'host'), - ('service_state', None), - ('svc_state_age', None), - ('service_description', None), - ('service_icons', None), - ('svc_plugin_output', None)], + 'painters': host_service_view_painters, 'public': True, 'show_filters': [], 'sorters': [('hostgroup', False), @@ -1300,7 +1459,7 @@ 'svcgroups': {'browser_reload': 30, 'column_headers': 'pergroup', 'datasource': 'servicegroups', - 'description': 'A short overview over all servicegroups, without explicity listing of the actual hosts and services', + 'description': _('A short overview over all service groups, without explicity listing of the actual hosts and services'), 'group_painters': [('sitealias', 'sitehosts')], 'hard_filters': [], 'hard_filtervars': [], @@ -1319,14 +1478,14 @@ ('sg_num_services_unknown', None), ('sg_num_services_pending', None)], 'public': True, - 'show_filters': [], + 'show_filters': ['servicegroupnameregex'], 'sorters': [], - 'title': _('Servicegroups (Summary)'), - 'topic': _('Servicegroups')}, + 'title': _('Service Groups (Summary)'), + 'topic': _('Service Groups')}, 'svcgroups_grid': {'browser_reload': 30, 'column_headers': 'pergroup', 'datasource': 'servicegroups', - 'description': 'A short overview over all servicegroups, without explicity listing of the actual hosts and services', + 'description': _('A short overview over all service groups, without explicity listing of the actual hosts and services'), 'group_painters': [('sitealias', 'sitehosts')], 'hard_filters': [], 'hard_filtervars': [], @@ -1343,14 +1502,14 @@ 'public': True, 'show_filters': [], 'sorters': [], - 'title': _('Servicegroups (Grid)'), - 'topic': _('Servicegroups'), + 'title': _('Service Groups (Grid)'), + 'topic': _('Service Groups'), }, 'svcproblems': { 'browser_reload': 30, 'column_headers': 'pergroup', 'datasource': 'services', - 'description': 'All problems of services not currently in a downtime.', + 'description': _('All problems of services not currently in a downtime.'), 'group_painters': [('service_state', None)], 'hard_filters': ['summary_host', 'in_downtime'], 'hard_filtervars': [('is_service_in_notification_period', '-1'), @@ -1373,19 +1532,15 @@ 'name': 'svcproblems', 'num_columns': 1, 'owner': '', - 'painters': [('host', 'host'), - ('service_description', 'service'), - ('svc_plugin_output', None), - ('svc_state_age', None), - ('svc_check_age', None), - ('service_icons', None), - ('perfometer', None)], + 'painters': host_service_view_painters, 'play_sounds': True, 'public': True, 'show_filters': ['service_in_notification_period', 'service_acknowledged', 'svcstate', 'svchardstate', + 'serviceregex', + 'host_tags', 'hoststate'], 'sorters': [('svcstate', True), ('stateage', False), @@ -1396,7 +1551,7 @@ 'hosttiles': {'browser_reload': 30, 'column_headers': 'off', 'datasource': 'hostsbygroup', - 'description': '', + 'description': _('Displays hosts in a tiled layout, where each host is a single tile.'), 'group_painters': [('hg_name', 'hostgroup'), ('hg_alias', None)], 'hard_filters': ['summary_host'], @@ -1432,7 +1587,7 @@ 'searchpnp': {'browser_reload': 90, 'column_headers': 'off', 'datasource': 'services', - 'description': 'Search for services and display PNP graphs', + 'description': _('Search for services and display PNP graphs'), 'group_painters': [('sitealias', 
'sitehosts'), ('host', 'host'), ('service_description', 'service'), @@ -1501,7 +1656,7 @@ 'hostpnp': {'browser_reload': 90, 'column_headers': 'off', 'datasource': 'services', - 'description': 'All PNP performance graphs for a certain host.', + 'description': _('All PNP performance graphs for a certain host.'), 'group_painters': [('sitealias', 'sitehosts'), ('host', 'host'), ('service_description', 'service'), @@ -1521,7 +1676,7 @@ ('stp', '')], 'hidden': True, 'icon' : 'pnp', - 'hide_filters': ['site', 'host'], + 'hide_filters': ['siteopt', 'host'], 'layout': 'boxed', 'mustsearch': False, 'name': 'hostpnp', @@ -1540,7 +1695,7 @@ 'recentsvc': {'browser_reload': 30, 'column_headers': 'pergroup', 'datasource': 'services', - 'description': 'Service whose state changed in the last\r\n60 minutes', + 'description': _('Service whose state changed in the last 60 minutes'), 'group_painters': [], 'hard_filters': ['summary_host'], 'hard_filtervars': [('svc_last_state_change_from_range', '3600'), @@ -1559,13 +1714,7 @@ 'name': 'svcrecent', 'num_columns': 1, 'owner': '', - 'painters': [('host', 'host'), - ('service_state', None), - ('service_description', 'service'), - ('svc_plugin_output', None), - ('service_icons', None), - ('svc_state_age', None), - ('svc_check_age', None)], + 'painters': host_service_view_painters, 'play_sounds': False, 'public': True, 'show_filters': ['svc_last_state_change', 'svcstate', 'siteopt'], @@ -1575,44 +1724,35 @@ 'uncheckedsvc': {'browser_reload': 30, 'column_headers': 'pergroup', 'datasource': 'services', - 'description': 'Services that have not been checked within the last 24 hours (pending services excluded).', - 'group_painters': [('host', 'host')], - 'hard_filters': ['summary_host'], - 'hard_filtervars': [('svc_last_check_until_range', '3600'), - ('svc_last_check_until', '24'), - ('is_summary_host', '0'), - ('st0', 'on'), - ('st1', 'on'), - ('st2', 'on'), - ('st3', 'on'), - ('stp', '')], + 'description': _('Services that have not been checked for too long according to their configured check intervals.'), + 'group_painters': [('host', 'host', '')], + 'hard_filters': ['service_staleness', 'summary_host'], + 'hard_filtervars': [('is_service_staleness', '1'), + ('is_summary_host', '0')], 'hidden': False, 'hide_filters': [], + 'hidebutton': False, + 'icon': None, 'layout': 'table', - 'linktitle': _('Unchecked services'), + 'mobile': False, 'mustsearch': False, 'name': 'uncheckedsvc', 'num_columns': 1, - 'owner': '', - 'painters': [('service_state', None), - ('service_description', 'service'), - ('svc_plugin_output', None), - ('service_icons', None), - ('svc_state_age', None), - ('svc_check_age', None)], + 'painters': service_view_painters, 'play_sounds': False, 'public': True, - 'show_filters': ['svc_last_check', 'svcstate'], + 'show_filters': [], 'sorters': [('site_host', False), ('svcdescr', False)], - 'title': _('Unchecked services'), - 'topic': _('Problems')}, + 'title': _('Stale services'), + 'topic': _('Problems'), + 'user_sortable': 'on'}, 'events': {'browser_reload': 0, 'column_headers': 'pergroup', 'datasource': 'log_events', - 'description': 'All historic events of hosts or services (alerts, downtimes, etc.)', + 'description': _('All historic events of hosts or services (alerts, downtimes, etc.)'), 'group_painters': [('log_date', None)], 'hard_filters': [], - 'hard_filtervars': [('site', ''), + 'hard_filtervars': [('siteopt', ''), ('host', ''), ('service', ''), ('logtime_from_range', '86400'), @@ -1643,7 +1783,7 @@ 'log_plugin_output', 'log_state', 'log_class'], 
- 'sorters': [('log_time', False), ('log_lineno', False)], + 'sorters': [('log_time', True), ('log_lineno', True)], 'title': _('Host- and Service events'), 'topic': _('Other')}, @@ -1651,14 +1791,14 @@ 'hostevents': {'browser_reload': 0, 'column_headers': 'pergroup', 'datasource': 'log_host_events', - 'description': 'All historic events concerning the state of a certain host (without services)', + 'description': _('All historic events concerning the state of a certain host (without services)'), 'group_painters': [('log_date', None)], 'hard_filters': [], 'hard_filtervars': [('logtime_from_range', '86400'), - ('logtime_from', '31'), + ('logtime_from', '7'), ], 'hidden': True, - 'hide_filters': ['site', 'host'], + 'hide_filters': ['siteopt', 'host'], 'icon' : 'history', 'layout': 'table', 'linktitle': _('Host history'), @@ -1676,19 +1816,48 @@ 'play_sounds': False, 'public': True, 'show_filters': ['logtime', 'log_state'], - 'sorters': [('log_time', False), ('log_lineno', False)], + 'sorters': [('log_time', True), ('log_lineno', True)], 'title': _('Events of host')}, + +'host_dt_hist': {'browser_reload': 0, + 'column_headers': 'pergroup', + 'datasource': 'log_events', + 'description': _('All historic scheduled downtimes of a certain host'), + 'group_painters': [], + 'hard_filters': [ 'log_type' ], + 'hard_filtervars': [('logtime_from_range', '86400'), + ('logtime_from', '60'), + ('log_type', 'HOST DOWNTIME ALERT'), + ], + 'hidden': True, + 'hide_filters': ['siteopt', 'host' ], + 'icon' : 'downtime', + 'layout': 'table', + 'linktitle': _('Host Dt-History'), + 'mustsearch': False, + 'num_columns': 1, + 'painters': [('log_icon', None), + ('log_time', None), + ('log_state_type', None), + ('log_plugin_output', None), + ], + 'play_sounds': False, + 'public': True, + 'show_filters': ['logtime', ], + 'sorters': [('log_time', True), ('log_lineno', True)], + 'title': _('Historic downtimes of host')}, + 'svcevents': {'browser_reload': 0, 'column_headers': 'pergroup', 'datasource': 'log_events', - 'description': 'All historic events concerning the state of a certain service', + 'description': _('All historic events concerning the state of a certain service'), 'group_painters': [('log_date', None)], 'hard_filters': [], 'hard_filtervars': [('logtime_from_range', '86400'), - ('logtime_from', '31'), + ('logtime_from', '7'), ], 'hidden': True, - 'hide_filters': ['site', 'host', 'service'], + 'hide_filters': ['siteopt', 'host', 'service'], 'icon' : 'history', 'layout': 'table', 'linktitle': _('History'), @@ -1705,19 +1874,48 @@ 'play_sounds': False, 'public': True, 'show_filters': ['logtime', 'log_state'], - 'sorters': [('log_time', False), ('log_lineno', False)], + 'sorters': [('log_time', True), ('log_lineno', True)], 'title': _('Events of service')}, + +'svc_dt_hist': {'browser_reload': 0, + 'column_headers': 'pergroup', + 'datasource': 'log_events', + 'description': _('All historic scheduled downtimes of a certain service'), + 'group_painters': [], + 'hard_filters': [ 'log_type' ], + 'hard_filtervars': [('logtime_from_range', '86400'), + ('logtime_from', '60'), + ('log_type', '(HOST|SERVICE) DOWNTIME ALERT'), + ], + 'hidden': True, + 'hide_filters': ['siteopt', 'host', 'service'], + 'icon' : 'downtime', + 'layout': 'table', + 'linktitle': _('Downtime-History'), + 'mustsearch': False, + 'num_columns': 1, + 'painters': [('log_icon', None), + ('log_time', None), + ('log_state_type', None), + ('log_plugin_output', None), + ], + 'play_sounds': False, + 'public': True, + 'show_filters': ['logtime', ], + 
'sorters': [('log_time', True), ('log_lineno', True)], + 'title': _('Historic downtimes of service')}, + 'hostsvcevents': {'browser_reload': 0, 'column_headers': 'pergroup', 'datasource': 'log_events', - 'description': 'All historic events concerning the state of a certain host (including services)', + 'description': _('All historic events concerning the state of a certain host (including services)'), 'group_painters': [('log_date', None)], 'hard_filters': [], 'hard_filtervars': [('logtime_from_range', '86400'), - ('logtime_from', '31'), + ('logtime_from', '7'), ], 'hidden': True, - 'hide_filters': ['site', 'host'], + 'hide_filters': ['siteopt', 'host'], 'icon' : 'history', 'layout': 'table', 'linktitle': _('Host/Svc history'), @@ -1735,13 +1933,13 @@ ], 'play_sounds': False, 'public': True, - 'show_filters': ['logtime', 'log_state', 'log_state'], - 'sorters': [('log_time', False), ('log_lineno', False)], + 'show_filters': ['logtime', 'log_state', 'log_class'], + 'sorters': [('log_time', True), ('log_lineno', True)], 'title': _('Events of host & services')}, 'logfile': {'browser_reload': 0, 'column_headers': 'off', 'datasource': 'log', - 'description': '', + 'description': _('Displays entries from the logfile of the monitoring core.'), 'group_painters': [('log_date', None)], 'hard_filters': [], 'hard_filtervars': [('optservicegroup', ''), @@ -1787,14 +1985,14 @@ 'opthostgroup', 'logtime', 'log_state'], - 'sorters': [('log_time', False), ('log_lineno', False)], + 'sorters': [('log_time', True), ('log_lineno', True)], 'title': _('Search Global Logfile'), 'topic': _('Other')}, 'sitesvcs': {'browser_reload': 60, 'column_headers': 'pergroup', 'datasource': 'services', - 'description': 'All services of a given site.', + 'description': _('All services of a given site.'), 'group_painters': [('host_with_state', 'hoststatus')], 'hard_filters': [], 'hard_filtervars': [('optservicegroup', ''), @@ -1812,7 +2010,7 @@ ('service_output', '')], 'hidden': True, 'hidebutton': True, - 'hide_filters': ['site'], + 'hide_filters': ['siteopt'], 'layout': 'boxed', 'linktitle': _('Services of Site'), 'mustsearch': False, @@ -1842,7 +2040,7 @@ 'alertstats': {'browser_reload': 0, 'column_headers': 'pergroup', 'datasource': 'alert_stats', - 'description': '', + 'description': _('Shows number of alerts grouped for each service.'), 'group_painters': [], 'hard_filters': [], 'hard_filtervars': [('optservicegroup', ''), @@ -1898,7 +2096,7 @@ 'nagstamon_hosts': {'browser_reload': 30, 'column_headers': 'pergroup', 'datasource': 'hosts', - 'description': 'The view is intended for NagStaMon as web service.', + 'description': _('The view is intended for NagStaMon as web service.'), 'group_painters': [('host_state', None)], 'hard_filters': ['summary_host'], 'hard_filtervars': [('is_host_scheduled_downtime_depth', @@ -1913,7 +2111,7 @@ ('is_host_notifications_enabled', '-1'), ('is_summary_host', '0')], - 'hidden': False, + 'hidden': True, 'hide_filters': [], 'hidebutton': True, 'layout': 'table', @@ -1936,7 +2134,9 @@ ('host_acknowledged', None, ''), ('sitename_plain', None, ''), ('host_flapping', None, ''), - ('host_is_active', None, ''),], + ('host_is_active', None, ''), + ('host_notifications_enabled', None, ''), + ], 'play_sounds': False, 'public': True, 'show_filters': ['host_scheduled_downtime_depth', @@ -1951,7 +2151,7 @@ 'nagstamon_svc': {'browser_reload': 30, 'column_headers': 'pergroup', 'datasource': 'services', - 'description': 'This view is intended for usage as web service for NagStaMon.', + 'description': _('This 
view is intended for usage as web service for NagStaMon.'), 'group_painters': [('service_state', None)], 'hard_filters': ['summary_host'], 'hard_filtervars': [('is_service_in_notification_period', @@ -1974,7 +2174,7 @@ ('st3', 'on'), ('stp', ''), ('is_in_downtime', '-1')], - 'hidden': False, + 'hidden': True, 'hide_filters': [], 'hidebutton': True, 'layout': 'table', @@ -2036,11 +2236,11 @@ 'browser_reload': 0, 'column_headers': 'pergroup', 'datasource': 'bi_aggregations', - 'description': '', + 'description': _('Displays all BI aggregations.'), 'group_painters': [('aggr_group', 'aggr_group')], 'hard_filters': [], 'hard_filtervars': [('host', ''), - ('aggr_name', ''), + ('aggr_name_regex', ''), ('aggr_output', ''), ('birs0', 'on'), ('birs1', 'on'), @@ -2069,14 +2269,15 @@ 'name': 'aggr_all', 'num_columns': 1, 'owner': 'omdadmin', - 'painters': [('aggr_state', None, ''), + 'painters': [('aggr_icons', None, ''), + ('aggr_state', None, ''), ('aggr_treestate', None, ''), ('aggr_hosts', None, '')], 'play_sounds': False, 'public': False, 'show_filters': ['aggr_group', 'aggr_hosts', - 'aggr_name', + 'aggr_name_regex', 'aggr_state', 'aggr_output', 'aggr_assumed_state', @@ -2090,11 +2291,11 @@ 'browser_reload': 0, 'column_headers': 'pergroup', 'datasource': 'bi_aggregations', - 'description': '', + 'description': _('Displays all aggregations of a certain group.'), 'group_painters': [], 'hard_filters': [], 'hard_filtervars': [('host', ''), - ('aggr_name', ''), + ('aggr_name_regex', ''), ('aggr_output', ''), ('birs0', 'on'), ('birs1', 'on'), @@ -2123,13 +2324,14 @@ 'name': 'aggr_group', 'num_columns': 1, 'owner': 'omdadmin', - 'painters': [('aggr_state', None, ''), + 'painters': [('aggr_icons', None, ''), + ('aggr_state', None, ''), ('aggr_treestate', None, ''), ('aggr_hosts', None, '')], 'play_sounds': False, 'public': False, 'show_filters': ['aggr_hosts', - 'aggr_name', + 'aggr_name_regex', 'aggr_state', 'aggr_output', 'aggr_assumed_state', @@ -2143,11 +2345,11 @@ 'browser_reload': 0, 'column_headers': 'off', 'datasource': 'bi_host_aggregations', - 'description': '', - 'group_painters': [('aggr_group', 'biaggr_group')], + 'description': _('Lists all aggregations which only rely on information of one host.'), + 'group_painters': [('aggr_group', 'aggr_group')], 'hard_filters': ['summary_host'], 'hard_filtervars': [('is_host_scheduled_downtime_depth', '-1'), - ('aggr_name', ''), + ('aggr_name_regex', ''), ('aggr_group', ''), ('birs0', 'on'), ('birs1', 'on'), @@ -2183,14 +2385,15 @@ 'name': 'aggr_singlehosts', 'num_columns': 1, 'owner': 'omdadmin', - 'painters': [('host', 'aggr_host', ''), + 'painters': [('aggr_icons', None, ''), + ('host', 'aggr_host', ''), ('host_icons', None, ''), ('aggr_treestate', None, ''), ], 'play_sounds': False, 'public': True, 'show_filters': ['host_scheduled_downtime_depth', - 'aggr_name', + 'aggr_name_regex', 'aggr_group', 'aggr_state', 'host_in_notification_period', @@ -2210,11 +2413,11 @@ 'browser_reload': 0, 'column_headers': 'off', 'datasource': 'bi_hostname_aggregations', - 'description': '', - 'group_painters': [('aggr_group', 'biaggr_group')], + 'description': _('Host related aggregations'), + 'group_painters': [('aggr_group', 'aggr_group')], 'hard_filters': ['summary_host'], 'hard_filtervars': [('is_host_scheduled_downtime_depth', '-1'), - ('aggr_name', ''), + ('aggr_name_regex', ''), ('aggr_group', ''), ('birs0', 'on'), ('birs1', 'on'), @@ -2249,14 +2452,15 @@ 'mustsearch': False, 'num_columns': 1, 'owner': 'omdadmin', - 'painters': [('host', 'aggr_host', ''), + 
'painters': [('aggr_icons', None, ''), + ('host', 'aggr_host', ''), ('host_icons', None, ''), ('aggr_treestate', None, ''), ], 'play_sounds': False, 'public': True, 'show_filters': ['host_scheduled_downtime_depth', - 'aggr_name', + 'aggr_name_regex', 'aggr_group', 'aggr_state', 'host_in_notification_period', @@ -2275,12 +2479,12 @@ 'aggr_singlehost': {'browser_reload': 0, 'column_headers': 'pergroup', 'datasource': 'bi_host_aggregations', - 'description': '', + 'description': _('A single host related aggregation'), 'group_painters': [('aggr_name', None)], 'hard_filters': [], 'hard_filtervars': [], 'hidden': True, - 'hide_filters': ['site', 'host'], + 'hide_filters': ['siteopt', 'host'], 'hidebutton': False, 'icon' : 'aggr', 'layout': 'table', @@ -2289,7 +2493,8 @@ 'name': 'aggrhost', 'num_columns': 1, 'owner': 'omdadmin', - 'painters': [('aggr_state', None, ''), + 'painters': [('aggr_icons', None, ''), + ('aggr_state', None, ''), ('aggr_treestate', None, '')], 'play_sounds': False, 'public': True, @@ -2303,10 +2508,10 @@ 'browser_reload': 0, 'column_headers': 'pergroup', 'datasource': 'bi_aggregations', - 'description': '', + 'description': _('All aggregations the given host is part of'), 'group_painters': [('aggr_group', 'aggr_group')], 'hard_filters': [], - 'hard_filtervars': [('aggr_name', ''), + 'hard_filtervars': [('aggr_name_regex', ''), ('aggr_output', ''), ('birs0', 'on'), ('birs1', 'on'), @@ -2326,7 +2531,7 @@ ('bies-1', 'on'), ], 'hidden': True, - 'hide_filters': ["aggr_hosts"], + 'hide_filters': [], 'hidebutton': False, 'icon' : 'aggr', 'layout': 'table', @@ -2335,16 +2540,18 @@ 'name': 'aggr_host', 'num_columns': 1, 'owner': 'omdadmin', - 'painters': [('aggr_state', None, ''), + 'painters': [('aggr_icons', None, ''), + ('aggr_state', None, ''), ('aggr_treestate', None, '')], 'play_sounds': False, 'public': False, 'show_filters': ['aggr_group', - 'aggr_name', + 'aggr_name_regex', 'aggr_state', 'aggr_output', 'aggr_assumed_state', - 'aggr_effective_state'], + 'aggr_effective_state', + 'aggr_hosts'], 'sorters': [ ('aggr_name', False) ], 'title': _('Aggregations Affected by Host'), 'topic': _('Business Intelligence')}, @@ -2354,10 +2561,10 @@ 'browser_reload': 0, 'column_headers': 'pergroup', 'datasource': 'bi_aggregations', - 'description': '', + 'description': _('All aggregations affected by a certain service'), 'group_painters': [('aggr_group', 'aggr_group')], 'hard_filters': [], - 'hard_filtervars': [('aggr_name', ''), + 'hard_filtervars': [('aggr_name_regex', ''), ('aggr_output', ''), ('birs0', 'on'), ('birs1', 'on'), @@ -2377,7 +2584,7 @@ ('bies-1', 'on'), ], 'hidden': True, - 'hide_filters': ["aggr_service"], + 'hide_filters': [], 'hidebutton': False, 'icon' : 'aggr', 'layout': 'table', @@ -2386,16 +2593,18 @@ 'name': 'aggr_service', 'num_columns': 1, 'owner': 'omdadmin', - 'painters': [('aggr_state', None, ''), + 'painters': [('aggr_icons', None, ''), + ('aggr_state', None, ''), ('aggr_treestate', None, '')], 'play_sounds': False, 'public': False, 'show_filters': ['aggr_group', - 'aggr_name', + 'aggr_name_regex', 'aggr_state', 'aggr_output', 'aggr_assumed_state', - 'aggr_effective_state'], + 'aggr_effective_state', + 'aggr_service'], 'sorters': [ ('aggr_name', False) ], 'title': _('Aggregations Affected by Service'), 'topic': _('Business Intelligence')}, @@ -2405,11 +2614,11 @@ 'browser_reload': 0, 'column_headers': 'pergroup', 'datasource': 'bi_aggregations', - 'description': 'All aggregations that have a non-OK state (honoring state assumptions)', + 'description': _('All 
aggregations that have a non-OK state (honoring state assumptions)'), 'group_painters': [('aggr_group', 'aggr_group')], 'hard_filters': [], 'hard_filtervars': [('host', ''), - ('aggr_name', ''), + ('aggr_name_regex', ''), ('aggr_output', ''), ('birs0', ''), ('birs1', 'on'), @@ -2438,14 +2647,15 @@ 'name': 'aggr_all', 'num_columns': 1, 'owner': 'omdadmin', - 'painters': [('aggr_state', None, ''), + 'painters': [('aggr_icons', None, ''), + ('aggr_state', None, ''), ('aggr_treestate', None, ''), ('aggr_hosts', None, '')], 'play_sounds': True, 'public': False, 'show_filters': ['aggr_group', 'aggr_hosts', - 'aggr_name', + 'aggr_name_regex', 'aggr_state', 'aggr_output', 'aggr_assumed_state', @@ -2459,11 +2669,11 @@ 'browser_reload': 0, 'column_headers': 'off', 'datasource': 'bi_host_aggregations', - 'description': 'All single-host aggregations that are in non-OK state (honoring state assumptions)', - 'group_painters': [('aggr_group', 'biaggr_group')], + 'description': _('All single-host aggregations that are in non-OK state (honoring state assumptions)'), + 'group_painters': [('aggr_group', 'aggr_group')], 'hard_filters': ['summary_host'], 'hard_filtervars': [('is_host_scheduled_downtime_depth', '-1'), - ('aggr_name', ''), + ('aggr_name_regex', ''), ('aggr_group', 'Hosts'), ('is_host_in_notification_period', '-1'), ('aggr_output', ''), @@ -2494,19 +2704,20 @@ 'hidebutton': True, 'icon' : 'aggr', 'layout': 'table', - 'linktitle': 'Single-Host Problems', + 'linktitle': _('Single-Host Problems'), 'mustsearch': False, 'name': 'aggr_hostproblems', 'num_columns': 1, 'owner': 'omdadmin', - 'painters': [('host', 'aggr_host', ''), + 'painters': [('aggr_icons', None, ''), + ('host', 'aggr_host', ''), ('host_icons', None, ''), ('aggr_treestate', None, ''), ], 'play_sounds': False, 'public': True, 'show_filters': ['host_scheduled_downtime_depth', - 'aggr_name', + 'aggr_name_regex', 'aggr_group', 'aggr_state', 'host_in_notification_period', @@ -2518,14 +2729,14 @@ 'opthostgroup', 'aggr_effective_state'], 'sorters': [('aggr_group', False), ('site_host', False)], - 'title': 'Single-Host Problems', + 'title': _('Single-Host Problems'), 'topic': _('Business Intelligence')}, # Shows a single aggregation which has to be set via aggr_name= 'aggr_single': {'browser_reload': 0, 'column_headers': 'pergroup', 'datasource': 'bi_aggregations', - 'description': u'', + 'description': _('Shows a single aggregation.'), 'group_painters': [], 'hard_filters': [], 'hard_filtervars': [], @@ -2540,7 +2751,8 @@ 'name': 'aggr_single', 'num_columns': 1, 'owner': 'omdadmin', - 'painters': [('aggr_state', None, ''), + 'painters': [('aggr_icons', None, ''), + ('aggr_state', None, ''), ('aggr_treestate', None, '')], 'play_sounds': False, 'public': True, @@ -2551,12 +2763,46 @@ 'topic': u'Business Intelligence', 'user_sortable': None}, +# Shows minimal information about a multiple aggregation +# Use together with output_format=python for API calls +'aggr_all_api': {'browser_reload': 0, + 'column_headers': 'pergroup', + 'datasource': 'bi_aggregations', + 'description': _('List of all aggregations, containing the name of aggregations and state information'), + 'group_painters': [], + 'hard_filters': [], + 'hard_filtervars': [], + 'hidden': True, + 'hide_filters': [], + 'hidebutton': True, + 'icon': 'aggr', + 'layout': 'table', + 'linktitle': 'All Aggregations', + 'mobile': False, + 'mustsearch': False, + 'name': 'aggr_all_api', + 'num_columns': 1, + 'owner': 'omdadmin', + 'painters': [('aggr_group', None, ''), + ('aggr_name', None, ''), + 
('aggr_state_num', None, ''), + ('aggr_output', None, ''), + ('aggr_treestate', None, '')], + 'play_sounds': False, + 'public': True, + 'show_checkboxes': None, + 'show_filters': [], + 'sorters': [], + 'title': u'List of all Aggregations for simple API calls', + 'topic': u'Business Intelligence', + 'user_sortable': None}, + # Shows minimal information about a single aggregation which has to be set via aggr_name=. # Use together with output_format=python for API calls 'aggr_single_api': {'browser_reload': 0, 'column_headers': 'pergroup', 'datasource': 'bi_aggregations', - 'description': u'Single Aggregation for simple API calls. Contains the state and state output.', + 'description': _('Single Aggregation for simple API calls. Contains the state and state output.'), 'group_painters': [], 'hard_filters': [], 'hard_filtervars': [], @@ -2586,7 +2832,7 @@ 'aggr_summary': {'browser_reload': 0, 'column_headers': 'off', 'datasource': 'bi_aggregations', - 'description': u'Simple summary page of all BI aggregates that is used as a web services.', + 'description': _('Simple summary page of all BI aggregates that is used as a web services.'), 'group_painters': [], 'hard_filters': [], 'hard_filtervars': [], @@ -2613,35 +2859,40 @@ 'user_sortable': 'on'}, # Hostgroup with boxed BIs for each host -'aggr_hostgroup_boxed': {'browser_reload': 0, - 'column_headers': 'off', - 'datasource': 'bi_hostname_aggregations', - 'description': '', - 'group_painters': [('site_icon', None), - ('sitealias', 'sitehosts')], - 'hard_filters': [], - 'hard_filtervars': [], - 'hidden': True, - 'hide_filters': ['hostgroup'], - 'icon' : 'aggr', - 'layout': 'boxed', - 'linktitle': _('BI Boxes'), - 'mustsearch': False, - 'name': 'hostgroup', - 'num_columns': 2, - 'owner': 'admin', - 'painters': [('host_state', None), - ('host', 'host'), - ('host_icons', None), - ('alias', None), - ('aggr_treestate_boxed', None), - ], - 'play_sounds': False, - 'public': True, - 'show_filters': [ 'siteopt', 'hostregex', 'aggr_group' ], - 'sorters': [('site', False), ('site_host', False)], - 'title': _('Hostgroup with BI state'), - 'topic': _('hidden')}, +'aggr_hostgroup_boxed': { + 'browser_reload' : 0, + 'column_headers' : 'off', + 'context' : { + 'aggr_group': {'aggr_group' : ''}, + 'hostregex' : {'host_regex' : ''}, + }, + 'datasource' : 'bi_hostnamebygroup_aggregations', + 'description' : u'Hostgroup with boxed BIs for each host\n', + 'group_painters' : [ + ('site_icon', '', None), + ('sitealias', 'sitehosts', None), + ], + 'hidden' : True, + 'hidebutton' : False, + 'icon' : 'aggr', + 'layout' : 'boxed', + 'linktitle' : u'BI Boxes', + 'name' : 'aggr_hostgroup_boxed', + 'num_columns' : 2, + 'painters' : [ + ('host_state', None, None), + ('host', 'host', None), + ('host_icons', None, None), + ('alias', None, None), + ('aggr_treestate_boxed', None, None), + ], + 'public' : True, + 'single_infos' : ['hostgroup'], + 'sorters' : [('site', False), ('site_host', False)], + 'title' : u'Hostgroup with BI state', + 'topic' : u'hidden', + 'user_sortable' : True +}, # +----------------------------------------------------------------------+ @@ -2672,7 +2923,7 @@ ('logtime_from', '90'), ], 'hidden': True, - 'hide_filters': ['site', 'host'], + 'hide_filters': ['siteopt', 'host'], 'hidebutton': False, 'icon': 'notification', 'layout': 'table', @@ -2683,6 +2934,7 @@ 'owner': 'omdadmin', 'painters': [('log_time', None, ''), ('log_contact_name', 'contactnotifications', ''), + ('log_command', '', ''), ('log_state', None, ''), ('log_plugin_output', None, '')], 
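
The hidden aggr_single_api and aggr_all_api views defined above are meant to be called as web services: appending output_format=python (or json) to view.py returns the view's rows as a literal data structure, with the first row carrying the column names of the configured painters. A rough consumer sketch; server, site and the automation credentials below are placeholders:

    import urllib2, ast

    url = ("http://monitoring.example.com/mysite/check_mk/view.py"
           "?view_name=aggr_all_api&output_format=python"
           "&_username=automation&_secret=MYSECRET")   # hypothetical credentials

    rows = ast.literal_eval(urllib2.urlopen(url).read())
    header = rows[0]                  # first row holds the column names
    for row in rows[1:]:
        cells = dict(zip(header, row))
        print cells['aggr_name'], cells['aggr_state_num']
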
'play_sounds': False, @@ -2691,7 +2943,7 @@ 'log_plugin_output', 'logtime', 'log_state'], - 'sorters': [('log_time', False), ('log_lineno', False)], + 'sorters': [('log_time', True), ('log_lineno', True)], 'title': _('Notifications of host'), 'topic': _('Other')}, 'hostsvcnotifications': {'browser_reload': 0, @@ -2714,7 +2966,7 @@ ('logtime_from', '90'), ], 'hidden': True, - 'hide_filters': ['site', 'host'], + 'hide_filters': ['siteopt', 'host'], 'hidebutton': False, 'icon': 'notification', 'layout': 'table', @@ -2725,6 +2977,7 @@ 'owner': 'omdadmin', 'painters': [('log_time', None, ''), ('log_contact_name', 'contactnotifications', ''), + ('log_command', '', ''), ('log_type', None, ''), ('host', 'hostsvcnotifications', ''), ('service_description', @@ -2738,7 +2991,7 @@ 'log_plugin_output', 'logtime', 'log_state'], - 'sorters': [('log_time', False), ('log_lineno', False)], + 'sorters': [('log_time', True), ('log_lineno', True)], 'title': _('Notifications of host & services'), 'topic': _('Other')}, 'notifications': {'browser_reload': 0, @@ -2773,6 +3026,7 @@ 'owner': 'omdadmin', 'painters': [('log_time', None, ''), ('log_contact_name', 'contactnotifications', ''), + ('log_command', '', ''), ('log_type', None, ''), ('host', 'hostsvcnotifications', ''), ('service_description', @@ -2788,7 +3042,7 @@ 'log_plugin_output', 'logtime', 'log_state'], - 'sorters': [('log_time', False), ('log_lineno', False)], + 'sorters': [('log_time', True), ('log_lineno', True)], 'title': _('Host- and Service notifications'), 'topic': _('Other')}, 'svcnotifications': {'browser_reload': 0, @@ -2809,7 +3063,7 @@ ('logtime_from', '90'), ], 'hidden': True, - 'hide_filters': ['site', 'service', 'host'], + 'hide_filters': ['siteopt', 'service', 'host'], 'hidebutton': False, 'icon': 'notification', 'layout': 'table', @@ -2820,13 +3074,12 @@ 'owner': 'omdadmin', 'painters': [('log_time', None, ''), ('log_contact_name', 'contactnotifications', ''), + ('log_command', '', ''), ('host', None, ''), ('log_state', None, ''), ('log_plugin_output', None, '')], - 'play_sounds': False, - 'public': True, 'show_filters': ['log_plugin_output', 'logtime', 'log_state'], - 'sorters': [('log_time', False), ('log_lineno', False)], + 'sorters': [('log_time', True), ('log_lineno', True)], 'title': _('Service Notifications'), 'topic': _('Other')}, 'contactnotifications': {'browser_reload': 0, @@ -2859,6 +3112,7 @@ 'num_columns': 1, 'owner': 'omdadmin', 'painters': [('log_time', None, ''), + ('log_command', '', ''), ('log_type', None, ''), ('host', 'hostsvcnotifications', ''), ('service_description', @@ -2873,7 +3127,7 @@ 'log_plugin_output', 'logtime', 'log_state'], - 'sorters': [('log_time', False), ('log_lineno', False)], + 'sorters': [('log_time', True), ('log_lineno', True)], 'title': _('Notifications of contact'), 'topic': _('Other')}, }) diff -Nru check-mk-1.2.2p3/plugins/views/commands.py check-mk-1.2.6p12/plugins/views/commands.py --- check-mk-1.2.2p3/plugins/views/commands.py 2013-11-05 09:23:09.000000000 +0000 +++ check-mk-1.2.6p12/plugins/views/commands.py 2015-09-21 10:59:54.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. 
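
The first hunk below replaces the one-line reschedule lambda with command_reschedule(), which can spread the forced checks over a configurable number of minutes: row i of a selection of total_rows is scheduled at now + spread*60 * i/total_rows. A standalone sketch of that arithmetic (host and service names are placeholders):

    import time

    def spread_times(total_rows, spread_minutes, now=None):
        # Row i of N is rechecked at now + spread*60 * i/N, so a large
        # selection is staggered evenly instead of hitting the core at once.
        if now is None:
            now = time.time()
        return [ now + spread_minutes * 60.0 * row_nr / total_rows
                 for row_nr in range(total_rows) ]

    # Four services spread over two minutes -> offsets 0s, 30s, 60s, 90s:
    for t in spread_times(4, 2, now=0):
        print "SCHEDULE_FORCED_SVC_CHECK;myhost;mysvc;%d" % int(t)
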
@@ -36,25 +36,40 @@ # - function that outputs the HTML input fields # - function that creates the nagios command and title -#import datetime, traceback -#file('/tmp/1', 'a').write('%s %s\n%s' % (datetime.datetime.now(), current_language, ''.join(traceback.format_stack()))) - # RESCHEDULE ACTIVE CHECKS +def command_reschedule(cmdtag, spec, row, row_nr, total_rows): + if html.var("_resched_checks"): + spread = saveint(html.var("_resched_spread")) + text = "" + _("reschedule an immediate check") + if spread: + text += _(" spread over %d minutes ") % spread + + text += "" + _("of") + + t = time.time() + if spread: + t += spread * 60.0 * row_nr / total_rows + + command = "SCHEDULE_FORCED_" + cmdtag + "_CHECK;%s;%d" % (spec, int(t)) + return command, text + config.declare_permission("action.reschedule", _("Reschedule checks"), _("Reschedule host and service checks"), [ "user", "admin" ]) + multisite_commands.append({ "tables" : [ "host", "service" ], "permission" : "action.reschedule", - "title" : _("Reschedule"), + "title" : _("Reschedule active checks"), "render" : lambda: \ - html.button("_resched_checks", _("Reschedule active checks")), - "action" : lambda cmdtag, spec, row: - html.var("_resched_checks") and ( - "SCHEDULE_FORCED_" + cmdtag + "_CHECK;%s;%d" % (spec, int(time.time())), - _("reschedule an immediate check of")) + html.button("_resched_checks", _("Reschedule")) == \ + html.write(_("and spread over") + " ") == \ + html.number_input("_resched_spread", 0, size=3) == \ + html.write(" " + _("minutes") + " "), + "action" : command_reschedule, + "row_stats" : True, # Get information about number of rows and current row nr. }) @@ -131,8 +146,8 @@ # CLEAR MODIFIED ATTRIBUTES config.declare_permission("action.clearmodattr", - _("Clear modified attributes"), - _("Remove the information that an attribute (like check enabling) has been changed"), + _("Reset modified attributes"), + _("Reset all manually modified attributes of a host or service (like disabled notifications)"), [ "admin" ]) multisite_commands.append({ @@ -140,11 +155,11 @@ "permission" : "action.clearmodattr", "title" : _("Modified attributes"), "render" : lambda: \ - html.button("_clear_modattr", _('Clear information about modified attributes')), + html.button("_clear_modattr", _('Clear modified attributes')), "action" : lambda cmdtag, spec, row: ( html.var("_clear_modattr") and ( "CHANGE_" + cmdtag + "_MODATTR;%s;0" % spec, - _("clear the information about modified attributes of"))), + _("clear the modified attributes of"))), }) # FAKE CHECKS @@ -154,25 +169,51 @@ [ "admin" ]) def command_fake_checks(cmdtag, spec, row): - for s in [0,1,2,3]: + for s in [0, 1, 2, 3]: statename = html.var("_fake_%d" % s) if statename: - pluginoutput = _("Manually set to %s by %s") % (statename, config.user_id) + pluginoutput = html.var_utf8("_fake_output").strip() + if not pluginoutput: + pluginoutput = _("Manually set to %s by %s") % (html.attrencode(statename), config.user_id) + perfdata = html.var("_fake_perfdata") + if perfdata: + pluginoutput += "|" + perfdata if cmdtag == "SVC": cmdtag = "SERVICE" - command = "PROCESS_%s_CHECK_RESULT;%s;%s;%s" % (cmdtag, spec, s, pluginoutput) - title = _("manually set check results to %s for") % statename + command = "PROCESS_%s_CHECK_RESULT;%s;%s;%s" % (cmdtag, spec, s, lqencode(pluginoutput)) + title = _("manually set check results to %s for") % html.attrencode(statename) return command, title +def render_fake_form(what): + html.write("
    ") + html.write("%s: " % _("Plugin output")) + html.write("") + html.text_input("_fake_output", "", size=50) + html.write("
    ") + html.write("%s: " % _("Performance data")) + html.write("") + html.text_input("_fake_perfdata", "", size=50) + html.write("
    ") + html.write(_("Set to:")) + html.write("") + if what == "host": + html.button("_fake_0", _("Up")) + html.button("_fake_1", _("Down")) + html.button("_fake_2", _("Unreachable")) + else: + html.button("_fake_0", _("OK")) + html.button("_fake_1", _("Warning")) + html.button("_fake_2", _("Critical")) + html.button("_fake_3", _("Unknown")) + html.write("
    ") + multisite_commands.append({ "tables" : [ "host" ], "permission" : "action.fakechecks", "title" : _("Fake check results"), - "render" : lambda: \ - html.button("_fake_0", _("Up")) == \ - html.button("_fake_1", _("Down")) == \ - html.button("_fake_2", _("Unreachable")), + "group" : _("Fake check results"), + "render" : lambda: render_fake_form("host"), "action" : command_fake_checks, }) @@ -180,11 +221,8 @@ "tables" : [ "service" ], "permission" : "action.fakechecks", "title" : _("Fake check results"), - "render" : lambda: \ - html.button("_fake_0", _("OK")) == \ - html.button("_fake_1", _("Warning")) == \ - html.button("_fake_2", _("Critical")) == \ - html.button("_fake_3", _("Unknown")), + "group" : _("Fake check results"), + "render" : lambda: render_fake_form("service"), "action" : command_fake_checks, }) @@ -201,7 +239,7 @@ broadcast = html.get_checkbox("_cusnot_broadcast") and 1 or 0 forced = html.get_checkbox("_cusnot_forced") and 2 or 0 command = "SEND_CUSTOM_%s_NOTIFICATION;%s;%s;%s;%s" % \ - ( cmdtag, spec, broadcast + forced, config.user_id, comment) + ( cmdtag, spec, broadcast + forced, config.user_id, lqencode(comment)) title = _("send a custom notification regarding") return command, title @@ -232,12 +270,24 @@ comment = html.var_utf8("_ack_comment") if not comment: raise MKUserError("_ack_comment", _("You need to supply a comment.")) + if ";" in comment: + raise MKUserError("_ack_comment", _("The comment must not contain semicolons.")) sticky = html.var("_ack_sticky") and 2 or 0 sendnot = html.var("_ack_notify") and 1 or 0 perscomm = html.var("_ack_persistent") and 1 or 0 + + expire_secs = Age().from_html_vars("_ack_expire") + if expire_secs: + expire = int(time.time()) + expire_secs + expire_text = ";%d" % expire + else: + expire_text = "" + command = "ACKNOWLEDGE_" + cmdtag + "_PROBLEM;%s;%d;%d;%d;%s" % \ - (spec, sticky, sendnot, perscomm, config.user_id) + (";%s" % comment) - title = _("acknowledge the problems of") + (spec, sticky, sendnot, perscomm, config.user_id) + (";%s" % lqencode(comment)) \ + + expire_text + title = _("acknowledge the problems%s of") % \ + (expire_text and (_(" for a period of %s") % Age().value_to_text(expire_secs)) or "") return command, title elif html.var("_remove_ack"): @@ -249,15 +299,18 @@ multisite_commands.append({ "tables" : [ "host", "service" ], "permission" : "action.acknowledge", - "title" : _("Acknowledge"), + "title" : _("Acknowledge Problems"), "render" : lambda: \ html.button("_acknowledge", _("Acknowledge")) == \ - html.button("_remove_ack", _("Remove Acknowledge")) == \ + html.button("_remove_ack", _("Remove Acknowledgement")) == \ html.write("
    ") == \ html.checkbox("_ack_sticky", True, label=_("sticky")) == \ html.checkbox("_ack_notify", True, label=_("send notification")) == \ html.checkbox("_ack_persistent", False, label=_('persistent comment')) == \ html.write("
    ") == \ + Age(display=["days", "hours", "minutes"], label=_("Expire acknowledgement after")).render_input("_ack_expire", 0) == \ + html.help(_("Note: Expiration of acknowledgements only works when using the Check_MK Micro Core.")) == \ + html.write("
    ") == \ html.write(_("Comment") + ": ") == \ html.text_input("_ack_comment", size=48, submit="_acknowledge"), "action" : command_acknowledgement, @@ -277,7 +330,7 @@ if not comment: raise MKUserError("_comment", _("You need to supply a comment.")) command = "ADD_" + cmdtag + "_COMMENT;%s;1;%s" % \ - (spec, config.user_id) + (";%s" % comment) + (spec, config.user_id) + (";%s" % lqencode(comment)) title = _("add a comment to") return command, title @@ -348,6 +401,11 @@ down_to = time.time() + minutes * 60 title = _("schedule an immediate downtime for the next %d minutes on" % minutes) + elif html.var("_down_adhoc"): + minutes = config.adhoc_downtime.get("duration",0) + down_to = time.time() + minutes * 60 + title = _("schedule an immediate downtime for the next %d minutes on" % minutes) + elif html.var("_down_custom"): down_from = html.get_datetime_input("_down_from") down_to = html.get_datetime_input("_down_to") @@ -360,6 +418,9 @@ time.asctime(time.localtime(down_to))) elif html.var("_down_remove"): + if html.var("_on_hosts"): + raise MKUserError("_on_hosts", _("The checkbox for setting host downtimes does not work when removing downtimes.")) + downtime_ids = [] if cmdtag == "HOST": prefix = "host_" @@ -376,7 +437,10 @@ return commands, title if down_to: - comment = html.var_utf8("_down_comment") + if html.var("_down_adhoc"): + comment = config.adhoc_downtime.get("comment","") + else: + comment = html.var_utf8("_down_comment") if not comment: raise MKUserError("_down_comment", _("You need to supply a comment for your downtime.")) if html.var("_down_flexible"): @@ -388,14 +452,19 @@ if html.var("_include_childs"): # only for hosts specs = [ spec ] + get_child_hosts(row["site"], [spec], recurse = not not html.var("_include_childs_recurse")) + elif html.var("_on_hosts"): # set on hosts instead of services + specs = [ spec.split(";")[0] ] + title += " the hosts of" + cmdtag = "HOST" else: specs = [ spec ] commands = [(("SCHEDULE_" + cmdtag + "_DOWNTIME;%s;" % spec ) \ + ("%d;%d;%d;0;%d;%s;" % (down_from, down_to, fixed, duration, config.user_id)) \ - + comment) for spec in specs] + + lqencode(comment)) for spec in specs] return commands, title + def get_child_hosts(site, hosts, recurse): hosts = set(hosts) html.live.set_only_sites([site]) @@ -413,9 +482,16 @@ new_childs.update(rec_childs) return list(new_childs) + def paint_downtime_buttons(what): + html.write(_('Downtime Comment')+": ") - html.text_input("_down_comment", size=40, submit="") + html.text_input("_down_comment", "", size=60, submit="") + html.write("
    ") + html.button("_down_from_now", _("From now for")) + html.write(" ") + html.number_input("_down_minutes", 60, size=4, submit="_down_from_now") + html.write("  " + _("minutes")) html.write("
    ") html.button("_down_2h", _("2 hours")) html.button("_down_today", _("Today")) @@ -425,24 +501,31 @@ html.write("   -  ") html.button("_down_remove", _("Remove all")) html.write("
    ") + if config.adhoc_downtime and config.adhoc_downtime.get("duration"): + adhoc_duration = config.adhoc_downtime.get("duration") + adhoc_comment = config.adhoc_downtime.get("comment", "") + html.button("_down_adhoc", _("Adhoc for %d minutes") % adhoc_duration) + html.write(" ") + html.write(_('with comment')+": ") + html.write(adhoc_comment) + html.write("
    ") + html.button("_down_custom", _("Custom time range")) html.datetime_input("_down_from", time.time(), submit="_down_custom") html.write("  "+_('to')+"  ") html.datetime_input("_down_to", time.time() + 7200, submit="_down_custom") html.write("
    ") - html.button("_down_from_now", _("From now for")) - html.write(" ") - html.number_input("_down_minutes", 60, size=4, submit="_down_from_now") - html.write("  " + _("minutes")) - html.write("
    ") html.checkbox("_down_flexible", False, label=_('flexible with max. duration')+" ") html.time_input("_down_duration", 2, 0) html.write(" "+_('(HH:MM)')) + html.write("
    ") if what == "host": - html.write("
    ") html.checkbox("_include_childs", False, label=_('Also set downtime on child hosts')) html.write(" ") html.checkbox("_include_childs_recurse", False, label=_('Do this recursively')) + else: + html.checkbox("_on_hosts", False, label=_('Schedule downtimes on the affected hosts instead of their services')) + multisite_commands.append({ @@ -488,3 +571,50 @@ ( "DEL_%s_COMMENT;%d" % (cmdtag, spec), _("remove")) }) + +# .--Stars *-------------------------------------------------------------. +# | ____ _ | +# | / ___|| |_ __ _ _ __ ___ __/\__ | +# | \___ \| __/ _` | '__/ __| \ / | +# | ___) | || (_| | | \__ \ /_ _\ | +# | |____/ \__\__,_|_| |___/ \/ | +# | | +# '----------------------------------------------------------------------' + +def command_star(cmdtag, spec, row): + if html.var("_star") or html.var("_unstar"): + star = html.var("_star") and 1 or 0 + if star: + title = _("add to you favorites") + else: + title = _("remove from your favorites") + return "STAR;%d;%s" % (star, spec), title + + +def command_executor_star(command, site): + foo, star, spec = command.split(";", 2) + stars = config.load_stars() + if star == "0" and spec in stars: + stars.remove(spec) + elif star == "1": + stars.add(spec) + config.save_stars(stars) + +config.declare_permission("action.star", + _("Use favorites"), + _("This permission allows a user to make certain host and services " + "his personal favorites. Favorites can be used for a having a fast " + "access to items that are needed on a regular base."), + [ "user", "admin" ]) + +multisite_commands.append({ + "tables" : [ "host", "service" ], + "permission" : "action.star", + "title" : _("Favorites"), + "render" : lambda: \ + html.button("_star", _("Add to Favorites")) == \ + html.button("_unstar", _("Remove from Favorites")), + "action" : command_star, + "executor" : command_executor_star, +}) + diff -Nru check-mk-1.2.2p3/plugins/views/dashboard.py check-mk-1.2.6p12/plugins/views/dashboard.py --- check-mk-1.2.2p3/plugins/views/dashboard.py 2013-11-05 09:23:09.000000000 +0000 +++ check-mk-1.2.6p12/plugins/views/dashboard.py 2015-06-24 09:48:38.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. @@ -24,6 +24,9 @@ # to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, # Boston, MA 02110-1301 USA. 
+# FIXME: Can be removed once all dashboards have been converted +# to have the view definitions right inside the dashboards + multisite_builtin_views.update({ # A similar view, used in the dashboard @@ -46,7 +49,7 @@ ('is_host_acknowledged', '0'), ('is_summary_host', '0'), ], - 'hidden': False, + 'hidden': True, 'hidebutton': True, 'hide_filters': [], 'layout': 'table', @@ -98,7 +101,7 @@ ('hst2', ''), ('hstp', 'on'), ('is_summary_host', '0')], - 'hidden': False, + 'hidden': True, 'hide_filters': [], 'layout': 'table', 'mustsearch': False, @@ -135,7 +138,7 @@ ('logtime_from_range', '3600'), ('logtime_from', '4'), ], - 'hidden': False, + 'hidden': True, 'hide_filters': [], 'layout': 'table', 'linktitle': 'Events', diff -Nru check-mk-1.2.2p3/plugins/views/datasources.py check-mk-1.2.6p12/plugins/views/datasources.py --- check-mk-1.2.2p3/plugins/views/datasources.py 2013-11-05 09:23:09.000000000 +0000 +++ check-mk-1.2.6p12/plugins/views/datasources.py 2015-09-21 10:59:54.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. @@ -79,6 +79,18 @@ "keys" : [ "host_name", "host_downtimes" ], "join" : ( "services", "host_name" ), "idkeys" : [ "site", "host_name" ], + "description" : _("Displays a list of hosts."), + # When the single info "hostgroup" is used, use the "opthostgroup" filter + # to handle the data provided by the single_spec value of the "hostgroup" + # info, which is in fact the name of the wanted hostgroup + "link_filters" : { "hostgroup": "opthostgroup" }, + # When these filters are set, the site hint will not be added to urls + # which link to views using this datasource, because the resulting view + # should show the objects spread across the sites + "multiple_site_filters" : [ + "hostgroup", + "servicegroup", + ], } multisite_datasources["hostsbygroup"] = { @@ -88,6 +100,7 @@ "keys" : [ "host_name", "host_downtimes" ], "join" : ( "services", "host_name" ), "idkeys" : [ "site", "hostgroup_name", "host_name" ], + "description" : _("This datasource has a separate row for each group membership that a host has."), } multisite_datasources["services"] = { @@ -97,6 +110,20 @@ "keys" : [ "host_name", "service_description", "service_downtimes" ], "joinkey" : "service_description", "idkeys" : [ "site", "host_name", "service_description" ], + # When the single info "hostgroup" is used, use the "opthostgroup" filter + # to handle the data provided by the single_spec value of the "hostgroup" + # info, which is in fact the name of the wanted hostgroup + "link_filters" : { + "hostgroup" : "opthostgroup", + "servicegroup" : "optservicegroup", + }, + # When these filters are set, the site hint will not be added to urls + # which link to views using this datasource, because the resulting view + # should show the objects spread across the sites + "multiple_site_filters" : [ + "hostgroup", + "servicegroup", + ], } multisite_datasources["servicesbygroup"] = { @@ -123,6 +150,7 @@ "idkeys" : [ "site", "hostgroup_name" ], } +# Merged groups across sites multisite_datasources["merged_hostgroups"] = { "title" : _("Hostgroups, merged"), "table" : "hostgroups", @@ -140,6 +168,7 @@ "idkeys" : [ "site", "servicegroup_name" ], } +# Merged groups across sites multisite_datasources["merged_servicegroups"] = {
"title" : _("Servicegroups, merged"), "table" : "servicegroups", @@ -158,7 +187,7 @@ } multisite_datasources["downtimes"] = { - "title" : _("Schedules Downtimes"), + "title" : _("Scheduled Downtimes"), "table" : "downtimes", "infos" : [ "downtime", "host", "service" ], "keys" : [ "downtime_id", "service_description" ], @@ -171,6 +200,7 @@ "infos" : [ "log", "host", "service", "contact", "command" ], "keys" : [], "idkeys" : [ "log_lineno" ], + "time_filters" : [ "logtime" ], } multisite_datasources["log_events"] = { @@ -180,6 +210,7 @@ "infos" : [ "log", "host", "service" ], "keys" : [], "idkeys" : [ "log_lineno" ], + "time_filters" : [ "logtime" ], } multisite_datasources["log_host_events"] = { @@ -189,6 +220,7 @@ "infos" : [ "log", "host" ], "keys" : [], "idkeys" : [ "log_lineno" ], + "time_filters" : [ "logtime" ], } multisite_datasources["alert_stats"] = { @@ -200,4 +232,5 @@ "keys" : [], "idkeys" : [ 'host_name', 'service_description' ], "ignore_limit" : True, + "time_filters" : [ "logtime" ], } diff -Nru check-mk-1.2.2p3/plugins/views/filters.py check-mk-1.2.6p12/plugins/views/filters.py --- check-mk-1.2.2p3/plugins/views/filters.py 2013-11-05 09:23:09.000000000 +0000 +++ check-mk-1.2.6p12/plugins/views/filters.py 2015-06-24 09:48:38.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. @@ -24,615 +24,9 @@ # to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, # Boston, MA 02110-1301 USA. +# This file was shipped with Check_MK previous to 1.2.5i6. To prevent +# problems during update, this dummy file is shipped. It overwrites +# the outdated existing file and invalidates it. This can be removed +# in later versions. 
- -# Filters for substring search, displaying a text input field -class FilterText(Filter): - def __init__(self, name, title, info, column, htmlvar, op): - Filter.__init__(self, name, title, info, [htmlvar], [column]) - self.op = op - self.column = column - - def display(self): - htmlvar = self.htmlvars[0] - current_value = html.var(htmlvar, "") - html.text_input(htmlvar, current_value) - - def filter(self, infoname): - htmlvar = self.htmlvars[0] - current_value = html.var(htmlvar) - if current_value: - return "Filter: %s %s %s\n" % (self.column, self.op, current_value) - else: - return "" - - def variable_settings(self, row): - return [ (self.htmlvars[0], row[self.column]) ] - - def heading_info(self, infoname): - return html.var(self.htmlvars[0]) - -# filter title info column htmlvar -declare_filter(100, FilterText("hostregex", _("Hostname"), "host", "host_name", "host", "~~"), - _("Search field allowing regular expressions and partial matches")) - -declare_filter(101, FilterText("host", _("Hostname (exact match)"), "host", "host_name", "host", "="), - _("Exact match, used for linking")) - -declare_filter(200, FilterText("serviceregex", _("Service"), "service", "service_description", "service", "~~"), - _("Search field allowing regular expressions and partial matches")) - -declare_filter(201, FilterText("service", _("Service (exact match)"), "service", "service_description", "service", "="), - _("Exact match, used for linking")) - - -declare_filter(101, FilterText("servicegroupnameregex", _("Servicegroup"), "servicegroup", "servicegroup_name", "servicegroup_name", "~~"), - _("Search field allowing regular expression and partial matches")) - -declare_filter(101, FilterText("servicegroupname", _("Servicegroup (enforced)"), "servicegroup", "servicegroup_name", "servicegroup_name", "="), - _("Exact match, used for linking")) - -declare_filter(202, FilterText("output", _("Status detail"), "service", "service_plugin_output", "service_output", "~~")) - - - -class FilterIPAddress(Filter): - def __init__(self): - Filter.__init__(self, "host_address", _("Host IP Address"), "host", ["host_address", "host_address_prefix"], ["host_address"]) - - def display(self): - html.text_input("host_address") - html.write("
<br><br>
    ") - html.begin_radio_group() - html.radiobutton("host_address_prefix", "yes", True, _("Prefix match")) - html.radiobutton("host_address_prefix", "no", False, _("Exact match")) - html.end_radio_group() - - def double_height(self): - return True - - def filter(self, infoname): - address = html.var("host_address") - if address: - if html.var("host_address_prefix") == "yes": - return "Filter: host_address ~ ^%s\n" % address - else: - return "Filter: host_address = %s\n" % address - else: - return "" - - def variable_settings(self, row): - return [ ("host_address", row["host_address"]) ] - - def heading_info(self, infoname): - return html.var("host_address") - -declare_filter(102, FilterIPAddress()) - - -# Helper that retrieves the list of host/service/contactgroups via Livestatus -def all_groups(what): - groups = dict(html.live.query("GET %sgroups\nColumns: name alias\n" % what)) - names = groups.keys() - names.sort() - # use alias by default but fallback to name if no alias defined - return [ (name, groups[name] or name) for name in names ] - -class FilterGroupCombo(Filter): - def __init__(self, what, title, enforce): - self.enforce = enforce - self.prefix = not self.enforce and "opt" or "" - htmlvars = [ self.prefix + what + "group" ] - if not enforce: - htmlvars.append("neg_" + htmlvars[0]) - Filter.__init__(self, self.prefix + what + "group", # name, e.g. "hostgroup" - title, # title, e.g. "Hostgroup" - what.split("_")[0], # info, e.g. "host" - htmlvars, # htmlvars, e.g. "hostgroup" - [ what + "group_name" ]) # rows needed to fetch for link information - self.what = what - - def display(self): - choices = all_groups(self.what.split("_")[-1]) - if not self.enforce: - choices = [("", "")] + choices - html.select(self.htmlvars[0], choices) - if not self.enforce: - html.write(" ") - html.checkbox(self.htmlvars[1], label=_("negate")) - html.write("") - - def current_value(self, infoname): - htmlvar = self.htmlvars[0] - return html.var(htmlvar) - - def filter(self, infoname): - current_value = self.current_value(infoname) - if not current_value: - if not self.enforce: - return "" - # Take first group with the name we search - current_value = html.live.query_value("GET %sgroups\nColumns: name\nLimit: 1\n" % self.what, None) - - if current_value == None: - return "" # no {what}group exists! - - col = self.what + "_groups" - if not self.enforce and html.var(self.htmlvars[1]): - negate = "!" 
- else: - negate = "" - return "Filter: %s %s>= %s\n" % (col, negate, current_value) - - def variable_settings(self, row): - varname = self.htmlvars[0] - value = row.get(self.what + "group_name") - if value: - s = [(varname, value)] - if not self.enforce: - negvar = self.htmlvars[1] - if html.var(negvar): - s.append((negvar, html.var(negvar))) - return s - else: - return [] - - def heading_info(self, infoname): - current_value = self.current_value(infoname) - if current_value: - alias = html.live.query_value("GET %sgroups\nColumns: alias\nFilter: name = %s\n" % - (self.what, current_value), current_value) - return alias - - -declare_filter(104, FilterGroupCombo("host", _("Hostgroup"), False), _("Optional selection of host group")) -declare_filter(104, FilterGroupCombo("host", _("Hostgroup (enforced)"), True), _("Dropdown list, selection of host group is enforced")) -declare_filter(204, FilterGroupCombo("service", _("Servicegroup"), False), _("Optional selection of service group")) -declare_filter(205, FilterGroupCombo("service", _("Servicegroup (enforced)"), True), _("Dropdown list, selection of service group is enforced")) -declare_filter(106, FilterGroupCombo("host_contact", _("Host Contactgroup"), False), _("Optional selection of host contact group group")) -declare_filter(206, FilterGroupCombo("service_contact", _("Service Contactgroup"), False), _("Optional selection of service contact group group")) - -declare_filter(107, FilterText("host_ctc", _("Host Contact"), "host", "host_contacts", "host_ctc", ">=")) -declare_filter(207, FilterText("service_ctc", _("Service Contact"), "service", "service_contacts", "service_ctc", ">=")) - - -# Livestatus still misses "contact_groups" column. -# declare_filter(FilterGroupCombo("contact")) - -class FilterQueryDropdown(Filter): - def __init__(self, name, title, info, query, filterline): - Filter.__init__(self, name, title, info, [ name ], []) - self.query = query - self.filterline = filterline - - def display(self): - selection = html.live.query_column_unique(self.query) - html.sorted_select(self.name, [("", "")] + [(x,x) for x in selection]) - - def filter(self, infoname): - current = html.var(self.name) - if current: - return self.filterline % current - else: - return "" - -declare_filter(110, FilterQueryDropdown("host_check_command", _("Host check command"), "host", \ - "GET commands\nColumns: name\n", "Filter: host_check_command = %s\n")) -declare_filter(210, FilterQueryDropdown("check_command", _("Service check command"), "service", \ - "GET commands\nColumns: name\n", "Filter: service_check_command = %s\n")) - -class FilterServiceState(Filter): - def __init__(self, name, title, prefix): - Filter.__init__(self, name, title, - "service", [ prefix + "st0", prefix + "st1", prefix + "st2", prefix + "st3", prefix + "stp" ], []) - self.prefix = prefix - - def display(self): - html.begin_checkbox_group() - for var, text in [(self.prefix + "st0", "OK"), (self.prefix + "st1", "WARN"), \ - (self.prefix + "st2", "CRIT"), (self.prefix + "st3", "UNKNOWN"), - (self.prefix + "stp", "PEND.")]: - #if html.mobile: - #text = text[:1] - html.checkbox(var, True, label=text) - # html.write(" %s " % text) - html.end_checkbox_group() - - def filter(self, infoname): - headers = [] - for i in [0,1,2,3]: - if html.get_checkbox(self.prefix + "st%d" % i) == False: - if self.prefix == "hd": - column = "service_last_hard_state" - else: - column = "service_state" - headers.append("Filter: %s = %d\n" - "Filter: service_has_been_checked = 1\n" - "And: 2\nNegate:\n" % (column, 
i)) - if html.get_checkbox(self.prefix + "stp") == False: - headers.append("Filter: service_has_been_checked = 1\n") - if len(headers) == 5: # none allowed = all allowed (makes URL building easier) - return "" - else: - return "".join(headers) - -declare_filter(215, FilterServiceState("svcstate", _("Service states"), "")) -declare_filter(216, FilterServiceState("svchardstate", _("Service hard states"), "hd")) - -class FilterHostState(Filter): - def __init__(self): - Filter.__init__(self, "hoststate", _("Host states"), - "host", [ "hst0", "hst1", "hst2", "hstp" ], []) - - def display(self): - html.begin_checkbox_group() - for var, text in [("hst0", _("UP")), ("hst1", _("DOWN")), - ("hst2", _("UNREACH")), ("hstp", _("PENDING"))]: - html.checkbox(var, True, label=text) - html.end_checkbox_group() - - def filter(self, infoname): - headers = [] - for i in [0,1,2]: - if html.get_checkbox("hst%d" % i) == False: - headers.append("Filter: host_state = %d\n" - "Filter: host_has_been_checked = 1\n" - "And: 2\nNegate:\n" % i) - if html.get_checkbox("hstp") == False: - headers.append("Filter: host_has_been_checked = 1\n") - if len(headers) == 4: # none allowed = all allowed (makes URL building easier) - return "" - else: - return "".join(headers) - -declare_filter(115, FilterHostState()) - -class FilterTristate(Filter): - def __init__(self, name, title, info, column, deflt = -1): - self.column = column - self.varname = "is_" + name - Filter.__init__(self, name, title, info, [ self.varname ], []) - self.deflt = deflt - - def display(self): - current = html.var(self.varname) - html.begin_radio_group(horizontal = True) - for value, text in [("1", _("yes")), ("0", _("no")), ("-1", _("(ignore)"))]: - checked = current == value or (current in [ None, ""] and int(value) == self.deflt) - html.radiobutton(self.varname, value, checked, text + "   ") - html.end_radio_group() - - def tristate_value(self): - current = html.var(self.varname) - if current in [ None, "" ]: - return self.deflt - return int(current) - - def filter(self, infoname): - current = self.tristate_value() - if current == -1: # ignore - return "" - elif current == 1: - return self.filter_code(infoname, True) - else: - return self.filter_code(infoname, False) - -class FilterNagiosFlag(FilterTristate): - def __init__(self, info, column, title, deflt = -1): - FilterTristate.__init__(self, column, title, info, column, deflt) - - def filter_code(self, infoname, positive): - if positive: - return "Filter: %s != 0\n" % self.column - else: - return "Filter: %s = 0\n" % self.column - -class FilterNagiosExpression(FilterTristate): - def __init__(self, info, name, title, pos, neg, deflt = -1): - FilterTristate.__init__(self, name, title, info, None, deflt) - self.pos = pos - self.neg = neg - - def filter_code(self, infoname, positive): - return positive and self.pos or self.neg - -declare_filter(120, FilterNagiosExpression("host", "summary_host", _("Is summary host"), - "Filter: host_custom_variable_names >= _REALNAME\n", - "Filter: host_custom_variable_names < _REALNAME\n")) - -declare_filter(250, FilterNagiosFlag("service", "service_process_performance_data", _("Processes performance data"))) -declare_filter(251, FilterNagiosExpression("service", "has_performance_data", _("Has performance data"), - "Filter: service_perf_data != \n", - "Filter: service_perf_data = \n")) - -declare_filter(130, FilterNagiosFlag("host", "host_in_notification_period", _("Host in notif. 
period"))) -declare_filter(131, FilterNagiosFlag("host", "host_acknowledged", _("Host problem has been acknowledged"))) -declare_filter(132, FilterNagiosFlag("host", "host_active_checks_enabled", _("Host active checks enabled"))) -declare_filter(133, FilterNagiosFlag("host", "host_notifications_enabled", _("Host notifications enabled"))) -declare_filter(230, FilterNagiosFlag("service", "service_acknowledged", _("Problem acknowledged"))) -declare_filter(231, FilterNagiosFlag("service", "service_in_notification_period", _("Service in notif. per."))) -declare_filter(232, FilterNagiosFlag("service", "service_active_checks_enabled", _("Active checks enabled"))) -declare_filter(233, FilterNagiosFlag("service", "service_notifications_enabled", _("Notifications enabled"))) -declare_filter(236, FilterNagiosFlag("service", "service_is_flapping", _("Flapping"))) -declare_filter(231, FilterNagiosFlag("service", "service_scheduled_downtime_depth", _("Service in downtime"))) -declare_filter(132, FilterNagiosFlag("host", "host_scheduled_downtime_depth", _("Host in downtime"))) -declare_filter(232, FilterNagiosExpression("service", "in_downtime", _("Host/service in downtime"), - "Filter: service_scheduled_downtime_depth > 0\nFilter: host_scheduled_downtime_depth > 0\nOr: 2\n", - "Filter: service_scheduled_downtime_depth = 0\nFilter: host_scheduled_downtime_depth = 0\nAnd: 2\n")) - - -class FilterSite(Filter): - def __init__(self, name, enforce): - Filter.__init__(self, name, _("Site") + (enforce and _( " (enforced)") or ""), None, ["site"], []) - self.enforce = enforce - - def visible(self): - return config.is_multisite() - - def display(self): - site_selector(html, "site", self.enforce) - - def filter(self, infoname): - if config.is_multisite(): - site = html.var("site") - if site: - return "Sites: %s\n" % (html.var("site", "")) - elif not self.enforce: - return "" - else: - return "Sites:\n" # no site at all - else: - return "" - - def heading_info(self, infoname): - current_value = html.var("site") - if current_value: - alias = config.site(current_value)["alias"] - return alias - - def variable_settings(self, row): - return [("site", row["site"])] - -declare_filter(500, FilterSite("siteopt", False), _("Optional selection of a site")) -declare_filter(501, FilterSite("site", True), _("Selection of site is enforced, use this filter for joining")) - -# Filter for setting time ranges, e.g. on last_state_change and last_check -# Variante eins: -# age [ ] seconds [ ] minutes [ ] hours [ ] days -# Variante zwei: (not implemented) -# since [2010-01-02] [00:00:00] -# Variante drei: (not implemented) -# from [2010-01-02] [00:00:00] until [2010-01-02] [00:00:00] -class FilterTime(Filter): - def __init__(self, info, name, title, column): - self.column = column - self.name = name - self.ranges = [ - (86400, _("days")), - (3600, _("hours")), - (60, _("min")), - (1, _("sec")), - ] - varnames = [ name + "_from", name + "_from_range", - name + "_until", name + "_until_range" ] - - Filter.__init__(self, name, title, info, varnames, [column]) - - def double_height(self): - return True - - def display(self): - choices = [ (str(sec), title + " " + _("ago")) for sec, title in self.ranges ] + \ - [ ("abs", _("Date (YYYY-MM-DD)")) ] - - html.write("") - for what, whatname in [ - ( "from", _("From") ), - ( "until", _("Until") ) ]: - varprefix = self.name + "_" + what - html.write("" % whatname) - html.write("") - html.write("
    %s:") - html.text_input(varprefix, style="width: 116px;") - html.write("") - html.select(varprefix + "_range", choices, "3600") - html.write("
    ") - - - def filter(self, infoname): - fromsecs, untilsecs = self.get_time_range() - filtertext = "" - if fromsecs != None: - filtertext += "Filter: %s >= %d\n" % (self.column, fromsecs) - if untilsecs != None: - filtertext += "Filter: %s <= %d\n" % (self.column, untilsecs) - return filtertext - - - # Extract timerange user has selected from HTML variables - def get_time_range(self): - range = [] - for what in [ "from", "until" ]: - varprefix = self.name + "_" + what - count = html.var(varprefix) - if count == "": - range.append(None) - else: - rangename = html.var(varprefix + "_range") - if rangename == "abs": - try: - range.append(time.mktime(time.strptime(count, "%Y-%m-%d"))) - except: - html.add_user_error(varprefix, _("Please enter the date in the format YYYY-MM-DD.")) - range.append(None) - else: - try: - count = int(count) - secs = count * int(rangename) - range.append(int(time.time()) - secs) - except: - range.append(None) - html.set_var(varprefix, "") - - return range - - # I'm not sure if this function is useful or ever been called. - # Problem is, that it is not clear wether to use "since" or "before" - # here. - # def variable_settings(self, row): - # vars = [] - # secs = int(time.time()) - row[self.column] - # for s, n in self.ranges[::-1]: - # v = secs / s - # secs -= v * s - # vars.append((self.name + "_" + n, secs)) - # return vars - - # def heading_info(self, infoname): - # return _("since the last couple of seconds") - -declare_filter(250, FilterTime("service", "svc_last_state_change", _("Last service state change"), "service_last_state_change")) -declare_filter(251, FilterTime("service", "svc_last_check", _("Last service check"), "service_last_check")) - -declare_filter(250, FilterTime("host", "host_last_state_change", _("Last host state change"), "host_last_state_change")) -declare_filter(251, FilterTime("host", "host_last_check", _("Last host check"), "host_last_check")) - -# _ -# | | ___ __ _ -# | | / _ \ / _` | -# | |__| (_) | (_| | -# |_____\___/ \__, | -# |___/ - -declare_filter(252, FilterTime("log", "logtime", _("Time of log entry"), "log_time")) -# INFO 0 // all messages not in any other class -# ALERT 1 // alerts: the change service/host state -# PROGRAM 2 // important programm events (restart, ...) -# NOTIFICATION 3 // host/service notifications -# PASSIVECHECK 4 // passive checks -# COMMAND 5 // external commands -# STATE 6 // initial or current states - -class FilterLogClass(Filter): - def __init__(self): - self.log_classes = [ - (0, _("Informational")), (1, _("Alerts")), (2, _("Program")), - (3, _("Notifications")), (4, _("Passive checks")), - (5, _("Commands")), (6, _("States")) ] - - Filter.__init__(self, "log_class", _("Logentry class"), - "log", [ "logclass%d" % l for l, c in self.log_classes ], []) - - def double_height(self): - return True - - def display(self): - if html.var("filled_in"): - defval = "" - else: - defval = "on" - html.write("") - if config.filter_columns == 1: - num_cols = 4 - else: - num_cols = 2 - col = 1 - for l, c in self.log_classes: - if col == 1: - html.write("") - html.write("") - if col == num_cols: - html.write("\n") - col = 1 - else: - col += 1 - if col < num_cols: - html.write("") - html.write("
    ") - html.checkbox("logclass%d" % l, defval) - html.write(c) - html.write("
    \n") - - def filter(self, infoname): - headers = [] - if html.var("filled_in"): - defval = "" - else: - defval = "on" - - for l, c in self.log_classes: - if html.var("logclass%d" % l, defval) == "on": - headers.append("Filter: class = %d\n" % l) - if len(headers) == 0: - return "Limit: 0\n" # no class allowed - else: - return "".join(headers) + ("Or: %d\n" % len(headers)) - -declare_filter(255, FilterLogClass()) -declare_filter(202, FilterText("log_plugin_output", _("Log: plugin output"), "log", "log_plugin_output", "log_plugin_output", "~~")) -declare_filter(260, FilterText("log_contact_name", _("Log: contact name"), "log", "log_contact_name", "log_contact_name", "="), - _("Exact match, used for linking")) - -class FilterLogState(Filter): - def __init__(self): - self._items = [ ("h0", "host", 0, _("Up")),("h1", "host", 1, _("Down")),("h2", "host", 2, _("Unreachable")), - ("s0", "service", 0, _("OK")), ("s1", "service", 1, _("Warning")), - ("s2", "service", 2, _("Critical")),("s3", "service", 3, _("Unknown")) ] - - Filter.__init__(self, "log_state", _("Type of alerts of hosts and services"), - "log", [ "logst_" + e[0] for e in self._items ], []) - - def double_height(self): - return True - - def display(self): - html.write("
    ") - html.begin_checkbox_group() - for varsuffix, what, state, text in self._items: - if state == 0: - html.write("%s:" % (_(what.title()))) - html.write("  ") - html.checkbox("logst_" + varsuffix, True, label=text) - if not html.mobile: - html.write("
    ") - if varsuffix == "h2": - html.write("
    ") - html.end_checkbox_group() - html.write("
    ") - - def filter(self, infoname): - headers = [] - for varsuffix, what, state, text in self._items: - if html.get_checkbox("logst_" + varsuffix) != False: # None = form not filled in = allow - headers.append("Filter: log_type ~ %s .*\nFilter: log_state = %d\nAnd: 2\n" % - (what.upper(), state)) - if len(headers) == 0: - return "Limit: 0\n" # no allowed state - elif len(headers) == len(self._items): - return "" # all allowed or form not filled in - else: - return "".join(headers) + ("Or: %d\n" % len(headers)) - -declare_filter(270, FilterLogState()) - -class BIServiceIsUsedFilter(FilterTristate): - def __init__(self): - FilterTristate.__init__(self, "aggr_service_used", _("Used in BI aggregate"), "service", None) - - def filter(self, infoname): - return "" - - def filter_table(self, rows): - current = self.tristate_value() - if current == -1: - return rows - new_rows = [] - for row in rows: - is_part = bi.is_part_of_aggregation( - "service", row["site"], row["host_name"], row["service_description"]) - if (is_part and current == 1) or \ - (not is_part and current == 0): - new_rows.append(row) - return new_rows - -declare_filter(300, BIServiceIsUsedFilter()) +# The filters are now defined in plugins/visuals/filters.py diff -Nru check-mk-1.2.2p3/plugins/views/inventory.py check-mk-1.2.6p12/plugins/views/inventory.py --- check-mk-1.2.2p3/plugins/views/inventory.py 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/plugins/views/inventory.py 2015-09-21 10:59:54.000000000 +0000 @@ -0,0 +1,697 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. 
+ +import inventory + +def paint_host_inventory(row, invpath): + invdata = inventory.get(row["host_inventory"], invpath) + if not invdata: + return "", "" # _("No inventory data available") + + hint = inv_display_hint(invpath) + if "paint_function" in hint: + return hint["paint_function"](invdata) + elif invdata == None: + return "", "" + elif type(invdata) in ( str, unicode ): + return "", invdata + elif type(invdata) in ( list, dict ): + return paint_inv_tree(row, invpath) + else: + return "number", str(invdata) + +def cmp_inventory_node(a, b, invpath): + val_a = inventory.get(a["host_inventory"], invpath) + val_b = inventory.get(b["host_inventory"], invpath) + return cmp(val_a, val_b) + +inv_filter_info = { + "bytes" : { "unit" : _("MB"), "scale" : 1024*1024 }, + "bytes_rounded" : { "unit" : _("MB"), "scale" : 1024*1024 }, + "hz" : { "unit" : _("MHz"), "scale" : 1000000 }, + "volt" : { "unit" : _("Volt") }, + "timestamp" : { "unit" : _("secs") }, +} + + +# Declare all three with one simple call (for simple data types) +def declare_inv_column(invpath, datatype, title, short = None): + if invpath == ".": + name = "inv" + else: + name = "inv_" + invpath.replace(":", "_").replace(".", "_").strip("_") + + # Declare column painter + multisite_painters[name] = { + "title" : invpath == "." and _("Inventory Tree") or (_("Inventory") + ": " + title), + "columns" : [], + "load_inv" : True, + "paint" : lambda row: paint_host_inventory(row, invpath), + "sorter" : name, + } + if short: + multisite_painters[name]["short"] = short + + # Sorters and Filters only for leaf nodes + if invpath[-1] not in ":.": + # Declare sorter. It will detect numbers automatically + multisite_sorters[name] = { + "title" : _("Inventory") + ": " + title, + "columns" : [], + "load_inv" : True, + "cmp" : lambda a, b: cmp_inventory_node(a, b, invpath), + } + + # Declare filter. + if datatype == "str": + visuals.declare_filter(800, visuals.FilterInvText(name, invpath, title)) + else: + filter_info = inv_filter_info.get(datatype, {}) + visuals.declare_filter(800, visuals.FilterInvFloat(name, invpath, title, + unit = filter_info.get("unit"), + scale = filter_info.get("scale", 1.0))) + + +# Tree painter +def paint_inv_tree(row, invpath = "."): + hostname = row["host_name"] + tree = row["host_inventory"] + node = inventory.get(tree, invpath) + html.plug() + render_inv_subtree_container(hostname, invpath, node) + code = html.drain() + html.unplug() + return "invtree", code + +def render_inv_subtree(hostname, invpath, node): + if type(node) in (dict, list): + render_inv_subtree_foldable(hostname, invpath, node) + else: + render_inv_subtree_leaf(hostname, invpath, node) + +def render_inv_subtree_foldable(hostname, invpath, node): + if node: # omit empty nodes completely + icon, title = inv_titleinfo(invpath, node) + + if "%d" in title: # Replace with list index + list_index = int(invpath.split(":")[-1].rstrip(".")) + 1 + title = title % list_index + + fetch_url = html.makeuri_contextless([("host", hostname), ("path", invpath)], "ajax_inv_render_tree.py") + if html.begin_foldable_container("inv_" + hostname, invpath, False, title, icon=icon, fetch_url=fetch_url): + # Render only if it is open. 
We'll get the stuff via ajax later if it's closed + render_inv_subtree_container(hostname, invpath, node) + html.end_foldable_container() + +def render_inv_subtree_container(hostname, invpath, node): + hint = inv_display_hint(invpath) + if "render" in hint: + hint["render"](hostname, invpath, node) + elif type(node) == dict: + render_inv_subtree_dict(hostname, invpath, node) + else: + render_inv_subtree_list(hostname, invpath, node) + +def render_inv_subtree_dict(hostname, invpath, node): + items = node.items() + items.sort() + + leaf_nodes = [] + for key, value in items: + if type(value) not in (list, dict): + invpath_sub = invpath + key + icon, title = inv_titleinfo(invpath_sub, value) + leaf_nodes.append((title, invpath_sub, value)) + + if leaf_nodes: + leaf_nodes.sort() + html.write("") + for title, invpath_sub, value in leaf_nodes: + html.write("") + html.write("
    %s" % (invpath_sub, title)) + render_inv_subtree(hostname, invpath_sub, value) + html.write("
    ") + + non_leaf_nodes = [ item for item in items if type(item[1]) in (list, dict) ] + non_leaf_nodes.sort() + for key, value in non_leaf_nodes: + invpath_sub = invpath + key + if type(value) == dict: + invpath_sub += "." + elif type(value) == list: + invpath_sub += ":" + render_inv_subtree_foldable(hostname, invpath_sub, value) + +def render_inv_subtree_list(hostname, invpath, node): + if not node: + return + for nr, value in enumerate(node): + invpath_sub = invpath + str(nr) + if type(value) == dict: + invpath_sub += "." + elif type(value) == list: + invpath_sub += ":" + render_inv_subtree(hostname, invpath_sub, value) + +def render_inv_subtree_leaf(hostname, invpath, node): + hint = inv_display_hint(invpath) + if "paint_function" in hint: + tdclass, code = hint["paint_function"](node) + html.write(code) + elif "render" in hint: + hint["render"](node) + elif type(node) == str: + try: + text = node.decode("utf-8") + except: + text = node + html.write(html.attrencode(text)) + elif type(node) == unicode: + html.write(html.attrencode(node)) + elif type(node) == int: + html.write(str(node)) + elif type(node) == float: + html.write("%.2f" % node) + elif node != None: + html.write(str(node)) + html.write("
    ") + + +def render_inv_dicttable(hostname, invpath, node): + hint = inv_display_hint(invpath) + keyorder = hint.get("keyorder", []) # well known keys + + # Add titles for those keys + titles = [] + for key in keyorder: + icon, title = inv_titleinfo(invpath + "0." + key, None) + titles.append((title, key)) + + # Determine *all* keys, in order to find unknown ones + keys = set([]) + for entry in node: + keys.update(entry.keys()) + + # Order not well-known keys alphabetically + extratitles = [] + for key in keys: + if key not in keyorder: + icon, title = inv_titleinfo(invpath + "0." + key, None) + extratitles.append((title, key)) + extratitles.sort() + titles += extratitles + + # We cannot use table here, since html.plug() does not work recursively + html.write('') + html.write('') + for title, key in titles: + html.write('' % title) + html.write('') + + for nr, entry in enumerate(node): + html.write('') + for title, key in titles: + value = entry.get(key) + invpath_sub = invpath + "%d.%s" % (nr, key) + if type(value) == dict: + invpath_sub += "." + elif type(value) == list: + invpath_sub += ":" + html.write('') + html.write('') + html.write('
    %s
    ') + render_inv_subtree(hostname, invpath_sub, value) + html.write('
') + + +# Convert .foo.bar:18.test to .foo.bar:*.test +def inv_display_hint(invpath): + r = regex(":[0-9]+") + invpath = r.sub(":*", invpath) + hint = inventory_displayhints.get(invpath, {}) + + # Convert paint type to paint function, for the convenience of the caller + if "paint" in hint: + paint_function_name = "inv_paint_" + hint["paint"] + hint["paint_function"] = globals()[paint_function_name] + + return hint +def inv_titleinfo(invpath, node): + hint = inv_display_hint(invpath) + icon = hint.get("icon") + if "title" in hint: + title = hint["title"] + if type(title) == type(lambda: None): + title = title(node) + else: + title = invpath.rstrip(".").split('.')[-1].split(':')[-1].replace("_", " ").title() + return icon, title + +# The titles of the last two path components of the node, e.g. "BIOS / Vendor" +def inv_titleinfo_long(invpath, node): + icon, last_title = inv_titleinfo(invpath, node) + parent = inventory.parent_path(invpath) + if parent: + icon, parent_title = inv_titleinfo(parent, None) + return parent_title + u" ➤ " + last_title + else: + return last_title + + +multisite_painters["inventory_tree"] = { + "title" : _("Hardware & Software Tree"), + "columns" : [], + "load_inv" : True, + "paint" : paint_inv_tree, +} + + +def inv_paint_hz(hz): + if hz == None: + return "", _("unknown") + + if hz < 10: + return "number", "%.2f" % hz + elif hz < 100: + return "number", "%.1f" % hz + elif hz < 1500: + return "number", "%.0f" % hz + elif hz < 1000000: + return "number", "%.1f kHz" % (hz / 1000) + elif hz < 1000000000: + return "number", "%.1f MHz" % (hz / 1000000) + else: + return "number", "%.2f GHz" % (hz / 1000000000) + +def inv_paint_bytes(b): + if b == None: + return "", _("unknown") + elif b == 0: + return "number", "0" + + units = [ 'B', 'kB', 'MB', 'GB', 'TB' ] + i = 0 + while b % 1024 == 0 and i+1 < len(units): + b = b / 1024 + i += 1 + return "number", "%d %s" % (b, units[i]) + +def inv_paint_count(b): + if b == None: + return "", "" + else: + return "number", str(b) + +def inv_paint_bytes_rounded(b): + if b == None: + return "", "" + elif b == 0: + return "number", "0" + + units = [ 'B', 'kB', 'MB', 'GB', 'TB' ] + i = len(units) - 1 + fac = 1024 ** (len(units) - 1) + while b < fac * 1.5 and i > 0: + i -= 1 + fac = fac / 1024.0 + + if i: + return "number", "%.2f %s" % (b / fac, units[i]) + else: + return "number", "%d %s" % (b, units[0]) + +def inv_paint_volt(volt): + if volt: + return "number", "%.1f V" % volt + else: + return "", "" + +def inv_paint_timestamp(stamp): + if stamp: + return "Unix time", "%i" % stamp + else: + return "", "" + +def inv_paint_date(stamp): + if stamp: + date_painted = time.strftime("%Y-%m-%d", time.localtime(stamp)) + return "Date", "%s" % date_painted + else: + return "", "" + +inventory_displayhints.update({ + "." : { "title" : _("Inventory") }, + ".hardware." : { "title" : _("Hardware"), "icon" : "hardware", }, + ".hardware.bios." : { "title" : _("BIOS"), }, + ".hardware.bios.vendor" : { "title" : _("Vendor"), }, + ".hardware.bios.version" : { "title" : _("Version"), }, + ".hardware.bios.date" : { "title" : _("Date"), "paint": "date"}, + ".hardware.chassis." : { "title" : _("Chassis"), }, + ".hardware.cpu."
: { "title" : _("Processor"), }, + ".hardware.cpu.model" : { "title" : _("Model"), "short" : _("CPU Model"), }, + ".hardware.cpu.cache_size" : { "title" : _("Cache Size"), "paint" : "bytes" }, + ".hardware.cpu.max_speed" : { "title" : _("Maximum Speed"), "paint" : "hz" }, + ".hardware.cpu.bus_speed" : { "title" : _("Bus Speed"), "paint" : "hz" }, + ".hardware.cpu.voltage" : { "title" : _("Voltage"), "paint" : "volt" }, + ".hardware.cpu.cores_per_cpu" : { "title" : _("Cores per CPU"), "paint" : "count" }, + ".hardware.cpu.threads_per_cpu" : { "title" : _("Hyperthreads per CPU"), "paint" : "count" }, + ".hardware.cpu.threads" : { "title" : _("Total Number of Hyperthreads"), "paint" : "count" }, + ".hardware.cpu.cpus" : { "title" : _("Total Number of CPUs"), "short" : _("CPUs"), "paint" : "count" }, + ".hardware.cpu.arch" : { "title" : _("CPU Architecture"), "short" : _("CPU Arch"), }, + ".hardware.cpu.cores" : { "title" : _("Total Number of Cores"), "short" : _("Cores"), "paint" : "count" }, + ".hardware.memory." : { "title" : _("Memory (RAM)"), }, + ".hardware.memory.total_ram_usable" : { "title" : _("Total usable RAM"), "paint" : "bytes_rounded" }, + ".hardware.memory.total_swap" : { "title" : _("Total swap space"), "paint" : "bytes_rounded" }, + ".hardware.memory.total_vmalloc" : { "title" : _("Virtual addresses for mapping"), "paint" : "bytes_rounded" }, + ".hardware.memory.arrays:" : { "title" : _("Arrays (Controllers)") }, + ".hardware.memory.arrays:*." : { "title" : _("Controller %d") }, + ".hardware.memory.arrays:*.devices:" : { "title" : _("Devices"), "render" : render_inv_dicttable, + "keyorder" : [ "locator", "bank_locator", "type", "form_factor", "speed", + "data_width", "total_width", "manufacturer", "serial" ]}, + ".hardware.memory.arrays:*.maximum_capacity" : { "title" : _("Maximum Capacity"), "paint" : "bytes" }, + ".hardware.memory.arrays:*.devices:*." : { "title" : lambda v: v["locator"], }, + ".hardware.memory.arrays:*.devices:*.size" : { "title" : _("Size"), "paint" : "bytes", }, + ".hardware.memory.arrays:*.devices:*.speed" : { "title" : _("Speed"), "paint" : "hz", }, + ".hardware.system." : { "title" : _("System") }, + ".hardware.storage." : { "title" : _("Storage") }, + ".hardware.storage.disks:" : { "title" : _("Block Devices") }, + ".hardware.storage.disks:*." : { "title" : _("Block Device %d") }, + ".hardware.storage.disks:*.signature" : { "title" : _("Disk ID") }, + ".hardware.storage.disks:*.vendor" : { "title" : _("Vendor") }, + ".hardware.storage.disks:*.local" : { "title" : _("Local") }, + ".hardware.storage.disks:*.bus" : { "title" : _("Bus") }, + ".hardware.storage.disks:*.product" : { "title" : _("Product") }, + ".hardware.storage.disks:*.fsnode" : { "title" : _("Filesystem Node") }, + ".hardware.storage.disks:*.serial" : { "title" : _("Serial Number") }, + ".hardware.storage.disks:*.size" : { "title" : _("Size") }, + ".hardware.storage.disks:*.type" : { "title" : _("Type") }, + ".hardware.video:" : { "title" : _("Graphic Cards") }, + ".hardware.video:*." 
: { "title" : _("Graphic Card %d") }, + ".hardware.video:*.name" : { "title" : _("Graphic Card Name"), "short" : _("Card Name") }, + ".hardware.video:*.subsystem" : { "title" : _("Vendor and Device ID"), "short" : _("Vendor") }, + ".hardware.video:*.driver" : { "title" : _("Driver"), "short" : _("Driver") }, + ".hardware.video:*.driver_date" : { "title" : _("Driver Date"), "short" : _("Driver Date") }, + ".hardware.video:*.driver_version" : { "title" : _("Driver Version"), "short" : _("Driver Version") }, + + ".software." : { "title" : _("Software"), "icon" : "software" }, + ".software.os." : { "title" : _("Operating System") }, + ".software.os.name" : { "title" : _("Name"), "short" : _("Operating System") }, + ".software.os.version" : { "title" : _("Version"), }, + ".software.os.vendor" : { "title" : _("Vendor"), }, + ".software.os.type" : { "title" : _("Type"), }, # e.g. "linux" + ".software.os.install_date" : { "title" : _("Install Date"), "paint" : "date" }, + ".software.os.kernel_version" : { "title" : _("Kernel Version"), "short" : _("Kernel") }, + ".software.os.arch" : { "title" : _("Kernel Architecture"), "short" : _("Architecture") }, + ".software.os.service_pack" : { "title" : _("Service Pack"), "short" : _("Service Pack") }, + ".software.os.service_packs:" : { "title" : _("Service Packs"), "render" : render_inv_dicttable, + "keyorder" : [ "name" ] }, + ".software.packages:" : { "title" : _("Packages"), "icon" : "packages", "render": render_inv_dicttable, + "keyorder" : [ "name", "version", "arch", "package_type", "summary"] }, + ".software.packages:*.name" : { "title" : _("Name"), }, + ".software.packages:*.arch" : { "title" : _("Architecture"), }, + ".software.packages:*.package_type" : { "title" : _("Type"), }, + ".software.packages:*.summary" : { "title" : _("Description"), }, + ".software.packages:*.version" : { "title" : _("Version"), }, + ".software.packages:*.vendor" : { "title" : _("Publisher"), }, + ".software.packages:*.package_version" : { "title" : _("Package Version"), }, + ".software.packages:*.install_date" : { "title" : _("Install Date"), "paint" : "date"}, + ".software.packages:*.size" : { "title" : _("Size"), "paint" : "count" }, + ".software.packages:*.path" : { "title" : _("Path"), }, +}) + +# TEST: create painters for node with a display hint +for invpath, hint in inventory_displayhints.items(): + if "*" not in invpath: + datatype = hint.get("paint", "str") + long_title = inv_titleinfo_long(invpath, None) + declare_inv_column(invpath, datatype, long_title, hint.get("short", long_title)) + +# View for Inventory tree of one host +multisite_builtin_views["inv_host"] = { + # General options + 'datasource' : 'hosts', + 'topic' : _('Inventory'), + 'title' : _('Inventory of host'), + 'linktitle' : _('Inventory'), + 'description' : _('The complete hardware- and software inventory of a host'), + 'icon' : 'inventory', + 'hidebutton' : False, + 'public' : True, + 'hidden' : True, + + # Layout options + 'layout' : 'dataset', + 'num_columns' : 1, + 'browser_reload' : 0, + 'column_headers' : 'pergroup', + 'user_sortable' : False, + 'play_sounds' : False, + 'force_checkboxes' : False, + 'mustsearch' : False, + 'mobile' : False, + + # Columns + 'group_painters' : [], + 'painters' : [ + ('host', 'host', ''), + ('inv', None, ''), + ], + + # Filters + 'hard_filters' : [], + 'hard_filtervars' : [], + 'hide_filters' : ['host', 'site'], + 'show_filters' : [], + 'sorters' : [], +} + +# View with table of all hosts, with some basic information 
+multisite_builtin_views["inv_hosts_cpu"] = { + # General options + 'datasource' : 'hosts', + 'topic' : _('Inventory'), + 'title' : _('CPU Related Inventory of all Hosts'), + 'linktitle' : _('CPU Inv. (all Hosts)'), + 'description' : _('A list of all hosts with some CPU related inventory data'), + 'public' : True, + 'hidden' : False, + + # Layout options + 'layout' : 'table', + 'num_columns' : 1, + 'browser_reload' : 0, + 'column_headers' : 'pergroup', + 'user_sortable' : True, + 'play_sounds' : False, + 'force_checkboxes' : False, + 'mustsearch' : False, + 'mobile' : False, + + # Columns + 'group_painters' : [], + 'painters' : [ + ('host', 'inv_host', ''), + ('inv_software_os_name', None, ''), + ('inv_hardware_cpu_cpus', None, ''), + ('inv_hardware_cpu_cores', None, ''), + ('inv_hardware_cpu_max_speed', None, ''), + ('perfometer', None, '', 'CPU load'), + ('perfometer', None, '', 'CPU utilization'), + + ], + + # Filters + 'hard_filters' : [ + 'has_inv' + ], + 'hard_filtervars' : [ + ('is_has_inv', '1' ), + ], + 'hide_filters' : [], + 'show_filters' : [ + 'inv_hardware_cpu_cpus', + 'inv_hardware_cpu_cores', + 'inv_hardware_cpu_max_speed', + ], + 'sorters' : [], +} + + +def inv_software_table(columns, add_headers, only_sites, limit, filters): + # Create livestatus filter for filtering out hosts + filter_code = "" + for filt in filters: + header = filt.filter("invswpacs") + if not header.startswith("Sites:"): + filter_code += header + host_columns = list(set([ "host_name" ] + filter(lambda c: c.startswith("host_"), columns))) + + html.live.set_only_sites(only_sites) + html.live.set_prepend_site(True) + + query = "GET hosts\n" + query += "Columns: " + (" ".join(host_columns)) + "\n" + query += filter_code + + if config.debug_livestatus_queries \ + and html.output_format == "html" and 'W' in html.display_options: + html.write('
<div class="livestatus message">' + '<tt>%s
</tt></div>\n' % (query.replace('\n', '<br>
    \n'))) + + html.live.set_only_sites(only_sites) + html.live.set_prepend_site(True) + + data = html.live.query(query) + + html.live.set_prepend_site(False) + html.live.set_only_sites(None) + + headers = [ "site" ] + host_columns + + # Now create big table of all software packages of these hosts + rows = [] + hostnames = [ row[1] for row in data ] + for row in data: + site = row[0] + hostname = row[1] + tree = inventory.host(hostname) + hostrow = dict(zip(headers, row)) + packages = inventory.get(tree, ".software.packages:") + for package in packages: + newrow = {} + for key, value in package.items(): + newrow["invswpac_" + key] = value + newrow.update(hostrow) + rows.append(newrow) + + return rows + +def declare_swpacs_columns(name, title, sortfunc): + column = "invswpac_" + name + multisite_painters[column] = { + "title" : _("Package") + " " + title, + "short" : title, + "columns" : [ "invswpac_name" ], + "paint" : lambda row: ("", str(row.get(column))), + "sorter" : column, + } + multisite_sorters[column] = { + "title" : _("Inventory") + ": " + title, + "columns" : [], + "cmp" : lambda a, b: sortfunc(a.get(column), b.get(column)) + } + + if sortfunc == visuals.cmp_version: + visuals.declare_filter(801, visuals.FilterSWPacsVersion(name, _("Software Package") + ": " + title)) + else: + visuals.declare_filter(800, visuals.FilterSWPacsText(name, _("Software Package") + ": " + title)) + + +for name, title, sortfunc in [ + ( "name", _("Name"), cmp ), + ( "summary", _("Summary"), cmp ), + ( "arch", _("CPU Architecture"), cmp ), + ( "package_type", _("Type"), cmp ), + ( "package_version", _("Package Version"), visuals.cmp_version ), + ( "version", _("Version"), visuals.cmp_version ), + ( "install_date", _("Install Date"), cmp ), + ]: + declare_swpacs_columns(name, title, sortfunc) + + + + +multisite_datasources["invswpacs"] = { + "title" : _("Inventory: Software Packages"), + "table" : inv_software_table, + "infos" : [ "host", "invswpac" ], + "keys" : [], + "idkeys" : [], +} + + +# View for searching for a certain software +multisite_builtin_views["inv_swpacs"] = { + # General options + 'datasource' : 'invswpacs', + 'topic' : _('Inventory'), + 'title' : _('Software Package Search'), + 'description' : _('Search for software packages installed on hosts'), + 'public' : True, + 'hidden' : False, + + # Layout options + 'layout' : 'table', + 'num_columns' : 1, + 'browser_reload' : 0, + 'column_headers' : 'pergroup', + 'user_sortable' : True, + 'play_sounds' : False, + 'force_checkboxes' : False, + 'mustsearch' : True, + 'mobile' : False, + + # Columns + 'group_painters' : [], + 'painters' : [ + ('host', 'inv_host', ''), + ('invswpac_name', '', ''), + ('invswpac_summary', '', ''), + ('invswpac_version', '', ''), + ('invswpac_package_version', '', ''), + ('invswpac_arch', '', ''), + ('invswpac_package_type', '', ''), + ], + + # Filters + 'show_filters' : [ + 'siteopt', + 'hostregex', + 'hostgroups', + 'opthostgroup', + 'opthost_contactgroup', + 'host_address', + 'host_tags', + 'hostalias', + 'host_favorites', + 'invswpac', + 'invswpac_name', + 'invswpac_summary', + 'invswpac_arch', + 'invswpac_package_type', + 'invswpac_version', + 'invswpac_package_version', + ], + 'hard_filters' : [ + 'has_inv' + ], + 'hard_filtervars' : [ + ('is_has_inv', '1' ), + ], + 'hide_filters' : [], + 'sorters' : [], +} diff -Nru check-mk-1.2.2p3/plugins/views/layouts.py check-mk-1.2.6p12/plugins/views/layouts.py --- check-mk-1.2.2p3/plugins/views/layouts.py 2013-11-05 09:23:09.000000000 +0000 +++ 
check-mk-1.2.6p12/plugins/views/layouts.py 2015-09-21 10:59:54.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. @@ -184,7 +184,7 @@ html.write("\n") column_headers = view.get("column_headers") - if column_headers: + if column_headers != "off": show_header_line() visible_row_number = 0 @@ -200,11 +200,14 @@ else: trclass = "odd" # state = row.get("service_state", row.get("aggr_state")) - state = row.get("service_state") + state = saveint(row.get("service_state")) if state == None: state = saveint(row.get("host_state", 0)) if state > 0: state +=1 # 1 is critical for hosts - html.write('' % (trclass, state)) + stale = '' + if is_stale(row): + stale = ' stale' + html.write('' % (trclass, state, stale)) if show_checkboxes: render_checkbox_td(view, row, len(painters)) for p in painters: diff -Nru check-mk-1.2.2p3/plugins/views/mobile.py check-mk-1.2.6p12/plugins/views/mobile.py --- check-mk-1.2.2p3/plugins/views/mobile.py 2013-11-05 09:23:09.000000000 +0000 +++ check-mk-1.2.6p12/plugins/views/mobile.py 2015-06-24 09:48:38.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. @@ -392,7 +392,7 @@ ('hst1', 'on'), ('hst2', 'on'), ('hstp', ''), - ('is_host_acknowledged', '0'), + ('is_host_acknowledged', '-1'), ('host', ''), ('is_summary_host', '0'), ('opthostgroup', '') @@ -707,7 +707,6 @@ 'title': _('Service Notifications'), 'topic': _('Other')}), - }) diff -Nru check-mk-1.2.2p3/plugins/views/painters.py check-mk-1.2.6p12/plugins/views/painters.py --- check-mk-1.2.2p3/plugins/views/painters.py 2013-11-05 09:23:09.000000000 +0000 +++ check-mk-1.2.6p12/plugins/views/painters.py 2015-09-21 10:59:54.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. 
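The layouts.py hunk above starts tagging rendered rows with an extra 'stale' CSS class, and many painter hunks further down (paint_stalified, svc_is_stale, host_staleness and friends) build on the same test. The underlying is_stale() helper is not part of this patch; here is a minimal sketch of what it does, assuming Multisite's staleness semantics (staleness is roughly the time since the last check divided by the check interval) and treating the threshold value as an assumption:

    # Sketch of is_stale(); the 1.5 default and the exact column fallback
    # order are assumptions, not taken from this patch.
    staleness_threshold = 1.5   # stale once a check is ~50% overdue

    def is_stale(row):
        # Service rows carry service_staleness, host rows host_staleness.
        staleness = row.get("service_staleness", row.get("host_staleness", 0))
        return staleness >= staleness_threshold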
@@ -74,44 +74,42 @@ import bi # needed for aggregation icon -multisite_painter_options["pnpview"] = { - "title" : _("PNP Timerange"), - "default" : "1", - "values" : [ ("0", _("4 Hours")), ("1", _("25 Hours")), - ("2", _("One Week")), ("3", _("One Month")), - ("4", _("One Year")), ("", _("All")) ] +multisite_painter_options["pnp_timerange"] = { + 'valuespec' : PNPTimerange( + title = _("PNP Timerange"), + default_value = None, + include_time = True, + ) } multisite_painter_options["ts_format"] = { - "title" : _("Time stamp format"), - "default" : config.default_ts_format, - "values" : [ - ("mixed", _("Mixed")), - ("abs", _("Absolute")), - ("rel", _("Relative")), - ("both", _("Both")), - ] + 'valuespec': DropdownChoice( + title = _("Time stamp format"), + default_value = config.default_ts_format, + choices = [ + ("mixed", _("Mixed")), + ("abs", _("Absolute")), + ("rel", _("Relative")), + ("both", _("Both")), + ("epoch", _("Unix Timestamp (Epoch)")), + ], + ) } multisite_painter_options["ts_date"] = { - "title" : _("Date format"), - "default" : "%Y-%m-%d", - "values" : [ ("%Y-%m-%d", "1970-12-18"), - ("%d.%m.%Y", "18.12.1970"), - ("%m/%d/%Y", "12/18/1970"), - ("%d.%m.", "18.12."), - ("%m/%d", "12/18") ] + 'valuespec' : DateFormat(), } # This helper function returns the value of the given custom var -def paint_custom_host_var(what, row): - custom_vars = dict(zip(row["host_custom_variable_names"], - row["host_custom_variable_values"])) - - if what in custom_vars: - return what, custom_vars[what] - return what, "" - +def paint_custom_var(what, key, row): + if what: + what += '_' + custom_vars = dict(zip(row[what + "custom_variable_names"], + row[what + "custom_variable_values"])) + + if key in custom_vars: + return key, custom_vars[key] + return key, "" # ___ # |_ _|___ ___ _ __ ___ @@ -153,7 +151,23 @@ except Exception, e: output += 'Exception in icon plugin!
    ' + traceback.format_exc() - return "icons", output + if html.output_format == "html": + return "icons", output + else: + # Strip icon names out of HTML code that is generated by htmllib.render_icon() + icon_rename_regex = regex(']*>') + icons = [] + for n in icon_rename_regex.findall(output): + if n.startswith("images/"): + n = n[7:] + if n.startswith("icon_"): + n = n[5:] + if n.endswith(".png"): + n = n[:-4] + elif n.endswith(".gif"): + n = n[:-4] + icons.append(n.encode('utf-8')) + return "icons", " ".join(icons) def iconpainter_columns(what): cols = set(['site', @@ -180,15 +194,19 @@ multisite_painters["service_icons"] = { "title": _("Service icons"), "short": _("Icons"), + "printable" : False, # does not contain printable text "columns": iconpainter_columns("service"), - "paint": lambda row: paint_icons("service", row) + "groupby" : lambda row: "", # Do not account for in grouping + "paint": lambda row: paint_icons("service", row) } multisite_painters["host_icons"] = { "title": _("Host icons"), "short": _("Icons"), + "printable" : False, # does not contain printable text "columns": iconpainter_columns("host"), - "paint": lambda row: paint_icons("host", row) + "groupby" : lambda row: "", # Do not account for in grouping + "paint": lambda row: paint_icons("host", row) } # ----------------------------------------------------------------------- @@ -197,26 +215,30 @@ # We need to use the Nagios-URL as configured # in sites. baseurl = config.site(row["site"])["url_prefix"] + "nagios/cgi-bin" - url = baseurl + "/extinfo.cgi?host=" + htmllib.urlencode(row["host_name"]) + url = baseurl + "/extinfo.cgi?host=" + html.urlencode(row["host_name"]) svc = row.get("service_description") if svc: - url += "&type=2&service=" + htmllib.urlencode(svc) + url += "&type=2&service=" + html.urlencode(svc) what = "service" else: url += "&type=1" what = "host" - return "singleicon", "" % (url, _('Show this %s in Nagios') % what) + return "singleicon", "%s" % \ + (url, html.render_icon('nagios', _('Show this %s in Nagios') % what)) -def paint_age(timestamp, has_been_checked, bold_if_younger_than, mode=None): +def paint_age(timestamp, has_been_checked, bold_if_younger_than, mode=None, what='past'): if not has_been_checked: return "age", "-" if mode == None: mode = get_painter_option("ts_format") + if mode == "epoch": + return "", str(int(timestamp)) + if mode == "both": - css, h1 = paint_age(timestamp, has_been_checked, bold_if_younger_than, "abs") - css, h2 = paint_age(timestamp, has_been_checked, bold_if_younger_than, "rel") + css, h1 = paint_age(timestamp, has_been_checked, bold_if_younger_than, "abs", what=what) + css, h2 = paint_age(timestamp, has_been_checked, bold_if_younger_than, "rel", what=what) return css, "%s - %s" % (h1, h2) dateformat = get_painter_option("ts_date") @@ -225,6 +247,12 @@ (mode == "mixed" and age >= 48 * 3600 or age < -48 * 3600): return "age", time.strftime(dateformat + " %H:%M:%S", time.localtime(timestamp)) + warn_txt = '' + if what == 'future' and age > 0: + warn_txt = ' %s' % _('in the past!') + elif what == 'past' and age < 0: + warn_txt = ' %s' % _('in the future!') + # Time delta less than two days => make relative time if age < 0: age = -age @@ -235,14 +263,15 @@ age_class = "age recent" else: age_class = "age" - return age_class, prefix + html.age_text(age) + + return age_class, prefix + html.age_text(age) + warn_txt def paint_future_time(timestamp): if timestamp <= 0: return "", "-" else: - return paint_age(timestamp, True, 0) + return paint_age(timestamp, True, 0, 
what='future') def paint_day(timestamp): return "", time.strftime("%A, %Y-%m-%d", time.localtime(timestamp)) @@ -254,7 +283,7 @@ return None, "" multisite_painters["sitename_plain"] = { - "title" : _("Site id"), + "title" : _("Site ID"), "short" : _("Site"), "columns" : ["site"], "paint" : lambda row: (None, row["site"]), } @@ -267,30 +296,41 @@ "paint" : lambda row: (None, config.site(row["site"])["alias"]), } -# ____ _ -# / ___| ___ _ ____ _(_) ___ ___ ___ -# \___ \ / _ \ '__\ \ / / |/ __/ _ \/ __| -# ___) | __/ | \ V /| | (_| __/\__ \ -# |____/ \___|_| \_/ |_|\___\___||___/ -# +#. +# .--Services------------------------------------------------------------. +# | ____ _ | +# | / ___| ___ _ ____ _(_) ___ ___ ___ | +# | \___ \ / _ \ '__\ \ / / |/ __/ _ \/ __| | +# | ___) | __/ | \ V /| | (_| __/\__ \ | +# | |____/ \___|_| \_/ |_|\___\___||___/ | +# | | +# +----------------------------------------------------------------------+ +# | Painters for services | +# '----------------------------------------------------------------------' -def paint_service_state_short(row ): +def paint_service_state_short(row): if row["service_has_been_checked"] == 1: - state = row["service_state"] - name = nagios_short_state_names[row["service_state"]] + state = str(row["service_state"]) + name = nagios_short_state_names.get(row["service_state"], "") else: state = "p" - name = "PEND" + name = _("PEND") + if is_stale(row): + state = str(state) + " stale" return "state svcstate state%s" % state, name def paint_host_state_short(row): # return None, str(row) if row["host_has_been_checked"] == 1: state = row["host_state"] - name = nagios_short_host_state_names[row["host_state"]] + # A state of 3 is sent by livestatus in cases where no normal state + # information is available, e.g. for "DOWNTIMESTOPPED (UP)" + name = nagios_short_host_state_names.get(row["host_state"], "") else: state = "p" - name = "PEND" + name = _("PEND") + if is_stale(row): + state = str(state) + " stale" return "state hstate hstate%s" % state, name multisite_painters["service_nagios_link"] = { @@ -317,25 +357,24 @@ "sorter" : 'site', } - multisite_painters["svc_plugin_output"] = { "title" : _("Output of check plugin"), "short" : _("Status detail"), "columns" : ["service_plugin_output"], - "paint" : lambda row: ("", format_plugin_output(row["service_plugin_output"], row)), + "paint" : lambda row: paint_stalified(row, format_plugin_output(row["service_plugin_output"], row)), "sorter" : 'svcoutput', } multisite_painters["svc_long_plugin_output"] = { "title" : _("Long output of check plugin (multiline)"), "short" : _("Status detail"), "columns" : ["service_long_plugin_output"], - "paint" : lambda row: (None, row["service_long_plugin_output"].replace('\\n', '<br>')),
    ')), + "paint" : lambda row: paint_stalified(row, format_plugin_output(row["service_long_plugin_output"], row).replace('\\n', '
    ').replace('\n', '
    ')), } multisite_painters["svc_perf_data"] = { "title" : _("Service performance data"), "short" : _("Perfdata"), "columns" : ["service_perf_data"], - "paint" : lambda row: (None, row["service_perf_data"]) + "paint" : lambda row: paint_stalified(row, row["service_perf_data"]) } def get_perfdata_nth_value(row, n, remove_unit = False): @@ -357,7 +396,7 @@ return str(e) def paint_perfdata_nth_value(row, n): - return "", get_perfdata_nth_value(row, n) + return paint_stalified(row, get_perfdata_nth_value(row, n)) multisite_painters["svc_perf_val01"] = { "title" : _("Service performance data - value number 1"), @@ -440,14 +479,14 @@ "title" : _("Service check command"), "short" : _("Check command"), "columns" : ["service_check_command"], - "paint" : lambda row: (None, row["service_check_command"]), + "paint" : lambda row: (None, html.attrencode(row["service_check_command"])), } multisite_painters["svc_check_command_expanded"] = { "title" : _("Service check command expanded"), "short" : _("Check command expanded"), "columns" : ["service_check_command_expanded"], - "paint" : lambda row: (None, row["service_check_command_expanded"]), + "paint" : lambda row: (None, html.attrencode(row["service_check_command_expanded"])), } multisite_painters["svc_contacts"] = { @@ -473,6 +512,14 @@ "sorter" : 'svcdescr', } +multisite_painters["service_display_name"] = { + "title" : _("Service alternative display name"), + "short" : _("Display name"), + "columns" : ["service_display_name"], + "paint" : lambda row: (None, row["service_display_name"]), + "sorter" : 'svcdispname', +} + multisite_painters["svc_state_age"] = { "title" : _("The age of the current service state"), "short" : _("Age"), @@ -481,12 +528,19 @@ "paint" : lambda row: paint_age(row["service_last_state_change"], row["service_has_been_checked"] == 1, 60 * 10), "sorter" : "stateage", } + +def paint_checked(what, row): + css, td = paint_age(row[what + "_last_check"], row[what + "_has_been_checked"] == 1, 0) + if is_stale(row): + css += " staletime" + return css, td + multisite_painters["svc_check_age"] = { "title" : _("The time since the last check of the service"), "short" : _("Checked"), "columns" : [ "service_has_been_checked", "service_last_check" ], "options" : [ "ts_format", "ts_date" ], - "paint" : lambda row: paint_age(row["service_last_check"], row["service_has_been_checked"] == 1, 0), + "paint" : lambda row: paint_checked("service", row), } multisite_painters["svc_next_check"] = { @@ -496,6 +550,13 @@ "paint" : lambda row: paint_future_time(row["service_next_check"]), } +multisite_painters["svc_last_time_ok"] = { + "title" : _("The last time the service was OK"), + "short" : _("Last OK"), + "columns" : [ "service_last_time_ok", "service_has_been_checked" ], + "paint" : lambda row: paint_age(row["service_last_time_ok"], row["service_has_been_checked"] == 1, 60 * 10), +} + multisite_painters["svc_next_notification"] = { "title" : _("The time of the next service notification"), "short" : _("Next notification"), @@ -511,6 +572,14 @@ "paint" : lambda row: paint_age(row["service_last_notification"], row["service_last_notification"], 0), } +multisite_painters['svc_notification_number'] = { + "title" : _("Service notification number"), + "short" : _("N#"), + "columns" : [ "service_current_notification_number" ], + "paint" : lambda row: ("", str(row["service_current_notification_number"])), +} + + multisite_painters["svc_check_latency"] = { "title" : _("Service check latency"), "short" : _("Latency"), @@ -561,7 +630,7 @@ def paint_nagiosflag(row, 
field, bold_if_nonzero): value = row[field] - yesno = {True:"yes", False:"no"}[value != 0] + yesno = {True: _("yes"), False: _("no")}[value != 0] if (value != 0) == bold_if_nonzero: return "badflag", yesno else: @@ -631,24 +700,36 @@ with_link = 'true' else: with_link = 'false' - pnpview = get_painter_option("pnpview") + + pnp_timerange = get_painter_option("pnp_timerange") + + pnpview = '1' + from_ts, to_ts = 'null', 'null' + if pnp_timerange != None: + if pnp_timerange[0] != 'pnp_view': + vs = multisite_painter_options["pnp_timerange"]['valuespec'] + from_ts, to_ts = map(int, vs.compute_range(pnp_timerange)[0]) + else: + pnpview = pnp_timerange[1] + return "pnpgraph", "
    " \ - "" % \ + "" % \ (container_id, container_id, sitename, host, service, pnpview, - defaults.url_prefix + "check_mk/", pnp_url, with_link) + defaults.url_prefix + "check_mk/", pnp_url, with_link, _('Add this graph to...'), from_ts, to_ts) multisite_painters["svc_pnpgraph" ] = { "title" : _("PNP service graph"), "short" : _("PNP graph"), "columns" : [ "host_name", "service_description" ], - "options" : [ "pnpview" ], + "options" : [ 'pnp_timerange' ], "paint" : lambda row: paint_pnpgraph(row["site"], row["host_name"], row["service_description"]), + "printable" : False, } def paint_check_manpage(row): command = row["service_check_command"] if not command.startswith("check_mk-"): - return "", "" + return "", "" checktype = command[9:] # Honor man-pages in OMD's local structure p = None @@ -659,23 +740,23 @@ if not p: p = defaults.check_manpages_dir + "/" + checktype if os.path.isfile(p): - description = None - for line in file(p): - line = line.rstrip() - if line == "description:": - description = "" - elif line.strip() == "" and description != None: - description += "
<br><br>" - elif not line.startswith(' ') and line[-1] == ':': - break - elif description != None: - description += " " + line - if not description: - return "", "" - else: - return "", description.replace("{", "").replace("}", "") + description = None + for line in file(p): + line = line.rstrip() + if line == "description:": + description = "" + elif line.strip() == "" and description != None: + description += "<br><br>
    " + elif not line.startswith(' ') and line[-1] == ':': + break + elif description != None: + description += " " + line.replace("<", "<").replace(">", ">") + if not description: + return "", "" + else: + return "", description.replace("{", "").replace("}", "") else: - return "", _("Man-Page: %s not found.") % p + return "", _("Man-Page: %s not found.") % p multisite_painters["check_manpage"] = { "title" : _("Check manual (for Check_MK based checks)"), @@ -686,7 +767,7 @@ def paint_comments(prefix, row): comments = row[ prefix + "comments_with_info"] - text = ", ".join(["%s: %s" % (a, htmllib.attrencode(c)) for (id, a, c) in comments ]) + text = ", ".join(["%s: %s" % (a, html.attrencode(c)) for (id, a, c) in comments ]) return "", text multisite_painters["svc_comments"] = { @@ -718,10 +799,10 @@ matching.append(dir + "/" + pattern) return matching -def paint_custom_notes(row): +def paint_custom_notes(what, row): host = row["host_name"] svc = row.get("service_description") - if svc: + if what == "service": notes_dir = defaults.default_config_dir + "/notes/services" dirs = notes_matching_pattern_entries([notes_dir], host) item = svc @@ -756,15 +837,67 @@ "title" : _("Custom services notes"), "short" : _("Notes"), "columns" : [ "host_name", "host_address", "service_description", "service_plugin_output" ], - "paint" : paint_custom_notes, + "paint" : lambda row: paint_custom_notes("service", row), } -# _ _ _ -# | | | | ___ ___| |_ ___ -# | |_| |/ _ \/ __| __/ __| -# | _ | (_) \__ \ |_\__ \ -# |_| |_|\___/|___/\__|___/ -# +multisite_painters["svc_staleness"] = { + "title" : _("Service staleness value"), + "short" : _("Staleness"), + "columns" : ["service_staleness"], + "paint" : lambda row: ('', '%0.2f' % row.get('service_staleness', 0)), +} + +def paint_is_stale(row): + if is_stale(row): + return "badflag", _('yes') + else: + return "goodflag", _('no') + +multisite_painters["svc_is_stale"] = { + "title" : _("Service is stale"), + "short" : _("Stale"), + "columns" : ["service_staleness"], + "paint" : paint_is_stale, + "sorter" : 'svc_staleness', +} + +multisite_painters["svc_servicelevel"] = { + "title" : _("Service service level"), + "short" : _("Service Level"), + "columns" : [ "service_custom_variable_names", "service_custom_variable_values" ], + "paint" : lambda row: paint_custom_var('service', 'EC_SL', row), + "sorter" : 'servicelevel', +} + +def paint_custom_vars(what, row, blacklist=[]): + items = row[what + "_custom_variables"].items() + items.sort() + code = '' + for varname, value in items: + if varname not in blacklist: + code += '' % (varname, value) + code += '
    %s%s
    ' + return "", code + +multisite_painters["svc_custom_vars"] = { + "title" : _("Service custom variables"), + "columns" : [ "service_custom_variables" ], + "paint" : lambda row: paint_custom_vars('service', row), +} + + +#. +# .--Hosts---------------------------------------------------------------. +# | _ _ _ | +# | | | | | ___ ___| |_ ___ | +# | | |_| |/ _ \/ __| __/ __| | +# | | _ | (_) \__ \ |_\__ \ | +# | |_| |_|\___/|___/\__|___/ | +# | | +# +----------------------------------------------------------------------+ +# | Painters for hosts | +# '----------------------------------------------------------------------' + multisite_painters["host_state"] = { "title" : _("Host state"), @@ -823,7 +956,7 @@ "short" : _("Checked"), "columns" : [ "host_has_been_checked", "host_last_check" ], "options" : [ "ts_format", "ts_date" ], - "paint" : lambda row: paint_age(row["host_last_check"], row["host_has_been_checked"] == 1, 0), + "paint" : lambda row: paint_checked("host", row), } multisite_painters["host_next_check"] = { @@ -910,6 +1043,13 @@ "paint" : lambda row: (None, row["host_notification_period"]), } +multisite_painters['host_notification_number'] = { + "title" : _("Host notification number"), + "short" : _("N#"), + "columns" : [ "host_current_notification_number" ], + "paint" : lambda row: ("", str(row["host_current_notification_number"])), +} + multisite_painters["host_flapping"] = { "title" : _("Host is flapping"), "short" : _("Flap"), @@ -923,13 +1063,20 @@ "columns" : [ "host_active_checks_enabled" ], "paint" : lambda row: paint_nagiosflag(row, "host_active_checks_enabled", None), } +multisite_painters["host_notifications_enabled"] = { + "title" : _("Host notifications enabled"), + "short" : _("Notif."), + "columns" : [ "host_notifications_enabled" ], + "paint" : lambda row: paint_nagiosflag(row, "host_notifications_enabled", False), +} multisite_painters["host_pnpgraph" ] = { "title" : _("PNP host graph"), "short" : _("PNP graph"), "columns" : [ "host_name" ], - "options" : [ "pnpview" ], - "paint" : lambda row: paint_pnpgraph(row["site"], row["host_name"]) + "options" : [ 'pnp_timerange' ], + "paint" : lambda row: paint_pnpgraph(row["site"], row["host_name"]), + "printable" : False, } def paint_host_black(row): @@ -950,7 +1097,7 @@ def paint_host_black_with_link_to_old_nagios_services(row): host = row["host_name"] baseurl = config.site(row["site"])["url_prefix"] + "nagios/cgi-bin" - url = baseurl + "/status.cgi?host=" + htmllib.urlencode(host) + url = baseurl + "/status.cgi?host=" + html.urlencode(host) state = row["host_state"] if state != 0: return None, '

    ' % (url, host) @@ -1090,9 +1237,9 @@ host = row["host_name"] text = svc link = "view.py?view_name=service&site=%s&host=%s&service=%s" % ( - htmllib.urlencode(row["site"]), - htmllib.urlencode(host), - htmllib.urlencode(svc)) + html.urlencode(row["site"]), + html.urlencode(host), + html.urlencode(svc)) if checked: css = "state%d" % state else: @@ -1127,7 +1274,7 @@ for group in row["host_groups"]: link = "view.py?view_name=hostgroup&hostgroup=" + group if html.var("display_options"): - link += "&display_options=%s" % html.var("display_options") + link += "&display_options=%s" % html.attrencode(html.var("display_options")) links.append('%s' % (link, group)) return "", ", ".join(links) @@ -1156,21 +1303,11 @@ "title" : _("Custom host notes"), "short" : _("Notes"), "columns" : [ "host_name", "host_address", "host_plugin_output" ], - "paint" : paint_custom_notes, -} - -def paint_host_tags(row): - return "", get_host_tags(row) - -multisite_painters["host_tags"] = { - "title" : _("Host Tags (Check_MK)"), - "short" : _("Tags"), - "columns" : [ "host_custom_variable_names", "host_custom_variable_values" ], - "paint" : paint_host_tags, + "paint" : lambda row: paint_custom_notes("hosts", row), } multisite_painters["host_comments"] = { - "title" : _("Host Comments"), + "title" : _("Host comments"), "short" : _("Comments"), "columns" : [ "host_comments_with_info" ], "paint" : lambda row: paint_comments("host_", row), @@ -1190,6 +1327,35 @@ "paint" : lambda row: paint_nagiosflag(row, "host_acknowledged", False), } +multisite_painters["host_staleness"] = { + "title" : _("Host staleness value"), + "short" : _("Staleness"), + "columns" : ["host_staleness"], + "paint" : lambda row: ('', '%0.2f' % row.get('host_staleness', 0)), +} + +multisite_painters["host_is_stale"] = { + "title" : _("Host is stale"), + "short" : _("Stale"), + "columns" : ["host_staleness"], + "paint" : paint_is_stale, + "sorter" : 'svc_staleness', +} + +multisite_painters["host_servicelevel"] = { + "title" : _("Host service level"), + "short" : _("Service Level"), + "columns" : [ "host_custom_variable_names", "host_custom_variable_values" ], + "paint" : lambda row: paint_custom_var('host', 'EC_SL', row), + "sorter" : 'servicelevel', +} + +multisite_painters["host_custom_vars"] = { + "title" : _("Host custom variables"), + "columns" : [ "host_custom_variables" ], + "paint" : lambda row: paint_custom_vars('host', row, [ 'FILENAME', 'TAGS']), +} + # _ _ _ # | | | | ___ ___| |_ __ _ _ __ ___ _ _ _ __ ___ @@ -1202,8 +1368,8 @@ h = "
    " for host, state, checked in row["hostgroup_members_with_state"]: link = "view.py?view_name=host&site=%s&host=%s" % ( - htmllib.urlencode(row["site"]), - htmllib.urlencode(host)) + html.urlencode(row["site"]), + html.urlencode(host)) if checked: css = "hstate%d" % state else: @@ -1390,7 +1556,7 @@ "title" : _("Comment id"), "short" : _("ID"), "columns" : ["comment_id"], - "paint" : lambda row: (None, row["comment_id"]), + "paint" : lambda row: (None, str(row["comment_id"])), } multisite_painters["comment_author"] = { "title" : _("Comment author"), @@ -1402,7 +1568,7 @@ multisite_painters["comment_comment"] = { "title" : _("Comment text"), "columns" : ["comment_comment"], - "paint" : lambda row: (None, htmllib.attrencode(row["comment_comment"])), + "paint" : lambda row: (None, html.attrencode(row["comment_comment"])), } multisite_painters["comment_what"] = { @@ -1425,25 +1591,32 @@ "short" : _("Expires"), "columns" : ["comment_expire_time"], "options" : [ "ts_format", "ts_date" ], - "paint" : lambda row: paint_age(row["comment_expire_time"], row["comment_expire_time"] != 0, 3600), + "paint" : lambda row: paint_age(row["comment_expire_time"], row["comment_expire_time"] != 0, 3600, what='future'), } def paint_comment_entry_type(row): t = row["comment_entry_type"] linkview = None - if t == 1: icon = "comment" + if t == 1: + icon = "comment" + help = _("Comment") elif t == 2: icon = "downtime" + help = _("Downtime") if row["service_description"]: linkview = "downtimes_of_service" else: linkview = "downtimes_of_host" - elif t == 3: icon = "flapping" - elif t == 4: icon = "ack" + elif t == 3: + icon = "flapping" + help = _("Flapping") + elif t == 4: + icon = "ack" + help = _("Acknowledgement") else: return "", "" - code = '' % icon + code = html.render_icon(icon, help) if linkview: code = link_to_view(code, row, linkview) return "icons", code @@ -1481,21 +1654,21 @@ "title" : _("Downtime comment"), "short" : _("Comment"), "columns" : ["downtime_comment"], - "paint" : lambda row: (None, htmllib.attrencode(row["downtime_comment"])), + "paint" : lambda row: (None, html.attrencode(row["downtime_comment"])), } multisite_painters["downtime_fixed"] = { - "title" : _("Downtime is fixed"), - "short" : _("Fixed"), + "title" : _("Downtime start mode"), + "short" : _("Mode"), "columns" : ["downtime_fixed"], "paint" : lambda row: (None, row["downtime_fixed"] == 0 and _("flexible") or _("fixed")), } multisite_painters["downtime_what"] = { - "title" : _("Downtime type (host/service)"), - "short" : _("Type"), - "columns" : ["is_service"], - "paint" : lambda row: (None, row["is_service"] and _("Service") or _("Host")), + "title" : _("Downtime for host/service"), + "short" : _("for"), + "columns" : ["downtime_is_service"], + "paint" : lambda row: (None, row["downtime_is_service"] and _("Service") or _("Host")), } multisite_painters["downtime_type"] = { @@ -1518,7 +1691,7 @@ "short" : _("Start"), "columns" : ["downtime_start_time"], "options" : [ "ts_format", "ts_date" ], - "paint" : lambda row: paint_age(row["downtime_start_time"], True, 3600), + "paint" : lambda row: paint_age(row["downtime_start_time"], True, 3600, what=None), } multisite_painters["downtime_end_time"] = { @@ -1526,19 +1699,16 @@ "short" : _("End"), "columns" : ["downtime_end_time"], "options" : [ "ts_format", "ts_date" ], - "paint" : lambda row: paint_age(row["downtime_end_time"], True, 3600), + "paint" : lambda row: paint_age(row["downtime_end_time"], True, 3600, what=None), } def paint_downtime_duration(row): - if row["downtime_fixed"] == 
1: - return None, "" - else: - return None, "%02d:%02d" % divmod(row["downtime_duration"] / 60, 60) + return "number", "%02d:%02d:00" % divmod(row["downtime_duration"] / 60, 60) multisite_painters["downtime_duration"] = { "title" : _("Downtime duration (if flexible)"), "short" : _("Duration"), - "columns" : ["downtime_duration", "downtime_fixed"], + "columns" : ["downtime_duration", ], # "downtime_fixed"], "paint" : paint_downtime_duration, } @@ -1553,13 +1723,16 @@ "title" : _("Log: complete message"), "short" : _("Message"), "columns" : ["log_message"], - "paint" : lambda row: ("", htmllib.attrencode(row["log_message"])), + "paint" : lambda row: ("", html.attrencode(row["log_message"])), } def paint_log_plugin_output(row): output = row["log_plugin_output"] + comment = row["log_comment"] if output: return "", format_plugin_output(output, row) + elif comment: + return "", comment else: log_type = row["log_type"] lst = row["log_state_type"] @@ -1573,21 +1746,43 @@ else: return "", _("The %s started flapping") % what - return "", (lst + " - " + log_type) + elif lst: + return "", (lst + " - " + log_type) + else: + return "", "" multisite_painters["log_plugin_output"] = { "title" : _("Log: output of check plugin"), "short" : _("Check output"), - "columns" : ["log_plugin_output", "log_type", "log_state_type" ], + "columns" : ["log_plugin_output", "log_type", "log_state_type", "log_comment" ], "paint" : paint_log_plugin_output, } + +def paint_log_type(row): + lt = row["log_type"] + if "HOST" in lt: + return "", _("Host") + elif "SERVICE" in lt or "SVC" in lt: + return "", _("Service") + else: + return "", _("Program") + + +multisite_painters["log_what"] = { + "title" : _("Log: host or service"), + "short" : _("Host/Service"), + "columns" : [ "log_type" ], + "paint" : paint_log_type, +} + + multisite_painters["log_attempt"] = { "title" : _("Log: number of check attempt"), "short" : _("Att."), "columns" : ["log_attempt"], - "paint" : lambda row: ("", row["log_attempt"]), + "paint" : lambda row: ("", str(row["log_attempt"])), } multisite_painters["log_state_type"] = { "title" : _("Log: type of state (hard/soft/stopped/started)"), @@ -1607,6 +1802,12 @@ "columns" : ["log_contact_name"], "paint" : lambda row: ("nowrap", row["log_contact_name"]), } +multisite_painters["log_command"] = { + "title" : _("Log: command/plugin"), + "short" : _("Command"), + "columns" : ["log_command_name"], + "paint" : lambda row: ("nowrap", row["log_command_name"]), +} def paint_log_icon(row): img = None log_type = row["log_type"] @@ -1615,7 +1816,7 @@ elif log_type == "HOST ALERT": img = { 0: "up", 1: "down", 2:"unreach" }.get(row["log_state"]) elif "DOWNTIME" in log_type: - if row["log_state_type"] == "STOPPED": + if row["log_state_type"] in [ "END", "STOPPED" ]: img = "downtimestop" else: img = "downtime" @@ -1627,13 +1828,18 @@ img = "restart" elif "starting..." in log_type: img = "start" - elif "shutdown..." in log_type: + elif "shutdown..." 
in log_type or "shutting down" in log_type: img = "stop" elif " FLAPPING " in log_type: img = "flapping" + elif "ACKNOWLEDGE ALERT" in log_type: + if row["log_state_type"] == "STARTED": + img = "ack" + else: + img = "ackstop" if img: - return "icon", '' % img + return "icon", '' % img else: return "icon", "" @@ -1648,14 +1854,14 @@ "title" : _("Log: informational part of message"), "short" : _("Info"), "columns" : ["log_options"], - "paint" : lambda row: ("", htmllib.attrencode(row["log_options"])), + "paint" : lambda row: ("", html.attrencode(row["log_options"])), } def paint_log_comment(msg): if ';' in msg: parts = msg.split(';') if len(parts) > 6: - return ("", htmllib.attrencode(parts[-1])) + return ("", html.attrencode(parts[-1])) return ("", "") multisite_painters["log_comment"] = { @@ -1738,3 +1944,85 @@ "columns" : [ "alerts_problem" ], "paint" : lambda row: paint_svc_count('s', row["alerts_problem"]) } + +# +# HOSTTAGS +# + +def paint_host_tags(row): + return "", get_host_tags(row) + +multisite_painters["host_tags"] = { + "title" : _("Host tags (raw)"), + "short" : _("Tags"), + "columns" : [ "host_custom_variable_names", "host_custom_variable_values" ], + "paint" : paint_host_tags, + "sorter" : 'host', +} + +def paint_host_tags_with_titles(row): + output = '' + misc_tags = [] + for tag in get_host_tags(row).split(): + group_title = config.tag_group_title(tag) + if group_title: + output += group_title + ': ' + (config.tag_alias(tag) or tag) + '
<br>\n' + else: + misc_tags.append(tag) + + if misc_tags: + output += _('Misc:') + ' ' + ', '.join(misc_tags) + + return "", output + +multisite_painters["host_tags_with_titles"] = { + "title" : _("Host tags (with titles)"), + "short" : _("Tags"), + "columns" : [ "host_custom_variable_names", "host_custom_variable_values" ], + "paint" : paint_host_tags_with_titles, + "sorter" : 'host', +} + + +def paint_host_tag(row, tgid): + tags_of_host = get_host_tags(row).split() + + for t in get_tag_group(tgid)[1]: + if t[0] in tags_of_host: + return "", t[1] + return "", _("N/A") + +# Use title of the tag value for grouping, not the complete +# dictionary of custom variables! +def groupby_host_tag(row, tgid): + cssclass, title = paint_host_tag(row, tgid) + return title + +def load_host_tag_painters(): + # first remove all old painters to reflect deleted painters during runtime + for key in multisite_painters.keys(): + if key.startswith('host_tag_'): + del multisite_painters[key] + + for entry in config.wato_host_tags: + tgid = entry[0] + tit = entry[1] + ch = entry[2] + + long_tit = tit + if '/' in tit: + topic, tit = tit.split('/', 1) + if topic: + long_tit = topic + ' / ' + tit + else: + long_tit = tit + + multisite_painters["host_tag_" + tgid] = { + "title" : _("Host tag:") + ' ' + long_tit, + "name" : "host_tag_" + tgid, + "short" : tit, + "columns" : [ "host_custom_variables" ], + "paint" : paint_host_tag, + "groupby" : groupby_host_tag, + "args" : [ tgid ], + } diff -Nru check-mk-1.2.2p3/plugins/views/perfometer.py check-mk-1.2.6p12/plugins/views/perfometer.py --- check-mk-1.2.2p3/plugins/views/perfometer.py 2013-11-05 09:23:09.000000000 +0000 +++ check-mk-1.2.6p12/plugins/views/perfometer.py 2015-09-21 10:59:54.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. @@ -89,20 +89,53 @@ return result + '' +def perfometer_logarithmic_dual_independent\ + (value_left, color_left, half_value_left, base_left, value_right, color_right, half_value_right, base_right): + result = '' + for where, value, color, half_value, base in [ + ("left", value_left, color_left, half_value_left, base_left), + ("right", value_right, color_right, half_value_right, base_right) ]: + value = float(value) + if value == 0.0: + pos = 0 + else: + half_value = float(half_value) + h = math.log(half_value, base) # value to be displayed at 50% + pos = 25 + 10.0 * (math.log(value, base) - h) + if pos < 1: + pos = 1 + if pos > 49: + pos = 49 + + if where == "right": + result += perfometer_td(pos, color) + \ + perfometer_td(50 - pos, "white") + else: + result += perfometer_td(50 - pos, "white") + \ + perfometer_td(pos, color) + + return result + '
    ' def number_human_readable(n, precision=1, unit="B"): + base = 1024.0 + if unit == "Bit": + base = 1000.0 + n = float(n) f = "%." + str(precision) + "f" - if abs(n) > 1024 * 1024 * 1024: - return (f + "G%s") % (n / (1024.0 * 1024 * 1024), unit) - elif abs(n) > 1024 * 1024: - return (f + "M%s") % (n / (1024.0 * 1024), unit) - elif abs(n) > 1024: - return (f + "k%s") % (n / 1024.0, unit) + if abs(n) > base * base * base: + return (f + "G%s") % (n / (base * base * base), unit) + elif abs(n) > base * base: + return (f + "M%s") % (n / (base * base), unit) + elif abs(n) > base: + return (f + "k%s") % (n / base, unit) else: return (f + "%s") % (n, unit) -def age_human_readable(secs): +def age_human_readable(secs, min_only=False): + if min_only: + mins = secs / 60.0 + return "%.1f min" % mins if secs < 240: return "%d sec" % secs mins = secs / 60 @@ -114,6 +147,27 @@ days = hours / 24 return "%d days" % days +def bytes_human_readable(b, base=1024.0, bytefrac=True, unit="B"): + base = float(base) + # Handle negative bytes correctly + prefix = '' + if b < 0: + prefix = '-' + b *= -1 + + if b >= base * base * base * base: + return '%s%.2f T%s' % (prefix, b / base / base / base / base, unit) + elif b >= base * base * base: + return '%s%.2f G%s' % (prefix, b / base / base / base, unit) + elif b >= base * base: + return '%s%.2f M%s' % (prefix, b / base / base, unit) + elif b >= base: + return '%s%.2f k%s' % (prefix, b / base, unit) + elif bytefrac: + return '%s%.2f %s' % (prefix, b, unit) + else: # Omit byte fractions + return '%s%.0f %s' % (prefix, b, unit) + def paint_perfometer(row): perfstring = unicode(row["service_perf_data"].strip()) @@ -163,6 +217,10 @@ if not perf_data: return "", "" + if is_stale(row): + stale_css = " stale" + else: + stale_css = "" try: title, h = perf_painter(row, check_command, perf_data) content = '
    %s
    ' % h @@ -172,9 +230,9 @@ # pnpgraph_present: -1 means unknown (path not configured), 0: no, 1: yes if 'X' in html.display_options and \ row["service_pnpgraph_present"] != 0: - return "perfometer", ('%s' % (pnp_url(row, "service"), content)) + return "perfometer" + stale_css, ('%s' % (pnp_url(row, "service"), content)) else: - return "perfometer", content + return "perfometer" + stale_css, content except Exception, e: @@ -187,7 +245,9 @@ "short" : _("Perf-O-Meter"), "columns" : [ "service_perf_data", "service_state", "service_check_command", "service_pnpgraph_present", "service_plugin_output" ], - "paint" : paint_perfometer + "paint" : paint_perfometer, + "sorter" : "svc_perf_val01", + "printable" : False, # No printable on PDF, only in HTML } load_web_plugins("perfometer", globals()) diff -Nru check-mk-1.2.2p3/plugins/views/sorters.py check-mk-1.2.6p12/plugins/views/sorters.py --- check-mk-1.2.2p3/plugins/views/sorters.py 2013-11-05 09:23:09.000000000 +0000 +++ check-mk-1.2.6p12/plugins/views/sorters.py 2015-09-21 10:59:54.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. @@ -96,11 +96,8 @@ } def cmp_site_host(r1, r2): - c = cmp(r1["site"], r2["site"]) - if c != 0: - return c - else: - return cmp_simple_string("host_name", r1, r2) + return cmp(r1["site"], r2["site"]) or \ + cmp_num_split("host_name", r1, r2) multisite_sorters["site_host"] = { "title" : _("Host"), @@ -121,13 +118,39 @@ return cmp(get_host_tags(r1), get_host_tags(r2)) multisite_sorters["host"] = { - "title" : _("Host Tags (Check_MK)"), + "title" : _("Host Tags (raw)"), "columns" : [ "host_custom_variable_names", "host_custom_variable_values" ], "cmp" : cmp_host_tags, } +multisite_sorters['servicelevel'] = { + 'title' : _("Servicelevel"), + 'columns' : [ 'custom_variable_names', 'custom_variable_values' ], + 'cmp' : lambda r1, r2: cmp_custom_variable(r1, r2, 'EC_SL', cmp_simple_number) +} + +def cmp_service_name_equiv(r): + if r == "Check_MK": + return -5 + elif r == "Check_MK Discovery": + return -4 + elif r == "Check_MK inventory": + return -3 # FIXME: Remove old name one day + elif r == "Check_MK HW/SW Inventory": + return -2 + else: + return 0 + +def cmp_service_name(column, r1, r2): + o = cmp(cmp_service_name_equiv(r1[column]), cmp_service_name_equiv(r2[column])) + if o == 0: + return cmp_simple_string(column, r1, r2) + else: + return o + # name title column sortfunction -declare_simple_sorter("svcdescr", _("Service description"), "service_description", cmp_simple_string) +declare_simple_sorter("svcdescr", _("Service description"), "service_description", cmp_service_name) +declare_simple_sorter("svcdispname", _("Service alternative display name"), "service_display_name", cmp_simple_string) declare_simple_sorter("svcoutput", _("Service plugin output"), "service_plugin_output", cmp_simple_string) declare_simple_sorter("svc_long_plugin_output", _("Long output of check plugin"), "service_long_plugin_output", cmp_simple_string) declare_simple_sorter("site", _("Site"), "site", cmp_simple_string) @@ -155,6 +178,8 @@ declare_1to1_sorter("svc_is_active", cmp_simple_number) declare_1to1_sorter("svc_group_memberlist", cmp_string_list) declare_1to1_sorter("svc_acknowledged", cmp_simple_number) 
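The rewritten cmp_site_host above uses a Python 2 idiom worth spelling out: cmp() returns -1, 0 or 1, and because 0 is falsy, chaining comparisons with `or` only falls through to the secondary sort key on a tie. A self-contained sketch of the idiom; this cmp_num_split is an illustrative stand-in for the Multisite helper of the same name:

    import re

    def cmp_num_split(column, r1, r2):
        # Stand-in: split "host12a" into ['host', 12, 'a'] so numeric parts
        # compare as numbers and "host2" sorts before "host10".
        def parts(s):
            return [ int(p) if p.isdigit() else p
                     for p in re.split('([0-9]+)', s) ]
        return cmp(parts(r1[column]), parts(r2[column]))

    def cmp_site_host(r1, r2):
        # A non-zero site comparison wins; a tie (0, falsy) falls through.
        return cmp(r1["site"], r2["site"]) or cmp_num_split("host_name", r1, r2)

    rows = [ {"site": "s1", "host_name": "host10"},
             {"site": "s1", "host_name": "host2"} ]
    rows.sort(cmp_site_host)    # host2 now sorts before host10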
+declare_1to1_sorter("svc_staleness", cmp_simple_number) +declare_1to1_sorter("svc_servicelevel", cmp_simple_number) def cmp_perfdata_nth_value(r1, r2, n): return cmp(savefloat(get_perfdata_nth_value(r1, n)), savefloat(get_perfdata_nth_value(r2, n))) @@ -212,8 +237,8 @@ # Host -declare_1to1_sorter("alias", cmp_simple_string) -declare_1to1_sorter("host_address", cmp_simple_string) +declare_1to1_sorter("alias", cmp_num_split) +declare_1to1_sorter("host_address", cmp_ip_address) declare_1to1_sorter("host_plugin_output", cmp_simple_string) declare_1to1_sorter("host_perf_data", cmp_simple_string) declare_1to1_sorter("host_check_command", cmp_simple_string) @@ -243,6 +268,7 @@ declare_1to1_sorter("host_group_memberlist", cmp_string_list) declare_1to1_sorter("host_contacts", cmp_string_list) declare_1to1_sorter("host_contact_groups", cmp_string_list) +declare_1to1_sorter("host_servicelevel", cmp_simple_number) def cmp_host_problems(r1, r2): return cmp(r1["host_num_services"] - r1["host_num_services_ok"] - r1["host_num_services_pending"], @@ -293,7 +319,7 @@ declare_1to1_sorter("downtime_comment", cmp_simple_string) declare_1to1_sorter("downtime_fixed", cmp_simple_number) declare_1to1_sorter("downtime_type", cmp_simple_number) -declare_simple_sorter("downtime_what", _("Downtime type (host/service)"), "is_service", cmp_simple_number) +declare_simple_sorter("downtime_what", _("Downtime for host/service"), "downtime_is_service", cmp_simple_number) declare_simple_sorter("downtime_start_time", _("Downtime start"), "downtime_start_time", cmp_simple_number) declare_simple_sorter("downtime_end_time", _("Downtime end"), "downtime_end_time", cmp_simple_number) declare_simple_sorter("downtime_entry_time", _("Downtime entry time"), "downtime_entry_time", cmp_simple_number) @@ -307,6 +333,19 @@ declare_1to1_sorter("log_time", cmp_simple_number) declare_1to1_sorter("log_lineno", cmp_simple_number) +def cmp_log_what(col, a, b): + return cmp(log_what(a[col]), log_what(b[col])) + +def log_what(t): + if "HOST" in t: + return 1 + elif "SERVICE" in t or "SVC" in t: + return 2 + else: + return 0 + +declare_1to1_sorter("log_what", cmp_log_what) + import time def get_day_start_timestamp(t): st = time.localtime(int(t)) @@ -333,3 +372,34 @@ # Aggregations declare_simple_sorter("aggr_name", _("Aggregation name"), "aggr_name", cmp_simple_string) declare_simple_sorter("aggr_group", _("Aggregation group"), "aggr_group", cmp_simple_string) + +# +# SINGLE HOSTTAG FIELDS +# + +def cmp_host_tag(r1, r2, tgid): + tags1 = get_host_tags(r1).split() + tags2 = get_host_tags(r2).split() + + val1 = _('N/A') + val2 = _('N/A') + for t in get_tag_group(tgid)[1]: + if t[0] in tags1: + val1 = t[1] + if t[0] in tags2: + val2 = t[1] + + return cmp(val1, val2) + +for entry in config.wato_host_tags: + tgid = entry[0] + tit = entry[1] + + declare_simple_sorter("host_tag_" + tgid, _("Host tag:") + ' ' + tit, "host_tag_" + tgid, cmp_simple_string) + + multisite_sorters["host_tag_" + tgid] = { + "title" : _("Host tag:") + ' ' + tit, + "columns" : [ "host_custom_variable_names", "host_custom_variable_values" ], + "cmp" : cmp_host_tag, + "args" : [ tgid ], + } diff -Nru check-mk-1.2.2p3/plugins/views/wato.py check-mk-1.2.6p12/plugins/views/wato.py --- check-mk-1.2.2p3/plugins/views/wato.py 2013-11-05 09:23:09.000000000 +0000 +++ check-mk-1.2.6p12/plugins/views/wato.py 2015-06-24 09:48:38.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . 
\ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. @@ -27,75 +27,6 @@ import config, wato -class FilterWatoFile(Filter): - def __init__(self): - Filter.__init__(self, "wato_folder", _("WATO Folder"), "host", ["filename"], []) - self.last_wato_data_update = None - - def available(self): - return config.wato_enabled and wato.have_folders() - - def load_wato_data(self): - self.tree = wato.api.get_folder_tree() - self.path_to_tree = {} # will be filled by self.folder_selection - self.selection = self.folder_selection(self.tree, "", 0) - self.last_wato_data_update = time.time() - - def check_wato_data_update(self): - if not self.last_wato_data_update or time.time() - self.last_wato_data_update > 5: - self.load_wato_data() - - def display(self): - self.check_wato_data_update() - html.select(self.name, [("", "")] + self.selection) - - def filter(self, infoname): - self.check_wato_data_update() - current = html.var(self.name) - if current: - return "Filter: host_filename ~ ^/wato/%s/\n" % current.replace("\n", "") # prevent insertions attack - else: - return "" - - # Construct pair-list of ( folder-path, title ) to be used - # by the HTML selection box. This also updates self._tree, - # a dictionary from the path to the title. - def folder_selection(self, folder, prefix, depth): - my_path = folder[".path"] - if depth: - title_prefix = "   " * depth + "` " + "- " * depth - else: - title_prefix = "" - self.path_to_tree[my_path] = folder["title"] - sel = [ (my_path , title_prefix + folder["title"]) ] - sel += self.sublist(folder.get(".folders", {}), my_path, depth) - return sel - - def sublist(self, elements, my_path, depth): - vs = elements.values() - vs.sort(lambda a, b: cmp(a["title"].lower(), b["title"].lower())) - sel = [] - for e in vs: - sel += self.folder_selection(e, my_path, depth + 1) - return sel - - def heading_info(self, info): - # FIXME: There is a problem with caching data and changing titles of WATO files - # Everything is changed correctly but the filter object is stored in the - # global multisite_filters var and self.path_to_tree is not refreshed when - # rendering this title. Thus the threads might have old information about the - # file titles and so on. - # The call below needs to use some sort of indicator wether the cache needs - # to be renewed or not. 
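The FIXME above describes the weakness of the removed filter's caching: the check_wato_data_update() call that follows only reloads the folder tree when the cached copy is older than five seconds, and nothing invalidates it early when a folder title changes in WATO. A condensed sketch of that time-to-live pattern, with names loosely modelled on the removed class:

    import time

    class FolderTreeCache:
        TTL = 5  # seconds, as in check_wato_data_update()

        def __init__(self, loader):
            self.loader = loader        # callable building the folder tree
            self.tree = None
            self.last_update = None

        def get(self):
            # Reload only when never loaded or older than TTL; there is no
            # explicit invalidation, which is exactly the FIXME's complaint.
            now = time.time()
            if self.last_update is None or now - self.last_update > self.TTL:
                self.tree = self.loader()
                self.last_update = now
            return self.tree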
- self.check_wato_data_update() - current = html.var(self.name) - if current and current != "/": - return self.path_to_tree.get(current) - -declare_filter(10, FilterWatoFile()) -if "wato_folder" not in ubiquitary_filters: - ubiquitary_filters.append("wato_folder") # show in all views - multisite_painters["host_filename"] = { "title" : _("Check_MK config filename"), "short" : _("Filename"), @@ -108,7 +39,7 @@ if not filename.startswith("/wato/") or not filename.endswith("/hosts.mk"): return "" wato_path = filename[6:-9] - title_path = wato.api.get_folder_title_path(wato_path, with_links) + title_path = wato.get_folder_title_path(wato_path, with_links) if how == "plain": return title_path[-1] elif how == "abs": diff -Nru check-mk-1.2.2p3/plugins/views/webservice.py check-mk-1.2.6p12/plugins/views/webservice.py --- check-mk-1.2.2p3/plugins/views/webservice.py 2013-11-05 09:23:09.000000000 +0000 +++ check-mk-1.2.6p12/plugins/views/webservice.py 2015-09-21 10:59:54.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. @@ -41,8 +41,9 @@ for row in rows: html.write("[") for p in painters: - tdclass, content = p[0]["paint"](row) - html.write(repr(htmllib.strip_tags(content))) + joined_row = join_row(row, p) + tdclass, content = paint_painter(p[0], joined_row) + html.write(repr(html.strip_tags(content))) html.write(",") html.write("],") html.write("\n]\n") @@ -63,7 +64,14 @@ return '"' + json_escape.sub(lambda m: json_encoding_table[m.group(0)], s) + '"' -def render_json(rows, view, group_painters, painters, num_columns, show_checkboxes): +def render_json(rows, view, group_painters, painters, num_columns, show_checkboxes, export = False): + if export: + html.req.content_type = "application/json; charset=UTF-8" + filename = '%s-%s.json' % (view['name'], time.strftime('%Y-%m-%d_%H-%M-%S', time.localtime(time.time()))) + if type(filename) == unicode: + filename = filename.encode("utf-8") + html.req.headers_out['Content-Disposition'] = 'Attachment; filename=%s' % filename + html.write("[\n") first = True @@ -74,7 +82,7 @@ else: html.write(",") content = p[0]["name"] - stripped = htmllib.strip_tags(content) + stripped = html.strip_tags(content) utf8 = stripped.encode("utf-8") html.write(encode_string_json(utf8)) html.write("]") @@ -87,19 +95,91 @@ first = False else: html.write(",") - tdclass, content = p[0]["paint"](row) + joined_row = join_row(row, p) + tdclass, content = paint_painter(p[0], joined_row) + if type(content) == unicode: + content = content.encode("utf-8") + else: + content = str(content) content = content.replace("<br>
    ","\n") - stripped = htmllib.strip_tags(content) - utf8 = stripped.encode("utf-8") - html.write(encode_string_json(utf8)) + stripped = html.strip_tags(content) + html.write(encode_string_json(stripped)) html.write("]") html.write("\n]\n") +multisite_layouts["json_export"] = { + "title" : _("JSON data export"), + "render" : lambda a,b,c,d,e,f: render_json(a,b,c,d,e,f,True), + "group" : False, + "hide" : True, +} + multisite_layouts["json"] = { "title" : _("JSON data output"), - "render" : render_json, + "render" : lambda a,b,c,d,e,f: render_json(a,b,c,d,e,f,False), "group" : False, "hide" : True, } + +def render_jsonp(rows, view, group_painters, painters, num_columns, show_checkboxes): + html.write("%s(\n" % html.var('jsonp')); + render_json(rows, view, group_painters, painters, num_columns, show_checkboxes) + html.write(");\n"); + +multisite_layouts["jsonp"] = { + "title" : _("JSONP data output"), + "render" : render_jsonp, + "group" : False, + "hide" : True, +} + +def render_csv(rows, view, group_painters, painters, num_columns, show_checkboxes, export = False): + if export: + html.req.content_type = "text/csv; charset=UTF-8" + filename = '%s-%s.csv' % (view['name'], time.strftime('%Y-%m-%d_%H-%M-%S', time.localtime(time.time()))) + if type(filename) == unicode: + filename = filename.encode("utf-8") + html.req.headers_out['Content-Disposition'] = 'Attachment; filename=%s' % filename + + csv_separator = html.var("csv_separator", ";") + first = True + for p in painters: + if first: + first = False + else: + html.write(csv_separator) + content = p[0]["name"] + content = type(content) in [ int, float ] and str(content) or content + stripped = html.strip_tags(content).replace('\n', '').replace('"', '""') + html.write('"%s"' % stripped.encode("utf-8")) + + for row in rows: + html.write("\n") + first = True + for p in painters: + if first: + first = False + else: + html.write(csv_separator) + joined_row = join_row(row, p) + tdclass, content = paint_painter(p[0], joined_row) + content = type(content) in [ int, float ] and str(content) or content + stripped = html.strip_tags(content).replace('\n', '').replace('"', '""') + html.write('"%s"' % stripped.encode("utf-8")) + +multisite_layouts["csv_export"] = { + "title" : _("CSV data export"), + "render" : lambda a,b,c,d,e,f: render_csv(a,b,c,d,e,f,True), + "group" : False, + "hide" : True, +} + +multisite_layouts["csv"] = { + "title" : _("CSV data output"), + "render" : lambda a,b,c,d,e,f: render_csv(a,b,c,d,e,f,False), + "group" : False, + "hide" : True, +} + diff -Nru check-mk-1.2.2p3/plugins/visuals/bi.py check-mk-1.2.6p12/plugins/visuals/bi.py --- check-mk-1.2.2p3/plugins/visuals/bi.py 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/plugins/visuals/bi.py 2015-06-24 09:48:38.000000000 +0000 @@ -0,0 +1,213 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. 
+# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. + +import bi + +class BIGroupFilter(Filter): + def __init__(self): + self.column = "aggr_group" + Filter.__init__(self, self.column, _("Aggregation group"), "aggr", [self.column], [self.column]) + + def variable_settings(self, row): + return [ (self.htmlvars[0], row[self.column]) ] + + def display(self): + htmlvar = self.htmlvars[0] + html.select(htmlvar, [ ("", "") ] + [(g, g) for g in bi.aggregation_groups()]) + + def selected_group(self): + return html.var(self.htmlvars[0]) + + def filter_table(self, rows): + group = self.selected_group() + if not group: + return rows + else: + return [ row for row in rows if row[self.column] == group ] + + def heading_info(self): + return html.var(self.htmlvars[0]) + +declare_filter( 90, BIGroupFilter()) + +# how is either "regex" or "exact" +class BITextFilter(Filter): + def __init__(self, what, how="regex", suffix=""): + self.how = how + self.column = "aggr_" + what + label = '' + if what == 'name': + label = _('Aggregation name') + elif what == 'output': + label = _('Aggregation output') + if how == "exact": + label += _(" (exact match)") + Filter.__init__(self, self.column + suffix, + label, "aggr", [self.column + suffix], [self.column]) + + def variable_settings(self, row): + return [ (self.htmlvars[0], row[self.column]) ] + + def display(self): + html.text_input(self.htmlvars[0]) + + def heading_info(self): + return html.var_utf8(self.htmlvars[0]) + + def filter_table(self, rows): + val = html.var_utf8(self.htmlvars[0]) + if not val: + return rows + if self.how == "regex": + reg = re.compile(val.lower()) + return [ row for row in rows if reg.search(row[self.column].lower()) ] + else: + return [ row for row in rows if row[self.column] == val ] + + +declare_filter(120, BITextFilter("name", suffix="_regex")) +declare_filter(120, BITextFilter("name", how="exact")) +declare_filter(121, BITextFilter("output")) + +class BIHostFilter(Filter): + def __init__(self): + self.column = "aggr_hosts" + Filter.__init__(self, self.column, _("Affected hosts contain"), "aggr", ["aggr_host_site", "aggr_host_host"], []) + + def display(self): + html.text_input(self.htmlvars[1]) + + def heading_info(self): + return html.var(self.htmlvars[1]) + + def find_host(self, host, hostlist): + for s, h in hostlist: + if h == host: + return True + return False + + # Used for linking + def variable_settings(self, row): + return [ ("aggr_host_host", row["host_name"]), ("aggr_host_site", row["site"]) ] + + def filter_table(self, rows): + val = html.var(self.htmlvars[1]) + if not val: + return rows + return [ row for row in rows if self.find_host(val, row["aggr_hosts"]) ] + +declare_filter(130, BIHostFilter(), _("Filter for all aggregations that base on status information of that host. 
Exact match (no regular expression)")) + +class BIServiceFilter(Filter): + def __init__(self): + Filter.__init__(self, "aggr_service", _("Affected by service"), "aggr", ["aggr_service_site", "aggr_service_host", "aggr_service_service"], []) + + def double_height(self): + return True + + def display(self): + html.write(_("Host") + ": ") + html.text_input(self.htmlvars[1]) + html.write(_("Service") + ": ") + html.text_input(self.htmlvars[2]) + + def heading_info(self): + return html.var_utf8(self.htmlvars[1]) + " / " + html.var_utf8(self.htmlvars[2]) + + def service_spec(self): + if html.has_var(self.htmlvars[2]): + return html.var_utf8(self.htmlvars[0]), html.var_utf8(self.htmlvars[1]), html.var_utf8(self.htmlvars[2]) + + # Used for linking + def variable_settings(self, row): + return [ ("site", row["site"]), ("host", row["host_name"]), ("service", row["service_description"]) ] + +declare_filter(131, BIServiceFilter(), _("Filter for all aggregations that are affected by one specific service on a specific host (no regular expression)")) + +class BIStatusFilter(Filter): + def __init__(self, what): + title = (what.replace("_", " ") + " state").title() + self.column = "aggr_" + what + "state" + if what == "": + self.code = 'r' + else: + self.code = what[0] + self.prefix = "bi%ss" % self.code + vars = [ self.prefix + str(x) for x in [ -1, 0, 1, 2, 3 ] ] + if self.code == 'a': + vars.append(self.prefix + "n") + Filter.__init__(self, self.column, title, "aggr", vars, []) + + def filter(self, tablename): + return "" + + def double_height(self): + return self.column == "aggr_assumed_state" + + def display(self): + if html.var("filled_in"): + defval = "" + else: + defval = "on" + for varend, text in [('0', _('OK')), ('1', _('WARN')), ('2', _('CRIT')), + ('3', _('UNKN')), ('-1', _('PENDING')), ('n', _('no assumed state set'))]: + if self.code != 'a' and varend == 'n': + continue # no unset for read and effective state + if varend == 'n': + html.write("
    ") + var = self.prefix + varend + html.checkbox(var, defval, label = text) + + def filter_table(self, rows): + jeaders = [] + if html.var("filled_in"): + defval = "" + else: + defval = "on" + + allowed_states = [] + for i in ['0','1','2','3','-1','n']: + if html.var(self.prefix + i, defval) == "on": + if i == 'n': + s = None + else: + s = int(i) + allowed_states.append(s) + newrows = [] + for row in rows: + if row[self.column] != None: + s = row[self.column]["state"] + else: + s = None + if s in allowed_states: + newrows.append(row) + return newrows + +declare_filter(150, BIStatusFilter("")) +declare_filter(151, BIStatusFilter("effective_")) +declare_filter(152, BIStatusFilter("assumed_")) + + diff -Nru check-mk-1.2.2p3/plugins/visuals/filters.py check-mk-1.2.6p12/plugins/visuals/filters.py --- check-mk-1.2.2p3/plugins/visuals/filters.py 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/plugins/visuals/filters.py 2015-09-21 10:59:54.000000000 +0000 @@ -0,0 +1,964 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. 
+ + + +# Filters for substring search, displaying a text input field +class FilterText(Filter): + def __init__(self, name, title, info, column, htmlvar, op): + Filter.__init__(self, name, title, info, [htmlvar], [column]) + self.op = op + self.column = column + + def _current_value(self): + htmlvar = self.htmlvars[0] + return html.var(htmlvar, "") + + def display(self): + html.text_input(self.htmlvars[0], self._current_value()) + + def filter(self, infoname): + current_value = self._current_value() + if current_value: + return "Filter: %s %s %s\n" % (self.column, self.op, lqencode(current_value)) + else: + return "" + + def variable_settings(self, row): + return [ (self.htmlvars[0], row[self.column]) ] + + def heading_info(self): + return self._current_value() + + +class FilterUnicode(FilterText): + def _current_value(self): + htmlvar = self.htmlvars[0] + return html.var_utf8(htmlvar, "") + + def filter(self, infoname): + current_value = self._current_value() + if current_value: + return "Filter: %s %s %s\n" % (self.column, self.op, lqencode(current_value.encode('utf-8'))) + else: + return "" + +# filter title info column htmlvar +declare_filter(100, FilterText("hostregex", _("Hostname"), "host", "host_name", "host_regex", "~~"), + _("Search field allowing regular expressions and partial matches")) + +declare_filter(101, FilterText("host", _("Hostname (exact match)"), "host", "host_name", "host", "="), + _("Exact match, used for linking")) + +declare_filter(102, FilterUnicode("hostalias", _("Hostalias"), "host", "host_alias", "hostalias", "~~"), + _("Search field allowing regular expressions and partial matches")) + +declare_filter(200, FilterUnicode("serviceregex", _("Service"), "service", "service_description", "service_regex", "~~"), + _("Search field allowing regular expressions and partial matches")) + +declare_filter(201, FilterUnicode("service", _("Service (exact match)"), "service", "service_description", "service", "="), + _("Exact match, used for linking")) + +declare_filter(202, FilterUnicode("service_display_name", _("Service alternative display name"), "service", "service_display_name", "service_display_name", "~~"), + _("Alternative display name of the service, regex match")) + +declare_filter(202, FilterUnicode("output", _("Status detail"), "service", "service_plugin_output", "service_output", "~~")) + +class FilterIPAddress(Filter): + def __init__(self): + Filter.__init__(self, "host_address", _("Host IP Address"), "host", ["host_address", "host_address_prefix"], ["host_address"]) + + def display(self): + html.text_input("host_address") + html.write("
<br><br>
    ") + html.begin_radio_group() + html.radiobutton("host_address_prefix", "yes", True, _("Prefix match")) + html.radiobutton("host_address_prefix", "no", False, _("Exact match")) + html.end_radio_group() + + def double_height(self): + return True + + def filter(self, infoname): + address = html.var("host_address") + if address: + if html.var("host_address_prefix") == "yes": + return "Filter: host_address ~ ^%s\n" % lqencode(address) + else: + return "Filter: host_address = %s\n" % lqencode(address) + else: + return "" + + def variable_settings(self, row): + return [ ("host_address", row["host_address"]) ] + + def heading_info(self): + return html.var("host_address") + +declare_filter(102, FilterIPAddress()) + + +# Helper that retrieves the list of host/service/contactgroups via Livestatus +# use alias by default but fallback to name if no alias defined +def all_groups(what): + groups = dict(html.live.query("GET %sgroups\nCache: reload\nColumns: name alias\n" % what)) + return [ (name, groups[name] or name) for name in groups.keys() ] + +class FilterMultigroup(Filter): + def __init__(self, what, title): + htmlvars = [ what + "groups" ] + Filter.__init__(self, htmlvars[0], # name + title, + what, # info, e.g. "service" + htmlvars, + []) # no link info needed + self.what = what + self.htmlvar = htmlvars[0] + + def double_height(self): + return True + + def valuespec(self): + return DualListChoice(choices = all_groups(self.what), autoheight=False, enlarge_active=True) + + def selection(self): + current = html.var(self.htmlvar, "").strip().split("|") + if current == ['']: + return [] + else: + return current + + def display(self): + html.write('
<div class=multigroup>')
+        self.valuespec().render_input(self.htmlvar, self.selection())
+        html.write('</div>
    ') + + def filter(self, infoname): + current = self.selection() + if len(current) == 0: + return "" # No group selected = all groups selected, filter unused + filters = "" + for group in current: + filters += "Filter: %s_groups >= %s\n" % (self.what, lqencode(group)) + filters += "Or: %d\n" % len(current) + return filters + + +# Selection of a host/service(-contact) group as an attribute of a host or service +class FilterGroupCombo(Filter): + def __init__(self, what, title, enforce): + self.enforce = enforce + self.prefix = not self.enforce and "opt" or "" + htmlvars = [ self.prefix + what + "_group" ] + if not enforce: + htmlvars.append("neg_" + htmlvars[0]) + Filter.__init__(self, self.prefix + what + "group", # name, e.g. "hostgroup" + title, # title, e.g. "Hostgroup" + what.split("_")[0], # info, e.g. "host" + htmlvars, # htmlvars, e.g. "host_group" + [ what + "group_name" ]) # rows needed to fetch for link information + self.what = what + + def double_height(self): + return True + + def display(self): + choices = all_groups(self.what.split("_")[-1]) + if not self.enforce: + choices = [("", "")] + choices + html.sorted_select(self.htmlvars[0], choices) + if not self.enforce: + html.write(" ") + html.checkbox(self.htmlvars[1], label=_("negate")) + html.write("") + + def current_value(self): + htmlvar = self.htmlvars[0] + return html.var(htmlvar) + + def filter(self, infoname): + if not html.has_var(self.htmlvars[0]): + return "" # Skip if filter is not being set at all + + current_value = self.current_value() + if not current_value: + if not self.enforce: + return "" + # Take first group with the name we search + table = self.what.replace("host_contact", "contact").replace("service_contact", "contact") + current_value = html.live.query_value("GET %sgroups\nCache: reload\nColumns: name\nLimit: 1\n" % table, None) + + if current_value == None: + return "" # no {what}group exists! + + col = self.what + "_groups" + if not self.enforce and html.var(self.htmlvars[1]): + negate = "!" 
+ else: + negate = "" + return "Filter: %s %s>= %s\n" % (col, negate, lqencode(current_value)) + + def variable_settings(self, row): + varname = self.htmlvars[0] + value = row.get(self.what + "group_name") + if value: + s = [(varname, value)] + if not self.enforce: + negvar = self.htmlvars[1] + if html.var(negvar): + s.append((negvar, html.var(negvar))) + return s + else: + return [] + + def heading_info(self): + current_value = self.current_value() + if current_value: + table = self.what.replace("host_contact", "contact").replace("service_contact", "contact") + alias = html.live.query_value("GET %sgroups\nCache: reload\nColumns: alias\nFilter: name = %s\n" % + (table, lqencode(current_value)), current_value) + return alias + +declare_filter(104, FilterGroupCombo("host", _("Host is in Group"), False), _("Optional selection of host group")) +declare_filter(105, FilterMultigroup("host", _("Several Host Groups")), _("Selection of multiple host groups")) +declare_filter(204, FilterGroupCombo("service", _("Service is in Group"), False), _("Optional selection of service group")) +declare_filter(205, FilterGroupCombo("service", _("Servicegroup (enforced)"), True), _("Dropdown list, selection of service group is enforced")) +declare_filter(205, FilterMultigroup("service", _("Several Service Groups")), _("Selection of multiple service groups")) + +declare_filter(106, FilterGroupCombo("host_contact", _("Host Contact Group"), False), _("Optional selection of host contact group")) +declare_filter(206, FilterGroupCombo("service_contact", _("Service Contact Group"), False), _("Optional selection of service contact group")) + +declare_filter(107, FilterText("host_ctc", _("Host Contact"), "host", "host_contacts", "host_ctc", ">=")) +declare_filter(207, FilterText("service_ctc", _("Service Contact"), "service", "service_contacts", "service_ctc", ">=")) + + +# Selection of one group to be used in the info "hostgroup" or "servicegroup". 
+class FilterGroupSelection(Filter): + def __init__(self, infoname, title): + Filter.__init__(self, name=infoname, title=title, info=infoname, htmlvars=[infoname], link_columns=[]) + self.what = infoname + + def display(self): + choices = all_groups(self.what[:-5]) # chop off "group", leaves host or service + html.sorted_select(self.htmlvars[0], choices) + + def current_value(self): + return html.var(self.htmlvars[0]) + + def filter(self, infoname): + current_value = self.current_value() + if current_value: + return "Filter: %s_name = %s\n" % (self.what, lqencode(current_value)) + else: + return "" + + def variable_settings(self, row): + group_name = row[self.what + "_name"] + return [ (self.htmlvars[0], group_name) ] + +# Filter for selecting one specific host group in the hostgroup views +declare_filter(104, FilterGroupSelection("hostgroup", _("Host Group")), _("Selection of the host group")) +declare_filter(104, FilterGroupSelection("servicegroup", _("Service Group")), _("Selection of the service group")) + +class FilterHostgroupVisibility(Filter): + def __init__(self, name, title): + Filter.__init__(self, name=name, title=title, info="hostgroup", htmlvars=[ "hostgroupshowempty" ], link_columns=[]) + + def display(self): + html.checkbox("hostgroupshowempty", False, label="Show empty groups") + + def filter(self, infoname): + if html.var("hostgroupshowempty"): + return "" + else: + return "Filter: hostgroup_num_hosts > 0\n" + +declare_filter(101, FilterText("hostgroupnameregex", _("Hostgroup (Regex)"), "hostgroup", "hostgroup_name", "hostgroup_regex", "~~"), + _("Search field allowing regular expressions and partial matches on the names of hostgroups")) + +declare_filter(102, FilterHostgroupVisibility("hostgroupvisibility", _("Empty Hostgroup Visibilitiy")), + _("You can enable this checkbox to show empty hostgroups")) + +declare_filter(101, FilterText("servicegroupnameregex", _("Servicegroup (Regex)"), "servicegroup", "servicegroup_name", "servicegroup_regex", "~~"), + _("Search field allowing regular expression and partial matches")) + +declare_filter(101, FilterText("servicegroupname", _("Servicegroup (enforced)"), "servicegroup", "servicegroup_name", "servicegroup_name", "="), + _("Exact match, used for linking")) + +class FilterQueryDropdown(Filter): + def __init__(self, name, title, info, query, filterline): + Filter.__init__(self, name, title, info, [ name ], []) + self.query = query + self.filterline = filterline + + def display(self): + selection = html.live.query_column_unique(self.query) + html.sorted_select(self.name, [("", "")] + [(x,x) for x in selection]) + + def filter(self, infoname): + current = html.var(self.name) + if current: + return self.filterline % lqencode(current) + else: + return "" + +declare_filter(110, FilterQueryDropdown("host_check_command", _("Host check command"), "host", \ + "GET commands\nCache: reload\nColumns: name\n", "Filter: host_check_command ~ ^%s(!.*)?\n")) +declare_filter(210, FilterQueryDropdown("check_command", _("Service check command"), "service", \ + "GET commands\nCache: reload\nColumns: name\n", "Filter: service_check_command ~ ^%s(!.*)?$\n")) + +class FilterServiceState(Filter): + def __init__(self, name, title, prefix): + Filter.__init__(self, name, title, + "service", [ prefix + "st0", prefix + "st1", prefix + "st2", prefix + "st3", prefix + "stp" ], []) + self.prefix = prefix + + def display(self): + html.begin_checkbox_group() + for var, text in [(self.prefix + "st0", _("OK")), (self.prefix + "st1", _("WARN")), \ + (self.prefix + 
"st2", _("CRIT")), (self.prefix + "st3", _("UNKNOWN")), + (self.prefix + "stp", _("PEND"))]: + html.checkbox(var, True, label=text) + html.end_checkbox_group() + + def filter(self, infoname): + headers = [] + for i in [0,1,2,3]: + if html.get_checkbox(self.prefix + "st%d" % i) == False: + if self.prefix == "hd": + column = "service_last_hard_state" + else: + column = "service_state" + headers.append("Filter: %s = %d\n" + "Filter: service_has_been_checked = 1\n" + "And: 2\nNegate:\n" % (column, i)) + if html.get_checkbox(self.prefix + "stp") == False: + headers.append("Filter: service_has_been_checked = 1\n") + if len(headers) == 5: # none allowed = all allowed (makes URL building easier) + return "" + else: + return "".join(headers) + +declare_filter(215, FilterServiceState("svcstate", _("Service states"), "")) +declare_filter(216, FilterServiceState("svchardstate", _("Service hard states"), "hd")) + +class FilterHostState(Filter): + def __init__(self): + Filter.__init__(self, "hoststate", _("Host states"), + "host", [ "hst0", "hst1", "hst2", "hstp" ], []) + + def display(self): + html.begin_checkbox_group() + for var, text in [("hst0", _("UP")), ("hst1", _("DOWN")), + ("hst2", _("UNREACH")), ("hstp", _("PENDING"))]: + html.checkbox(var, True, label=text) + html.end_checkbox_group() + + def filter(self, infoname): + headers = [] + for i in [0,1,2]: + if html.get_checkbox("hst%d" % i) == False: + headers.append("Filter: host_state = %d\n" + "Filter: host_has_been_checked = 1\n" + "And: 2\nNegate:\n" % i) + if html.get_checkbox("hstp") == False: + headers.append("Filter: host_has_been_checked = 1\n") + if len(headers) == 4: # none allowed = all allowed (makes URL building easier) + return "" + else: + return "".join(headers) + +declare_filter(115, FilterHostState()) + + + +class FilterTristate(Filter): + def __init__(self, name, title, info, column, deflt = -1): + self.column = column + self.varname = "is_" + name + Filter.__init__(self, name, title, info, [ self.varname ], []) + self.deflt = deflt + + def display(self): + current = html.var(self.varname) + html.begin_radio_group(horizontal = True) + for value, text in [("1", _("yes")), ("0", _("no")), ("-1", _("(ignore)"))]: + checked = current == value or (current in [ None, ""] and int(value) == self.deflt) + html.radiobutton(self.varname, value, checked, text + "   ") + html.end_radio_group() + + def tristate_value(self): + current = html.var(self.varname) + if current in [ None, "" ]: + return self.deflt + return int(current) + + def filter(self, infoname): + current = self.tristate_value() + if current == -1: # ignore + return "" + elif current == 1: + return self.filter_code(infoname, True) + else: + return self.filter_code(infoname, False) + + +class FilterStateType(FilterTristate): + def __init__(self, info, column, title, deflt = -1): + FilterTristate.__init__(self, column, title, info, None, deflt) + + def display(self): + current = html.var(self.varname) + html.begin_radio_group(horizontal = True) + for value, text in [("0", _("SOFT")), ("1", _("HARD")), ("-1", _("(ignore)"))]: + checked = current == value or (current in [ None, ""] and int(value) == self.deflt) + html.radiobutton(self.varname, value, checked, text + "   ") + html.end_radio_group() + + def filter_code(self, infoname, positive): + return "Filter: state_type = %d\n" % int(positive) + +declare_filter(116, FilterStateType("host", "host_state_type", _("Host state type"))) +declare_filter(217, FilterStateType("service", "service_state_type", _("Service state type"))) + 
+class FilterNagiosFlag(FilterTristate): + def __init__(self, info, column, title, deflt = -1): + FilterTristate.__init__(self, column, title, info, column, deflt) + + def filter_code(self, infoname, positive): + if positive: + return "Filter: %s != 0\n" % self.column + else: + return "Filter: %s = 0\n" % self.column + +class FilterNagiosExpression(FilterTristate): + def __init__(self, info, name, title, pos, neg, deflt = -1): + FilterTristate.__init__(self, name, title, info, None, deflt) + self.pos = pos + self.neg = neg + + def filter_code(self, infoname, positive): + return positive and self.pos or self.neg + +declare_filter(120, FilterNagiosExpression("host", "summary_host", _("Is summary host"), + "Filter: host_custom_variable_names >= _REALNAME\n", + "Filter: host_custom_variable_names < _REALNAME\n")) + +declare_filter(250, FilterNagiosFlag("service", "service_process_performance_data", _("Processes performance data"))) +declare_filter(251, FilterNagiosExpression("service", "has_performance_data", _("Has performance data"), + "Filter: service_perf_data != \n", + "Filter: service_perf_data = \n")) + +declare_filter(130, FilterNagiosFlag("host", "host_in_notification_period", _("Host in notif. period"))) +declare_filter(131, FilterNagiosFlag("host", "host_acknowledged", _("Host problem has been acknowledged"))) +declare_filter(132, FilterNagiosFlag("host", "host_active_checks_enabled", _("Host active checks enabled"))) +declare_filter(133, FilterNagiosFlag("host", "host_notifications_enabled", _("Host notifications enabled"))) +declare_filter(230, FilterNagiosFlag("service", "service_acknowledged", _("Problem acknowledged"))) +declare_filter(231, FilterNagiosFlag("service", "service_in_notification_period", _("Service in notif. per."))) +declare_filter(233, FilterNagiosFlag("service", "service_active_checks_enabled", _("Active checks enabled"))) +declare_filter(234, FilterNagiosFlag("service", "service_notifications_enabled", _("Notifications enabled"))) +declare_filter(236, FilterNagiosFlag("service", "service_is_flapping", _("Flapping"))) +declare_filter(231, FilterNagiosFlag("service", "service_scheduled_downtime_depth", _("Service in downtime"))) +declare_filter(132, FilterNagiosFlag("host", "host_scheduled_downtime_depth", _("Host in downtime"))) +declare_filter(232, FilterNagiosExpression("service", "in_downtime", _("Host/service in downtime"), + "Filter: service_scheduled_downtime_depth > 0\nFilter: host_scheduled_downtime_depth > 0\nOr: 2\n", + "Filter: service_scheduled_downtime_depth = 0\nFilter: host_scheduled_downtime_depth = 0\nAnd: 2\n")) + +declare_filter(232, FilterNagiosExpression("host", "host_staleness", _("Host is stale"), + "Filter: host_staleness >= %0.2f\n" % config.staleness_threshold, + "Filter: host_staleness < %0.2f\n" % config.staleness_threshold)) +declare_filter(232, FilterNagiosExpression("service", "service_staleness", _("Service is stale"), + "Filter: service_staleness >= %0.2f\n" % config.staleness_threshold, + "Filter: service_staleness < %0.2f\n" % config.staleness_threshold)) + +class FilterSite(Filter): + def __init__(self, name, enforce): + Filter.__init__(self, name, _("Site") + (enforce and _( " (enforced)") or ""), 'host', ["site"], []) + self.enforce = enforce + + def visible(self): + return config.is_multisite() + + def display(self): + if not config.is_multisite(): + choices = [("", _("(local)"))] + else: + if self.enforce: + choices = [] + else: + choices = [("","")] + for sitename, state in html.site_status.items(): + if state["state"] 
== "online": + choices.append((sitename, config.site(sitename)["alias"])) + html.sorted_select("site", choices) + + def heading_info(self): + current_value = html.var("site") + if current_value: + alias = config.site(current_value)["alias"] + return alias + + def variable_settings(self, row): + return [("site", row["site"])] + +declare_filter(500, FilterSite("siteopt", False), _("Optional selection of a site")) +declare_filter(501, FilterSite("site", True), _("Selection of site is enforced, use this filter for joining")) + +# name: internal id of filter +# title: user displayed title of the filter +# info: usually either "host" or "service" +# column: a livestatus column of type int or float +class FilterNumberRange(Filter): # type is int + def __init__(self, name, title, info, column): + self.column = column + varnames = [ name + "_from", name + "_until" ] + Filter.__init__(self, name, title, info, varnames, []) + + def display(self): + html.write(_("From:") + " ") + html.text_input(self.htmlvars[0], style="width: 80px;") + html.write("   " + _("To:") + " ") + html.text_input(self.htmlvars[1], style="width: 80px;") + + def filter(self, tablename): + lql = "" + for i, op in [ (0, ">="), (1, "<=") ]: + try: + txt = html.var(self.htmlvars[i]) + int(txt.strip()) + lql += "Filter: %s %s %s\n" % (self.column, op, txt.strip()) + except: + pass + return lql + + +declare_filter(232, FilterNumberRange("host_notif_number", _("Current Host Notification Number"), "host", "current_notification_number")) +declare_filter(232, FilterNumberRange("svc_notif_number", _("Current Service Notification Number"), "service", "current_notification_number")) + +declare_filter(234, FilterNumberRange("host_num_services", _("Number of Services of the Host"), "host", "num_services")) + + + +# Filter for setting time ranges, e.g. on last_state_change and last_check +class FilterTime(Filter): + def __init__(self, info, name, title, column): + self.column = column + self.name = name + self.ranges = [ + (86400, _("days")), + (3600, _("hours")), + (60, _("min")), + (1, _("sec")), + ] + varnames = [ name + "_from", name + "_from_range", + name + "_until", name + "_until_range" ] + + Filter.__init__(self, name, title, info, varnames, [column]) + + def double_height(self): + return True + + def display(self): + choices = [ (str(sec), title + " " + _("ago")) for sec, title in self.ranges ] + \ + [ ("abs", _("Date (YYYY-MM-DD)")), + ("unix", _("UNIX timestamp")) ] + + html.write("") + for what, whatname in [ + ( "from", _("From") ), + ( "until", _("Until") ) ]: + varprefix = self.name + "_" + what + html.write("" % whatname) + html.write("") + html.write("
    %s:") + html.text_input(varprefix, style="width: 116px;") + html.write("") + html.select(varprefix + "_range", choices, "3600") + html.write("
    ") + + + def filter(self, infoname): + fromsecs, untilsecs = self.get_time_range() + filtertext = "" + if fromsecs != None: + filtertext += "Filter: %s >= %d\n" % (self.column, fromsecs) + if untilsecs != None: + filtertext += "Filter: %s <= %d\n" % (self.column, untilsecs) + return filtertext + + + # Extract timerange user has selected from HTML variables + def get_time_range(self): + range = [] + for what in [ "from", "until" ]: + varprefix = self.name + "_" + what + count = html.var(varprefix) + if count == "": + range.append(None) + else: + rangename = html.var(varprefix + "_range") + if rangename == "abs": + try: + range.append(time.mktime(time.strptime(count, "%Y-%m-%d"))) + except: + html.add_user_error(varprefix, _("Please enter the date in the format YYYY-MM-DD.")) + range.append(None) + elif rangename == "unix": + range.append(int(count)) + else: + try: + count = int(count) + secs = count * int(rangename) + range.append(int(time.time()) - secs) + except: + range.append(None) + html.set_var(varprefix, "") + + return range + + # I'm not sure if this function is useful or ever been called. + # Problem is, that it is not clear wether to use "since" or "before" + # here. + # def variable_settings(self, row): + # vars = [] + # secs = int(time.time()) - row[self.column] + # for s, n in self.ranges[::-1]: + # v = secs / s + # secs -= v * s + # vars.append((self.name + "_" + n, secs)) + # return vars + + # def heading_info(self): + # return _("since the last couple of seconds") + +declare_filter(250, FilterTime("service", "svc_last_state_change", _("Last service state change"), "service_last_state_change")) +declare_filter(251, FilterTime("service", "svc_last_check", _("Last service check"), "service_last_check")) + +declare_filter(250, FilterTime("host", "host_last_state_change", _("Last host state change"), "host_last_state_change")) +declare_filter(251, FilterTime("host", "host_last_check", _("Last host check"), "host_last_check")) +declare_filter(253, FilterTime("comment", "comment_entry_time", _("Time of comment"), "comment_entry_time" )) +declare_filter(253, FilterTime("downtime", "downtime_entry_time", _("Time when downtime was created"), "downtime_entry_time" )) +# _ +# | | ___ __ _ +# | | / _ \ / _` | +# | |__| (_) | (_| | +# |_____\___/ \__, | +# |___/ + +declare_filter(252, FilterTime("log", "logtime", _("Time of log entry"), "log_time")) +# INFO 0 // all messages not in any other class +# ALERT 1 // alerts: the change service/host state +# PROGRAM 2 // important programm events (restart, ...) +# NOTIFICATION 3 // host/service notifications +# PASSIVECHECK 4 // passive checks +# COMMAND 5 // external commands +# STATE 6 // initial or current states + +class FilterLogClass(Filter): + def __init__(self): + self.log_classes = [ + (0, _("Informational")), (1, _("Alerts")), (2, _("Program")), + (3, _("Notifications")), (4, _("Passive checks")), + (5, _("Commands")), (6, _("States")) ] + + Filter.__init__(self, "log_class", _("Logentry class"), + "log", [ "logclass%d" % l for l, c in self.log_classes ], []) + + def double_height(self): + return True + + def display(self): + if html.var("filled_in"): + defval = "" + else: + defval = "on" + html.write("") + if config.filter_columns == 1: + num_cols = 4 + else: + num_cols = 2 + col = 1 + for l, c in self.log_classes: + if col == 1: + html.write("") + html.write("") + if col == num_cols: + html.write("\n") + col = 1 + else: + col += 1 + if col < num_cols: + html.write("") + html.write("
    ") + html.checkbox("logclass%d" % l, defval) + html.write(c) + html.write("
    \n") + + def filter(self, infoname): + headers = [] + if html.var("filled_in"): + defval = "" + else: + defval = "on" + + for l, c in self.log_classes: + if html.var("logclass%d" % l, defval) == "on": + headers.append("Filter: class = %d\n" % l) + if len(headers) == 0: + return "Limit: 0\n" # no class allowed + else: + return "".join(headers) + ("Or: %d\n" % len(headers)) + +declare_filter(255, FilterLogClass()) +declare_filter(202, FilterUnicode("log_plugin_output", _("Log: plugin output"), "log", "log_plugin_output", "log_plugin_output", "~~")) +declare_filter(203, FilterText("log_type", _("Log: message type"), "log", "log_type", "log_type", "~~")) +declare_filter(204, FilterText("log_state_type", _("Log: state type"), "log", "log_state_type", "log_state_type", "~~")) +declare_filter(260, FilterText("log_contact_name", _("Log: contact name"), "log", "log_contact_name", "log_contact_name", "="), + _("Exact match, used for linking")) + +class FilterLogState(Filter): + def __init__(self): + self._items = [ ("h0", "host", 0, _("Up")),("h1", "host", 1, _("Down")),("h2", "host", 2, _("Unreachable")), + ("s0", "service", 0, _("OK")), ("s1", "service", 1, _("Warning")), + ("s2", "service", 2, _("Critical")),("s3", "service", 3, _("Unknown")) ] + + Filter.__init__(self, "log_state", _("Type of alerts of hosts and services"), + "log", [ "logst_" + e[0] for e in self._items ], []) + + def double_height(self): + return True + + def display(self): + html.write("
    ") + html.begin_checkbox_group() + for varsuffix, what, state, text in self._items: + if state == 0: + html.write("%s:" % (_(what.title()))) + html.write("  ") + html.checkbox("logst_" + varsuffix, True, label=text) + if not html.mobile: + html.write("
    ") + if varsuffix == "h2": + html.write("
    ") + html.end_checkbox_group() + html.write("
    ") + + def filter(self, infoname): + headers = [] + for varsuffix, what, state, text in self._items: + if html.get_checkbox("logst_" + varsuffix) != False: # None = form not filled in = allow + headers.append("Filter: log_type ~ %s .*\nFilter: log_state = %d\nAnd: 2\n" % + (what.upper(), state)) + if len(headers) == 0: + return "Limit: 0\n" # no allowed state + elif len(headers) == len(self._items): + return "" # all allowed or form not filled in + else: + return "".join(headers) + ("Or: %d\n" % len(headers)) + +declare_filter(270, FilterLogState()) + +class BIServiceIsUsedFilter(FilterTristate): + def __init__(self): + FilterTristate.__init__(self, "aggr_service_used", _("Used in BI aggregate"), "service", None) + + def filter(self, infoname): + return "" + + def filter_table(self, rows): + current = self.tristate_value() + if current == -1: + return rows + new_rows = [] + for row in rows: + is_part = bi.is_part_of_aggregation( + "service", row["site"], row["host_name"], row["service_description"]) + if (is_part and current == 1) or \ + (not is_part and current == 0): + new_rows.append(row) + return new_rows + +declare_filter(300, BIServiceIsUsedFilter()) + +declare_filter(301, FilterText("downtime_id", _("Downtime ID"), "downtime", "downtime_id", "downtime_id", "=")) + +class FilterHostTags(Filter): + def __init__(self): + self.count = 3 + htmlvars = [] + for num in range(self.count): + htmlvars += [ 'host_tag_%d_grp' % num, 'host_tag_%d_op' % num, 'host_tag_%d_val' % num ] + + Filter.__init__(self, + name = 'host_tags', + title = _('Host Tags'), + info = 'host', + htmlvars = htmlvars, + link_columns = [] + ) + + def display(self): + groups = [ (e[0], e[1].lstrip("/") ) for e in config.wato_host_tags ] + operators = [ + ("is", _("=")), + ("isnot", HTML(_("≠"))), + ] + + # replace unicode strings, before writing out as "json" + grouped = {} + for entry in config.wato_host_tags: + grouped.setdefault(entry[0], [["", ""]]) + + for tag_entry in entry[2]: + tag = tag_entry[0] + title = tag_entry[1] + if tag is None: + tag = '' + + if type(title) == unicode: + title = title.encode("utf-8") + grouped[entry[0]].append([tag, title]) + + html.javascript('g_hosttag_groups = %r;' % grouped) + html.write('') + for num in range(self.count): + prefix = 'host_tag_%d' % num + html.write('') + html.write('
    ') + html.sorted_select(prefix + '_grp', + [("", "")] + groups, + onchange = 'host_tag_update_value(\'%s\', this.value)' % prefix, + attrs = {'style': 'width:129px'} + ) + html.write('') + html.sorted_select(prefix + '_op', [("", "")] + operators, + attrs = {'style': 'width:36px'}) + html.write('') + html.sorted_select(prefix + '_val', + html.var(prefix + '_grp') and grouped[html.var(prefix + '_grp')] or [("", "")], + attrs = {'style': 'width:129px'}) + html.write('
    ') + + def hosttag_filter(self, negate, tag): + return 'Filter: host_custom_variables %s TAGS (^|[ ])%s($|[ ])' % (negate and '!~' or '~', lqencode(tag)) + + def filter(self, infoname): + headers = [] + + # Do not restrict to a certain number, because we'd like to link to this + # via an URL, e.g. from the virtual host tree snapin + num = 0 + while html.has_var('host_tag_%d_op' % num): + prefix = 'host_tag_%d' % num + op = html.var(prefix + '_op') + tag = html.var(prefix + '_val') + + if op: + if tag: # positive host tag + headers.append(self.hosttag_filter(op != "is", tag)) + else: + # empty host tag. Darn. We need to create a filter that excludes all other host tags + # of the group + group = html.var(prefix + '_grp') + grouptags = None + for entry in config.wato_host_tags: + if entry[0] == group: # found our group + grouptags = [ x[0] for x in entry[2] if x[0] ] + break + if grouptags: # should never be empty, but maybe faked URL + for tag in grouptags: + headers.append(self.hosttag_filter(False, tag)) + if len(grouptags) > 1: + headers.append("Or: %d" % len(grouptags)) + if op == "is": + headers.append("Negate:") + + num += 1 + + if headers: + return '\n'.join(headers) + '\n' + else: + return '' + + def double_height(self): + return True + +declare_filter(302, FilterHostTags()) + + +class FilterStarred(FilterTristate): + def __init__(self, what): + self.what = what + icon = ' ' + FilterTristate.__init__(self, + name = what + "_favorites", + title = icon + (what == "host" and _("Favorite Hosts") or _("Favorite Services")), + info = what, + column = what + "_favorite", # Column, not used + deflt = -1, + ) + + def filter(self, infoname): + current = self.tristate_value() + if current == -1: + return "" + elif current: + aand, oor, eq = "And", "Or", "=" + else: + aand, oor, eq = "Or", "And", "!=" + + stars = config.load_stars() + filters = "" + count = 0 + if self.what == "host": + for star in stars: + if ";" in star: + continue + filters += "Filter: host_name %s %s\n" % (eq, lqencode(star)) + count += 1 + else: + for star in stars: + if ";" not in star: + continue + h, s = star.split(";") + filters += "Filter: host_name %s %s\n" % (eq, lqencode(h)) + filters += "Filter: service_description %s %s\n" % (eq, lqencode(s)) + filters += "%s: 2\n" % aand + count += 1 + + # No starred object and show only starred -> show nothing + if count == 0 and current: + return "Filter: host_state = -4612\n" + + # no starred object and show unstarred -> show everything + elif count == 0: + return "" + + filters += "%s: %d\n" % (oor, count) + return filters + +declare_filter(501, FilterStarred("host")) +declare_filter(501, FilterStarred("service")) diff -Nru check-mk-1.2.2p3/plugins/visuals/infos.py check-mk-1.2.6p12/plugins/visuals/infos.py --- check-mk-1.2.2p3/plugins/visuals/infos.py 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/plugins/visuals/infos.py 2015-09-21 10:59:54.000000000 +0000 @@ -0,0 +1,128 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. 
+# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. + +infos['host'] = { + 'title' : _('Host'), + 'single_spec' : [ + ('host', TextUnicode( + title = _('Hostname'), + )), + ], +} + +infos['service'] = { + 'title' : _('Service'), + 'single_spec' : [ + ('service', TextUnicode( + title = _('Service Description'), + )), + ], +} + +infos['hostgroup'] = { + 'title' : _('Host Group'), + 'single_site' : False, # spread over multiple sites + 'single_spec' : [ + ('hostgroup', TextUnicode( + title = _('Host Group Name'), + )), + ], +} + +infos['servicegroup'] = { + 'title' : _('Service Group'), + 'single_site' : False, # spread over multiple sites + 'single_spec' : [ + ('servicegroup', TextUnicode( + title = _('Service Group Name'), + )), + ], +} + +infos['log'] = { + 'title' : _('Log Entry'), + 'single_spec' : None, +} + +infos['comment'] = { + 'title' : _('Comment'), + 'single_spec' : [ + ('comment_id', Integer( + title = _('Comment ID'), + )), + ] +} + +infos['downtime'] = { + 'title' : _('Downtime'), + 'single_spec' : [ + ('downtime_id', Integer( + title = _('Downtime ID'), + )), + ] +} + +infos['contact'] = { + 'title' : _('Contact'), + 'single_spec' : [ + ('log_contact_name', TextUnicode( + title = _('Contact Name'), + )), + ] +} + +infos['command'] = { + 'title' : _('Command'), + 'single_spec' : [ + ('command_name', TextUnicode( + title = _('Command Name'), + )), + ] +} + +infos['aggr'] = { + 'title' : _('BI Aggregation'), + 'single_spec' : [ + ('aggr_name', TextAscii( + title = _('Aggregation Name'), + )), + ], +} + +infos['invswpac'] = { + 'title' : _('Software Package'), + 'single_spec' : None, +} + +infos['aggr_group'] = { + 'title' : _('BI Aggregation Group'), + 'title_plural': _('BI Aggregation Groups'), + 'single_spec' : [ + ('aggr_group', TextAscii( + title = _('Aggregation group'), + )), + ], +} diff -Nru check-mk-1.2.2p3/plugins/visuals/inventory.py check-mk-1.2.6p12/plugins/visuals/inventory.py --- check-mk-1.2.2p3/plugins/visuals/inventory.py 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/plugins/visuals/inventory.py 2015-09-21 10:59:54.000000000 +0000 @@ -0,0 +1,280 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. 
check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. + +import inventory + +# Try to magically compare two software versions. +# Currently we only assume the format A.B.C.D.... +# When we suceed converting A to a number, then we +# compare by integer, otherwise by text. +def try_int(x): + try: + return int(x) + except: + return x + +def cmp_version(a, b): + if a == None or b == None: + return cmp(a, b) + aa = map(try_int, a.split(".")) + bb = map(try_int, b.split(".")) + return cmp(aa, bb) + +class FilterInvText(Filter): + def __init__(self, name, invpath, title): + self._invpath = invpath + Filter.__init__(self, name, title, "host", [name], []) + + def need_inventory(self): + return True + + def display(self): + htmlvar = self.htmlvars[0] + current_value = html.var(htmlvar, "") + html.text_input(htmlvar, current_value) + + def filter_table(self, rows): + htmlvar = self.htmlvars[0] + filtertext = html.var(htmlvar, "").strip().lower() + if not filtertext: + return rows + + regex = re.compile(filtertext, re.IGNORECASE) + + newrows = [] + for row in rows: + invdata = inventory.get(row["host_inventory"], self._invpath) + if invdata == None: + invdata = "" + if regex.search(invdata): + newrows.append(row) + return newrows + +class FilterInvFloat(Filter): + def __init__(self, name, invpath, title, unit="", scale=1.0): + self._invpath = invpath + self._unit = unit + self._scale = scale + Filter.__init__(self, name, title, "host", [name + "_from", name + "_to"], []) + + def need_inventory(self): + return True + + def display(self): + html.write(_("From: ")) + htmlvar = self.htmlvars[0] + current_value = html.var(htmlvar, "") + html.number_input(htmlvar, current_value) + if self._unit: + html.write(self._unit) + + html.write("  " + _("To: " )) + htmlvar = self.htmlvars[1] + current_value = html.var(htmlvar, "") + html.number_input(htmlvar, current_value) + if self._unit: + html.write(self._unit) + + def filter_table(self, rows): + fromvar = self.htmlvars[0] + fromtext = html.var(fromvar) + lower = None + if fromtext: + try: + lower = float(fromtext) * self._scale + except: + pass + + tovar = self.htmlvars[1] + totext = html.var(tovar) + upper = None + if totext: + try: + upper = float(totext) * self._scale + except: + pass + + if lower == None and upper == None: + return rows + + newrows = [] + for row in rows: + invdata = inventory.get(row["host_inventory"], self._invpath) + if lower != None and invdata < lower: + continue + if upper != None and invdata > upper: + continue + newrows.append(row) + return newrows + +class FilterHasInventory(FilterTristate): + def __init__(self): + FilterTristate.__init__(self, "has_inv", _("Has Inventory Data"), "host", "host_inventory") + + def filter(self, infoname): + return "" # No Livestatus filtering right now + + def filter_table(self, rows): + tri = self.tristate_value() + if tri == -1: + return rows + elif tri == 1: + return [ row for row in rows if row["host_inventory"] ] + else: # not + return [ row for row in rows if not row["host_inventory"] ] + +declare_filter(801, FilterHasInventory()) + +class FilterInvHasSoftwarePackage(Filter): + def 
__init__(self): + self._varprefix = "invswpac_host_" + Filter.__init__(self, "invswpac", _("Host has software package"), "host", + [ self._varprefix + "name", self._varprefix + "version_from", + self._varprefix + "version_to", self._varprefix + "negate"], []) + + def double_height(self): + return True + + def need_inventory(self): + return True + + def display(self): + html.text_input(self._varprefix + "name") + html.write("
    ") + html.begin_radio_group(horizontal=True) + html.radiobutton(self._varprefix + "match", "exact", True, label=_("exact match")) + html.radiobutton(self._varprefix + "match", "regex", False, label=_("regular expression, substring match")) + html.end_radio_group() + html.write("
    ") + html.write(_("Min. Version:")) + html.text_input(self._varprefix + "version_from", size = 9) + html.write("   ") + html.write(_("Max. Vers.:")) + html.text_input(self._varprefix + "version_to", size = 9) + html.write("
    ") + html.checkbox(self._varprefix + "negate", False, label=_("Negate: find hosts not having this package")) + + def filter_table(self, rows): + name = html.var_utf8(self._varprefix + "name") + if not name: + return rows + + from_version = html.var(self._varprefix + "from_version") + to_version = html.var(self._varprefix + "to_version") + negate = html.get_checkbox(self._varprefix + "negate") + match = html.var(self._varprefix + "match") + if match == "regex": + name = re.compile(name) + + new_rows = [] + for row in rows: + packages = inventory.get(row["host_inventory"], ".software.packages:") + is_in = self.find_package(packages, name, from_version, to_version) + if is_in != negate: + new_rows.append(row) + return new_rows + + def find_package(self, packages, name, from_version, to_version): + for package in packages: + if type(name) == unicode: + if package["name"] != name: + continue + else: + if not name.search(package["name"]): + continue + if not from_version and not to_version: + return True # version not relevant + version = package["version"] + if from_version == to_version and from_version != version: + continue + if from_version and self.version_is_lower(version, from_version): + continue + if to_version and self.version_is_higher(version, to_version): + continue + return False + + def version_is_lower(self, a, b): + return a != b and not self.version_is_higher(a, b) + + def version_is_higher(self, a, b): + return cmp_version(a, b) == 1 + +declare_filter(801, FilterInvHasSoftwarePackage()) + +class FilterSWPacsText(Filter): + def __init__(self, name, title): + varname = "invswpac_" + name + Filter.__init__(self, varname, title, "invswpacs", [varname], []) + + def display(self): + htmlvar = self.htmlvars[0] + current_value = html.var(htmlvar, "") + html.text_input(htmlvar, current_value) + + def filter_table(self, rows): + htmlvar = self.htmlvars[0] + filtertext = html.var(htmlvar, "").strip().lower() + if not filtertext: + return rows + + regex = re.compile(filtertext, re.IGNORECASE) + + newrows = [] + for row in rows: + if regex.search(row.get(htmlvar, "")): + newrows.append(row) + return newrows + +class FilterSWPacsVersion(Filter): + def __init__(self, name, title): + varname = "invswpac_" + name + Filter.__init__(self, varname, title, "invswpacs", [varname + "_from", varname + "_to"], []) + + def display(self): + htmlvar = self.htmlvars[0] + html.write(_("Min. Version:")) + html.text_input(self.htmlvars[0], size = 9) + html.write("   ") + html.write(_("Max. 
Version:")) + html.text_input(self.htmlvars[1], size = 9) + + def filter_table(self, rows): + from_version = html.var(self.htmlvars[0]) + to_version = html.var(self.htmlvars[1]) + if not from_version and not to_version: + return rows # Filter not used + + new_rows = [] + for row in rows: + version = row.get(self.name, "") + if from_version and cmp_version(version, from_version) == -1: + continue + if to_version and cmp_version(version, to_version) == 1: + continue + new_rows.append(row) + + return new_rows + diff -Nru check-mk-1.2.2p3/plugins/visuals/wato.py check-mk-1.2.6p12/plugins/visuals/wato.py --- check-mk-1.2.2p3/plugins/visuals/wato.py 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/plugins/visuals/wato.py 2015-09-21 10:59:54.000000000 +0000 @@ -0,0 +1,119 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. + +import wato + +class FilterWatoFile(Filter): + def __init__(self): + Filter.__init__(self, "wato_folder", _("WATO Folder"), "host", ["filename"], []) + self.last_wato_data_update = None + + def available(self): + # This filter is also available on slave sites with disabled WATO + # To determine if this site is a slave we check the existance of the distributed_wato.mk + # file and the absence of any site configuration + return (config.wato_enabled or\ + (not wato.is_distributed() and os.path.exists(defaults.check_mk_configdir + "/distributed_wato.mk")))\ + and wato.have_folders() + + def load_wato_data(self): + self.tree = wato.get_folder_tree() + self.path_to_tree = {} # will be filled by self.folder_selection + self.selection = self.folder_selection(self.tree, "", 0) + self.last_wato_data_update = time.time() + + def check_wato_data_update(self): + if not self.last_wato_data_update or time.time() - self.last_wato_data_update > 5: + self.load_wato_data() + + def display(self): + self.check_wato_data_update() + # Note: WATO Folders that the user has not permissions to must not be visible. + # Permissions in this case means, that the user has view permissions for at + # least one host in that folder. 
+ result = html.live.query("GET hosts\nCache: reload\nColumns: filename\nStats: state >= 0\n") + allowed_folders = set([""]) + for path, host_count in result: + # convert '/wato/server/hosts.mk' to 'server' + folder = path[6:-9] + # allow the folder an all of its parents + parts = folder.split("/") + subfolder = "" + for part in parts: + if subfolder: + subfolder += "/" + subfolder += part + allowed_folders.add(subfolder) + + html.select(self.name, [("", "")] + [ entry for entry in self.selection if (entry[0] in allowed_folders) ]) + + def filter(self, infoname): + self.check_wato_data_update() + current = html.var(self.name) + if current: + return "Filter: host_filename ~ ^/wato/%s/\n" % current.replace("\n", "") # prevent insertions attack + else: + return "" + + # Construct pair-list of ( folder-path, title ) to be used + # by the HTML selection box. This also updates self._tree, + # a dictionary from the path to the title. + def folder_selection(self, folder, prefix, depth): + my_path = folder[".path"] + if depth: + title_prefix = "   " * depth + "` " + "- " * depth + else: + title_prefix = "" + self.path_to_tree[my_path] = folder["title"] + sel = [ (my_path , HTML(title_prefix + html.attrencode(folder["title"]))) ] + sel += self.sublist(folder.get(".folders", {}), my_path, depth) + return sel + + def sublist(self, elements, my_path, depth): + vs = elements.values() + vs.sort(lambda a, b: cmp(a["title"].lower(), b["title"].lower())) + sel = [] + for e in vs: + sel += self.folder_selection(e, my_path, depth + 1) + return sel + + def heading_info(self): + # FIXME: There is a problem with caching data and changing titles of WATO files + # Everything is changed correctly but the filter object is stored in the + # global multisite_filters var and self.path_to_tree is not refreshed when + # rendering this title. Thus the threads might have old information about the + # file titles and so on. + # The call below needs to use some sort of indicator wether the cache needs + # to be renewed or not. + self.check_wato_data_update() + current = html.var(self.name) + if current and current != "/": + return self.path_to_tree.get(current) + +declare_filter(10, FilterWatoFile()) + +if "wato_folder" not in ubiquitary_filters: + ubiquitary_filters.append("wato_folder") # show in all views diff -Nru check-mk-1.2.2p3/plugins/vxvm check-mk-1.2.6p12/plugins/vxvm --- check-mk-1.2.2p3/plugins/vxvm 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/plugins/vxvm 2015-06-24 09:48:39.000000000 +0000 @@ -0,0 +1,50 @@ +#!/bin/sh +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. 
You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. + +# This plugin has been tested on Linux and HPUX. + + +if type vxdmpadm >/dev/null 2>&1; then + echo '<<>>' + vxdmpadm listenclosure all | grep -v -w -e ^[dD]isk -e ^other_disks -e ^ENCLR_NAME -e \^= + echo '<<>>' + ENCS=$( vxdmpadm listenclosure all | grep -v -w -e ^[dD]isk -e ENCLR_NAME -e \^= | awk '{print $1}') + + echo "$ENCS" | while read enc ; do + vxdmpadm getdmpnode enclosure=$enc | grep -v -e \^= -e NAME + done +fi + +if type vxdg >/dev/null 2>&1; then + echo '<<>>' + # Get a list of the in-use disk groups. + DGS=$(vxdg list | grep enabled | awk '{print $1}') + # Deported or otherwise inactive needs no performance monitoring + if [ "X${DGS}" != "X" ]; then + for DG in $DGS ; do + vxprint -g $DG -v -q -Q -F "%type %dgname %name %admin_state %kstate" + done + fi +fi diff -Nru check-mk-1.2.2p3/plugins/wato/active_checks.py check-mk-1.2.6p12/plugins/wato/active_checks.py --- check-mk-1.2.2p3/plugins/wato/active_checks.py 2013-11-05 09:23:09.000000000 +0000 +++ check-mk-1.2.6p12/plugins/wato/active_checks.py 2015-09-21 10:59:54.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. @@ -29,49 +29,329 @@ _("Configure active networking checks like HTTP and TCP")) group = "activechecks" +# This elements are also used in check_parameters.py +check_icmp_params = [ + ( "rta", + Tuple( + title = _("Round trip average"), + elements = [ + Float(title = _("Warning if above"), unit = "ms", default_value = 200.0), + Float(title = _("Critical if above"), unit = "ms", default_value = 500.0), + ])), + ( "loss", + Tuple( + title = _("Packet loss"), + help = _("When the percentage of lost packets is equal or greater then " + "this level, then the according state is triggered. The default for critical " + "is 100%. That means that the check is only critical if all packets " + "are lost."), + elements = [ + Percentage(title = _("Warning if above"), default_value = 80.0), + Percentage(title = _("Critical if above"), default_value = 100.0), + ])), + + ( "packets", + Integer( + title = _("Number of packets"), + help = _("Number ICMP echo request packets to send to the target host on each " + "check execution. All packets are sent directly on check execution. 
Afterwards " + "the check waits for the incoming packets."), + minvalue = 1, + maxvalue = 20, + default_value = 5, + )), + + ( "timeout", + Integer( + title = _("Total timeout of check"), + help = _("After this time (in seconds) the check is aborted, regardless " + "of how many packets have been received yet."), + minvalue = 1, + )), +] + +mail_receiving_params = [ + ('fetch', CascadingDropdown( + title = _('Mail Receiving'), + choices = [ + ('IMAP', _('IMAP'), Dictionary( + optional_keys = ['server'], + elements = [ + ('server', TextAscii( + title = _('IMAP Server'), + allow_empty = False, + help = _('You can specify a hostname or IP address different from the IP address ' + 'of the host this check will be assigned to.') + )), + ('ssl', CascadingDropdown( + title = _('SSL Encryption'), + default_value = (False, 143), + choices = [ + (False, _('Use no encryption'), + Optional(Integer( + allow_empty = False, + default_value = 143, + ), + title = _('TCP Port'), + help = _('By default the standard IMAP Port 143 is used.'), + )), + (True, _('Encrypt IMAP communication using SSL'), + Optional(Integer( + allow_empty = False, + default_value = 993, + ), + title = _('TCP Port'), + help = _('By default the standard IMAP/SSL Port 993 is used.'), + )), + ], + )), + ('auth', Tuple( + title = _('Authentication'), + elements = [ + TextAscii( + title = _('Username'), + allow_empty = False, + size = 24 + ), + Password( + title = _('Password'), + allow_empty = False, + size = 12 + ), + ], + )), + ], + )), + ('POP3', _('POP3'), Dictionary( + optional_keys = ['server'], + elements = [ + ('server', TextAscii( + title = _('POP3 Server'), + allow_empty = False, + help = _('You can specify a hostname or IP address different from the IP address ' + 'of the host this check will be assigned to.') + )), + ('ssl', CascadingDropdown( + title = _('SSL Encryption'), + default_value = (False, 110), + choices = [ + (False, _('Use no encryption'), + Optional(Integer( + allow_empty = False, + default_value = 110, + ), + title = _('TCP Port'), + help = _('By default the standard POP3 Port 110 is used.'), + )), + (True, _('Encrypt POP3 communication using SSL'), + Optional(Integer( + allow_empty = False, + default_value = 995, + ), + title = _('TCP Port'), + help = _('By default the standard POP3/SSL Port 995 is used.'), + )), + ], + )), + ('auth', Tuple( + title = _('Authentication'), + elements = [ + TextAscii( + title = _('Username'), + allow_empty = False, + size = 24 + ), + Password( + title = _('Password'), + allow_empty = False, + size = 12 + ), + ], + )), + ], + )), + ] + )) +] + + +register_rule(group, + "active_checks:ssh", + Dictionary( + title = _("Check SSH service"), + help = _("This rulset allow you to configure a SSH check for a host"), + elements = [ + ("port", + Integer( + title = _("TCP port number"), + default_value = 22), + ), + ("timeout", + Integer( + title = _("Connect Timeout"), + help = _("Seconds before connection times out"), + default_value = 10), + ), + ("remote_version", + TextAscii( + title = _("Version of Server"), + help = _("Warn if string doesn't match expected server version (ex: OpenSSH_3.9p1)"), + )), + ("remote_protocol", + TextAscii( + title = _("Protocol of Server"), + help = _("Warn if protocol doesn't match expected protocol version (ex: 2.0)"), + )), + ] + ), + match="all") + +register_rule(group, + "active_checks:icmp", + Dictionary( + title = _("Check hosts with PING (ICMP Echo Request)"), + help = _("This ruleset allows you to configure explicit PING monitoring of hosts. 
+
+register_rule(group,
+    "active_checks:ssh",
+    Dictionary(
+        title = _("Check SSH service"),
+        help = _("This ruleset allows you to configure an SSH check for a host"),
+        elements = [
+            ("port",
+                Integer(
+                    title = _("TCP port number"),
+                    default_value = 22),
+            ),
+            ("timeout",
+                Integer(
+                    title = _("Connect Timeout"),
+                    help = _("Seconds before connection times out"),
+                    default_value = 10),
+            ),
+            ("remote_version",
+                TextAscii(
+                    title = _("Version of Server"),
+                    help = _("Warn if string doesn't match expected server version (ex: OpenSSH_3.9p1)"),
+                )),
+            ("remote_protocol",
+                TextAscii(
+                    title = _("Protocol of Server"),
+                    help = _("Warn if protocol doesn't match expected protocol version (ex: 2.0)"),
+                )),
+        ]
+    ),
+    match="all")
+
+register_rule(group,
+    "active_checks:icmp",
+    Dictionary(
+        title = _("Check hosts with PING (ICMP Echo Request)"),
+        help = _("This ruleset allows you to configure explicit PING monitoring of hosts. "
+                 "Usually a PING is being used as a host check, so this is not necessary. "
+                 "There are some situations, however, where this can be useful. One of them "
+                 "is when using the Check_MK Micro Core with SMART Ping and you nevertheless "
+                 "want to track performance data of the PING to some hosts."),
+        elements = [
+            ( "description",
+              TextUnicode(
+                  title = _("Service Description"),
+                  allow_empty = False,
+                  default_value = "PING",
+              ))
+        ] + check_icmp_params,
+    ),
+    match = "all",
+)
+
+register_rule(group,
+    "active_checks:ftp",
+    Transform(
+        Dictionary(
+            elements = [
+                ( "response_time",
+                  Tuple(
+                      title = _("Expected response time"),
+                      elements = [
+                          Float(
+                              title = _("Warning if above"),
+                              unit = "ms",
+                              default_value = 100.0),
+                          Float(
+                              title = _("Critical if above"),
+                              unit = "ms",
+                              default_value = 200.0),
+                      ])
+                ),
+                ( "timeout",
+                  Integer(
+                      title = _("Seconds before connection times out"),
+                      unit = _("sec"),
+                      default_value = 10,
+                  )
+                ),
+                ( "refuse_state",
+                  DropdownChoice(
+                      title = _("State for connection refusal"),
+                      choices = [ ('crit', _("CRITICAL")),
+                                  ('warn', _("WARNING")),
+                                  ('ok',   _("OK")),
+                                ])
+                ),
+
+                ( "send_string",
+                  TextAscii(
+                      title = _("String to send"),
+                      size = 30)
+                ),
+                ( "expect",
+                  ListOfStrings(
+                      title = _("Strings to expect in response"),
+                      orientation = "horizontal",
+                      valuespec = TextAscii(size = 30),
+                  )
+                ),
+
+                ( "ssl",
+                  FixedValue(
+                      value = True,
+                      totext = _("use SSL"),
+                      title = _("Use SSL for the connection."))
+
+                ),
+                ( "cert_days",
+                  Integer(
+                      title = _("SSL certificate validation"),
+                      help = _("Minimum number of days a certificate has to be valid"),
+                      unit = _("days"),
+                      default_value = 30)
+                ),
+            ]),
+        forth = lambda x: type(x) == tuple and x[1] or x,
+        title = _("Check FTP Service"),
+    ),
+    match = "all",
+)
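# ---- [editor's note, not part of the patch] ---------------------------
# Pattern used by the FTP rule above and the DNS rule below: Transform
# wraps a valuespec so that rule values written by an older version of
# the valuespec keep loading. forth() maps the stored value onto what
# the wrapped valuespec expects; for the FTP rule (lambda verbatim from
# the patch, the shape of the old tuple format is an assumption):
forth = lambda x: type(x) == tuple and x[1] or x
# (something, {"timeout": 10}) -> {"timeout": 10}    (old tuple format)
# {"timeout": 10}              -> {"timeout": 10}    (already new format)
# -----------------------------------------------------------------------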
 
 register_rule(group,
     "active_checks:dns",
     Tuple(
         title = _("Check DNS service"),
-        help = _("Check the resultion of a hostname into an IP address by a DNS server. "
+        help = _("Check the resolution of a hostname into an IP address by a DNS server. "
                  "This check uses check_dns from the standard Nagios plugins."),
         elements = [
-           TextAscii(title = _("Hostname"), allow_empty = False,
-                     help = _('The name or address you want to query')),
+           TextAscii(
+               title = _("Queried Hostname or IP address"),
+               allow_empty = False,
+               help = _('The name or IPv4 address you want to query')),
            Dictionary(
                title = _("Optional parameters"),
                elements = [
                   ( "server",
-                    TextAscii(
-                        title = _("DNS Server"),
-                        allow_empty = False,
-                        help = _("Optional DNS server you want to use for the lookup"))),
+                    Alternative(
+                        title = _("DNS Server"),
+                        elements = [
+                            FixedValue( value=None, totext=_("use local configuration"),
+                                title = _("Use local DNS configuration of monitoring site")),
+                            TextAscii(
+                                title = _("Specify DNS Server"),
+                                allow_empty = False,
+                                help = _("Optional DNS server you want to use for the lookup")),
+                        ])
+                  ),
                   ( "expected_address",
-                    TextAscii(
-                        title = _("Expected Address"),
-                        allow_empty = False,
-                        help = _("Optional IP-ADDRESS you expect the DNS server to return. HOST"
-                                 "must end with a dot (.) " )),
+                    Transform(
+                        ListOfStrings(
+                            title = _("Expected answer (IP address or hostname)"),
+                            help = _("List all allowed expected answers here. If you query for an "
+                                     "IP address, the answers will be host names that end "
+                                     "with a dot."),
+                        ),
+                        forth = lambda old: type(old) in (str, unicode) and [old] or old,
+                    ),
                   ),
                   ( "expected_authority",
                     FixedValue(
                         value  = True,
                         title = _("Expect Authoritative DNS Server"),
                         totext = _("Expect Authoritative"),
-                        help = _("Optional expect the DNS server to be authoriative"
-                                 "for the lookup ")),
+                    )
                   ),
                   ( "response_time",
                     Tuple(
                         title = _("Expected response time"),
                         elements = [
                             Float(
-                                title = _("Warning at"),
-                                unit = "sec",
+                                title = _("Warning if above"),
+                                unit = _("sec"),
                                 default_value = 1),
                             Float(
-                                title = _("Critical at"),
-                                unit = "sec",
+                                title = _("Critical if above"),
+                                unit = _("sec"),
                                 default_value = 2),
                         ])
                    ),
@@ -85,20 +365,126 @@
                 ]),
         ]
     ),
-    match = 'all')
+    match = 'all'
+)
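# ---- [editor's note, not part of the patch] ---------------------------
# The check_sql ruleset registered below expects the SQL statement or
# procedure to return a single row of up to three columns: a number, a
# text, and optional performance data. An illustrative query (SQL and
# values made up):
#
#     SELECT 0, 'connections OK', 'connections=17;;'
#
# Without "levels"/"levels_low" the first column is taken as the check
# state (0/1/2/3 = OK/WARN/CRIT/UNKNOWN); with levels configured it is
# compared against the thresholds instead.
# -----------------------------------------------------------------------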
+register_rule(group,
+    "active_checks:sql",
+    Dictionary(
+        title = _("Check SQL Database"),
+        help = _("This check connects to the specified database, sends a custom SQL-statement "
+                 "or starts a procedure, and checks that the result has a defined format "
+                 "containing three columns, a number, a text, and performance data. Upper or "
+                 "lower levels may be defined here. If they are not defined, the number is taken "
+                 "as the state of the check. If a procedure is used, input parameters of the "
+                 "procedure may be given as a comma separated list. "
+                 "This check uses the active check check_sql."),
+        optional_keys = [ "levels", "levels_low", "perfdata", "port", "procedure" ],
+        elements = [
+            ( "description",
+              TextUnicode(title = _("Service Description"),
+                  help = _("The name of this active service to be displayed."),
+                  allow_empty = False,
+            )),
+            ( "dbms",
+              DropdownChoice(
+                  title = _("Type of Database"),
+                  choices = [
+                      ("mysql",    _("MySQL")),
+                      ("postgres", _("PostgreSQL")),
+                      ("mssql",    _("MSSQL")),
+                      ("oracle",   _("Oracle")),
+                      ("db2",      _("DB2")),
+                  ],
+                  default_value = "postgres",
+              ),
+            ),
+            ( "port",
+              Integer(title = _("Database Port"), allow_empty = True,
+                      help = _('The port the DBMS listens to'))
+            ),
+            ( "name",
+              TextAscii(title = _("Database Name"), allow_empty = False,
+                        help = _('The name of the database on the DBMS'))
+            ),
+            ( "user",
+              TextAscii(title = _("Database User"), allow_empty = False,
+                        help = _('The username used to connect to the database'))
+            ),
+            ( "password",
+              Password(title = _("Database Password"), allow_empty = False,
+                       help = _('The password used to connect to the database'))
+            ),
+            ( "sql",
+              TextAscii(title = _("SQL-statement or procedure name"), allow_empty = False,
+                        help = _('The SQL-statement or procedure name which is executed on the DBMS'))
+            ),
+            ( "procedure",
+              Dictionary(
+                  optional_keys = [ "input" ],
+                  title = _("Use procedure call instead of SQL statement"),
+                  help = _("If you activate this option, a name of a stored "
+                           "procedure is used instead of an SQL statement. "
+                           "The procedure should return one output variable, "
+                           "which is evaluated in the check. If input parameters "
+                           "are required, they may be specified below."),
+                  elements = [
+                      ("useprocs",
+                       FixedValue(
+                           value = True,
+                           totext = _("procedure call is used"),
+                       )),
+                      ("input",
+                       TextAscii(
+                           title = _("Input Parameters"),
+                           allow_empty = True,
+                           help = _("Input parameters, if required by the database procedure. "
+                                    "If several parameters are required, use commas to separate them."),
+                       )),
+                  ]
+              ),
+            ),
+            ( "levels",
+              Tuple(
+                  title = _("Upper levels for first output item"),
+                  elements = [
+                      Float( title = _("Warning if above")),
+                      Float( title = _("Critical if above"))
+                  ])
+            ),
+            ( "levels_low",
+              Tuple(
+                  title = _("Lower levels for first output item"),
+                  elements = [
+                      Float( title = _("Warning if below")),
+                      Float( title = _("Critical if below"))
+                  ])
+            ),
+            ( "perfdata",
+              FixedValue(True, totext=_("Store output value into RRD database"), title = _("Performance Data"), ),
+            )
+        ]
+    ),
+    match = 'all'
+)
 
 register_rule(group,
     "active_checks:tcp",
     Tuple(
         title = _("Check connecting to a TCP port"),
-        help = _("This check test the connection to a TCP port. It uses "
+        help = _("This check tests the connection to a TCP port. It uses "
                  "check_tcp from the standard Nagios plugins."),
         elements = [
            Integer(title = _("TCP Port"), minvalue=1, maxvalue=65535),
            Dictionary(
                title = _("Optional parameters"),
                elements = [
+                   ( "svc_description",
+                     TextUnicode(
+                         title = _("Service description"),
+                         allow_empty = False,
+                         help = _("Here you can specify a service description. "
+                                  "If this parameter is not set, the service is named TCP Port {Portnumber}"))),
                    ( "hostname",
                      TextAscii(
                          title = _("DNS Hostname"),
@@ -111,11 +497,11 @@
                          title = _("Expected response time"),
                          elements = [
                              Float(
-                                 title = _("Warning at"),
+                                 title = _("Warning if above"),
                                  unit = "ms",
                                  default_value = 100.0),
                              Float(
-                                 title = _("Critical at"),
+                                 title = _("Critical if above"),
                                  unit = "ms",
                                  default_value = 200.0),
                          ])
@@ -221,23 +607,64 @@
                 ]),
         ]
     ),
-    match = 'all')
+    match = 'all'
+)
+
+register_rule(group,
+    "active_checks:uniserv", Dictionary(
+        title = _("Check uniserv service"), optional_keys = False, elements = [
+            ("port",
+                Integer(title = _("Port") )),
+            ("service",
+                TextAscii(
+                    title = _("Service Name"),
+                    help = _("Enter the uniserv service name here (has nothing to do with service description).")
+            )),
+            ("job",
+                CascadingDropdown(
+                    title = _("Mode of the Check"),
+                    help = _("Choose whether you just want to query the version number,"
+                             " or if you want to check the response to an address query."),
+                    choices = [
+                        ("version", _("Check for Version")),
+                        ("address", _("Check for an Address"),
+                            Dictionary(
+                                title = _("Address Check mode"),
+                                optional_keys = False,
+                                elements = [
+                                    ( "street",
+                                        TextAscii( title = _("Street name"))),
+                                    ( "street_no",
+                                        Integer( title = _("Street number"))),
+                                    ( "city",
+                                        TextAscii( title = _("City name"))),
+                                    ( "search_regex",
+                                        TextAscii( title = _("Check City against Regex"),
+                                            help = _( "The city name from the response will be checked against "
+                                                      "the regular expression specified here"),
+                                    )),
+                                ]
+                            )),
+                    ]
+            )),
+        ]))
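# ---- [editor's note, not part of the patch] ---------------------------
# Illustrative value of the uniserv rule above. "job" is a
# CascadingDropdown, so it is either the plain string "version" or a
# (mode, sub-dict) tuple for the address mode; with optional_keys = False
# all keys are mandatory. All sample data below is made up:
example_value = {
    "port":    2147,           # assumed example port
    "service": "adr_check",    # assumed example service name
    "job": ("address", {
        "street":       "Main Street",
        "street_no":    12,
        "city":         "Munich",
        "search_regex": "Munich",
    }),
}
# -----------------------------------------------------------------------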
 
 register_rule(group,
     "active_checks:http",
     Tuple(
         title = _("Check HTTP service"),
         help = _("Check HTTP/HTTPS service using the plugin check_http "
-                 "from the standard Nagios Plugins. "
+                 "from the standard Monitoring Plugins. "
                  "This plugin tests the HTTP service on the specified host. "
                  "It can test normal (HTTP) and secure (HTTPS) servers, follow "
                  "redirects, search for strings and regular expressions, check "
-                 "connection times, and report on certificate expiration times. "),
+                 "connection times, and report on certificate expiration times."),
         elements = [
             TextUnicode(
                 title = _("Name"),
-                help = _("Will be used in the service description"),
+                help = _("Will be used in the service description. If the name starts with "
+                         "a caret (^), the service description will not be prefixed with HTTP."),
                 allow_empty = False),
             Alternative(
                 title = _("Mode of the Check"),
@@ -253,7 +680,7 @@
                         TextAscii(
                             title = _("Name of the virtual host"),
                             help = _("Set this in order to specify the name of the "
-                                     "virtual host for the query (using HTTP/1.1). When you "
+                                     "virtual host for the query (using HTTP/1.1). If you "
                                      "leave this empty, then the IP address of the host "
                                      "will be used instead."),
                             allow_empty = False),
@@ -300,11 +727,11 @@
                             title = _("Expected response time"),
                             elements = [
                                 Float(
-                                    title = _("Warning at"),
+                                    title = _("Warning if above"),
                                     unit = "ms",
                                     default_value = 100.0),
                                 Float(
-                                    title = _("Critical at"),
+                                    title = _("Critical if above"),
                                     unit = "ms",
                                     default_value = 200.0),
                             ])
@@ -339,7 +766,7 @@
                                 title = _("Username"),
                                 size = 12,
                                 allow_empty = False),
-                            TextAscii(
+                            Password(
                                 title = _("Password"),
                                 size = 12,
                                 allow_empty = False),
@@ -354,7 +781,7 @@
                                 title = _("Username"),
                                 size = 12,
                                 allow_empty = False),
-                            TextAscii(
+                            Password(
                                 title = _("Password"),
                                 size = 12,
                                 allow_empty = False),
@@ -373,6 +800,11 @@
                             ],
                             default_value = 'follow'),
                     ),
+                    ( "expect_response_header",
+                        TextAscii(
+                            title = _("String to expect in response headers"),
+                        )
+                    ),
                     ( "expect_response",
                         ListOfStrings(
                             title = _("Strings to expect in server response"),
@@ -390,21 +822,26 @@
                         )
                    ),
                    ( "expect_regex",
-                      Tuple(
-                          title = _("Regular expression to expect in content"),
-                          orientation = "vertical",
-                          show_titles = False,
-                          elements = [
-                              RegExp(label = _("Regular expression: ")),
-                              Checkbox(label = _("Case insensitive")),
-                              Checkbox(label = _("return CRITICAL if found, OK if not")),
-                          ])
+                      Transform(
+                          Tuple(
+                              orientation = "vertical",
+                              show_titles = False,
+                              elements = [
+                                  RegExp(label = _("Regular expression: ")),
+                                  Checkbox(label = _("Case insensitive")),
+                                  Checkbox(label = _("return CRITICAL if found, OK if not")),
+                                  Checkbox(label = _("Multiline string matching")),
+                              ]
+                          ),
+                          forth = lambda x: len(x) == 3 and tuple(list(x) + [False]) or x,
+                          title = _("Regular expression to expect in content"),
+                      ),
                    ),
                    ( "post_data",
                       Tuple(
                           title = _("Send HTTP POST data"),
                           elements = [
-                              TextAscii(
+                              TextUnicode(
                                   title = _("HTTP POST data"),
                                   help = _("Data to send via HTTP POST method. "
                                            "Please make sure that the data is URL-encoded."),
@@ -435,7 +872,7 @@
                           value = True,
                           title = _("Don't wait for document body"),
                           help = _("Note: this still does an HTTP GET or POST, not a HEAD."),
-                          totext = _("dont wait for body"))
+                          totext = _("don't wait for body"))
                    ),
                    ( "page_size",
                       Tuple(
@@ -478,9 +915,9 @@
                    ),
                    ( "cert_host",
                       TextAscii(
-                          title = _("Check Cerficate on diffrent IP/ DNS Name"),
-                          help = _("For each SSL cerficate on a host, a diffrent IP address is needed. "
-                                   "Here you can specify there address if it differs from the "
+                          title = _("Check Certificate of different IP / DNS Name"),
+                          help = _("For each SSL certificate on a host, a different IP address is needed. 
" + "Here, you can specify the address if it differs from the " "address from the host primary address."), ) ), @@ -492,6 +929,13 @@ default_value = 443, ) ), + ( "sni", + FixedValue( + value = True, + totext = _("enable SNI"), + title = _("Enable SSL/TLS hostname extension support (SNI)"), + ) + ), ], required_keys = [ "cert_days" ], ), @@ -499,7 +943,8 @@ ), ] ), - match = 'all') + match = 'all' +) register_rule(group, "active_checks:ldap", @@ -541,9 +986,9 @@ allow_empty = False, size = 60, ), - TextAscii( + Password( title = _("Password"), - help = _("Password for binding, if you server requires an authentication"), + help = _("Password for binding, if your server requires an authentication"), allow_empty = False, size = 20, ) @@ -553,7 +998,7 @@ ( "port", Integer( title = _("TCP Port"), - help = _("Default is 389 for normal connetions and 636 for SSL connections."), + help = _("Default is 389 for normal connections and 636 for SSL connections."), minvalue = 1, maxvalue = 65535, default_value = 389) @@ -583,11 +1028,11 @@ title = _("Expected response time"), elements = [ Float( - title = _("Warning at"), + title = _("Warning if above"), unit = "ms", default_value = 1000.0), Float( - title = _("Critical at"), + title = _("Critical if above"), unit = "ms", default_value = 2000.0), ]) @@ -623,17 +1068,21 @@ TextAscii( title = _("DNS Hostname or IP address"), allow_empty = False, - help = _("You can specify a hostname or IP address different from IP address " + help = _("You can specify a hostname or IP address different from the IP address " "of the host as configured in your host properties."))), ( "port", - TextAscii( - title = _("TCP Port to connect to"), - help = _("The TCP Port the SMTP server is listening on. " - "The default is 25."), - size = 5, - allow_empty = False, - default_value = "25", - ) + Transform( + Integer( + title = _("TCP Port to connect to"), + help = _("The TCP Port the SMTP server is listening on. " + "The default is 25."), + size = 5, + minvalue = 1, + maxvalue = 65535, + default_value = "25", + ), + forth = int, + ) ), ( "ip_version", Alternative( @@ -702,7 +1151,7 @@ ("starttls", FixedValue( True, - totext = "STARTTLS enabled.", + totext = _("STARTTLS enabled."), title = _("Use STARTTLS for the connection.") ) ), @@ -715,7 +1164,7 @@ title = _("Username"), size = 12, allow_empty = False), - TextAscii( + Password( title = _("Password"), size = 12, allow_empty = False), @@ -727,12 +1176,12 @@ title = _("Expected response time"), elements = [ Integer( - title = _("Warning at"), - unit = "sec" + title = _("Warning if above"), + unit = _("sec") ), Integer( - title = _("Critical at"), - unit = "sec" + title = _("Critical if above"), + unit = _("sec") ), ]) ), @@ -744,19 +1193,105 @@ ) ), ]) - ]), + ] + ), + match = 'all' +) + +register_rule(group, + "active_checks:disk_smb", + Dictionary( + title = _("Check access to SMB share"), + help = _("This ruleset helps you to configure the classical Nagios " + "plugin check_disk_smb that checks the access to " + "filesystem shares that are exported via SMB/CIFS."), + elements = [ + ( "share", + TextUnicode( + title = _("SMB share to check"), + help = _("Enter the plain name of the share only, e. g. 
iso, not " + "the full UNC like \\\\servername\\iso"), + size = 32, + allow_empty = False, + )), + ( "workgroup", + TextUnicode( + title = _("Workgroup"), + help = _("Workgroup or domain used (defaults to WORKGROUP)"), + size = 32, + allow_empty = False, + )), + ( "host", + TextAscii( + title = _("NetBIOS name of the server"), + help = _("If omitted then the IP address is being used."), + size = 32, + allow_empty = False, + )), + ( "port", + Integer( + title = _("TCP Port"), + help = _("TCP port number to connect to. Usually either 139 or 445."), + default_value = 445, + minvalue = 1, + maxvalue = 65535, + )), + ( "levels", + Tuple( + title = _("Levels for used disk space"), + elements = [ + Percentage(title = _("Warning if above"), default_value = 85, allow_int = True), + Percentage(title = _("Critical if above"), default_value = 95, allow_int = True), + ] + )), + ( "auth", + Tuple( + title = _("Authorization"), + elements = [ + TextAscii( + title = _("Username"), + allow_empty = False, + size = 24), + Password( + title = _("Password"), + allow_empty = False, + size = 12), + ], + )), + ], + required_keys = [ "share", "levels" ], + ), match = 'all' ) +def PluginCommandLine(addhelp = ""): + return TextAscii( + title = _("Command line"), + help = _("Please enter the complete shell command including " + "path name and arguments to execute. You can use monitoring " + "macros here. The most important are:
      " + "
    • $HOSTADDRESS$: The IP address of the host
    • " + "
    • $HOSTNAME$: The name of the host
    • " + "
    • $USER1$: user macro 1 (usually path to shipped plugins)
    • " + "
    • $USER2$: user marco 2 (usually path to your own plugins)
    • " + "
    " + "If you are using OMD, you can omit the path and just specify " + "the command (e.g. check_foobar). This command will be " + "searched first in the local plugins directory " + "(~/local/lib/nagios/plugins) and then in the shipped plugins " + "directory (~/lib/nagios/plugins) within your site directory."), + size = "max", + ) + register_rule(group, "custom_checks", Dictionary( - title = _("Classical active and passive Nagios checks"), - help = _("With this ruleset you can configure "classical Nagios checks" " + title = _("Classical active and passive Monitoring checks"), + help = _("With this ruleset you can configure "classical Monitoring checks" " "to be executed directly on your monitoring server. These checks " "will not use Check_MK. It is also possible to configure passive " - "checks that are fed with data from external sources via the Nagios " - "command pipe."), + "checks that are fed with data from external sources via the " + "command pipe of the monitoring core."), elements = [ ( "service_description", TextUnicode( @@ -767,32 +1302,16 @@ default_value = _("Customcheck")) ), ( "command_line", - TextAscii( - title = _("Command line"), - help = _("Please enter the complete shell command including " - "path name and arguments to execute. You can use Nagios " - "macros here. The most important are:
      " - "
    • $HOSTADDRESS$: The IP address of the host
    • " - "
    • $HOSTNAME$: The name of the host
    • " - "
    • $USER1$: user macro 1 (usually path to shipped plugins)
    • " - "
    • $USER2$: user marco 2 (usually path to your own plugins)
    • " - "
    " - "If you are using OMD, then you can omit the path and just specify " - "the command (e.g. check_foobar). This command will be " - "searched first in the local plugins directory " - "(~/local/lib/nagios/plugins) and then in the shipped plugins " - "directory (~/lib/nagios/plugins) within your site directory.

    " - "Passive checks: Do no specify a command line if you want " - "to define passive checks."), - size = 80, - ) + PluginCommandLine(addhelp = _("

    " + "Passive checks: Do no specify a command line if you want " + "to define passive checks.")), ), ( "command_name", TextAscii( title = _("Internal command name"), - help = _("If you want, then you can specify a name that will be used " + help = _("If you want, you can specify a name that will be used " "in the define command section for these checks. This " - "allows you to a assign a customer PNP template for the performance " + "allows you to a assign a custom PNP template for the performance " "data of the checks. If you omit this, then check-mk-custom " "will be used."), size = 32) @@ -807,9 +1326,13 @@ ( "freshness", Dictionary( title = _("Check freshness"), - help = _("Freshness checking is only useful for passive checks. It makes sure that passive " - "check results are submitted on a regular base. If not, the check is being set to " - "warning, critical or unknown."), + help = _("Freshness checking is only useful for passive checks when the staleness feature " + "is not enough for you. It changes the state of a check to a configurable other state " + "when the check results are not arriving in time. Staleness will still grey out the " + "test after the corrsponding interval. If you don't want that, you might want to adjust " + "the staleness interval as well. The staleness interval is calculated from the normal " + "check interval multiplied by the staleness value in the Global Settings. " + "The normal check interval can be configured in a separate rule for your check."), optional_keys = False, elements = [ ( "interval", @@ -832,7 +1355,7 @@ )), ( "output", TextUnicode( - title = _("Plugin output in case of absent abdates"), + title = _("Plugin output in case of absent updates"), size = 40, allow_empty = False, default_value = _("Check result did not arrive in time") @@ -847,3 +1370,471 @@ match = 'all' ) +register_rule(group, + "active_checks:bi_aggr", + Tuple( + title = _("Check State of BI Aggregation"), + help = _("Connect to the local or a remote monitoring host, which uses Check_MK BI to aggregate " + "several states to a single BI aggregation, which you want to show up as a single " + "service."), + elements = [ + TextAscii( + title = _("Base URL (OMD Site)"), + help = _("The base URL to the monitoring instance. For example http://mycheckmk01/mysite. You can use " + "macros like $HOSTADDRESS$ and $HOSTNAME$ within this URL to make them be replaced by " + "the hosts values."), + size = 60, + allow_empty = False + ), + TextAscii( + title = _("Aggregation Name"), + help = _("The name of the aggregation to fetch. It will be added to the service description. You can use " + "macros like $HOSTADDRESS$ and $HOSTNAME$ within this parameter to make them be replaced by " + "the hosts values."), + allow_empty = False + ), + TextAscii( + title = _("Username"), + help = _("The name of the user account to use for fetching the BI aggregation via HTTP. 
When " + "using the cookie based authentication mode (default), this must be a user where " + "authentication is set to \"Automation Secret\" based authentication."), + allow_empty = False + ), + Password( + title = _("Password"), + help = _("Valid automation secret or password for the user, depending on the choosen " + "authentication mode."), + allow_empty = False + ), + Dictionary( + title = _("Optional parameters"), + elements = [ + ("auth_mode", DropdownChoice( + title = _('Authentication Mode'), + default_value = 'cookie', + choices = [ + ('cookie', _('Form (Cookie) based')), + ('basic', _('HTTP Basic')), + ('digest', _('HTTP Digest')), + ], + )), + ("timeout", Integer( + title = _("Seconds before connection times out"), + unit = _("sec"), + default_value = 60, + )), + ] + ), + ] + ), + match = 'all' +) + +register_rule(group, + "active_checks:form_submit", + Tuple( + title = _("Check HTML Form Submit"), + help = _("Check submission of HTML forms via HTTP/HTTPS using the plugin check_form_submit " + "provided with Check_MK. This plugin provides more functionality as check_http, " + "as it automatically follows HTTP redirect, accepts and uses cookies, parses forms " + "from the requested pages, changes vars and submits them to check the response " + "afterwards."), + elements = [ + TextUnicode( + title = _("Name"), + help = _("The name will be used in the service description"), + allow_empty = False + ), + Dictionary( + title = _("Check the URL"), + elements = [ + ("hosts", ListOfStrings( + title = _('Check specific host(s)'), + help = _('By default, if you do not specify any host addresses here, ' + 'the host address of the host this service is assigned to will ' + 'be used. But by specifying one or several host addresses here, ' + 'it is possible to let the check monitor one or multiple hosts.') + )), + ("virthost", TextAscii( + title = _("Virtual host"), + help = _("Set this in order to specify the name of the " + "virtual host for the query (using HTTP/1.1). When you " + "leave this empty, then the IP address of the host " + "will be used instead."), + allow_empty = False, + )), + ("uri", TextAscii( + title = _("URI to fetch (default is /)"), + allow_empty = False, + default_value = "/", + regex = '^/.*', + )), + ("port", Integer( + title = _("TCP Port"), + minvalue = 1, + maxvalue = 65535, + default_value = 80, + )), + ("ssl", FixedValue( + value = True, + totext = _("use SSL/HTTPS"), + title = _("Use SSL/HTTPS for the connection.")) + ), + ("timeout", Integer( + title = _("Seconds before connection times out"), + unit = _("sec"), + default_value = 10, + )), + ("expect_regex", RegExp( + title = _("Regular expression to expect in content"), + )), + ("form_name", TextAscii( + title = _("Name of the form to populate and submit"), + help = _("If there is only one form element on the requested page, you " + "do not need to provide the name of that form here. But if you " + "have several forms on that page, you need to provide the name " + "of the form here, to enable the check to identify the correct " + "form element."), + allow_empty = True, + )), + ("query", TextAscii( + title = _("Send HTTP POST data"), + help = _("Data to send via HTTP POST method. 
+
+register_rule(group,
+    "active_checks:form_submit",
+    Tuple(
+        title = _("Check HTML Form Submit"),
+        help = _("Check submission of HTML forms via HTTP/HTTPS using the plugin check_form_submit "
+                 "provided with Check_MK. This plugin provides more functionality than check_http, "
+                 "as it automatically follows HTTP redirects, accepts and uses cookies, parses forms "
+                 "from the requested pages, changes variables and submits them to check the response "
+                 "afterwards."),
+        elements = [
+            TextUnicode(
+                title = _("Name"),
+                help = _("The name will be used in the service description"),
+                allow_empty = False
+            ),
+            Dictionary(
+                title = _("Check the URL"),
+                elements = [
+                    ("hosts", ListOfStrings(
+                        title = _('Check specific host(s)'),
+                        help = _('By default, if you do not specify any host addresses here, '
+                                 'the host address of the host this service is assigned to will '
+                                 'be used. But by specifying one or several host addresses here, '
+                                 'it is possible to let the check monitor one or multiple hosts.')
+                    )),
+                    ("virthost", TextAscii(
+                        title = _("Virtual host"),
+                        help = _("Set this in order to specify the name of the "
+                                 "virtual host for the query (using HTTP/1.1). When you "
+                                 "leave this empty, then the IP address of the host "
+                                 "will be used instead."),
+                        allow_empty = False,
+                    )),
+                    ("uri", TextAscii(
+                        title = _("URI to fetch (default is /)"),
+                        allow_empty = False,
+                        default_value = "/",
+                        regex = '^/.*',
+                    )),
+                    ("port", Integer(
+                        title = _("TCP Port"),
+                        minvalue = 1,
+                        maxvalue = 65535,
+                        default_value = 80,
+                    )),
+                    ("ssl", FixedValue(
+                        value = True,
+                        totext = _("use SSL/HTTPS"),
+                        title = _("Use SSL/HTTPS for the connection."))
+                    ),
+                    ("timeout", Integer(
+                        title = _("Seconds before connection times out"),
+                        unit = _("sec"),
+                        default_value = 10,
+                    )),
+                    ("expect_regex", RegExp(
+                        title = _("Regular expression to expect in content"),
+                    )),
+                    ("form_name", TextAscii(
+                        title = _("Name of the form to populate and submit"),
+                        help = _("If there is only one form element on the requested page, you "
+                                 "do not need to provide the name of that form here. But if you "
+                                 "have several forms on that page, you need to provide the name "
+                                 "of the form here, to enable the check to identify the correct "
+                                 "form element."),
+                        allow_empty = True,
+                    )),
+                    ("query", TextAscii(
+                        title = _("Send HTTP POST data"),
+                        help = _("Data to send via HTTP POST method. Please make sure that the data "
+                                 "is URL-encoded (for example \"key1=val1&key2=val2\")."),
+                        size = 40,
+                    )),
+                    ("num_succeeded", Tuple(
+                        title = _("Multiple Hosts: Number of successful results"),
+                        elements = [
+                            Integer(title = _("Warning if equal or below")),
+                            Integer(title = _("Critical if equal or below")),
+                        ]
+                    )),
+                ]
+            ),
+        ]
+    ),
+    match = 'all'
+)
+
+register_rule(group,
+    "active_checks:notify_count",
+    Tuple(
+        title = _("Check Number of Notifications per Contact"),
+        help = _("Check the number of sent notifications per contact using the plugin check_notify_count "
+                 "provided with Check_MK. This plugin counts the total number of notifications sent by the local "
+                 "monitoring core and creates graphs for each individual contact. You can configure thresholds "
+                 "on the number of notifications per contact in a defined time interval. "
+                 "This plugin queries livestatus to extract the notification related log entries from the "
+                 "log file of your monitoring core."),
+        elements = [
+            TextUnicode(
+                title = _("Service Description"),
+                help = _("The name that will be used in the service description"),
+                allow_empty = False
+            ),
+            Integer(
+                title = _("Interval to monitor"),
+                label = _("notifications within last"),
+                unit = _("minutes"),
+                minvalue = 1,
+                default_value = 60,
+            ),
+            Dictionary(
+                title = _("Optional parameters"),
+                elements = [
+                    ("num_per_contact", Tuple(
+                        title = _("Thresholds for Notifications per Contact"),
+                        elements = [
+                            Integer(title = _("Warning if above"), default_value = 20),
+                            Integer(title = _("Critical if above"), default_value = 50),
+                        ]
+                    )),
+                ]
+            ),
+        ]
+    ),
+    match = 'all'
+)
+
+register_rule(group,
+    "active_checks:traceroute",
+    Dictionary(
+        title = _("Check current routing (uses traceroute)"),
+        help = _("This active check uses traceroute in order to determine the current "
+                 "routing from the monitoring host to the target host. You can specify any number "
+                 "of missing or expected routes and in that way detect e.g. an (unintended) failover "
+                 "to a secondary route."),
+        elements = [
+            ( "dns",
+              Checkbox(
+                  title = _("Name resolution"),
+                  label = _("Use DNS to convert IP addresses into hostnames"),
+                  help = _("If you use this option, then traceroute is not being "
+                           "called with the option -n. That means that all IP addresses "
+                           "will be looked up as names. This usually adds additional "
+                           "execution time. 
Also DNS resolution might fail for some addresses."),
            )),
            ( "routers",
              ListOf(
                  Tuple(
                      elements = [
                          TextAscii(
                              title = _("Router (FQDN, IP-Address)"),
                              allow_empty = False,
                          ),
                          DropdownChoice(
                              title = _("How"),
                              choices = [
                                  ( 'W', _("WARN - if this router is not being used") ),
                                  ( 'C', _("CRIT - if this router is not being used") ),
                                  ( 'w', _("WARN - if this router is being used") ),
                                  ( 'c', _("CRIT - if this router is being used") ),
                              ]
                          ),
                      ]
                  ),
                  title = _("Router that must or must not be used"),
                  add_label = _("Add Condition"),
              )
            ),
            ( "method",
              DropdownChoice(
                  title = _("Method of probing"),
                  choices = [
                      ( None,   _("UDP (default behaviour of traceroute)") ),
                      ( "icmp", _("ICMP Echo Request") ),
                      ( "tcp",  _("TCP SYN") ),
                  ]
              )
            ),
        ],
        optional_keys = False,
    ),
    match = 'all'
)
+
+register_rule(group,
+    'active_checks:mail_loop',
+    Dictionary(
+        title = _('Check Email Delivery'),
+        help = _('This active check sends out special E-Mails to a defined mail address using '
+                 'the SMTP protocol and then tries to receive these mails back by querying the '
+                 'inbox of an IMAP or POP3 mailbox. With this check you can verify that your whole '
+                 'mail delivery process is working.'),
+        optional_keys = ['smtp_server', 'smtp_tls', 'smtp_port', 'smtp_auth', 'connect_timeout', 'delete_messages', 'duration'],
+        elements = [
+            ('item', TextUnicode(
+                title = _('Name'),
+                help = _('The service description will be Mail Loop plus this name'),
+                allow_empty = False
+            )),
+            ('smtp_server', TextAscii(
+                title = _('SMTP Server'),
+                allow_empty = False,
+                help = _('You can specify a hostname or IP address different from the IP address '
+                         'of the host this check will be assigned to.')
+            )),
+            ('smtp_tls', FixedValue(True,
+                title = _('Use TLS over SMTP'),
+                totext = _('Encrypt SMTP communication using TLS'),
+            )),
+            ('smtp_port', Integer(
+                title = _('SMTP TCP Port to connect to'),
+                help = _('The TCP Port the SMTP server is listening on. Defaulting to 25.'),
+                allow_empty = False,
+                default_value = 25,
+            )),
+            ('smtp_auth', Tuple(
+                title = _('SMTP Authentication'),
+                elements = [
+                    TextAscii(
+                        title = _('Username'),
+                        allow_empty = False,
+                        size = 24
+                    ),
+                    Password(
+                        title = _('Password'),
+                        allow_empty = False,
+                        size = 12
+                    ),
+                ],
+            )),
+        ] + mail_receiving_params + [
+            ('mail_from', EmailAddress(
+                title = _('From: email address'),
+            )),
+            ('mail_to', EmailAddress(
+                title = _('Destination email address'),
+            )),
+            ('connect_timeout', Integer(
+                title = _('Connect Timeout'),
+                minvalue = 1,
+                default_value = 10,
+                unit = _('sec'),
+            )),
+            ("duration", Tuple(
+                title = _("Loop duration"),
+                elements = [
+                    Age(title = _("Warning at")),
+                    Age(title = _("Critical at")),
+                ])
+            ),
+            ('delete_messages', FixedValue(True,
+                title = _('Delete processed messages'),
+                totext = _('Delete all processed messages belonging to this check'),
+                help = _('Delete all messages identified as being related to this '
+                         'check. This is disabled by default, which will make '
+                         'your mailbox grow if you do not clean it up on your own.'),
+            )),
+        ]
+    ),
+    match = 'all'
+)
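# ---- [editor's sketch, not part of the patch] -------------------------
# How the mail loop works, in outline: the check sends a probe mail whose
# subject carries the service item and the send timestamp, then scans the
# configured IMAP/POP3 inbox for subjects it generated itself and compares
# the embedded timestamp against the "duration" levels above. A minimal
# illustration; the exact subject format of check_mail_loop is an
# assumption here:
import time
def probe_subject(item, now = None):
    return "Check_MK-Mail-Loop %s %d" % (item, now or int(time.time()))
# probe_subject("SMTP1") -> e.g. "Check_MK-Mail-Loop SMTP1 1443000000"
# -----------------------------------------------------------------------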
+
+register_rule(group,
+    'active_checks:mail',
+    Dictionary(
+        title = _('Check Email'),
+        help = _('The basic function of this check is to log into an IMAP or POP3 mailbox to '
+                 'monitor whether or not the login is possible. An extended feature is that the '
+                 'check can fetch all (or just some) mails from the mailbox and forward them as events '
+                 'to the Event Console.'),
+        required_keys = [ 'service_description', 'fetch' ],
+        elements = [
+            ('service_description',
+              TextUnicode(
+                  title = _('Service description'),
+                  help = _('Please make sure that this is unique per host '
+                           'and does not collide with other services.'),
+                  allow_empty = False,
+                  default_value = "Email")
+            )
+        ] + mail_receiving_params + [
+            ('connect_timeout', Integer(
+                title = _('Connect Timeout'),
+                minvalue = 1,
+                default_value = 10,
+                unit = _('sec'),
+            )),
+            ('forward', Dictionary(
+                title = _("Forward mails as events to Event Console"),
+                elements = [
+                    ('method', Alternative(
+                        title = _("Forwarding Method"),
+                        elements = [
+                            Alternative(
+                                title = _("Send events to local event console"),
+                                elements = [
+                                    FixedValue(
+                                        "",
+                                        totext = _("Directly forward to event console"),
+                                        title = _("Send events to local event console in same OMD site"),
+                                    ),
+                                    TextAscii(
+                                        title = _("Send events to local event console into unix socket"),
+                                        allow_empty = False,
+                                    ),
+
+                                    FixedValue(
+                                        "spool:",
+                                        totext = _("Spool to event console"),
+                                        title = _("Spooling: Send events to local event console in same OMD site"),
+                                    ),
+                                    Transform(
+                                        TextAscii(),
+                                        title = _("Spooling: Send events to local event console into given spool directory"),
+                                        allow_empty = False,
+                                        forth = lambda x: x[6:],        # remove prefix
+                                        back  = lambda x: "spool:" + x, # add prefix
+                                    ),
+                                ],
+                                match = lambda x: x and (x == 'spool:' and 2 or x.startswith('spool:') and 3 or 1) or 0
+                            ),
+                            Tuple(
+                                title = _("Send events to remote syslog host"),
+                                elements = [
+                                    DropdownChoice(
+                                        choices = [
+                                            ('udp', _('UDP')),
+                                            ('tcp', _('TCP')),
+                                        ],
+                                        title = _("Protocol"),
+                                    ),
+                                    TextAscii(
+                                        title = _("Address"),
+                                        allow_empty = False,
+                                    ),
+                                    Integer(
+                                        title = _("Port"),
+                                        allow_empty = False,
+                                        default_value = 514,
+                                        minvalue = 1,
+                                        maxvalue = 65535,
+                                        size = 6,
+                                    ),
+                                ]
+                            ),
+                        ],
+                    )),
+                    ('match_subject', RegExpUnicode(
+                        title = _('Only process mails with matching subject'),
+                        help = _('Use this option to not process all messages found in the inbox, '
+                                 'but only those whose subject matches the given regular expression.'),
+                    )),
+                    ('facility', DropdownChoice(
+                        title = _("Events: Syslog facility"),
+                        help = _("Use this syslog facility for all created events"),
+                        choices = syslog_facilities,
+                        default_value = 2, # mail
+                    )),
+                    ('application', Alternative(
+                        title = _("Events: Syslog application"),
+                        help = _("Use this syslog application for all created events"),
+                        elements = [
+                            FixedValue(None,
+                                title = _("Use the mail subject"),
+                                totext = _("The mail subject is used as syslog application"),
+                            ),
+                            TextUnicode(
+                                title = _("Specify the application"),
+                                help = _("Use this text as application. You can use macros like \\1, \\2, ... 
" + "here when you configured subject matching in this rule with a regular expression " + "that declares match groups (using braces)."), + allow_empty = False, + ), + ] + )), + ('host', TextAscii( + title = _('Events: Hostname'), + help = _('Use this hostname for all created events instead of the name of the mailserver'), + )), + ('body_limit', Integer( + title = _('Limit length of mail body'), + help = _('When forwarding mails from the mailbox to the event console, the ' + 'body of the mail is limited to the given number of characters.'), + default_value = 1000, + )), + ('cleanup', Alternative( + title = _("Cleanup messages"), + help = _("The handled messages (see subject matching) can be cleaned up by either " + "deleting them or moving them to a subfolder. By default nothing is cleaned up."), + elements = [ + FixedValue(True, + title = _('Delete messages'), + totext = _('Delete all processed message belonging to this check'), + ), + TextUnicode( + title = _("Move to subfolder"), + help = _("Specify the destination path in the format Path/To/Folder, for example" + "INBOX/Processed_Mails."), + allow_empty = False, + ), + ] + )), + ] + )), + ] + ), + match = 'all' +) diff -Nru check-mk-1.2.2p3/plugins/wato/agents.py check-mk-1.2.6p12/plugins/wato/agents.py --- check-mk-1.2.2p3/plugins/wato/agents.py 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/plugins/wato/agents.py 2015-09-21 10:59:54.000000000 +0000 @@ -0,0 +1,110 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. + +import glob + +def download_table(title, paths): + forms.header(title) + forms.container() + for path in paths: + os_path = path + relpath = path.replace(defaults.agents_dir+'/', '') + filename = path.split('/')[-1] + + size_kb = os.stat(os_path).st_size / 1024.0 + + # FIXME: Rename classes etc. to something generic + html.write('
+        html.write('<div class="ruleset"><div class="text">')
+        html.write('<a href="agents/%s">%s</a>' % (relpath, filename))
+        html.write('<span class="dots">%s</span>' % ("." * 100))
+        html.write('<div class="rulecount">%d KB</div>' % size_kb)
+        html.write('</div>')
+    forms.end()
+
+def mode_download_agents(phase):
+    if phase == "title":
+        return _("Monitoring Agents")
+
+    elif phase == "buttons":
+        global_buttons()
+        return
+
+    elif phase == "action":
+        return
+
+    html.write('
    ') + packed = glob.glob(defaults.agents_dir + "/*.deb") \ + + glob.glob(defaults.agents_dir + "/*.rpm") \ + + glob.glob(defaults.agents_dir + "/windows/c*.msi") + + download_table(_("Packed Agents"), packed) + + titles = { + '' : _('Linux / Unix Agents'), + '/plugins' : _('Linux / Unix Plugins'), + '/windows' : _('Windows Agent'), + '/windows/plugins' : _('Windows Plugins'), + '/windows/mrpe' : _('Windows MRPE Scripts'), + '/cfg_examples' : _('Example Configurations'), + '/z_os' : _('z/OS'), + '/sap' : _('SAP'), + '/special' : _('Special Agents'), + } + + others = [] + for root, dirs, files in os.walk(defaults.agents_dir): + file_paths = [] + relpath = root.split('agents')[1] + title = titles.get(relpath, relpath) + for filename in files: + path = root + '/' + filename + if path not in packed and 'deprecated' not in path: + file_paths.append(path) + + others.append((title, file_paths)) + + others.sort() + + for title, file_paths in others: + if file_paths: + download_table(title, sorted(file_paths)) + html.write('
')
+
+# Don't do anything when the agent bakery exists. Otherwise register
+# a simple download page for the default agents
+if "agents" not in modes:
+    modules.append(
+        ("download_agents", _("Monitoring Agents"), "download_agents", "download_agents",
+         _("Downloads the Check_MK monitoring agents"))
+    )
+
+    modes["download_agents"] = (["download_agents"], mode_download_agents)
+
+    config.declare_permission("wato.download_agents",
+        _("Monitoring Agents"),
+        _("Download the default Check_MK monitoring agents for Linux, "
+          "Windows and other operating systems."),
+        [ "admin", "user", "guest" ])
diff -Nru check-mk-1.2.2p3/plugins/wato/auth.py check-mk-1.2.6p12/plugins/wato/auth.py
--- check-mk-1.2.2p3/plugins/wato/auth.py	2013-11-05 09:23:09.000000000 +0000
+++ check-mk-1.2.6p12/plugins/wato/auth.py	2015-06-24 09:48:38.000000000 +0000
@@ -7,7 +7,7 @@
 # |           | |___| | | |  __/ (__|   <    | |  | | . \           |
 # |            \____|_| |_|\___|\___|_|\_\___|_|  |_|_|\_\          |
 # |                                                                 |
-# | Copyright Mathias Kettner 2013             mk@mathias-kettner.de |
+# | Copyright Mathias Kettner 2014             mk@mathias-kettner.de |
 # +------------------------------------------------------------------+
 #
 # This file is part of Check_MK.
diff -Nru check-mk-1.2.2p3/plugins/wato/backup_domains.py check-mk-1.2.6p12/plugins/wato/backup_domains.py
--- check-mk-1.2.2p3/plugins/wato/backup_domains.py	1970-01-01 00:00:00.000000000 +0000
+++ check-mk-1.2.6p12/plugins/wato/backup_domains.py	2015-09-21 10:59:54.000000000 +0000
@@ -0,0 +1,233 @@
+#!/usr/bin/python
+# -*- encoding: utf-8; py-indent-offset: 4 -*-
+# +------------------------------------------------------------------+
+# |             ____ _               _        __  __ _  __           |
+# |            / ___| |__   ___  ___| | __   |  \/  | |/ /           |
+# |           | |   | '_ \ / _ \/ __| |/ /   | |\/| | ' /            |
+# |           | |___| | | |  __/ (__|   <    | |  | | . \            |
+# |            \____|_| |_|\___|\___|_|\_\___|_|  |_|_|\_\           |
+# |                                                                  |
+# | Copyright Mathias Kettner 2014             mk@mathias-kettner.de |
+# +------------------------------------------------------------------+
+#
+# This file is part of Check_MK.
+# The official homepage is at http://mathias-kettner.de/check_mk.
+#
+# check_mk is free software;  you can redistribute it and/or modify it
+# under the  terms of the  GNU  General Public License  as published by
+# the Free Software Foundation in version 2.  check_mk is  distributed
+# in the hope that it will be useful, but WITHOUT ANY WARRANTY;  with-
+# out even the implied warranty of  MERCHANTABILITY  or  FITNESS FOR A
+# PARTICULAR PURPOSE. See the  GNU General Public License for more de-
+# tails. You should have  received  a copy of the  GNU  General Public
+# License along with GNU Make; see the file  COPYING.  If  not,  write
+# to the Free Software Foundation, Inc., 51 Franklin St,  Fifth Floor,
+# Boston, MA 02110-1301 USA.
+
+backup_domains = {}
+
+# Temporary variable which stores settings during the backup process
+backup_perfdata_enabled = True
+def performancedata_restore(pre_restore = True):
+    global backup_perfdata_enabled
+    site = default_site()
+    html.live.set_only_sites([site])
+
+    if pre_restore:
+        data = html.live.query("GET status\nColumns: process_performance_data")
+        backup_perfdata_enabled = data[0][0] == 1
+        # Return if perfdata is not activated - nothing to do.
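# ---- [editor's note, not part of the patch] ---------------------------
# The pre/post restore hooks below drive the monitoring core through
# Livestatus external commands: DISABLE_PERFORMANCE_DATA is sent before
# the RRD files are restored and ENABLE_PERFORMANCE_DATA afterwards, e.g.
#
#     html.live.command("[%d] DISABLE_PERFORMANCE_DATA" % int(time.time()), site)
#
# so the core does not write performance data into the files while they
# are being replaced. The query above remembers whether perfdata was
# enabled at all, so it is only re-enabled after the restore if it was
# enabled before.
# -----------------------------------------------------------------------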
+ if not backup_perfdata_enabled: + return [] + elif not backup_perfdata_enabled: + return [] + command = pre_restore and "DISABLE_PERFORMANCE_DATA" or "ENABLE_PERFORMANCE_DATA" + html.live.command("[%d] %s" % (int(time.time()), command), site) + html.live.set_only_sites() + return [] + +if not defaults.omd_root: + backup_domains.update( { + "noomd-config": { + "group" : _("Configuration"), + "title" : _("WATO Configuration"), + "prefix" : defaults.default_config_dir, + "paths" : [ + ("dir", "conf.d/wato"), + ("dir", "multisite.d/wato"), + ("file", "multisite.d/sites.mk") + ], + "default" : True, + }, + "noomd-personalsettings": { + "title" : _("Personal User Settings and Custom Views"), + "prefix" : defaults.var_dir, + "paths" : [ ("dir", "web") ], + "default" : True + }, + "noomd-authorization": { + "group" : _("Configuration"), + "title" : _("Local Authentication Data"), + "prefix" : os.path.dirname(defaults.htpasswd_file), + "paths" : [ + ("file", "htpasswd"), + ("file", "auth.secret"), + ("file", "auth.serials") + ], + "cleanup" : False, + "default" : True + }}) +else: + backup_domains.update({ + "check_mk": { + "group" : _("Configuration"), + "title" : _("Hosts, Services, Groups, Timeperiods, Business Intelligence and Monitoring Configuration"), + "prefix" : defaults.default_config_dir, + "paths" : [ + ("file", "liveproxyd.mk"), + ("file", "main.mk"), + ("file", "final.mk"), + ("file", "local.mk"), + ("file", "mkeventd.mk"), + + ("dir", "conf.d"), + ("dir", "multisite.d"), + ("dir", "mkeventd.d"), + ("dir", "mknotifyd.d"), + ], + "default" : True, + }, + "authorization": { + # This domain is obsolete + # It no longer shows up in the backup screen + "deprecated" : True, + "group" : _("Configuration"), + "title" : _("Local Authentication Data"), + "prefix" : os.path.dirname(defaults.htpasswd_file), + "paths" : [ + ("file", "htpasswd"), + ("file", "auth.secret"), + ("file", "auth.serials") + ], + "cleanup" : False, + "default" : True, + }, + "authorization_v1": { + "group" : _("Configuration"), + "title" : _("Local Authentication Data"), + "prefix" : defaults.omd_root, + "paths" : [ + ("file", "etc/htpasswd"), + ("file", "etc/auth.secret"), + ("file", "etc/auth.serials"), + ("file", "var/check_mk/web/*/serial.mk") + ], + "cleanup" : False, + "default" : True + }, + "personalsettings": { + "title" : _("Personal User Settings and Custom Views"), + "prefix" : defaults.var_dir, + "paths" : [ ("dir", "web") ], + "exclude" : [ "*/serial.mk" ], + "cleanup" : False, + }, + "autochecks": { + "group" : _("Configuration"), + "title" : _("Automatically Detected Services"), + "prefix" : defaults.autochecksdir, + "paths" : [ ("dir", "") ], + }, + "snmpwalks": { + "title" : _("Stored SNMP Walks"), + "prefix" : defaults.snmpwalks_dir, + "paths" : [ ("dir", "") ], + }, + "logwatch": { + "group" : _("Historic Data"), + "title" : _("Logwatch Data"), + "prefix" : defaults.var_dir, + "paths" : [ + ("dir", "logwatch"), + ], + }, + "corehistory": { + "group" : _("Historic Data"), + "title" : _("Monitoring History"), + "prefix" : defaults.omd_root, + "paths" : [ + ("dir", "var/nagios/archive"), + ("file", "var/nagios/nagios.log"), + ("dir", "var/icinga/archive"), + ("file", "var/icinga/icinga.log"), + ("dir", "var/check_mk/core/archive"), + ("file", "var/check_mk/core/history"), + ], + }, + "performancedata": { + "group" : _("Historic Data"), + "title" : _("Performance Data"), + "prefix" : defaults.omd_root, + "paths" : [ + ("dir", "var/pnp4nagios/perfdata"), + ("dir", "var/rrdcached"), + ], + "pre_restore" : 
lambda: performancedata_restore(pre_restore = True), + "post_restore" : lambda: performancedata_restore(pre_restore = False), + "checksum" : False, + }, + "applicationlogs": { + "group" : _("Historic Data"), + "title" : _("Application Logs"), + "prefix" : defaults.omd_root, + "paths" : [ + ("dir", "var/log"), + ("file", "var/nagios/livestatus.log"), + ("dir", "var/pnp4nagios/log"), + ], + "checksum" : False, + }, + "mkeventstatus": { + "group" : _("Configuration"), + "title" : _("Event Console Configuration"), + "prefix" : defaults.omd_root, + "paths" : [ + ("dir", "etc/check_mk/mkeventd.d"), + ], + "default" : True + }, + "mkeventhistory": { + "group" : _("Historic Data"), + "title" : _("Event Console Archive and Current State"), + "prefix" : defaults.omd_root, + "paths" : [ + ("dir", "var/mkeventd/history"), + ("file", "var/mkeventd/status"), + ("file", "var/mkeventd/messages"), + ("dir", "var/mkeventd/messages-history"), + ], + }, + "dokuwiki": { + "title" : _("Doku Wiki Pages and Settings"), + "prefix" : defaults.omd_root, + "paths" : [ + ("dir", "var/dokuwiki"), + ], + }, + "nagvis": { + "title" : _("NagVis Maps, Configurations and User Files"), + "prefix" : defaults.omd_root, + "exclude" : [ + "etc/nagvis/apache.conf", + "etc/nagvis/conf.d/authorisation.ini.php", + "etc/nagvis/conf.d/omd.ini.php", + "etc/nagvis/conf.d/cookie_auth.ini.php", + "etc/nagvis/conf.d/urls.ini.php" + ], + "paths" : [ + ("dir", "local/share/nagvis"), + ("dir", "etc/nagvis"), + ("dir", "var/nagvis"), + ], + }, + }) diff -Nru check-mk-1.2.2p3/plugins/wato/bi.py check-mk-1.2.6p12/plugins/wato/bi.py --- check-mk-1.2.2p3/plugins/wato/bi.py 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/plugins/wato/bi.py 2015-06-24 09:48:38.000000000 +0000 @@ -0,0 +1,230 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. + +bi_aggregation_functions["worst"] = { + "title" : _("Worst - take worst of all node states"), + "valuespec" : Tuple( + elements = [ + Integer( + help = _("Normally this value is 1, which means that the worst state " + "of all child nodes is being used as the total state. If you set it for example " + "to 2, then the node with the worst state is not being regarded. 
" + "If the states of the child nodes would be CRIT, WARN and OK, then to total " + "state would be WARN."), + title = _("Take n'th worst state for n = "), + default_value = 1, + min_value = 1), + MonitoringState( + title = _("Restrict severity to at worst"), + help = _("Here a maximum severity of the node state can be set. This severity is not " + "exceeded, even if some of the childs have more severe states."), + default_value = 2, + ), + ]), +} + +bi_aggregation_functions["best"] = { + "title" : _("Best - take best of all node states"), + "valuespec" : Tuple( + elements = [ + Integer( + help = _("Normally this value is 1, which means that the best state " + "of all child nodes is being used as the total state. If you set it for example " + "to 2, then the node with the best state is not being regarded. " + "If the states of the child nodes would be CRIT, WARN and OK, then to total " + "state would be WARN."), + title = _("Take n'th best state for n = "), + default_value = 1, + min_value = 1), + MonitoringState( + title = _("Restrict severity to at worst"), + help = _("Here a maximum severity of the node state can be set. This severity is not " + "exceeded, even if some of the childs have more severe states."), + default_value = 2, + ), + ]), +} + +def vs_count_ok_count(title, defval, defvalperc): + return Alternative( + title = title, + style = "dropdown", + match = lambda x: str(x).endswith("%") and 1 or 0, + elements = [ + Integer( + title = _("Explicit number"), + label=_("Number of OK-nodes"), + min_value = 0, + default_value = defval + ), + Transform( + Percentage( + label=_("Percent of OK-nodes"), + display_format = "%.0f", + default_value = defvalperc), + title = _("Percentage"), + forth = lambda x: float(x[:-1]), + back = lambda x: "%d%%" % x, + ), + ] + ) + +bi_aggregation_functions["count_ok"] = { + "title" : _("Count the number of nodes in state OK"), + "valuespec" : Tuple( + elements = [ + vs_count_ok_count(_("Required number of OK-nodes for a total state of OK:"), 2, 50), + vs_count_ok_count(_("Required number of OK-nodes for a total state of WARN:"), 1, 25), + ]), +} + + +bi_example = ''' +aggregation_rules["host"] = ( + "Host $HOST$", + [ "HOST" ], + "worst", + [ + ( "general", [ "$HOST$" ] ), + ( "performance", [ "$HOST$" ] ), + ( "filesystems", [ "$HOST$" ] ), + ( "networking", [ "$HOST$" ] ), + ( "applications", [ "$HOST$" ] ), + ( "logfiles", [ "$HOST$" ] ), + ( "hardware", [ "$HOST$" ] ), + ( "other", [ "$HOST$" ] ), + ] +) + +aggregation_rules["general"] = ( + "General State", + [ "HOST" ], + "worst", + [ + ( "$HOST$", HOST_STATE ), + ( "$HOST$", "Uptime" ), + ( "checkmk", [ "$HOST$" ] ), + ] +) + +aggregation_rules["filesystems"] = ( + "Disk & Filesystems", + [ "HOST" ], + "worst", + [ + ( "$HOST$", "Disk|MD" ), + ( "multipathing", [ "$HOST$" ]), + ( FOREACH_SERVICE, "$HOST$", "fs_(.*)", "filesystem", [ "$HOST$", "$1$" ] ), + ( FOREACH_SERVICE, "$HOST$", "Filesystem(.*)", "filesystem", [ "$HOST$", "$1$" ] ), + ] +) + +aggregation_rules["filesystem"] = ( + "$FS$", + [ "HOST", "FS" ], + "worst", + [ + ( "$HOST$", "fs_$FS$$" ), + ( "$HOST$", "Filesystem$FS$$" ), + ( "$HOST$", "Mount options of $FS$$" ), + ] +) + +aggregation_rules["multipathing"] = ( + "Multipathing", + [ "HOST" ], + "worst", + [ + ( "$HOST$", "Multipath" ), + ] +) + +aggregation_rules["performance"] = ( + "Performance", + [ "HOST" ], + "worst", + [ + ( "$HOST$", "CPU|Memory|Vmalloc|Kernel|Number of threads" ), + ] +) + +aggregation_rules["hardware"] = ( + "Hardware", + [ "HOST" ], + "worst", + [ + ( 
"$HOST$", "IPMI|RAID" ), + ] +) + +aggregation_rules["networking"] = ( + "Networking", + [ "HOST" ], + "worst", + [ + ( "$HOST$", "NFS|Interface|TCP" ), + ] +) + +aggregation_rules["checkmk"] = ( + "Check_MK", + [ "HOST" ], + "worst", + [ + ( "$HOST$", "Check_MK|Uptime" ), + ] +) + +aggregation_rules["logfiles"] = ( + "Logfiles", + [ "HOST" ], + "worst", + [ + ( "$HOST$", "LOG" ), + ] +) +aggregation_rules["applications"] = ( + "Applications", + [ "HOST" ], + "worst", + [ + ( "$HOST$", "ASM|ORACLE|proc" ), + ] +) + +aggregation_rules["other"] = ( + "Other", + [ "HOST" ], + "worst", + [ + ( "$HOST$", REMAINING ), + ] +) + +host_aggregations += [ + ( DISABLED, "Hosts", FOREACH_HOST, [ "tcp" ], ALL_HOSTS, "host", ["$1$"] ), +] +''' diff -Nru check-mk-1.2.2p3/plugins/wato/builtin_attributes.py check-mk-1.2.6p12/plugins/wato/builtin_attributes.py --- check-mk-1.2.2p3/plugins/wato/builtin_attributes.py 2013-11-05 09:23:09.000000000 +0000 +++ check-mk-1.2.6p12/plugins/wato/builtin_attributes.py 2015-06-24 09:48:38.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. @@ -25,7 +25,7 @@ # Boston, MA 02110-1301 USA. declare_host_attribute(ContactGroupsAttribute(), - show_in_table = True, + show_in_table = False, show_in_folder = True) declare_host_attribute(NagiosTextAttribute("alias", "alias", _("Alias"), @@ -37,15 +37,32 @@ declare_host_attribute(TextAttribute("ipaddress", _("IP address"), _("In case the name of the host is not resolvable via /etc/hosts " "or DNS by your monitoring server, you can specify an explicit IP " - "address or a resolvable DNS name of the host here. Note: If you leave " - "this attribute empty, then DNS resolution will be done when you activate " - "the configuration. When you enter a DNS name here, the DNS resolution will " - "be done each time the host is checked. Use this only for hosts with " - "dynamic IP addresses."), + "address or a resolvable DNS name of the host here.
<br><br>Notes:<br>"
+          "1. If you leave this attribute empty, hostname resolution will be done when "
+          "you activate the configuration. "
+          "Check_MK's builtin DNS cache is activated per default in the global "
+          "configuration to speed up the activation process. The cache is normally "
+          "updated daily with a cron job. You can manually update the cache with the "
+          "command <tt>cmk -v --update-dns-cache</tt>.<br>"
+          "2. If you enter a DNS name here, the DNS resolution will be carried out "
+          "each time the host is checked. Check_MK's DNS cache will NOT be queried. "
+          "Use this only for hosts with dynamic IP addresses."
+          ),
                            allow_empty = False),
                        show_in_table = True,
                        show_in_folder = False)
 
+declare_host_attribute(TextAttribute("snmp_community", _("SNMP Community"),
+    _("Using this option you can configure the community which should be used when "
+      "contacting this host via SNMP v1 or v2. It is possible to configure the SNMP community by "
+      "using the <a href=\"%s\">SNMP Communities</a> ruleset, but when you configure "
+      "a community here, this will override the community defined by the rules.") % \
+      html.makeuri([('mode', 'edit_ruleset'), ('varname', 'snmp_communities')]),
+    allow_empty = False),
+    show_in_table = False,
+    show_in_folder = True,
+    depends_on_tags = ['snmp'])
+
 # Attribute for configuring parents
 class ParentsAttribute(ValueSpecAttribute):
     def __init__(self):
@@ -70,7 +87,7 @@
     def paint(self, value, hostname):
         parts = [ '<a href="%s">%s</a>' % (
-                   "wato.py?" + htmllib.urlencode_vars([("mode", "edithost"), ("host", hn)]), hn)
+                   "wato.py?" + html.urlencode_vars([("mode", "edithost"), ("host", hn)]), hn)
                   for hn in value ]
         return "", ", ".join(parts)
@@ -94,5 +111,5 @@
           "relation is used to describe the reachability of hosts by one monitoring daemon.") %
             (parentname, parent["site"], effective_host["site"]))
 
-api.register_hook('validate-host', validate_host_parents)
+register_hook('validate-host', validate_host_parents)
 
diff -Nru check-mk-1.2.2p3/plugins/wato/builtin_modules.py check-mk-1.2.6p12/plugins/wato/builtin_modules.py
--- check-mk-1.2.2p3/plugins/wato/builtin_modules.py	2013-11-05 09:23:09.000000000 +0000
+++ check-mk-1.2.6p12/plugins/wato/builtin_modules.py	2015-09-21 10:59:54.000000000 +0000
@@ -7,7 +7,7 @@
 # |           | |___| | | |  __/ (__|   <    | |  | | . \           |
 # |            \____|_| |_|\___|\___|_|\_\___|_|  |_|_|\_\          |
 # |                                                                 |
-# | Copyright Mathias Kettner 2013             mk@mathias-kettner.de |
+# | Copyright Mathias Kettner 2014             mk@mathias-kettner.de |
 # +------------------------------------------------------------------+
 #
 # This file is part of Check_MK.
@@ -28,8 +28,8 @@
 # defined in a plugin because they contain i18n strings.
# fields: mode, title, icon, permission, help -modules = [ - ( "folder", _("Hosts & Folders"), "folder", "hosts", +modules += [ + ( "folder", _("Hosts"), "folder", "hosts", _("Manage monitored hosts and services and the hosts' folder structure.")), ( "hosttags", _("Host Tags"), "hosttag", "hosttags", @@ -39,18 +39,18 @@ ( "globalvars", _("Global Settings"), "configuration", "global", _("Global settings for Check_MK, Multisite and the monitoring core.")), - ( "ruleeditor", _("Host & Service Parameters"), "rulesets", "rulesets", + ( "ruleeditor", _("Host & Service Parameters"), "rulesets", "rulesets", _("Check parameters and other configuration variables on " "hosts and services") ), - ( "host_groups", _("Host Groups"), "hostgroups", "groups", - _("Organize your hosts in groups independent of the tree structure.") ), + ( "static_checks", _("Manual Checks"), "static_checks", "rulesets", + _("Configure fixed checks without using service discovery")), - ( "service_groups", _("Service Groups"), "servicegroups", "groups", - _("Organize services in groups for a better overview in the status display.") ), + ( "host_groups", _("Host & Service Groups"), "hostgroups", "groups", + _("Organize your hosts and services in groups independent of the tree structure.") ), - ( "users", _("Users & Contacts"), "users", "users", - _("Manage users of Multisite and contacts of the monitoring system.") ), + ( "users", _("Users"), "users", "users", + _("Manage users of the monitoring system.") ), ( "roles", _("Roles & Permissions"), "roles", "users", _("User roles are configurable sets of permissions." ) ), @@ -58,20 +58,24 @@ ( "contact_groups", _("Contact Groups"), "contactgroups", "users", _("Contact groups are used to assign persons to hosts and services") ), + ( "notifications", _("Notifications"), "notifications", "notifications", + _("Rules for the notification of contacts about host and service problems")), + ( "timeperiods", _("Time Periods"), "timeperiods", "timeperiods", _("Timeperiods restrict notifications and other things to certain periods of " "the day.") ), + ( "pattern_editor", _("Logfile Pattern Analyzer"), "analyze", "pattern_editor", + _("Analyze logfile pattern rules and validate logfile patterns against custom text.")), + + ( "bi_rules", _("BI - Business Intelligence"), "aggr", "bi_rules", + _("Configuration of Check_MK's Business Intelligence component.")), + ( "sites", _("Distributed Monitoring"), "sites", "sites", _("Distributed monitoring via Multsite, distributed configuration via WATO")), - ( "auditlog", _("Audit Logfile"), "auditlog", "auditlog", - _("Keep track of all modifications and actions of the users in WATO.")), - ( "snapshot", _("Backup & Restore"), "backup", "snapshots", _("Make snapshots of your configuration, download, upload and restore snapshots.")), - ( "pattern_editor", _("Logfile Pattern Analyzer"), "analyze", "pattern_editor", - _("Analyze logfile pattern rules and validate logfile patterns against custom text.")), ] diff -Nru check-mk-1.2.2p3/plugins/wato/check_mk_configuration.py check-mk-1.2.6p12/plugins/wato/check_mk_configuration.py --- check-mk-1.2.2p3/plugins/wato/check_mk_configuration.py 2013-11-05 09:23:09.000000000 +0000 +++ check-mk-1.2.6p12/plugins/wato/check_mk_configuration.py 2015-09-21 10:59:54.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . 
\ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. @@ -24,22 +24,21 @@ # to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, # Boston, MA 02110-1301 USA. -# +----------------------------------------------------------------------+ -# | ____ __ _ | -# | / ___|___ _ __ / _(_) __ ___ ____ _ _ __ ___ | -# | | | / _ \| '_ \| |_| |/ _` \ \ / / _` | '__/ __| | -# | | |__| (_) | | | | _| | (_| |\ V / (_| | | \__ \ | -# | \____\___/|_| |_|_| |_|\__, | \_/ \__,_|_| |___/ | -# | |___/ | -# +----------------------------------------------------------------------+ -# | Configuration variables for main.mk | -# +----------------------------------------------------------------------+ -group = _("Configuration of Checks") +# .--Global Settings-----------------------------------------------------. +# | ____ _ _ _ ____ _ _ _ | +# | / ___| | ___ | |__ __ _| | / ___| ___| |_| |_(_)_ __ __ _ ___ | +# || | _| |/ _ \| '_ \ / _` | | \___ \ / _ \ __| __| | '_ \ / _` / __| | +# || |_| | | (_) | |_) | (_| | | ___) | __/ |_| |_| | | | | (_| \__ \ | +# | \____|_|\___/|_.__/ \__,_|_| |____/ \___|\__|\__|_|_| |_|\__, |___/ | +# | |___/ | +# +----------------------------------------------------------------------+ +# | Global configuration settings for main.mk and multisite.mk | +# '----------------------------------------------------------------------' -# ignored_checktypes --> Hier brauchen wir noch einen neuen Value-Typ +deprecated = _("Deprecated") -group = _("Multisite & WATO") +group = _("Status GUI (Multisite)") register_configvar(group, "debug", @@ -75,6 +74,7 @@ default_value = False), domain = "multisite") + register_configvar(group, "buffered_http_stream", Checkbox(title = _("Buffered HTTP stream"), @@ -113,13 +113,13 @@ register_configvar(group, "enable_sounds", - Checkbox(title = _("Enabled sounds in views"), + Checkbox(title = _("Enable sounds in views"), label = _("enable sounds"), help = _("If sounds are enabled then the user will be alarmed by problems shown " "in a Multisite status view if that view has been configured for sounds. " "From the views shipped in with Multisite all problem views have sounds " "enabled."), - default_value = True), + default_value = False), domain = "multisite") register_configvar(group, @@ -173,13 +173,25 @@ domain = "multisite") register_configvar(group, + "table_row_limit", + Integer(title = _("Limit the number of rows shown in tables"), + help = _("Several pages which use tables to show data in rows, like the " + "\"Users\" configuration page, can be configured to show " + "only a limited number of rows when accessing the pages."), + minvalue = 1, + default_value = 100, + unit = _('rows')), + domain = "multisite") + +register_configvar(group, "start_url", TextAscii(title = _("Start-URL to display in main frame"), help = _("When you point your browser to the Multisite GUI, usually the dashboard " "is shown in the main (right) frame. You can replace this with any other " "URL you like here."), size = 80, - default_value = "dashboard.py"), + default_value = "dashboard.py", + attrencode = True), domain = "multisite") register_configvar(group, @@ -189,46 +201,102 @@ "using OMD then you can embed a %s. 
This will be replaced by the name " "of the OMD site."), size = 80, - default_value = u"Check_MK %s"), + default_value = u"Check_MK %s", + attrencode = True), domain = "multisite") register_configvar(group, - "wato_hide_filenames", - Checkbox(title = _("Hide internal folder names in WATO"), - label = _("hide folder names"), - help = _("When enabled, then the internal names of WATO folder in the filesystem " - "are not shown. They will automatically be derived from the name of the folder " - "when a new folder is being created. Disable this option if you want to see and " - "set the filenames manually."), - default_value = True), + "pagetitle_date_format", + DropdownChoice( + title = _("Date format for page titles"), + help = _("When enabled, the headline of each page also displays "\ + "the date in addition the time."), + choices = [ + (None, _("Do not display a date")), + ('yyyy-mm-dd', _("YYYY-MM-DD")), + ('dd.mm.yyyy', _("DD.MM.YYYY")), + ], + default_value = None + ), domain = "multisite") register_configvar(group, - "wato_hide_hosttags", - Checkbox(title = _("Hide hosttags in WATO folder view"), - label = _("hide hosttags"), - help = _("When enabled, hosttags are no longer shown within the WATO folder view"), - default_value = False), + "escape_plugin_output", + Checkbox(title = _("Escape HTML codes in plugin output"), + label = _("Prevent loading HTML from plugin output or log messages"), + help = _("By default, for security reasons, Multisite does not interpret any HTML " + "code received from external sources, like plugin output or log messages. " + "If you are really sure what you are doing and need to have HTML codes, like " + "links rendered, disable this option. Be aware, you might open the way " + "for several injection attacks."), + default_value = True), domain = "multisite") + register_configvar(group, - "wato_hide_varnames", - Checkbox(title = _("Hide names of configuration variables"), - label = _("hide variable names"), - help = _("When enabled, internal configuration variable names of Check_MK are hidded " - "from the user (for example in the rule editor)"), - default_value = True), + "multisite_draw_ruleicon", + Checkbox(title = _("Show icon linking to WATO parameter editor for services"), + label = _("Show WATO icon"), + help = _("When enabled a rule editor icon is displayed for each " + "service in the multisite views. It is only displayed if the user " + "does have the permission to edit rules."), + default_value = True), domain = "multisite") + +def wato_host_tag_group_choices(): + # We add to the choices: + # 1. All host tag groups with their id + # 2. All *topics* that: + # - consist only of checkbox tags + # - contain at least two entries + choices = [] + by_topic = {} + for entry in config.wato_host_tags: + tgid = entry[0] + topic, tit = parse_hosttag_title(entry[1]) + choices.append((tgid, tit)) + by_topic.setdefault(topic, []).append(entry) + + # Now search for checkbox-only-topics + for topic, entries in by_topic.items(): + for entry in entries: + tgid, title, tags = entry + if len(tags) != 1: + break + else: + if len(entries) > 1: + choices.append(("topic:" + topic, _("Topic") + ": " + topic)) + + return choices + + register_configvar(group, - "wato_max_snapshots", - Integer(title = _("Number of configuration snapshots to keep"), - help = _("Whenever you successfully activate changes a snapshot of the configuration " - "will be created. You can also create snapshots manually. 
WATO will delete old " - "snapshots when the maximum number of snapshots is reached."), - minvalue = 1, - default_value = 50), - domain = "multisite") + "virtual_host_trees", + ListOf( + Tuple( + elements = [ + TextUnicode( + title = _("Title of the tree"), + allow_empty = False, + ), + DualListChoice( + allow_empty = False, + custom_order = True, + choices = wato_host_tag_group_choices, + ) + ] + ), + add_label = _("Create new virtual host tree configuration"), + title = _("Virtual Host Trees"), + help = _("Here you can define tree configurations for the snapin Virtual Host-Trees. " + "These trees organize your hosts based on their values in certain host tag groups. " + "Each host tag group you select will create one level in the tree."), + ), + domain = "multisite", +) + + register_configvar(group, "reschedule_timeout", @@ -257,17 +325,51 @@ domain = "multisite") register_configvar(group, - "wato_activation_method", - DropdownChoice( - title = _("WATO restart mode for Nagios"), - help = _("Should WATO restart or reload Nagios when activating changes"), - choices = [ - ('restart', _("Restart")), - ('reload' , _("Reload") ), - ]), - domain = "multisite" - ) + "sidebar_notify_interval", + Optional( + Float( + minvalue = 10.0, + default_value = 60.0, + unit = "sec", + display_format = "%.1f" + ), + title = _("Interval of sidebar popup notification updates"), + help = _("The sidebar can be configured to regularly check for pending popup notififcations. " + "This is disabled by default."), + none_label = _('(disabled)'), + ), + domain = "multisite") +register_configvar(group, + "adhoc_downtime", + Optional( + Dictionary( + optional_keys = False, + elements = [ + ("duration", Integer( + title = _("Duration"), + help = _("The duration in minutes of the adhoc downtime."), + minvalue = 1, + unit = _("minutes"), + default_value = 60, + )), + ("comment", TextUnicode( + title = _("Adhoc comment"), + help = _("The comment which is automatically sent with an adhoc downtime"), + size = 80, + allow_empty = False, + attrencode = True, + )), + ], + ), + title = _("Adhoc downtime"), + label = _("Enable adhoc downtime"), + help = _("This setting allows to set an adhoc downtime comment and its duration. " + "When enabled a new button Adhoc downtime for __ minutes will " + "be available in the command form."), + ), + domain = "multisite", +) register_configvar(group, "bi_precompile_on_demand", @@ -298,7 +400,210 @@ "details for each executed compilation.")), domain = "multisite") -# .----------------------------------------------------------------------. +register_configvar(group, + "auth_by_http_header", + Optional( + TextAscii( + label = _("HTTP Header Variable"), + help = _("Configure the name of the environment variable to read " + "from the incoming HTTP requests"), + default_value = 'REMOTE_USER', + attrencode = True, + ), + title = _("Authenticate users by incoming HTTP requests"), + label = _("Activate HTTP header authentication (Warning: Only activate " + "in trusted environments, see help for details)"), + help = _("If this option is enabled, multisite reads the configured HTTP header " + "variable from the incoming HTTP request and simply takes the string " + "in this variable as name of the authenticated user. " + "Be warned: Only allow access from trusted ip addresses " + "(Apache Allow from), like proxy " + "servers, to this webpage. A user with access to this page could simply fake " + "the authentication information. 
This option can be useful to " + " realize authentication in reverse proxy environments.") + ), + domain = "multisite") + +register_configvar(group, + "staleness_threshold", + Float( + title = _('Staleness value to mark hosts / services stale'), + help = _('The staleness value of a host / service is calculated by measuring the ' + 'configured check intervals a check result is old. A value of 1.5 means the ' + 'current check result has been gathered one and a half check intervals of an object. ' + 'This would mean 90 seconds in case of a check which is checked each 60 seconds.'), + minvalue = 1, + default_value = 1.5, + ), + domain = "multisite", +) + +register_configvar(group, + "user_localizations", + Transform( + ListOf( + Tuple( + elements = [ + TextUnicode(title = _("Original Text"), size = 40), + Dictionary( + title = _("Translations"), + elements = [ + ( l or "en", TextUnicode(title = a, size = 32) ) + for (l,a) in get_languages() + ], + columns = 2, + ), + ], + ), + title = _("Custom localizations"), + movable = False, + totext = _("%d translations"), + default_value = sorted(default_user_localizations.items()), + ), + forth = lambda d: sorted(d.items()), + back = lambda l: dict(l), + ), + domain = "multisite", +) + +# Helper that retrieves the list of hostgroups via Livestatus +# use alias by default but fallback to name if no alias defined +def list_hostgroups(): + groups = dict(html.live.query("GET hostgroups\nCache: reload\nColumns: name alias\n")) + return [ (name, groups[name] or name) for name in groups.keys() ] + +register_configvar(group, + "topology_default_filter_group", + Optional(DropdownChoice( + choices = list_hostgroups, + sorted = True, + ), + title = _("Network Topology: Default Filter Group"), + help = _("By default the network topology view shows you the parent / child relations " + "of all hosts within your local site. The list can be filtered based on hostgroup " + "memberships by the users. You can define a default group to use for filtering " + "which is used when a user opens the network topology view."), + none_label = _("Show all hosts when opening the network topology view"), + default_value = None, + ), + domain = "multisite" +) + +#. +# .--WATO----------------------------------------------------------------. +# | __ ___ _____ ___ | +# | \ \ / / \|_ _/ _ \ | +# | \ \ /\ / / _ \ | || | | | | +# | \ V V / ___ \| || |_| | | +# | \_/\_/_/ \_\_| \___/ | +# | | +# +----------------------------------------------------------------------+ +# | Global Configuration for WATO | +# '----------------------------------------------------------------------' + +group = _("Configuration GUI (WATO)") + +register_configvar(group, + "wato_max_snapshots", + Integer(title = _("Number of configuration snapshots to keep"), + help = _("Whenever you successfully activate changes a snapshot of the configuration " + "will be created. You can also create snapshots manually. 
WATO will delete old " + "snapshots when the maximum number of snapshots is reached."), + minvalue = 1, + default_value = 50), + domain = "multisite") + +register_configvar(group, + "wato_activation_method", + DropdownChoice( + title = _("WATO restart mode for Nagios"), + help = _("Should WATO restart or reload Nagios when activating changes"), + choices = [ + ('restart', _("Restart")), + ('reload' , _("Reload") ), + ]), + domain = "multisite" + ) + +register_configvar(group, + "wato_legacy_eval", + Checkbox( + title = _("Use unsafe legacy encoding for distributed WATO"), + help = _("The current implementation of WATO uses a Python module called ast for the " + "communication between sites. Previous versions of Check_MK used an insecure encoding " + "named pickle. Even in the current version WATO falls back to pickle " + "if your Python version is not recent enough. This is at least the case for RedHat/CentOS 5.X " + "and Debian 5.0. In a mixed environment you can force using the legacy pickle format " + "in order to create compatibility."), + ), + domain = "multisite" +) + + +register_configvar(group, + "wato_hide_filenames", + Checkbox(title = _("Hide internal folder names in WATO"), + label = _("hide folder names"), + help = _("When enabled, then the internal names of WATO folder in the filesystem " + "are not shown. They will automatically be derived from the name of the folder " + "when a new folder is being created. Disable this option if you want to see and " + "set the filenames manually."), + default_value = True), + domain = "multisite") + + +register_configvar(group, + "wato_upload_insecure_snapshots", + Checkbox(title = _("Allow upload of insecure WATO snapshots"), + label = _("upload insecure snapshots"), + help = _("When enabled, insecure snapshots are allowed. Please keep in mind that the upload " + "of unverified snapshots represents a security risk, since the content of a snapshot is executed " + "during runtime. Any manipulations in the content - either willingly or unwillingly (XSS attack) " + "- pose a serious security risk."), + default_value = False), + domain = "multisite") + +register_configvar(group, + "wato_hide_hosttags", + Checkbox(title = _("Hide hosttags in WATO folder view"), + label = _("hide hosttags"), + help = _("When enabled, hosttags are no longer shown within the WATO folder view"), + default_value = False), + domain = "multisite") + + +register_configvar(group, + "wato_hide_varnames", + Checkbox(title = _("Hide names of configuration variables"), + label = _("hide variable names"), + help = _("When enabled, internal configuration variable names of Check_MK are hidden " + "from the user (for example in the rule editor)"), + default_value = True), + domain = "multisite") + + +register_configvar(group, + "wato_hide_help_in_lists", + Checkbox(title = _("Hide help text of rules in list views"), + label = _("hide help text"), + help = _("When disabled, WATO shows the help texts of rules also in the list views."), + default_value = True), + domain = "multisite") + +register_configvar(group, + "wato_use_git", + Checkbox(title = _("Use GIT version control for WATO"), + label = _("enable GIT version control"), + help = _("When enabled, all changes of configuration files are tracked with the " + "version control system GIT. You need to make sure that git is installed " + "on your Nagios server. The version history currently cannot be viewed " + "via the web GUI. 
Please use git command line tools within your Check_MK " + "configuration directory."), + default_value = False), + domain = "multisite") + +#. +# .--User Management-----------------------------------------------------. # | _ _ __ __ _ | # | | | | |___ ___ _ __ | \/ | __ _ _ __ ___ | |_ | # | | | | / __|/ _ \ '__| | |\/| |/ _` | '_ ` _ \| __| | @@ -306,6 +611,8 @@ # | \___/|___/\___|_| |_| |_|\__, |_| |_| |_|\__| | # | |___/ | # +----------------------------------------------------------------------+ +# | Global settings for users and LDAP connector. | +# '----------------------------------------------------------------------' group = _("User Management") @@ -327,10 +634,31 @@ ) register_configvar(group, + "userdb_automatic_sync", + ListChoice( + title = _('Automatic User Synchronization'), + help = _('By default the users are synchronized automatically in several situations. ' + 'The sync is started when opening the "Users" page in configuration and ' + 'during each page rendering. Each connector can then specify if it wants to perform ' + 'any actions. For example the LDAP connector will start the sync once the cached user ' + 'information are too old.'), + default_value = [ 'wato_users', 'page', 'wato_pre_activate_changes', 'wato_snapshot_pushed' ], + choices = [ + ('page', _('During regular page processing')), + ('wato_users', _('When opening the users\' configuration page')), + ('wato_pre_activate_changes', _('Before activating the changed configuration')), + ('wato_snapshot_pushed', _('On a remote site, when it receives a new configuration')), + ], + allow_empty = True, + ), + domain = "multisite", +) + +register_configvar(group, "ldap_connection", Dictionary( title = _("LDAP Connection Settings"), - help = _("This option configures all LDAP specific connection options. These options " + help = _("This section configures all LDAP specific connection options. These options " "are used by the LDAP user connector."), elements = [ ("server", TextAscii( @@ -338,6 +666,16 @@ help = _("Set the host address of the LDAP server. Might be an IP address or " "resolvable hostname."), allow_empty = False, + attrencode = True, + )), + ('failover_servers', ListOfStrings( + title = _('Failover Servers'), + help = _('When the connection to the first server fails with connect specific errors ' + 'like timeouts or some other network related problems, the connect mechanism ' + 'will try to use this server instead of the server configured above. If you ' + 'use persistent connections (default), the connection is being used until the ' + 'LDAP is not reachable or the local webserver is restarted.'), + allow_empty = False, )), ("port", Integer( title = _("TCP Port"), @@ -349,12 +687,24 @@ )), ("use_ssl", FixedValue( title = _("Use SSL"), - help = _("Connect to the LDAP server with a SSL encrypted connection."), + help = _("Connect to the LDAP server with a SSL encrypted connection. You might need " + "to configure the OpenLDAP installation on your monitoring server to accept " + "the certificates of the LDAP server. This is normally done via system wide " + "configuration of the CA certificate which signed the certificate of the LDAP " + "server. 
Please refer to the " + "documentation for details."), value = True, totext = _("Encrypt the network connection using SSL."), )), + ("no_persistent", FixedValue( + title = _("No persistent connection"), + help = _("The connection to the LDAP server is not persisted."), + value = True, + totext = _("Don't use persistent LDAP connections."), + )), ("connect_timeout", Float( - title = _("LDAP Connect Timeout (sec)"), + title = _("Connect Timeout (sec)"), help = _("Timeout for the initial connection to the LDAP server in seconds."), minvalue = 1.0, default_value = 2.0, @@ -372,12 +722,13 @@ "the selection e.g. the attribute names used in LDAP queries will " "be altered."), choices = [ - ("ad", _("Active Directory")), - ("openldap", _("OpenLDAP")), + ("ad", _("Active Directory")), + ("openldap", _("OpenLDAP")), + ("389directoryserver", _("389 Directory Server")), ], )), ("bind", Tuple( - title = _("LDAP Bind Credentials"), + title = _("Bind Credentials"), help = _("Set the credentials to be used to connect to the LDAP server. The " "used account must not be allowed to do any changes in the directory " "the whole connection is read only. " @@ -389,8 +740,8 @@ LDAPDistinguishedName( title = _("Bind DN"), help = _("Specify the distinguished name to be used to bind to " - "the LDAP directory."), - size = 80, + "the LDAP directory, e. g. CN=ldap,OU=users,DC=example,DC=com"), + size = 63, ), Password( title = _("Bind Password"), @@ -399,29 +750,45 @@ ), ], )), + ("page_size", Integer( + title = _("Page Size"), + help = _("LDAP searches can be performed in paginated mode, for example to improve " + "the performance. This enables pagination and configures the size of the pages."), + minvalue = 1, + default_value = 1000, + )), + ("response_timeout", Integer( + title = _("Response Timeout (sec)"), + help = _("Timeout for LDAP query responses."), + minvalue = 0, + default_value = 5, + )), ], - optional_keys = ['use_ssl', 'bind', ], + optional_keys = ['no_persistent', 'use_ssl', 'bind', 'page_size', 'response_timeout', 'failover_servers'], + default_keys = ['page_size'] ), domain = "multisite", + in_global_settings = False, ) register_configvar(group, "ldap_userspec", Dictionary( title = _("LDAP User Settings"), - help = _("This option configures all user related LDAP options. These options " + help = _("This section configures all user related LDAP options. These options " "are used by the LDAP user connector to find the needed users in the LDAP directory."), elements = [ ("dn", LDAPDistinguishedName( title = _("User Base DN"), - help = _("The base distinguished name to be used when performing user account " - "related queries to the LDAP server."), + help = _("Give a base distinguished name here, e. g. OU=users,DC=example,DC=com
    " + "All user accounts to synchronize must be located below this one."), size = 80, )), ("scope", DropdownChoice( title = _("Search Scope"), - help = _("Scope to be used in LDAP searches. In most cases \"sub\" is the best choice. " - "It searches for matching objects in the given base and the whole subtree."), + help = _("Scope to be used in LDAP searches. In most cases Search whole subtree below " + "the base DN is the best choice. " + "It searches for matching objects recursively."), choices = [ ("sub", _("Search whole subtree below the base DN")), ("base", _("Search only the entry at the base DN")), @@ -433,9 +800,30 @@ title = _("Search Filter"), help = _("Using this option you can define an optional LDAP filter which is used during " "LDAP searches. It can be used to only handle a subset of the users below the given " - "base DN."), + "base DN.
Some common examples: " + "All user objects in LDAP: " + "(&(objectclass=user)(objectcategory=person)) " + "Members of a group: " + "(&(objectclass=user)(objectcategory=person)(memberof=CN=cmk-users,OU=groups,DC=example,DC=com))
    "), size = 80, default_value = lambda: userdb.ldap_filter('users', False), + attrencode = True, + )), + ("filter_group", LDAPDistinguishedName( + title = _("Filter Group (Only use in special situations)"), + help = _("Using this option you can define the DN of a group object which is used to filter the users. " + "Only members of this group will then be synchronized. This is a filter which can be " + "used to extend capabilities of the regular \"Search Filter\". Using the search filter " + "you can only define filters which directly apply to the user objects. To filter by " + "group memberships, you can use the memberOf attribute of the user objects in some " + "directories. But some directories do not have such attributes because the memberships " + "are stored in the group objects as e.g. member attributes. You should use the " + "regular search filter whenever possible and only use this filter when it is really " + "neccessary. Finally you can say, you should not use this option when using Active Directory. " + "This option is neccessary in OpenLDAP directories when you like to filter by group membership.
    " + "If using, give a plain distinguished name of a group here, e. g. " + "CN=cmk-users,OU=groups,DC=example,DC=com"), + size = 80, )), ("user_id", TextAscii( title = _("User-ID Attribute"), @@ -443,6 +831,7 @@ "unique values to make an user identifyable by the value of this " "attribute."), default_value = lambda: userdb.ldap_attr('user_id'), + attrencode = True, )), ("lower_user_ids", FixedValue( title = _("Lower Case User-IDs"), @@ -450,30 +839,43 @@ value = True, totext = _("Enforce lower case User-IDs."), )), + ("user_id_umlauts", DropdownChoice( + title = _("Umlauts in User-IDs"), + help = _("Multisite does not support umlauts in User-IDs at the moment. To deal " + "with LDAP users having umlauts in their User-IDs you have the following " + "choices."), + choices = [ + ("replace", _("Replace umlauts like \"ü\" with \"ue\"")), + ("skip", _("Skip users with umlauts in their User-IDs")), + ], + default_value = "replace", + )), ], - optional_keys = ['scope', 'filter', 'user_id', 'lower_user_ids'], + optional_keys = ['filter', 'filter_group', 'user_id', 'lower_user_ids', ], ), domain = "multisite", + in_global_settings = False, ) register_configvar(group, "ldap_groupspec", Dictionary( title = _("LDAP Group Settings"), - help = _("This option configures all group related LDAP options. These options " + help = _("This section configures all group related LDAP options. These options " "are only needed when using group related attribute synchonisation plugins."), elements = [ ("dn", LDAPDistinguishedName( title = _("Group Base DN"), - help = _("The base distinguished name to be used when performing group account " - "related queries to the LDAP server."), + help = _("Give a base distinguished name here, e. g. OU=groups,DC=example,DC=com
    " + "All groups used must be located below this one."), size = 80, )), ("scope", DropdownChoice( title = _("Search Scope"), - help = _("Scope to be used in group related LDAP searches. In most cases \"sub\" " + help = _("Scope to be used in group related LDAP searches. In most cases " + "Search whole subtree below the base DN " "is the best choice. It searches for matching objects in the given base " - "and the whole subtree."), + "recursively."), choices = [ ("sub", _("Search whole subtree below the base DN")), ("base", _("Search only the entry at the base DN")), @@ -485,19 +887,23 @@ title = _("Search Filter"), help = _("Using this option you can define an optional LDAP filter which is used " "during group related LDAP searches. It can be used to only handle a " - "subset of the groups below the given base DN."), + "subset of the groups below the given base DN.
    " + "e.g. (objectclass=group)"), size = 80, default_value = lambda: userdb.ldap_filter('groups', False), + attrencode = True, )), ("member", TextAscii( title = _("Member Attribute"), help = _("The attribute used to identify users group memberships."), default_value = lambda: userdb.ldap_attr('member'), + attrencode = True, )), ], - optional_keys = ['scope', 'filter', 'member'], + optional_keys = ['filter', 'member'], ), domain = "multisite", + in_global_settings = False, ) register_configvar(group, @@ -510,36 +916,93 @@ 'user accounts for gathering their attributes. The user options which get imported ' 'into Check_MK from LDAP will be locked in WATO.'), elements = userdb.ldap_attribute_plugins_elements, + default_keys = ['email', 'alias', 'auth_expire' ], ), domain = "multisite", + in_global_settings = False, ) register_configvar(group, "ldap_cache_livetime", - Integer( + Age( title = _('LDAP Cache Livetime'), help = _('This option defines the maximum age for using the cached LDAP data. The time of the ' 'last LDAP synchronisation is saved and checked on every request to the multisite ' - 'interface. Once the cache gets outdated, a new synchronisation job is started.'), + 'interface. Once the cache gets outdated, a new synchronisation job is started.
    ' + 'Please note: Passwords of the users are never stored in WATO and therefor never cached!'), minvalue = 1, default_value = 300, ), domain = "multisite", + in_global_settings = False, ) register_configvar(group, "ldap_debug_log", + Checkbox( + title = _("LDAP connection diagnostics"), + label = _("Activate logging of LDAP transactions"), + help = _("If this option is enabled, Check_MK will create a log file in " + "var/log/ldap.log within your site in OMD environments. " + "You should enable this option only for debugging."), + default_value = False + ), + domain = "multisite", + in_global_settings = False, +) + +register_configvar(group, + "lock_on_logon_failures", Optional( - Filename( - label = _("Absolute path to log file"), - default = defaults.var_dir + '/web/ldap-debug.log', + Integer( + label = _("Number of logon failures to lock the account"), + default_value = 3, + minvalue = 1, ), - title = _("LDAP connection diagnostics"), - label = _("Activate logging of LDAP transactions into a logfile"), - help = _("If this option is used and set to a filename, Check_MK will create a logfile " - "containing details about connecting to LDAP and the single transactions.")), - domain = "multisite") + none_value = False, + title = _("Lock user accounts after N logon failures"), + label = _("Activate automatic locking of user accounts"), + help = _("This options enables automatic locking of user account after " + "N logon failures. One successful login resets the failure counter.") + ), + domain = "multisite" +) +register_configvar(group, + "password_policy", + Dictionary( + title = _('htpasswd: Password Policy'), + help = _('You can define some rules to which each user password ahers. By default ' + 'all passwords are accepted, even ones which are made of only a single character, ' + 'which is obviously a bad idea. Using this option you can enforce your users ' + 'to choose more secure passwords.'), + elements = [ + ('min_length', Integer( + title = _("Minimum password length"), + minvalue = 1, + )), + ('num_groups', Integer( + title = _("Number of character groups to use"), + minvalue = 1, + maxvalue = 4, + help = _("Force the user to choose a password that contains characters from at least " + "this number of different character groups. " + "Character groups are:
" "• lowercase letters" "• uppercase letters" "• digits" "• special characters such as an underscore or dash" "
    "), + )), + ('max_age', Age( + title = _("Maximum age of passwords"), + minvalue = 1, + display = ["days"], + )), + ], + ), + domain = "multisite", +) def list_roles(): roles = userdb.load_roles() @@ -547,7 +1010,7 @@ def list_contactgroups(): contact_groups = userdb.load_group_information().get("contact", {}) - entries = [ (c, contact_groups[c]) for c in contact_groups ] + entries = [ (c, g['alias']) for c, g in contact_groups.items() ] entries.sort() return entries @@ -577,17 +1040,82 @@ domain = "multisite", ) -# .----------------------------------------------------------------------. -# | _ _ | -# | ___ _ __ ___ | | __ ___ _ __ _ __ ___ ___ __| | ___ | -# | / __| '_ ` _ \| |/ / / _ \| '_ \| '_ ` _ \ / _ \ / _` |/ _ \ | -# | | (__| | | | | | < | (_) | |_) | | | | | | (_) | (_| | __/ | -# | \___|_| |_| |_|_|\_\ \___/| .__/|_| |_| |_|\___/ \__,_|\___| | -# | |_| | +register_configvar(group, + "save_user_access_times", + Checkbox( + title = _("Save last access times of users"), + label = _("Save the time of the latest user activity"), + help = _("When enabled, the time of the last access is stored for each user. The last " + "activity is shown on the users page."), + default_value = False + ), + domain = "multisite" +) + + +register_configvar(group, + "export_folder_permissions", + Checkbox( + title = _("Export WATO folder permissions"), + label = _("Make WATO folder permissions usable e.g. by NagVis"), + help = _("It is possible to create maps representing the WATO folder hierarchy within " + "NagVis by naming the maps like the folders are named internally. To make the " + "restriction of access to the maps as comfortable as possible, the permissions " + "configured within WATO can be exported to NagVis."), + default_value = False, + ), + domain = "multisite" +) + +#. +# .--Check_MK------------------------------------------------------------. +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | |_____| | # +----------------------------------------------------------------------+ +# | Operation mode of Check_MK | +# '----------------------------------------------------------------------' group = _("Operation mode of Check_MK") + +register_configvar(group, + "use_new_descriptions_for", + ListChoice( + title = _("Use new service descriptions"), + help = _("In order to make Check_MK more consistent, " + "the descriptions of several services have been renamed in newer " + "Check_MK versions. One example is the filesystem services that have " + "been renamed from fs_ into Filesystem. But since renaming " + "of existing services has many implications - including existing rules, performance " + "data and availability history - these renamings are disabled per default for " + "existing installations. 
Here you can switch to the new descriptions for " + "selected check types"), + choices = [ + ( "df", _("Used space in filesystems")), + ( "df_netapp", _("NetApp Filers: Used Space in Filesystems")), + ( "df_netapp32", _("NetApp Filers: Used space in Filesystem Using 32-Bit Counters")), + ( "esx_vsphere_datastores", _("VMWare ESX host systems: Used space")), + ( "hr_fs", _("Used space in filesystems via SNMP")), + ( "vms_diskstat.df", _("Disk space on OpenVMS")), + ( "zfsget", _("Used space in ZFS pools and filesystems")), + ( "ps", _("State and Count of Processes") ), + ( "ps.perf", _("State and Count of Processes (with additional performance data)")), + ( "wmic_process", _("Resource consumption of windows processes")), + ( "services", _("Windows Services")), + ( "logwatch", _("Check logfiles for relevant new messages")), + ( "cmk-inventory", _("Monitor hosts for unchecked services (Check_MK Discovery)")), + ( "hyperv_vms", _("Hyper-V Server: State of VMs")), + ], + render_orientation = "vertical", + ), + need_restart = True +) + + register_configvar(group, "tcp_connect_timeout", Float(title = _("Agent TCP connect timeout (sec)"), @@ -619,7 +1147,7 @@ "off locking altogether."), choices = [ ('abort', _("Abort with an error")), - ('ait' , _("Wait until the other has finished") ), + ('wait' , _("Wait until the other has finished") ), (None , _("Disable locking") ), ]), need_restart = False @@ -646,18 +1174,6 @@ "for the operation, but on the other hand will lead to a slightly higher load " "of Nagios for the first couple of minutes after the restart. "))) - -register_configvar(group, - "debug_log", - Optional(Filename(label = _("Absolute path to log file")), - title = _("Logfile for debugging errors in checks"), - label = _("Activate logging errors into a logfile"), - help = _("If this option is used and set to a filename, Check_MK will create a debug logfile " - "containing details about failed checks (those which have state UNKNOWN " - "and the output UNKNOWN - invalid output from plugin.... Per default no " - "logfile is written.")), - need_restart = True) - register_configvar(group, "cluster_max_cachefile_age", Integer(title = _("Maximum cache file age for clusters"), @@ -670,6 +1186,15 @@ need_restart = True) register_configvar(group, + "piggyback_max_cachefile_age", + Age(title = _("Maximum age for piggyback files"), + help = _("The maximum age for piggy back data from another host to be valid for monitoring. " + "Older files are deleted before processing them. Please make sure that this age is " + "at least as large as you normal check interval for piggy hosts.")), + need_restart = True) + + +register_configvar(group, "check_submission", DropdownChoice( title = _("Check submission method"), @@ -693,32 +1218,108 @@ "and children_system_time")), need_restart = True) -group = _("Inventory - automatic service detection") +register_configvar(group, + "use_dns_cache", + Checkbox( + title = _("Use DNS lookup cache"), + label = _("Prevent DNS lookups by use of a cache file"), + help = _("When this option is enabled (which is the default), then Check_MK tries to " + "prevent IP address lookups during the configuration generation. This can speed " + "up this process greatly when you have a larger number of hosts. The cache is stored " + "in a simple file. Note: when the cache is enabled then changes of the IP address " + "of a host in your name server will not be detected immediately. 
If you need an " + "immediate update then simply disable the cache once, activate the changes and " + "enabled it again. OMD based installations automatically update the cache once " + "a day."), + default_value = True, + ), + need_restart = True +) + +register_configvar(group, + "use_inline_snmp", + Checkbox( + title = _("Use Inline SNMP"), + label = _("Enable inline SNMP (directly use net-snmp libraries)"), + help = _("By default Check_MK uses command line calls of Net-SNMP tools like snmpget or " + "snmpwalk to gather SNMP information. For each request a new command line " + "program is being executed. It is now possible to use the inline SNMP implementation " + "which calls the net-snmp libraries directly via its python bindings. This " + "should increase the performance of SNMP checks in a significant way. The inline " + "SNMP mode is a feature which improves the performance for large installations and " + "only available via our subscription."), + default_value = False + ), + need_restart = True +) + +register_configvar(group, + "record_inline_snmp_stats", + Checkbox( + title = _("Record statistics of Inline SNMP"), + label = _("Enable recording of Inline SNMP statistics"), + help = _("When you have enabled Inline SNMP, you can use this flag to enable recording of " + "some performance related values. The recorded values are stored in a single file " + "at var/check_mk/snmp.stats.
    " + "Please note: Only enable this for a short period, because it will " + "decrease the performance of your monitoring."), + default_value = False + ), + need_restart = True +) + +group = _("Service discovery") register_configvar(group, "inventory_check_interval", Optional( - Integer(title = _("Do inventory check every"), - label = _("minutes"), - min_value = 1), - title = _("Enable regular inventory checks"), - help = _("If enabled, Check_MK will create one additional check per host " - "that does a regular check, if the inventory would find new services " + Integer(title = _("Perform service discovery check every"), + unit = _("minutes"), + min_value = 1, + default_value = 720), + title = _("Enable regular service discovery checks"), + help = _("If enabled, Check_MK will create one additional service per host " + "that does a regular check, if the service discovery would find new services " "currently un-monitored.")), need_restart = True) register_configvar(group, "inventory_check_severity", DropdownChoice( - title = _("Severity of failed inventory check"), - help = _("Please select which alarm state the inventory check services " + title = _("Severity of failed service discovery check"), + help = _("Please select which alarm state the service discovery check services " "shall assume in case that un-monitored services are found."), choices = [ (0, _("OK - do not alert, just display")), (1, _("Warning") ), (2, _("Critical") ), (3, _("Unknown") ), - ])) + ], + default_value = 1)) + +register_configvar(group, + "inventory_check_do_scan", + DropdownChoice( + title = _("Service discovery check for SNMP devices"), + choices = [ + ( True, _("Perform full SNMP scan always, detect new check types") ), + ( False, _("Just rely on existing check files, detect new items only") ) + ] + )) + +register_configvar(group, + "inventory_check_autotrigger", + Checkbox( + title = _("Service discovery triggers service discovery check"), + label = _("Automatically schedule service discovery check after service configuration changes"), + help = _("When this option is enabled then after each change of the service " + "configuration of a host via WATO - may it be via manual changes or a bulk " + "discovery - the service discovery check is automatically rescheduled in order " + "to reflect the new service state correctly immediately."), + default_value = True, + )) + + _if_portstate_choices = [ ( '1', 'up(1)'), @@ -730,6 +1331,37 @@ ( '7', 'lowerLayerDown(7)'), ] +_brocade_fcport_adm_choices = [ + ( 1, 'online(1)'), + ( 2, 'offline(2)'), + ( 3, 'testing(3)'), + ( 4, 'faulty(4)'), + ] + +_brocade_fcport_op_choices = [ + ( 0, 'unkown(0)'), + ( 1, 'online(1)'), + ( 2, 'offline(2)'), + ( 3, 'testing(3)'), + ( 4, 'faulty(4)'), + ] + +_brocade_fcport_phy_choices = [ + ( 1, 'noCard(1)'), + ( 2, 'noTransceiver(2)'), + ( 3, 'laserFault(3)'), + ( 4, 'noLight(4)'), + ( 5, 'noSync(5)'), + ( 6, 'inSync(6)'), + ( 7, 'portFault(7)'), + ( 8, 'diagFault(8)'), + ( 9, 'lockRef(9)'), + ( 10, 'validating(10)'), + ( 11, 'invalidModule(11)'), + ( 14, 'noSigDet(14)'), + ( 255, 'unkown(255)'), + ] + _if_porttype_choices = [ ("1", "other(1)" ), ("2", "regular1822(2)" ), ("3", "hdh1822(3)" ), ("4", "ddnX25(4)" ), ("5", "rfc877x25(5)" ), ("6", "ethernetCsmacd(6)" ), ("7", "iso88023Csmacd(7)" ), ("8", @@ -810,7 +1442,7 @@ "option. 
This will retain the old service descriptions and the old " "performance data."))) -register_configvar(group, +register_configvar(deprecated, "if_inventory_uses_description", Checkbox(title = _("Use description as service name for network interface checks"), label = _("use description"), @@ -818,7 +1450,7 @@ "of the port number. If no description is available then the port number is " "used anyway."))) -register_configvar(group, +register_configvar(deprecated, "if_inventory_uses_alias", Checkbox(title = _("Use alias as service name for network interface checks"), label = _("use alias"), @@ -826,14 +1458,14 @@ "of the port number. If no alias is available then the port number is used " "anyway."))) -register_configvar(group, +register_configvar(deprecated, "if_inventory_portstates", ListChoice(title = _("Network interface port states to inventorize"), help = _("When doing inventory on switches or other devices with network interfaces " "then only ports found in one of the configured port states will be added to the monitoring."), choices = _if_portstate_choices)) -register_configvar(group, +register_configvar(deprecated, "if_inventory_porttypes", ListChoice(title = _("Network interface port types to inventorize"), help = _("When doing inventory on switches or other devices with network interfaces " @@ -841,7 +1473,7 @@ choices = _if_porttype_choices, columns = 3)) -register_configvar(group, +register_configvar(deprecated, "diskstat_inventory_mode", DropdownChoice( title = _("Inventory mode for disk IO checks"), @@ -868,19 +1500,10 @@ need_restart = True ) -register_configvar(group, - "always_cleanup_autochecks", - Checkbox(title = _("Always cleanup autochecks"), - help = _("When switched on, Check_MK will always cleanup the autochecks files " - "after each inventory, i.e. create one file per host. This is the same " - "as adding the option -u to each call of -I on the " - "command line."))) - - group = _("Check configuration") -register_configvar(group, +register_configvar(deprecated, "if_inventory_monitor_state", Checkbox(title = _("Monitor port state of network interfaces"), label = _("monitor port state"), @@ -891,7 +1514,7 @@ "by overridden on a per-host and per-port base by defining special check " "parameters via a rule."))) -register_configvar(group, +register_configvar(deprecated, "if_inventory_monitor_speed", Checkbox(title = _("Monitor port speed of network interfaces"), label = _("monitor port speed"), @@ -899,7 +1522,7 @@ "(and switch ports) the current speed setting of the port will " "automatically be coded as a check parameter into the check. That way the check " "will get warning or critical when speed later changes (for example from " - "100 MBit/s to 10 MBit/s). This setting can later " + "100 Mbit/s to 10 Mbit/s). This setting can later " "by overridden on a per-host and per-port base by defining special check " "parameters via a rule."))) @@ -920,17 +1543,6 @@ ) register_configvar(group, - "logwatch_forward_to_ec", - Checkbox( - title = _("Forward logwatch messages to event console"), - label = _("forward to event console"), - help = _("Instead of using the regular logwatch check all lines received by logwatch can " - "be forwarded to a Check_MK event console daemon to be processed. 
The target event " - "console can be configured for each host in a separate rule."), - ), -) - -register_configvar(group, "printer_supply_some_remaining_status", DropdownChoice( title = _("Printer supply some remaining status"), @@ -946,7 +1558,7 @@ ), ) -register_configvar(group, +register_configvar(deprecated, "printer_supply_default_levels", Tuple( title = _("Printer supply default levels"), @@ -963,32 +1575,30 @@ ], )) -# +----------------------------------------------------------------------+ -# | ____ _ | -# | | _ \ _ _| | ___ | -# | | |_) | | | | |/ _ \ | -# | | _ <| |_| | | __/ | -# | |_| \_\\__,_|_|\___| | +#. +# .--Rulesets------------------------------------------------------------. +# | ____ _ _ | +# | | _ \ _ _| | ___ ___ ___| |_ ___ | +# | | |_) | | | | |/ _ \/ __|/ _ \ __/ __| | +# | | _ <| |_| | | __/\__ \ __/ |_\__ \ | +# | |_| \_\\__,_|_|\___||___/\___|\__|___/ | # | | -# | ____ _ _ _ | -# | | _ \ ___ ___| | __ _ _ __ __ _| |_(_) ___ _ __ ___ | -# | | | | |/ _ \/ __| |/ _` | '__/ _` | __| |/ _ \| '_ \/ __| | -# | | |_| | __/ (__| | (_| | | | (_| | |_| | (_) | | | \__ \ | -# | |____/ \___|\___|_|\__,_|_| \__,_|\__|_|\___/|_| |_|___/ | -# | | -# +----------------------------------------------------------------------+ -# | Declaration of rules to be defined in main.mk or in folders | # +----------------------------------------------------------------------+ +# | Rulesets for hosts and services except check parameter rules. | +# '----------------------------------------------------------------------' register_rulegroup("grouping", _("Grouping"), - _("Assignment of host & services to host, service and contacts groups. ")) + _("Assignment of host & services to host, service and contacts groups. ")) group = "grouping" register_rule(group, "host_groups", GroupSelection( "host", - title = _("Assignment of hosts to host groups")), + title = _("Assignment of hosts to host groups"), + help = _("Hosts can be grouped together into host groups. The most common use case " + "is to put hosts which belong together in a host group to make it possible " + "to get them listed together in the status GUI.")), match = "all") register_rule(group, @@ -1030,28 +1640,32 @@ register_rule(group, "extra_service_conf:check_interval", - Integer(title = _("Normal check interval for service checks"), - help = _("Check_MK usually uses an interval of one minute for the active Check_MK " - "check and for legacy checks. Here you can specify a larger interval. Please " - "note, that this setting only applies to active checks (those with the " - "%s reschedule button). If you want to change the check interval of " - "the Check_MK service only, specify Check_MK$ in the list " - "of services.") % '', - minvalue = 1, - label = _("minutes")), + Transform( + Age(minvalue=1, default_value=60), + forth = lambda v: int(v * 60), + back = lambda v: float(v) / 60.0, + title = _("Normal check interval for service checks"), + help = _("Check_MK usually uses an interval of one minute for the active Check_MK " + "check and for legacy checks. Here you can specify a larger interval. Please " + "note, that this setting only applies to active checks (those with the " + "%s reschedule button). 
If you want to change the check interval of " + "the Check_MK service only, specify Check_MK$ in the list " + "of services.") % ''), itemtype = "service") register_rule(group, "extra_service_conf:retry_interval", - Integer(title = _("Retry check interval for service checks"), - help = _("This setting is relevant if you have set the maximum number of check " - "attempts to a number greater than one. In case a service check is not OK " - "and the maximum number of check attempts is not yet reached, it will be " - "rescheduled with this interval. The retry interval is usually set to a smaller " - "value than the normal interval.
    This setting only applies to " - "active checks."), - minvalue = 1, - label = _("minutes")), + Transform( + Age(minvalue=1, default_value=60), + forth = lambda v: int(v * 60), + back = lambda v: float(v) / 60.0, + title = _("Retry check interval for service checks"), + help = _("This setting is relevant if you have set the maximum number of check " + "attempts to a number greater than one. In case a service check is not OK " + "and the maximum number of check attempts is not yet reached, it will be " + "rescheduled with this interval. The retry interval is usually set to a smaller " + "value than the normal interval.
    This setting only applies to " + "active checks.")), itemtype = "service") register_rule(group, @@ -1089,8 +1703,8 @@ title = _("Enable/disable passive checks for services"), help = _("This setting allows you to disable the processing of passiv check results for a " "service."), - choices = [ ("1", _("Enable processing of passiv check results")), - ("0", _("Disable processing of passiv check results")) ], + choices = [ ("1", _("Enable processing of passive check results")), + ("0", _("Disable processing of passive check results")) ], ), itemtype = "service") @@ -1116,27 +1730,30 @@ register_rule(group, "extra_host_conf:check_interval", - Integer( + Transform( + Age(minvalue=1, default_value=60), + forth = lambda v: int(v * 60), + back = lambda v: float(v) / 60.0, title = _("Normal check interval for host checks"), help = _("The default interval is set to one minute. Here you can specify a larger " "interval. The host is contacted in this interval on a regular base. The host " "check is also being executed when a problematic service state is detected to check " - "wether or not the service problem is resulting from a host problem."), - minvalue = 1, - label = _("minutes") + "wether or not the service problem is resulting from a host problem.") ) ) register_rule(group, "extra_host_conf:retry_interval", - Integer(title = _("Retry check interval for host checks"), + Transform( + Age(minvalue=1, default_value=60), + forth = lambda v: int(v * 60), + back = lambda v: float(v) / 60.0, + title = _("Retry check interval for host checks"), help = _("This setting is relevant if you have set the maximum number of check " "attempts to a number greater than one. In case a host check is not UP " "and the maximum number of check attempts is not yet reached, it will be " "rescheduled with this interval. The retry interval is usually set to a smaller " "value than the normal interval."), - minvalue = 1, - label = _("minutes") ) ) @@ -1149,15 +1766,76 @@ "the state of the host will stay at its last status.")), ) +register_rule( + group, + "host_check_commands", + CascadingDropdown( + title = _("Host Check Command"), + help = _("Usually Check_MK uses a series of PING (ICMP echo request) in order to determine " + "whether a host is up. In some cases this is not possible, however. With this rule " + "you can specify an alternative way of determining the host's state."), + choices = [ + ( "ping", _("PING (active check with ICMP echo request)") ), + ( "smart", _("Smart PING (only with Check_MK Micro Core)") ), + ( "tcp" , _("TCP Connect"), Integer(label = _("to port:"), minvalue=1, maxvalue=65535, default_value=80 )), + ( "ok", _("Always assume host to be up") ), + ( "agent", _("Use the status of the Check_MK Agent") ), + ( "service", _("Use the status of the service..."), + TextUnicode( + label = ":", + size = 45, + allow_empty = False, + attrencode = True, + )), + ( "custom", _("Use a custom check plugin..."), PluginCommandLine() ), + ], + default_value = "ping", + html_separator = " ", + ), + match = 'first' +) + + register_rule(group, "extra_host_conf:check_command", TextAscii( + title = _("Internal Command for Hosts Check"), label = _("Command:"), - title = _("Check Command for Hosts Check"), - help = _("This parameter changes the default check_command for " - "a host check"), - ), + help = _("This ruleset is deprecated and will be removed soon: " + "it changes the default check_command for a host check. 
You need to " + "define that command manually in your monitoring configuration."), + attrencode = True, + ), +) + +def get_snmp_checktypes(): + checks = check_mk_local_automation("get-check-information") + types = [ (cn, (c['title'] != cn and '%s: ' % cn or '') + c['title']) + for (cn, c) in checks.items() if c['snmp'] ] + types.sort() + return [ (None, _('All SNMP Checks')) ] + types + +register_rule(group, + "snmp_check_interval", + Tuple( + title = _('Check intervals for SNMP checks'), + help = _('This rule can be used to customize the check interval of each SNMP based check. ' + 'With this option it is possible to configure a longer check interval for specific ' + 'checks, than then normal check interval.'), + elements = [ + DropdownChoice( + title = _("Checktype"), + choices = get_snmp_checktypes, + ), + Integer( + title = _("Do check every"), + unit = _("minutes"), + min_value = 1, + default_value = 1, + ), + ] ) +) group = "monconf/" + _("Notifications") register_rule(group, @@ -1181,8 +1859,57 @@ "service completely. Per default all notifications are enabled."), choices = [ ("1", _("Enable service notifications")), ("0", _("Disable service notifications")) ], + ), + itemtype = "service" +) + +register_rule(group, + "extra_host_conf:notification_options", + Transform( + ListChoice( + choices = [ + ( "d", _("Host goes down")), + ( "u", _("Host gets unreachble")), + ( "r", _("Host goes up again")), + ( "f", _("Start or end of flapping state")), + ( "s", _("Start or end of a scheduled downtime")), + ], + default_value = [ "d", "u", "r", "f", "s" ], ), - itemtype = "service") + title = _("Notified events for hosts"), + help = _("This ruleset allows you to restrict notifications of host problems to certain " + "states, e.g. only notify on DOWN, but not on UNREACHABLE. Please select the types " + "of events that should initiate notifications. Please note that several other " + "filters must also be passed in order for notifications to finally being sent out."), + forth = lambda x: x != 'n' and x.split(",") or [], + back = lambda x: ",".join(x) or "n", + ), +) + +register_rule(group, + "extra_service_conf:notification_options", + Transform( + ListChoice( + choices = [ + ("w", _("Service goes into warning state")), + ("u", _("Service goes into unknown state")), + ("c", _("Service goes into critical state")), + ("r", _("Service recovers to OK")), + ("f", _("Start or end of flapping state")), + ("s", _("Start or end of a scheduled downtime")), + ], + default_value = [ "w", "u", "c", "r", "f", "s" ], + ), + title = _("Notified events for services"), + help = _("This ruleset allows you to restrict notifications of service problems to certain " + "states, e.g. only notify on CRIT, but not on WARN. Please select the types " + "of events that should initiate notifications. Please note that several other " + "filters must also be passed in order for notifications to finally being sent out."), + forth = lambda x: x != 'n' and x.split(",") or [], + back = lambda x: ",".join(x) or "n", + ), + itemtype = "service" +) register_rule(group, "extra_host_conf:notification_period", @@ -1219,8 +1946,9 @@ help = _("This setting delays notifications about host problems by the " "specified amount of time. 
If the host is up again within that " "time, no notification will be sent out."), - ) - ) + ), + factory_default = 0, +) register_rule(group, "extra_service_conf:first_notification_delay", @@ -1233,7 +1961,8 @@ help = _("This setting delays notifications about service problems by the " "specified amount of time. If the service is OK again within that " "time, no notification will be sent out."), - ), + ), + factory_default = 0, itemtype = "service") register_rule(group, @@ -1300,22 +2029,43 @@ register_rule(group, "extra_service_conf:notes_url", TextAscii( - label = _("Url:"), - title = _("Notes url for Services"), + label = _("URL:"), + title = _("Notes URL for Services"), help = _("With this setting you can set links to documentations " "for each service"), - ), + attrencode = True, + ), itemtype = "service") register_rule(group, "extra_host_conf:notes_url", TextAscii( - label = _("Url:"), - title = _("Notes url for Hosts"), + label = _("URL:"), + title = _("Notes URL for Hosts"), help = _("With this setting you can set links to documentations " "for Hosts"), - ), - ) + attrencode = True, + ), +) + +register_rule(group, + "extra_service_conf:display_name", + TextUnicode( + title = _("Alternative display name for Services"), + help = _("This rule set allows you to specify an alternative name " + "to be displayed for certain services. This name is available as " + "a column when creating new views or modifying existing ones. " + "It is always visible in the details view of a service. In the " + "availability reporting there is an option for using that name " + "instead of the normal service description. It does not automatically " + "replace the normal service name in all views.

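# The two notification_options rules registered above wrap a ListChoice in a
# Transform: Nagios stores the selected events as a comma-separated string
# ("d,u,r", or "n" for no events at all), while the UI edits a Python list.
# A plain-Python sketch of that forth/back pair:

def forth(raw):                        # "d,u,r" -> ["d", "u", "r"], "n" -> []
    return raw != "n" and raw.split(",") or []

def back(selected):                    # ["d", "u", "r"] -> "d,u,r", [] -> "n"
    return ",".join(selected) or "n"

assert forth("d,u,r") == ["d", "u", "r"]
assert forth("n") == []
assert back([]) == "n"
assert back(forth("f,s")) == "f,s"     # the encoding is reversible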
    Note: The " + "purpose of this rule set is to define unique names for several well-known " + "services. It cannot rename services in general."), + size = 64, + attrencode = True, + ), + itemtype = "service") + group = "monconf/" + _("Inventory and Check_MK settings") @@ -1331,8 +2081,8 @@ register_rule(group, "ignored_services", - title = _("Ignored services"), - help = _("Services that are declared as ignored by this rule set will not be added " + title = _("Disabled services"), + help = _("Services that are declared as disabled by this rule set will not be added " "to a host during inventory (automatic service detection). Services that already " "exist will continued to be monitored but be marked as obsolete in the service " "list of a host."), @@ -1341,8 +2091,8 @@ register_rule(group, "ignored_checks", CheckTypeSelection( - title = _("Ignored checks"), - help = _("This ruleset is similar to 'Ignored services', but selects checks to be ignored " + title = _("Disabled checks"), + help = _("This ruleset is similar to 'Disabled services', but selects checks to be disabled " "by their type. This allows you to disable certain technical implementations " "such as filesystem checks via SNMP on hosts that also have the Check_MK agent " "installed."), @@ -1360,6 +2110,51 @@ itemtype = "service") group = "monconf/" + _("Various") +register_rule(group, + "clustered_services_mapping", + TextAscii( + title = _("Clustered services for overlapping clusters"), + label = _("Assign services to the following cluster:"), + help = _("It's possible to have clusters that share nodes. You could say that " + "such clusters "overlap". In such a case using the ruleset " + "Clustered services is not sufficient since it would not be clear " + "to which of the several possible clusters a service found on such a shared " + "node should be assigned to. With this ruleset you can assign services and " + "explicitely specify which cluster assign them to."), + ), + itemtype = "service", + ) + +register_rule(group, + "extra_host_conf:service_period", + TimeperiodSelection( + title = _("Service period for hosts"), + help = _("When it comes to availability reporting, you might want the report " + "to cover only certain time periods, e.g. only Monday to Friday " + "from 8:00 to 17:00. You can do this by specifying a service period " + "for hosts or services. In the reporting you can then decide to " + "include, exclude or ignore such periods und thus e.g. create a report " + "of the availability just within or without these times. Note: Changes in the " + "actual definition of a time period will only be reflected in " + "times after that change. Selecting a different service period " + "will also be reflected in the past.")), + ) + +register_rule(group, + "extra_service_conf:service_period", + TimeperiodSelection( + title = _("Service period for services"), + help = _("When it comes to availability reporting, you might want the report " + "to cover only certain time periods, e.g. only Monday to Friday " + "from 8:00 to 17:00. You can do this by specifying a service period " + "for hosts or services. In the reporting you can then decide to " + "include, exclude or ignore such periods und thus e.g. create a report " + "of the availability just within or without these times. Note: Changes in the " + "actual definition of a time period will only be reflected in " + "times after that change. 
Selecting a different service period " + "will also be reflected in the past.")), + itemtype = "service") + class MonitoringIcon(ValueSpec): def __init__(self, **kwargs): ValueSpec.__init__(self, **kwargs) @@ -1419,6 +2214,7 @@ def validate_value(self, value, varprefix): if value and value not in self.available_icons(): raise MKUserError(varprefix, _("The selected icon image does not exist.")) + ValueSpec.custom_validate(self, value, varprefix) @@ -1462,45 +2258,83 @@ "and can change.")) group = "agent/" + _("SNMP") -_snmpv3_basic_elements = [ - DropdownChoice( - choices = [ - ( "authPriv", _("authPriv")), - ( "authNoPriv", _("authNoPriv")), - ( "noAuthNoPriv", _("noAuthNoPriv")), - ], - title = _("Security level")), - DropdownChoice( - choices = [ - ( "md5", _("MD5") ), - ( "sha", _("SHA1") ), - ], - title = _("Authentication protocol")), - TextAscii(title = _("Security name")), - TextAscii(title = _("Authentication password"))] +_snmpv3_auth_elements = [ + DropdownChoice( + choices = [ + ( "md5", _("MD5") ), + ( "sha", _("SHA1") ), + ], + title = _("Authentication protocol") + ), + TextAscii( + title = _("Security name"), + attrencode = True + ), + Password( + title = _("Authentication password"), + minlen = 8, + ) +] register_rule(group, "snmp_communities", Alternative( - elements = [ - TextAscii( - title = _("SNMP community (SNMP Versions 1 and 2c)"), - allow_empty = False), - Tuple( - title = _("Credentials for SNMPv3"), - elements = _snmpv3_basic_elements), - Tuple( - title = _("Credentials for SNMPv3 including privacy options"), - elements = _snmpv3_basic_elements + [ - DropdownChoice( - choices = [ - ( "DES", _("DES") ), - ( "AES", _("AES") ), - ], - title = _("Privacy protocol")), - TextAscii(title = _("Privacy pass phrase")), - ])], - title = _("SNMP communities of monitored hosts"))) + elements = [ + TextAscii( + title = _("SNMP community (SNMP Versions 1 and 2c)"), + allow_empty = False, + attrencode = True, + ), + Tuple( + title = _("Credentials for SNMPv3 without authentication and privacy (noAuthNoPriv)"), + elements = [ + FixedValue("noAuthNoPriv", + title = _("Security Level"), + totext = _("No authentication, no privacy"), + ), + ] + ), + Tuple( + title = _("Credentials for SNMPv3 with authentication but without privacy (authNoPriv)"), + elements = [ + FixedValue("authNoPriv", + title = _("Security Level"), + totext = _("authentication but no privacy"), + ), + ] + _snmpv3_auth_elements + ), + Tuple( + title = _("Credentials for SNMPv3 with authentication and privacy (authPriv)"), + elements = [ + FixedValue("authPriv", + title = _("Security Level"), + totext = _("authentication and encryption"), + ), + ] + _snmpv3_auth_elements + [ + DropdownChoice( + choices = [ + ( "DES", _("DES") ), + ( "AES", _("AES") ), + ], + title = _("Privacy protocol") + ), + Password( + title = _("Privacy pass phrase"), + minlen = 8, + ), + ] + ), + ], + + match = lambda x: type(x) == tuple and ( \ + len(x) == 1 and 1 or \ + len(x) == 4 and 2 or 3) or 0, + + style = "dropdown", + default_value = "public", + title = _("SNMP credentials of monitored hosts"), + help = _("By default Check_MK uses the community \"public\" to contact hosts via SNMP v1/v2. This rule " + "can be used to customize the the credentials to be used when contacting hosts via SNMP."))) register_rule(group, "snmp_character_encodings", @@ -1510,14 +2344,14 @@ " always assumes UTF-8 encoding. 
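# The snmp_communities Alternative above selects the matching sub-valuespec
# purely from the shape of the stored value: a plain string is a v1/v2c
# community, a 1-tuple is noAuthNoPriv, a 4-tuple is authNoPriv and a
# 6-tuple is authPriv. A readable plain-Python version of its match lambda
# (the function name is illustrative):

def match_snmp_credentials(value):
    if type(value) != tuple:
        return 0                       # community string (SNMP v1/v2c)
    if len(value) == 1:
        return 1                       # ("noAuthNoPriv",)
    if len(value) == 4:
        return 2                       # ("authNoPriv", proto, name, password)
    return 3                           # ("authPriv", ..., privproto, passphrase)

assert match_snmp_credentials("public") == 0
assert match_snmp_credentials(("noAuthNoPriv",)) == 1
assert match_snmp_credentials(("authNoPriv", "md5", "monitor", "secret12")) == 2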
You can declare other " " other encodings here"), choices = [ - ("utf-8", _("UTF-8 (default)") ), + ("utf-8", _("UTF-8") ), ("latin1" ,_("latin1")), ] )), register_rule(group, "bulkwalk_hosts", - title = _("Hosts using bulk walk (and SNMP v2c)"), + title = _("Hosts using SNMP bulk walk (enforces SNMP v2c)"), help = _("Most SNMP hosts support SNMP version 2c. However, Check_MK defaults to version 1, " "in order to support as many devices as possible. Please use this ruleset in order " "to configure SNMP v2c for as many hosts as possible. That version has two advantages: " @@ -1528,11 +2362,20 @@ "bulk walk, please use the rule set snmpv2c_hosts instead.")) register_rule(group, + "snmp_without_sys_descr", + title = _("Hosts without system description OID"), + help = _("Devices which do not publish the system description OID " + ".1.3.6.1.2.1.1.1.0 are normally ignored by the SNMP inventory. " + "Use this ruleset to select hosts which should nevertheless " + "be checked.")) + +register_rule(group, "snmpv2c_hosts", - title = _("Hosts using SNMP v2c (and no bulk walk)"), - help = _("There exist a few devices out there that behave very badly when using SNMP bulk walk. " - "If you want to use SNMP v2c on those devices, nevertheless, then use this rule set. " - "One reason is enabling 64 bit counters.")) + title = _("Legacy SNMP devices using SNMP v2c"), + help = _("There exist a few devices out there that behave very badly when using SNMP v2c and bulk walk. " + "If you want to use SNMP v2c on those devices, nevertheless, you need to configure this device as " + "legacy snmp device and upgrade it to SNMP v2c (without bulk walk) with this rule set. One reason is enabling 64 bit counters. " + "Note: This rule won't apply if the device is already configured as SNMP v2c device.")) register_rule(group, "snmp_timing", @@ -1542,25 +2385,29 @@ "for the SNMP access to devices."), elements = [ ( "timeout", - Integer( + Float( title = _("Timeout between retries"), - help = _("The default is 1 sec."), + help = _("After a request is sent to the remote SNMP agent we will wait up to this " + "number of seconds until assuming the answer get lost and retrying."), default_value = 1, - minvalue = 1, + minvalue = 0.1, maxvalue = 60, + allow_int = True, unit = _("sec"), + size = 6, ), ), ( "retries", Integer( title = _("Number of retries"), - help = _("The default is 5."), default_value = 5, - minvalue = 1, + minvalue = 0, maxvalue = 50, ) ), - ]), + ] + ), + factory_default = { "timeout" : 1, "retries" : 5 }, match = "dict") @@ -1573,32 +2420,126 @@ "is configured with this ruleset will then use the information from that " "file instead of using real SNMP. ")) +register_rule(group, + "snmp_ports", + Integer( + minvalue = 1, + maxvalue = 65535, + default_value = 161 + ), + title = _("UDP port used for SNMP"), + help = _("This variable allows you to customize the UDP port to " + "be used to communicate via SNMP on a per-host-basis."), +) + group = "agent/" + _("Check_MK Agent") register_rule(group, "agent_ports", Integer( - help = _("This variable allows to specify the TCP port to " - "be used to connect to the agent on a per-host-basis. "), minvalue = 1, maxvalue = 65535, default_value = 6556), - title = _("TCP port for connection to Check_MK agent") + title = _("TCP port for connection to Check_MK agent"), + help = _("This variable allows to specify the TCP port to " + "be used to connect to the agent on a per-host-basis. 
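# A rough worst-case estimate for the snmp_timing rule above, assuming the
# usual retry semantics (the initial request plus each retry waits up to the
# full timeout). This is only a back-of-the-envelope sketch; the exact
# behavior depends on the underlying SNMP implementation.

def worst_case_wait(timeout_sec, retries):
    return timeout_sec * (retries + 1)

assert worst_case_wait(1.0, 5) == 6.0  # factory defaults: up to ~6 s per request
assert worst_case_wait(0.5, 2) == 1.5  # tighter timing bounds a dead device at 1.5 s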
"), ) - +register_rule(group, + "check_mk_exit_status", + Dictionary( + elements = [ + ( "connection", + MonitoringState( + default_value = 2, + title = _("State in case of connection problems")), + ), + ( "timeout", + MonitoringState( + default_value = 2, + title = _("State in case of a overall timeout")), + ), + ( "missing_sections", + MonitoringState( + default_value = 1, + title = _("State if just some agent sections are missing")), + ), + ( "empty_output", + MonitoringState( + default_value = 2, + title = _("State in case of empty agent output")), + ), + ( "wrong_version", + MonitoringState( + default_value = 1, + title = _("State in case of wrong agent version")), + ), + ( "exception", + MonitoringState( + default_value = 3, + title = _("State in case of unhandled exception")), + ), + ], + ), + factory_default = { "connection" : 2, "missing_sections" : 1, "empty_output" : 2, "wrong_version" : 1, "exception": 3 }, + title = _("Status of the Check_MK service"), + help = _("This ruleset specifies the total status of the Check_MK service in " + "case of various error situations. One use case is the monitoring " + "of hosts that are not always up. You can have Check_MK an OK status " + "here if the host is not reachable. Note: the Timeout setting only works " + "when using the Check_MK Micro Core."), + match = "dict", +) register_rule(group, - "datasource_programs", - TextAscii( - title = _("Individual program call instead of agent access"), - help = _("For agent based checks Check_MK allows you to specify an alternative " - "program that should be called by Check_MK instead of connecting the agent " - "via TCP. That program must output the agent's data on standard output in " - "the same format the agent would do. This is for example useful for monitoring " - "via SSH. The command line may contain the placeholders <IP> and " - "<HOST>."), - label = _("Command line to execute"), - size = 80, - attrencode = True)) + "check_mk_agent_target_versions", + Transform( + CascadingDropdown( + title = _("Check for correct version of Check_MK agent"), + help = _("Here you can make sure that all of your Check_MK agents are running" + " one specific version. Agents running " + " a different version return a non-OK state."), + choices = [ + ("ignore", _("Ignore the version")), + ("site", _("Same version as the monitoring site")), + ("specific", _("Specific version"), + TextAscii( + allow_empty = False, + ) + ), + ("at_least", _("At least"), + Dictionary( + elements = [ + ('release', TextAscii( + title = _('Official Release version'), + allow_empty = False, + )), + ('daily_build', TextAscii( + title = _('Daily build'), + allow_empty = False, + )), + ] + ), + ), + ], + default_value = "ignore", + ), + # In the past, this was a OptionalDropdownChoice() which values could be strings: + # ignore, site or a custom string representing a version number. + forth = lambda x: type(x) == str and x not in [ "ignore", "site" ] and ("specific", x) or x + ) +) +register_rule(group, + "piggyback_translation", + HostnameTranslation( + title = _("Hostname translation for piggybacked hosts"), + help = _("Some agents or agent plugins send data not only for the queried host but also " + "for other hosts "piggyback" with their own data. This is the case " + "for the vSphere special agent and the SAP R/3 plugin, for example. The hostnames " + "that these agents send must match your hostnames in your monitoring configuration. " + "If that is not the case, then with this rule you can define a hostname translation. 
" + "Note: This rule must be configured for the "pig" - i.e. the host that the " + "agent is running on. It is not applied to the translated piggybacked hosts."), + ), + match = "dict") diff -Nru check-mk-1.2.2p3/plugins/wato/check_parameters.py check-mk-1.2.6p12/plugins/wato/check_parameters.py --- check-mk-1.2.2p3/plugins/wato/check_parameters.py 2013-11-05 09:23:09.000000000 +0000 +++ check-mk-1.2.6p12/plugins/wato/check_parameters.py 2015-09-21 10:59:54.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. @@ -26,21 +26,19 @@ # Rules for configuring parameters of checks (services) -register_rulegroup("checkparams", _("Parameters for Inventorized Checks"), - _("Levels and other parameters for checks found by the Check_MK inventory.\n" +register_rulegroup("checkparams", _("Parameters for discovered services"), + _("Levels and other parameters for checks found by the Check_MK service discovery.\n" "Use these rules in order to define parameters like filesystem levels, " "levels for CPU load and other things for services that have been found " - "by the automatic service detection (inventory) of Check_MK.")) + "by the automatic service discovery of Check_MK.")) group = "checkparams" subgroup_networking = _("Networking") -# subgroup_windows = _("Windows") subgroup_storage = _("Storage, Filesystems and Files") subgroup_os = _("Operating System Resources") -# subgroup_time = _("Time synchronization") subgroup_printing = _("Printers") -subgroup_environment = _("Temperature, Humidity, etc.") -subgroup_applications = _("Applications, Processes & Services") +subgroup_environment = _("Temperature, Humidity, Electrical Parameters, etc.") +subgroup_applications = _("Applications, Processes & Services") subgroup_virt = _("Virtualization") subgroup_hardware = _("Hardware, BIOS") subgroup_inventory = _("Inventory - automatic service detection") @@ -52,44 +50,8 @@ help = _("This rule sets the parameters for the host checks (via check_icmp) " "and also for PING checks on ping-only-hosts. For the host checks only the " "critical state is relevant, the warning levels are ignored."), - elements = [ - ( "rta", - Tuple( - title = _("Round trip average"), - elements = [ - Float(title = _("Warning at"), unit = "ms"), - Float(title = _("Critical at"), unit = "ms"), - ])), - ( "loss", - Tuple( - title = _("Packet loss"), - help = _("When the percentual number of lost packets is equal or greater then " - "the level, then the according state is triggered. The default for critical " - "is 100%. That means that the check is only critical if all packets " - "are lost."), - elements = [ - Percentage(title = _("Warning at")), - Percentage(title = _("Critical at")), - ])), - - ( "packets", - Integer( - title = _("Number of packets"), - help = _("Number ICMP echo request packets to send to the target host on each " - "check execution. All packets are sent directly on check execution. 
Afterwards " - "the check waits for the incoming packets."), - minvalue = 1, - maxvalue = 20, - )), - - ( "timeout", - Integer( - title = _("Total timeout of check"), - help = _("After this time (in seconds) the check is aborted, regardless " - "of how many packets have been received yet."), - minvalue = 1, - )), - ]), + elements = check_icmp_params, + ), match="dict") register_rule(group + '/' + subgroup_applications, @@ -110,16 +72,17 @@ ('I', _('IGNORE')), ], ), - RegExp( + RegExpUnicode( title = _("Pattern (Regex)"), size = 40, ), - TextAscii( + TextUnicode( title = _("Comment"), size = 40, ), ] ), + title = _("Logfile pattern rules"), help = _('

    You can define one or several patterns (regular expressions) in each logfile pattern rule. ' 'These patterns are applied to the selected logfiles to reclassify the ' 'matching log messages. The first pattern which matches a line will ' @@ -131,21 +94,28 @@ add_label = _("Add pattern"), ), itemtype = 'item', - itemname = 'logfile', + itemname = 'Logfile', itemhelp = _("Put the item names of the logfiles here. For example \"System$\" " - "to select the service \"LOG System\"."), + "to select the service \"LOG System\". You can use regular " + "expressions which must match the beginning of the logfile name."), match = 'all', ) register_rule(group + '/' + subgroup_inventory, varname = "inventory_services_rules", - title = _("Windows Service Inventory"), + title = _("Windows Service Discovery"), valuespec = Dictionary( elements = [ ('services', ListOfStrings( title = _("Services (Regular Expressions)"), - help = _('Matching the begining of the service names (regular expression). ' - 'If no service is given, this rule will match all services.'), + help = _('Regular expressions matching the begining of the internal name ' + 'or the description of the service. ' + 'If no name is given then this rule will match all services. The ' + 'match is done on the beginning of the service name. It ' + 'is done case sensitive. You can do a case insensitive match ' + 'by prefixing the regular expression with (?i). Example: ' + '(?i).*mssql matches all services which contain MSSQL ' + 'or MsSQL or mssql or...'), orientation = "horizontal", )), ('state', DropdownChoice( @@ -164,14 +134,14 @@ title = _("Create check if service is in start mode"), )), ], - help = _('

    This rule can be used to configure the inventory of the windows services check. ' - 'You can configure specific window services to be monitored by the windows check by ' - 'selecting them by name, current state during the inventory or start mode.'), + help = _('This rule can be used to configure the inventory of the windows services check. ' + 'You can configure specific windows services to be monitored by the windows check by ' + 'selecting them by name, current state during the inventory, or start mode.'), ), match = 'all', ) -#dublicate: check_mk_configuration.py +#duplicate: check_mk_configuration.py _if_portstate_choices = [ ( '1', 'up(1)'), ( '2', 'down(2)'), @@ -182,7 +152,7 @@ ( '7', 'lowerLayerDown(7)'), ] -#dublicate: check_mk_configuration.py +#duplicate: check_mk_configuration.py _if_porttype_choices = [ ("1", "other(1)" ), ("2", "regular1822(2)" ), ("3", "hdh1822(3)" ), ("4", "ddnX25(4)" ), ("5", "rfc877x25(5)" ), ("6", "ethernetCsmacd(6)" ), ("7", "iso88023Csmacd(7)" ), ("8", @@ -252,114 +222,372 @@ "fcipLink(224)" ), ("225", "rpr(225)" ), ("226", "qam(226)" ), ("227", "lmp(227)" ), ("228", "cblVectaStar(228)" ), ("229", "docsCableMCmtsDownstream(229)" ), ("230", "adsl2(230)" ), ] -register_rule(group + '/' + subgroup_networking, +register_rule(group + '/' + subgroup_inventory, varname = "inventory_if_rules", - title = _("Network interface and switch port Inventory"), + title = _("Network Interface and Switch Port Discovery"), valuespec = Dictionary( elements = [ - ("use_desc", Checkbox( + ( "use_desc", + Checkbox( title = _("Use description as service name for network interface checks"), label = _("use description"), help = _("This option lets Check_MK use the interface description as item instead " "of the port number. If no description is available then the port number is " "used anyway."))), - ("use_alias", Checkbox( + ( "use_alias", + Checkbox( title = _("Use alias as service name for network interface checks"), label = _("use alias"), help = _("This option lets Check_MK use the alias of the port (ifAlias) as item instead " "of the port number. If no alias is available then the port number is used " "anyway."))), - ("portstates", ListChoice(title = _("Network interface port states to inventorize"), - help = _("When doing inventory on switches or other devices with network interfaces " + ( "match_alias", + ListOfStrings( + title = _("Match interface alias (regex)"), + help = _("Only discover interfaces whose alias matches one of the configured " + "regular expressions. The match is done on the beginning of the alias. 
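# The discovery rules above match names and aliases with regular expressions
# that are anchored at the beginning and case sensitive unless prefixed with
# (?i). A plain re sketch of that convention (helper name is illustrative):
import re

def name_matches(pattern, name):
    return re.match(pattern, name) is not None         # anchored at the start

assert name_matches("MSSQL", "MSSQLSERVER")            # prefix match succeeds
assert not name_matches("mssql", "MSSQLSERVER")        # case sensitive by default
assert name_matches("(?i).*mssql", "SQL Server (MSSQLSERVER)")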
" + "This allows you to select interfaces based on the alias without having " + "the alias be part of the service description."), + orientation = "horizontal", + valuespec = RegExp(size = 32), + )), + + ( "portstates", + ListChoice(title = _("Network interface port states to discover"), + help = _("When doing discovery on switches or other devices with network interfaces " "then only ports found in one of the configured port states will be added to the monitoring."), - choices = _if_portstate_choices)), - ("porttypes", ListChoice(title = _("Network interface port types to inventorize"), - help = _("When doing inventory on switches or other devices with network interfaces " + choices = _if_portstate_choices, + toggle_all = True, + default_value = ['1'], + )), + ( "porttypes", + ListChoice(title = _("Network interface port types to discover"), + help = _("When doing discovery on switches or other devices with network interfaces " "then only ports of the specified types will be created services for."), choices = _if_porttype_choices, - columns = 3)), - + columns = 3, + toggle_all = True, + default_value = [ '6', '32', '62', '117', '127', '128', '129', '180', '181', '182', '205','229' ], + )), + ( "rmon", + Checkbox( + title = _("Collect RMON statistics data"), + help = _("If you enable this option, for every RMON capable switch port an additional service will " + "be created which is always OK and collects RMON data. This will give you detailed information " + "about the distribution of packet sizes transferred over the port. Note: currently " + "this extra RMON check does not honor the inventory settings for switch ports. In a future " + "version of Check_MK RMON data may be added to the normal interface service and not add " + "an additional service."), + label = _("Create extra service with RMON statistics data (if available for the device)"), + )), ], - help = _('

    This rule can be used to control the inventory for network ports. ' + help = _('This rule can be used to control the inventory for network ports. ' 'You can configure the port types and port states for inventory' 'and the use of alias or description as service name.'), ), match = 'dict', ) + +register_rule(group + '/' + subgroup_inventory, + varname = "brocade_fcport_inventory", + title = _("Brocade Port Discovery"), + valuespec = Dictionary( + elements = [ + ("use_portname", Checkbox( + title = _("Use port name as service name"), + label = _("use port name"), + default_value = True, + help = _("This option lets Check_MK use the port name as item instead of the " + "port number. If no description is available then the port number is " + "used anyway."))), + ("show_isl", Checkbox( + title = _("add \"ISL\" to service description for interswitch links"), + label = _("add ISL"), + default_value = True, + help = _("This option lets Check_MK add the string \"ISL\" to the service " + "description for interswitch links."))), + ("admstates", ListChoice(title = _("Administrative port states to discover"), + help = _("When doing service discovery on brocade switches only ports with the given administrative " + "states will be added to the monitoring system."), + choices = _brocade_fcport_adm_choices, + columns = 1, + toggle_all = True, + default_value = ['1', '3', '4' ], + )), + ("phystates", ListChoice(title = _("Physical port states to discover"), + help = _("When doing service discovery on brocade switches only ports with the given physical " + "states will be added to the monitoring system."), + choices = _brocade_fcport_phy_choices, + columns = 1, + toggle_all = True, + default_value = [ 3, 4, 5, 6, 7, 8, 9, 10 ] + )), + ("opstates", ListChoice(title = _("Operational port states to discover"), + help = _("When doing service discovery on brocade switches only ports with the given operational " + "states will be added to the monitoring system."), + choices = _brocade_fcport_op_choices, + columns = 1, + toggle_all = True, + default_value = [ 1, 2, 3, 4 ] + )), + ], + help = _('This rule can be used to control the service discovery for brocade ports. ' + 'You can configure the port states for inventory ' + 'and the use of the description as service name.'), + ), + match = 'dict', +) + +process_level_elements = [ + ('levels', Tuple( + title = _('Levels for process count'), + help = _("Please note that if you specify and also if you modify levels here, the change is activated " + "only during an inventory. Saving this rule is not enough. This is due to the nature of inventory rules."), + elements = [ + Integer( + title = _("Critical below"), + unit = _("processes"), + default_value = 1, + ), + Integer( + title = _("Warning below"), + unit = _("processes"), + default_value = 1, + ), + Integer( + title = _("Warning above"), + unit = _("processes"), + default_value = 99999, + ), + Integer( + title = _("Critical above"), + unit = _("processes"), + default_value = 99999, + ), + ], + )), + ( "cpulevels", + Tuple( + title = _("Levels on CPU utilization"), + elements = [ + Percentage(title = _("Warning at"), default_value = 90, maxvalue = 10000), + Percentage(title = _("Critical at"), default_value = 98, maxvalue = 10000), + ], + )), + ( "cpu_average", + Integer( + title = _("CPU Averaging"), + help = _("By activating averaging, Check_MK will compute the average of " + "the CPU utilization over a given interval. 
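# The 'levels' entry defined above is a quadruple (crit_below, warn_below,
# warn_above, crit_above) for the process count. A sketch of the conventional
# evaluation (0=OK, 1=WARN, 2=CRIT), consistent with the documented default
# (1, 1, 99999, 99999) where zero running processes is critical -- the exact
# logic lives in the ps check itself:

def levels_state(count, levels=(1, 1, 99999, 99999)):
    crit_below, warn_below, warn_above, crit_above = levels
    if count < crit_below or count > crit_above:
        return 2
    if count < warn_below or count > warn_above:
        return 1
    return 0

assert levels_state(0) == 2                  # no process running -> CRIT
assert levels_state(1) == 0                  # exactly one process -> OK
assert levels_state(7, (1, 2, 5, 10)) == 1   # more than warn_above -> WARN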
If you have defined " + "alerting levels then these will automatically be applied on the " + "averaged value. This helps to mask out short peaks. "), + unit = _("minutes"), + minvalue = 1, + default_value = 15, + ) + ), + ( "virtual_levels", + Tuple( + title = _("Virtual memory usage"), + elements = [ + Filesize(title = _("Warning at")), + Filesize(title = _("Critical at")), + ], + )), + ( "resident_levels", + Tuple( + title = _("Physical memory usage"), + elements = [ + Filesize(title = _("Warning at")), + Filesize(title = _("Critical at")), + ], + )), + ('handle_count', Tuple( + title = _('Handle Count (Windows only)'), + help = _("The number of object handles in the processes object table. This includes open handles to " + "threads, files and other resources like registry keys."), + elements = [ + Integer( + title = _("Warning above"), + unit = _("handles"), + ), + Integer( + title = _("Critical above"), + unit = _("handles"), + ), + ], + )), +] + +# In version 1.2.4 the check parameters for the resulting ps check +# where defined in the dicovery rule. We moved that to an own rule +# in the classical check parameter style. In order to support old +# configuration we allow reading old discovery rules and ship these +# settings in an optional sub-dictionary. +def convert_inventory_processes(old_dict): + new_dict = { "default_params" : {} } + for key, value in old_dict.items(): + if key in ['levels', 'handle_count', 'cpulevels', 'cpu_average', 'virtual_levels', 'resident_levels']: + new_dict["default_params"][key] = value + elif key != "perfdata": + new_dict[key] = value + return new_dict + register_rule(group + '/' + subgroup_inventory, varname = "inventory_processes_rules", - title = _('Process Inventory'), + title = _('Process Discovery'), + help = _("This ruleset defines criteria for automatically creating checks for running processes " + "based upon what is running when the service discovery is done. These services will be " + "created with default parameters. They will get critical when no process is running and " + "OK otherwise. You can parameterize the check with the ruleset State and count of processes."), + valuespec = Transform( + Dictionary( + elements = [ + ('descr', TextAscii( + title = _('Process Name'), + style = "dropdown", + allow_empty = False, + help = _('

    The process name may contain one or more occurances of %s. If you do this, then the pattern must be a regular ' + 'expression and be prefixed with ~. For each %s in the description, the expression has to contain one "group". A group ' + 'is a subexpression enclosed in brackets, for example (.*) or ([a-zA-Z]+) or (...). When the inventory finds a process ' + 'matching the pattern, it will substitute all such groups with the actual values when creating the check. That way one ' + 'rule can create several checks on a host.

    ' + '

    If the pattern contains more groups then occurrances of %s in the service description then only the first matching ' + 'subexpressions are used for the service descriptions. The matched substrings corresponding to the remaining groups ' + 'are copied into the regular expression, nevertheless.

    ' + '

    As an alternative to %s you may also use %1, %2, etc. ' + 'These will be replaced by the first, second, ... matching group. This allows you to reorder things.

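# Demonstration of the convert_inventory_processes() helper defined above:
# check parameters that used to live directly in the discovery rule are moved
# into the "default_params" sub-dictionary and the obsolete "perfdata" flag
# is dropped. The function is repeated here so the example is self-contained.

def convert_inventory_processes(old_dict):
    new_dict = {"default_params": {}}
    for key, value in old_dict.items():
        if key in ['levels', 'handle_count', 'cpulevels',
                   'cpu_average', 'virtual_levels', 'resident_levels']:
            new_dict["default_params"][key] = value
        elif key != "perfdata":
            new_dict[key] = value
    return new_dict

old_rule = {"descr": "NTP", "levels": (1, 1, 2, 2), "perfdata": True}
assert convert_inventory_processes(old_rule) == {
    "descr": "NTP",
    "default_params": {"levels": (1, 1, 2, 2)},
}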
    ' + ), + )), + ('match', Alternative( + title = _("Process Matching"), + style = "dropdown", + elements = [ + TextAscii( + title = _("Exact name of the process without argments"), + label = _("Executable:"), + size = 50, + ), + Transform( + RegExp(size = 50), + title = _("Regular expression matching command line"), + label = _("Command line:"), + help = _("This regex must match the beginning of the complete " + "command line of the process including arguments"), + forth = lambda x: x[1:], # remove ~ + back = lambda x: "~" + x, # prefix ~ + ), + FixedValue( + None, + totext = "", + title = _("Match all processes"), + ) + ], + match = lambda x: (not x and 2) or (x[0] == '~' and 1 or 0), + default_value = '/usr/sbin/foo', + )), + ('user', Alternative( + title = _('Name of the User'), + style = "dropdown", + elements = [ + FixedValue( + None, + totext = "", + title = _("Match all users"), + ), + TextAscii( + title = _('Exact name of the user'), + label = _("User:"), + ), + FixedValue( + False, + title = _('Grab user from found processess'), + totext = '', + ), + ], + help = _('

    The user specification can either be a user name (string). The inventory will then trigger only if that user matches ' + 'the user the process is running as and the resulting check will require that user. Alternatively you can specify ' + '"grab user". If user is not selected the created check will not check for a specific user.

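# Sketch of the "~" convention used by the 'match' Alternative above: a value
# starting with "~" is a regex (edited without the tilde via forth/back), any
# other string is an exact executable name, and None matches every process.
# The function below is a readable form of the match lambda (name illustrative):

def match_alternative(value):
    if not value:
        return 2                           # None -> "Match all processes"
    return value[0] == '~' and 1 or 0      # regex vs. exact name

forth = lambda x: x[1:]                    # strip "~" before editing
back = lambda x: "~" + x                   # restore "~" when saving

assert match_alternative(None) == 2
assert match_alternative("/usr/sbin/sshd") == 0
assert match_alternative("~/usr/bin/java .*tomcat") == 1
assert back(forth("~cron")) == "~cron"     # round trip keeps the prefix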
    ' + '

    Specifying "grab user" makes the created check expect the process to run as the same user as during inventory: the user ' + 'name will be hardcoded into the check. In that case if you put %u into the service description, that will be replaced ' + 'by the actual user name during inventory. You need that if your rule might match for more than one user - your would ' + 'create duplicate services with the same description otherwise.

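# The paragraph above explains the %u placeholder: with "grab user" the user
# found at discovery time is hardcoded and substituted into the service
# description. A toy illustration (the real substitution happens inside the
# discovery code, not via this hypothetical helper):

def build_description(template, grabbed_user):
    return template.replace("%u", grabbed_user)

assert build_description("Process sshd of %u", "root") == "Process sshd of root"
assert build_description("Process sshd of %u", "backup") == "Process sshd of backup"
# Without %u in the template, two matching users would yield two services
# with the same description - exactly the duplicate the help text warns about.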
    Windows users are specified by the namespace followed by ' + 'the actual user name. For example "\\\\NT AUTHORITY\NETWORK SERVICE" or "\\\\CHKMKTEST\Administrator".

    '), + )), + ('default_params', + Dictionary( + title = _("Default parameters for detected services"), + help = _("Here you can select default parameters that are being set " + "for detected services. Note: the preferred way for setting parameters is to use " + "the rule set " + "State and Count of Processes instead. " + "A change there will immediately be active, while a change in this rule " + "requires a re-discovery of the services."), + elements = process_level_elements, + )), + ], + required_keys = [ "descr" ], + ), + forth = convert_inventory_processes, + ), + match = 'all', +) + + +register_rule(group + '/' + subgroup_inventory, + varname = "inv_domino_tasks_rules", + title = _('Lotus Domino Task Inventory'), + help = _("Keep in mind that all configuration parameters in this rule are only applied during the " + "hosts inventory. Any changes later on require a host re-inventory"), valuespec = Dictionary( elements = [ ('descr', TextAscii( title = _('Service Description'), - help = _('

    The service description may contain one or more occurances of %s. If you do this, then the pattern must be a regular ' - 'expression and be prefixed with ~. For each %s in the description, the expression has to contain one "group". A group ' - 'is a subexpression enclosed in brackets, for example (.*) or ([a-zA-Z]+) or (...). When the inventory finds a process ' - 'matching the pattern, it will substitute all such groups with the actual values when creating the check. That way one ' - 'rule can create several checks on a host.

    ' - '

    If the pattern contains more groups then occurrances of %s in the service description then only the first matching ' - 'subexpressions are used for the service descriptions. The matched substrings corresponding to the remaining groups ' - 'are copied into the regular expression, nevertheless.

    ' + allow_empty = False, + help = _('

    The service description may contain one or more occurances of %s. In this ' + 'case, the pattern must be a regular expression prefixed with ~. For each ' + '%s in the description, the expression has to contain one "group". A group ' + 'is a subexpression enclosed in brackets, for example (.*) or ' + '([a-zA-Z]+) or (...). When the inventory finds a task ' + 'matching the pattern, it will substitute all such groups with the actual values when ' + 'creating the check. In this way one rule can create several checks on a host.

    ' + '

    If the pattern contains more groups than occurrences of %s in the service ' + 'description, only the first matching subexpressions are used for the service ' + 'descriptions. The matched substrings corresponding to the remaining groups ' + 'are nevertheless copied into the regular expression.

    ' '

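# The paragraphs above describe how regex groups feed the %s placeholders in
# the service description. A toy version of that substitution for the simple
# case of one group per %s (the real logic lives in the discovery code, and
# the helper name is illustrative):
import re

def expand_description(descr, pattern, found_name):
    m = re.match(pattern[1:], found_name)     # "~" marks a regex; strip it
    if m is None:
        return None                           # no match -> nothing discovered
    result = descr
    for group in m.groups():                  # one %s per group, in order
        result = result.replace("%s", group, 1)
    return result

assert expand_description("Task %s on %s", r"~(\w+)server_(\w+)",
                          "httpserver_prod") == "Task http on prod"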
    As an alternative to %s you may also use %1, %2, etc. ' - 'These will be replaced by the first, second, ... matching group. This allows you to reorder things.

    ' + 'These expressions will be replaced by the first, second, ... matching group, allowing ' + 'you to reorder things.

    ' ), )), ('match', Alternative( - title = _("Process Matching"), + title = _("Task Matching"), elements = [ TextAscii( - title = _("Exact name of the process without argments"), + title = _("Exact name of the task"), size = 50, ), Transform( RegExp(size = 50), title = _("Regular expression matching command line"), - help = _("This regex must match the beginning of the complete " - "command line of the process including arguments"), + help = _("This regex must match the beginning of the task"), forth = lambda x: x[1:], # remove ~ back = lambda x: "~" + x, # prefix ~ ), FixedValue( None, totext = "", - title = _("Match all processes"), + title = _("Match all tasks"), ) ], match = lambda x: (not x and 2) or (x[0] == '~' and 1 or 0), - default_value = '/usr/sbin/foo', - )), - ('user', Alternative( - title = _('Name of the User'), - elements = [ - FixedValue( - None, - totext = "", - title = _("Match all users"), - ), - TextAscii( - title = _('Exact name of the user'), - ), - FixedValue( - False, - title = _('Grab user from found processess'), - totext = '', - ), - ], - help = _('

    The user specification can either be a user name (string). The inventory will then trigger only if that user matches ' - 'the user the process is running as and the resulting check will require that user. Alternatively you can specify ' - '"grab user". If user is not selected the created check will not check for a specific user.

    ' - '

    Specifying "grab user" makes the created check expect the process to run as the same user as during inventory: the user ' - 'name will be hardcoded into the check. In that case if you put %u into the service description, that will be replaced ' - 'by the actual user name during inventory. You need that if your rule might match for more than one user - your would ' - 'create duplicate services with the same description otherwise.

    '), - )), - ('perfdata', Checkbox( - title = _('Performance Data'), - label = _('Collect count of processes, memory and cpu usage'), + default_value = 'foo', )), ('levels', Tuple( title = _('Levels'), + help = _("Please note that if you specify and also if you modify levels here, the change is " + "activated only during an inventory. Saving this rule is not enough. This is due to " + "the nature of inventory rules."), elements = [ Integer( title = _("Critical below"), @@ -384,14 +612,109 @@ ], )), ], - optional_keys = [], + required_keys = ['match', 'levels', 'descr'], + ), + match = 'all', +) + +register_rule(group + '/' + subgroup_inventory, + varname = "inventory_sap_values", + title = _('SAP R/3 Single Value Inventory'), + valuespec = Dictionary( + elements = [ + ('match', Alternative( + title = _("Node Path Matching"), + elements = [ + TextAscii( + title = _("Exact path of the node"), + size = 100, + ), + Transform( + RegExp(size = 100), + title = _("Regular expression matching the path"), + help = _("This regex must match the beginning of the complete " + "path of the node as reported by the agent"), + forth = lambda x: x[1:], # remove ~ + back = lambda x: "~" + x, # prefix ~ + ), + FixedValue( + None, + totext = "", + title = _("Match all nodes"), + ) + ], + match = lambda x: (not x and 2) or (x[0] == '~' and 1 or 0), + default_value = 'SAP CCMS Monitor Templates/Dialog Overview/Dialog Response Time/ResponseTime', + )), + ('limit_item_levels', Integer( + title = _("Limit Path Levels for Service Names"), + unit = _('path levels'), + minvalue = 1, + help = _("The service descriptions of the inventorized services are named like the paths " + "in SAP. You can use this option to let the inventory function only use the last " + "x path levels for naming."), + )), + ], + optional_keys = ['limit_item_levels'], + ), + match = 'all', +) + +register_rule(group + '/' + subgroup_inventory, + varname = "sap_value_groups", + title = _('SAP Value Grouping Patterns'), + help = _('The check sap.value normally creates one service for each SAP value. ' + 'By defining grouping patterns, you can switch to the check sap.value-groups. ' + 'That check monitors a list of SAP values at once.'), + valuespec = ListOf( + Tuple( + help = _("This defines one value grouping pattern"), + show_titles = True, + orientation = "horizontal", + elements = [ + TextAscii( + title = _("Name of group"), + ), + Tuple( + show_titles = True, + orientation = "vertical", + elements = [ + RegExpUnicode(title = _("Include Pattern")), + RegExpUnicode(title = _("Exclude Pattern")) + ], + ), + ], + ), + add_label = _("Add pattern group"), ), match = 'all', ) -checkgroups = [] +register_rule(group + '/' + subgroup_inventory, + varname = "inventory_heartbeat_crm_rules", + title = _("Heartbeat CRM Discovery"), + valuespec = Dictionary( + elements = [ + ("naildown_dc", Checkbox( + title = _("Naildown the DC"), + label = _("Mark the currently distinguished controller as preferred one"), + help = _("Nails down the DC to the node which is the DC during discovery. The check " + "will report CRITICAL when another node becomes the DC during later checks.") + )), + ("naildown_resources", Checkbox( + title = _("Naildown the resources"), + label = _("Mark the nodes of the resources as preferred one"), + help = _("Nails down the resources to the node which is holding them during discovery. 
" + "The check will report CRITICAL when another holds the resource during later checks.") + )), + ], + help = _('This rule can be used to control the discovery for Heartbeat CRM checks.'), + optional_keys = [], + ), + match = 'dict', +) -checkgroups.append(( +register_check_parameters( subgroup_applications, "ad_replication", _("Active Directory Replication"), @@ -407,9 +730,55 @@ help = _("The name of the replication partner (Destination DC Site/Destination DC)."), ), "first" -)) +) + +register_check_parameters( + subgroup_applications, + "mq_queues", + _("Apache ActiveMQ Queue lengths"), + Dictionary( + elements = [ + ("size", + Tuple( + title = _("Levels for the queue length"), + help = _("Set the maximum and minimum length for the queue size"), + elements = [ + Integer(title="Warning at a size of"), + Integer(title="Critical at a size of"), + ] + )), + ("consumerCount", + Tuple( + title = _("Levels for the consumer count"), + help = _("Consumer Count is the size of connected consumers to a queue"), + elements = [ + Integer(title="Warning less then"), + Integer(title="Critical less then"), + ] + )), + ] + ), + TextAscii( title=_("Queue Name"), + help=_("The name of the queue like in the Apache queue manager")), + "first", +) + +register_check_parameters( + subgroup_applications, + "websphere_mq", + _("Maximum number of messages in Websphere Message Queues"), + Tuple( + title = _('Maximum number of messages'), + elements = [ + Integer(title = _("Warning at"), default_value = 1000 ), + Integer(title = _("Critical at"), default_value = 1200 ), + ] + ), + TextAscii(title = _("Name of Channel or Queue")), + None, +) -checkgroups.append(( +register_check_parameters( subgroup_applications, "plesk_backups", _("Plesk Backups"), @@ -453,29 +822,31 @@ title = _("Service descriptions"), allow_empty = False ), - None)) + None +) -checkgroups.append(( +register_check_parameters( subgroup_storage, "brocade_fcport", - _("Brocade FC FibreChannel ports"), + _("Brocade FibreChannel ports"), Dictionary( elements = [ ("bw", Alternative( title = _("Throughput levels"), - help = _("In few cases you have to set the link speed manually it you want " - "to use relative levels"), + help = _("Please note: in a few cases the automatic detection of the link speed " + "does not work. In these cases you have to set the link speed manually " + "below if you want to monitor percentage values"), elements = [ Tuple( - title = _("Maximum bandwidth in relation to the total traffic"), + title = _("Used bandwidth of port relative to the link speed"), elements = [ Percentage(title = _("Warning at"), unit = _("percent")), Percentage(title = _("Critical at"), unit = _("percent")), ] ), Tuple( - title = _("Megabyte bandwidth of the port"), + title = _("Used Bandwidth of port in megabyte/s"), elements = [ Integer(title = _("Warning at"), unit = _("MByte/s")), Integer(title = _("Critical at"), unit = _("MByte/s")), @@ -486,9 +857,8 @@ ("assumed_speed", Float( title = _("Assumed link speed"), - help = _("If the automatic detection of the link " - "speed does not work and you want monitors the relative levels of the " - "throughtput you have to set the link speed here."), + help = _("If the automatic detection of the link speed does " + "not work you can set the link speed here."), unit = _("GByte/s") ) ), @@ -530,13 +900,13 @@ ), ("average", Integer ( - title = _("Average"), - help = _("A number in minutes. 
If this parameter is set, then " - "averaging is turned on and all levels will be applied " - "to the averaged values, not the the current ones. Per " + title = _("Averaging"), + help = _("If this parameter is set, all throughputs will be averaged " + "over the specified time interval before levels are being applied. Per " "default, averaging is turned off. "), unit = _("minutes"), minvalue = 1, + default_value = 60, ) ), ("phystate", @@ -593,13 +963,13 @@ ] ), TextAscii( - title = _("Portname"), + title = _("port name"), help = _("The name of the switch port"), ), "first" -)) +) -checkgroups.append(( +register_check_parameters( subgroup_storage, "fs_mount_options", _("Filesystem mount options (Linux/UNIX)"), @@ -612,11 +982,38 @@ TextAscii( title = _("Mount point"), allow_empty = False), - "first")) - + "first" +) +register_check_parameters( + subgroup_os, + "uptime", + _("Uptime since last reboot"), + Dictionary( + elements = [ + ( "min", + Tuple( + title = _("Minimum required uptime"), + elements = [ + Age(title = _("Warning if below")), + Age(title = _("Critical if below")), + ] + )), + ( "max", + Tuple( + title = _("Maximum allowed uptime"), + elements = [ + Age(title = _("Warning at")), + Age(title = _("Critical at")), + ] + )), + ] + ), + None, + "first", +) -checkgroups.append(( +register_check_parameters( subgroup_os, "systemtime", _("Windows system time offset"), @@ -629,9 +1026,63 @@ ), None, "first" -)) +) + +register_check_parameters( + subgroup_environment, + "ups_test", + _("Time since last UPS selftest"), + Tuple( + title = _("Time since last UPS selftest"), + elements = [ + Integer( + title = _("Warning Level for time since last self test"), + help = _("Warning Level for time since last diagnostic test of the device. " + "For a value of 0 the warning level will not be used"), + unit = _("days"), + default_value = 0, + ), + Integer( + title = _("Critical Level for time since last self test"), + help = _("Critical Level for time since last diagnostic test of the device. 
" + "For a value of 0 the critical level will not be used"), + unit = _("days"), + default_value = 0, + ), + ] + ), + None, + "first" +) + +register_check_parameters( + subgroup_environment, + "apc_power", + _("APC Power Consumption"), + Tuple( + title = _("Power Comsumption of APC Devices"), + elements = [ + Integer( + title = _("Warning below"), + unit = _("W"), + default_value = 20, + ), + Integer( + title = _("Critical below"), + unit = _("W"), + default_value = 1, + ), + ] + ), + TextAscii( + title = _("Phase"), + help = _("The identifier of the phase the power is related to."), + ), + None, + "first" +) -checkgroups.append(( +register_check_parameters( subgroup_storage, "fileinfo", _("Size and age of single files"), @@ -641,8 +1092,8 @@ Tuple( title = _("Minimal age"), elements = [ - Age(title = _("Warning younger then")), - Age(title = _("Critical younger then")), + Age(title = _("Warning if younger than")), + Age(title = _("Critical if younger than")), ] ) ), @@ -650,8 +1101,8 @@ Tuple( title = _("Maximal age"), elements = [ - Age(title = _("Warning older then")), - Age(title = _("Critical older then")), + Age(title = _("Warning if older than")), + Age(title = _("Critical if older than")), ] ) ), @@ -659,8 +1110,8 @@ Tuple( title = _("Minimal size"), elements = [ - Filesize(title = _("Warning when lower then")), - Filesize(title = _("Critical when lower then")), + Filesize(title = _("Warning if below")), + Filesize(title = _("Critical if below")), ] ) ), @@ -668,8 +1119,8 @@ Tuple( title = _("Maximal size"), elements = [ - Filesize(title = _("Warning when higher then")), - Filesize(title = _("Critical when higher then")), + Filesize(title = _("Warning at")), + Filesize(title = _("Critical at")), ] ) ) @@ -680,7 +1131,7 @@ title = _("File name"), allow_empty = True), "first" -)) +) register_rule(group + '/' + subgroup_storage, varname = "filesystem_groups", @@ -689,9 +1140,8 @@ 'will create a single service for each filesystem. ' 'By defining grouping ' 'patterns you can handle groups of filesystems like one filesystem. ' - 'For each group you can define one or several patterns containing ' - '* and ?, for example ' - '/spool/tmpspace*. The filesystems matching one of the patterns ' + 'For each group you can define one or several patterns. ' + 'The filesystems matching one of the patterns ' 'will be monitored like one big filesystem in a single service.'), valuespec = ListOf( Tuple( @@ -702,7 +1152,11 @@ title = _("Name of group"), ), TextAscii( - title = _("File pattern (using * and ?)"), + title = _("Pattern for mount point (using * and ?)"), + help = _("You can specify one or several patterns containing " + "* and ?, for example /spool/tmpspace*. " + "The filesystems matching the patterns will be monitored " + "like one big filesystem in a single service."), ), ] ), @@ -710,9 +1164,10 @@ ), match = 'all', ) + register_rule(group + '/' + subgroup_storage, varname = "fileinfo_groups", - title = _('Fileinfo grouping patterns'), + title = _('File Grouping Patterns'), help = _('The check fileinfo monitors the age and size of ' 'a single file. Each file information that is sent ' 'by the agent will create one service. By defining grouping ' @@ -720,31 +1175,41 @@ 'That check monitors a list of files at once. You can set levels ' 'not only for the total size and the age of the oldest/youngest ' 'file but also on the count. You can define one or several ' - 'patterns containing * and ?, for example ' + 'patterns for a group containing * and ?, for example ' '/var/log/apache/*.log. 
For files contained in a group ' - 'the inventory will automatically create a group service and ' - 'no single service.'), + 'the inventory will automatically create a group service instead ' + 'of single services for each file. This rule also applies when ' + 'you use manually configured checks instead of inventorized ones.'), valuespec = ListOf( - Tuple( - help = _("This defines one fileinfo grouping pattern"), - show_titles = True, - orientation = "horizontal", - elements = [ - TextAscii( - title = _("Name of group"), - ), - TextAscii( - title = _("Pattern for mount point (using * and ?)"), - ), - ] - ), - add_label = _("Add pattern"), + Tuple( + help = _("This defines one file grouping pattern"), + show_titles = True, + orientation = "horizontal", + elements = [ + TextAscii( + title = _("Name of group"), + size = 10, + ), + Transform( + Tuple( + show_titles = True, + orientation = "vertical", + elements = [ + TextAscii(title = _("Include Pattern"), size=40), + TextAscii(title = _("Exclude Pattern"), size=40), + ], + ), + forth = lambda params: type(params) == str and ( params, '' ) or params + ), + ], + ), + add_label = _("Add pattern group"), ), match = 'all', ) -checkgroups.append(( +register_check_parameters( subgroup_storage, "fileinfo-groups", _("Size, age and count of file groups"), @@ -754,8 +1219,8 @@ Tuple( title = _("Minimal age of oldest file"), elements = [ - Age(title = _("Warning younger then")), - Age(title = _("Critical younger then")), + Age(title = _("Warning if younger than")), + Age(title = _("Critical if younger than")), ] ) ), @@ -763,8 +1228,8 @@ Tuple( title = _("Maximal age of oldest file"), elements = [ - Age(title = _("Warning older then")), - Age(title = _("Critical older then")), + Age(title = _("Warning if older than")), + Age(title = _("Critical if older than")), ] ) ), @@ -772,8 +1237,8 @@ Tuple( title = _("Minimal age of newest file"), elements = [ - Age(title = _("Warning younger then")), - Age(title = _("Critical younger then")), + Age(title = _("Warning if younger than")), + Age(title = _("Critical if younger than")), ] ) ), @@ -781,8 +1246,44 @@ Tuple( title = _("Maximal age of newest file"), elements = [ - Age(title = _("Warning older then")), - Age(title = _("Critical older then")), + Age(title = _("Warning if older than")), + Age(title = _("Critical if older than")), + ] + ) + ), + ("minsize_smallest", + Tuple( + title = _("Minimal size of smallest file"), + elements = [ + Filesize(title = _("Warning if below")), + Filesize(title = _("Critical if below")), + ] + ) + ), + ("maxsize_smallest", + Tuple( + title = _("Maximal size of smallest file"), + elements = [ + Filesize(title = _("Warning if above")), + Filesize(title = _("Critical if above")), + ] + ) + ), + ("minsize_largest", + Tuple( + title = _("Minimal size of largest file"), + elements = [ + Filesize(title = _("Warning if below")), + Filesize(title = _("Critical if below")), + ] + ) + ), + ("maxsize_largest", + Tuple( + title = _("Maximal size of largest file"), + elements = [ + Filesize(title = _("Warning if above")), + Filesize(title = _("Critical if above")), ] ) ), @@ -790,8 +1291,8 @@ Tuple( title = _("Minimal size"), elements = [ - Filesize(title = _("Warning when lower then")), - Filesize(title = _("Critical when lower then")), + Filesize(title = _("Warning if below")), + Filesize(title = _("Critical if below")), ] ) ), @@ -799,8 +1300,8 @@ Tuple( title = _("Maximal size"), elements = [ - Filesize(title = _("Warning when higher then")), - Filesize(title = _("Critical when higher 
then")), + Filesize(title = _("Warning if above")), + Filesize(title = _("Critical if above")), ] ) ), @@ -808,8 +1309,8 @@ Tuple( title = _("Minimal file count"), elements = [ - Integer(title = _("Warning when lower then")), - Integer(title = _("Critical when lower then")), + Integer(title = _("Warning if below")), + Integer(title = _("Critical if below")), ] ) ), @@ -817,20 +1318,23 @@ Tuple( title = _("Maximal file count"), elements = [ - Integer(title = _("Warning when higher then")), - Integer(title = _("Critical when higher then")), + Integer(title = _("Warning if above")), + Integer(title = _("Critical if above")), ] ) ), ] ), TextAscii( - title = _("Filegroup Name"), + title = _("File Group Name"), + help = _("This name must match the name of the group defined " + "in the %s ruleset.") % \ + (_('File Grouping Patterns')), allow_empty = True), "first" -)) +) -checkgroups.append(( +register_check_parameters( subgroup_storage, "netapp_fcprtio", _("Netapp FC Port throughput"), @@ -840,8 +1344,8 @@ Tuple( title = _("Read"), elements = [ - Filesize(title = _("Warning when lower then")), - Filesize(title = _("Critical when lower then")), + Filesize(title = _("Warning if below")), + Filesize(title = _("Critical if below")), ] ) ), @@ -849,8 +1353,8 @@ Tuple( title = _("Write"), elements = [ - Filesize(title = _("Warning when higher then")), - Filesize(title = _("Critical when higher then")), + Filesize(title = _("Warning at")), + Filesize(title = _("Critical at")), ] ) ) @@ -861,10 +1365,10 @@ title = _("File name"), allow_empty = True), "first" -)) +) -checkgroups.append(( +register_check_parameters( subgroup_os, "memory_pagefile_win", _("Memory and pagefile levels for Windows"), @@ -873,50 +1377,406 @@ ( "memory", Alternative( title = _("Memory Levels"), + style = "dropdown", elements = [ Tuple( - title = _("Levels in percent"), + title = _("Memory usage in percent"), elements = [ - Percentage(title = _("Warning at"), label = _("% usage")), - Percentage(title = _("Critical at"), label = _("% usage")), - ] + Percentage(title = _("Warning at")), + Percentage(title = _("Critical at")), + ], ), - Tuple( - title = _("Absolute levels"), - elements = [ - Filesize(title = _("Warning when higher then")), - Filesize(title = _("Critical when higher then")), - ] - ) - ])), + Transform( + Tuple( + title = _("Absolute free memory"), + elements = [ + Filesize(title = _("Warning if less than")), + Filesize(title = _("Critical if less than")), + ] + ), + # Note: Filesize values lesser 1MB will not work + # -> need hide option in filesize valuespec + back = lambda x: (x[0] / 1024 / 1024, x[1] / 1024 / 1024), + forth = lambda x: (x[0] * 1024 * 1024, x[1] * 1024 * 1024) + ), + PredictiveLevels( + title = _("Predictive levels"), + unit = _("GB"), + default_difference = (0.5, 1.0) + ) + ], + default_value = (80.0, 90.0))), ( "pagefile", Alternative( title = _("Pagefile Levels"), + style = "dropdown", elements = [ Tuple( - title = _("Levels in percent"), + title = _("Pagefile usage in percent"), elements = [ - Percentage(title = _("Warning at"), label = _("% usage")), - Percentage(title = _("Critical at"), label = _("% usage")), + Percentage(title = _("Warning at")), + Percentage(title = _("Critical at")), ] ), - Tuple( - title = _("Absolute levels"), - elements = [ - Filesize(title = _("Warning when higher then")), - Filesize(title = _("Critical when higher then")), - ] - ) - ])), + Transform( + Tuple( + title = _("Absolute free pagefile"), + elements = [ + Filesize(title = _("Warning if less than")), + 
Filesize(title = _("Critical if less than")), + ] + ), + # Note: Filesize values lesser 1MB will not work + # -> need hide option in filesize valuespec + back = lambda x: (x[0] / 1024 / 1024, x[1] / 1024 / 1024), + forth = lambda x: (x[0] * 1024 * 1024, x[1] * 1024 * 1024) + ), + PredictiveLevels( + title = _("Predictive levels"), + unit = _("GB"), + default_difference = (0.5, 1.0) + ) + ], + default_value = (80.0, 90.0)) + ), + ("average", + Integer ( + title = _("Averaging"), + help = _("If this parameter is set, all measured values will be averaged " + "over the specified time interval before levels are being applied. Per " + "default, averaging is turned off. "), + unit = _("minutes"), + minvalue = 1, + default_value = 60, + ) + ), + ]), None, "dict" -)) +) + +register_check_parameters( + subgroup_applications, + "apache_status", + ("Apache Status"), + Dictionary( + elements = [ + ( "OpenSlots", + Tuple( + title = _("Remaining Open Slots"), + help = _("Here you can set the number of remaining open slots"), + elements = [ + Integer(title = _("Warning at"), label = _("slots")), + Integer(title = _("Critical at"), label = _("slots")) + ] + ) + ) + ] + ), + TextAscii( + title = _("Apache Server"), + help = _("A string-combination of servername and port, e.g. 127.0.0.1:5000.") + ), + "first" +) + +register_check_parameters( + subgroup_applications, + "nginx_status", + ("Nginx Status"), + Dictionary( + elements = [ + ( "active_connections", + Tuple( + title = _("Active Connections"), + help = _("You can configure upper thresholds for the currently active " + "connections handled by the web server."), + elements = [ + Integer(title = _("Warning at"), unit = _("connections")), + Integer(title = _("Critical at"), unit = _("connections")) + ] + ) + ) + ] + ), + TextAscii( + title = _("Nginx Server"), + help = _("A string-combination of servername and port, e.g. 
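# Round-trip sketch (illustrative only) of the forth/back pair above: the
# Filesize valuespec edits the absolute memory levels in bytes, while the
# stored parameters are megabytes, so forth scales up for display and back
# scales down again before saving.
mb_to_bytes = lambda x: (x[0] * 1024 * 1024, x[1] * 1024 * 1024)   # forth
bytes_to_mb = lambda x: (x[0] / 1024 / 1024, x[1] / 1024 / 1024)   # back

assert bytes_to_mb(mb_to_bytes((512, 256))) == (512, 256)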
127.0.0.1:80.") + ), + "first" +) + +register_check_parameters( + subgroup_networking, + "viprinet_router", + _("Viprinet router"), + Dictionary( + elements = [ + ( "expect_mode", + DropdownChoice( + title = _("Set expected router mode"), + choices = [ + ( "inv", _("Mode found during inventory") ), + ( "0" , _("Node") ), + ( "1" , _("Hub") ), + ( "2" , _("Hub running as HotSpare") ), + ( "3" , _("Hotspare-Hub replacing another router") ), + ] + ) + ), + ] + ), + None, + None +) + +register_check_parameters( + subgroup_networking, + 'docsis_channels_upstream', + _("Docsis Upstream Channels"), + Dictionary( + elements = [ + ( 'signal_noise', Tuple( + title = _("Levels for signal/noise ratio"), + elements = [ + Float(title = _("Warning at or below"), unit = "dB", default_value = 10.0), + Float(title = _("Critical at or below"), unit = "dB", default_value = 5.0 ), + ] + )), + ] + ), + TextAscii(title = _("ID of the channel (usually ranging from 1)")), + "dict" +) + +register_check_parameters( + subgroup_networking, + "docsis_channels_downstream", + _("Docsis Downstream Channels"), + Dictionary( + elements = [ + ( "power", Tuple( + title = _("Transmit Power"), + help = _("The operational transmit power"), + elements = [ + Float(title = _("warning at or below"), unit = "dBmV", default_value = 5.0 ), + Float(title = _("critical at or below"), unit = "dBmV", default_value = 1.0 ), + ]) + ), + ] + ), + TextAscii(title = _("ID of the channel (usually ranging from 1)")), + "dict" +) + +register_check_parameters( + subgroup_networking, + "docsis_cm_status", + _("Docsis Cable Modem Status"), + Dictionary( + elements = [ + ( "error_states", ListChoice( + title = _("Modem States that lead to a critical state"), + help = _("If one of the selected states occurs the check will repsond with a critical state "), + choices = [ + ( 1, "other" ), + ( 2, "notReady" ), + ( 3, "notSynchronized" ), + ( 4, "phySynchronized" ), + ( 5, "usParametersAcquired" ), + ( 6, "rangingComplete" ), + ( 7, "ipComplete" ), + ( 8, "todEstablished" ), + ( 9, "securityEstablished" ), + ( 10, "paramTransferComplete"), + ( 11, "registrationComplete"), + ( 12, "operational"), + ( 13, "accessDenied"), + ], + default_value = [ 1, 2, 13 ], + )), + ( "tx_power", Tuple( + title = _("Transmit Power"), + help = _("The operational transmit power"), + elements = [ + Float(title = _("warning at"), unit = "dBmV", default_value = 20.0 ), + Float(title = _("critical at"), unit = "dBmV", default_value = 10.0 ), + ])), + ] + ), + TextAscii( title = _("ID of the Entry")), + "dict" +) + +register_check_parameters( + subgroup_networking, + "vpn_tunnel", + _("VPN Tunnel"), + Dictionary( + elements = [ + ( "tunnels", + ListOf( + Tuple( + title = ("VPN Tunnel Endpoints"), + elements = [ + IPv4Address( + title = _("IP-Address or Name of Tunnel Endpoint"), + help = _("The configured value must match a tunnel reported by the monitored " + "device."), + allow_empty = False, + ), + TextUnicode( + title = _("Tunnel Alias"), + help = _("You can configure an individual alias here for the tunnel matching " + "the IP-Address or Name configured in the field above."), + ), + MonitoringState( + default_value = 2, + title = _("State if tunnel is not found"), + )]), + add_label = _("Add tunnel"), + movable = False, + title = _("VPN tunnel specific configuration"), + )), + ( "state", + MonitoringState( + title = _("Default state to report when tunnel can not be found anymore"), + help = _("Default state if a tunnel, which is not listed above in this rule, " + "can no 
longer be found."), + ), + ), + ], + ), + TextAscii( title = _("IP-Address of Tunnel Endpoint")), + "first" +) + +register_check_parameters( + subgroup_networking, + "lsnat", + _("Enterasys LSNAT Bindings"), + Dictionary( + elements = [ + ( "current_bindings", + Tuple( + title = _("Number of current LSNAT bindings"), + elements = [ + Integer(title = _("Warning at"), size = 10, unit=_("bindings")), + Integer(title = _("Critical at"), size = 10, unit=_("bindings")), + ] + ) + ), + ], + optional_keys = False, + ), + None, + "dict" +) + +hivemanger_states = [ + ( "Critical" , "Critical" ), + ( "Maybe" , "Maybe" ), + ( "Major" , "Major" ), + ( "Minor" , "Minor" ), +] +register_check_parameters( + subgroup_networking, + "hivemanager_devices", + _("Hivemanager Devices"), + Dictionary( + elements = [ + ( 'max_clients', + Tuple( + title = _("Number of clients"), + help = _("Number of clients connected to a Device."), + elements = [ + Integer(title = _("Warning at"), unit=_("clients")), + Integer(title = _("Critical at"), unit=_("clients")), + ] + )), + ( 'max_uptime', + Tuple( + title = _("Maximum uptime of Device"), + elements = [ + Age(title = _("Warning at")), + Age(title = _("Critical at")), + ] + )), + ( 'alert_on_loss', + FixedValue( + False, + totext = "", + title = _("Do not alert on connection loss"), + )), + ( "war_states", + ListChoice( + title = _("States treated as warning"), + choices = hivemanger_states, + default_value = ['Maybe', 'Major', 'Minor'], + ) + ), + ( "crit_states", + ListChoice( + title = _("States treated as critical"), + choices = hivemanger_states, + default_value = ['Critical'], + ) + ), + ]), + TextAscii( + title = _("Hostname of the Device") + ), + "first" +) + +register_check_parameters( + subgroup_networking, + "wlc_clients", + _("WLC WiFi client connections"), + Tuple( + title = _("Number of connections"), + help = _("Number of connections for a WiFi"), + elements = [ + Integer(title = _("Critical if below"), unit=_("connections")), + Integer(title = _("Warning if below"), unit=_("connections")), + Integer(title = _("Warning if above"), unit=_("connections")), + Integer(title = _("Critical if above"), unit=_("connections")), + ] + ), + TextAscii( title = _("Name of Wifi")), + "first" +) -checkgroups.append(( +register_check_parameters( + subgroup_networking, + "cisco_wlc", + _("Cisco WLAN AP"), + Dictionary( + help = _("Here you can set which alert type is set when the given " + "access point is missing (might be powered off). The access point " + "can be specified by the AP name or the AP model"), + elements = [ + ( "ap_name", + ListOf( + Tuple( + elements = [ + TextAscii(title = _("AP name")), + MonitoringState( title=_("State when missing"), default_value = 2) + ] + ), + title = _("Access point name"), + add_label = _("Add name")) + ) + ] + ), + TextAscii(title = _("Access Point")), + "first", +) +register_check_parameters( subgroup_networking, "tcp_conn_stats", - ("TCP connection stats"), + ("TCP connection stats (LINUX / UNIX)"), Dictionary( elements = [ ( "ESTABLISHED", @@ -1041,117 +1901,365 @@ ), None, "first" -)) - -checkgroups.append(( - subgroup_applications, - "msx_queues", - _("MS Exchange message queues"), - Tuple( - help = _("The length of the queues"), - elements = [ - Integer(title = _("Warning at queue length")), - Integer(title = _("Critical at queue length")) - ]), - OptionalDropdownChoice( - title = _("Explicit Queue Names"), - help = _("You can enter a number of explicit queues names that " - "rule should or should not apply here. 
Builtin queues:<br>" - "Active Remote Delivery<br>Active Mailbox Delivery<br>" - "Retry Remote Delivery<br>Poison Queue Length<br>
    "), - choices = [ - ( "Active Remote Delivery", _("Active Remote Delivery") ), - ( "Retry Remote Delivery", _("Retry Remote Delivery") ), - ( "Active Mailbox Delivery", _("Active Mailbox Delivery") ), - ( "Poison Queue Length", _("Poison Queue Length") ), - ], - otherlabel = _("specify manually ->"), - explicit = TextAscii(allow_empty = False)), - "first") ) -checkgroups.append(( - subgroup_storage, - "filesystem", - _("Filesystems (used space and growth)"), + +register_check_parameters( + subgroup_networking, + "tcp_connections", + _("Monitor specific TCP/UDP connections and listeners"), Dictionary( + help = _("This rule allows to monitor the existence of specific TCP connections or " + "TCP/UDP listeners."), elements = [ - ( "levels", - Tuple( - title = _("Levels for the used space"), - elements = [ - Percentage(title = _("Warning at"), unit = _("% usage"), allow_int = True, default_value=80), - Percentage(title = _("Critical at"), unit = _("% usage"), allow_int = True, default_value=90)])), - ( "magic", - Float( - title = _("Magic factor (automatic level adaptation for large filesystems)"), - default_value = 0.8, - minvalue = 0.1, - maxvalue = 1.0)), - ( "magic_normsize", - Integer( - title = _("Reference size for magic factor"), - default_value = 20, - minvalue = 1, - label = _("GB"))), - ( "levels_low", - Tuple( - title = _("Minimum levels if using magic factor"), - help = _("The filesystem levels will never fall below these values, when using " - "the magic factor and the filesystem is very small."), - elements = [ - Percentage(title = _("Warning at"), label = _("usage"), allow_int = True, default_value=50), - Percentage(title = _("Critical at"), label = _("usage"), allow_int = True, default_value=60)])), - ( "trend_range", - Optional( - Integer( - title = _("Range for filesystem trend computation"), - default_value = 24, - minvalue = 1, - label= _("hours")), - title = _("Trend computation"), - label = _("Enable trend computation"))), - ( "trend_mb", - Tuple( - title = _("Levels on trends in MB per range"), - elements = [ - Integer(title = _("Warning at"), label = _("MB / range"), default_value = 100), - Integer(title = _("Critical at"), label = _("MB / range"), default_value = 200) - ])), - ( "trend_perc", + ( "proto", + DropdownChoice( + title = _("Protocol"), + choices = [ ("TCP", _("TCP")), ("UDP", _("UDP")) ], + default_value = "TCP", + ), + ), + ( "state", + DropdownChoice( + title = _("State"), + choices = [ + ( "ESTABLISHED", "ESTABLISHED" ), + ( "LISTENING", "LISTENING" ), + ( "SYN_SENT", "SYN_SENT" ), + ( "SYN_RECV", "SYN_RECV" ), + ( "LAST_ACK", "LAST_ACK" ), + ( "CLOSE_WAIT", "CLOSE_WAIT" ), + ( "TIME_WAIT", "TIME_WAIT" ), + ( "CLOSED", "CLOSED" ), + ( "CLOSING", "CLOSING" ), + ( "FIN_WAIT1", "FIN_WAIT1" ), + ( "FIN_WAIT2", "FIN_WAIT2" ), + ( "BOUND", "BOUND" ), + ] + ), + ), + ( "local_ip", IPv4Address(title = _("Local IP address"))), + ( "local_port", Integer(title = _("Local port number"), minvalue = 1, maxvalue = 65535, )), + ( "remote_ip", IPv4Address(title = _("Remote IP address"))), + ( "remote_port", Integer(title = _("Remote port number"), minvalue = 1, maxvalue = 65535, )), + ( "min_states", Tuple( - title = _("Levels for the percentual growth"), + title = _("Minimum number of connections or listeners"), elements = [ - Percentage(title = _("Warning at"), label = _("% / range"), default_value = 5,), - Percentage(title = _("Critical at"), label = _("% / range"), default_value = 10,), - ])), - ( "trend_timeleft", + Integer(title = _("Warning if below")), + 
Integer(title = _("Critical if below")), + ], + ), + ), + ( "max_states", Tuple( - title = _("Levels on the time left until the filesystem gets full"), + title = _("Maximum number of connections or listeners"), elements = [ - Integer(title = _("Warning at"), label = _("hours left"), default_value = 12,), - Integer(title = _("Critical at"), label = _("hours left"), default_value = 6, ), - ])), - ( "trend_perfdata", - Checkbox( - title = _("Trend performance data"), - label = _("Enable performance data from trends"))), + Integer(title = _("Warning at")), + Integer(title = _("Critical at")), + ], + ), + ), + ] + ), + TextAscii(title = _("Connection name"), help = _("Specify an arbitrary name of this connection here"), allow_empty = False), + "dict", + has_inventory = False, +) +def transform_msx_queues(params): + if type(params) == tuple: + return { "levels" : ( params[0], params[1] ) } + return params - ]), - TextAscii( - title = _("Mount point"), - help = _("For Linux/UNIX systems, specify the mount point, for Windows systems " - "the drive letter uppercase followed by a colon, e.g. C:"), - allow_empty = False), - "dict") -) register_check_parameters( - subgroup_networking, - "bonding", - _("Status of Linux bonding interfaces"), - Dictionary( + subgroup_applications, + "msx_queues", + _("MS Exchange Message Queues"), + Transform( + Dictionary( + title = _("Set Levels"), + elements = [ + ( 'levels', + Tuple( + title = _("Maximum Number of E-Mails in Queue"), + elements = [ + Integer(title = _("Warning at"), unit = _("E-Mails")), + Integer(title = _("Critical at"), unit = _("E-Mails")) + ]), + ), + ('offset', + Integer( + title = _("Offset"), + help = _("Use this only if you want to overwrite the postion of the information in the agent " + "output. Also refer to the rule Microsoft Exchange Queues Discovery ") + ) + ), + ], + optional_keys = [ "offset" ], + ), + forth = transform_msx_queues, + ), + TextAscii( + title = _("Explicit Queue Names"), + help = _("Specify queue names that the rule should apply to"), + ), + "first" +) + +def get_free_used_dynamic_valuespec(what, name, default_value = (80.0, 90.0)): + if what == "used": + title = _("used space") + course = _("above") + else: + title = _("free space") + course = _("below") + + + vs_subgroup = [ + Tuple( title = _("Percentage %s") % title, + elements = [ + Percentage(title = _("Warning if %s") % course, unit = "%", minvalue = 0.0), + Percentage(title = _("Critical if %s") % course, unit = "%", minvalue = 0.0), + ] + ), + Tuple( title = _("Absolute %s") % title, + elements = [ + Integer(title = _("Warning if %s") % course, unit = _("MB"), minvalue = 0), + Integer(title = _("Critical if %s") % course, unit = _("MB"), minvalue = 0), + ] + ) + ] + + return Alternative( + title = _("Levels for %s %s") % (name, title), + show_alternative_title = True, + default_value = default_value, + elements = vs_subgroup + [ + ListOf( + Tuple( + orientation = "horizontal", + elements = [ + Filesize(title = _("%s larger than") % name.title()), + Alternative( + elements = vs_subgroup + ) + ] + ), + title = _('Dynamic levels'), + )], + ) + + +# Match and transform functions for level configurations like +# -- used absolute, positive int (2, 4) +# -- used percentage, positive float (2.0, 4.0) +# -- available absolute, negative int (-2, -4) +# -- available percentage, negative float (-2.0, -4.0) +# (4 alternatives) +def match_dual_level_type(value): + if type(value) == list: + for entry in value: + if entry[1][0] < 0 or entry[1][1] < 0: + return 1 + else: + 
return 0 + else: + if value[0] < 0 or value[1] < 0: + return 1 + else: + return 0 + +def transform_filesystem_free(value): + tuple_convert = lambda val: tuple(map(lambda x: -x, val)) + + if type(value) == tuple: + return tuple_convert(value) + else: + result = [] + for item in value: + result.append((item[0], tuple_convert(item[1]))) + return result + + +filesystem_elements = [ + ("levels", + Alternative( + title = _("Levels for filesystem"), + show_alternative_title = True, + default_value = (80.0, 90.0), + match = match_dual_level_type, + elements = [ + get_free_used_dynamic_valuespec("used", "filesystem"), + Transform( + get_free_used_dynamic_valuespec("free", "filesystem", default_value = (20.0, 10.0)), + title = _("Levels for filesystem free space"), + allow_empty = False, + forth = transform_filesystem_free, + back = transform_filesystem_free + ) + ] + ) + ), + # Beware: this is a nasty hack that helps us to detect new-style paramters. + # Something hat has todo with float/int conversion and has not been documented + # by the one who implemented this. + ( "flex_levels", + FixedValue( + None, + totext = "", + title = "", + )), + ( "inodes_levels", + Alternative( + title = _("Levels for Inodes"), + help = _("The number of remaining inodes on the filesystem. " + "Please note that this setting has no effect on some filesystem checks."), + elements = [ + Tuple( title = _("Percentage free"), + elements = [ + Percentage(title = _("Warning if less than") , unit = "%", minvalue = 0.0), + Percentage(title = _("Critical if less than"), unit = "%", minvalue = 0.0), + ] + ), + Tuple( title = _("Absolute free"), + elements = [ + Integer(title = _("Warning if less than"), size = 10, unit = _("inodes"), minvalue = 0), + Integer(title = _("Critical if less than"), size = 10, unit = _("inodes"), minvalue = 0), + ] + ) + ] + ) + ), + ( "magic", + Float( + title = _("Magic factor (automatic level adaptation for large filesystems)"), + default_value = 0.8, + minvalue = 0.1, + maxvalue = 1.0)), + ( "magic_normsize", + Integer( + title = _("Reference size for magic factor"), + default_value = 20, + minvalue = 1, + unit = _("GB"))), + ( "levels_low", + Tuple( + title = _("Minimum levels if using magic factor"), + help = _("The filesystem levels will never fall below these values, when using " + "the magic factor and the filesystem is very small."), + elements = [ + Percentage(title = _("Warning at"), unit = _("% usage"), allow_int = True, default_value=50), + Percentage(title = _("Critical at"), unit = _("% usage"), allow_int = True, default_value=60)])), + ( "trend_range", + Optional( + Integer( + title = _("Time Range for filesystem trend computation"), + default_value = 24, + minvalue = 1, + unit= _("hours")), + title = _("Trend computation"), + label = _("Enable trend computation"))), + ( "trend_mb", + Tuple( + title = _("Levels on trends in MB per time range"), + elements = [ + Integer(title = _("Warning at"), unit = _("MB / range"), default_value = 100), + Integer(title = _("Critical at"), unit = _("MB / range"), default_value = 200) + ])), + ( "trend_perc", + Tuple( + title = _("Levels for the percentual growth per time range"), + elements = [ + Percentage(title = _("Warning at"), unit = _("% / range"), default_value = 5,), + Percentage(title = _("Critical at"), unit = _("% / range"), default_value = 10,), + ])), + ( "trend_timeleft", + Tuple( + title = _("Levels on the time left until the filesystem gets full"), + elements = [ + Integer(title = _("Warning if below"), unit = _("hours"), default_value 
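# Worked examples (sketch, not shipped code) for the four level encodings
# listed in the comment above: positive values mean "used", negative values
# mean "free"; ints are absolute MB, floats are percentages.
assert match_dual_level_type((80.0, 90.0)) == 0     # used percentage -> first alternative
assert match_dual_level_type((-20.0, -10.0)) == 1   # free percentage -> second alternative

# transform_filesystem_free simply flips the signs, in both directions:
assert transform_filesystem_free((-20.0, -10.0)) == (20.0, 10.0)
assert transform_filesystem_free((20.0, 10.0)) == (-20.0, -10.0)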
= 12,), + Integer(title = _("Critical if below"), unit = _("hours"), default_value = 6, ), + ])), + ( "trend_showtimeleft", + Checkbox( title = _("Display time left in check output"), label = _("Enable"), + help = _("Normally, the time left until the disk is full is only displayed when " + "the configured levels have been breached. If you set this option " + "the check always reports this information")) + ), + ( "trend_perfdata", + Checkbox( + title = _("Trend performance data"), + label = _("Enable generation of performance data from trends"))), +] + +register_check_parameters( + subgroup_storage, + "filesystem", + _("Filesystems (used space and growth)"), + Dictionary( + elements = filesystem_elements, + hidden_keys = ["flex_levels"], + ), + TextAscii( + title = _("Mount point"), + help = _("For Linux/UNIX systems, specify the mount point, for Windows systems " + "the drive letter uppercase followed by a colon and a slash, e.g. C:/"), + allow_empty = False), + "dict" +) + +register_check_parameters( + subgroup_storage, + "esx_vsphere_datastores", + _("ESX Datastores (used space and growth)"), + Dictionary( + elements = filesystem_elements + [ + ("provisioning_levels", Tuple( + title = _("Provisioning Levels"), + help = _("Configure thresholds for overprovisioning of datastores."), + elements = [ + Percentage(title = _("Warning at overprovisioning of"), maxvalue = None), + Percentage(title = _("Critical at overprovisioning of"), maxvalue = None), + ] + )), + ], + hidden_keys = ["flex_levels"], + ), + TextAscii( + title = _("Datastore Name"), + help = _("The name of the Datastore"), + allow_empty = False + ), + "dict" +) + +register_check_parameters( + subgroup_storage, + "esx_hostystem_maintenance", + _("ESX Hostsystem Maintenance Mode"), + Dictionary( + elements = [ + ("target_state", DropdownChoice( + title = _("Target State"), + help = _("Configure the target mode for the system."), + choices = [ + ('true', "System should be in Maintenance Mode"), + ('false', "System not should be in Maintenance Mode"), + ] + )), + ], + ), + None, + "dict" +) + +register_check_parameters( + subgroup_networking, + "bonding", + _("Status of Linux bonding interfaces"), + Dictionary( elements = [ ( "expect_active", DropdownChoice( @@ -1168,9 +2276,10 @@ TextAscii( title = _("Name of the bonding interface"), ), - "dict") + "dict" +) -checkgroups.append(( +register_check_parameters( subgroup_networking, "if", _("Network interfaces and switch ports"), @@ -1184,24 +2293,24 @@ "the given bounds. The error rate is computed by dividing number of " "errors by the total number of packets (successful plus errors)."), elements = [ - Percentage(title = _("Warning at"), label = _("errors")), - Percentage(title = _("Critical at"), label = _("errors")) + Percentage(title = _("Warning at"), label = _("errors"), default_value = 0.01, display_format = '%.3f' ), + Percentage(title = _("Critical at"), label = _("errors"), default_value = 0.1, display_format = '%.3f' ) ])), - ( "speed", OptionalDropdownChoice( title = _("Operating speed"), help = _("If you use this parameter then the check goes warning if the " "interface is not operating at the expected speed (e.g. it " - "is working with 100MBit/s instead of 1GBit/s.Note: " + "is working with 100Mbit/s instead of 1Gbit/s.Note: " "some interfaces do not provide speed information. 
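# The "magic factor" above is only configured, not implemented, in this file;
# the df check applies it roughly as sketched below (treat the exact formula
# as an assumption): configured levels are relaxed the further a filesystem's
# size lies above the reference size.
def adapted_level(level, size_gb, magic=0.8, normsize_gb=20):
    relative_size = float(size_gb) / normsize_gb
    scale = (relative_size ** magic) / relative_size
    return 100 - (100 - level) * scale

# An 80% warning level on a 2 TB filesystem ends up near 92%:
assert 91 < adapted_level(80.0, 2048) < 93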
In such cases " "this setting is used as the assumed speed when it comes to " "traffic monitoring (see below)."), choices = [ ( None, _("ignore speed") ), - ( 10000000, "10 MBit/s" ), - ( 100000000, "100 MBit/s" ), - ( 1000000000, "1 GBit/s" ) ], + ( 10000000, "10 Mbit/s" ), + ( 100000000, "100 Mbit/s" ), + ( 1000000000, "1 Gbit/s" ), + ( 10000000000, "10 Gbit/s" ) ], otherlabel = _("specify manually ->"), explicit = \ Integer(title = _("Other speed in bits per second"), @@ -1220,6 +2329,44 @@ none_label = _("ignore"), negate = True) ), + ( "assumed_speed_in", + OptionalDropdownChoice( + title = _("Assumed input speed"), + help = _("If the automatic detection of the link speed does not work " + "or the switch's capabilities are throttled because of the network setup " + "you can set the assumed speed here."), + choices = [ + ( None, _("ignore speed") ), + ( 10000000, "10 Mbit/s" ), + ( 100000000, "100 Mbit/s" ), + ( 1000000000, "1 Gbit/s" ), + ( 10000000000, "10 Gbit/s" ) ], + otherlabel = _("specify manually ->"), + default_value = 16000000, + explicit = \ + Integer(title = _("Other speed in bits per second"), + label = _("Bits per second"), + size = 10)) + ), + ( "assumed_speed_out", + OptionalDropdownChoice( + title = _("Assumed output speed"), + help = _("If the automatic detection of the link speed does not work " + "or the switch's capabilities are throttled because of the network setup " + "you can set the assumed speed here."), + choices = [ + ( None, _("ignore speed") ), + ( 10000000, "10 Mbit/s" ), + ( 100000000, "100 Mbit/s" ), + ( 1000000000, "1 Gbit/s" ), + ( 10000000000, "10 Gbit/s" ) ], + otherlabel = _("specify manually ->"), + default_value = 1500000, + explicit = \ + Integer(title = _("Other speed in bits per second"), + label = _("Bits per second"), + size = 12)) + ), ( "unit", RadioChoice( title = _("Measurement unit"), @@ -1231,7 +2378,7 @@ )), ( "traffic", Alternative( - title = _("Used bandwidth (traffic)"), + title = _("Used bandwidth (maximum traffic)"), help = _("Settings levels on the used bandwidth is optional. If you do set " "levels you might also consider using an averaging."), elements = [ @@ -1252,6 +2399,29 @@ ) ]) ), + ( "traffic_minimum", + Alternative( + title = _("Used bandwidth (minimum traffic)"), + help = _("Settings levels on the used bandwidth is optional. If you do set " + "levels you might also consider using an averaging."), + elements = [ + Tuple( + title = _("Percentual levels (in relation to port speed)"), + elements = [ + Percentage(title = _("Warning if below"), label = _("% of port speed")), + Percentage(title = _("Critical if below"), label = _("% of port speed")), + ] + ), + Tuple( + title = _("Absolute levels in bits or bytes per second"), + help = _("Depending on the measurement unit (defaults to byte) the absolute levels are set in bit or byte"), + elements = [ + Integer(title = _("Warning if below"), label = _("bits / bytes per second")), + Integer(title = _("Critical if below"), label = _("bits / bytes per second")), + ] + ) + ]) + ), ( "average", Integer( @@ -1260,8 +2430,9 @@ "errors and traffic are applied to the averaged value. 
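# Arithmetic sketch (assumption, not shipped code) linking the percentual
# and absolute traffic levels above: with the default "byte" measurement
# unit on a 1 Gbit/s port, an 80% warning level corresponds to 100 MB/s.
port_speed = 1000000000                      # bits per second
warn_pct = 80.0
warn_abs = port_speed / 8 * warn_pct / 100   # bytes per second
assert warn_abs == 100000000.0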
That " "way you can make the check react only on long-time changes, " "not on one-minute events."), - label = _("minutes"), + unit = _("minutes"), minvalue = 1, + default_value = 15, ) ), @@ -1271,9 +2442,141 @@ title = _("port specification"), allow_empty = False), "dict", - )) +) +register_check_parameters( + subgroup_networking, + "signal_quality", + _("Signal quality of Wireless device"), + Tuple( + elements=[ + Percentage(title = _("Warning if under"), maxvalue=100 ), + Percentage(title = _("Critical if under"), maxvalue=100 ), + ]), + TextAscii( + title = _("Network specification"), + allow_empty = True), + "first", +) + +register_check_parameters( + subgroup_networking, + "cisco_qos", + _("Cisco quality of service"), + Dictionary( + elements = [ + ( "unit", + RadioChoice( + title = _("Measurement unit"), + help = _("Here you can specifiy the measurement unit of the network interface"), + default_value = "bit", + choices = [ + ( "bit", _("Bits") ), + ( "byte", _("Bytes") ),], + )), + ( "post", + Alternative( + title = _("Used bandwidth (traffic)"), + help = _("Settings levels on the used bandwidth is optional. If you do set " + "levels you might also consider using averaging."), + elements = [ + Tuple( + title = _("Percentual levels (in relation to policy speed)"), + elements = [ + Percentage(title = _("Warning at"), maxvalue=1000, label = _("% of port speed")), + Percentage(title = _("Critical at"), maxvalue=1000, label = _("% of port speed")), + ] + ), + Tuple( + title = _("Absolute levels in bits or bytes per second"), + help = _("Depending on the measurement unit (defaults to bit) the absolute levels are set in bit or byte"), + elements = [ + Integer(title = _("Warning at"), size = 10, label = _("bits / bytes per second")), + Integer(title = _("Critical at"), size = 10, label = _("bits / bytes per second")), + ] + ) + ]) + ), + ( "average", + Integer( + title = _("Average values"), + help = _("By activating the computation of averages, the levels on " + "errors and traffic are applied to the averaged value. 
That " + "way you can make the check react only on long-time changes, " + "not on one-minute events."), + label = _("minutes"), + minvalue = 1, + ) + ), + ( "drop", + Alternative( + title = _("Number of dropped bits or bytes per second"), + help = _("Depending on the measurement unit (defaults to bit) you can set the warn and crit " + "levels for the number of dropped bits or bytes"), + elements = [ + Tuple( + title = _("Percentual levels (in relation to policy speed)"), + elements = [ + Percentage(title = _("Warning at"), maxvalue=1000, label = _("% of port speed")), + Percentage(title = _("Critical at"), maxvalue=1000, label = _("% of port speed")), + ] + ), + Tuple( + elements = [ + Integer(title = _("Warning at"), size = 8, label = _("bits / bytes per second")), + Integer(title = _("Critical at"), size = 8, label = _("bits / bytes per second")), + ] + ) + ]) + ), + ]), + TextAscii( + title = _("port specification"), + allow_empty = False), + "dict", +) + +register_check_parameters( + subgroup_os, + "innovaphone_mem", + _("Innovaphone Memory Usage"), + Tuple( + title = _("Specify levels in percentage of total RAM"), + elements = [ + Percentage(title = _("Warning at a usage of"), unit = _("% of RAM") ), + Percentage(title = _("Critical at a usage of"), unit = _("% of RAM") ), + ] + ), + None, + "first" +) + +register_check_parameters( + subgroup_os, + "statgrab_mem", + _("Statgrab Memory Usage"), + Alternative( + elements = [ + Tuple( + title = _("Specify levels in percentage of total RAM"), + elements = [ + Percentage(title = _("Warning at a usage of"), unit = _("% of RAM"), maxvalue = None), + Percentage(title = _("Critical at a usage of"), unit = _("% of RAM"), maxvalue = None) + ] + ), + Tuple( + title = _("Specify levels in absolute usage values"), + elements = [ + Integer(title = _("Warning at"), unit = _("MB")), + Integer(title = _("Critical at"), unit = _("MB")) + ] + ), + ] + ), + None, + "first" +) -checkgroups.append(( +register_check_parameters( subgroup_os, "cisco_mem", _("Cisco Memory Usage"), @@ -1282,8 +2585,8 @@ Tuple( title = _("Specify levels in percentage of total RAM"), elements = [ - Percentage(title = _("Warning at a usage of"), label = _("% of RAM"), maxvalue = None), - Percentage(title = _("Critical at a usage of"), label = _("% of RAM"), maxvalue = None) + Percentage(title = _("Warning at a usage of"), unit = _("% of RAM"), maxvalue = None), + Percentage(title = _("Critical at a usage of"), unit = _("% of RAM"), maxvalue = None) ] ), Tuple( @@ -1300,1370 +2603,4809 @@ allow_empty = False ), None -)) +) -checkgroups.append(( +register_check_parameters( subgroup_os, - "memory", - _("Main memory usage (Linux / UNIX / Other Devices)"), + "juniper_mem", + _("Juniper Memory Usage"), + Tuple( + title = _("Specify levels in percentage of total memory usage"), + elements = [ + Percentage(title = _("Warning at a usage of"), unit =_("% of RAM"), default_value = 80.0, maxvalue = 100.0 ), + Percentage(title = _("Critical at a usage of"), unit =_("% of RAM"), default_value = 90.0, maxvalue = 100.0 ) + ] + ), + None, + "first" +) + +register_check_parameters( + subgroup_os, + "general_flash_usage", + _("Flash Space Usage"), Alternative( - help = _("The levels for memory usage on Linux and UNIX systems take into account the " - "currently used memory (RAM or SWAP) by all processes and sets this in relation " - "to the total RAM of the system. This means that the memory usage can exceed 100%. 
" - "A usage of 200% means that the total size of all processes is twice as large as " - "the main memory, so at least the half of it is currently swapped out. " - "Besides Linux and UNIX systems, these parameters are also used for memory checks " - "of other devices, like Fortigate devices."), elements = [ Tuple( - title = _("Specify levels in percentage of total RAM"), + title = _("Specify levels in percentage of total Flash"), elements = [ - Percentage(title = _("Warning at a usage of"), label = _("% of RAM"), maxvalue = None), - Percentage(title = _("Critical at a usage of"), label = _("% of RAM"), maxvalue = None)]), + Percentage(title = _("Warning at a usage of"), label = _("% of Flash"), maxvalue = None), + Percentage(title = _("Critical at a usage of"), label = _("% of Flash"), maxvalue = None) + ] + ), Tuple( title = _("Specify levels in absolute usage values"), elements = [ Integer(title = _("Warning at"), unit = _("MB")), - Integer(title = _("Critical at"), unit = _("MB"))]), - ]), - None, None)) - -checkgroups.append(( - subgroup_printing, - "printer_supply", - _("Printer cardridge levels"), - Tuple( - help = _("Levels for printer cardridges."), - elements = [ - Float(title = _("Warning remaining")), - Float(title = _("Critical remaining"))] + Integer(title = _("Critical at"), unit = _("MB")) + ] + ), + ] ), - TextAscii( - title = _("cardridge specification"), - allow_empty = True + None, + None +) +register_check_parameters( + subgroup_os, + "cisco_supervisor_mem", + _("Cisco Nexus Supervisor Memory Usage"), + Tuple( + title = _("The average utilization of memory on the active supervisor"), + elements = [ + Percentage(title = _("Warning at a usage of"), default_value = 80.0, maxvalue = 100.0 ), + Percentage(title = _("Critical at a usage of"), default_value = 90.0, maxvalue = 100.0 ) + ] ), None, - )) + None +) -checkgroups.append(( +register_check_parameters( subgroup_os, - "cpu_load", - _("CPU load (not utilization!)"), - Tuple( - help = _("The CPU load of a system is the number of processes currently being " - "in the state running, i.e. either they occupy a CPU or wait " - "for one. The load average is the averaged CPU load over the last 1, " - "5 or 15 minutes. The following levels will be applied on the average " - "load. On Linux system the 15-minute average load is used when applying " - "those levels. The configured levels are multiplied with the number of " - "CPUs, so you should configure the levels based on the value you want to " - "be warned \"per CPU\"."), - elements = [ - Float(title = _("Warning at a load of"), accept_int = True), - Float(title = _("Critical at a load of"), accept_int = True)]), - None, None)) - -checkgroups.append(( - subgroup_os, - "cpu_utilization", - _("CPU utilization (percentual)"), - Optional( - Tuple( - elements = [ - Percentage(title = _("Warning at a utilization of"), label = "%"), - Percentage(title = _("Critical at a utilization of"), label = "%")]), - label = _("Alert on too high CPU utilization"), - help = _("The CPU utilization sums up the percentages of CPU time that is used " - "for user processes and kernel routines over all available cores within " - "the last check interval. 
The possible range is from 0% to 100%")), - None, None)) - -checkgroups.append(( - subgroup_os, - "cpu_iowait", - _("CPU utilization (disk wait)"), - Optional( - Tuple( - elements = [ - Percentage(title = _("Warning at a disk wait of"), label = "%"), - Percentage(title = _("Critical at a disk wait of"), label = "%")]), - label = _("Alert on too high disk wait (IO wait)"), - help = _("The CPU utilization sums up the percentages of CPU time that is used " - "for user processes, kernel routines (system), disk wait (sometimes also " - "called IO wait) or nothing (idle). " - "Currently you can only set warning/critical levels to the disk wait. This " - "is the total percentage of time all CPUs have nothing else to do then waiting " - "for data coming from or going to disk. If you have a significant disk wait " - "the the bottleneck of your server is IO. Please note that depending on the " - "applications being run this might or might not be totally normal.")), - None, None)) - -checkgroups.append(( - subgroup_environment, - "akcp_humidity", - _("AKCP Humidity Levels"), - Tuple( - help = _("This Rulset sets the threshold limits for humidity sensors attached to " - "AKCP Sensor Probe "), - elements = [ - Integer(title = _("Critical if moisture lower than")), - Integer(title = _("Warning if moisture lower than")), - Integer(title = _("Warning if moisture higher than")), - Integer(title = _("Critical if moisture higher than")), - ]), - TextAscii( - title = _("Service descriptions"), - allow_empty = False), - None)) - -checkgroups.append(( - subgroup_applications, - "oracle_tablespaces", - _("Oracle Tablespaces"), - Dictionary( - elements = [ - ("levels", - Alternative( - title = _("Levels for the Tablespace size"), - elements = [ - Tuple( - title = _("Percentage free space"), - elements = [ - Percentage(title = _("Warning below"), label = _("free")), - Percentage(title = _("Critical below"), label = _("free")), - ] - ), - Tuple( - title = _("Absolute free space"), - elements = [ - Integer(title = _("Warning lower than"), unit = _("MB")), - Integer(title = _("Critical lower than"), unit = _("MB")), - ] - ), - ListOf( - Tuple( - orientation = "horizontal", + "memory", + _("Main memory usage (UNIX / Other Devices)"), + Transform( + Dictionary( + elements = [ + ( "levels", + Alternative( + title = _("Levels for memory"), + show_alternative_title = True, + default_value = (150.0, 200.0), + match = match_dual_level_type, + help = _("The used and free levels for the memory on UNIX systems take into account the " + "currently used memory (RAM or SWAP) by all processes and sets this in relation " + "to the total RAM of the system. This means that the memory usage can exceed 100%. " + "A usage of 200% means that the total size of all processes is twice as large as " + "the main memory, so at least half of it is currently swapped out. 
For systems " + "without Swap space you should choose levels below 100%."), + elements = [ + Alternative( + title = _("Levels for used memory"), elements = [ - Filesize(title = _("Tablespace larger than")), + Tuple( + title = _("Specify levels in percentage of total RAM"), + elements = [ + Percentage(title = _("Warning at a usage of"), maxvalue = None), + Percentage(title = _("Critical at a usage of"), maxvalue = None) + ] + ), + Tuple( + title = _("Specify levels in absolute values"), + elements = [ + Integer(title = _("Warning at"), unit = _("MB")), + Integer(title = _("Critical at"), unit = _("MB")) + ] + ), + ] + ), + Transform( Alternative( - title = _("Levels for the Tablespace size"), elements = [ Tuple( - title = _("Percentage free space"), + title = _("Specify levels in percentage of total RAM"), elements = [ - Percentage(title = _("Warning below"), label = _("free")), - Percentage(title = _("Critical below"), label = _("free")), + Percentage(title = _("Warning if less than"), maxvalue = None), + Percentage(title = _("Critical if less than"), maxvalue = None) ] ), Tuple( - title = _("Absolute free space"), + title = _("Specify levels in absolute values"), elements = [ - Integer(title = _("Warning lower than"), unit = _("MB")), - Integer(title = _("Critical lower than"), unit = _("MB")), + Integer(title = _("Warning if below"), unit = _("MB")), + Integer(title = _("Critical if below"), unit = _("MB")) ] ), ] ), - ], - ), - title = _('Dynamic levels'), - ), - ] - ) - ), - ("magic", - Float( - title = _("Magic factor (automatic level adaptation for large tablespaces)"), - minvalue = 0.1, - maxvalue = 1.0)), - ( "magic_normsize", - Integer( - title = _("Reference size for magic factor"), - minvalue = 1, - default_value = 1000, - label = _("MB"))), - ( "levels_low", - Tuple( - title = _("Minimum levels if using magic factor"), - help = _("The tablespace levels will never fall below these values, when using " - "the magic factor and the tablespace is very small."), - elements = [ - Percentage(title = _("Warning at"), label = _("usage"), allow_int = True), - Percentage(title = _("Critical at"), label = _("usage"), allow_int = True)])), - ( "autoextend", - Checkbox( - title = _("Autoextend"), - label = _("Autoextension is expected"), - help = "")), - ]), - TextAscii( - title = _("Explicit tablespaces"), - help = _("Here you can set explicit tablespaces by defining them via SID and the tablespace name, separated by a dot, for example pengt.TEMP"), - regex = '.+\..+', - allow_empty = False), - None)) - + title = _("Levels for free memory"), + help = _("Keep in mind that if you have 1GB RAM and 1GB SWAP you need to " + "specify 120% or 1200MB to get an alert if there is only 20% free RAM available. " + "The free memory levels do not work with the fortigate check, because it does " + "not provide total memory data."), + allow_empty = False, + forth = lambda val: tuple(map(lambda x: -x, val)), + back = lambda val: tuple(map(lambda x: -x, val)) + ) + ] + ), + ), + ("average", + Integer( + title = _("Averaging"), + help = _("If this parameter is set, all measured values will be averaged " + "over the specified time interval before levels are being applied. 
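# The "120%" example from the help text above, as arithmetic (sketch): free
# levels count free RAM plus free swap, but the percentage is taken relative
# to RAM alone.
ram_mb, swap_mb = 1024, 1024
free_level_mb = 0.20 * ram_mb + swap_mb   # 20% of RAM still free, swap untouched
assert round(free_level_mb) == 1229       # roughly the 1200 MB from the help text
assert round(100.0 * free_level_mb / ram_mb) == 120   # -> configure "120%"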
Per " + "default, averaging is turned off."), + unit = _("minutes"), + minvalue = 1, + default_value = 60, + ) + ), + ], + optional_keys = [ "average" ], + ), + forth = lambda t: type(t) == tuple and { "levels" : t } or t, + ), + None, None +) -checkgroups.append(( - subgroup_applications, - "oracle_logswitches", - _("Oracle Logswitches"), - Tuple( - help = _("This check monitors the number of log switches of an ORACLE " - "database instance in the last 60 minutes. You can set levels for upper and lower bounds."), - elements = [ - Integer(title = _("Critical if fewer than"), unit=_("log switches")), - Integer(title = _("Warning if fewer than"), unit=_("log switches")), - Integer(title = _("Warning if more than"), unit=_("log switches")), - Integer(title = _("Critical if more than"), unit=_("log switches")), - ]), +register_check_parameters( + subgroup_os, + "memory_multiitem", + _("Main memory usage of devices with modules"), + Dictionary( + help = _("The memory levels for one specific module of this host. This is relevant for hosts that have " + "several distinct memory areas, e.g. pluggable cards"), + elements = [ + ("levels", Alternative( + title = _("Memory levels"), + elements = [ + Tuple( + title = _("Specify levels in percentage of total RAM"), + elements = [ + Percentage(title = _("Warning at a memory usage of"), default_value = 80.0, maxvalue = None), + Percentage(title = _("Critical at a memory usage of"), default_value = 90.0, maxvalue = None)]), + Tuple( + title = _("Specify levels in absolute usage values"), + elements = [ + Filesize(title = _("Warning at")), + Filesize(title = _("Critical at"))]), + ])), + ], + optional_keys = []), TextAscii( - title = _("Service descriptions"), - allow_empty = False), - None)) - + title = _("Module name"), + allow_empty = False + ), + "match" +) -checkgroups.append(( - subgroup_applications, - "mssql_backup", - _("MSSQL Backups"), - Optional( +register_check_parameters( + subgroup_networking, + "mem_cluster", + _("Memory Usage of Clusters"), + ListOf( Tuple( elements = [ - Integer(title = _("Warning if more than"), unit = _("seconds")), - Integer(title = _("Critical if more than"), unit = _("seconds")) + Integer(title = _("Equal or more than"), unit = _("nodes")), + Tuple( + title = _("Percentage of total RAM"), + elements = [ + Percentage(title = _("Warning at a RAM usage of"), default_value = 80.0), + Percentage(title = _("Critical at a RAM usage of"), default_value = 90.0), + ]) ] ), - title = _("Specify time since last successful backup"), - help = _("The levels for memory usage on Linux and UNIX systems take into account the " - "currently used memory (RAM or SWAP) by all processes and sets this in relation " - "to the total RAM of the system. This means that the memory usage can exceed 100%. 
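# Effect of the Transform's forth function above (sketch): memory rules
# created before the dictionary format simply carried a levels tuple, which
# is wrapped into the new dictionary; dictionaries pass through unchanged.
upgrade = lambda t: type(t) == tuple and { "levels" : t } or t

assert upgrade((150.0, 200.0)) == { "levels" : (150.0, 200.0) }
assert upgrade({ "levels" : (150.0, 200.0), "average" : 15 }) == \
       { "levels" : (150.0, 200.0), "average" : 15 }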
" - "A usage of 200% means that the total size of all processes is twice as large as " - "the main memory, so at least the half of it is currently swapped out."), + help = _("Here you can specify the total memory usage levels for clustered hosts."), + title = _("Memory Usage"), + add_label = _("Add limits") ), - TextAscii( - title = _("Service descriptions"), - allow_empty = False), - None)) + None, + "first", + False +) -checkgroups.append(( - subgroup_applications, - "mssql_counters_locks", - _("MSSQL Locks"), - Dictionary( - help = _("This check monitors locking related information of MSSQL tablespaces."), +register_check_parameters( + subgroup_os, + "esx_host_memory", + _("Main memory usage of ESX host system"), + Tuple( + title = _("Specify levels in percentage of total RAM"), + elements = [ + Percentage(title = _("Warning at a RAM usage of"), default_value = 80.0), + Percentage(title = _("Critical at a RAM usage of"), default_value = 90.0), + ]), + None, None +) + +register_check_parameters( + subgroup_os, + "vm_guest_tools", + _("Virtual machine (for example ESX) guest tools status"), + Dictionary( + optional_keys = False, elements = [ - ("lock_requests/sec", - Tuple( - title = _("Lock Requests / sec"), - help = _("Number of new locks and lock conversions per second requested from the lock manager."), - elements = [ - Float(title = _("Warning at"), unit = _("requests/sec")), - Float(title = _("Critical at"), unit = _("requests/sec")), - ], - ), + ( "guestToolsCurrent", + MonitoringState( + title = _("VMware Tools is installed, and the version is current"), + default_value = 0, + ) ), - ( "lock_timeouts/sec", - Tuple( - title = _("Lock Timeouts / sec"), - help = _("Number of lock requests per second that timed out, including requests for NOWAIT locks."), - elements = [ - Float(title = _("Warning at"), unit = _("timeouts/sec")), - Float(title = _("Critical at"), unit = _("timeouts/sec")), - ], - ), + ( "guestToolsNeedUpgrade", + MonitoringState( + title = _("VMware Tools is installed, but the version is not current"), + default_value = 1, + ) ), - ( "number_of_deadlocks/sec", - Tuple( - title = _("Number of Deadlocks / sec"), - help = _("Number of lock requests per second that resulted in a deadlock."), - elements = [ - Float(title = _("Warning at"), unit = _("deadlocks/sec")), - Float(title = _("Critical at"), unit = _("deadlocks/sec")), - ], - ), + ( "guestToolsNotInstalled", + MonitoringState( + title = _("VMware Tools have never been installed"), + default_value = 2, + ) ), - ( "lock_waits/sec", - Tuple( - title = _("Lock Waits / sec"), - help = _("Number of lock requests per second that required the caller to wait."), - elements = [ - Float(title = _("Warning at"), unit = _("waits/sec")), - Float(title = _("Critical at"), unit = _("waits/sec")), - ], - ), + ( "guestToolsUnmanaged", + MonitoringState( + title = _("VMware Tools is installed, but it is not managed by VMWare"), + default_value = 1, + ) ), ] - ), - TextAscii( - title = _("Service descriptions"), - allow_empty = False - ), - None)) - - -checkgroups.append(( - subgroup_applications, - "mysql_sessions", - _("MySQL Sessions & Connections"), - Dictionary( - help = _("This check monitors the current number of active sessions to the MySQL " - "database server as well as the connection rate."), + ), + None, + "dict", +) +register_check_parameters( + subgroup_os, + "vm_heartbeat", + _("Virtual machine (for example ESX) heartbeat status"), + Dictionary( + optional_keys = False, elements = [ - ( "total", - Tuple( - title = 
_("Number of current sessions"), - elements = [ - Integer(title = _("Warning at"), unit = _("sessions"), default_value = 100), - Integer(title = _("Critical at"), unit = _("sessions"), default_value = 200), - ], - ), + ( "heartbeat_missing", + MonitoringState( + title = _("No heartbeat"), + help = _("Guest operating system may have stopped responding."), + default_value = 2, + ) ), - ( "running", - Tuple( - title = _("Number of currently running sessions"), - help = _("Levels for the number of sessions that are currently active"), - elements = [ - Integer(title = _("Warning at"), unit = _("sessions"), default_value = 10), - Integer(title = _("Critical at"), unit = _("sessions"), default_value = 20), - ], - ), + ( "heartbeat_intermittend", + MonitoringState( + title = _("Intermittent heartbeat"), + help = _("May be due to high guest load."), + default_value = 1, + ) ), - ( "connections", - Tuple( - title = _("Number of new connections per second"), - elements = [ - Integer(title = _("Warning at"), unit = _("connection/sec"), default_value = 20), - Integer(title = _("Critical at"), unit = _("connection/sec"), default_value = 40), - ], - ), + ( "heartbeat_no_tools", + MonitoringState( + title = _("Heartbeat tools missing or not installed"), + help = _("No VMWare Tools installed."), + default_value = 1, + ) + ), + ( "heartbeat_ok", + MonitoringState( + title = _("Heartbeat OK"), + help = _("Guest operating system is responding normally."), + default_value = 0, + ) ), ] - ), + ), None, - None)) + "dict", +) -checkgroups.append(( +register_check_parameters( subgroup_applications, - "mysql_innodb_io", - _("MySQL InnoDB Throughput"), + "services_summary", + _("Windows Service Summary"), Dictionary( + title = _('Autostart Services'), elements = [ - ( "read", - Tuple( - title = _("Read throughput"), - elements = [ - Float(title = _("warning at"), unit = _("MB/s")), - Float(title = _("critical at"), unit = _("MB/s")) - ])), - ( "write", - Tuple( - title = _("Write throughput"), - elements = [ - Float(title = _("warning at"), unit = _("MB/s")), - Float(title = _("critical at"), unit = _("MB/s")) - ])), - ( "average", - Integer( - title = _("Average"), - help = _("When averaging is set, then an floating average value " - "of the disk throughput is computed and the levels for read " - "and write will be applied to the average instead of the current " - "value."), - unit = "min")) - ]), + ('ignored', + ListOfStrings( + title = _("Ignored autostart services"), + help = _('Regular expressions matching the begining of the internal name ' + 'or the description of the service. ' + 'If no name is given then this rule will match all services. The ' + 'match is done on the beginning of the service name. It ' + 'is done case sensitive. You can do a case insensitive match ' + 'by prefixing the regular expression with (?i). Example: ' + '(?i).*mssql matches all services which contain MSSQL ' + 'or MsSQL or mssql or...'), + orientation = "horizontal", + )), + ('state_if_stopped', + MonitoringState( + title = _("Default state if stopped autostart services are found"), + default_value = 0, + )), + ], + ), None, - "dict")) + "dict" +) -checkgroups.append(( +register_check_parameters( subgroup_applications, - "mysql_connections", - _("MySQL Connections"), + "esx_vsphere_objects", + _("State of ESX hosts and virtual machines"), Dictionary( + help = _("Usually the check goes to WARN if a VM or host is powered off and OK otherwise. 
" + "You can change this behaviour on a per-state-basis here."), + optional_keys = False, elements = [ - ( "perc_used", - Tuple( - title = _("Max. parallel connections"), - help = _("Compares the the maximum number of connections that have been " - "in use simultaneously since the server started with the maximum parallel " - "connections allowed by the configuration of the server. This threshold " - "makes the check raises warning/critical states if the percentage is equal to " - "or above the configured levels."), - elements = [ - Percentage(title = _("Warning at")), - Percentage(title = _("Critical at")), - ] - ) - ), - ]), - None, - "dict")) + ( "states", + Dictionary( + title = _("Target states"), + optional_keys = False, + elements = [ + ( "poweredOn", + MonitoringState( + title = _("Powered ON"), + help = _("Check result if the host or VM is powered on"), + default_value = 0, + ) + ), + ( "poweredOff", + MonitoringState( + title = _("Powered OFF"), + help = _("Check result if the host or VM is powered off"), + default_value = 1, + ) + ), + ( "suspended", + MonitoringState( + title = _("Suspended"), + help = _("Check result if the host or VM is suspended"), + default_value = 1, + ) + ), + ( "unknown", + MonitoringState( + title = _("Unknown"), + help = _("Check result if the host or VM state is reported as unknown"), + default_value = 3, + ) + ), + ] + ) + ), + ] + ), + TextAscii( + title = _("Name of the VM/HostSystem"), + help = _("Please do not forget to specify either VM or HostSystem. Example: VM abcsrv123. Also note, " + "that we match the beginning of the name."), + regex = "(^VM|HostSystem)( .*|$)", + regex_error = _("The name of the system must begin with VM or HostSystem."), + allow_empty = False, + ), + "dict", +) -checkgroups.append(( - subgroup_applications, - "dbsize", - _("Size of MySQL/PostgresQL databases"), - Optional( +def transform_printer_supply(l): + if len(l) == 2: + return l[0], l[1], False + return l + +register_check_parameters( + subgroup_printing, + "printer_supply", + _("Printer cartridge levels"), + Transform( Tuple( elements = [ - Integer(title = _("warning at"), unit = _("MB")), - Integer(title = _("critical at"), unit = _("MB")), - ]), - help = _("The check will trigger a warning or critical state if the size of the " - "database exceeds these levels."), - title = _("Impose limits on the size of the database"), + Percentage( + title = _("Warning remaining"), + allow_int = True, + default_value = 20.0, + help = _("For consumable supplies, this is configured as the percentage of " + "remaining capacity. For supplies that fill up, this is configured " + "as remaining space."), + ), + Percentage( + title = _("Critical remaining"), + allow_int = True, + default_value = 10.0, + help = _("For consumable supplies, this is configured as the percentage of " + "remaining capacity. For supplies that fill up, this is configured " + "as remaining space."), + ), + Checkbox( + title = _("Upturn toner levels"), + label = _("Printer sends used material instead of remaining"), + help = _("Some Printers (eg. Konica for Drum Cartdiges) returning the available" + " fuel instead of what is left. 
In this case it's possible" + " to upturn the levels to handle this behavior") + ), + ] + ), + forth = transform_printer_supply, ), TextAscii( - title = _("Name of the database"), + title = _("cartridge specification"), + allow_empty = True ), - "first")) - -checkgroups.append(( - subgroup_applications, - "postgres_sessions", - _("PostgreSQL Sessions"), - Dictionary( - help = _("This check monitors the current number of active and idle sessions on PostgreSQL"), - elements = [ - ( "total", - Tuple( - title = _("Number of current sessions"), - elements = [ - Integer(title = _("Warning at"), unit = _("sessions"), default_value = 100), - Integer(title = _("Critical at"), unit = _("sessions"), default_value = 200), - ], - ), - ), - ( "running", - Tuple( - title = _("Number of currently running sessions"), - help = _("Levels for the number of sessions that are currently active"), - elements = [ - Integer(title = _("Warning at"), unit = _("sessions"), default_value = 10), - Integer(title = _("Critical at"), unit = _("sessions"), default_value = 20), - ], - ), + None, +) +register_check_parameters( + subgroup_printing, + "windows_printer_queues", + _("Number of open jobs of a printer on windows" ), + Transform( + Optional( + Tuple( + help = _("This rule is applied to the number of print jobs " + "currently waiting in windows printer queue."), + elements = [ + Integer(title = _("Warning at"), unit = _("jobs"), default_value = 40), + Integer(title = _("Critical at"), unit = _("jobs"), default_value = 60), + ] ), - ] + label=_('Enable thresholds on the number of jobs'), + ), + forth = lambda old: old != (None, None) and old or None, ), - None, - None)) - - -checkgroups.append(( - subgroup_applications, - "oracle_sessions", - _("Oracle Sessions"), - Tuple( - title = _("Number of active sessions"), - help = _("This check monitors the current number of active sessions on Oracle"), - elements = [ - Integer(title = _("Warning at"), unit = _("sessions"), default_value = 100), - Integer(title = _("Critical at"), unit = _("sessions"), default_value = 200), - ], - ), TextAscii( - title = _("Database name"), - allow_empty = False), - None)) + title = _("Printer Name"), + allow_empty = True + ), + None +) -checkgroups.append(( - subgroup_applications, - "postgres_stat_database", - _("PostgreSQL Database Statistics"), +register_check_parameters( + subgroup_printing, + "printer_input", + _("Printer Input Units"), Dictionary( - help = _("This check monitors how often database objects in a PostgreSQL Database are accessed"), - elements = [ - ( "blocks_read", - Tuple( - title = _("Blocks read"), - elements = [ - Float(title = _("Warning at"), unit = _("blocks/s")), - Float(title = _("Critical at"), unit = _("blocks/s")), - ], - ), - ), - ( "xact_commit", - Tuple( - title = _("Commits"), - elements = [ - Float(title = _("Warning at"), unit = _("/s")), - Float(title = _("Critical at"), unit = _("/s")), - ], - ), - ), - ( "tup_fetched", - Tuple( - title = _("Fetches"), - elements = [ - Float(title = _("Warning at"), unit = _("/s")), - Float(title = _("Critical at"), unit = _("/s")), - ], - ), - ), - ( "tup_deleted", - Tuple( - title = _("Deletes"), - elements = [ - Float(title = _("Warning at"), unit = _("/s")), - Float(title = _("Critical at"), unit = _("/s")), - ], - ), - ), - ( "tup_updated", - Tuple( - title = _("Updates"), - elements = [ - Float(title = _("Warning at"), unit = _("/s")), - Float(title = _("Critical at"), unit = _("/s")), - ], - ), - ), - ( "tup_inserted", - Tuple( - title = _("Inserts"), - elements 
= [ - Float(title = _("Warning at"), unit = _("/s")), - Float(title = _("Critical at"), unit = _("/s")), - ], - ), - ), + elements = [ + ('capacity_levels', Tuple( + title = _('Capacity remaining'), + elements = [ + Percentage(title = _("Warning at"), default_value = 0.0), + Percentage(title = _("Critical at"), default_value = 0.0), + ], + )), ], + default_keys = ['capacity_levels'], ), TextAscii( - title = _("Database name"), - allow_empty = False), - None)) + title = _('Unit Name'), + allow_empty = True + ), + None, +) -checkgroups.append(( - subgroup_applications, - "win_dhcp_pools", - _("Windows DHCP Pool"), - Tuple( - help = _("The count of remaining entries in the DHCP pool represents " - "the number of IP addresses left which can be assigned in the network"), - elements = [ - Percentage(title = _("Warning if pool usage higher than")), - Percentage(title = _("Critical if pool usage higher than")), - ]), +register_check_parameters( + subgroup_printing, + "printer_output", + _("Printer Output Units"), + Dictionary( + elements = [ + ('capacity_levels', Tuple( + title = _('Capacity filled'), + elements = [ + Percentage(title = _("Warning at"), default_value = 0.0), + Percentage(title = _("Critical at"), default_value = 0.0), + ], + )), + ], + default_keys = ['capacity_levels'], + ), TextAscii( - title = _("Service descriptions"), - allow_empty = False), - None)) + title = _('Unit Name'), + allow_empty = True + ), + None, +) -checkgroups.append(( +register_check_parameters( subgroup_os, - "threads", - _("Number of threads"), - Tuple( - help = _("These levels check the number of currently existing threads on the system. Each process has at " - "least one thread."), - elements = [ - Integer(title = _("Warning at"), unit = _("threads"), default_value = 1000), - Integer(title = _("Critical at"), unit = _("threads"), default_value = 2000)]), - None, None)) + "cpu_load", + _("CPU load (not utilization!)"), + Levels( + help = _("The CPU load of a system is the number of processes currently being " + "in the state running, i.e. either they occupy a CPU or wait " + "for one. The load average is the averaged CPU load over the last 1, " + "5 or 15 minutes. The following levels will be applied on the average " + "load. On Linux system the 15-minute average load is used when applying " + "those levels. The configured levels are multiplied with the number of " + "CPUs, so you should configure the levels based on the value you want to " + "be warned \"per CPU\"."), + unit = "per core", + default_difference = (2.0, 4.0), + default_levels = (5.0, 10.0), + ), + None, None +) -checkgroups.append(( - subgroup_applications, - "vms_procs", - _("Number of processes on OpenVMS"), +register_check_parameters( + subgroup_os, + "cpu_utilization", + _("CPU utilization for Appliances"), Optional( Tuple( elements = [ - Integer(title = _("Warning at"), unit = _("processes"), default_value = 100), - Integer(title = _("Critical at"), unit = _("processes"), default_value = 200)]), - title = _("Impose levels on number of processes"), - ), - None, None)) + Percentage(title = _("Warning at a utilization of")), + Percentage(title = _("Critical at a utilization of"))]), + label = _("Alert on too high CPU utilization"), + help = _("The CPU utilization sums up the percentages of CPU time that is used " + "for user processes and kernel routines over all available cores within " + "the last check interval. 
The possible range is from 0% to 100%"), + default_value = (90.0, 95.0)), + None, None +) -checkgroups.append(( +register_check_parameters( subgroup_os, - "vm_counter", - _("Number of kernel events per second"), + "cpu_utilization_multiitem", + _("CPU utilization of Devices with Modules"), + Dictionary( + help = _("The CPU utilization sums up the percentages of CPU time that is used " + "for user processes and kernel routines over all available cores within " + "the last check interval. The possible range is from 0% to 100%"), + elements = [ + ("levels", Tuple( + title = _("Alert on too high CPU utilization"), + elements = [ + Percentage(title = _("Warning at a utilization of"), default_value=90.0), + Percentage(title = _("Critical at a utilization of"), default_value=95.0)], + ), + ), + ] + ), + TextAscii( + title = _("Module name"), + allow_empty = False + ), + None +) + +register_check_parameters( + subgroup_os, + "fpga_utilization", + _("FPGA utilization"), + Dictionary( + help = _("Give FPGA utilization levels in percent. The possible range is from 0% to 100%."), + elements = [ + ("levels", Tuple( + title = _("Alert on too high FPGA utilization"), + elements = [ + Percentage(title = _("Warning at a utilization of"), default_value = 80.0), + Percentage(title = _("Critical at a utilization of"), default_value = 90.0)], + ), + ), + ] + ), + TextAscii( + title = _("FPGA"), + allow_empty = False + ), + None +) + + +register_check_parameters( + subgroup_os, + "cpu_utilization_os", + _("CPU utilization for Windows and ESX Hosts"), + Dictionary( + help = _("This rule configures levels for the CPU utilization (not load) for " + "the operating systems Windows and VMware ESX host systems. The utilization " + "ranges from 0 to 100 - regardless of the number of CPUs."), + elements = [ + ( "levels", + Levels( + title = _("Levels"), + unit = "%", + default_levels = (85, 90), + default_difference = (5, 8), + default_value = None, + ), + ), + ( "average", + Integer( + title = _("Averaging"), + help = _("When this option is activated, the CPU utilization is " + "averaged before the levels are applied."), + unit = "min", + default_value = 15, + label = _("Compute average over last "), + )), + ] + ), + None, None +) + +register_check_parameters( + subgroup_os, + "cpu_iowait", + _("CPU utilization on Linux/UNIX"), + Transform( + Dictionary( + elements = [ + ( "util", + Tuple( + title = _("Alert on too high CPU utilization"), + elements = [ + Percentage(title = _("Warning at a utilization of"), default_value = 90.0), + Percentage(title = _("Critical at a utilization of"), default_value = 95.0)], + + help = _("Here you can set levels on the total CPU utilization, i.e. the sum of " + "system, user and iowait. The levels are always applied " + "on the average utilization since the last check - which is usually one minute."), + ) + ), + ( "iowait", + Tuple( + title = _("Alert on too high disk wait (IO wait)"), + elements = [ + Percentage(title = _("Warning at a disk wait of"), default_value = 30.0), + Percentage(title = _("Critical at a disk wait of"), default_value = 50.0)], + help = _("The CPU utilization sums up the percentages of CPU time that is used " + "for user processes, kernel routines (system), disk wait (sometimes also " + "called IO wait) or nothing (idle). " + "Currently you can only set warning/critical levels to the disk wait. This " + "is the total percentage of time all CPUs have nothing else to do than waiting " + "for data coming from or going to disk. 
If you have a significant disk wait " + "then the bottleneck of your server is IO. Please note that depending on the " + "applications being run this might or might not be totally normal.")), + ), + ] + ), + forth = lambda old: type(old) != dict and { "iowait" : old } or old, + ), + None, + "dict", +) + +register_check_parameters( + subgroup_environment, + "humidity", + _("Humidity Levels"), Tuple( - help = _("This ruleset applies to several similar checks measing various kernel " - "events like context switches, process creations and major page faults. " - "Please create separate rules for each type of kernel counter you " - "want to set levels for."), - show_titles = False, + help = _("This ruleset sets the threshold limits for humidity sensors"), elements = [ - Optional( - Float(label = _("events per second")), - title = _("Set warning level:"), - sameline = True), - Optional( - Float(label = _("events per second")), - title = _("Set critical level:"), - sameline = True)]), + Integer(title = _("Critical at or below"), unit="%" ), + Integer(title = _("Warning at or below"), unit="%" ), + Integer(title = _("Warning at or above"), unit="%" ), + Integer(title = _("Critical at or above"), unit="%" ), + ]), + TextAscii( + title = _("Sensor names"), + allow_empty = False), + None +) - DropdownChoice( - title = _("kernel counter"), - choices = [ - ( "Context Switches", _("Context Switches") ), - ( "Process Creations", _("Process Creations") ), - ( "Major Page Faults", _("Major Page Faults") )]), - "first")) +register_check_parameters( + subgroup_environment, + "single_humidity", + _("Humidity Levels for devices with a single sensor"), + Tuple( + help = _("This ruleset sets the threshold limits for humidity sensors"), + elements = [ + Integer(title = _("Critical at or below"), unit="%" ), + Integer(title = _("Warning at or below"), unit="%" ), + Integer(title = _("Warning at or above"), unit="%" ), + Integer(title = _("Critical at or above"), unit="%" ), + ]), + None, + None +) -checkgroups.append(( - subgroup_storage, - "disk_io", - _("Levels on disk IO (throughput)"), + +register_check_parameters( + subgroup_applications, + "oracle_tablespaces", + _("Oracle Tablespaces"), Dictionary( + help = _("A tablespace is a container for segments (tables, indexes, etc). A " + "database consists of one or more tablespaces, each made up of one or " + "more data files. Tables and indexes are created within a particular " + "tablespace. 
This rule allows you to define checks on the size of tablespaces."), elements = [ - ( "read", - Tuple( - title = _("Read throughput"), - elements = [ - Float(title = _("warning at"), unit = _("MB/s")), - Float(title = _("critical at"), unit = _("MB/s")) - ])), - ( "write", + ("levels", + Alternative( + title = _("Levels for the Tablespace usage"), + default_value = (10.0, 5.0), + elements = [ + Tuple( + title = _("Percentage free space"), + elements = [ + Percentage(title = _("Warning if below"), unit = _("% free")), + Percentage(title = _("Critical if below"), unit = _("% free")), + ] + ), + Tuple( + title = _("Absolute free space"), + elements = [ + Integer(title = _("Warning if below"), unit = _("MB"), default_value = 1000), + Integer(title = _("Critical if below"), unit = _("MB"), default_value = 500), + ] + ), + ListOf( + Tuple( + orientation = "horizontal", + elements = [ + Filesize(title = _("Tablespace larger than")), + Alternative( + title = _("Levels for the Tablespace size"), + elements = [ + Tuple( + title = _("Percentage free space"), + elements = [ + Percentage(title = _("Warning if below"), unit = _("% free")), + Percentage(title = _("Critical if below"), unit = _("% free")), + ] + ), + Tuple( + title = _("Absolute free space"), + elements = [ + Integer(title = _("Warning if below"), unit = _("MB")), + Integer(title = _("Critical if below"), unit = _("MB")), + ] + ), + ] + ), + ], + ), + title = _('Dynamic levels'), + ), + ] + ) + ), + ("magic", + Float( + title = _("Magic factor (automatic level adaptation for large tablespaces)"), + help = _("This is only used in case of percentage levels"), + minvalue = 0.1, + maxvalue = 1.0, + default_value = 0.9)), + ( "magic_normsize", + Integer( + title = _("Reference size for magic factor"), + minvalue = 1, + default_value = 1000, + unit = _("MB"))), + ( "magic_maxlevels", Tuple( - title = _("Write throughput"), + title = _("Maximum levels if using magic factor"), + help = _("The tablespace levels will never be raised above these values when using " + "the magic factor and the tablespace is very small."), elements = [ - Float(title = _("warning at"), unit = _("MB/s")), - Float(title = _("critical at"), unit = _("MB/s")) - ])), - ( "average", - Integer( - title = _("Average"), - help = _("When averaging is set, then an floating average value " - "of the disk throughput is computed and the levels for read " - "and write will be applied to the average instead of the current " - "value."), - unit = "min")), - ( "latency", + Percentage(title = _("Maximum warning level"), unit = _("% free"), allow_int = True, default_value = 60.0), + Percentage(title = _("Maximum critical level"), unit = _("% free"), allow_int = True, default_value = 50.0)])), + ( "autoextend", + Checkbox( + title = _("Autoextend"), + label = _("Autoextension is expected"), + help = "")), + ( "defaultincrement", + Checkbox( + title = _("Default Increment"), + label = _("State is WARNING in case the next extent has the default size."), + help = "")), + ]), + TextAscii( + title = _("Explicit tablespaces"), + help = _("Here you can set explicit tablespaces by defining them via SID and the tablespace name, separated by a dot, for example pengt.TEMP"), + regex = '.+\..+', + allow_empty = False), + None +) + +register_check_parameters( + subgroup_applications, + "oracle_processes", + _("Oracle Processes"), + Dictionary( + help = _("Here you can override the default levels for the ORACLE Processes check. 
The levels " + "are applied on the number of used processes in percentage of the configured limit."), + elements = [ + ( "levels", + Tuple( + title = _("Levels for used processes"), + elements = [ + Percentage(title = _("Warning if more than"), default_value = 70.0), + Percentage(title = _("Critical if more than"), default_value = 90.0) + ] + ) + ), + ], + optional_keys = False, + ), + TextAscii( + title = _("Database SID"), + size = 12, + allow_empty = False), + "dict", +) + +register_check_parameters( + subgroup_applications, + "oracle_logswitches", + _("Oracle Logswitches"), + Tuple( + help = _("This check monitors the number of log switches of an ORACLE " + "database instance in the last 60 minutes. You can set levels for upper and lower bounds."), + elements = [ + Integer(title = _("Critical at or below"), unit=_("log switches / hour"), default_value = -1), + Integer(title = _("Warning at or below"), unit=_("log switches / hour"), default_value = -1), + Integer(title = _("Warning at or above"), unit=_("log switches / hour"), default_value = 50), + Integer(title = _("Critical at or above"), unit=_("log switches / hour"), default_value = 100), + ]), + TextAscii( + title = _("Database SID"), + size = 12, + allow_empty = False), + "first", +) + +register_check_parameters( + subgroup_applications, + "oracle_recovery_area", + _("Oracle Recovery Area"), + Dictionary( + elements = [ + ("levels", + Tuple( + title = _("Levels for used space (reclaimable is considered as free)"), + elements = [ + Percentage(title = _("warning at"), default_value = 70.0), + Percentage(title = _("critical at"), default_value = 90.0), + ] + ) + ) + ] + ), + TextAscii( + title = _("Database SID"), + size = 12, + allow_empty = False), + "dict", +) + +register_check_parameters( + subgroup_applications, + "oracle_dataguard_stats", + _("Oracle Data-Guard Stats"), + Dictionary( + help = _("The Data-Guard statistics are available in Oracle Enterprise Edition with enabled Data-Guard. " + "The init.ora parameter dg_broker_start must be TRUE for this check. " + "The apply and transport lag can be configured with this rule."), + elements = [ + ( "apply_lag", Tuple( - title = _("IO Latency"), + title = _("Apply Lag Maximum Time"), + help = _( "The maximum limit for the apply lag in v$dataguard_stats."), elements = [ - Float(title = _("warning at"), unit = _("ms"), default_value = 80.0), - Float(title = _("critical at"), unit = _("ms"), default_value = 160.0), - ])), - ( "latency_perfdata", - Checkbox( - title = _("Performance Data for Latency"), - label = _("Collect performance data for disk latency"), - help = _("Note: enabling performance data for the latency might " - "cause incompatibilities with existing historical data " - "if you are running PNP4Nagios in SINGLE mode.")), - ), - ( "read_ql", + Age(title = _("Warning at"),), + Age(title = _("Critical at"),)])), + ( "apply_lag_min", Tuple( - title = _("Read Queue-Length"), + title = _("Apply Lag Minimum Time"), + help = _( "The minimum limit for the apply lag in v$dataguard_stats. 
" + "This is only useful if also Apply Lag Maximum Time has been configured."), elements = [ - Float(title = _("warning at"), default_value = 80.0), - Float(title = _("critical at"), default_value = 90.0), - ])), - ( "write_ql", + Age(title = _("Warning at"),), + Age(title = _("Critical at"),)])), + ( "transport_lag", Tuple( - title = _("Write Queue-Length"), + title = _("Transport Lag"), + help = _( "The limit for the transport lag in v$dataguard_stats"), elements = [ - Float(title = _("warning at"), default_value = 80.0), - Float(title = _("critical at"), default_value = 90.0), - ])), - ( "ql_perfdata", - Checkbox( - title = _("Performance Data for Queue Length"), - label = _("Collect performance data for disk latency"), - help = _("Note: enabling performance data for the latency might " - "cause incompatibilities with existing historical data " - "if you are running PNP4Nagios in SINGLE mode.")), - ), - ]), - OptionalDropdownChoice( - choices = [ ( "SUMMARY", _("Summary of all disks") ), - ( "read", _("Summary of disk input (read)") ), - ( "write", _("Summary of disk output (write)") ), - ], + Age(title = _("Warning at"),), + Age(title = _("Critical at"),)])), + ]), + TextAscii( + title = _("Database SID"), + size = 12, + allow_empty = False), + "dict", +) + +register_check_parameters( + subgroup_applications, + "oracle_undostat", + _("Oracle Undo Retention"), + Dictionary( + elements = [ + ("levels", + Tuple( + title = _("Levels for remaining undo retention"), + elements = [ + Age(title = _("warning if less then"), default_value = 600), + Age(title = _("critical if less then"), default_value = 300), + ] + ) + ),( + 'nospaceerrcnt_state', + MonitoringState( + default_value = 2, + title = _("State in case of non space error count is greater then 0: "), + ), + ), + ] + ), + TextAscii( + title = _("Database SID"), + size = 12, + allow_empty = False), + "dict", +) + +register_check_parameters( + subgroup_applications, + "oracle_recovery_status", + _("Oracle Recovery Status"), + Dictionary( + elements = [ + ("levels", + Tuple( + title = _("Levels for checkpoint time"), + elements = [ + Age(title = _("warning if higher then"), default_value = 1800), + Age(title = _("critical if higher then"), default_value = 3600), + ] + ) + ), + ("backup_age", + Tuple( + title = _("Levels for user managed backup files"), + help = _( "Important! This checks is only for monitoring of datafiles " + "who were left in backup mode " + "(alter database datafile ... 
begin backup;)."), + elements = [ + Age(title = _("warning if higher then"), default_value = 1800), + Age(title = _("critical if higher then"), default_value = 3600), + ] + ) + ) + ] + ), + TextAscii( + title = _("Database SID"), + size = 12, + allow_empty = False), + "dict", +) + +register_check_parameters( + subgroup_applications, + "oracle_jobs", + _("ORACLE Scheduler Job"), + Dictionary( + help = _("A scheduler job is an object in an ORACLE database which could be " + "compared to a cron job on Unix."), + elements = [ + ( "run_duration", + Tuple( + title = _("Maximum run duration for last execution"), + help = _("Here you can define an upper limit for the run duration of " + "last execution of the job."), + elements = [ + Age(title = _("warning at")), + Age(title = _("critical at")), + ])), + ( "disabled", DropdownChoice( + title = _("Job State"), + totext = "", + choices = [ + ( True, _("Ignore the state of the Job")), + ( False, _("Consider the state of the job")),], + help = _("The state of the job is ignored per default.") + )),]), + TextAscii( + title = _("Scheduler Job Name"), + help = _("Here you can set explicit Scheduler-Jobs by defining them via SID, Job-Owner " + "and Job-Name, separated by a dot, for example TUX12C.SYS.PURGE_LOG"), + regex = '.+\..+', + allow_empty = False), + None +) + +register_check_parameters( + subgroup_applications, + "oracle_instance", + _("Oracle Instance"), + Dictionary( + title = _("Consider state of Archivelogmode: "), + elements = [( + 'archivelog', + MonitoringState( + default_value = 0, + title = _("State in case of Archivelogmode is enabled: "), + ) + ),( + 'noarchivelog', + MonitoringState( + default_value = 1, + title = _("State in case of Archivelogmode is disabled: "), + ), + ),( + 'forcelogging', + MonitoringState( + default_value = 0, + title = _("State in case of Force Logging is enabled: "), + ), + ),( + 'noforcelogging', + MonitoringState( + default_value = 1, + title = _("State in case of Force Logging is disabled: "), + ), + ),( + 'logins', + MonitoringState( + default_value = 2, + title = _("State in case of logins are not possible: "), + ), + ),( + 'primarynotopen', + MonitoringState( + default_value = 2, + title = _("State in case of Database is PRIMARY and not OPEN: "), + ), + ),( + 'uptime_min', + Tuple( + title = _("Minimum required uptime"), + elements = [ + Age(title = _("Warning if below")), + Age(title = _("Critical if below")), + ] + )), + ], + ), + TextAscii( + title = _("Database SID"), + size = 12, + allow_empty = False), + 'first', +) + +register_check_parameters( + subgroup_applications, + "asm_diskgroup", + _("ASM Disk Group (used space and growth)"), + Dictionary( + elements = filesystem_elements + [ + ("req_mir_free", DropdownChoice( + title = _("Handling for required mirror space"), + totext = "", + choices = [ + ( False, _("Disregard required mirror space as free space")), + ( True, _("Regard required mirror space as free space")),], + help = _("ASM calculates the free space depending on free_mb or required mirror " + "free space. Enable this option to set the check against required " + "mirror free space. 
This only works for normal or high redundancy Disk Groups.")) + ), + ], + hidden_keys = ["flex_levels"], + ), + TextAscii( + title = _("ASM Disk Group"), + help = _("Specify the name of the ASM Disk Group "), + allow_empty = False), + "dict" +) + +register_check_parameters( + subgroup_applications, + "mssql_backup", + _("MSSQL Time since last Backup"), + Optional( + Tuple( + elements = [ + Integer(title = _("Warning if older than"), unit = _("seconds")), + Integer(title = _("Critical if older than"), unit = _("seconds")) + ] + ), + title = _("Specify time since last successful backup"), + ), + TextAscii( + title = _("Service descriptions"), + allow_empty = False), + None +) + +register_check_parameters( + subgroup_applications, + "mssql_tablespaces", + _("MSSQL Size of Tablespace"), + Dictionary( + elements = [ + ("size", + Tuple( + title = _("Size"), + elements = [ + Filesize(title = _("Warning at")), + Filesize(title = _("Critical at")) + ] + )), + ("unallocated", + Tuple( + title = _("Unallocated Space"), + elements = [ + Filesize(title = _("Warning at")), + Filesize(title = _("Critical at")) + ] + )), + ("reserved", + Tuple( + title = _("Reserved Space"), + elements = [ + Filesize(title = _("Warning at")), + Filesize(title = _("Critical at")) + ] + )), + ("data", + Tuple( + title = _("Data"), + elements = [ + Filesize(title = _("Warning at")), + Filesize(title = _("Critical at")) + ] + )), + ("indexes", + Tuple( + title = _("Indexes"), + elements = [ + Filesize(title = _("Warning at")), + Filesize(title = _("Critical at")) + ] + )), + ("unused", + Tuple( + title = _("Unused"), + elements = [ + Filesize(title = _("Warning at")), + Filesize(title = _("Critical at")) + ] + )), + + ], + ), + TextAscii( + title = _("Tablespace name"), + allow_empty = False), + None +) + +register_check_parameters( + subgroup_applications, + "vm_snapshots", + _("Virtual Machine Snapshots"), + Dictionary( + elements = [ + ("age", + Tuple( + title = _("Age of the last snapshot"), + elements = [ + Age(title = _("Warning if older than")), + Age(title = _("Critical if older than")) + ] + ) + )] + ), + None, + None +) +register_check_parameters( + subgroup_applications, + "veeam_backup", + _("Veeam: Time since last Backup"), + Dictionary( + elements = [ + ("age", + Tuple( + title = _("Time since end of last backup"), + elements = [ + Age(title = _("Warning if older than"), default_value = 108000), + Age(title = _("Critical if older than"), default_value = 172800) + ] + ) + )] + ), + TextAscii(title=_("Job name")), + "first" +) + +register_check_parameters( + subgroup_applications, + "backup_timemachine", + _("Age of timemachine backup"), + Dictionary( + elements = [ + ("age", + Tuple( + title = _("Maximum age of latest timemachine backup"), + elements = [ + Age(title = _("Warning if older than"), default_value = 86400), + Age(title = _("Critical if older than"), default_value = 172800) + ] + ) + )] + ), + None, + None +) + +register_check_parameters( + subgroup_applications, + "job", + _("Age of jobs controlled by mk-job"), + Dictionary( + elements = [ + ("age", + Tuple( + title = _("Maximum time since last start of job execution"), + elements = [ + Age(title = _("Warning at"), default_value = 0), + Age(title = _("Critical at"), default_value = 0) + ] + ) + )] + ), + TextAscii( + title = _("Job name"), + ), + None +) + +register_check_parameters( + subgroup_applications, + "mssql_counters_locks", + _("MSSQL Locks"), + Dictionary( + help = _("This check monitors locking related information of MSSQL tablespaces."), 
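+ # Sketch of the resulting parameter dict for this ruleset (hypothetical example + # values; every key below is optional), as consumed by the corresponding MSSQL + # locks check: each value is a (warn, crit) tuple of per-second float rates, e.g. + # {"lock_requests/sec": (500.0, 1000.0), "number_of_deadlocks/sec": (1.0, 5.0)}.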
+ elements = [ + ("lock_requests/sec", + Tuple( + title = _("Lock Requests / sec"), + help = _("Number of new locks and lock conversions per second requested from the lock manager."), + elements = [ + Float(title = _("Warning at"), unit = _("requests/sec")), + Float(title = _("Critical at"), unit = _("requests/sec")), + ], + ), + ), + ( "lock_timeouts/sec", + Tuple( + title = _("Lock Timeouts / sec"), + help = _("Number of lock requests per second that timed out, including requests for NOWAIT locks."), + elements = [ + Float(title = _("Warning at"), unit = _("timeouts/sec")), + Float(title = _("Critical at"), unit = _("timeouts/sec")), + ], + ), + ), + ( "number_of_deadlocks/sec", + Tuple( + title = _("Number of Deadlocks / sec"), + help = _("Number of lock requests per second that resulted in a deadlock."), + elements = [ + Float(title = _("Warning at"), unit = _("deadlocks/sec")), + Float(title = _("Critical at"), unit = _("deadlocks/sec")), + ], + ), + ), + ( "lock_waits/sec", + Tuple( + title = _("Lock Waits / sec"), + help = _("Number of lock requests per second that required the caller to wait."), + elements = [ + Float(title = _("Warning at"), unit = _("waits/sec")), + Float(title = _("Critical at"), unit = _("waits/sec")), + ], + ), + ), + ] + ), + TextAscii( + title = _("Service descriptions"), + allow_empty = False + ), + None +) + +register_check_parameters( + subgroup_applications, + "mysql_sessions", + _("MySQL Sessions & Connections"), + Dictionary( + help = _("This check monitors the current number of active sessions to the MySQL " + "database server as well as the connection rate."), + elements = [ + ( "total", + Tuple( + title = _("Number of current sessions"), + elements = [ + Integer(title = _("Warning at"), unit = _("sessions"), default_value = 100), + Integer(title = _("Critical at"), unit = _("sessions"), default_value = 200), + ], + ), + ), + ( "running", + Tuple( + title = _("Number of currently running sessions"), + help = _("Levels for the number of sessions that are currently active"), + elements = [ + Integer(title = _("Warning at"), unit = _("sessions"), default_value = 10), + Integer(title = _("Critical at"), unit = _("sessions"), default_value = 20), + ], + ), + ), + ( "connections", + Tuple( + title = _("Number of new connections per second"), + elements = [ + Integer(title = _("Warning at"), unit = _("connection/sec"), default_value = 20), + Integer(title = _("Critical at"), unit = _("connection/sec"), default_value = 40), + ], + ), + ), + ] + ), + None, + None +) + +register_check_parameters( + subgroup_applications, + "mysql_innodb_io", + _("MySQL InnoDB Throughput"), + Dictionary( + elements = [ + ( "read", + Tuple( + title = _("Read throughput"), + elements = [ + Float(title = _("warning at"), unit = _("MB/s")), + Float(title = _("critical at"), unit = _("MB/s")) + ])), + ( "write", + Tuple( + title = _("Write throughput"), + elements = [ + Float(title = _("warning at"), unit = _("MB/s")), + Float(title = _("critical at"), unit = _("MB/s")) + ])), + ( "average", + Integer( + title = _("Average"), + help = _("When averaging is set, then an floating average value " + "of the disk throughput is computed and the levels for read " + "and write will be applied to the average instead of the current " + "value."), + unit = "min")) + ]), + None, + "dict" +) + +register_check_parameters( + subgroup_applications, + "mysql_connections", + _("MySQL Connections"), + Dictionary( + elements = [ + ( "perc_used", + Tuple( + title = _("Max. 
parallel connections"), + help = _("Compares the maximum number of connections that have been " + "in use simultaneously since the server started with the maximum simultaneous " + "connections allowed by the configuration of the server. This threshold " + "makes the check raise warning/critical states if the percentage is equal to " + "or above the configured levels."), + elements = [ + Percentage(title = _("Warning at")), + Percentage(title = _("Critical at")), + ] + ) + ), + ]), + None, + "dict" +) + +register_check_parameters( + subgroup_applications, + "f5_connections", + _("F5 Loadbalancer Connections"), + Dictionary( + elements = [ + ( "conns", + Levels( + title = _("Max. number of connections"), + default_value = None, + default_levels = (25000, 30000) + ) + ), + ( "ssl_conns", + Levels( + title = _("Max. number of SSL connections"), + default_value = None, + default_levels = (25000, 30000) + ) + ), + ]), + None, + "dict" +) + +register_check_parameters( + subgroup_applications, + "checkpoint_connections", + _("Checkpoint Firewall Connections"), + Tuple( + help = _("This rule sets limits to the current number of connections through " + "a Checkpoint firewall."), + title = _("Maximum number of firewall connections"), + elements = [ + Integer( title = _("Warning at"), default_value = 40000), + Integer( title = _("Critical at"), default_value = 50000), + ], + ), + None, + None +) + +register_check_parameters( + subgroup_applications, + "checkpoint_packets", + _("Checkpoint Firewall Packet Rates"), + Dictionary( + elements = [ + ( "accepted", + Levels( + title = _("Maximum Rate of Accepted Packets"), + default_value = None, + default_levels = (100000, 200000), + unit = "pkts/sec" + ) + ), + ( "rejected", + Levels( + title = _("Maximum Rate of Rejected Packets"), + default_value = None, + default_levels = (100000, 200000), + unit = "pkts/sec" + ) + ), + ( "dropped", + Levels( + title = _("Maximum Rate of Dropped Packets"), + default_value = None, + default_levels = (100000, 200000), + unit = "pkts/sec" + ) + ), + ( "logged", + Levels( + title = _("Maximum Rate of Logged Packets"), + default_value = None, + default_levels = (100000, 200000), + unit = "pkts/sec" + ) + ), + ]), + None, + "dict" +) + +register_check_parameters( + subgroup_applications, + "fortigate_sessions", + _("Fortigate Firewall Sessions"), + Tuple( + title = _("Levels for number of active sessions"), + elements = [ + Integer(title = _("Warning at"), unit=_("sessions"), size=10, default_value = 100000), + Integer(title = _("Critical at"), unit=_("sessions"), size=10, default_value = 150000), + ] + ), + None, + "first", +) + + +register_check_parameters( + subgroup_applications, + "f5_pools", + _("F5 Loadbalancer Pools"), + Tuple( + title = _("Minimum number of pool members"), + elements = [ + Integer( title = _("Warning if below"), unit=_("members")), + Integer( title = _("Critical if below"), unit=_("members")), + ], + ), + TextAscii(title = _("Name of pool")), + "first" +) + +register_check_parameters( + subgroup_applications, + "dbsize", + _("Size of MySQL/PostgreSQL databases"), + Optional( + Tuple( + elements = [ + Integer(title = _("warning at"), unit = _("MB")), + Integer(title = _("critical at"), unit = _("MB")), + ]), + help = _("The check will trigger a warning or critical state if the size of the " + "database exceeds these levels."), + title = _("Impose limits on the size of the database"), + ), + TextAscii( + title = _("Name of the database"), + ), + "first" +) + +register_check_parameters( + 
subgroup_applications, + "postgres_sessions", + _("PostgreSQL Sessions"), + Dictionary( + help = _("This check monitors the current number of active and idle sessions on PostgreSQL"), + elements = [ + ( "total", + Tuple( + title = _("Number of current sessions"), + elements = [ + Integer(title = _("Warning at"), unit = _("sessions"), default_value = 100), + Integer(title = _("Critical at"), unit = _("sessions"), default_value = 200), + ], + ), + ), + ( "running", + Tuple( + title = _("Number of currently running sessions"), + help = _("Levels for the number of sessions that are currently active"), + elements = [ + Integer(title = _("Warning at"), unit = _("sessions"), default_value = 10), + Integer(title = _("Critical at"), unit = _("sessions"), default_value = 20), + ], + ), + ), + ] + ), + None, + None +) + + +register_check_parameters( + subgroup_applications, + "oracle_sessions", + _("Oracle Sessions"), + Tuple( + title = _("Number of active sessions"), + help = _("This check monitors the current number of active sessions on Oracle"), + elements = [ + Integer(title = _("Warning at"), unit = _("sessions"), default_value = 100), + Integer(title = _("Critical at"), unit = _("sessions"), default_value = 200), + ], + ), + TextAscii( + title = _("Database name"), + allow_empty = False), + None +) + +register_check_parameters( + subgroup_applications, + "oracle_locks", + _("Oracle Locks"), + Dictionary( + elements = [ + ("levels", + Tuple( + title = _("Levels for minimum wait time for a lock"), + elements = [ + Age(title = _("warning if higher then"), default_value = 1800), + Age(title = _("critical if higher then"), default_value = 3600), + ] + ) + ) + ] + ), + TextAscii( + title = _("Database SID"), + size = 12, + allow_empty = False), + "dict", +) + +register_check_parameters( + subgroup_applications, + "oracle_longactivesessions", + _("Oracle Long Active Sessions"), + Dictionary( + elements = [ + ("levels", + Tuple( + title = _("Levels of active sessions"), + elements = [ + Integer(title = _("Warning if more than"), unit=_("sessions")), + Integer(title = _("Critical if more than"), unit=_("sessions")), + ] + ) + ) + ] + ), + TextAscii( + title = _("Database SID"), + size = 12, + allow_empty = False), + "dict", +) + +register_check_parameters( + subgroup_applications, + "postgres_stat_database", + _("PostgreSQL Database Statistics"), + Dictionary( + help = _("This check monitors how often database objects in a PostgreSQL Database are accessed"), + elements = [ + ( "blocks_read", + Tuple( + title = _("Blocks read"), + elements = [ + Float(title = _("Warning at"), unit = _("blocks/s")), + Float(title = _("Critical at"), unit = _("blocks/s")), + ], + ), + ), + ( "xact_commit", + Tuple( + title = _("Commits"), + elements = [ + Float(title = _("Warning at"), unit = _("/s")), + Float(title = _("Critical at"), unit = _("/s")), + ], + ), + ), + ( "tup_fetched", + Tuple( + title = _("Fetches"), + elements = [ + Float(title = _("Warning at"), unit = _("/s")), + Float(title = _("Critical at"), unit = _("/s")), + ], + ), + ), + ( "tup_deleted", + Tuple( + title = _("Deletes"), + elements = [ + Float(title = _("Warning at"), unit = _("/s")), + Float(title = _("Critical at"), unit = _("/s")), + ], + ), + ), + ( "tup_updated", + Tuple( + title = _("Updates"), + elements = [ + Float(title = _("Warning at"), unit = _("/s")), + Float(title = _("Critical at"), unit = _("/s")), + ], + ), + ), + ( "tup_inserted", + Tuple( + title = _("Inserts"), + elements = [ + Float(title = _("Warning at"), unit = _("/s")), 
+ Float(title = _("Critical at"), unit = _("/s")), + ], + ), + ), + ], + ), + TextAscii( + title = _("Database name"), + allow_empty = False), + None +) + +register_check_parameters( + subgroup_applications, + "win_dhcp_pools", + _("Windows DHCP Pool"), + Tuple( + help = _("The count of remaining entries in the DHCP pool represents " + "the number of IP addresses left which can be assigned in the network"), + elements = [ + Percentage(title = _("Warning if less than"), unit = _("% free pool entries")), + Percentage(title = _("Critical if less than"), unit = _("% free pool entries")), + ]), + TextAscii( + title = _("Service descriptions"), + allow_empty = False), + None +) + +register_check_parameters( + subgroup_os, + "threads", + _("Number of threads"), + Tuple( + help = _("These levels check the number of currently existing threads on the system. Each process has at " + "least one thread."), + elements = [ + Integer(title = _("Warning at"), unit = _("threads"), default_value = 1000), + Integer(title = _("Critical at"), unit = _("threads"), default_value = 2000)]), + None, None +) + +register_check_parameters( + subgroup_os, + "logins", + _("Number of Logins on System"), + Tuple( + help = _("This rule defines upper limits for the number of logins on a system."), + elements = [ + Integer(title = _("Warning at"), unit = _("users"), default_value = 20), + Integer(title = _("Critical at"), unit = _("users"), default_value = 30)]), + None, None +) + +register_check_parameters( + subgroup_applications, + "vms_procs", + _("Number of processes on OpenVMS"), + Optional( + Tuple( + elements = [ + Integer(title = _("Warning at"), unit = _("processes"), default_value = 100), + Integer(title = _("Critical at"), unit = _("processes"), default_value = 200)]), + title = _("Impose levels on number of processes"), + ), + None, None +) + +register_check_parameters( + subgroup_os, + "vm_counter", + _("Number of kernel events per second"), + Levels( + help = _("This ruleset applies to several similar checks measuring various kernel " + "events like context switches, process creations and major page faults. 
" + "Please create separate rules for each type of kernel counter you " + "want to set levels for."), + unit = _("events per second"), + default_levels = (1000, 5000), + default_difference = (500.0, 1000.0), + default_value = None, + ), + DropdownChoice( + title = _("kernel counter"), + choices = [ + ( "Context Switches", _("Context Switches") ), + ( "Process Creations", _("Process Creations") ), + ( "Major Page Faults", _("Major Page Faults") )]), + "first" +) + +register_check_parameters( + subgroup_storage, + "ibm_svc_total_latency", + _("IBM SVC: Levels for total disk latency"), + Dictionary( + elements = [ + ( "read", + Levels( + title = _("Read latency"), + unit = _("ms"), + default_value = None, + default_levels = (50.0, 100.0))), + ( "write", + Levels( + title = _("Write latency"), + unit = _("ms"), + default_value = None, + default_levels = (50.0, 100.0))), + ] + ), + DropdownChoice( + choices = [ ( "Drives", _("Total latency for all drives") ), + ( "MDisks", _("Total latency for all MDisks") ), + ( "VDisks", _("Total latency for all VDisks") ), + ], + title = _("Disk/Drive type"), + help = _("Please enter Drives, Mdisks or VDisks here.")), + "first" +) + + +register_check_parameters( + subgroup_storage, + "ibm_svc_host", + _("IBM SVC: Options for SVC Hosts Check "), + Dictionary( + elements = [ + ( "always_ok", + DropdownChoice( + title = _("Override Service State"), + choices = [ + ( False, _("Check shows errors in case of degraded or offline hosts")), + ( True, _("Check always is in a OK State") ) + ], + ) + ) + ], + optional_keys = None, + ), + None, + "first" +) + + +register_check_parameters( + subgroup_storage, + "disk_io", + _("Levels on disk IO (throughput)"), + Dictionary( + elements = [ + ( "read", + Levels( + title = _("Read throughput"), + unit = _("MB/s"), + default_value = None, + default_levels = (50.0, 100.0))), + ( "write", + Levels( + title = _("Write throughput"), + unit = _("MB/s"), + default_value = None, + default_levels = (50.0, 100.0))), + ( "average", + Integer( + title = _("Average"), + help = _("When averaging is set, then an floating average value " + "of the disk throughput is computed and the levels for read " + "and write will be applied to the average instead of the current " + "value."), + unit = "min")), + ( "latency", + Tuple( + title = _("IO Latency"), + elements = [ + Float(title = _("warning at"), unit = _("ms"), default_value = 80.0), + Float(title = _("critical at"), unit = _("ms"), default_value = 160.0), + ])), + ( "latency_perfdata", + Checkbox( + title = _("Performance Data for Latency"), + label = _("Collect performance data for disk latency"), + help = _("Note: enabling performance data for the latency might " + "cause incompatibilities with existing historical data " + "if you are running PNP4Nagios in SINGLE mode.")), + ), + ( "read_ql", + Tuple( + title = _("Read Queue-Length"), + elements = [ + Float(title = _("warning at"), default_value = 80.0), + Float(title = _("critical at"), default_value = 90.0), + ])), + ( "write_ql", + Tuple( + title = _("Write Queue-Length"), + elements = [ + Float(title = _("warning at"), default_value = 80.0), + Float(title = _("critical at"), default_value = 90.0), + ])), + ( "ql_perfdata", + Checkbox( + title = _("Performance Data for Queue Length"), + label = _("Collect performance data for disk latency"), + help = _("Note: enabling performance data for the latency might " + "cause incompatibilities with existing historical data " + "if you are running PNP4Nagios in SINGLE mode.")), + ), + ]), + 
OptionalDropdownChoice( + choices = [ ( "SUMMARY", _("Summary of all disks") ), + ( "read", _("Summary of disk input (read)") ), + ( "write", _("Summary of disk output (write)") ), + ], otherlabel = _("On explicit devices ->"), explicit = TextAscii(allow_empty = False), title = _("Device"), help = _("For a summarized throughput of all disks, specify SUMMARY, for a " "sum of read or write throughput write read or write resp. " - "A per-disk IO is specified by the drive letter and a colon on Windows " - "(e.g. C:) or by the device name on Linux/UNIX (e.g. /dev/sda).")), - "first")) + "A per-disk IO is specified by the drive letter, a colon and a slash on Windows " + "(e.g. C:/) or by the device name on Linux/UNIX (e.g. /dev/sda).")), + "first" +) + + +register_rule( + group + '/' + subgroup_storage, + "diskstat_inventory", + ListChoice( + title = _("Inventory mode for Disk IO check"), + help = _("This rule controls which and how many checks will be created " + "for monitoring individual physical and logical disks."), + choices = [ + ( "summary", _("Create a summary over all physical disks") ), + ( "legacy", _("Create a summary for all read, one for write") ), + ( "physical", _("Create a separate check for each physical disk") ), + ( "lvm", _("Create a separate check for each LVM volume (Linux)") ), + ( "vxvm", _("Create a separate check for each VxVM volume (Linux)") ), + ], + default_value = [ 'summary' ], + ), + match="first") + + +register_rule(group + '/' + subgroup_networking, + varname = "if_groups", + title = _('Network interface groups'), + help = _('Normally the Interface checks create a single service per interface. ' + 'By defining if-group patterns, multiple interfaces can be combined. ' + 'A single service is created for this interface group showing the total traffic amount ' + 'of its members. You can configure if interfaces which are identified as group interfaces ' + 'should not show up as separate services. You can restrict grouped interfaces by iftype and the ' + 'item name of the single interface.'), + valuespec = ListOf( + Dictionary( + elements = [ + ("name", + TextAscii( + title = _("Name of group"), + help = _("Name of group in service description"), + allow_empty = False, + )), + ("iftype", Integer( + title = _("Restrict interface port type"), + help = _("Only interfaces with the given port type are put into this group. " + "For example 53 (propVirtual)."), + default_value = 6, + minvalue = 1, + maxvalue = 255, + )), + ("include_items", ListOfStrings( + title = _("Restrict interface items"), + help = _("Only interfaces with these item names are put into this group."), + )), + ("single", Checkbox( + title = _("Do not list grouped interfaces separately"), + )), + ], + required_keys = ["name", "single"]), + add_label = _("Add pattern")), + match = 'all', +) + +register_rule(group + '/' + subgroup_inventory, + varname = "winperf_msx_queues_inventory", + title = _('Microsoft Exchange Queues Inventory'), + help = _('By default the offsets of all Windows performance counters are preconfigured in the check. 
' + 'If the format of your counters object is not compatible then you can adapt the counter ' + 'offsets manually.'), + valuespec = ListOf( + Tuple( + orientation = "horizontal", + elements = [ + TextAscii( + title = _("Name of Counter"), + help = _("Name of the Counter to be monitored."), + size = 50, + allow_empty = False, + ), + Integer( + title = _("Offset"), + help = _("The offset of the information relative to counter base"), + allow_empty = False, + ), + ]), + movable = False, + add_label = _("Add Counter")), + match = 'all', +) + +register_check_parameters( + subgroup_applications, + "mailqueue_length", + _("Number of mails in outgoing mail queue"), + Tuple( + help = _("This rule is applied to the number of E-Mails that are " + "currently in the outgoing mail queue."), + elements = [ + Integer(title = _("Warning at"), unit = _("mails"), default_value = 10), + Integer(title = _("Critical at"), unit = _("mails"), default_value = 20), + ] + ), + None, + None +) + +register_check_parameters( + subgroup_os, + "uptime", + _("Display the system's uptime as a check"), + None, + None, None +) + +register_check_parameters( + subgroup_storage, + "zpool_status", + _("ZFS storage pool status"), + None, + None, None +) + +register_check_parameters( + subgroup_virt, + "vm_state", + _("Overall state of a virtual machine (for example ESX VMs)"), + None, + None, None +) + + +register_check_parameters( + subgroup_hardware, + "hw_errors", + _("Simple checks for BIOS/Hardware errors"), + None, + None, None +) + +register_check_parameters( + subgroup_applications, + "omd_status", + _("OMD site status"), + None, + TextAscii( + title = _("Name of the OMD site"), + help = _("The name of the OMD site to check the status for")), + "first" +) + +register_check_parameters( + subgroup_storage, + "network_fs", + _("Network filesystem - overall status (e.g. NFS)"), + None, + TextAscii( + title = _("Name of the mount point"), + help = _("For NFS enter the name of the mount point.")), + "first" +) + +register_check_parameters( + subgroup_storage, + "multipath", + _("Multipathing - health of a multipath LUN"), + Integer( + title = _("Expected number of active paths")), + TextAscii( + title = _("Name of the MP LUN"), + help = _("For Linux multipathing this is either the UUID (e.g. " + "60a9800043346937686f456f59386741), or the configured " + "alias.")), + "first" +) + +register_rule( + "checkparams/" + subgroup_storage, + varname = "inventory_multipath_rules", + title = _("Linux Multipath Inventory"), + valuespec = Dictionary( + elements = [ + ("use_alias", Checkbox( + title = _("Use the multipath alias as service name, if one is set"), + label = _("use alias"), + help = _("If a multipath device has an alias then you can use that for specifying " + "the device instead of the UUID. The alias will then be part of the service " + "description. 
The UUID will be displayed in the pluging output.")) + ), + ], + help = _('This rule set controls the discovery of Multipath devices on Linux.'), + ), + match = 'dict', +) + +register_check_parameters( + subgroup_storage, + "multipath_count", + _("Multipath Count"), + Alternative( + title = _("Match type"), + elements = [ + FixedValue( + None, + title = _("OK if standby count is zero or equals active paths."), + totext = "", + ), + Dictionary( + title = _("Custom settings"), + elements = [ (element, + Transform( + Tuple( + title = description, + elements = [ + Integer(title = _("Critical if less than")), + Integer(title = _("Warning if less than")), + Integer(title = _("Warning if more than")), + Integer(title = _("Critical if more than")), + ] + ), + forth = lambda x: len(x) == 2 and (0, 0, x[0], x[1]) or x + ) + ) for (element, description) in [ + ("active", _("Active paths")), + ("dead", _("Dead paths")), + ("disabled", _("Disabled paths")), + ("standby", _("Standby paths")), + ("unknown", _("Unknown paths")) + ] + ] + ), + ] + ), + TextAscii( + title = _("Path ID")), + "first" +) + + + +register_check_parameters( + subgroup_storage, + "hpux_multipath", + _("Multipathing on HPUX - state of paths of a LUN"), + Tuple( + title = _("Expected path situation"), + elements = [ + Integer(title = _("Number of active paths")), + Integer(title = _("Number of standby paths")), + Integer(title = _("Number of failed paths")), + Integer(title = _("Number of unopen paths")), + ]), + TextAscii( + title = _("WWID of the LUN")), + "first" +) + +register_check_parameters( + subgroup_storage, + "drbd", + _("DR:BD roles and diskstates"), + Dictionary( + elements = [ + ( "roles", + Alternative( + title = _("Roles"), + elements = [ + FixedValue(None, totext = "", title = _("Do not monitor")), + ListOf( + Tuple( + orientation = "horizontal", + elements = [ + DropdownChoice( + title = _("DRBD shows up as"), + default_value = "running", + choices = [ + ( "primary_secondary", _("Primary / Secondary") ), + ( "primary_primary", _("Primary / Primary") ), + ( "secondary_primary", _("Secondary / Primary") ), + ( "secondary_secondary", _("Secondary / Secondary") ) + ] + ), + MonitoringState( + title = _("Resulting state"), + ), + ], + default_value = ( "ignore", 0) + ), + title = _("Set roles"), + add_label = _("Add role rule") + ) + ] + ) + ), + ( "diskstates", + Alternative( + title = _("Diskstates"), + elements = [ + FixedValue(None, totext = "", title = _("Do not monitor")), + ListOf( + Tuple( + elements = [ + DropdownChoice( + title = _("Diskstate"), + choices = [ + ( "primary_Diskless", _("Primary - Diskless") ), + ( "primary_Attaching", _("Primary - Attaching") ), + ( "primary_Failed", _("Primary - Failed") ), + ( "primary_Negotiating", _("Primary - Negotiating") ), + ( "primary_Inconsistent", _("Primary - Inconsistent") ), + ( "primary_Outdated", _("Primary - Outdated") ), + ( "primary_DUnknown", _("Primary - DUnknown") ), + ( "primary_Consistent", _("Primary - Consistent") ), + ( "primary_UpToDate", _("Primary - UpToDate") ), + ( "secondary_Diskless", _("Secondary - Diskless") ), + ( "secondary_Attaching", _("Secondary - Attaching") ), + ( "secondary_Failed", _("Secondary - Failed") ), + ( "secondary_Negotiating", _("Secondary - Negotiating") ), + ( "secondary_Inconsistent", _("Secondary - Inconsistent") ), + ( "secondary_Outdated", _("Secondary - Outdated") ), + ( "secondary_DUnknown", _("Secondary - DUnknown") ), + ( "secondary_Consistent", _("Secondary - Consistent") ), + ( "secondary_UpToDate", _("Secondary 
- UpToDate") ), + ] + ), + MonitoringState( title = _("Resulting state") ) + ], + orientation = "horizontal", + ), + title = _("Set diskstates"), + add_label = _("Add diskstate rule") + ) + ] + ), + ) + ] + ), + TextAscii( title = _("DRBD device") ), + "first", + True, +) + +register_check_parameters( + subgroup_storage, + "netapp_disks", + _("NetApp Broken/Spare Disk Ratio"), + Dictionary( + help = _("You can set a limit to the broken to spare disk ratio. " + "The ratio is calculated with broken / (broken + spare)."), + elements = [ + ( "broken_spare_ratio", + Tuple( + title = _("Broken to spare ratio"), + elements = [ + Percentage(title = _("Warning at or above"), default_value = 1.0), + Percentage(title = _("Critical at or above"), default_value = 50.0), + ] + )), + ], + optional_keys = False + ), + None, + "match" +) + +register_check_parameters( + subgroup_storage, + "netapp_volumes", + _("NetApp Volumes"), + Dictionary( + elements = [ + ("levels", + Alternative( + title = _("Levels for volume"), + show_alternative_title = True, + default_value = (80.0, 90.0), + match = match_dual_level_type, + elements = [ + get_free_used_dynamic_valuespec("used", "volume"), + Transform( + get_free_used_dynamic_valuespec("free", "volume", default_value = (20.0, 10.0)), + allow_empty = False, + forth = transform_filesystem_free, + back = transform_filesystem_free + ) + ] + ) + ), + ("perfdata", + ListChoice( + title = _("Performance data for protocols"), + help = _("Specify for which protocol performance data should get recorded."), + choices = [ + ( "", _("Summarized data of all protocols") ), + ( "nfs", _("NFS") ), + ( "cifs", _("CIFS") ), + ( "san", _("SAN") ), + ( "fcp", _("FCP") ), + ( "iscsi", _("iSCSI") ), + ], + )), + ( "magic", + Float( + title = _("Magic factor (automatic level adaptation for large volumes)"), + default_value = 0.8, + minvalue = 0.1, + maxvalue = 1.0)), + ( "magic_normsize", + Integer( + title = _("Reference size for magic factor"), + default_value = 20, + minvalue = 1, + unit = _("GB"))), + ( "levels_low", + Tuple( + title = _("Minimum levels if using magic factor"), + help = _("The volume levels will never fall below these values, when using " + "the magic factor and the volume is very small."), + elements = [ + Percentage(title = _("Warning if above"), unit = _("% usage"), allow_int = True, default_value=50), + Percentage(title = _("Critical if above"), unit = _("% usage"), allow_int = True, default_value=60)])), + ( "trend_range", + Optional( + Integer( + title = _("Time Range for filesystem trend computation"), + default_value = 24, + minvalue = 1, + unit= _("hours")), + title = _("Trend computation"), + label = _("Enable trend computation"))), + ( "trend_mb", + Tuple( + title = _("Levels on trends in MB per time range"), + elements = [ + Integer(title = _("Warning at"), unit = _("MB / range"), default_value = 100), + Integer(title = _("Critical at"), unit = _("MB / range"), default_value = 200) + ])), + ( "trend_perc", + Tuple( + title = _("Levels for the percentual growth per time range"), + elements = [ + Percentage(title = _("Warning at"), unit = _("% / range"), default_value = 5,), + Percentage(title = _("Critical at"), unit = _("% / range"), default_value = 10,), + ])), + ( "trend_timeleft", + Tuple( + title = _("Levels on the time left until the filesystem gets full"), + elements = [ + Integer(title = _("Warning if below"), unit = _("hours"), default_value = 12,), + Integer(title = _("Critical if below"), unit = _("hours"), default_value = 6, ), + ])), + ( 
"trend_showtimeleft", + Checkbox( title = _("Display time left in check output"), label = _("Enable"), + help = _("Normally, the time left until the disk is full is only displayed when " + "the configured levels have been breached. If you set this option " + "the check always reports this information")) + ), + ( "trend_perfdata", + Checkbox( + title = _("Trend performance data"), + label = _("Enable generation of performance data from trends"))), + + + ] + ), + TextAscii(title = _("Volume name")), + "match" +) + +register_check_parameters( + subgroup_applications, + "services", + _("Windows Services"), + Dictionary( + elements = [ + ( "states", + ListOf( + Tuple( + orientation = "horizontal", + elements = [ + DropdownChoice( + title = _("Expected state"), + default_value = "running", + choices = [ + ( None, _("ignore the state") ), + ( "running", _("running") ), + ( "stopped", _("stopped") )]), + DropdownChoice( + title = _("Start type"), + default_value = "auto", + choices = [ + ( None, _("ignore the start type") ), + ( "demand", _("demand") ), + ( "disabled", _("disabled") ), + ( "auto", _("auto") ), + ( "unknown", _("unknown (old agent)") ), + ]), + MonitoringState( + title = _("Resulting state"), + ), + ], + default_value = ( "running", "auto", 0) + ), + title = _("Services states"), + help = _("You can specify a separate monitoring state for each possible " + "combination of service state and start type. If you do not use " + "this parameter, then only running/auto will be assumed to be OK."), + )), + ( "else", + MonitoringState( + title = _("State if no entry matches"), + default_value = 2, + ), + ),] + ), + TextAscii( + title = _("Name of the service"), + help = _("Please Please note, that the agent replaces spaces in " + "the service names with underscores. If you are unsure about the " + "correct spelling of the name then please look at the output of " + "the agent (cmk -d HOSTNAME). The service names are in the first " + "column of the section <<<services>>>. Please " + "do not mix up the service name with the display name of the service." + "The latter one is just being displayed as a further information."), + allow_empty = False), + "first", +) + +register_check_parameters( + subgroup_applications, + "winperf_ts_sessions", + _("Windows Terminal Server Sessions"), + Dictionary( + help = _("This check monitors number of active and inactive terminal " + "server sessions."), + elements = [ + ( "active", + Tuple( + title = _("Number of active sessions"), + elements = [ + Integer(title = _("Warning at"), unit = _("sessions"), default_value = 100), + Integer(title = _("Critical at"), unit = _("sessions"), default_value = 200), + ], + ), + ), + ( "inactive", + Tuple( + title = _("Number of inactive sessions"), + help = _("Levels for the number of sessions that are currently inactive"), + elements = [ + Integer(title = _("Warning at"), unit = _("sessions"), default_value = 10), + Integer(title = _("Critical at"), unit = _("sessions"), default_value = 20), + ], + ), + ), + ] + ), + None, + None +) + +register_check_parameters( + subgroup_storage, + "raid", + _("RAID: overall state"), + None, + TextAscii( + title = _("Name of the device"), + help = _("For Linux MD specify the device name without the " + "/dev/, e.g. 
md0, for hardware raids " + "please refer to the manual of the actual check being used.")), + "first" +) + +register_check_parameters( + subgroup_storage, + "raid_disk", + _("RAID: state of a single disk"), + TextAscii( + title = _("Target state"), + help = _("State the disk is expected to be in. Typical good states " + "are online, host spare, OK and the like. The exact way of how " + "to specify a state depends on the check and hard type being used. " + "Please take examples from discovered checks for reference.")), + TextAscii( + title = _("Number or ID of the disk"), + help = _("How the disks are named depends on the type of hardware being " + "used. Please look at already discovered checks for examples.")), + "first" +) + +register_check_parameters( + subgroup_environment, + "switch_contact", + _("Switch contact state"), + DropdownChoice( + help = _("This rule sets the required state of a switch contact"), + label = _("Required switch contact state"), + choices = [ + ( "open", "Switch contact is open" ), + ( "closed", "Switch contact is closed" ), + ( "ignore", "Ignore switch contact state" ), + ], + ), + TextAscii( + title = _("Sensor"), + allow_empty = False), + None +) + +register_check_parameters( + subgroup_environment, + "plugs", + _("State of PDU Plugs"), + DropdownChoice( + help = _("This rule sets the required state of a PDU plug. It is meant to " + "be independent of the hardware manufacturer."), + title = _("Required plug state"), + choices = [ + ( "on", _("Plug is ON") ), + ( "off", _("Plug is OFF") ), + ], + default_value = "on" + ), + TextAscii( + title = _("Number or name of the plug item"), + allow_empty = True), + None +) + +# New temperature rule for modern temperature checks that have the +# sensor type (e.g. "CPU", "Chassis", etc.) as the beginning of their +# item (e.g. "CPU 1", "Chassis 17/11"). This will replace all other +# temperature rulesets in future. Note: those few temperature checks +# that do *not* use an item, need to be converted to use one single +# item (other than None). +register_check_parameters( + subgroup_environment, + "temperature", + _("Temperature"), + Transform( + Dictionary( + elements = [ + ( "levels", + Tuple( + title = _("Upper Temperature Levels"), + elements = [ + Integer(title = _("Warning at"), unit = u"°C", default_value = 26), + Integer(title = _("Critical at"), unit = u"°C", default_value = 30), + ] + )), + ( "levels_lower", + Tuple( + title = _("Lower Temperature Levels"), + elements = [ + Integer(title = _("Warning below"), unit = u"°C", default_value = 0), + Integer(title = _("Critical below"), unit = u"°C", default_value = -10), + ] + )), + ( "output_unit", + DropdownChoice( + title = _("Display values in "), + choices = [ + ( "c", _("Celsius") ), + ( "f", _("Fahrenheit") ), + ( "k", _("Kelvin") ), + ] + )), + ( "input_unit", + DropdownChoice( + title = _("Override unit of sensor"), + help = _("In some rare cases the unit that is signalled by the sensor " + "is wrong and e.g. the sensor sends values in Fahrenheit while " + "they are misinterpreted as Celsius. With this setting you can " + "force the reading of the sensor to be interpreted as customized. 
"), + choices = [ + ( "c", _("Celsius") ), + ( "f", _("Fahrenheit") ), + ( "k", _("Kelvin") ), + ] + )), + ( "device_levels_handling", + DropdownChoice( + title = _("Interpretation of the device's own temperature status"), + choices = [ + ( "usr", _("Ignore device's own levels") ), + ( "dev", _("Only use device's levels, ignore yours" ) ), + ( "best", _("Use least critical of your and device's levels") ), + ( "worst", _("Use most critical of your and device's levels") ), + ( "devdefault", _("Use device's levels if present, otherwise yours") ), + ( "usrdefault", _("Use your own levels if present, otherwise the device's") ), + ], + default_value = "usrdefault", + )), + + ] + ), + forth = lambda v: type(v) == tuple and { "levels" : v } or v, + ), + TextAscii( + title = _("Sensor ID"), + help = _("The identifier of the thermal sensor.")), + "dict", +) + +register_check_parameters( + subgroup_environment, + "room_temperature", + _("Room temperature (external thermal sensors)"), + Tuple( + help = _("Temperature levels for external thermometers that are used " + "for monitoring the temperature of a datacenter. An example " + "is the webthem from W&T."), + elements = [ + Integer(title = _("warning at"), unit = u"°C", default_value = 26), + Integer(title = _("critical at"), unit = u"°C", default_value = 30), + ]), + TextAscii( + title = _("Sensor ID"), + help = _("The identifier of the themal sensor.")), + "first" +) + +register_check_parameters( + subgroup_environment, + "hw_single_temperature", + _("Host/Device temperature"), + Tuple( + help = _("Temperature levels for hardware devices with " + "a single temperature sensor."), + elements = [ + Integer(title = _("warning at"), unit = u"°C", default_value = 35), + Integer(title = _("critical at"), unit = u"°C", default_value = 40), + ]), + None, + "first" +) + +register_check_parameters( + subgroup_environment, + "evolt", + _("Voltage levels (UPS / PDU / Other Devices)"), + Tuple( + help = _("Voltage Levels for devices like UPS or PDUs. " + "Several phases may be addressed independently."), + elements = [ + Integer(title = _("warning if below"), unit = "V", default_value = 210), + Integer(title = _("critical if below"), unit = "V", default_value = 180), + ]), + TextAscii( + title = _("Phase"), + help = _("The identifier of the phase the power is related to.")), + "first" +) + +register_check_parameters( + subgroup_environment, + "efreq", + _("Nominal Frequencies"), + Tuple( + help = _("Levels for the nominal frequencies of AC devices " + "like UPSs or PDUs. Several phases may be addressed independently."), + elements = [ + Integer(title = _("warning if below"), unit = "Hz", default_value = 40), + Integer(title = _("critical if below"), unit = "Hz", default_value = 45), + ]), + TextAscii( + title = _("Phase"), + help = _("The identifier of the phase the power is related to.")), + "first" +) + +register_check_parameters( + subgroup_environment, + "epower", + _("Electrical Power"), + Tuple( + help = _("Levels for the electrical power consumption of a device " + "like a UPS or a PDU. 
+
+register_check_parameters(
+    subgroup_environment,
+    "room_temperature",
+    _("Room temperature (external thermal sensors)"),
+    Tuple(
+        help = _("Temperature levels for external thermometers that are used "
+                 "for monitoring the temperature of a datacenter. An example "
+                 "is the Web-Thermograph from W&T."),
+        elements = [
+            Integer(title = _("warning at"), unit = u"°C", default_value = 26),
+            Integer(title = _("critical at"), unit = u"°C", default_value = 30),
+        ]),
+    TextAscii(
+        title = _("Sensor ID"),
+        help = _("The identifier of the thermal sensor.")),
+    "first"
+)
+
+register_check_parameters(
+    subgroup_environment,
+    "hw_single_temperature",
+    _("Host/Device temperature"),
+    Tuple(
+        help = _("Temperature levels for hardware devices with "
+                 "a single temperature sensor."),
+        elements = [
+            Integer(title = _("warning at"), unit = u"°C", default_value = 35),
+            Integer(title = _("critical at"), unit = u"°C", default_value = 40),
+        ]),
+    None,
+    "first"
+)
+
+register_check_parameters(
+    subgroup_environment,
+    "evolt",
+    _("Voltage levels (UPS / PDU / Other Devices)"),
+    Tuple(
+        help = _("Voltage levels for devices like UPSs or PDUs. "
+                 "Several phases may be addressed independently."),
+        elements = [
+            Integer(title = _("warning if below"), unit = "V", default_value = 210),
+            Integer(title = _("critical if below"), unit = "V", default_value = 180),
+        ]),
+    TextAscii(
+        title = _("Phase"),
+        help = _("The identifier of the phase the power is related to.")),
+    "first"
+)
+
+register_check_parameters(
+    subgroup_environment,
+    "efreq",
+    _("Nominal Frequencies"),
+    Tuple(
+        help = _("Levels for the nominal frequencies of AC devices "
+                 "like UPSs or PDUs. Several phases may be addressed independently."),
+        elements = [
+            Integer(title = _("warning if below"), unit = "Hz", default_value = 40),
+            Integer(title = _("critical if below"), unit = "Hz", default_value = 45),
+        ]),
+    TextAscii(
+        title = _("Phase"),
+        help = _("The identifier of the phase the power is related to.")),
+    "first"
+)
+
+register_check_parameters(
+    subgroup_environment,
+    "epower",
+    _("Electrical Power"),
+    Tuple(
+        help = _("Levels for the electrical power consumption of a device "
+                 "like a UPS or a PDU. Several phases may be addressed independently."),
+        elements = [
+            Integer(title = _("warning if below"), unit = "Watt", default_value = 20),
+            Integer(title = _("critical if below"), unit = "Watt", default_value = 1),
+        ]),
+    TextAscii(
+        title = _("Phase"),
+        help = _("The identifier of the phase the power is related to.")),
+    "first"
+)
+
+register_check_parameters(
+    subgroup_environment,
+    "epower_single",
+    _("Electrical Power for Devices with only one phase"),
+    Tuple(
+        help = _("Levels for the electrical power consumption of a device"),
+        elements = [
+            Integer(title = _("warning at"), unit = "Watt", default_value = 300),
+            Integer(title = _("critical at"), unit = "Watt", default_value = 400),
+        ]),
+    None,
+    "first"
+)
+
+register_check_parameters(
+    subgroup_environment,
+    "hw_temperature",
+    _("Hardware temperature, multiple sensors"),
+    Tuple(
+        help = _("Temperature levels for hardware devices like "
+                 "Brocade switches with (potentially) several "
+                 "temperature sensors. Sensor IDs can be selected "
+                 "in the rule."),
+        elements = [
+            Integer(title = _("warning at"), unit = u"°C", default_value = 35),
+            Integer(title = _("critical at"), unit = u"°C", default_value = 40),
+        ]),
+    TextAscii(
+        title = _("Sensor ID"),
+        help = _("The identifier of the thermal sensor.")),
+    "first"
+)
+
+register_check_parameters(
+    subgroup_environment,
+    "hw_temperature_single",
+    _("Hardware temperature, single sensor"),
+    Tuple(
+        help = _("Temperature levels for hardware devices like "
+                 "DELL Powerconnect that have just one temperature sensor."),
+        elements = [
+            Integer(title = _("warning at"), unit = u"°C", default_value = 35),
+            Integer(title = _("critical at"), unit = u"°C", default_value = 40),
+        ]),
+    None,
+    "first"
+)
/dev/sda.")), + "first" +) + +register_check_parameters( + subgroup_environment, + "eaton_enviroment", + _("Temperature and Humidity for Eaton UPS"), + Dictionary( + elements = [ + ( "temp", + Tuple( + title = _("Temperature"), + elements = [ + Integer(title = _("warning at"), unit = u"°C", default_value = 26), + Integer(title = _("critical at"), unit = u"°C", default_value = 30), + ])), + ( "remote_temp", + Tuple( + title = _("Remote Temperature"), + elements = [ + Integer(title = _("warning at"), unit = u"°C", default_value = 26), + Integer(title = _("critical at"), unit = u"°C", default_value = 30), + ])), + ( "humidity", + Tuple( + title = _("Humidity"), + elements = [ + Integer(title = _("warning at"), unit = u"%", default_value = 60), + Integer(title = _("critical at"), unit = u"%", default_value = 75), + ])), + ]), + None, + "dict" +) + +register_check_parameters( + subgroup_environment, + "ups_outphase", + _("Parameters for output phases of UPSs and PDUs"), + Dictionary( + elements = [ + ( "voltage", + Tuple( + title = _("Voltage"), + elements = [ + Integer(title = _("warning if below"), unit = u"V", default_value = 210), + Integer(title = _("critical if below"), unit = u"V", default_value = 200), + ])), + ( "load", + Tuple( + title = _("Load"), + elements = [ + Integer(title = _("warning at"), unit = u"%", default_value = 80), + Integer(title = _("critical at"), unit = u"%", default_value = 90), + ])), + ]), + TextAscii( + title = _("Phase Number"), + help = _("The number of the phase (usually 1,2,3).")), + "dict" +) + +register_check_parameters( + subgroup_environment, + "el_inphase", + _("Parameters for input phases of UPSs and PDUs"), + Dictionary( + elements = [ + ( "voltage", + Tuple( + title = _("Voltage"), + elements = [ + Integer(title = _("warning if below"), unit = u"V", default_value = 210), + Integer(title = _("critical if below"), unit = u"V", default_value = 200), + ], + )), + ( "power", + Tuple( + title = _("Power"), + elements = [ + Integer(title = _("warning at"), unit = u"W", default_value = 1000), + Integer(title = _("critical at"), unit = u"W", default_value = 1200), + ], + )), + ( "appower", + Tuple( + title = _("Apparent Power"), + elements = [ + Integer(title = _("warning at"), unit = u"VA", default_value = 1100), + Integer(title = _("critical at"), unit = u"VA", default_value = 1300), + ], + )), + ( "current", + Tuple( + title = _("Current"), + elements = [ + Integer(title = _("warning at"), unit = u"A", default_value = 5), + Integer(title = _("critical at"), unit = u"A", default_value = 10), + ], + )), + ] + ), + TextAscii( + title = _("Phase Number"), + help = _("The number of the phase (usually 1,2,3).")), + "dict" +) + +register_check_parameters( + subgroup_environment, + "hw_fans", + _("FAN speed of Hardware devices"), + Dictionary( + elements = [ + ("lower", + Tuple( + help = _("Lower levels for the fan speed of a hardware device"), + title = _("Lower levels"), + elements = [ + Integer(title = _("warning if below"), unit = u"rpm"), + Integer(title = _("critical if below"), unit = u"rpm"), + ]), + ), + ( "upper", + Tuple( + help = _("Upper levels for the Fan speed of a hardware device"), + title = _("Upper levels"), + elements = [ + Integer(title = _("warning at"), unit = u"rpm", default_value = 8000), + Integer(title = _("critical at"), unit = u"rpm", default_value = 8400), + ]), + ), + ], + optional_keys = ["upper"], + ), + TextAscii( + title = _("Fan Name"), + help = _("The identificator of the fan.")), + "first" +) + +register_check_parameters( + 
+register_check_parameters(
+    subgroup_os,
+    "pf_used_states",
+    _("Number of used states of OpenBSD PF engine"),
+    Dictionary(
+        elements = [
+            ("used",
+             Tuple(
+                 title = _("Limits for the number of used states"),
+                 elements = [
+                     Integer(title = _("warning at")),
+                     Integer(title = _("critical at")),
+                 ]),
+            ),
+        ],
+        optional_keys = [None],
+    ),
+    None,
+    "first"
+)
+
+register_check_parameters(
+    subgroup_environment,
+    "pdu_gude",
+    _("Levels for Gude PDU Devices"),
+    Dictionary(
+        elements = [
+            ( "kWh",
+              Tuple(
+                  title = _("Total accumulated Active Energy of Power Channel"),
+                  elements = [
+                      Integer(title = _("warning at"), unit = _("kWh") ),
+                      Integer(title = _("critical at"), unit = _("kWh")),
+                  ])),
+            ( "W",
+              Tuple(
+                  title = _("Active Power"),
+                  elements = [
+                      Integer(title = _("warning at"), unit = _("W") ),
+                      Integer(title = _("critical at"), unit = _("W") ),
+                  ])),
+            ( "A",
+              Tuple(
+                  title = _("Current on Power Channel"),
+                  elements = [
+                      Integer(title = _("warning at"), unit = _("A") ),
+                      Integer(title = _("critical at"), unit = _("A")),
+                  ])),
+            ( "V",
+              Tuple(
+                  title = _("Voltage on Power Channel"),
+                  elements = [
+                      Integer(title = _("warning if below"), unit = _("V") ),
+                      Integer(title = _("critical if below"), unit = _("V") ),
+                  ])),
+            ( "VA",
+              Tuple(
+                  title = _("Line Mean Apparent Power"),
+                  elements = [
+                      Integer(title = _("warning at"), unit = _("VA") ),
+                      Integer(title = _("critical at"), unit = _("VA")),
+                  ])),
+        ]),
+    TextAscii(
+        title = _("Phase Number"),
+        help = _("The number of the power phase.")),
+    "first"
+)
+
+
+register_check_parameters(
+    subgroup_environment,
+    "hostsystem_sensors",
+    _("Hostsystem sensor alerts"),
+    ListOf(
+        Dictionary(
+            help = _("This rule allows you to override alert levels for the given sensor names."),
+            elements = [("name", TextAscii(title = _("Sensor name")) ),
+                        ("states", Dictionary(
+                            title = _("Custom states"),
+                            elements = [
+                                (element,
+                                 MonitoringState( title = "Sensor %s" %
+                                     description, label = _("Set state to"),
+                                     default_value = int(element) )
+                                ) for (element, description) in [
+                                    ("0", _("OK")),
+                                    ("1", _("WARNING")),
+                                    ("2", _("CRITICAL")),
+                                    ("3", _("UNKNOWN"))
+                                ]
+                            ],
+                        ))],
+            optional_keys = False
+        ),
+        add_label = _("Add sensor name")
+    ),
+    None,
+    "first"
+)
+
+register_check_parameters(
+    subgroup_environment,
+    "temperature_auto",
+    _("Temperature sensors with builtin levels"),
+    None,
+    TextAscii(
+        title = _("Sensor ID"),
+        help = _("The identifier of the thermal sensor.")),
+    "first"
+)
+
+register_check_parameters(
+    subgroup_environment,
+    "temperature_trends",
+    _("Temperature trends for devices with builtin levels"),
+    Dictionary(
+        title = _("Temperature Trend Analysis"),
+        help = _("This rule enables and configures a trend analysis and corresponding limits for devices, "
+                 "which have their own limits configured on the device. 
It will only work for supported " + "checks, right now the adva_fsp_temp check."), + elements = [ + ( "trend_range", + Optional( + Integer( + title = _("Time range for temperature trend computation"), + default_value = 30, + minvalue = 5, + unit= _("minutes")), + title = _("Trend computation"), + label = _("Enable trend computation") + ) + ), + ( "trend_c", + Tuple( + title = _("Levels on trends in degrees Celsius per time range"), + elements = [ + Integer(title = _("Warning at"), unit = u"°C / " + _("range"), default_value = 5), + Integer(title = _("Critical at"), unit = u"°C / " + _("range"), default_value = 10) + ] + ) + ), + ( "trend_timeleft", + Tuple( + title = _("Levels on the time left until limit is reached"), + elements = [ + Integer(title = _("Warning if below"), unit = _("minutes"), default_value = 240,), + Integer(title = _("Critical if below"), unit = _("minutes"), default_value = 120, ), + ] + ) + ), + ] + ), + TextAscii( + title = _("Sensor ID"), + help = _("The identifier of the thermal sensor.")), + "dict" +) +ntp_params = \ + Tuple( + elements = [ + Integer( + title = _("Critical at stratum"), + default_value = 10, + help = _("The stratum (\"distance\" to the reference clock) at which the check gets critical."), + ), + Float( + title = _("Warning at"), + unit = _("ms"), + default_value = 200.0, + help = _("The offset in ms at which a warning state is triggered."), + ), + Float( + title = _("Critical at"), + unit = _("ms"), + default_value = 500.0, + help = _("The offset in ms at which a critical state is triggered."), + ), + ] + ) + +register_check_parameters( + subgroup_os, + "ntp_time", + _("State of NTP time synchronisation"), + ntp_params, + None, + "first" +) + +register_check_parameters( + subgroup_os, + "ntp_peer", + _("State of NTP peer"), + ntp_params, + TextAscii( + title = _("Name of the peer")), + "first" +) + +def apc_convert_from_tuple(params): + if type(params) in (list, tuple): + params = { "levels": params} + return params + +register_check_parameters( + subgroup_environment, + "apc_symentra", + _("APC Symmetra Checks"), + Transform( + Dictionary( + elements = [ + ("levels", + Tuple( + title = _("Levels of battery parameters during normal operation"), + elements = [ + Integer( + title = _("Critical Battery Capacity"), + help = _("The battery capacity in percent at and below which a critical state is triggered"), + unit = "%", default_value = 95, + ), + Integer( + title = _("Critical System Temperature"), + help = _("The critical temperature of the System"), + unit = _("C"), + default_value = 55, + ), + Integer( + title = _("Critical Battery Current"), + help = _("The critical battery current in Ampere"), + unit = _("A"), + default_value = 1, + ), + Integer( + title = _("Critical Battery Voltage"), + help = _("The output voltage at and below which a critical state " + "is triggered."), + unit = _("V"), + default_value = 220, + ), + ] + )), + ("output_load", + Tuple( + title = _("Current Output Load"), + help = _("Here you can set levels on the current percentual output load of the UPS. " + "This load affects the running time of all components being supplied " + "with battery power."), + elements = [ + Percentage( + title = _("Warning level"), + ), + Percentage( + title = _("Critical level"), + ), + ] + )), + ("post_calibration_levels", + Dictionary( + title = _("Levels of battery parameters after calibration"), + help = _("After a battery calibration the battery capacity is reduced until the " + "battery is fully charged again. 
Here you can specify an alternative "
+                         "lower level in this post-calibration phase. "
+                         "Since APC devices remember the time of the last calibration only "
+                         "as a date, the alternative lower level will be applied on the whole "
+                         "day of the calibration until midnight. You can extend this time period "
+                         "with an additional time span to make sure calibrations occurring just "
+                         "before midnight do not trigger false alarms."
+                ),
+                elements = [
+                    ("altcapacity",
+                     Percentage(
+                         title = _("Alternative critical battery capacity after calibration"),
+                         default_value = 50,
+                     )),
+                    ("additional_time_span",
+                     Integer(
+                         title = _("Extend post-calibration phase by additional time span"),
+                         unit = _("minutes"),
+                         default_value = 0,
+                     )),
+                ],
+                optional_keys = False,
+            )),
+        ],
+        optional_keys = ['post_calibration_levels', 'output_load'],
+        ),
+        forth = apc_convert_from_tuple
+    ),
+    None,
+    "first"
+)
+
+register_check_parameters(
+    subgroup_environment,
+    "apc_ats_output",
+    _("APC Automatic Transfer Switch Output"),
+    Dictionary(
+        title = _("Levels for ATS Output parameters"),
+        optional_keys = True,
+        elements = [
+            ("output_voltage_max",
+             Tuple(
+                 title = _("Maximum Levels for Voltage"),
+                 elements = [
+                     Integer(title = _("Warning at"), unit="Volt"),
+                     Integer(title = _("Critical at"), unit="Volt"),
+                 ])),
+            ("output_voltage_min",
+             Tuple(
+                 title = _("Minimum Levels for Voltage"),
+                 elements = [
+                     Integer(title = _("Warning if below"), unit="Volt"),
+                     Integer(title = _("Critical if below"), unit="Volt"),
+                 ])),
+            ("load_perc_max",
+             Tuple(
+                 title = _("Maximum Levels for load in percent"),
+                 elements = [
+                     Percentage(title = _("Warning at")),
+                     Percentage(title = _("Critical at")),
+                 ])),
+            ("load_perc_min",
+             Tuple(
+                 title = _("Minimum Levels for load in percent"),
+                 elements = [
+                     Percentage(title = _("Warning if below")),
+                     Percentage(title = _("Critical if below")),
+                 ])),
+
+        ],
+    ),
+    TextAscii( title = _("ID of phase")),
+    "dict",
+)
+
+register_check_parameters(
+    subgroup_environment,
+    "airflow",
+    _("Airflow levels"),
+    Dictionary(
+        title = _("Levels for airflow"),
+        elements = [
+            ("level_low",
+             Tuple(
+                 title = _("Lower levels"),
+                 elements = [
+                     Integer(title = _( "Warning if below"), unit=_("l/s")),
+                     Integer(title = _( "Critical if below"), unit=_("l/s"))
+                 ]
+             )
+            ),
+            ("level_high",
+             Tuple(
+                 title = _("Upper levels"),
+                 elements = [
+                     Integer(title = _( "Warning at"), unit=_("l/s")),
+                     Integer(title = _( "Critical at"), unit=_("l/s"))
+                 ]
+             )
+            ),
+        ]
+    ),
+    None,
+    None,
+)
+
+
+register_check_parameters(
+    subgroup_environment,
+    "ups_capacity",
+    _("UPS Capacity"),
+    Dictionary(
+        title = _("Levels for battery parameters"),
+        optional_keys = False,
+        elements = [
+            ("capacity",
+             Tuple(
+                 title = _("Battery capacity"),
+                 elements = [
+                     Integer(
+                         title = _("Warning at"),
+                         help = _("The battery capacity in percent at and below which a warning state is triggered"),
+                         unit = "%",
+                         default_value = 95,
+                     ),
+                     Integer(
+                         title = _("Critical at"),
+                         help = _("The battery capacity in percent at and below which a critical state is triggered"),
+                         unit = "%",
+                         default_value = 90,
+                     ),
+                 ],
+             ),
+            ),
+            ("battime",
+             Tuple(
+                 title = _("Time left on battery"),
+                 elements = [
+                     Integer(
+                         title = _("Warning at"),
+                         help = _("The time left on battery at and below which a warning state is triggered"),
+                         unit = _("min"),
+                         default_value = 0,
+                     ),
+                     Integer(
+                         title = _("Critical at"),
+                         help = _("The time left on battery at and below which a critical state is triggered"),
+                         unit = 
_("min"), + default_value = 0, + ), + ], + ), + )], + ), + None, + "first" +) + +register_check_parameters( + subgroup_applications, + "jvm_threads", + _("JVM threads"), + Tuple( + help = _("This rule sets the warn and crit levels for the number of threads " + "running in a JVM."), + elements = [ + Integer( + title = _("Warning at"), + unit = _("threads"), + default_value = 80, + ), + Integer( + title = _("Critical at"), + unit = _("threads"), + default_value = 100, + ), + ] + ), + TextAscii( + title = _("Name of the virtual machine"), + help = _("The name of the application server"), + allow_empty = False, + ), + "first" +) + +register_check_parameters( + subgroup_applications, + "jvm_uptime", + _("JVM uptime (since last reboot)"), + Dictionary( + help = _("This rule sets the warn and crit levels for the uptime of a JVM. " + "Other keywords for this rule: Tomcat, Jolokia, JMX. "), + elements = [ + ( "min", + Tuple( + title = _("Minimum required uptime"), + elements = [ + Age(title = _("Warning if below")), + Age(title = _("Critical if below")), + ] + )), + ( "max", + Tuple( + title = _("Maximum allowed uptime"), + elements = [ + Age(title = _("Warning at")), + Age(title = _("Critical at")), + ] + )), + ] + ), + TextAscii( + title = _("Name of the virtual machine"), + help = _("The name of the application server"), + allow_empty = False, + ), + "first", +) +register_check_parameters( + subgroup_applications, + "jvm_sessions", + _("JVM session count"), + Tuple( + help = _("This rule sets the warn and crit levels for the number of current " + "connections to a JVM application on the servlet level."), + elements = [ + Integer( + title = _("Warning if below"), + unit = _("sessions"), + default_value = -1, + ), + Integer( + title = _("Critical if below"), + unit = _("sessions"), + default_value = -1, + ), + Integer( + title = _("Warning at"), + unit = _("sessions"), + default_value = 800, + ), + Integer( + title = _("Critical at"), + unit = _("sessions"), + default_value = 1000, + ), + ] + ), + TextAscii( + title = _("Name of the virtual machine"), + help = _("The name of the application server"), + allow_empty = False, + ), + "first" +) + +register_check_parameters( + subgroup_applications, + "jvm_requests", + _("JVM request count"), + Tuple( + help = _("This rule sets the warn and crit levels for the number " + "of incoming requests to a JVM application server."), + elements = [ + Integer( + title = _("Warning if below"), + unit = _("requests/sec"), + default_value = -1, + ), + Integer( + title = _("Critical if below"), + unit = _("requests/sec"), + default_value = -1, + ), + Integer( + title = _("Warning at"), + unit = _("requests/sec"), + default_value = 800, + ), + Integer( + title = _("Critical at"), + unit = _("requests/sec"), + default_value = 1000, + ), + ] + ), + TextAscii( + title = _("Name of the virtual machine"), + help = _("The name of the application server"), + allow_empty = False, + ), + "first" +) + +register_check_parameters( + subgroup_applications, + "jvm_queue", + _("JVM Execute Queue Count"), + Tuple( + help = _("The BEA application servers have 'Execute Queues' " + "in which requests are processed. 
This rule allows you to set "
+             "warn and crit levels for the number of requests that are "
+             "being queued for processing."),
+        elements = [
+            Integer(
+                title = _("Warning at"),
+                unit = _("requests"),
+                default_value = 20,
+            ),
+            Integer(
+                title = _("Critical at"),
+                unit = _("requests"),
+                default_value = 50,
+            ),
+        ]
+    ),
+    TextAscii(
+        title = _("Name of the virtual machine"),
+        help = _("The name of the application server"),
+        allow_empty = False,
+    ),
+    "first"
+)
+
+
+register_check_parameters(
+    subgroup_applications,
+    "jvm_memory",
+    _("JVM memory levels"),
+    Dictionary(
+        help = _("This rule allows you to set the warn and crit levels of the heap / "
+                 "non-heap and total memory area usage on web application servers "
+                 "(for example Tomcat). It requires the Jolokia (JMX) WAR file to be deployed."),
+        elements = [
+            ( "totalheap",
+              Alternative(
+                  title = _("Total Memory Levels"),
+                  elements = [
+                      Tuple(
+                          title = _("Percentage levels of used space"),
+                          elements = [
+                              Percentage(title = _("Warning at"), label = _("% usage")),
+                              Percentage(title = _("Critical at"), label = _("% usage")),
+                          ]
+                      ),
+                      Tuple(
+                          title = _("Absolute free space in MB"),
+                          elements = [
+                              Integer(title = _("Warning if below"), unit = _("MB")),
+                              Integer(title = _("Critical if below"), unit = _("MB")),
+                          ]
+                      )
+                  ])),
+            ( "heap",
+              Alternative(
+                  title = _("Heap Memory Levels"),
+                  elements = [
+                      Tuple(
+                          title = _("Percentage levels of used space"),
+                          elements = [
+                              Percentage(title = _("Warning at"), label = _("% usage")),
+                              Percentage(title = _("Critical at"), label = _("% usage")),
+                          ]
+                      ),
+                      Tuple(
+                          title = _("Absolute free space in MB"),
+                          elements = [
+                              Integer(title = _("Warning if below"), unit = _("MB")),
+                              Integer(title = _("Critical if below"), unit = _("MB")),
+                          ]
+                      )
+                  ])),
+            ( "nonheap",
+              Alternative(
+                  title = _("Nonheap Memory Levels"),
+                  elements = [
+                      Tuple(
+                          title = _("Percentage levels of used space"),
+                          elements = [
+                              Percentage(title = _("Warning at"), label = _("% usage")),
+                              Percentage(title = _("Critical at"), label = _("% usage")),
+                          ]
+                      ),
+                      Tuple(
+                          title = _("Absolute free space in MB"),
+                          elements = [
+                              Integer(title = _("Warning if below"), unit = _("MB")),
+                              Integer(title = _("Critical if below"), unit = _("MB")),
+                          ]
+                      )
+                  ])),
+        ]),
+    TextAscii(
+        title = _("Name of the virtual machine"),
+        help = _("The name of the application server"),
+        allow_empty = False,
+    ),
+    "dict"
+)
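The three Alternative levels above follow a common Check_MK convention: percentage levels are entered as Percentage values (floats), absolute MB levels as Integer values (ints), so a check can tell the two modes apart by type. A rough illustration of that dispatch — not the shipped jvm_memory check code:

    def heap_state(used_mb, total_mb, levels):
        warn, crit = levels
        if isinstance(warn, float):           # percentage of used space
            used_perc = 100.0 * used_mb / total_mb
            if used_perc >= crit: return 2
            if used_perc >= warn: return 1
        else:                                 # absolute free space in MB
            free_mb = total_mb - used_mb
            if free_mb <= crit: return 2
            if free_mb <= warn: return 1
        return 0

    # heap_state(900, 1000, (80.0, 90.0)) == 2   (90% used, percentage mode)
    # heap_state(900, 1000, (200, 50))    == 1   (100 MB free, absolute mode)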
Tuple( + title = _("Number of messages in deferred state"), + elements = [ + Integer(title = _("Warning at")), + Integer(title = _("Critical at")), + ] + )), -register_rule( - group + '/' + subgroup_storage, - "diskstat_inventory", - ListChoice( - title = _("Inventory mode for Disk IO check"), - help = _("This rule controls which and how many checks will be created " - "for monitoring individual physical and logical disks."), - choices = [ - ( "summary", _("Create a summary over all physical disks") ), - ( "legacy", _("Create a summary for all read, one for write") ), - ( "physical", _("Create a separate check for each physical disk") ), - ( "lvm", _("Create a separate check for each LVM volume (Linux)") ), - ( "vxvm", _("Creata a separate check for each VxVM volume (Linux)") ), ], - default_value = [ 'summary' ], ), - match="first") + TextAscii( + title = _("Instance name"), + allow_empty = True), + "dict", +) -register_rule(group + '/' + subgroup_networking, - varname = "if_groups", - title = _('Network interface groups'), - help = _('Normally the if checks create a single service for interface. ' - 'By defining if-group patterns multiple interfaces can be combined together. ' - 'A single service is created for this interface group showing the total traffic amount ' - 'of its members. You can configure if interfaces which are identified as group interfaces ' - 'should not show up as single service'), - valuespec = ListOf( - Dictionary( +register_check_parameters( + subgroup_applications, + "db2_logsizes", + _("Size of DB2 logfiles"), + get_free_used_dynamic_valuespec("free", "logfile", default_value = (20.0, 10.0)), + TextAscii( + title = _("Logfile name"), + allow_empty = True), + "first" +) + +register_check_parameters( + subgroup_applications, + "db2_mem", + _("Memory levels for DB2 memory usage"), + Tuple( + elements = [ + Percentage(title = _("Warning if less than"), unit = _("% memory left")), + Percentage(title = _("Critical if less than"), unit = _("% memory left")), + ], + ), + TextAscii( + title = _("Instance name"), + allow_empty = True), + "first" +) + +register_check_parameters( + subgroup_applications, + "windows_updates", + _("WSUS (Windows Updates)"), + Tuple( + title = _("Parameters for the Windows Update Check with WSUS"), + help = _("Set the according numbers to 0 if you want to disable alerting."), + elements = [ + Integer(title = _("Warning if at least this number of important updates are pending")), + Integer(title = _("Critical if at least this number of important updates are pending")), + Integer(title = _("Warning if at least this number of optional updates are pending")), + Integer(title = _("Critical if at least this number of optional updates are pending")), + Age(title = _("Warning if time until forced reboot is less then"), default_value = 604800), + Age(title = _("Critical if time time until forced reboot is less then"), default_value = 172800), + Checkbox(title = _("display all important updates verbosely"), default_value = True), + ], + ), + None, + "first" +) + +register_check_parameters( + subgroup_applications, + "antivir_update_age", + _("Age of last AntiVirus update"), + Tuple( + title = _("Age of last AntiVirus update"), + elements = [ + Age(title = _("Warning level for time since last update")), + Age(title = _("Critical level for time since last update")), + ] + ), + None, + "first" +) + +register_check_parameters(subgroup_applications, + "logwatch_ec", + _('Logwatch Event Console Forwarding'), + Alternative( + title = _("Forwarding"), + help = 
_("Instead of using the regular logwatch check all lines received by logwatch can " + "be forwarded to a Check_MK event console daemon to be processed. The target event " + "console can be configured for each host in a separate rule."), + style = "dropdown", + elements = [ + FixedValue( + "", + totext = _("Messages are handled by logwatch."), + title = _("No forwarding"), + ), + Dictionary( + title = _('Forward Messages to Event Console'), + elements = [ + ('restrict_logfiles', + ListOfStrings( + title = _('Restrict Logfiles (Prefix matching regular expressions)'), + help = _("Put the item names of the logfiles here. For example \"System$\" " + "to select the service \"LOG System\". You can use regular expressions " + "which must match the beginning of the logfile name."), + ), + ), + ('method', Alternative( + title = _("Forwarding Method"), elements = [ - ("name", - TextAscii( - title = _("Name of group"), - help = _("Name of group in service description"), - allow_empty = False, - )), - ("iftype", Integer( - title = _("Interface port type"), - help = _("The number of the port type. For example 53 (propVirtual)"), - default_value = 0, - minvalue = 1, - maxvalue = 255, - )), - ("single", Checkbox( - title = _("Do not list grouped interfaces separately"), - )), + Alternative( + title = _("Send events to local event console"), + elements = [ + FixedValue( + "", + totext = _("Directly forward to event console"), + title = _("Send events to local event console in same OMD site"), + ), + TextAscii( + title = _("Send events to local event console into unix socket"), + allow_empty = False, + ), + + FixedValue( + "spool:", + totext = _("Spool to event console"), + title = _("Spooling: Send events to local event console in same OMD site"), + ), + Transform( + TextAscii(), + title = _("Spooling: Send events to local event console into given spool directory"), + allow_empty = False, + forth = lambda x: x[6:], # remove prefix + back = lambda x: "spool:" + x, # add prefix + ), + ], + match = lambda x: x and (x == 'spool:' and 2 or x.startswith('spool:') and 3 or 1) or 0 + ), + Tuple( + title = _("Send events to remote syslog host"), + elements = [ + DropdownChoice( + choices = [ + ('udp', _('UDP')), + ('tcp', _('TCP')), + ], + title = _("Protocol"), + ), + TextAscii( + title = _("Address"), + allow_empty = False, + ), + Integer( + title = _("Port"), + allow_empty = False, + default_value = 514, + minvalue = 1, + maxvalue = 65535, + size = 6, + ), + ] + ), ], - required_keys = ["name", "iftype", "single"]), - add_label = _("Add pattern")), - match = 'all', + )), + ('facility', DropdownChoice( + title = _("Syslog facility for forwarded messages"), + help = _("When forwarding messages and no facility can be extracted from the " + "message this facility is used."), + choices = syslog_facilities, + default_value = 17, # local1 + )), + ('monitor_logfilelist', + Checkbox( + title = _("Monitoring of forwarded logfiles"), + label = _("Warn if list of forwarded logfiles changes"), + help = _("If this option is enabled, the check monitors the list of forwarded " + "logfiles and will warn you if at any time a logfile is missing or exceeding " + "when compared to the initial list that was snapshotted during service detection. 
" + "Reinventorize this check in order to make it OK again."), + ) + ), + ('expected_logfiles', + ListOfStrings( + title = _("List of expected logfiles"), + help = _("When the monitoring of forwarded logfiles is enabled, the check verifies that " + "all of the logfiles listed here are reported by the monitored system."), + ) + ), + ], + optional_keys = ['restrict_logfiles', 'expected_logfiles'], + ), + ], + default_value = '', + ), + None, + 'first', ) - - -checkgroups.append(( - subgroup_applications, - "mailqueue_length", - _("Number of mails in outgoing mail queue"), - Tuple( - help = _("These levels are applied to the number of Email that are " - "currently in the outgoing mail queue."), - elements = [ - Integer(title = _("Warning at"), unit = _("mails"), default_value = 10), - Integer(title = _("Critical at"), unit = _("mails"), default_value = 20), - ] +register_rule(group + '/' + subgroup_applications, + varname = "logwatch_groups", + title = _('Logfile Grouping Patterns'), + help = _('The check logwatch normally creates one service for each logfile. ' + 'By defining grouping patterns you can switch to the check logwatch.groups. ' + 'That check monitors a list of logfiles at once. This is useful if you have ' + 'e.g. a folder with rotated logfiles where the name of the current logfile' + 'also changes with each rotation'), + valuespec = ListOf( + Tuple( + help = _("This defines one logfile grouping pattern"), + show_titles = True, + orientation = "horizontal", + elements = [ + TextAscii( + title = _("Name of group"), + ), + Tuple( + show_titles = True, + orientation = "vertical", + elements = [ + TextAscii(title = _("Include Pattern")), + TextAscii(title = _("Exclude Pattern")) + ], + ), + ], + ), + add_label = _("Add pattern group"), ), - None, - None -)) + match = 'all', +) -checkgroups.append(( - subgroup_os, - "uptime", - _("Display the system's uptime as a check"), - None, - None, None)) +register_rule( + group + "/" + subgroup_networking, + "if_disable_if64_hosts", + title = _("Hosts forced to use if instead of if64"), + help = _("A couple of switches with broken firmware report that they " + "support 64 bit counters but do not output any actual data " + "in those counters. Listing those hosts in this rule forces " + "them to use the interface check with 32 bit counters instead.")) -checkgroups.append(( - subgroup_storage, - "zpool_status", - _("ZFS storage pool status"), - None, - None, None)) -checkgroups.append(( - subgroup_virt, - "vm_state", - _("Overall state of a virtual machine"), - None, - None, None)) +# Create Rules for static checks +register_rulegroup("static", _("Manual Checks"), + _("Statically configured Check_MK checks that do not rely on the inventory")) -checkgroups.append(( - subgroup_hardware, - "hw_errors", - _("Simple checks for BIOS/Hardware errors"), - None, - None, None)) -checkgroups.append(( +# wmic_process does not support inventory at the moment +register_check_parameters( subgroup_applications, - "omd_status", - _("OMD site status"), - None, - TextAscii( - title = _("Name of the OMD site"), - help = _("The name of the OMD site to check the status for")), - "first")) - -checkgroups.append(( - subgroup_storage, - "network_fs", - _("Network filesystem - overall status (e.g. 
NFS)"), - None, - TextAscii( - title = _("Name of the mount point"), - help = _("For NFS enter the name of the mount point.")), - "first")) - -checkgroups.append(( - subgroup_storage, - "multipath", - _("Multipathing - health of a multipath LUN"), - Integer( - title = _("Expected number of active paths")), - TextAscii( - title = _("Name of the MP LUN"), - help = _("For Linux multipathing this is either the UUID (e.g. " - "60a9800043346937686f456f59386741), or the configured " - "alias.")), - "first")) - -checkgroups.append(( - subgroup_storage, - "hpux_multipath", - _("Multipathing on HPUX - state of paths of a LUN"), + "wmic_process", + _("Memory and CPU of processes on Windows"), Tuple( - title = _("Expected path situation"), elements = [ - Integer(title = _("Number of active paths")), - Integer(title = _("Number of standby paths")), - Integer(title = _("Number of failed paths")), - Integer(title = _("Number of unopen paths")), - ]), + TextAscii( + title = _("Name of the process"), + allow_empty = False, + ), + Integer(title = _("Memory warning at"), unit = "MB"), + Integer(title = _("Memory critical at"), unit = "MB"), + Integer(title = _("Pagefile warning at"), unit = "MB"), + Integer(title = _("Pagefile critical at"), unit = "MB"), + Percentage(title = _("CPU usage warning at")), + Percentage(title = _("CPU usage critical at")), + ], + ), TextAscii( - title = _("WWID of the LUN")), - "first")) + title = _("Process name for usage in the Nagios service description"), + allow_empty = False), + "first", False +) +# Add checks that have parameters but are only configured as manual checks +def ps_convert_from_tuple(params): + if type(params) in (list, tuple): + if len(params) == 5: + procname, warnmin, okmin, okmax, warnmax = params + user = None + elif len(params) == 6: + procname, user, warnmin, okmin, okmax, warnmax = params + params = { + "process" : procname, + "warnmin" : warnmin, + "okmin" : okmin, + "okmax" : okmax, + "warnmax" : warnmax, + } + if user != None: + params["user"] = user + return params + +# Next step in conversion: introduce "levels" +def ps_convert_from_singlekeys(old_params): + params = {} + params.update(ps_convert_from_tuple(old_params)) + if "warnmin" in params: + params["levels"] = ( + params.get("warnmin", 1), + params.get("okmin", 1), + params.get("warnmax", 99999), + params.get("okmax", 99999), + ) + for key in [ "warnmin", "warnmax", "okmin", "okmax" ]: + if key in params: + del params[key] + return params + +def ps_convert_inventorized_from_singlekeys(old_params): + params = ps_convert_from_singlekeys(old_params) + if 'user' in params: + del params['user'] + if 'process' in params: + del params['process'] + return params -checkgroups.append(( +# Rule for disovered process checks +register_check_parameters( subgroup_applications, - "services", - _("Windows Services"), - Dictionary( - elements = [ - ( "states", - ListOf( - Tuple( - orientation = "horizontal", + "ps", + _("State and count of processes"), + Transform( + Dictionary( + elements = process_level_elements, + ), + forth = ps_convert_inventorized_from_singlekeys, + ), + TextAscii( + title = _("Process name as defined at discovery"), + ), + "dict", + has_inventory = True, + register_static_check = False, +) + +# Rule for static process checks +register_check_parameters( + subgroup_applications, + "ps", + _("State and count of processes"), + Transform( + Dictionary( + elements = [ + ( "process", Alternative( + title = _("Process Matching"), + style = "dropdown", elements = [ - DropdownChoice( - title = 
_("Expected state"), - default_value = "running", - choices = [ - ( None, _("ignore the state") ), - ( "running", _("running") ), - ( "stopped", _("stopped") )]), - DropdownChoice( - title = _("Start type"), - default_value = "auto", - choices = [ - ( None, _("ignore the start type") ), - ( "demand", _("demand") ), - ( "disabled", _("disabled") ), - ( "auto", _("auto") ), - ( "unknown", _("unknown (old agent)") ), - ]), - MonitoringState( - title = _("Resulting state"), + TextAscii( + title = _("Exact name of the process without argments"), + size = 50, ), + Transform( + RegExp(size = 50), + title = _("Regular expression matching command line"), + help = _("This regex must match the beginning of the complete " + "command line of the process including arguments"), + forth = lambda x: x[1:], # remove ~ + back = lambda x: "~" + x, # prefix ~ + ), + FixedValue( + None, + totext = "", + title = _("Match all processes"), + ) ], - default_value = ( "running", "auto", 0)), - title = _("Services states"), - help = _("You can specify a separate monitoring state for each possible " - "combination of service state and start type. If you do not use " - "this parameter, then only running/auto will be assumed to be OK."), - )), - ( "else", - MonitoringState( - title = _("State if no entry matches"), - default_value = 2, - ), - ),] + match = lambda x: (not x and 2) or (x[0] == '~' and 1 or 0) + )), + ( "user", Alternative( + title = _("Name of operating system user"), + style = "dropdown", + elements = [ + TextAscii( + title = _("Exact name of the operating system user") + ), + Transform( + RegExp(size = 50), + title = _("Regular expression matching username"), + help = _("This regex must match the beginning of the complete " + "username"), + forth = lambda x: x[1:], # remove ~ + back = lambda x: "~" + x, # prefix ~ + ), + FixedValue( + None, + totext = "", + title = _("Match all users"), + ) + + ], + match = lambda x: (not x and 2) or (x[0] == '~' and 1 or 0) + + )), + ] + process_level_elements, + # required_keys = [ "process" ], + ), + forth = ps_convert_from_singlekeys, ), TextAscii( - title = _("Name of the service"), - help = _("Please Please note, that the agent replaces spaces in " - "the service names with underscores. If you are unsure about the " - "correct spelling of the name then please look at the output of " - "the agent (cmk -d HOSTNAME). The service names are in the first " - "column of the section <<<services>>>. Please " - "do not mix up the service name with the display name of the service." - "The latter one is just being displayed as a further information."), - allow_empty = False), - "first")) + title = _("Process Name"), + help = _("This name will be used in the description of the service"), + allow_empty = False, + regex = "^[a-zA-Z_0-9 _.-]*$", + regex_error = _("Please use only a-z, A-Z, 0-9, space, underscore, " + "dot and hyphon for your service description"), + ), + "dict", + has_inventory = False, +) -checkgroups.append(( - subgroup_storage, - "raid", - _("RAID: overall state"), +register_check_parameters( + subgroup_os, + "zypper", + _("Zypper Updates"), None, - TextAscii( - title = _("Name of the device"), - help = _("For Linux MD specify the device name without the " - "/dev/, e.g. md0, for hardware raids " - "please refer to the manual of the actual check being used.")), - "first")) - -checkgroups.append(( - subgroup_storage, - "raid_disk", - _("RAID: state of a single disk"), - TextAscii( - title = _("Target state"), - help = _("State the disk is expected to be in. 
Typical good states " - "are online, host spare, OK and the like. The exact way of how " - "to specify a state depends on the check and hard type being used. " - "Please take examples from inventorized checks for reference.")), - TextAscii( - title = _("Number or ID of the disk"), - help = _("How the disks are named depends on the type of hardware being " - "used. Please look at already inventorized checks for examples.")), - "first")) + None, None, +) -checkgroups.append(( +register_check_parameters( subgroup_environment, - "room_temperature", - _("Room temperature (external thermal sensors)"), + "airflow_deviation", + _("Airflow Deviation in Percent"), Tuple( - help = _("Temperature levels for external thermometers that are used " - "for monitoring the temperature of a datacenter. An example " - "is the webthem from W&T."), + help = _("Levels for Airflow Deviation measured at airflow sensors "), elements = [ - Integer(title = _("warning at"), unit = u"°C", default_value = 26), - Integer(title = _("critical at"), unit = u"°C", default_value = 30), + Float(title = _("critical if below or equal"), unit = u"%", default_value = -20), + Float(title = _("warning if below or equal"), unit = u"%", default_value = -20), + Float(title = _("warning if above or equal"), unit = u"%", default_value = 20), + Float(title = _("critical if above or equal"), unit = u"%", default_value = 20), ]), TextAscii( - title = _("Sensor ID"), - help = _("The identificator of the themal sensor.")), - "first")) + title = _("Detector ID"), + help = _("The identifier of the detector.")), + "first" +) + + +vs_license = Alternative( + title = _("Levels for Number of Licenses"), + style = "dropdown", + default_value = None, + elements = [ + Tuple( + title = _("Absolute levels for unused licenses"), + elements = [ + Integer(title = _("Warning at"), default_value = 5, unit = _("unused licenses")), + Integer(title = _("Critical at"), default_value = 0, unit = _("unused licenses")), + ] + ), + Tuple( + title = _("Percentual levels for unused licenses"), + elements = [ + Percentage(title = _("Warning at"), default_value = 10.0), + Percentage(title = _("Critical at"), default_value = 0), + ] + ), + FixedValue( + None, + totext = _("Critical when all licenses are used"), + title = _("Go critical if all licenses are used"), + ), + FixedValue( + False, + title = _("Always report OK"), + totext = _("Alerting depending on the number of used licenses is disabled"), + ) + ] + ) + +register_check_parameters( + subgroup_applications, + "esx_licenses", + _("Number of used VMware licenses"), + vs_license, + TextAscii( + title = _("Name of the license"), + help = _("For example VMware vSphere 5 Standard"), + allow_empty = False, + ), + "first" +) + +register_check_parameters( + subgroup_applications, + "citrix_licenses", + _("Number of used Citrix licenses"), + vs_license, + TextAscii( + title = _("ID of the license, e.g. PVSD_STD_CCS"), + allow_empty = False, + ), + "first" +) -checkgroups.append(( - subgroup_environment, - "disk_temperature", - _("Harddisk temperature (e.g. via SMART)"), +register_check_parameters( + subgroup_applications, + "citrix_load", + _("CPU load of Citrix Server"), Tuple( - help = _("Temperature levels for hard disks, that is determined e.g. 
via SMART"), + title = _("Citrix Server load"), elements = [ - Integer(title = _("warning at"), unit = u"°C", default_value = 35), - Integer(title = _("critical at"), unit = u"°C", default_value = 40), + Integer(title = _("warning at"), default_value = 8500), + Integer(title = _("critical at"), default_value = 9500), ]), - TextAscii( - title = _("Hard disk device"), - help = _("The identificator of the hard disk device, e.g. sda.")), - "first" -)) + None, None +) register_check_parameters( - subgroup_environment, - "eaton_enviroment", - _("Temperature and Humidity for Eaton UPS"), + subgroup_applications, + "citrix_sessions", + _("Citrix Terminal Server Sessions"), Dictionary( elements = [ - ( "temp", + ( "total", Tuple( - title = _("Temperature"), + title = _("Total number of Sessions"), elements = [ - Integer(title = _("warning at"), unit = u"°C", default_value = 26), - Integer(title = _("critical at"), unit = u"°C", default_value = 30), - ])), - ( "remote_temp", + Integer(title = _("warning at"), unit = "Sessions" ), + Integer(title = _("critical at"), unit = "Session" ), + ]) + ), + ( "active", Tuple( - title = _("Remote Temperature"), + title = _("Number of Active Sessions"), elements = [ - Integer(title = _("warning at"), unit = u"°C", default_value = 26), - Integer(title = _("critical at"), unit = u"°C", default_value = 30), - ])), - ( "humidity", + Integer(title = _("warning at"), unit = "Sessions" ), + Integer(title = _("critical at"), unit = "Session" ), + ]) + ), + ( "inactive", Tuple( - title = _("Humidity"), + title = _("Number of Inactive Sessions"), elements = [ - Integer(title = _("warning at"), unit = u"%", default_value = 60), - Integer(title = _("critical at"), unit = u"%", default_value = 75), - ])), - ]), - None, - "first") - + Integer(title = _("warning at"), unit = "Sessions" ), + Integer(title = _("critical at"), unit = "Session" ), + ]) + ), + ] + ), + None, "dict" +), -checkgroups.append(( - subgroup_environment, - "pdu_gude", - _("Levels for Gude PDU Devices"), +register_check_parameters( + subgroup_networking, + "adva_ifs", + _("Adva Optical Transport Laser Power"), Dictionary( elements = [ - ( "kWh", - Tuple( - title = _("Total accumulated Active Energy of Power Channel"), - elements = [ - Integer(title = _("warning at"), unit = _("kW") ), - Integer(title = _("critical at"), unit = _("kW")), - ])), - ( "W", - Tuple( - title = _("Active Power"), - elements = [ - Integer(title = _("warning at"), unit = _("W") ), - Integer(title = _("critical at"), unit = _("W") ), - ])), - ( "A", + ( "limits_output_power", Tuple( - title = _("Current on Power Channel"), + title = _("Sending Power"), elements = [ - Integer(title = _("warning at"), unit = _("A") ), - Integer(title = _("critical at"), unit = _("A")), - ])), - ( "V", + Float(title = _("lower limit"), unit = "dBm"), + Float(title = _("upper limit"), unit = "dBm"), + ]) + ), + ( "limits_input_power", Tuple( - title = _("Voltage on Power Channel"), + title = _("Received Power"), elements = [ - Integer(title = _("warning lower"), unit = _("V") ), - Integer(title = _("critical lower"), unit = _("V") ), - ])), - ( "VA", + Float(title = _("lower limit"), unit = "dBm"), + Float(title = _("upper limit"), unit = "dBm"), + ]) + ), + ] + ), + TextAscii( + title = _("Interface"), + allow_empty = False, + ), + "dict" +), + +bluecat_operstates = [ + (1, "running normally"), + (2, "not running"), + (3, "currently starting"), + (4, "currently stopping"), + (5, "fault"), +] + +register_check_parameters( + subgroup_networking, + 
"bluecat_ntp", + _("Bluecat NTP Settings"), + Dictionary( + elements = [ + ( "oper_states", + Dictionary( + title = _("Operations States"), + elements = [ + ( "warning", + ListChoice( + title = _("States treated as warning"), + choices = bluecat_operstates, + default_value = [ 2, 3, 4 ], + ) + ), + ( "critical", + ListChoice( + title = _("States treated as critical"), + choices = bluecat_operstates, + default_value = [ 5 ], + ) + ), + ], + required_keys = [ 'warning', 'critical' ], + ) + ), + ( "stratum", Tuple( - title = _("Line Mean Apparent Power"), + title = _("Levels for Stratum "), elements = [ - Integer(title = _("warning at"), unit = _("VA") ), - Integer(title = _("critical at"), unit = _("VA")), - ])), - ]), - TextAscii( - title = _("Phase Number"), - help = _("The Number of the power Phase.")), - "first")) + Integer(title = _("Warning at")), + Integer(title = _("Critical at")), + ]) + ), + ] + ), + None, + "first" +), + +register_check_parameters( + subgroup_networking, + "bluecat_dhcp", + _("Bluecat DHCP Settings"), + Dictionary( + elements = [ + ( "oper_states", + Dictionary( + title = _("Operations States"), + elements = [ + ( "warning", + ListChoice( + title = _("States treated as warning"), + choices = bluecat_operstates, + default_value = [ 2, 3, 4 ], + ) + ), + ( "critical", + ListChoice( + title = _("States treated as critical"), + choices = bluecat_operstates, + default_value = [ 5 ], + ) + ), + ], + required_keys = [ 'warning', 'critical' ], + ) + ), + ], + required_keys = [ 'oper_states' ], # There is only one value, so its required + ), + None, + "first" +), +register_check_parameters( + subgroup_networking, + "bluecat_command_server", + _("Bluecat Command Server Settings"), + Dictionary( + elements = [ + ( "oper_states", + Dictionary( + title = _("Operations States"), + elements = [ + ( "warning", + ListChoice( + title = _("States treated as warning"), + choices = bluecat_operstates, + default_value = [ 2, 3, 4 ], + ) + ), + ( "critical", + ListChoice( + title = _("States treated as critical"), + choices = bluecat_operstates, + default_value = [ 5 ], + ) + ), + ], + required_keys = [ 'warning', 'critical' ], + ) + ), + ], + required_keys = [ 'oper_states' ], # There is only one value, so its required + ), + None, + "first" +), -checkgroups.append(( - subgroup_environment, - "temperature_auto", - _("Temperature sensors with builtin levels"), +register_check_parameters( + subgroup_networking, + "bluecat_dns", + _("Bluecat DNS Settings"), + Dictionary( + elements = [ + ( "oper_states", + Dictionary( + title = _("Operations States"), + elements = [ + ( "warning", + ListChoice( + title = _("States treated as warning"), + choices = bluecat_operstates, + default_value = [ 2, 3, 4 ], + ) + ), + ( "critical", + ListChoice( + title = _("States treated as critical"), + choices = bluecat_operstates, + default_value = [ 5 ], + ) + ), + ], + required_keys = [ 'warning', 'critical' ], + ) + ), + ], + required_keys = [ 'oper_states' ], # There is only one value, so its required + ), None, - TextAscii( - title = _("Sensor ID"), - help = _("The identificator of the themal sensor.")), - "first")) + "first" +), -checkgroups.append(( - subgroup_os, - "ntp_time", - _("State of NTP time synchronisation"), - Tuple( +bluecat_ha_operstates = [ + ( 1 , "standalone"), + ( 2 , "active"), + ( 3 , "passiv"), + ( 4 , "stopped"), + ( 5 , "stopping"), + ( 6 , "becoming active"), + ( 7 , "becomming passive"), + ( 8 , "fault"), +] + +register_check_parameters( + subgroup_networking, + "bluecat_ha", + 
_("Bluecat HA Settings"), + Dictionary( elements = [ - Integer( - title = _("Max. allowed stratum"), - default_value = 10, - help = _("The stratum (\"distance\" to the reference clock) at which the check gets critical."), + ( "oper_states", + Dictionary( + title = _("Operations States"), + elements = [ + ( "warning", + ListChoice( + title = _("States treated as warning"), + choices = bluecat_ha_operstates, + default_value = [ 5, 6, 7 ], + ), + ), + ( "critical", + ListChoice( + title = _("States treated as critical"), + choices = bluecat_ha_operstates , + default_value = [ 8, 4 ], + ), + ), + ], + required_keys = [ 'warning', 'critical' ], + ) ), - Float( - title = _("Warning at"), - unit = _("Milliseconds"), - default_value = 200.0, - help = _("The offset in ms at which a warning state is triggered."), + ], + required_keys = [ 'oper_states' ], # There is only one value, so its required + ), + None, + "first" +), +register_check_parameters( + subgroup_storage, + "fc_port", + _("FibreChannel Ports (FCMGMT MIB)"), + Dictionary( + elements = [ + ("bw", + Alternative( + title = _("Throughput levels"), + help = _("Please note: in a few cases the automatic detection of the link speed " + "does not work. In these cases you have to set the link speed manually " + "below if you want to monitor percentage values"), + elements = [ + Tuple( + title = _("Used bandwidth of port relative to the link speed"), + elements = [ + Percentage(title = _("Warning at"), unit = _("percent")), + Percentage(title = _("Critical at"), unit = _("percent")), + ] + ), + Tuple( + title = _("Used Bandwidth of port in megabyte/s"), + elements = [ + Integer(title = _("Warning at"), unit = _("MByte/s")), + Integer(title = _("Critical at"), unit = _("MByte/s")), + ] + ) + ]) ), - Float( - title = _("Critical at"), - unit = _("Milliseconds"), - default_value = 500.0, - help = _("The offset in ms at which a critical state is triggered."), + ("assumed_speed", + Float( + title = _("Assumed link speed"), + help = _("If the automatic detection of the link speed does " + "not work you can set the link speed here."), + unit = _("Gbit/s") + ) + ), + ("rxcrcs", + Tuple ( + title = _("CRC errors rate"), + elements = [ + Percentage( title = _("Warning at"), unit = _("percent")), + Percentage( title = _("Critical at"), unit = _("percent")), + ] + ) + ), + ("rxencoutframes", + Tuple ( + title = _("Enc-Out frames rate"), + elements = [ + Percentage( title = _("Warning at"), unit = _("percent")), + Percentage( title = _("Critical at"), unit = _("percent")), + ] + ) + ), + ("notxcredits", + Tuple ( + title = _("No-TxCredits errors"), + elements = [ + Percentage( title = _("Warning at"), unit = _("percent")), + Percentage( title = _("Critical at"), unit = _("percent")), + ] + ) + ), + ("c3discards", + Tuple ( + title = _("C3 discards"), + elements = [ + Percentage( title = _("Warning at"), unit = _("percent")), + Percentage( title = _("Critical at"), unit = _("percent")), + ] + ) + ), + ("average", + Integer ( + title = _("Averaging"), + help = _("If this parameter is set, all throughputs will be averaged " + "over the specified time interval before levels are being applied. Per " + "default, averaging is turned off. 
"), + unit = _("minutes"), + minvalue = 1, + default_value = 5, + ) ), +# ("phystate", +# Optional( +# ListChoice( +# title = _("Allowed states (otherwise check will be critical)"), +# choices = [ (1, _("unknown") ), +# (2, _("failed") ), +# (3, _("bypassed") ), +# (4, _("active") ), +# (5, _("loopback") ), +# (6, _("txfault") ), +# (7, _("nomedia") ), +# (8, _("linkdown") ), +# ] +# ), +# title = _("Physical state of port") , +# negate = True, +# label = _("ignore physical state"), +# ) +# ), +# ("opstate", +# Optional( +# ListChoice( +# title = _("Allowed states (otherwise check will be critical)"), +# choices = [ (1, _("unknown") ), +# (2, _("unused") ), +# (3, _("ready") ), +# (4, _("warning") ), +# (5, _("failure") ), +# (6, _("not participating") ), +# (7, _("initializing") ), +# (8, _("bypass") ), +# (9, _("ols") ), +# ] +# ), +# title = _("Operational state") , +# negate = True, +# label = _("ignore operational state"), +# ) +# ), +# ("admstate", +# Optional( +# ListChoice( +# title = _("Allowed states (otherwise check will be critical)"), +# choices = [ (1, _("unknown") ), +# (2, _("online") ), +# (3, _("offline") ), +# (4, _("bypassed") ), +# (5, _("diagnostics") ), +# ] +# ), +# title = _("Administrative state") , +# negate = True, +# label = _("ignore administrative state"), +# ) +# ) ] + ), + TextAscii( + title = _("port name"), + help = _("The name of the FC port"), ), - None, "first" -)) +) -checkgroups.append(( - subgroup_os, - "apc_symentra", - _("Levels for APC Symentra Check"), +register_check_parameters( + subgroup_environment, + "plug_count", + _("Number of active Plugs"), Tuple( + help = _("Levels for the number of active plugs in a device."), elements = [ - Integer( - title = _("Max. Crit Capacity"), - help = _("The battery capacity in percent at and below which a critical state is be triggered"), - ), - Integer( - title = _("Max. Battery Temperature"), - help = _("The critical temperature of the battery"), - ), - Integer( - title = _("Max. Current Power"), - help = _("The critical battery current in Ampere"), - ), - Integer( - title = _("Max. 
Voltage"), - help = _("The output voltage at and below which a critical state is triggered."), - ), - ] - ), + Integer(title = _("critical if below or equal"), default_value = 30), + Integer(title = _("warning if below or equal"), default_value = 32), + Integer(title = _("warning if above or equal"), default_value = 38), + Integer(title = _("critical if above or equal"), default_value = 40), + ]), None, "first" -)) +) -syslog_facilities = [ - (0, "kern"), - (1, "user"), - (2, "mail"), - (3, "daemon"), - (4, "auth"), - (5, "syslog"), - (6, "lpr"), - (7, "news"), - (8, "uucp"), - (9, "cron"), - (10, "authpriv"), - (11, "ftp"), - (16, "local0"), - (17, "local1"), - (18, "local2"), - (19, "local3"), - (20, "local4"), - (21, "local5"), - (22, "local6"), - (23, "local7"), -] +# Rules for configuring parameters of checks (services) +register_check_parameters( + subgroup_environment, + "ucs_bladecenter_chassis_voltage", + _("UCS Bladecenter Chassis Voltage Levels"), + Dictionary( + help = _("Here you can configure the 3.3V and 12V voltage levels for each chassis."), + elements = [ + ( "levels_3v_lower", + Tuple( + title = _("3.3 Volt Output Lower Levels"), + elements = [ + Float(title = _("warning if below or equal"), unit = "V", default_value = 3.25), + Float(title = _("critical if below or equal"), unit = "V", default_value = 3.20), + ] + )), + ( "levels_3v_upper", + Tuple( + title = _("3.3 Volt Output Upper Levels"), + elements = [ + Float(title = _("warning if above or equal"), unit = "V", default_value = 3.4), + Float(title = _("critical if above or equal"), unit = "V", default_value = 3.45), + ] + )), + ( "levels_12v_lower", + Tuple( + title = _("12 Volt Output Lower Levels"), + elements = [ + Float(title = _("warning if below or equal"), unit = "V", default_value = 11.9), + Float(title = _("critical if below or equal"), unit = "V", default_value = 11.8), + ] + )), + ( "levels_12v_upper", + Tuple( + title = _("12 Volt Output Upper Levels"), + elements = [ + Float(title = _("warning if above or equal"), unit = "V", default_value = 12.1), + Float(title = _("critical if above or equal"), unit = "V", default_value = 12.2), + ] + )) + ] + ), + TextAscii( + title = _("Chassis"), + help = _("The identifier of the chassis.")), + "dict" +) -checkgroups.append(( - subgroup_applications, - "jvm_memory", - _("JVM memory levels"), +register_check_parameters( + subgroup_applications, + "jvm_gc", + _("JVM garbage collection levels"), Dictionary( + help = _("This ruleset also covers Tomcat, Jolokia and JMX. 
"), elements = [ - ( "totalheap", + ( "CollectionTime", Alternative( - title = _("Total Memory Levels"), + title = _("Collection time levels"), elements = [ Tuple( - title = _("Percentage levels of used space"), - elements = [ - Percentage(title = _("Warning at"), label = _("% usage")), - Percentage(title = _("Critical at"), label = _("% usage")), - ] - ), - Tuple( - title = _("Absolute free space in MB"), + title = _("Time of garbage collection in ms per minute"), elements = [ - Integer(title = _("Warning lower than"), unit = _("MB")), - Integer(title = _("Critical lower than"), unit = _("MB")), + Integer(title = _("Warning at"), + unit = _("ms"), + allow_empty = False), + Integer(title = _("Critical at"), + unit = _("ms"), + allow_empty = False), ] - ) + ) ])), - ( "heap", + ( "CollectionCount", Alternative( - title = _("Heap Memory Levels"), + title = _("Collection count levels"), elements = [ Tuple( - title = _("Percentage levels of used space"), - elements = [ - Percentage(title = _("Warning at"), label = _("% usage")), - Percentage(title = _("Critical at"), label = _("% usage")), - ] - ), - Tuple( - title = _("Absolute free space in MB"), + title = _("Count of garbage collection per minute"), elements = [ - Integer(title = _("Warning lower than"), unit = _("MB")), - Integer(title = _("Critical lower than"), unit = _("MB")), + Integer(title = _("Warning at"), allow_empty = False), + Integer(title = _("Critical at"), allow_empty = False), ] - ) + ) ])), - ( "nonheap", + ]), + TextAscii( + title = _("Name of the virtual machine and/or
    garbage collection type"), + help = _("The name of the application server"), + allow_empty = False, + ), + "dict" +) + +register_check_parameters( + subgroup_applications, + "jvm_tp", + _("JVM tomcat threadpool levels"), + Dictionary( + help = _("This ruleset also covers Tomcat, Jolokia and JMX. "), + elements = [ + ( "currentThreadCount", Alternative( - title = _("Nonheap Memory Levels"), + title = _("Current thread count levels"), elements = [ Tuple( - title = _("Percentage levels of used space"), + title = _("Percentage levels of current thread count in threadpool"), elements = [ - Percentage(title = _("Warning at"), label = _("% usage")), - Percentage(title = _("Critical at"), label = _("% usage")), + Integer(title = _("Warning at"), + unit = _(u"%"), + allow_empty = False), + Integer(title = _("Critical at"), + unit = _(u"%"), + allow_empty = False), ] - ), + ) + ])), + ( "currentThreadsBusy", + Alternative( + title = _("Current threads busy levels"), + elements = [ Tuple( - title = _("Absolute free space in MB"), + title = _("Percentage of current threads busy in threadpool"), elements = [ - Integer(title = _("Warning lower than"), unit = _("MB")), - Integer(title = _("Critical lower than"), unit = _("MB")), + Integer(title = _("Warning at"), + unit = _(u"%"), + allow_empty = False), + Integer(title = _("Critical at"), + unit = _(u"%"), + allow_empty = False), ] - ) + ) ])), ]), TextAscii( - title = _("Name of the virtual machine"), + title = _("Name of the virtual machine and/or
    threadpool"), help = _("The name of the application server"), allow_empty = False, ), "dict" -)) - -checkgroups.append(( - subgroup_applications, - "db2_mem", - _("Memory levels for DB2 memory usage"), - Tuple( - elements = [ - Percentage(title = _("Memory left warning at")), - Percentage(title = _("Memory left critical at")), - ], - ), - TextAscii( - title = _("Instance name"), - allow_empty = True), - "first")) - - -checkgroups.append(( - subgroup_applications, - "logwatch_ec", - _('Logwatch Event Console Forwarding'), - Dictionary( - elements = [ - ('method', Alternative( - title = _("Forwarding Method"), - elements = [ - FixedValue( - None, - totext = "", - title = _("Send events to local event console in same OMD site"), - ), - TextAscii( - title = _("Send events to local event console into unix socket"), - allow_empty = False, - ), - Tuple( - title = _("Send events to remote syslog host"), - elements = [ - DropdownChoice( - choices = [ - ('udp', _('UDP')), - ('tcp', _('TCP')), - ], - title = _("Protocol"), - ), - TextAscii( - title = _("Address"), - allow_empty = False, - ), - Integer( - title = _("Port"), - allow_empty = False, - default_value = 514, - minvalue = 1, - maxvalue = 65535, - size = 6, - ), - ] - ), - ], - )), - ('facility', DropdownChoice( - title = _("Syslog facility for forwarded messages"), - help = _("When forwarding messages and no facility can be extracted from the " - "message this facility is used."), - choices = syslog_facilities, - default_value = 17, # local1 - )), - ], - optional_keys = [], - ), - None, - 'first' -)) - -# Create rules for check parameters of inventorized checks -for subgroup, checkgroup, title, valuespec, itemspec, matchtype in checkgroups: - register_check_parameters(subgroup, checkgroup, title, valuespec, itemspec, matchtype) - -checkgroups = [] - -register_rule( - group + "/" + subgroup_networking, - "if_disable_if64_hosts", - title = _("Hosts forced to use if instead of if64"), - help = _("A couple of switches with broken firmware report that they " - "support 64 bit counters but do not output any actual data " - "in those counters. 
Listing those hosts in this rule forces " - "them to use the interface check with 32 bit counters instead.")) - - -# Create Rules for static checks -register_rulegroup("static", _("Manual Checks"), - _("Statically configured Check_MK checks that do not rely on the inventory")) +) -# wmic_process does not support inventory at the moment -checkgroups.append(( - subgroup_applications, - "wmic_process", - _("Memory and CPU of processes on Windows"), +register_check_parameters( + subgroup_storage, + "heartbeat_crm", + _("Heartbeat CRM general status"), Tuple( elements = [ - TextAscii( - title = _("Name of the process"), - allow_empty = False, + Integer( + title = _("Maximum age"), + help = _("Maximum accepted age of the reported data in seconds"), + unit = _("seconds"), + default_value = 60, + ), + Optional( + TextAscii( + allow_empty = False + ), + title = _("Expected DC"), + help = _("The hostname of the expected distinguished controller of the cluster"), ), - Integer(title = _("Memory warning at"), unit = "MB"), - Integer(title = _("Memory critical at"), unit = "MB"), - Integer(title = _("Pagefile warning at"), unit = "MB"), - Integer(title = _("Pagefile critical at"), unit = "MB"), - Percentage(title = _("CPU usage warning at")), - Percentage(title = _("CPU usage critical at")), - ], + Optional( + Integer( + min_value = 2, + default_value = 2 + ), + title = _("Number of Nodes"), + help = _("The expected number of nodes in the cluster"), + ), + Optional( + Integer( + min_value = 0, + ), + title = _("Number of Resources"), + help = _("The expected number of resources in the cluster"), + ), + ] + ), + None, None +) + +register_check_parameters( + subgroup_storage, + "heartbeat_crm_resources", + _("Heartbeat CRM resource status"), + Optional( + TextAscii( + allow_empty = False + ), + title = _("Expected node"), + help = _("The hostname of the expected node to hold this resource."), + none_label = _("Do not enforce the resource to be hold by a specific node."), ), TextAscii( - title = _("Process name for usage in the Nagios service description"), - allow_empty = False), - "first")) + title = _("Resource Name"), + help = _("The name of the cluster resource as shown in the service description."), + allow_empty = False, + ), + "first" +) -# Add checks that have parameters but are only configured as manual checks -checkgroups.append(( +register_check_parameters( subgroup_applications, - "ps", - _("State and count of processes"), - Tuple( + "domino_tasks", + _("Lotus Domino Tasks"), + Dictionary( elements = [ - Alternative( - title = _("Name of the process"), + ( "process", Alternative( + title = _("Name of the task"), + style = "dropdown", elements = [ TextAscii( - title = _("Exact name of the process without argments"), + title = _("Exact name of the task"), size = 50, ), Transform( RegExp(size = 50), - title = _("Regular expression matching command line"), + title = _("Regular expression matching tasks"), help = _("This regex must match the beginning of the complete " - "command line of the process including arguments"), + "command line of the task including arguments"), forth = lambda x: x[1:], # remove ~ back = lambda x: "~" + x, # prefix ~ ), FixedValue( None, totext = "", - title = _("Match all processes"), + title = _("Match all tasks"), ) ], match = lambda x: (not x and 2) or (x[0] == '~' and 1 or 0) - ), - TextAscii( - title = _("Name of operating system user"), - help = _("Leave this empty, if the user does not matter"), - none_is_empty = True, - ), - Integer( - title = _("Minimum number 
of matched process for WARNING state"), - default_value = 1, - ), - Integer( - title = _("Minimum number of matched process for OK state"), - default_value = 1, - ), - Integer( - title = _("Maximum number of matched process for OK state"), + )), + ( "warnmin", Integer( + title = _("Minimum number of matched tasks for WARNING state"), default_value = 1, - ), - Integer( - title = _("Maximum number of matched process for WARNING state"), + )), + ( "okmin", Integer( + title = _("Minimum number of matched tasks for OK state"), default_value = 1, - ), - ]), + )), + ( "okmax", Integer( + title = _("Maximum number of matched tasks for OK state"), + default_value = 99999, + )), + ( "warnmax", Integer( + title = _("Maximum number of matched tasks for WARNING state"), + default_value = 99999, + )), + ], + required_keys = [ 'warnmin', 'okmin', 'okmax', 'warnmax', 'process' ], + ), TextAscii( title = _("Name of service"), help = _("This name will be used in the description of the service"), @@ -2672,9 +7414,63 @@ regex_error = _("Please use only a-z, A-Z, 0-9, space, underscore, " "dot and hyphon for your service description"), ), - "first")) + "first", False +) +register_check_parameters( + subgroup_applications, + "domino_mailqueues", + _("Lotus Domino Mail Queues"), + Dictionary( + elements = [ + ( "queue_length", + Tuple( + title = _("Number of Mails in Queue"), + elements = [ + Integer(title = _("warning at"), default_value = 300 ), + Integer(title = _("critical at"), default_value = 350 ), + ] + )), + ], + required_keys = [ 'queue_length' ], + ), + DropdownChoice( + choices = [ + ('lnDeadMail', _('Mails in Dead Queue')), + ('lnWaitingMail', _('Mails in Waiting Queue')), + ('lnMailHold', _('Mails in Hold Queue')), + ('lnMailTotalPending', _('Total Pending Mails')), + ('InMailWaitingforDNS', _('Mails Waiting for DNS Queue')), + ], + title = _("Domino Mail Queue Names"), + ), + "first" +) -for subgroup, checkgroup, title, valuespec, itemspec, matchtype in checkgroups: - register_check_parameters(subgroup, checkgroup, title, valuespec, itemspec, matchtype, False) +register_check_parameters( + subgroup_applications, + "domino_users", + _("Lotus Domino Users"), + Tuple( + title = _("Number of Lotus Domino Users"), + elements = [ + Integer(title = _("warning at"), default_value = 1000 ), + Integer(title = _("critical at"), default_value = 1500 ), + ] + ), + None, None +) +register_check_parameters( + subgroup_applications, + "domino_transactions", + _("Lotus Domino Transactions"), + Tuple( + title = _("Number of Transactions per Minute on a Lotus Domino Server"), + elements = [ + Integer(title = _("warning at"), default_value = 30000 ), + Integer(title = _("critical at"), default_value = 35000 ), + ] + ), + None, None +) diff -Nru check-mk-1.2.2p3/plugins/wato/datasource_programs.py check-mk-1.2.6p12/plugins/wato/datasource_programs.py --- check-mk-1.2.2p3/plugins/wato/datasource_programs.py 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/plugins/wato/datasource_programs.py 2015-09-21 10:59:54.000000000 +0000 @@ -0,0 +1,478 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . 
\ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. + +register_rulegroup("datasource_programs", + _("Datasource Programs"), + _("Specialized agents, e.g. check via SSH, ESX vSphere, SAP R/3")) +group = "datasource_programs" + +register_rule(group, + "datasource_programs", + TextAscii( + title = _("Individual program call instead of agent access"), + help = _("For agent based checks Check_MK allows you to specify an alternative " + "program that should be called by Check_MK instead of connecting the agent " + "via TCP. That program must output the agent's data on standard output in " + "the same format the agent would do. This is for example useful for monitoring " + "via SSH. The command line may contain the placeholders <IP> and " + "<HOST>."), + label = _("Command line to execute"), + empty_text = _("Access Check_MK Agent via TCP"), + size = 80, + attrencode = True)) + +register_rule(group, + "special_agents:vsphere", + Transform( + valuespec = Dictionary( + elements = [ + ( "user", + TextAscii( + title = _("vSphere User name"), + allow_empty = False, + ) + ), + ( "secret", + Password( + title = _("vSphere secret"), + allow_empty = False, + ) + ), + ( "tcp_port", + Integer( + title = _("TCP Port number"), + help = _("Port number for HTTPS connection to vSphere"), + default_value = 443, + minvalue = 1, + maxvalue = 65535, + ) + ), + ( "ssl", + Alternative( + title = _("SSL certificate checking"), + elements = [ + FixedValue( False, title = _("Deactivated"), totext=""), + FixedValue( True, title = _("Use hostname"), totext=""), + TextAscii( title = _("Use other hostname"), + help = _("The IP of the other hostname needs to be the same IP as the host address") + ) + ], + default_value = False + ) + ), + ( "timeout", + Integer( + title = _("Connect Timeout"), + help = _("The network timeout in seconds when communicating with vSphere or " + "to the Check_MK Agent. The default is 60 seconds. 
Please note that this " + "is not a total timeout but is applied to each individual network transation."), + default_value = 60, + minvalue = 1, + unit = _("seconds"), + ) + ), + ( "infos", + Transform( + ListChoice( + choices = [ + ( "hostsystem", _("Host Systems") ), + ( "virtualmachine", _("Virtual Machines") ), + ( "datastore", _("Datastores") ), + ( "counters", _("Performance Counters") ), + ( "licenses", _("License Usage") ), + ], + default_value = [ "hostsystem", "virtualmachine", "datastore", "counters" ], + allow_empty = False, + ), + forth = lambda v: [ x.replace("storage", "datastore") for x in v ], + title = _("Retrieve information about..."), + ) + ), + ( "host_pwr_display", + DropdownChoice( + title = _("Display ESX Host power state on"), + choices = [ + ( None, _("The queried ESX system (vCenter / Host)") ), + ( "esxhost", _("The ESX Host") ), + ( "vm", _("The Virtual Machine") ), + ], + default = None, + ) + ), + ( "vm_pwr_display", + DropdownChoice( + title = _("Display VM power state on"), + choices = [ + ( None, _("The queried ESX system (vCenter / Host)") ), + ( "esxhost", _("The ESX Host") ), + ( "vm", _("The Virtual Machine") ), + ], + default = None, + ) + ), + ( "spaces", + DropdownChoice( + title = _("Spaces in hostnames"), + choices = [ + ( "cut", _("Cut everything after first space") ), + ( "underscore", _("Replace with underscores") ), + ], + default = "underscore", + ) + ), + ( "direct", + DropdownChoice( + title = _("Type of query"), + choices = [ + ( True, _("Queried host is a host system" ) ), + ( False, _("Queried host is the vCenter") ), + ( "agent", _("Queried host is the vCenter with Check_MK Agent installed") ), + ], + default = True, + ) + ), + ( "skip_placeholder_vms", + Checkbox( + title = _("Placeholder VMs"), + label = _("Do no monitor placeholder VMs"), + default_value = True, + true_label = _("ignore"), + false_label = _("monitor"), + help = _("Placeholder VMs are created by the Site Recovery Manager(SRM) and act as backup " + "virtual machines in case the default vm is unable to start. This option tells the " + "vsphere agent to exclude placeholder vms in its output." + )) + ), + ( "use_pysphere", + Checkbox( + title = _("Compatibility mode"), + label = _("Support ESX 4.1 (using slower PySphere implementation)"), + true_label = _("Support 4.1"), + false_label = _("fast"), + help = _("The current very performant implementation of the ESX special agent " + "does not support older ESX versions than 5.0. Please use the slow " + "compatibility mode for those old hosts."), + ) + ), + ], + optional_keys = [ "tcp_port", "timeout", "vm_pwr_display", "host_pwr_display" ], + ), + title = _("Check state of VMWare ESX via vSphere"), + help = _("This rule selects the vSphere agent instead of the normal Check_MK Agent " + "and allows monitoring of VMWare ESX via the vSphere API. 
You can configure " + "your connection settings here."), + forth = lambda a: dict([("skip_placeholder_vms", True), ("ssl", False), ("use_pysphere" , False), ("spaces", "underscore")] + a.items()) + ), + factory_default = FACTORY_DEFAULT_UNUSED, # No default, do not use setting if no rule matches + match = 'first') + +register_rule(group, + "special_agents:netapp", + Dictionary( + title = _("Username and password for the NetApp Filer."), + elements = [ + ( "username", + TextAscii( + title = _("Username"), + allow_empty = False, + ) + ), + ( "password", + Password( + title = _("Password"), + allow_empty = False, + ) + ), + ], + optional_keys = False + ), + title = _("Check NetApp via WebAPI"), + help = _("This rule set selects the NetApp special agent instead of the normal Check_MK Agent " + "and allows monitoring via the NetApp API. Right now only 7-Mode is supported, " + "Cluster Mode will follow soon. Important: To make this special agent NetApp work " + "you will have to provide two additional python files (NaServer.py, NaElement.py) " + "from the NetApp Manageability SDK. They need to be put into the site directory " + "into ~/local/lib/python. The user requires a number of permissions for specific API classes. " + "They are displayed if you call the agent with agent_netapp --help. The agent itself " + "is located in the site directory under ~/share/check_mk/agents/special."), + match = 'first') + +register_rule(group, + "special_agents:activemq", + Tuple( + title = _("Apache ActiveMQ queues"), + help = _( "Configure the Server Address and the Portnumber of the target server"), + elements = [ + TextAscii(title = _("Server Name")), + Integer( title = _("Port Number"), default_value=8161 ), + ListChoice( + choices = [ + ("piggybag", _("Run in piggyback mode")), + ], + allow_empty = True + ) + ] + ), + factory_default = FACTORY_DEFAULT_UNUSED, # No default, do not use setting if no rule matches + match = "first") + +register_rule(group, + "special_agents:emcvnx", + Dictionary( + title = _("Check state of EMC VNX storage systems"), + help = _("This rule selects the EMC VNX agent instead of the normal Check_MK Agent " + "and allows monitoring of EMC VNX storage systems by calling naviseccli " + "commandline tool locally on the monitoring system. Make sure it is installed " + "and working. You can configure your connection settings here." + ), + elements = [ + ( "user", + TextAscii( + title = _("EMC VNX admin user name"), + allow_empty = True, + help = _("If you leave user name and password empty, the special agent tries to " + "authenticate against the EMC VNX device by Security Files. " + "These need to be created manually before using. Therefor run as " + "instance user (if using OMD) or Nagios user (if not using OMD) " + "a command like " + "naviseccli -AddUserSecurity -scope 0 -password PASSWORD -user USER " + "This creates SecuredCLISecurityFile.xml and " + "SecuredCLIXMLEncrypted.key in the home directory of the user " + "and these files are used then." 
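# Illustration only (values made up, not part of the rule definition
# above): the one-time Security File setup quoted in the help text is
# run as the site user, e.g.
#
#     naviseccli -AddUserSecurity -scope 0 -password PASSWORD -user USER
#
# and a configured rule value for this special agent then looks
# roughly like
#
#     {"user": "", "password": "", "infos": ["disks", "hba", "hwstatus"]}
#
# where the empty "user"/"password" select Security File authentication.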
+ ), + ) + ), + ( "password", + Password( + title = _("EMC VNX admin user password"), + allow_empty = True, + ) + ), + ( "infos", + Transform( + ListChoice( + choices = [ + ( "disks", _("Disks") ), + ( "hba", _("iSCSI HBAs") ), + ( "hwstatus", _("Hardware Status") ), + ( "raidgroups", _("RAID Groups") ), + ( "agent", _("Model and Revsion") ), + ], + default_value = [ "disks", "hba", "hwstatus", ], + allow_empty = False, + ), + title = _("Retrieve information about..."), + ) + ), + ], + optional_keys = [ ], + ), + factory_default = FACTORY_DEFAULT_UNUSED, # No default, do not use setting if no rule matches + match = 'first') + +register_rule(group, + "special_agents:ibmsvc", + Dictionary( + title = _("Check state of IBM SVC / V7000 storage systems"), + help = _("This rule set selects the ibmsvc agent instead of the normal Check_MK Agent " + "and allows monitoring of IBM SVC / V7000 storage systems by calling " + "ls* commands there over SSH. " + "Make sure you have SSH key authentication enabled for your monitoring user. " + "That means: The user your monitoring is running under on the monitoring " + "system must be able to ssh to the storage system as the user you gave below " + "without password." + ), + elements = [ + ( "user", + TextAscii( + title = _("IBM SVC / V7000 user name"), + allow_empty = True, + help = _("User name on the storage system. Read only permissions are sufficient."), + ) + ), + ( "accept-any-hostkey", + Checkbox( + title = _("Accept any SSH Host Key"), + label = _("Accept any SSH Host Key"), + default_value = False, + help = _("Accepts any SSH Host Key presented by the storage device. " + "Please note: This might be a security issue because man-in-the-middle " + "attacks are not recognized! Better solution would be to add the " + "SSH Host Key of the monitored storage devices to the .ssh/known_hosts " + "file for the user your monitoring is running under (on OMD: the site user)" + )) + ), + ( "infos", + Transform( + ListChoice( + choices = [ + ( "lshost", _("Hosts Connected") ), + ( "lslicense", _("Licensing Status") ), + ( "lsmdisk", _("MDisks") ), + ( "lsmdiskgrp", _("MDisks Groups") ), + ( "lsnode", _("IO Groups") ), + ( "lsnodestats", _("Node Stats") ), + ( "lssystem", _("System Info") ), + ( "lssystemstats", _("System Stats") ), + ( "lseventlog", _("Event Log") ), + ( "lsportfc", _("FC Ports") ), + ( "lsenclosure", _("Enclosures") ), + ( "lsenclosurestats", _("Enclosure Stats") ), + ( "lsarray", _("RAID Arrays") ), + ], + default_value = [ "lshost", "lslicense", "lsmdisk", "lsmdiskgrp", "lsnode", + "lsnodestats", "lssystem", "lssystemstats", "lsportfc", + "lsenclosure", "lsenclosurestats", "lsarray" ], + allow_empty = False, + ), + title = _("Retrieve information about..."), + ) + ), + ], + optional_keys = [ ], + ), + factory_default = FACTORY_DEFAULT_UNUSED, # No default, do not use setting if no rule matches + match = 'first') + + +register_rule(group, + "special_agents:random", + FixedValue( + {}, + title = _("Create random monitoring data"), + help = _("By configuring this rule for a host - instead of the normal " + "Check_MK agent random monitoring data will be created."), + totext = _("Create random monitoring data"), + ), + factory_default = FACTORY_DEFAULT_UNUSED, # No default, do not use setting if no rule matches + match = 'first') + +register_rule(group, + "special_agents:fritzbox", + Dictionary( + title = _("Check state of Fritz!Box Devices"), + help = _("This rule selects the Fritz!Box agent, which uses UPNP to gather information " + "about 
configuration and connection status information."), + elements = [ + ( "timeout", + Integer( + title = _("Connect Timeout"), + help = _("The network timeout in seconds when communicating via UPNP. " + "The default is 10 seconds. Please note that this " + "is not a total timeout, instead it is applied to each API call."), + default_value = 10, + minvalue = 1, + unit = _("seconds"), + ) + ), + ], + optional_keys = [ "timeout" ], + ), + factory_default = FACTORY_DEFAULT_UNUSED, # No default, do not use setting if no rule matches + match = 'first') + + +register_rulegroup("datasource_programs", + _("Datasource Programs"), + _("Specialized agents, e.g. check via SSH, ESX vSphere, SAP R/3")) + +group = "datasource_programs" + + +register_rule(group, + "special_agents:innovaphone", + Tuple( + title = _("Innovaphone Gateways"), + help = _( "Please specify the user and password needed to access the xml interface"), + elements = [ + TextAscii(title = _("Username")), + Password( title = _("Password")), + ] + ), + factory_default = FACTORY_DEFAULT_UNUSED, + match = "first") + +register_rule(group, + "special_agents:hivemanager", + Tuple( + title = _("Aerohive HiveManager"), + help = _( "Activate monitoring of host via a HTTP connect to the HiveManager"), + elements = [ + TextAscii(title = _("Username")), + Password( title = _("Password")), + ] + ), + factory_default = FACTORY_DEFAULT_UNUSED, + match = "first") + +register_rule(group, + "special_agents:allnet_ip_sensoric", + Dictionary( + title = _("Check state of ALLNET IP Sensoric Devices"), + help = _("This rule selects the ALLNET IP Sensoric agent, which fetches " + "/xml/sensordata.xml from the device by HTTP and extracts the " + "needed monitoring information from this file."), + elements = [ + ( "timeout", + Integer( + title = _("Connect Timeout"), + help = _("The network timeout in seconds when communicating via HTTP. " + "The default is 10 seconds."), + default_value = 10, + minvalue = 1, + unit = _("seconds"), + ) + ), + ], + optional_keys = [ "timeout" ], + ), + factory_default = FACTORY_DEFAULT_UNUSED, # No default, do not use setting if no rule matches + match = 'first') + + +register_rule(group, + "special_agents:ucs_bladecenter", + Dictionary( + elements = [ + ( "username", + TextAscii( + title = _("Username"), + allow_empty = False, + ) + ), + ( "password", + Password( + title = _("Password"), + allow_empty = False, + ) + ), + ], + optional_keys = False + ), + title = _("Check state of UCS Bladecenter"), + help = _("This rule selects the UCS Bladecenter agent instead of the normal Check_MK Agent " + "which collects the data through the UCS Bladecenter Web API"), + match = 'first') diff -Nru check-mk-1.2.2p3/plugins/wato/globals_notification.py check-mk-1.2.6p12/plugins/wato/globals_notification.py --- check-mk-1.2.2p3/plugins/wato/globals_notification.py 2013-11-05 09:23:09.000000000 +0000 +++ check-mk-1.2.6p12/plugins/wato/globals_notification.py 2015-09-21 10:59:54.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. 
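The datasource program rules above all follow the same pattern: a valuespec
collects credentials and options into a parameter dictionary, which Check_MK
later expands into the command line of the matching special agent. A minimal
sketch of that expansion, assuming a hypothetical helper name and -u/-p
agent flags (the real translation code is not shown in this patch):

    import pipes

    def ucs_bladecenter_command(params, hostname, ipaddress):
        # params is the value of a "special_agents:ucs_bladecenter" rule,
        # e.g. {"username": "monitor", "password": "secret"}
        return "agent_ucs_bladecenter -u %s -p %s %s" % (
            pipes.quote(params["username"]),
            pipes.quote(params["password"]),
            pipes.quote(ipaddress or hostname))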
@@ -28,25 +28,82 @@ group = _("Notification") +register_configvar(group, + "enable_rulebased_notifications", + Checkbox( + title = _("Rule based notifications"), + label = _("Enable new rule based notifications"), + help = _("If you enable the new rule based notifications then the current plain text email and " + "&quot;flexible notifications&quot; will become inactive. Instead notifications will " + "be configured with the WATO module Notifications on a global basis."), + default_value = False, + ), + domain = "check_mk", + need_restart = True) + +register_configvar(group, + "notification_fallback_email", + EmailAddress( + title = _("Fallback email address for rule based notifications"), + help = _("If you work with rule based notifications then you should configure an email " + "address here. In case of a hole in your notification rules a notification " + "will be sent to this address. This makes sure that in any case someone gets " + "notified."), + empty_text = _("(No fallback email address configured!)"), + make_clickable = False, + ), + domain = "check_mk") + +register_configvar(group, + "notification_backlog", + Integer( + title = _("Store notifications for rule analysis"), + help = _("If this option is set to a non-zero number, then Check_MK " + "keeps the last X notifications for later reference. " + "You can replay these notifications and analyse your set of " + "notification rules. This only works with rule based notifications. Note: " + "only notifications sent out by the local notification system can be " + "tracked. If you have a distributed environment you need to do the analysis " + "directly on the remote sites - unless you use central spooling."), + default_value = 10, + ), + domain = "check_mk") + +register_configvar(group, + "notification_bulk_interval", + Age( + title = _("Interval for checking for ripe bulk notifications"), + help = _("If you are using rule based notifications with Bulk Notifications " + "then Check_MK will check for ripe notification bulks to be sent out " + "at the latest after this interval."), + default_value = 10, + minvalue = 1, + ), + domain = "check_mk", + need_restart = True) register_configvar(group, "notification_logging", - DropdownChoice( - title = _("Debug notifications"), - help = _("When notification debugging is on, then in the notification logfile " - "in %s additional information will be logged." % - (defaults.var_dir + "/notify/notify.log")), - choices = [ - ( 0, _("No logging")), - ( 1, _("One line per notification")), - ( 2, _("Full dump of all variables and command"))] + Transform( + DropdownChoice( + choices = [ + ( 1, _("Normal logging")), + ( 2, _("Full dump of all variables and command")) + ], + default_value = 1, ), + forth = lambda x: x == 0 and 1 or x, # transform deprecated value 0 (no logging) to 1 + title = _("Notification log level"), + help = _("You can configure the notification mechanism to log more details about " + "the notifications into the notification log. This information is logged " + "into the file %s") % site_neutral_path(defaults.log_dir + "/notify.log"), + ), domain = "check_mk") -register_configvar(group, +register_configvar(deprecated, "notification_mail_command", TextUnicode( - title = _("Email command line used for notifications"), + title = _("Email command line used for plain notifications"), help = _("This command will be executed whenever a notification should be done. " "The command will receive the notification text body on standard input. 
" "The macro $SUBJECT$ will be replaced by a text configured " @@ -60,38 +117,44 @@ "wato.py?mode=edit_configvar&varname=notification_service_subject", "wato.py?mode=edit_configvar&varname=notification_common_body", )), - size = 50), + size = 50, + attrencode = True, + ), domain = "check_mk") -register_configvar(group, +register_configvar(deprecated, "notification_host_subject", TextUnicode( - title = _("Email subject to use for host notifications"), + title = _("Email subject to use for plain host notifications"), help = _("This template will be used as $SUBJECT$ in email notifications " "that deal with host alerts. The variable $SUBJECT$ will then " "be available in notification_common_body." % ( "wato.py?mode=edit_configvar&varname=notification_common_body", )), - size = 50), + size = 50, + attrencode = True, + ), domain = "check_mk") -register_configvar(group, +register_configvar(deprecated, "notification_service_subject", TextUnicode( - title = _("Email subject to use for service notifications"), + title = _("Email subject to use for plain service notifications"), help = _("This template will be used as $SUBJECT$ in email notifications " "that deal with service alerts. The variable $SUBJECT$ will then " "be available in notification_common_body." % ( "wato.py?mode=edit_configvar&varname=notification_common_body", )), - size = 50), + size = 50, + attrencode = True, + ), domain = "check_mk") -register_configvar(group, +register_configvar(deprecated, "notification_common_body", TextAreaUnicode( - title = _("Email body to use for both host and service notifications"), + title = _("Email body to use for both plain host and service notifications"), help = _("This template will be used as email body when sending notifications. " "Appended to it will be a specific body for either host or service " "notifications configured in two extra parameters. " @@ -126,28 +189,73 @@ "$OMD_ROOT$: the home directory of the OMD site (only on OMD) " "$OMD_SITE$: the name of the OMD site (only on OMD) " ), + attrencode = True, ), domain = "check_mk") -register_configvar(group, +register_configvar(deprecated, "notification_host_body", TextAreaUnicode( - title = _("Email body to use for host notifications"), + title = _("Email body to use for plain host notifications"), help = _("This template will be appended to the " "notification_common_body when host notifications are sent." % "wato.py?mode=edit_configvar&varname=notification_common_body" ), + attrencode = True, ), domain = "check_mk") -register_configvar(group, +register_configvar(deprecated, "notification_service_body", TextAreaUnicode( - title = _("Email body to use for service notifications"), + title = _("Email body to use for plain service notifications"), help = _("This template will be appended to the " "notification_common_body when service notifications are sent." % "wato.py?mode=edit_configvar&varname=notification_common_body" ), + attrencode = True, ), domain = "check_mk") + + +register_configvar(group, + "mkeventd_service_levels", + ListOf( + Tuple( + elements = [ + Integer( + title = _("internal ID"), + minvalue = 0, + maxvalue = 100, + ), + TextUnicode( + title = _("Name / Description"), + allow_empty = False, + attrencode = True, + ), + ], + orientation = "horizontal", + ), + title = _("Service Levels"), + help = _("Here you can configure the list of possible service levels for hosts, services and " + "events. A service level can be assigned to a host or service by configuration. 
" + "The event console can configure each created event to have a specific service level. " + "Internally the level is represented as an integer number. Note: a higher number represents " + "a higher service level. This is important when filtering views " + "by the service level.
    You can also attach service levels to hosts " + "and services in the monitoring. These levels will then be sent to the " + "Event Console when you forward notifications to it and will override the " + "setting of the matching rule."), + allow_empty = False, + default_value = [ + (0, _("(no Service level)")), + (10, _("Silver")), + (20, _("Gold")), + (30, _("Platinum")), + ], + ), + domain = "multisite", + allow_reset = False, +) + diff -Nru check-mk-1.2.2p3/plugins/wato/inventory.py check-mk-1.2.6p12/plugins/wato/inventory.py --- check-mk-1.2.2p3/plugins/wato/inventory.py 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/plugins/wato/inventory.py 2015-09-21 10:59:54.000000000 +0000 @@ -0,0 +1,93 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. + +register_rulegroup("inventory", + _("Hardware/Software-Inventory"), + _("Configuration of the Check_MK Hardware and Software Inventory System")) +group = "inventory" + +register_rule(group, + "active_checks:cmk_inv", + FixedValue(None, totext = _("No configuration neccessary")), + title = _("Do hardware/software Inventory"), + help = _("All hosts configured via this ruleset will do a hardware and " + "software inventory. For each configured host a new active check " + "will be created. You should also create a rule for changing the " + "normal interval for that check to something between a couple of " + "hours and one day. " + "Note: in order to get any useful " + "result for agent based hosts make sure that you have installed " + "the agent plugin mk_inventory on these hosts."), + match = "all", +) + +register_rule(group, + "inv_exports:software_csv", + Dictionary( + title = _("Export List of Software packages as CSV file"), + elements = [ + ( "filename", + TextAscii( + title = _("Export file to create, containing <HOST> for the hostname"), + help = _("Please specify the path to the export file. The text <HOST> " + "will be replaced with the host name the inventory has been done for. 
" + "If you use a relative path then that will be relative to Check_MK's directory " + "for variable data, which is %s.") % defaults.var_dir, + allow_empty = False, + size = 64, + default_value = "csv-export/.csv", + )), + ( "separator", + TextAscii( + title = _("Separator"), + allow_empty = False, + size = 1, + default_value = ";", + )), + ( "quotes", + DropdownChoice( + title = _("Quoting"), + choices = [ + ( None, _("Don't use quotes") ), + ( "single", _("Use single quotes, escape contained quotes with backslash") ), + ( "double", _("Use double quotes, escape contained quotes with backslash") ), + ], + default_value = None, + )), + ( "headers", + DropdownChoice( + title = _("Column headers"), + choices = [ + ( False, _("Do not add column headers") ), + ( True, _("Add a first row with column titles") ), + ], + default_value = False, + )), + ], + required_keys = [ "filename" ], + ), + match = "first" +) diff -Nru check-mk-1.2.2p3/plugins/wato/mknotifyd.py check-mk-1.2.6p12/plugins/wato/mknotifyd.py --- check-mk-1.2.2p3/plugins/wato/mknotifyd.py 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/plugins/wato/mknotifyd.py 2015-09-21 10:59:54.000000000 +0000 @@ -0,0 +1,118 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. + +import config + +try: + mknotifyd_enabled = config.mknotifyd_enabled +except: + # Non OMD-users: must enable this explicitely, sorry + mknotifyd_enabled = False + +mknotifyd_config_dir = defaults.default_config_dir + "/mknotifyd.d/wato/" + +if mknotifyd_enabled: + group = _("Notification") + + # Check_MK var + register_configvar(group, + "notification_spooling", + Checkbox( + title = _("Deliver notifications asychronously"), + help = _("The option will make notifications handled asynchronously. For each notification a spool " + "file will be created and later processes by the notification spooler. This avoids a hanging " + "core in case of notifications that need very long to execute. It also enables a retry in " + "case of failed notifications. 
Please note that this is not useful if you only use notification " + "methods that have their own spooling (like email or SMS tools)."), + default_value = False), + domain = "check_mk" + ) + + # Check_MK var + register_configvar(group, + "notification_spool_to", + Optional( + Tuple( + elements = [ + TextAscii( + title = _("Remote host"), + ), + Integer( + title = _("TCP port"), + minvalue = 1024, + maxvalue = 65535, + default_value = 6555, + ), + Checkbox( + title = _("Local processing"), + label = _("Process notifications also locally"), + ), + ]), + title = _("Forward all notifications to remote server"), + help = _("This option allows you to forward notifications to another Check_MK site. " + "That site must have the notification spooler running and TCP listening enabled. " + "This allows you to set up centralized notification handling."), + label = _("Spool notifications to remote site"), + none_label = _("(Do not spool to remote site)"), + ), + domain = "check_mk" + ) + + # Daemon var + register_configvar_domain("mknotifyd", mknotifyd_config_dir) + register_configvar(group, + "notification_deferred_retention_time", + Integer( + title = _("Notification fail retry interval"), + help = _("If the processing of a notification fails, the notify daemon " + "retries to send the notification again after this time"), + minvalue = 10, + maxvalue = 86400, + default_value = 180, + unit = _("Seconds") + ), + domain = "mknotifyd" + ) + + + # Daemon var + register_configvar(group, + "notification_daemon_listen_port", + Optional( + Integer( + minvalue = 1024, + maxvalue = 65535, + default_value = 6555, + ), + help = _("Here you can set the port at which the notification spooler listens for forwarded " + "notification messages from spoolers on remote sites."), + title = _("Port for receiving notifications"), + label = _("Receive notifications from remote sites"), + none_label = _("(Do not receive notifications)"), + ), + domain = "mknotifyd" + ) + diff -Nru check-mk-1.2.2p3/plugins/wato/nagvis_auth.py check-mk-1.2.6p12/plugins/wato/nagvis_auth.py --- check-mk-1.2.2p3/plugins/wato/nagvis_auth.py 2013-11-05 09:23:09.000000000 +0000 +++ check-mk-1.2.6p12/plugins/wato/nagvis_auth.py 2015-06-24 09:48:39.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK.
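For orientation: notification_spool_to above is an Optional Tuple of remote
host, TCP port and a "process locally" flag. A bare-bones sketch of a sender
honoring such a value; the wire format and function name are assumptions,
not the shipped mknotifyd implementation:

    import socket

    def forward_notification(payload, spool_to):
        # spool_to is None or e.g. ("central.example.com", 6555, True),
        # mirroring the Optional(Tuple(...)) valuespec above
        if spool_to is None:
            return False
        host, port, process_locally = spool_to
        conn = socket.create_connection((host, port), timeout=10)
        try:
            conn.sendall(payload)
        finally:
            conn.close()
        return process_locally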
@@ -36,6 +36,13 @@ ) config.declare_permission( + 'nagvis.Rotation_view_*', + _('Use all map rotations'), + _('Grants read access to all rotations.'), + [ 'guest' ] +) + +config.declare_permission( 'nagvis.Map_view_*', _('View all maps'), _('Grants read access to all maps.'), @@ -55,3 +62,24 @@ _('Permits to delete all maps.'), [] ) + +config.declare_permission( + 'nagvis.Map_view', + _('View permitted maps'), + _('Grants read access to all maps the user is a contact for.'), + ['user'] +) + +config.declare_permission( + 'nagvis.Map_edit', + _('Edit permitted maps'), + _('Grants modify access to all maps the user is contact for.'), + ['user'] +) + +config.declare_permission( + 'nagvis.Map_delete', + _('Delete permitted maps'), + _('Permits to delete all maps the user is contact for.'), + ['user'] +) diff -Nru check-mk-1.2.2p3/plugins/wato/notifications.py check-mk-1.2.6p12/plugins/wato/notifications.py --- check-mk-1.2.2p3/plugins/wato/notifications.py 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/plugins/wato/notifications.py 2015-09-21 10:59:54.000000000 +0000 @@ -0,0 +1,233 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. 
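The notification parameter dialogs below rely on notification context macros
such as $HOSTNAME$, $SERVICEDESC$ and $EVENT_TXT$. As a rough sketch of how
a subject template expands (the substitution helper is illustrative, not the
shipped implementation):

    def expand_context_macros(template, context):
        # context maps macro names to values from the notification,
        # e.g. {"HOSTNAME": "srv01", "EVENT_TXT": "UP -> DOWN"}
        for key, value in context.items():
            template = template.replace("$%s$" % key, value)
        return template

    # expand_context_macros("Check_MK: $HOSTNAME$ - $EVENT_TXT$",
    #                       {"HOSTNAME": "srv01", "EVENT_TXT": "UP -> DOWN"})
    # -> "Check_MK: srv01 - UP -> DOWN"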
+ +register_notification_parameters("mail", + Dictionary( + elements = [ + ( "from", + TextAscii( + title = _("From: Address"), + size = 40, + allow_empty = False, + ) + ), + ( "reply_to", + TextAscii( + title = _("Reply-To: Address"), + size = 40, + allow_empty = False, + ) + ), + ( "host_subject", + TextUnicode( + title = _("Subject for host notifications"), + help = _("Here you are allowed to use all macros that are defined in the " + "notification context."), + default_value = "Check_MK: $HOSTNAME$ - $EVENT_TXT$", + size = 64, + ) + ), + ( "service_subject", + TextUnicode( + title = _("Subject for service notifications"), + help = _("Here you are allowed to use all macros that are defined in the " + "notification context."), + default_value = "Check_MK: $HOSTNAME$/$SERVICEDESC$ $EVENT_TXT$", + size = 64, + ) + ), + ( "elements", + ListChoice( + title = _("Information to be displayed in the email body"), + choices = [ + ( "address", _("IP Address of Host") ), + ( "abstime", _("Absolute Time of Alert") ), + ( "reltime", _("Relative Time of Alert") ), + ( "longoutput", _("Additional Plugin Output") ), + ( "ack_author", _("Acknowledgement Author") ), + ( "ack_comment", _("Acknowledgement Comment") ), + ( "perfdata", _("Performance Data") ), + ( "graph", _("Performance Graphs") ), + ( "context", _("Complete variable list (for testing)" ) ), + ], + default_value = [ "perfdata", "graph", "abstime", "address", "longoutput" ], + ) + ), + ( "url_prefix", + TextAscii( + title = _("URL prefix for links to Check_MK"), + help = _("If you specify an URL prefix here, then several parts of the " + "email body are armed with hyperlinks to your Check_MK GUI, so " + "that the recipient of the email can directly visit the host or " + "service in question in Check_MK. Specify an absolute URL including " + "the .../check_mk/"), + regex = "^(http|https)://.*/check_mk/$", + regex_error = _("The URL must begin with http or " + "https and end with /check_mk/."), + size = 64, + default_value = "http://" + socket.gethostname() + "/" + ( + defaults.omd_site and defaults.omd_site + "/" or "") + "check_mk/", + ) + ), + ( "no_floating_graphs", FixedValue( + True, + title = _("Display graphs among each other"), + totext = _("Graphs are shown among each other"), + help = _("By default all multiple graphs in emails are displayed floating " + "nearby. 
You can enable this option to show the graphs among each " + "other."), + )), + ] + ) +) +register_notification_parameters("asciimail", + Dictionary( + elements = [ + ( "from", + EmailAddress( + title = _("From: Address"), + size = 40, + allow_empty = False, + ) + ), + ( "reply_to", + EmailAddress( + title = _("Reply-To: Address"), + size = 40, + allow_empty = False, + ) + ), + ( "host_subject", + TextUnicode( + title = _("Subject for host notifications"), + help = _("Here you are allowed to use all macros that are defined in the " + "notification context."), + default_value = "Check_MK: $HOSTNAME$ - $EVENT_TXT$", + size = 64, + ) + ), + ( "service_subject", + TextUnicode( + title = _("Subject for service notifications"), + help = _("Here you are allowed to use all macros that are defined in the " + "notification context."), + default_value = "Check_MK: $HOSTNAME$/$SERVICEDESC$ $EVENT_TXT$", + size = 64, + ) + ), + ( "common_body", + TextAreaUnicode( + title = _("Body head for both host and service notifications"), + rows = 7, + cols = 58, + monospaced = True, + default_value = """Host: $HOSTNAME$ +Alias: $HOSTALIAS$ +Address: $HOSTADDRESS$ +""", + ) + ), + ( "host_body", + TextAreaUnicode( + title = _("Body tail for host notifications"), + rows = 9, + cols = 58, + monospaced = True, + default_value = """Event: $EVENT_TXT$ +Output: $HOSTOUTPUT$ +Perfdata: $HOSTPERFDATA$ +$LONGHOSTOUTPUT$ +""", + ) + ), + ( "service_body", + TextAreaUnicode( + title = _("Body tail for service notifications"), + rows = 11, + cols = 58, + monospaced = True, + default_value = """Service: $SERVICEDESC$ +Event: $EVENT_TXT$ +Output: $SERVICEOUTPUT$ +Perfdata: $SERVICEPERFDATA$ +$LONGSERVICEOUTPUT$ +""", + ) + ), + ] + ) +) + + + +register_notification_parameters("mkeventd", + Dictionary( + elements = [ + ( "facility", + DropdownChoice( + title = _("Syslog Facility to use"), + help = _("The notifications will be converted into syslog messages with " + "the facility that you choose here. In the Event Console you can " + "later create a rule matching this facility."), + choices = syslog_facilities, + ) + ), + ( "remote", + IPv4Address( + title = _("IP Address of remote Event Console"), + help = _("If you set this parameter then the notifications will be sent via " + "syslog/UDP (port 514) to a remote Event Console or syslog server."), + ) + ), + ] + ) +) + + +register_notification_parameters("spectrum", + Dictionary( + optional_keys = None, + elements = [ + ("destination", + IPv4Address( + title = _("Destination IP"), + help = _("IP Address of the Spectrum server receiving the SNMP trap") + ), + ), + ("community", + TextAscii( + title = _("SNMP Community"), + help = _("SNMP Community for the SNMP trap") + )), + ("baseoid", + TextAscii( + title = _("Base OID"), + help = _("The base OID for the trap content"), + default_value = "1.3.6.1.4.1.1234" + ), + ), + ]) + ) diff -Nru check-mk-1.2.2p3/plugins/wato/user_attributes.py check-mk-1.2.6p12/plugins/wato/user_attributes.py --- check-mk-1.2.2p3/plugins/wato/user_attributes.py 2013-11-05 09:23:09.000000000 +0000 +++ check-mk-1.2.6p12/plugins/wato/user_attributes.py 2015-06-24 09:48:39.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. 
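The mkeventd notification parameters above forward alerts as syslog messages
via UDP port 514. A bare-bones sketch of such a sender; the PRI prefix
follows the usual syslog encoding, everything else is illustrative rather
than the shipped forwarding script:

    import socket

    def send_syslog_udp(message, remote_ip, facility=16, severity=5):
        # PRI = facility * 8 + severity; facility 16 is "local0",
        # matching the syslog_facilities choices used above
        pri = facility * 8 + severity
        sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        sock.sendto("<%d>%s" % (pri, message), (remote_ip, 514))
        sock.close()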
@@ -24,29 +24,4 @@ # to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, # Boston, MA 02110-1301 USA. - -declare_user_attribute( - "force_authuser", - Checkbox( - title = _("Visibility of Hosts/Services"), - label = _("Only show hosts and services the user is a contact for"), - help = _("When this option is checked, then the status GUI will only " - "display hosts and services that the user is a contact for - " - "even if he has the permission for seeing all objects."), - ), - permission = "general.see_all" -) - -declare_user_attribute( - "force_authuser_webservice", - Checkbox( - title = _("Visibility of Hosts/Services (Webservice)"), - label = _("Export only hosts and services the user is a contact for"), - help = _("When this option is checked, then the Multisite webservice " - "will only export hosts and services that the user is a contact for - " - "even if he has the permission for seeing all objects."), - ), - permission = "general.see_all" -) - - +# has been moved to userdb plugins diff -Nru check-mk-1.2.2p3/plugins/wato/userdb.py check-mk-1.2.6p12/plugins/wato/userdb.py --- check-mk-1.2.2p3/plugins/wato/userdb.py 2013-11-05 09:23:09.000000000 +0000 +++ check-mk-1.2.6p12/plugins/wato/userdb.py 2015-06-24 09:48:39.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. @@ -24,7 +24,16 @@ # to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, # Boston, MA 02110-1301 USA. -import userdb +def sync_pre_activate_changes(_unused): + # In some rare cases for still unknown reasons at this time the + # variable config sometimes has the value None. This could or could + # not be a mod_python problem. But it makes the activation of changes + # in a D-WATO setup break. So better handle this case here. + try: + do_sync = 'wato_pre_activate_changes' in config.userdb_automatic_sync + except: + do_sync = False + if do_sync: + userdb.hook_sync() -api.register_hook('pre-activate-changes', lambda hosts: userdb.hook_sync()) -api.register_hook('snapshot-pushed', userdb.hook_sync) +register_hook('pre-activate-changes', sync_pre_activate_changes) diff -Nru check-mk-1.2.2p3/plugins/webapi/webapi.py check-mk-1.2.6p12/plugins/webapi/webapi.py --- check-mk-1.2.2p3/plugins/webapi/webapi.py 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/plugins/webapi/webapi.py 2015-08-25 13:36:54.000000000 +0000 @@ -0,0 +1,153 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. 
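The userdb.py hunk above guards the membership test instead of trusting the type of config.userdb_automatic_sync. The pattern, isolated into a runnable sketch (wants_sync is an illustrative name, not upstream code):

    # config.userdb_automatic_sync may unexpectedly be None under mod_python,
    # so the 'in' test is wrapped rather than type-checked, as in the hunk.
    def wants_sync(automatic_sync):
        try:
            return 'wato_pre_activate_changes' in automatic_sync
        except:
            return False

    print wants_sync(None)                            # -> False
    print wants_sync(['wato_pre_activate_changes'])   # -> True
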
check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. + +def action_add_host(request): + if html.var("create_folders"): + create_folders = bool(int(html.var("create_folders"))) + else: + create_folders = True + + hostname = request.get("hostname") + folder = request.get("folder") + attributes = request.get("attributes", {}) + + if not hostname: + raise MKUserError(None, "Hostname is missing") + if not folder: + raise MKUserError(None, "Foldername is missing") + + return g_api.add_hosts([{"hostname": hostname, + "folder": folder, + "attributes": attributes}], + create_folders = create_folders) + +api_actions["add_host"] = { + "handler" : action_add_host, + "locking" : True, +} + +############### + +def action_edit_host(request): + hostname = request.get("hostname") + attributes = request.get("attributes", {}) + unset_attributes = request.get("unset_attributes", []) + + if not hostname: + raise MKUserError(None, "Hostname is missing") + + return g_api.edit_hosts([{"hostname": hostname, + "attributes": attributes, + "unset_attributes": unset_attributes}]) + +api_actions["edit_host"] = { + "handler" : action_edit_host, + "locking" : True, +} + +############### + +def action_get_host(request): + if html.var("effective_attributes"): + effective_attributes = bool(int(html.var("effective_attributes"))) + else: + effective_attributes = True + + hostname = request.get("hostname") + + if not hostname: + raise MKUserError(None, "Hostname is missing") + + return g_api.get_host(hostname, effective_attr = effective_attributes) + +api_actions["get_host"] = { + "handler" : action_get_host, + "locking" : False, +} + +############### + +def action_delete_host(request): + hostname = request.get("hostname") + + if not hostname: + raise MKUserError(None, "Hostname is missing") + + return g_api.delete_hosts([hostname]) + +api_actions["delete_host"] = { + "handler" : action_delete_host, + "locking" : True, +} + +############### + +def action_discover_services(request): + mode = html.var("mode") and html.var("mode") or "new" + + hostname = request.get("hostname") + + if not hostname: + raise MKUserError(None, "Hostname is missing") + + return g_api.discover_services(hostname, mode = mode) + +api_actions["discover_services"] = { + "handler" : action_discover_services, + "locking" : True, +} + +############### + +def action_activate_changes(request): + mode = html.var("mode") and html.var("mode") or "dirty" + if html.var("allow_foreign_changes"): + allow_foreign_changes = bool(int(html.var("allow_foreign_changes"))) + else: + allow_foreign_changes = False + + sites = request.get("sites") + return g_api.activate_changes(sites = sites, mode = mode, allow_foreign_changes = allow_foreign_changes) + +api_actions["activate_changes"] = { + "handler" : action_activate_changes, + "locking" : True, +} + +############### + +def action_get_all_hosts(request): + if html.var("effective_attributes"): + effective_attributes = bool(int(html.var("effective_attributes"))) + else: + effective_attributes = False + return g_api.get_all_hosts(effective_attr = effective_attributes) + +api_actions["get_all_hosts"] = { + "handler": 
action_get_all_hosts,
+    "locking": False,
+}
+
diff -Nru check-mk-1.2.2p3/plugins/websphere_mq check-mk-1.2.6p12/plugins/websphere_mq
--- check-mk-1.2.2p3/plugins/websphere_mq 1970-01-01 00:00:00.000000000 +0000
+++ check-mk-1.2.6p12/plugins/websphere_mq 2015-06-24 09:48:39.000000000 +0000
@@ -0,0 +1,62 @@
+#!/bin/sh
+# +------------------------------------------------------------------+
+# |             ____ _               _        __  __ _  __           |
+# |            / ___| |__   ___  ___| | __   |  \/  | |/ /           |
+# |           | |   | '_ \ / _ \/ __| |/ /   | |\/| | ' /            |
+# |           | |___| | | |  __/ (__|   <    | |  | | . \            |
+# |            \____|_| |_|\___|\___|_|\_\___|_|  |_|_|\_\           |
+# |                                                                  |
+# | Copyright Mathias Kettner 2014             mk@mathias-kettner.de |
+# +------------------------------------------------------------------+
+#
+# This file is part of Check_MK.
+# The official homepage is at http://mathias-kettner.de/check_mk.
+#
+# check_mk is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by
+# the Free Software Foundation in version 2. check_mk is distributed
+# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with-
+# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE. See the GNU General Public License for more de-
+# ails. You should have received a copy of the GNU General Public
+# License along with GNU Make; see the file COPYING. If not, write
+# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
+# Boston, MA 02110-1301 USA.
+
+# plugin for websphere_mq_* checks
+
+if [ "$1" = "" ]
+then
+    su - mqm -c "/usr/lib/check_mk_agent/plugins/websphere_mq run"
+else
+    # Loop over all local mq instances
+    for QM in $( ps -ef | grep -i '[/]usr/mqm/bin/runmqchl -c' | awk '{ print $NF }' | uniq)
+    do
+        echo '<<<websphere_mq_channels>>>'
+        for i in `echo " display CHANNEL (*) TYPE (SDR) " | /usr/bin/runmqsc $QM | grep CHLTYPE | grep -v SYSTEM | awk '{print $1}'`
+        do
+            j=`echo "display $i " | /usr/bin/runmqsc $QM | grep XMITQ | tr " " "\n" | grep XMITQ | sed '1,$s/(/ /g' | sed '1,$s/)/ /g'| awk '{print $2 }'`
+            a=`echo " display qlocal ($j) CURDEPTH " | /usr/bin/runmqsc $QM | grep CURDEPTH | tr " " "\n" | grep CURDEPTH | sed '1,$s/(/ /g' | sed '1,$s/)/ /g'| awk '{print $2 }' | tr "\n" " "`
+            c=`echo " display qlocal ($j) MAXDEPTH " | /usr/bin/runmqsc $QM | grep MAXDEPTH | tr " " "\n" | grep MAXDEPTH | sed '1,$s/(/ /g' | sed '1,$s/)/ /g'| awk '{print $2 }' | tr "\n" " "`
+
+            l=`echo $i | sed '1,$s/(/ /g' | sed '1,$s/)/ /g'| awk '{print $2 }'`
+            s=`echo " display chstatus($l)" | /usr/bin/runmqsc $QM | grep STATUS | tail -1 | sed '1,$s/(/ /g' | sed '1,$s/)/ /g'| awk '{print $NF }'`
+
+            if [ "$s" = "" ]
+            then
+                s="Unknown"
+            fi
+            echo "$a $i $c $s"
+        done
+        echo '<<<websphere_mq_queues>>>'
+        for t in `echo " display queue (*) where (USAGE EQ NORMAL) " | /usr/bin/runmqsc $QM | grep QLOCAL | grep -v SYSTEM | grep -v _T0 | grep -v _T1 | grep -v _T2 | grep -v _T3 | grep -v mqtest | grep QUEUE | awk '{ print $1 }' | sed '1,$s/(/ /g' | sed '1,$s/)/ /g'| awk '{print $2 }'`
+        do
+            a=`echo " display queue ($t) CURDEPTH " | /usr/bin/runmqsc $QM | grep CURDEPTH | tail -1 | sed '1,$s/(/ /g' | sed '1,$s/)/ /g'| awk '{print $2 }'`
+            b=`echo " display qlocal ($t) MAXDEPTH " | /usr/bin/runmqsc $QM | grep MAXDEPTH | tr " " "\n" | grep MAXDEPTH | sed '1,$s/(/ /g' | sed '1,$s/)/ /g'| awk '{print $2 }' | tr "\n" " "`
+
+            # Pattern: number of incoming messages $a on $t, maximum queue depth $b
+
+            echo "$a $t $b"
+        done
+    done
+fi
Binary files /tmp/tmpz1J_py/xquijR6Ryw/check-mk-1.2.2p3/pnp-templates.tar.gz and
/tmp/tmpz1J_py/GXS8RRKTxu/check-mk-1.2.6p12/pnp-templates.tar.gz differ diff -Nru check-mk-1.2.2p3/postfix_mailq check-mk-1.2.6p12/postfix_mailq --- check-mk-1.2.2p3/postfix_mailq 2013-11-05 09:23:08.000000000 +0000 +++ check-mk-1.2.6p12/postfix_mailq 2015-09-21 10:59:54.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. @@ -81,7 +81,7 @@ def check_postfix_mailq(item, params, info): for line in info: if " ".join(line[-2:]) == 'is empty': - return (0, 'OK - The mailqueue is empty ', [ ('length', 0, params[0], params[1]), + return (0, 'The mailqueue is empty ', [ ('length', 0, params[0], params[1]), ('size', '0') ]) elif line[0] == '--' or line[0:2] == [ 'Total', 'requests:']: if line[0] == '--': @@ -95,15 +95,21 @@ ('size', '%d' % size) ] if len > params[1]: - return (2, 'CRIT - Mailqueue length is %d ' + return (2, 'Mailqueue length is %d ' '(More than threshold: %d)' % (len, params[0]), perfdata) elif len > params[0]: - return (1, 'WARN - Mailqueue length is %d ' + return (1, 'Mailqueue length is %d ' '(More than threshold: %d)' % (len, params[0]), perfdata) else: - return (0, 'OK - Mailqueue length is %d ' % len, perfdata) + return (0, 'Mailqueue length is %d ' % len, perfdata) - return (3, 'UNKNOWN - Could not find summarizing line in output') + return (3, 'Could not find summarizing line in output') -check_info['postfix_mailq'] = (check_postfix_mailq, "Postfix Queue", 1, inventory_postfix_mailq) -checkgroup_of["postfix_mailq"] = "mailqueue_length" + +check_info["postfix_mailq"] = { + 'check_function': check_postfix_mailq, + 'inventory_function': inventory_postfix_mailq, + 'service_description': 'Postfix Queue', + 'has_perfdata': True, + 'group': 'mailqueue_length', +} diff -Nru check-mk-1.2.2p3/postgres_sessions check-mk-1.2.6p12/postgres_sessions --- check-mk-1.2.2p3/postgres_sessions 2013-11-05 09:23:08.000000000 +0000 +++ check-mk-1.2.6p12/postgres_sessions 2015-06-24 09:48:37.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. @@ -61,8 +61,7 @@ warn, crit = None, None perfdata.append((what, value, warn, crit)) - infotext = " - " + ", ".join(infos) - return (status, nagios_state_names[status] + infotext, perfdata) + return (status, ", ".join(infos), perfdata) check_info['postgres_sessions'] = { diff -Nru check-mk-1.2.2p3/postgres_stat_database check-mk-1.2.6p12/postgres_stat_database --- check-mk-1.2.2p3/postgres_stat_database 2013-11-05 09:23:08.000000000 +0000 +++ check-mk-1.2.6p12/postgres_stat_database 2015-06-24 09:48:37.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. 
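The postfix_mailq hunk above also converts the legacy positional check_info tuple into the keyed dictionary form. A sketch of the correspondence as that hunk implies it (positional meaning inferred from the hunk; the bool comparison is only there to make the mapping explicit, upstream simply writes True):

    # Legacy form: (check_function, service_description, has_perfdata, inventory_function)
    legacy = (check_postfix_mailq, "Postfix Queue", 1, inventory_postfix_mailq)

    # Keyed form, as introduced by the hunk; 'group' replaces checkgroup_of[...]
    check_info["postfix_mailq"] = {
        "check_function":      legacy[0],
        "service_description": legacy[1],
        "has_perfdata":        legacy[2] == 1,
        "inventory_function":  legacy[3],
        "group":               "mailqueue_length",
    }
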
@@ -62,14 +62,13 @@ def check_postgres_stat_database(item, params, info): parsed = parse_postgres_stat(info) if item not in parsed: - return (3, "UNKNOWN - Database not found") + return (3, "Database not found") stats = parsed[item] status = 0 infos = [] perfdata = [] this_time = time.time() - one_wrapped = False for what, title in [ ( "blks_read", "Blocks Read" ), ( "tup_fetched", "Fetches" ), @@ -79,11 +78,7 @@ ( "tup_inserted", "Inserts" ), ]: counter = stats[what] counter_name = "postgres_stat_database.%s.%s" % (item, what) - try: - timedif, rate = get_counter(counter_name, this_time, counter) - except MKCounterWrapped, e: - one_wrapped = e - continue + rate = get_rate(counter_name, this_time, counter) infos.append("%s: %.2f/s" % (title, rate)) if what in params: @@ -98,10 +93,7 @@ warn, crit = None, None perfdata.append((what, rate, warn, crit)) - if one_wrapped: - raise one_wrapped - - return (status, nagios_state_names[status] + " - " + ", ".join(infos), perfdata) + return (status, ", ".join(infos), perfdata) check_info['postgres_stat_database'] = { "check_function" : check_postgres_stat_database, @@ -115,11 +107,11 @@ def check_postgres_stat_database_size(item, _no_params, info): parsed = parse_postgres_stat(info) if item not in parsed: - return (3, "UNKNOWN - Database not found") + return (3, "Database not found") stats = parsed[item] size = stats["datsize"] - return (0, "OK - Size is %s" % get_bytes_human_readable(size), [("size", size)]) + return (0, "Size is %s" % get_bytes_human_readable(size), [("size", size)]) check_info['postgres_stat_database.size'] = { diff -Nru check-mk-1.2.2p3/postgres_stat_database.size check-mk-1.2.6p12/postgres_stat_database.size --- check-mk-1.2.2p3/postgres_stat_database.size 2013-11-05 09:23:07.000000000 +0000 +++ check-mk-1.2.6p12/postgres_stat_database.size 2015-06-24 09:48:36.000000000 +0000 @@ -1,6 +1,6 @@ -title: Database size check for PostgreSQL +title: Database Size for PostgreSQL agents: linux -author: Mathias Kettner +catalog: app/postgresql license: GPL distribution: check_mk description: diff -Nru check-mk-1.2.2p3/prediction.py check-mk-1.2.6p12/prediction.py --- check-mk-1.2.2p3/prediction.py 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/prediction.py 2015-09-21 10:59:54.000000000 +0000 @@ -0,0 +1,361 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. 
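The postgres_stat_database hunk above swaps the hand-written try/except around get_counter() for a single get_rate() call. Reduced to its core, the change in check code looks like this (a sketch; as the hunk suggests, get_rate() in 1.2.6 absorbs the first-sample case that the old code had to track itself):

    # 1.2.2 idiom: remember a wrapped counter and re-raise after the loop
    try:
        timedif, rate = get_counter(counter_name, this_time, counter)
    except MKCounterWrapped, e:
        one_wrapped = e

    # 1.2.6 idiom: one call, wrap handling is internal to get_rate()
    rate = get_rate(counter_name, this_time, counter)
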
If not, write
+# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
+# Boston, MA 02110-1301 USA.
+
+# Code for predictive monitoring / anomaly detection
+
+# Export data from an RRD file. This requires an up-to-date
+# version of the rrdtools.
+
+def debug(x):
+    import pprint ; pprint.pprint(x)
+
+def rrd_export(filename, ds, cf, fromtime, untiltime, rrdcached=None):
+    # rrdtool xport --json -s 1361554418 -e 1361640814 --step 60 DEF:x=/omd/sites/heute/X.rrd:1:AVERAGE XPORT:x:HIRNI
+    cmd = "rrdtool xport --json -s %d -e %d --step 60 " % (fromtime, untiltime)
+    if rrdcached and os.path.exists(rrdcached):
+        cmd += "--daemon '%s' " % rrdcached
+    cmd += " DEF:x=%s:%s:%s XPORT:x 2>&1" % (filename, ds, cf)
+    # if opt_debug:
+    #     sys.stderr.write("Running %s\n" % cmd)
+    f = os.popen(cmd)
+    output = f.read()
+    exit_code = f.close()
+    if exit_code:
+        raise MKGeneralException("Cannot fetch RRD data: %s" % output)
+
+    # Parse without json module (this is not always available)
+    # Our data begins at "data: [...". The sad thing: names are not
+    # quoted here. Don't know why. We fake this by defining variables.
+    about = "about"
+    meta = "meta"
+    start = "start"
+    step = "step"
+    end = "end"
+    legend = "legend"
+    data = "data"
+    null = None
+
+    # begin = output.index("data:")
+    # data_part = output[begin + 5:-2]
+    data = eval(output)
+
+    return data["meta"]["step"], [ x[0] for x in data["data"] ]
+
+def find_ds_in_pnp_xmlfile(xml_file, varname):
+    ds = None
+    name = None
+    for line in file(xml_file):
+        line = line.strip()
+        if line.startswith("<DS>"):
+            ds = line[4:].split('<')[0]
+            if name == varname:
+                return int(ds)
+        elif line.startswith("<NAME>

    '): + bail_out(line[17:].split('<')[0]) + except Exception, e: + bail_out("Cannot call Multisite URL: %s" % e) + + +# We have 6 different modes: +# Only the host | 1: set | 4: remove +# Only specific services | 2: set | 5: remove +# Host and all services | 3: set | 6: remove + +# Authentication and host selection variables = [ ( "_username", opt_user ), ( "_secret", opt_secret ), ( "_transid", "-1" ), ( "_do_confirm", "yes" ), ( "_do_actions", "yes" ), - ( "host", arg_host ), + ("host", arg_host ), ] -if opt_mode == 'remove': +# Action variables for setting or removing (works in all views) +if opt_mode == "remove": variables += [ - ("view_name", "downtimes"), ("_remove_downtimes", "Remove"), + ("_down_remove", "Remove"), ] else: variables += [ ( "_down_from_now", "yes" ), - ( "_down_minutes", opt_duration ), + ( "_down_minutes", str(opt_duration) ), ( "_down_comment", opt_comment ), ] - if arg_services: - variables.append(("view_name", "service")) - else: - variables.append(("view_name", "hoststatus")) -def set_downtime(variables, add_vars): - url = make_url(opt_url + "view.py", variables + add_vars) - verbose("URL: " + url) - try: - pipe = urllib.urlopen(url) - l = len(pipe.readlines()) - verbose(" --> Got %d lines of response" % l) - except Exception, e: - bail_out("Cannot call Multisite URL: %s" % e) +# Downtime on host (handles 1 & 4, needed for 3 & 6) +if not arg_services: + set_downtime(variables, [("view_name", "hoststatus")]) -if arg_services: - for service in arg_services: - set_downtime(variables, [("service", service + "$")]) +# Downtime on specific services (handles 2 & 5) else: - set_downtime(variables, []) - if opt_all: - if opt_mode == 'set': - set_downtime(variables, [("view_name", "service")]) - + for service in arg_services: + set_downtime(variables, [("view_name", "service"), ("service", service)]) +# Handle services for option --all (3 & 6) +if opt_all: + set_downtime(variables, [("view_name", "host")]) diff -Nru check-mk-1.2.2p3/treasures/ds_random_bi.mk check-mk-1.2.6p12/treasures/ds_random_bi.mk --- check-mk-1.2.2p3/treasures/ds_random_bi.mk 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/treasures/ds_random_bi.mk 2013-11-05 09:58:00.000000000 +0000 @@ -0,0 +1,54 @@ + +# aggregation_rules["host"] = ( +# "Host $HOST$", +# [ "HOST" ], +# "worst", +# [ +# ( "snarks", [ "$HOST$" ] ), +# ( "gnogo", [ "$HOST$" ] ), +# ( "other", [ "$HOST$" ] ), +# ] +# ) + +aggregation_rules["host"] = { + "title" : "Host $HOST$", + "params" : [ "HOST" ], + "aggregation" : "worst", + "nodes" : [ + ( "snarks", [ "$HOST$" ] ), + ( "gnogo", [ "$HOST$" ] ), + ( "other", [ "$HOST$" ] ), + ] +} + + +aggregation_rules["snarks"] = ( + "Snarks", + [ "HOST", ], + "best", + [ + ( "$HOST$", "Snarks" ), + ] +) + +aggregation_rules["gnogo"] = ( + "Gnogo", + [ "HOST", ], + "best", + [ + ( "$HOST$", "Gnogo" ), + ] +) + +aggregation_rules["other"] = ( + "Other", + [ "HOST" ], + "worst", + [ + ( "$HOST$", REMAINING ), + ] +) + +aggregations += [ + ( "DS Random", FOREACH_HOST, ALL_HOSTS, "host", ["$1$"] ), +] diff -Nru check-mk-1.2.2p3/treasures/Event_Console/Eventconsole-Performance.py check-mk-1.2.6p12/treasures/Event_Console/Eventconsole-Performance.py --- check-mk-1.2.2p3/treasures/Event_Console/Eventconsole-Performance.py 2013-03-04 11:48:41.000000000 +0000 +++ check-mk-1.2.6p12/treasures/Event_Console/Eventconsole-Performance.py 2014-10-30 13:30:24.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . 
\ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. diff -Nru check-mk-1.2.2p3/treasures/Event_Console/mail_mkevent.py check-mk-1.2.6p12/treasures/Event_Console/mail_mkevent.py --- check-mk-1.2.2p3/treasures/Event_Console/mail_mkevent.py 2013-03-04 11:48:41.000000000 +0000 +++ check-mk-1.2.6p12/treasures/Event_Console/mail_mkevent.py 2014-10-30 13:30:24.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. diff -Nru check-mk-1.2.2p3/treasures/Event_Console/message_to_syslog.py check-mk-1.2.6p12/treasures/Event_Console/message_to_syslog.py --- check-mk-1.2.2p3/treasures/Event_Console/message_to_syslog.py 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/treasures/Event_Console/message_to_syslog.py 2014-10-30 13:30:24.000000000 +0000 @@ -0,0 +1,53 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. + +# This Script enables the sending of messages to a upd syslog server +# like the integrated syslogserver of mkeventd. 
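The script that follows hand-builds a classic BSD-syslog line. The framing, isolated into a runnable sketch (host, application and message are illustrative):

    import time

    prio = 5                          # numeric syslog priority, e.g. 5 = notice
    timestamp = time.strftime("%b %d %H:%M:%S", time.localtime(time.time()))
    line = "<%s>%s %s %s: %s\n" % (prio, timestamp, "myhost", "myapp", "hello")
    # e.g. '<5>Jun 24 10:15:00 myhost myapp: hello\n' -- sent as one UDP datagram
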
+#
+# Bastian Kuhn, bk@mathias-kettner.de
+import time
+import socket
+import sys
+
+if len(sys.argv) < 6:
+    print 'This script sends a message via UDP to a syslog server'
+    print 'Usage: %s SYSLOGSERVER HOSTNAME PRIO APPLICATION "MESSAGE"' % sys.argv[0]
+    sys.exit()
+
+host = sys.argv[1]
+event_host = sys.argv[2]
+prio = sys.argv[3]
+application = sys.argv[4]
+message = sys.argv[5]
+
+port = 514
+sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
+
+sock.connect((host, port))
+timestamp = time.strftime("%b %d %H:%M:%S", time.localtime(time.time()))
+sock.send("<%s>%s %s %s: %s\n" % (prio, timestamp, event_host, application, message))
+sock.close()
+
diff -Nru check-mk-1.2.2p3/treasures/Event_Console/nsca2mkeventd/LIESMICH check-mk-1.2.6p12/treasures/Event_Console/nsca2mkeventd/LIESMICH
--- check-mk-1.2.2p3/treasures/Event_Console/nsca2mkeventd/LIESMICH 1970-01-01 00:00:00.000000000 +0000
+++ check-mk-1.2.6p12/treasures/Event_Console/nsca2mkeventd/LIESMICH 2013-11-05 09:58:00.000000000 +0000
@@ -0,0 +1,33 @@
+Install into OMD site
+=====================
+Prerequisites:
+
+ - Have an existing OMD site
+ - Have the NSCA running in the site
+ - Have the MK Event Console running in the site
+
+Copy the nsca2mkeventd program to the site and make it executable:
+
+cp -p nsca2mkeventd /omd/sites/<SITE>/local/bin
+chmod +x /omd/sites/<SITE>/local/bin/nsca2mkeventd
+
+
+Copy the init script and enable the service:
+
+cp -p nsca2mkeventd.init /omd/sites/<SITE>/etc/init.d/nsca2mkeventd
+ln -s ../init.d/nsca2mkeventd /omd/sites/<SITE>/etc/rc.d/70-nsca2mkeventd
+
+
+Change the nsca configuration to forward the events to the nsca2mkeventd pipe.
+Open /omd/sites/<SITE>/etc/nsca/nsca.cfg and change the parameter "command_file"
+to /omd/sites/<SITE>/tmp/run/nsca2mkeventd.pipe
+
+
+Now restart all services.
+
+omd restart
+
+
+Now all check results coming in from the site's NSCA daemon are handed over
+to the command pipe opened by nsca2mkeventd and then forwarded to the
+Event Console of the site.
diff -Nru check-mk-1.2.2p3/treasures/Event_Console/nsca2mkeventd/nsca2mkeventd check-mk-1.2.6p12/treasures/Event_Console/nsca2mkeventd/nsca2mkeventd
--- check-mk-1.2.2p3/treasures/Event_Console/nsca2mkeventd/nsca2mkeventd 1970-01-01 00:00:00.000000000 +0000
+++ check-mk-1.2.6p12/treasures/Event_Console/nsca2mkeventd/nsca2mkeventd 2013-11-05 09:58:00.000000000 +0000
@@ -0,0 +1,280 @@
+#!/usr/bin/python
+# encoding: utf-8
+# Reads all incoming commands from its own command pipe. Those
+# commands are sent by NSCA to the command pipe. Forwards these
+# events to the local event console daemon.
+
+import os, sys, getopt, traceback, time, socket
+
+opt_debug = False
+opt_verbose = False
+opt_foreground = False
+
+g_application = 'nsca'
+
+#.
+#   .--Helper functions----------------------------------------------------.
+# | _ _ _ | +# | | | | | ___| |_ __ ___ _ __ ___ | +# | | |_| |/ _ \ | '_ \ / _ \ '__/ __| | +# | | _ | __/ | |_) | __/ | \__ \ | +# | |_| |_|\___|_| .__/ \___|_| |___/ | +# | |_| | +# +----------------------------------------------------------------------+ +# | Various helper functions | +# '----------------------------------------------------------------------' + +def bail_out(reason): + log("FATAL ERROR: %s" % reason) + sys.exit(1) + +def open_logfile(): + global g_logfile + g_logfile = file(g_logfile_path, "a") + +def log(text): + if type(text) == unicode: + text = text.encode("utf-8") + try: + g_logfile.write('[%.6f] %s\n' % (time.time(), text)) + g_logfile.flush() + except: + sys.stderr.write("%s\n" % text) + +def verbose(txt): + if opt_verbose: + log(txt) + +def usage(): + sys.stdout.write('''Usage: nsca2mkeventd [OPTIONS] + + -g, --foreground Do not daemonize, run in foreground + -d, --debug Enable debug mode (let exceptions through) + -E, --eventsocket P Path to unix socket to write events to + -P, --pipe P Path to pipe for receiving events from NSCA + -v, --verbose Log more details + +Default paths: + + Event socket: %(g_eventsocket_path)s + Event Pipe: %(g_pipe_path)s + Log file: %(g_logfile_path)s + +''' % globals()) + +#. +# .--Daemonize-----------------------------------------------------------. +# | ____ _ | +# | | _ \ __ _ ___ _ __ ___ ___ _ __ (_)_______ | +# | | | | |/ _` |/ _ \ '_ ` _ \ / _ \| '_ \| |_ / _ \ | +# | | |_| | (_| | __/ | | | | | (_) | | | | |/ / __/ | +# | |____/ \__,_|\___|_| |_| |_|\___/|_| |_|_/___\___| | +# | | +# +----------------------------------------------------------------------+ +# | Code for daemonizing | +# '----------------------------------------------------------------------' + +def daemonize(user=0, group=0): + # do the UNIX double-fork magic, see Stevens' "Advanced + # Programming in the UNIX Environment" for details (ISBN 0201563177) + try: + pid = os.fork() + if pid > 0: + # exit first parent + sys.exit(0) + except OSError, e: + sys.stderr.write("Fork failed (#1): %d (%s)\n" % (e.errno, e.strerror)) + sys.exit(1) + + # decouple from parent environment + # chdir -> don't prevent unmounting... + os.chdir("/") + + # Create new process group with the process as leader + os.setsid() + + # Set user/group depending on params + if group: + os.setregid(getgrnam(group)[2], getgrnam(group)[2]) + if user: + os.setreuid(getpwnam(user)[2], getpwnam(user)[2]) + + # do second fork + try: + pid = os.fork() + if pid > 0: + sys.exit(0) + except OSError, e: + sys.stderr.write("Fork failed (#2): %d (%s)\n" % (e.errno, e.strerror)) + sys.exit(1) + + sys.stdout.flush() + sys.stderr.flush() + + si = os.open("/dev/null", os.O_RDONLY) + so = os.open("/dev/null", os.O_WRONLY) + os.dup2(si, 0) + os.dup2(so, 1) + os.dup2(so, 2) + os.close(si) + os.close(so) + + log("Daemonized with PID %d." % os.getpid()) + +class MKSignalException(Exception): + def __init__(self, signum): + Exception.__init__(self, "Got signal %d" % signum) + self._signum = signum + +def signal_handler(signum, stack_frame): + verbose("Got signal %d." % signum) + raise MKSignalException(signum) + +#. +# .--Main----------------------------------------------------------------. 
+# | __ __ _ | +# | | \/ | __ _(_)_ __ | +# | | |\/| |/ _` | | '_ \ | +# | | | | | (_| | | | | | | +# | |_| |_|\__,_|_|_| |_| | +# | | +# +----------------------------------------------------------------------+ +# | Main entry and option parsing | +# '----------------------------------------------------------------------' + +def create_pipe(): + try: + if not stat.S_ISFIFO(os.stat(g_pipe_path).st_mode): + os.remove(g_pipe_path) + except: + pass + + if not os.path.exists(g_pipe_path): + os.mkfifo(g_pipe_path) + os.chmod(g_pipe_path, 0666) + log("Created pipe '%s' for receiving commands from nsca" % g_pipe_path) + +# Incoming lines: +#fprintf(command_file_fp,"[%lu] PROCESS_HOST_CHECK_RESULT;%s;%d;%s\n",(unsigned long)check_time,host_name,return_code,plugin_output); +#fprintf(command_file_fp,"[%lu] PROCESS_SERVICE_CHECK_RESULT;%s;%s;%d;%s\n",(unsigned long)check_time,host_name,svc_description,return_code,plugin_output); +def parse_line(line): + timestamp, command = line.split(' ', 1) + timestamp = int(timestamp[1:-1]) + cmd = command.split(';') + return [ timestamp ] + cmd + +def state_to_prio(state): + state = int(state) + if state == 0: + return 5 + elif state == 1: + return 4 + elif state == 2: + return 2 + elif state == 3: + return 7 + +def forward_line(parsed): + if parsed[1] == 'PROCESS_HOST_CHECK_RESULT': + timestamp, cmd, hostname, state, output = parsed + else: + timestamp, cmd, hostname, svc_desc, state, output = parsed + output = '%s: %s' % (svc_desc, output) + + t = time.strftime("%b %d %H:%M:%S", time.localtime(timestamp)) + line = "<%d>%s %s %s: %s\n" % (state_to_prio(state), t, hostname, g_application, output) + + s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) + s.connect(g_eventsocket_path) + verbose('=> %r' % line.rstrip()) + s.send(line) + s.close() + +def main(): + create_pipe() + + while True: + try: + for line in file(g_pipe_path): + try: + verbose('<= %r' % line.rstrip()) + parsed = parse_line(line.rstrip()) + forward_line(parsed) + except Exception, e: + log('EXCEPTION while processing line "%s" (%s). Skipping...' 
% (line.rstrip(), e)) + if opt_debug: + raise + + except MKSignalException, e: + log("Signalled to death by signal %d" % e._signum) + break + + except Exception, e: + log("EXCEPTION in main thread:\n%s" % traceback.format_exc()) + if opt_debug: + raise + time.sleep(1) + +os.unsetenv("LANG") + +omd_root = os.getenv("OMD_ROOT") +if omd_root: + g_pipe_path = omd_root + '/tmp/run/nsca2mkeventd.pipe' + g_eventsocket_path = omd_root + "/tmp/run/mkeventd/eventsocket" + g_logfile_path = omd_root + "/var/log/nsca2mkeventd.log" +else: + g_pipe_path = None + g_eventsocket_path = None + g_logfile_path = "/var/log/nsca2mkeventd.log" + +short_options = "hdvgP:E:" +long_options = [ "help", "foreground", "debug", "verbose", "eventsocket=", "pipe=" ] + +try: + opts, args = getopt.getopt(sys.argv[1:], short_options, long_options) + + # first parse modifers + for o, a in opts: + if o in [ '-d', '--debug' ]: + opt_debug = True + elif o in [ '-v', '--verbose' ]: + opt_verbose = True + elif o in [ '-g', '--foreground' ]: + opt_foreground = True + elif o in [ '-E', '--eventsocket' ]: + g_eventsocket_path = a + elif o in [ '-P', '--pipe' ]: + g_pipe_path = a + + # now handle action options + for o, a in opts: + if o in [ '-h', '--help' ]: + usage() + sys.exit(0) + + if not g_pipe_path: + bail_out("Please specify the path to the pipe (using -P).") + + if not g_eventsocket_path: + bail_out("Please specify the path to the eventsocket (using -E).") + + # Prepare logging if running in daemon mode + if not opt_foreground: + open_logfile() + log("nsca2mkeventd is starting") + + if not opt_foreground: + daemonize() + + main() + + log("Cleaning up") + os.remove(g_pipe_path) + + log("Successfully shut down.") + sys.exit(0) + +except Exception, e: + if opt_debug: + raise + bail_out(e) diff -Nru check-mk-1.2.2p3/treasures/Event_Console/nsca2mkeventd/nsca2mkeventd.init check-mk-1.2.6p12/treasures/Event_Console/nsca2mkeventd/nsca2mkeventd.init --- check-mk-1.2.2p3/treasures/Event_Console/nsca2mkeventd/nsca2mkeventd.init 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/treasures/Event_Console/nsca2mkeventd/nsca2mkeventd.init 2013-11-05 09:58:00.000000000 +0000 @@ -0,0 +1,80 @@ +#!/bin/bash +### BEGIN INIT INFO +# Provides: nsca2mkeventd +# Required-Start: $local_fs $remote_fs +# Required-Stop: $local_fs $remote_fs +# Should-Start: $syslog +# Should-Stop: $syslog +# Default-Start: 2 3 4 5 +# Default-Stop: 0 1 6 +# Short-Description: Start or stop the nsca2mkeventd daemon. +### END INIT INFO + +unset LANG + +pidof_nsca2mkeventd() { + pgrep -u $OMD_SITE -f -x "/usr/bin/python $OMD_ROOT/local/bin/nsca2mkeventd" +} + +case "$1" in + start) + echo -n 'Starting nsca2mkeventd... ' + + PID=$(pidof_nsca2mkeventd) && { + echo "Already running (PID: $PID)." + exit 1 + } + + $OMD_ROOT/local/bin/nsca2mkeventd + if [ $? -eq 0 ]; then + echo OK + else + echo ERROR + exit 1 + fi + ;; + stop) + echo -n 'Stopping nsca2mkeventd... ' + PID=$(pidof_nsca2mkeventd) || { + echo 'Not running.' + exit 0 + } + + I=0 + kill $PID + while kill -0 $PID >/dev/null 2>&1; do + if [ $I = '5' ]; then + echo -e 'sending SIGKILL... ' + kill -9 $PID + elif [ $I = '10' ]; then + echo ERROR + exit 1 + fi + + echo -n '.' + I=$(($I+1)) + sleep 1 + done + + echo OK + ;; + status) + PID=$(pidof_nsca2mkeventd) && { + echo "Running (PID: $PID)." + exit 0 + } || { + echo "Not running." 
+ exit 1 + } + ;; + restart) + $0 stop + $0 start + ;; + *) + echo 'Usage: /etc/init.d/nsca2mkeventd {start|stop|restart|status}' + exit 1 + ;; +esac + +exit 0 diff -Nru check-mk-1.2.2p3/treasures/Event_Console/sl_notify_to_eventd.py check-mk-1.2.6p12/treasures/Event_Console/sl_notify_to_eventd.py --- check-mk-1.2.2p3/treasures/Event_Console/sl_notify_to_eventd.py 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/treasures/Event_Console/sl_notify_to_eventd.py 2014-10-30 13:30:24.000000000 +0000 @@ -0,0 +1,72 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. + +# Send notifications remote to mkeventd +# Including Service Level + +mkevent_host = '' +mkevent_port = 514 +application = "notify" + +import time, socket, os +host = os.environ['NOTIFY_HOSTNAME'] +#0 Emergency +#1 Alert +#2 Critical +#3 Error +#4 Warning +#5 Notice +#6 Informational +#7 Debug + +def state_to_prio(state): + state = int(state) + if state == 0: + return 5 + elif state == 1: + return 4 + elif state == 2: + return 2 + elif state == 3: + return 7 + + +if os.environ['NOTIFY_WHAT'] == 'SERVICE': + sl = os.environ.get('NOTIFY_SVC_SL', 0) + prio = state_to_prio(os.environ['NOTIFY_SERVICESTATEID']) + message = "%s|%s|%s" % \ + ( sl, os.environ['NOTIFY_SERVICEDESC'], os.environ['NOTIFY_SERVICEOUTPUT'] ) +else: + sl = os.environ.get('NOTIFY_HOST_SL', 0) + prio = state_to_prio(os.environ['NOTIFY_HOSTSTATEID']) + message = "%s|HOSTSTATE|%s" % (sl, os.environ['NOTIFY_HOSTOUTPUT'] ) + +sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) +sock.connect((mkevent_host, mkevent_port)) + +timestamp = time.strftime("%b %d %H:%M:%S", time.localtime(time.time())) +sock.send("<%s>%s %s %s: %s\n" % (prio, timestamp, host, application, message)) +sock.close() diff -Nru check-mk-1.2.2p3/treasures/Event_Console/snmptd_mkevent.py check-mk-1.2.6p12/treasures/Event_Console/snmptd_mkevent.py --- check-mk-1.2.2p3/treasures/Event_Console/snmptd_mkevent.py 2013-11-05 09:22:37.000000000 +0000 +++ check-mk-1.2.6p12/treasures/Event_Console/snmptd_mkevent.py 2014-10-30 13:30:24.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . 
\ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. @@ -28,22 +28,42 @@ # all traps to the mkeventd # # Bastian Kuhn, bk@mathias-kettner.de -# Use this script only for testing. -# It can lead to a poor performance: for -# every received trap the python interpreter is -# started and the script is called +# If you use this script please keep in mind that this script is called +# for every trap the server receives. +# To use this Script, you have to configure your snmptrad.conf like that: +# authCommunity execute public +# traphandle default /path/to/this/script + +# Define the Hostname patterns here: +hostname_patterns = [ + 'SMI::enterprises.2349.2.2.2.5 = "(.*)"' +] import time import sys +import re -site_name = "SITE" +# Insert here the name of your omd site +site_name = "TESTSITE" deamon_path = "/omd/sites/%s/tmp/run/mkeventd/events" % site_name data = [] +match_host = False for line in sys.stdin: - data.append(line.strip()) + line = line.strip() + if hostname_patterns: + for pattern in hostname_patterns: + e = re.search(pattern, line) + if e: + match_host = e.group(1) + data.append(line) + msg = " ".join(data[2:]) -host, ip = data[:2] +host, ip = data[:2] +if match_host: + host = match_host.strip() + +#Write to mkevent Socket out = open(deamon_path, "w") timestamp = time.strftime("%b %d %H:%M:%S", time.localtime(time.time())) out.write("<5>%s %s trap: %s\n" % (timestamp, host, msg)) diff -Nru check-mk-1.2.2p3/treasures/find_piggy_orphans check-mk-1.2.6p12/treasures/find_piggy_orphans --- check-mk-1.2.2p3/treasures/find_piggy_orphans 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/treasures/find_piggy_orphans 2013-11-05 09:58:00.000000000 +0000 @@ -0,0 +1,7 @@ +#!/bin/bash +cd $OMD_ROOT +for h in $(cd tmp/check_mk/piggyback ; echo *) +do + lq "GET hosts\nColumns: address name\nFilter: address = $h\nFilter: name = $h\nOr: 2" | grep -q . || echo "$h" +done + diff -Nru check-mk-1.2.2p3/treasures/fsc_ipmi_mem_status.sh check-mk-1.2.6p12/treasures/fsc_ipmi_mem_status.sh --- check-mk-1.2.2p3/treasures/fsc_ipmi_mem_status.sh 2013-03-04 11:48:41.000000000 +0000 +++ check-mk-1.2.6p12/treasures/fsc_ipmi_mem_status.sh 2014-10-30 13:30:24.000000000 +0000 @@ -6,7 +6,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. diff -Nru check-mk-1.2.2p3/treasures/host_to_ping_check.sh check-mk-1.2.6p12/treasures/host_to_ping_check.sh --- check-mk-1.2.2p3/treasures/host_to_ping_check.sh 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/treasures/host_to_ping_check.sh 2014-10-30 13:30:24.000000000 +0000 @@ -0,0 +1,26 @@ +#!/bin/sh +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. 
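The hostname_patterns mechanism added to snmptd_mkevent.py above works as follows: the first capture group of a matching pattern overrides the host name reported by snmptrapd. A runnable sketch (the trap line is illustrative):

    import re

    hostname_patterns = [ 'SMI::enterprises.2349.2.2.2.5 = "(.*)"' ]
    line = 'SMI::enterprises.2349.2.2.2.5 = "myhost42"'
    match_host = False
    for pattern in hostname_patterns:
        e = re.search(pattern, line)
        if e:
            match_host = e.group(1)
    print match_host                  # -> myhost42
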
+# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. + +find . -name _HOST_\* | sed -re 's/(\S*)_HOST_(\S*)/\1_HOST_\2 \1PING\2/' | xargs -n 2 cp diff -Nru check-mk-1.2.2p3/treasures/incomplete_checks/db2/db2_bp_hitratios check-mk-1.2.6p12/treasures/incomplete_checks/db2/db2_bp_hitratios --- check-mk-1.2.2p3/treasures/incomplete_checks/db2/db2_bp_hitratios 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/treasures/incomplete_checks/db2/db2_bp_hitratios 2014-10-30 13:30:24.000000000 +0000 @@ -0,0 +1,67 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# ------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# ------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. 
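The find/sed/xargs one-liner in host_to_ping_check.sh above emits "source destination" pairs which xargs feeds to cp. The rewrite rule, rendered in Python (the file name is illustrative; presumably these are PNP perfdata files of the "_HOST_" pseudo service being duplicated for a PING service):

    import re

    src = "perfdata/myhost/_HOST_.rrd"
    dst = re.sub(r'(\S*)_HOST_(\S*)', r'\1PING\2', src)
    print src, "->", dst              # -> perfdata/myhost/_HOST_.rrd -> perfdata/myhost/PING.rrd
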
+ +# <<>> + +def inventory_db2_bp_hitratios(info): + inventory = [] + current_instance = "" + for line in info: + if line[0].startswith("[[["): + current_instance = line[0][3:-3] + found_headers = False + continue + if not found_headers: + found_headers = True + continue + inventory.append(("%s:%s" % (current_instance, line[0]), {})) + return inventory + +def check_db2_bp_hitratios(item, no_params, info): + lines = iter(info) + try: + while True: + line = lines.next() + if line[0].startswith("[[["): + current_instance = line[0][3:-3] + headers = lines.next() + continue + if item == "%s:%s" % (current_instance, line[0]): + hr_info = dict(zip(headers[1:], line[1:])) # skip BP_NAME + for key, value in hr_info.items(): + value = value.replace("-","0") + key = key.replace("_RATIO_PERCENT","") + yield 0, "%s: %s%%" % (key, value), [(key, float(value),0,0,0,100)] + return + except StopIteration: + pass + +check_info['db2_bp_hitratios'] = { + "service_description" : "DB2 BP-Hitratios %s", + "check_function" : check_db2_bp_hitratios, + "inventory_function" : inventory_db2_bp_hitratios, + "has_perfdata" : True +} diff -Nru check-mk-1.2.2p3/treasures/incomplete_checks/db2/db2_counters check-mk-1.2.6p12/treasures/incomplete_checks/db2/db2_counters --- check-mk-1.2.2p3/treasures/incomplete_checks/db2/db2_counters 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/treasures/incomplete_checks/db2/db2_counters 2014-10-30 13:30:24.000000000 +0000 @@ -0,0 +1,97 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# ------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# ------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. 
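The header-to-value mapping in check_db2_bp_hitratios above, isolated into a runnable sketch (the column names are invented for illustration; "-" stands for "no data" in the agent output):

    headers = [ "BP_NAME", "DATA_HIT_RATIO_PERCENT", "INDEX_HIT_RATIO_PERCENT" ]
    line    = [ "IBMDEFAULTBP", "98.23", "-" ]
    hr_info = dict(zip(headers[1:], line[1:]))   # skip BP_NAME, as in the check
    for key, value in hr_info.items():
        value = value.replace("-", "0")          # "-" becomes "0" before float()
        key = key.replace("_RATIO_PERCENT", "")  # shorten the metric label
        print "%s: %s%%" % (key, value)
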
+ +# <<>> +# CMDBS1 4711 0 +2.00000000000000E+000 +0.00000000000000E+000 + +factory_settings["db2_counters_defaults"] = { + "deadlocks" : (10.0, 5.0), + "lockwaits" : (11.0, 6.0), + "sortoverflows" : (12.0, 7.0), +} + +counters = { + "deadlocks" : "Deadlocks/sec", + "lockwaits" : "Lockwaits/sec", + "sortoverflows" : "Sortoverflows/sec", +} + +def inventory_db2_counters(info): + inventory = [] + for line in info: + if len(line) == 3: + db_name = line[0] + if line[1] in counters.keys(): + inventory.append((db_name, db2_counters_defaults)) + return inventory + +def check_db2_counters(item, params, info): + perfdata = [] + output = [] + wrapped = False + state = 0 + now = time.time() + for line in info: + if len(line) > 2 and line[0] == item and line[1] in counters.keys(): + counter = line[1] + label = counters.get(counter) + value = float(line[2]) + # compute rate from counter value + countername = "db2_counters.%s.%s" % (item, counter) + try: + timedif, rate = get_counter(countername, now, value) + except MKCounterWrapped: + wrapped = True + continue + + warn, crit = params.get(counter) + sym = "" + if rate > crit: + state = max(state, 2) + sym = "(!!)" + elif rate > warn: + state = max(state, 1) + sym = "(!)" + + + output.append('%s: %.1f/s%s' % (label, rate, sym)) + perfdata.append((counter, rate, warn, crit)) + + if wrapped: + raise MKCounterWrapped("", "Some counter wrapped, no data this time") + if output: + return (state, ', '.join(output), perfdata) + else: + return (3, 'Counters for %s could not be found in agent output' % (item)) + +check_info['db2_counters'] = { + "service_description" : "DB2 Counters %s", + "check_function" : check_db2_counters, + "inventory_function" : inventory_db2_counters, + "has_perfdata" : True, + "group" : "db2_counters", + "default_levels_variable" : "db2_counters_defaults", +} diff -Nru check-mk-1.2.2p3/treasures/incomplete_checks/db2/db2_logsizes check-mk-1.2.6p12/treasures/incomplete_checks/db2/db2_logsizes --- check-mk-1.2.2p3/treasures/incomplete_checks/db2/db2_logsizes 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/treasures/incomplete_checks/db2/db2_logsizes 2014-10-30 13:30:24.000000000 +0000 @@ -0,0 +1,101 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# ------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# ------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. 
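The threshold logic of check_db2_counters above, with made-up numbers. Note that the factory defaults shown place warn above crit (e.g. deadlocks 10.0/5.0), and the crit branch is tested first, so a rate between the two values already goes critical:

    params = { "deadlocks": (10.0, 5.0) }   # (warn, crit) per second, as above
    rate = 7.0                              # illustrative rate
    warn, crit = params.get("deadlocks")
    state = 0
    sym = ""
    if rate > crit:
        state = max(state, 2)
        sym = "(!!)"
    elif rate > warn:
        state = max(state, 1)
        sym = "(!)"
    print '%s: %.1f/s%s' % ("Deadlocks/sec", rate, sym)   # -> Deadlocks/sec: 7.0/s(!!)
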
+ +# <<>> +# db2taddm DB2v10.1.0.4,s140509(IP23577) + +def inventory_db2_logsizes(info): + inventory = [] + for line in info: + if line[0].startswith("[[["): + inventory.append((line[0][3:-3], None)) + return inventory + +def check_db2_logsizes(item, params, info): + lines = iter(info) + sector_size = 4096 + try: + while True: + line = lines.next() + if item == line[0][3:-3]: + data = {} + data.update(dict([lines.next()])) # usedspace + data.update(dict([lines.next()])) # logfilsiz + data.update(dict([lines.next()])) # logprimary + data.update(dict([lines.next()])) # logsecond + + total = int(data["logfilsiz"]) * (int(data["logprimary"]) + int(data["logsecond"])) * sector_size + used = int(data["usedspace"]) + free = total - used + perc_free = (float(free) / total) * 100 + + warn, crit = None, None + if params: + if type(params) == tuple: + warn, crit = params + else: + # A list of levels. Choose the correct one depending on the + # size of the logfile. We do not make the first + # rule match, but that with the largest size_gb. That way + # the order of the entries is not important. + found_size = 0 + found = False + for to_size, this_levels in params: + if total > to_size and to_size >= found_size: + warn, crit = this_levels + found_size = to_size + found = True + if not found: + warn, crit = 100.0, 100.0 # entry not found in list + + if type(warn) == float: # percentage free + levels_info = "(Levels at %.1f/%.1f%%)" % (warn, crit) + if perc_free <= crit: + yield 2, levels_info + elif perc_free <= warn: + yield 1, levels_info + else: # absolute free + warn = warn * 1024 * 1024 + cirt = crit * 1024 * 1024 + levels_info = "(Levels at %s/%s%%)" % (tuple(map(lambda x: get_bytes_human_readable(x), [warn, crit]))) + if free <= crit: + yield 2, levels_info + elif free <= warn: + yield 1, levels_info + + perfdata = [("free", free, warn, crit, 0, total)] + yield 0, "%.2f%% free: (%s of %s)" % tuple([perc_free] + map(lambda x: get_bytes_human_readable(x), [free, total])), perfdata + break + except StopIteration: + pass + +check_info['db2_logsizes'] = { + "service_description" : "DB2 Logsize %s", + "check_function" : check_db2_logsizes, + "inventory_function" : inventory_db2_logsizes, + "group" : "db2_logsizes", + "has_perfdata" : True +} diff -Nru check-mk-1.2.2p3/treasures/incomplete_checks/db2/db2_sessions check-mk-1.2.6p12/treasures/incomplete_checks/db2/db2_sessions --- check-mk-1.2.2p3/treasures/incomplete_checks/db2/db2_sessions 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/treasures/incomplete_checks/db2/db2_sessions 2014-10-30 13:30:24.000000000 +0000 @@ -0,0 +1,69 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# ------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# ------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. 
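The size-dependent level selection in check_db2_logsizes above picks the entry with the largest matching size threshold, so the order of the list does not matter. A runnable sketch with illustrative numbers:

    params = [ (1 * 1024 ** 3, (10.0, 5.0)),   # logs above 1 GB: 10%/5% free
               (10 * 1024 ** 3, (5.0, 2.0)) ]  # logs above 10 GB: 5%/2% free
    total = 12 * 1024 ** 3                     # illustrative total log size
    found_size = 0
    warn, crit = 100.0, 100.0                  # fallback when nothing matches
    for to_size, this_levels in params:
        if total > to_size and to_size >= found_size:
            warn, crit = this_levels
            found_size = to_size
    print warn, crit                           # -> 5.0 2.0
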
check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. + +# <<>> +# db2taddm DB2v10.1.0.4,s140509(IP23577) + +def inventory_db2_sessions(info): + inventory = [] + for line in info: + if line[0].startswith("[[["): + inventory.append((line[0][3:-3], {})) + return inventory + +def check_db2_sessions(item, no_params, info): + found_match = False + for line in info: + if item == line[0][3:-3]: + found_match = True + continue + + if found_match: + if line[0].startswith("[[["): + break + else: + continue + + info, value = line + if info == "connections": + yield 0, "%s: %s" % (info.title(), value), [(info, int(value))] + elif info == "latency": + minutes, rest = value.split(":") + seconds, mseconds = rest.split(".") + ms = int(minutes) * 60 * 1000 + int(seconds) * 1000 + int(mseconds) + yield 0, "%s: %.2f ms" % (info.title(), ms), [(info, ms)] + else: + yield 0, "%s: %s" % (info.title(), value) + + if not found_match: + yield 3, "Database not found in agent output" + +check_info['db2_sessions'] = { + "service_description" : "DB2 Session %s", + "check_function" : check_db2_sessions, + "inventory_function" : inventory_db2_sessions, + "has_perfdata" : True +} diff -Nru check-mk-1.2.2p3/treasures/incomplete_checks/db2/db2_tablespaces check-mk-1.2.6p12/treasures/incomplete_checks/db2/db2_tablespaces --- check-mk-1.2.2p3/treasures/incomplete_checks/db2/db2_tablespaces 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/treasures/incomplete_checks/db2/db2_tablespaces 2014-10-30 13:30:24.000000000 +0000 @@ -0,0 +1,226 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# ------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# ------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. + +# no used space check for Tablsspaces with CONTENTS in ('TEMPORARY','UNDO') +# It is impossible to check the used space in UNDO and TEMPORARY Tablespaces +# These Types of Tablespaces are ignored in this plugin. 
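The latency conversion in check_db2_sessions above, isolated: the agent delivers "M:SS.mmm" and the check turns it into milliseconds (the value below is illustrative):

    value = "1:02.345"                              # illustrative agent value
    minutes, rest = value.split(":")
    seconds, mseconds = rest.split(".")
    ms = int(minutes) * 60 * 1000 + int(seconds) * 1000 + int(mseconds)
    print "Latency: %.2f ms" % ms                   # -> Latency: 62345.00 ms
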
+# This restriction only works with newer agents, because we need an
+# additional parameter at the end of each datafile
+
+# <<<db2_tablespaces>>>
+#[[[db2taddm]]]
+#DB2 TSM Variablen werden gesetzt
+#
+# Database Connection Information
+#
+# Database server = DB2/AIX64 9.7.4
+# SQL authorization ID = DB2TADDM
+# Local database alias = CMDBS2
+#
+#TBSP_NAME        TBSP_TYPE TBSP_STATE TBSP_USABLE_SIZE_KB TBSP_TOTAL_SIZE_KB TBSP_USED_SIZE_KB TBSP_FREE_SIZE_KB
+#---------------- --------- ---------- ------------------- ------------------ ----------------- -----------------
+#SYSCATSPACE      DMS       NORMAL                  655232             655360            623632             31600
+#USERSPACE1       DMS       NORMAL                15203328           15204352          15171072             32256
+#SYSTOOLSPACE     DMS       NORMAL                   32640              32768              3600             29040
+#LARGESPACE2      DMS       NORMAL                   80896              81920              3072             77824
+#TEMPSPACE1       SMS       NORMAL                      32                 32                32         682252576
+#USERSPACE2       SMS       NORMAL                 2700864            2700864           2700864          85281572
+#LARGETEMP2       SMS       NORMAL                    3520               3520              3520          85281572
+#USERSPACE3       SMS       NORMAL                 4421232            4421232           4421232          85281572
+#MYTMPSPACE       SMS       NORMAL                      64                 64                64         682252576
+#SYSTOOLSTMPSPACE SMS       NORMAL                      32                 32                32         682252576
+#
+# 10 record(s) selected.
+
+#
+# Order of columns
+# 0. TBSP_NAME
+# 1. TBSP_TYPE
+# 2. TBSP_STATE
+# 3. TBSP_USABLE_SIZE_KB
+# 4. TBSP_TOTAL_SIZE_KB
+# 5. TBSP_USED_SIZE_KB
+# 6. TBSP_FREE_SIZE_KB
+
+
+factory_settings["db2_tablespaces_defaults"] = {
+    "levels"         : (10.0, 5.0),
+    "magic_normsize" : 1000,
+    "levels_low"     : (0.0, 0.0)
+}
+
+def inventory_db2_tablespaces(info):
+    inventory = []
+    tbsp_name = ""
+    for line in info:
+        if len(line) == 1:
+            if line[0].startswith("[[["):
+                instance = line[0].replace("[[[","").replace("]]]","")
+                tbsp_name = ""
+        elif len(line) == 7:
+            try:
+                tbsp_name, tbsp_type, tbsp_state = line[0:3]
+                usable_kb, total_kb, used_kb, free_kb = map(int, line[3:7])
+            except Exception, e:
+                # tbsp_name = ""
+                continue
+        else:
+            #tbsp_name = ""
+            continue
+
+        if tbsp_name:
+            inventory.append(("%s.%s" % (instance, tbsp_name), None))
+    return inventory
+
+
+def get_tablespace_levels_in_bytes(size_bytes, params):
+    # If the magic factor is used, take table size and magic factor
+    # into account in order to move levels
+    magic = params.get("magic")
+
+    # Use tablespace size dependent dynamic levels or static levels
+    if type(params.get("levels")) == tuple:
+        warn, crit = params.get("levels")
+    else:
+        # A list of levels. Choose the correct one depending on the
+        # size of the current tablespace
+        for to_size, this_levels in params.get("levels"):
+            if size_bytes > to_size:
+                warn, crit = this_levels
+                break
+        else:
+            return None, None, ""
+
+    levels_text = "levels at "
+    if magic:
+        # convert warn/crit to percentages
+        if type(warn) != float:
+            warn = savefloat(warn * 1024 * 1024 / float(size_bytes)) * 100
+        if type(crit) != float:
+            crit = savefloat(crit * 1024 * 1024 / float(size_bytes)) * 100
+
+        normsize = params["magic_normsize"]
+        hbytes_size = size_bytes / (float(normsize) * 1024 * 1024)
+        felt_size = hbytes_size ** magic
+        scale = felt_size / hbytes_size
+        warn_scaled = 100 - ((100 - warn) * scale)
+        crit_scaled = 100 - ((100 - crit) * scale)
+
+        # Make sure levels can never get too low due to the magic factor
+        lowest_warning_level, lowest_critical_level = params["levels_low"]
+        if warn_scaled < lowest_warning_level:
+            warn_scaled = lowest_warning_level
+        if crit_scaled < lowest_critical_level:
+            crit_scaled = lowest_critical_level
+        warn_bytes = savefloat(size_bytes * warn_scaled / 100)
+        crit_bytes = savefloat(size_bytes * crit_scaled / 100)
+        levels_text += get_bytes_human_readable(warn_bytes)
+        levels_text += "/%s" % get_bytes_human_readable(crit_bytes)
+    else:
+        # warn/crit levels of type float => percentages of max size, otherwise MB
+        if type(warn) == float:
+            warn_bytes = warn / 100.0 * size_bytes
+            levels_text += "%.1f%%" % warn
+        else:
+            warn_bytes = warn * 1024 * 1024
+            levels_text += get_bytes_human_readable(warn_bytes)
+
+        if type(crit) == float:
+            crit_bytes = crit / 100.0 * size_bytes
+            levels_text += "/%.1f%%" % crit
+        else:
+            crit_bytes = crit * 1024 * 1024
+            levels_text += "/%s" % get_bytes_human_readable(crit_bytes)
+
+    return warn_bytes, crit_bytes, levels_text
+
+
+def check_db2_tablespaces(item, params, info):
+    try:
+        instance, tbsname = item.split('.')
+    except ValueError:
+        return (3, 'Invalid check item given (must be <instance>.<tablespace>)')
+
+    started = False
+    for line in info:
+        if line[0] == "[[[" + instance + "]]]":
+            started = True
+
+        elif len(line) == 7 and started and line[0] == tbsname:
+            try:
+                tbsp_name, tbsp_type, tbsp_state = line[0:3]
+                usable, total, used, free = map(lambda x: float(x) * 1024, line[3:7])
+            except Exception, e:
+                continue
+
+            #perc_used = used/usable*100.0
+            if tbsp_type == "SMS":
+                usable = free # for SMS the free size is the disk space available to the db file
+
+            perc_free = 100.0 - used / usable * 100.0
+
+            infotext = "%.1f%% free" % perc_free
+
+            status = 0
+            sym = ""
+
+            warn, crit, levels_text = get_tablespace_levels_in_bytes(usable, params)
+            if (crit is not None and free <= crit) or (warn is not None and free <= warn):
+                if free <= crit:
+                    status = 2
+                    sym = "(!!)"
+                elif free <= warn:
+                    status = max(1, status)
+                    sym = "(!)"
+                infotext += "%s (%s)" % (sym, levels_text)
+
+            infotext += ", %s of %s usable" % \
+                (get_bytes_human_readable(used),
+                 get_bytes_human_readable(usable))
+
+            sym = ""
+            if tbsp_state.lower() != "normal":
+                status = max(1, status)
+                sym = "(!)"
+
+            infotext += ", State: %s%s, Type: %s" % (tbsp_state, sym, tbsp_type)
+
+            perfdata = [ ("size", usable, total - (warn or 0), total - (crit or 0)),
+                         ("used", used),
+                         ("max_size", total) ]
+
+            return status, infotext, perfdata
+
+    return 3, "no such tablespace found"
+
+check_info['db2_tablespaces'] = {
+    "service_description" : "DB2 Tablespace %s",
+    "check_function"      : check_db2_tablespaces,
+    "inventory_function"  : inventory_db2_tablespaces,
+    "has_perfdata"        : True,
+    "group"               : "db2_tablespaces",
"default_levels_variable" : "db2_tablespaces_defaults", +} diff -Nru check-mk-1.2.2p3/treasures/incomplete_checks/db2/db2_versions check-mk-1.2.6p12/treasures/incomplete_checks/db2/db2_versions --- check-mk-1.2.2p3/treasures/incomplete_checks/db2/db2_versions 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/treasures/incomplete_checks/db2/db2_versions 2014-10-30 13:30:24.000000000 +0000 @@ -0,0 +1,50 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# ------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# ------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. + +# <<>> +# db2taddm DB2v10.1.0.4,s140509(IP23577) + +def inventory_db2_versions(info): + inventory = [] + for line in info: + instance = line[0] + inventory.append((instance, None)) + return inventory + +def check_db2_versions(item, no_params, info): + for line in info: + instance, version = line + if item == instance: + yield 0, version + return + else: + yield 3, "Instance not found in agent output" + +check_info['db2_versions'] = { + "service_description" : "DB2 Version %s", + "check_function" : check_db2_versions, + "inventory_function" : inventory_db2_versions, +} diff -Nru check-mk-1.2.2p3/treasures/incomplete_checks/db2/mk_db2 check-mk-1.2.6p12/treasures/incomplete_checks/db2/mk_db2 --- check-mk-1.2.2p3/treasures/incomplete_checks/db2/mk_db2 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/treasures/incomplete_checks/db2/mk_db2 2014-10-30 13:30:24.000000000 +0000 @@ -0,0 +1,60 @@ +#!/bin/ksh +# $HOME/sqllib/db2profile + +INSTANCES=$(ps -ef | grep [d]b2sysc | awk '{print $1 }') + +for INSTANCE in $INSTANCES; do + # find home directory + HOMEDIR=`grep "^$INSTANCE" /etc/passwd | awk -F: '{print $6}'|grep "$INSTANCE$"` + db_version=$(su $INSTANCE -c ". $HOMEDIR/sqllib/db2profile; db2 get snapshot for dbm; exit"| grep -e "Product name" -e "Service level"|awk -v FS="=" '{print $2}'|sed 'N;s/\n/,/g'|sed 's/ //g') + echo "<<>>" + echo $INSTANCE $db_version + + DBS=$(su $INSTANCE -c ". $HOMEDIR/sqllib/db2profile; db2 list database directory on $HOMEDIR " | grep "Database name" | awk '{ print $NF }') + + # Each database in an instance has the same port information + db2_tcp_service=`su $INSTANCE -c ". 
$HOMEDIR/sqllib/db2profile > /dev/null; db2 connect to $(echo $DBS|tail -1) > /dev/null;db2 -x get dbm cfg|grep $INSTANCE|grep \"TCP/IP Service\"; exit"|awk -v FS="=" '{print $2}'|tr -d ' ' ` + db_port="port 0" + if ( grep $db2_tcp_service /etc/services|grep -q "^$db2_tcp_service " ); then + db_port="port "`grep $db2_tcp_service /etc/services|grep "^$db2_tcp_service "|awk '{print $2}'|awk -v FS="/" '{print $1}'` + fi + + for DB in $DBS; do + echo "<<>>" + echo "[[[$INSTANCE:$DB]]]" + + su $INSTANCE -c ". $HOMEDIR/sqllib/db2profile > /dev/null; db2 connect to $DB > /dev/null;db2 -x \"SELECT tbsp_name, tbsp_type, tbsp_state, tbsp_usable_size_kb, tbsp_total_size_kb, tbsp_used_size_kb, tbsp_free_size_kb FROM sysibmadm.tbsp_utilization WHERE tbsp_type = 'DMS' UNION ALL SELECT tu.tbsp_name, tu.tbsp_type, tu.tbsp_state, tu.tbsp_usable_size_kb, tu.tbsp_total_size_kb, tu.tbsp_used_size_kb, (cu.fs_total_size_kb - cu.fs_used_size_kb) AS tbsp_free_size_kb FROM sysibmadm.tbsp_utilization tu INNER JOIN ( SELECT tbsp_id, 1 AS fs_total_size_kb, 0 AS fs_used_size_kb FROM sysibmadm.container_utilization WHERE (fs_total_size_kb IS NULL OR fs_used_size_kb IS NULL) GROUP BY tbsp_id) cu ON (tu.tbsp_type = 'SMS' AND tu.tbsp_id = cu.tbsp_id) UNION ALL SELECT tu.tbsp_name, tu.tbsp_type, tu.tbsp_state, tu.tbsp_usable_size_kb, tu.tbsp_total_size_kb, tu.tbsp_used_size_kb, (cu.fs_total_size_kb - cu.fs_used_size_kb) AS tbsp_free_size_kb FROM sysibmadm.tbsp_utilization tu INNER JOIN ( SELECT tbsp_id, SUM(fs_total_size_kb) AS fs_total_size_kb, SUM(fs_used_size_kb) AS fs_used_size_kb FROM sysibmadm.container_utilization WHERE (fs_total_size_kb IS NOT NULL AND fs_used_size_kb IS NOT NULL) GROUP BY tbsp_id) cu ON (tu.tbsp_type = 'SMS' AND tu.tbsp_id = cu.tbsp_id)\";exit"|awk '{print $1" "$2" "$3" "$4" "$5" "$6" "$7}' + + echo "<<>>" + deadlocks=`su $INSTANCE -c ". $HOMEDIR/sqllib/db2profile > /dev/null; db2 connect to $DB > /dev/null;db2 -x \"SELECT deadlocks from sysibmadm.snapdb\"; exit"` + echo "$INSTANCE:$DB deadlocks $deadlocks" + lockwaits=`su $INSTANCE -c ". $HOMEDIR/sqllib/db2profile > /dev/null; db2 connect to $DB > /dev/null;db2 -x \"SELECT lock_waits from sysibmadm.snapdb\"; exit"` + echo "$INSTANCE:$DB lockwaits $lockwaits" + sortoverflows=`su $INSTANCE -c ". $HOMEDIR/sqllib/db2profile > /dev/null; db2 connect to $DB > /dev/null;db2 -x \"SELECT sort_overflows from sysibmadm.snapdb\"; exit"` + echo "$INSTANCE:$DB sortoverflows $sortoverflows" + + echo "<<>>" + echo "[[[$INSTANCE:$DB]]]" +# su $INSTANCE -c ". $HOMEDIR/sqllib/db2profile > /dev/null; db2 connect to $DB > /dev/null;db2 \"select total_log_used as Log_Used, total_log_available as Log_Free, tot_log_used_top as Max_Log_Used, sec_log_used_top as Max_Secondary_Used, sec_logs_allocated as Secondaries from sysibmadm.snapdb\";exit"|grep -v "selected."|sed -e '/^$/d' -e '/^-/d' + echo "used_space "$(su $INSTANCE -c ". $HOMEDIR/sqllib/db2profile > /dev/null; db2 connect to $DB > /dev/null;db2 -x \"select total_log_used from sysibmadm.snapdb\";exit" | tr -d ' ') + su $INSTANCE -c ". $HOMEDIR/sqllib/db2profile > /dev/null; db2 connect to $DB > /dev/null;db2 -x \"SELECT NAME, VALUE FROM SYSIBMADM.DBCFG WHERE NAME IN ('logfilsiz','logprimary','logsecond')\"| awk '{print \$1\" \"\$2}'; exit" + + echo "<<>>" + echo "[[[$INSTANCE:$DB]]]" + echo $db_port + echo "connections:$(su $INSTANCE -c ". 
$HOMEDIR/sqllib/db2profile > /dev/null; db2 connect to $DB > /dev/null;db2 list applications |grep -v Auth|grep -v Name|sed -e '/^$/d'|wc -l|tr -d ' ';exit")" + su $INSTANCE -c ". $HOMEDIR/sqllib/db2profile > /dev/null; ksh -c \"time db2 connect to $DB > /dev/null\" 2>&1 | grep real | awk '{print \"latency \"\$2}'| sed -e 's/m/:/' -e 's/s//';exit" + + echo "<<>>" + echo "[[[$INSTANCE:$DB]]]" + su $INSTANCE -c ". $HOMEDIR/sqllib/db2profile > /dev/null; db2 connect to $DB > /dev/null;db2 \"SELECT SUBSTR(BP_NAME,1,14) AS BP_NAME, TOTAL_HIT_RATIO_PERCENT, DATA_HIT_RATIO_PERCENT, INDEX_HIT_RATIO_PERCENT, XDA_HIT_RATIO_PERCENT FROM SYSIBMADM.BP_HITRATIO\";exit"|grep -v "selected."|sed -e '/^$/d' -e '/^-/d' + + echo "<<>>" + echo "$INSTANCE:$DB $(su $INSTANCE -c ". $HOMEDIR/sqllib/db2profile > /dev/null; db2 connect to $DB > /dev/null;db2 \"get snapshot for database on $DB\";exit"|grep -i sort)" + + echo "<<>>" + echo "$INSTANCE:$DB $(su $INSTANCE -c ". $HOMEDIR/sqllib/db2profile > /dev/null; db2 connect to $DB > /dev/null;db2 \"select SQLM_ELM_LAST_BACKUP from table(SNAPSHOT_DATABASE( cast( null as VARCHAR(255)), cast(null as + int))) as ref\";exit"|grep -v "selected."|sed -e '/^$/d' -e '/^[- ]*/d')" + done +done +exit diff -Nru check-mk-1.2.2p3/treasures/inventory/extract_inventory.py check-mk-1.2.6p12/treasures/inventory/extract_inventory.py --- check-mk-1.2.2p3/treasures/inventory/extract_inventory.py 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/treasures/inventory/extract_inventory.py 2015-09-21 10:59:54.000000000 +0000 @@ -0,0 +1,296 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. + +# Author: Goetz Golla, gg@mathias-kettner.de + +# This script extracts data of the hardware inventory to csv files + +relations = { + "devices": { + "columns": ( + ( "@hostname", "import_id" ), # special functions start with "@" + ( "!sla", "import_data_source_id" ), # fixed value is prepended with "!" 
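+            # The first element of each pair below is a path into the
+            # inventory tree. A summary of the spec conventions, as they are
+            # implemented in is_list(), no_list_get() and list_get() further
+            # down (the plain-path example is just for illustration):
+            #   "hardware.system.serial"       - plain path into the tree
+            #   "@hostname", "@inventory_date" - special values, see special_value()
+            #   "!sla"                         - fixed literal value "sla"
+            #   "software.packages:*.name"     - iterate a list, one row per entry
+            #   "software.packages:*.+@hostname+name+version"
+            #                                  - md5 hash over the concatenated
+            #                                    fields, used as a unique id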
+ ( "!default", "import_org_level_2_id" ), + ( "@hostname", "device_key" ), + ( "hardware.system.manufacturer", "device_manufacturer" ), + ( "hardware.system.family", "device_model" ), + ( "hardware.system.serial", "serial_number" ), + ( "software.os.name", "operating_system" ), + ( "@inventory_date", "inventory_date" ), + ( "software.os.install_date", "installation_date" ), + ( "hardware.cpu.sockets", "cpu_socket_count" ), + ( "hardware.cpu.cpus", "cpu_chip_count" ), + ( "hardware.cpu.cores", "cpu_core_count" ), + ( "hardware.cpu.max_speed", "cpu_speed" ), + ( "hardware.cpu.model", "cpu_name" ), + ), + "filter": {}, + "converter": { + "software.os.install_date": lambda val: time.strftime("%Y-%m-%d", time.localtime(val)), + "@inventory_date": lambda val: time.strftime("%Y-%m-%d", time.localtime(val)), + "hardware.cpu.max_speed": lambda val: val/1000000, # hz in mhz + }, + }, + "inv_raw_arp": { + "columns": ( + ( "software.packages:*.+@hostname+vendor+name+version", "import_id" ), + ( "software.packages:*.vendor", "publisher" ), + ( "software.packages:*.name", "product" ), + ( "software.packages:*.version", "product_version" ), + ( "@hostname", "import_device_id" ), + ), + "filter": { + "software.packages:*.package_type": "registry", # nur aus registry + }, + "converter": {}, + }, + "inv_raw_file": { + "columns": ( + ( "software.packages:*.+@hostname+name+path", "import_id" ), + ( "software.packages:*.name", "file_name" ), + ( "software.packages:*.size", "file_size" ), + ( "software.packages:*.path", "file_path" ), + ( "software.packages:*.vendor", "publisher" ), + ( "software.packages:*.summary", "product" ), + ( "software.packages:*.version", "product_version" ), + ( "@hostname", "import_device_id" ), + ), + "filter": { + "software.packages:*.package_type": "exe", # nur exe files + }, + "converter": {}, + }, + "inv_raw_generic(OS)": { + "columns": ( + ( "software.os.name", "generic_key" ), + ( "@hostname", "import_id" ), + ), + "filter": {}, + "converter": {}, + }, + "inv_raw_generic(Linux)": { + "columns": ( + ( "software.packages:*.+@hostname+name+version", "import_id" ), + ( "software.packages:*.name", "name" ), + ( "software.packages:*.version", "product_version" ), + ( "@hostname", "import_device_id" ), + ), + "filter": { + "software.packages:*.package_type": "deb", # nur exe files + }, + "converter": {}, + }, +} + +import os, sys, re, time, hashlib + +omd_root = os.environ["OMD_ROOT"] + +# both directories need to have a trailing slash "/" ! 
+inv_dir = "%s/var/check_mk/inventory/" % omd_root +out_dir = "/var/tmp/" + +if not omd_root: + print "This script is only executable as site user" + sys.exit(1) + +def is_list(relation): + list_start = "" + if type(relation) == dict: # filter and converter are dicts, check them too + relation = relation.keys() + for field in relation: + if not field.startswith("@"): + if ":*" in field: + is_list = True + list_start = field.split(":")[0] + else: + is_list = False + break + for field in relation: + if ( is_list != (":*" in field) or not field.startswith(list_start) ) \ + and not field.startswith("@") and not field.startswith("!"): + print "bad definition of relation, must be list or dict, not both:" + sys.exit(1) + return list_start + +def filt_it(package, relation): + filt_start = is_list(relation["filter"]) + elements = [col[0] for col in relation["columns"]] + list_start = is_list(elements) + if filt_start != list_start: # do not filter if filter does not fit + return False + for field in relation["filter"].keys(): + if field: + should_be = relation["filter"][field] + field = re.sub(list_start+":\*.", "", field) + for item in field.split("."): + value = package[item] + if type(value) in (str, int, float) and value == should_be: + return False + return True + +def convert_it(c_relation, item, field): + for c_field in c_relation.keys(): + if c_field == field: + item = c_relation[field](item) # apply the function defined to item + return item + +def print_line(out_rel, items): + outtxt = "\", \"".join(map(str,items)) + out_rel.write("\"") + out_rel.write("%s" % outtxt) + out_rel.write("\"\n") + +# special values starting with a "@" +def special_value(item, hostname): + if item == "@hostname": + return hostname + elif item == "@inventory_date": + return inventory_date[hostname] + else: + return "" + +def no_list_get(hostname, field): + out_line = "" + if field.startswith("!"): + out_line = re.sub("^!", "", field) + else: + subtree = all_data[hostname] + for item in field.split("."): + if item.startswith("@"): # take subtree from special_value + subtree = special_value(item,hostname) + else: + try: + subtree = subtree[item] + except: + break + if type(subtree) in (str, int, float): + out_line = convert_it(relations[ofs]['converter'], subtree,field) + return out_line + +def list_get(hostname, list_start): + items = [] + subtree = all_data[hostname] + for item in list_start.split("."): + try: + subtree = subtree[item] + except: + print " %s does not exist in database of host" % item + if type(subtree) == list: + for package in subtree: + if filt_it(package, relations[ofs]): + continue + for field in elements: + if field: + field = re.sub(list_start+":\*.", "", field) + for item in field.split("."): + if item.startswith("@"): # take subtree vom special_value + value = special_value(item,hostname) + else: + try: + value = package[item] + except: + break + if type(value) in (str, int, float): + items.append(value) + else: + items.append("") + return items + +# extract all data +all_data = {} +inventory_date = {} +for hostname in os.listdir(inv_dir): + # ignore gziped files and invisible files in directory for now + if hostname.endswith(".gz") or hostname.startswith("."): + continue + fn = inv_dir + hostname + if os.path.isfile(fn): + a = eval(open(fn,'r').read()) + all_data[hostname] = a + inventory_date[hostname] = os.path.getmtime(fn) + + +# loop over all relations, create an output file for each relation +for ofs in relations: + ofn = out_dir + ofs + out_rel = open(ofn,'w') + titles = [col[1] 
for col in relations[ofs]["columns"]] + print_line(out_rel, titles) + elements = [col[0] for col in relations[ofs]["columns"]] + list_start = is_list(elements) + if list_start == "": + for hostname in all_data: + print "creating relation %s for %s" % ( ofs, hostname ) + items = [] + for field in elements: + items.append(no_list_get(hostname, field)) + print_line(out_rel, items) + out_rel.close() + else: + for hostname in all_data: + print "creating relation %s for %s" % ( ofs, hostname ) + subtree = all_data[hostname] + for item in list_start.split("."): + try: + subtree = subtree[item] + except: + print " %s does not exist in database of host" % item + if type(subtree) == list: + for package in subtree: + if filt_it(package, relations[ofs]): + continue + items = [] + for field in elements: + if field: + field = re.sub(list_start+":\*.", "", field) + concat = "" + for item in field.split("."): + if item.startswith("@"): # take subtree vom special_value + value = special_value(item,hostname) + elif item.startswith("+"): + for item2 in item.split("+"): + if item2: + if item2.startswith("@"): # take subtree vom special_value + concat += special_value(item2,hostname) + else: + try: + concat += package[item2] + except: + continue + value = hashlib.md5(concat).hexdigest() + else: + try: + value = package[item] + except: + items.append("") + break + if type(value) in (str, int, float): + items.append(value) + else: + items.append("") + print_line(out_rel, items) + out_rel.close() diff -Nru check-mk-1.2.2p3/treasures/inventory_helper.sh check-mk-1.2.6p12/treasures/inventory_helper.sh --- check-mk-1.2.2p3/treasures/inventory_helper.sh 2013-03-04 11:48:41.000000000 +0000 +++ check-mk-1.2.6p12/treasures/inventory_helper.sh 2014-12-11 10:15:03.000000000 +0000 @@ -6,7 +6,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. @@ -43,8 +43,10 @@ # Use the automation API to run an inventory, only for new objects. check_mk --automation inventory new $HOST >/dev/null -# Then reschedule the inventory check right now to clear up. +# Then reschedule the service discovery check right now to clear up. 
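+# For reference, the Livestatus external command syntax used here is one line
+# per command: COMMAND [<unix time>] <NAME>;<arg1>;<arg2>;...
+# A sketch with a made-up host name (lq is OMD's helper that pipes a query
+# into the site's Livestatus socket):
+#
+#   echo "COMMAND [$(date +%s)] SCHEDULE_FORCED_SVC_CHECK;myhost;Check_MK Discovery;$(date +%s)" | lq
+#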
# (currently we're running it just once a day at the same time on all hosts) +echo "COMMAND [$now] SCHEDULE_FORCED_SVC_CHECK;$HOST;Check_MK Discovery;$now" | lq +# handle old service description echo "COMMAND [$now] SCHEDULE_FORCED_SVC_CHECK;$HOST;Check_MK inventory;$now" | lq } @@ -57,7 +59,9 @@ INVENTORY_INFO=`echo "GET services Columns: host_name long_plugin_output +Filter: description = Check_MK Discovery Filter: description = Check_MK inventory +Or: 2 Filter: plugin_output !~~ no unchecked" | lq` if [ "$INVENTORY_INFO" != "" ]; then File /tmp/tmpz1J_py/xquijR6Ryw/check-mk-1.2.2p3/treasures/livedump is a regular file while file /tmp/tmpz1J_py/GXS8RRKTxu/check-mk-1.2.6p12/treasures/livedump is a directory diff -Nru check-mk-1.2.2p3/treasures/liveproxy/liveproxyd check-mk-1.2.6p12/treasures/liveproxy/liveproxyd --- check-mk-1.2.2p3/treasures/liveproxy/liveproxyd 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/treasures/liveproxy/liveproxyd 2015-09-21 10:59:54.000000000 +0000 @@ -0,0 +1,1119 @@ +#!/usr/bin/python +# encoding: utf-8 + +import os, sys, getopt, signal, time, socket, select, traceback, re, gzip, StringIO + +VERSION="1.2.6p12" + +sites = {} +dump_interval = 5 # dump state once every five seconds + +# .--Proxy---------------------------------------------------------------. +# | ____ | +# | | _ \ _ __ _____ ___ _ | +# | | |_) | '__/ _ \ \/ / | | | | +# | | __/| | | (_) > <| |_| | | +# | |_| |_| \___/_/\_\\__, | | +# | |___/ | +# +----------------------------------------------------------------------+ +# | The actual proxy code | +# '----------------------------------------------------------------------' + +# State of all sites +g_sites = {} + +# Example for this dictionary: +# 'mysite': {'channels': [{'since': 1370420497.907061, +# 'socket': , +# 'state': 'ready'}, +# {'since': 1370420502.930155, +# 'socket': , +# 'state': 'busy'}, +# {'since': 1370420492.883395, +# 'socket': , +# 'state': 'heartbeat'}], +# 'clients': [{'since': 1370420648.132495, +# 'socket': , +# 'state': 'idle'}, +# {'channel': {'client': , +# 'since': 1370420695.609493, +# 'socket': , +# 'state': 'busy'}], +# 'heartbeat': {'channel': None, +# 'count': 3, +# 'since': 1370420507.965436}, +# 'last_failed_connect': 0, +# 'last_reset': 1370420492.879927, +# 'last_reload' : 1370420492, +# 'last_inventory_update' : 1370420492.879927, +# 'inventory_pid' : None, +# 'cache' : {}, +# 'socket': , +# 'state': 'ready'}} +# + + + +# The main loop of the daemon goes here + +g_need_restart = False +g_need_reload = False + +def liveproxyd_run(): + global g_need_restart, g_need_reload + + open_client_sockets() + last_dump = 0 + + while True: + + try: + if g_need_restart: + do_restart() + g_need_restart = False + if g_need_reload: + do_reload() + g_need_reload = False + + initiate_connections() + do_heartbeats() + + readable, writable = do_select(0.2) + + complete_connections(writable) + accept_new_clients(readable) + get_new_requests(readable) + distribute_requests() + get_responses(readable) # also heartbeats + send_responses(writable) + handle_client_timeouts() + garbage_collect_sockets() + collect_inventory_updates() + + now = time.time() + if dump_interval != None and now - last_dump > dump_interval: + dump_state() + last_dump = now + + except Exception, e: + if opt_debug: + raise + + if "Too many open files" in str(e): + log("Too many open files! 
Please increase the ulimit for 'nofiles'!")
+                log("Waiting for 5 seconds...")
+                time.sleep(5)
+                log("Restarting, in the hope that file descriptors will again be sufficient for a while.")
+                do_restart()
+
+            log("Ignoring exception: %s: %s" % (e, traceback.format_exc()))
+            time.sleep(1) # Avoid CPU loop in case of permanent error
+
+def initiate_connections():
+    # Create new channels to target sites. Nonblocking!
+    for sitename, siteconf in sites.items():
+        sitestate = g_sites[sitename]
+        channels = sitestate["channels"]
+        if len(channels) < siteconf["channels"]:
+            if time.time() - sitestate["last_failed_connect"] >= siteconf["connect_retry"]:
+                s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+                s.setblocking(0)
+                try:
+                    s.connect(siteconf["socket"])
+                except socket.error, e:
+                    try:
+                        errno = e.errno
+                    except:
+                        errno = None
+
+                    if errno != 115: # "Operation now in progress"
+                        sitestate["last_failed_connect"] = time.time()
+                        if opt_debug:
+                            raise
+                        else:
+                            log("Error: cannot create TCP channel to %s:%d: %s" % (siteconf["socket"] + (e,)))
+                        break
+
+                except Exception, e:
+                    sitestate["last_failed_connect"] = time.time()
+                    if opt_debug:
+                        raise
+                    log("Error: cannot create TCP channel to %s:%d: %s" % (siteconf["socket"] + (e,)))
+                    break
+
+                channels.append({ "socket" : s, "state" : "connecting", "since": time.time()})
+
+
+def do_heartbeats():
+    now = time.time()
+    for sitename, sitestate in g_sites.items():
+        if sitestate["channels"]:
+            rate, timeout = sites[sitename]["heartbeat"]
+            channel = sitestate["heartbeat"]["channel"]
+            since = sitestate["heartbeat"]["since"]
+            if channel: # wait for response
+                age = now - since
+                if age >= timeout:
+                    log("Heartbeat timeout after %.1f sec on channel %s/%d" % (
+                        age, sitename, channel["socket"].fileno()))
+                    sitestate["last_failed_connect"] = now
+                    disconnect_from_site(sitename)
+            else:
+                if now - since >= rate:
+                    if not send_heartbeat(sitename, sitestate):
+                        if now - since >= 2 * rate:
+                            log("Could not send heartbeat to %s for %d secs - no free channel" % (sitename, (2 * rate)))
+                            disconnect_from_site(sitename)
+
+
+
+def send_heartbeat(sitename, sitestate):
+    # find the free channel that has been unused for the longest time
+    now = time.time()
+    while True:
+        next_channel = None
+        for channel in sitestate["channels"]:
+            if channel["state"] == "ready":
+                if next_channel == None or channel["since"] < next_channel["since"]:
+                    next_channel = channel
+        if not next_channel:
+            #log("Cannot send heartbeat to %s: no channel ready" % sitename)
+            return False
+
+        else:
+            channel = next_channel
+            # log("Sending heartbeat to channel %s/%d" % (sitename, channel["socket"].fileno()))
+            try:
+                channel["socket"].send("GET status\nKeepAlive: on\nColumns: program_start mk_inventory_last\n\n")
+                channel["state"] = "heartbeat"
+                channel["since"] = now
+                sitestate["heartbeat"]["since"] = now
+                sitestate["heartbeat"]["channel"] = channel
+                return True
+            except Exception, e:
+                log("Cannot send heartbeat to channel %s/%d: %s" % (sitename, channel["socket"].fileno(), e))
+                channel["state"] = "error"
+
+
+# Close all connections. Site is considered to be dead
+def disconnect_from_site(sitename):
+    now = time.time()
+    log("Site %s is considered dead. Closing all connections."
% sitename) + sitestate = g_sites[sitename] + sitestate["channels"] = [] + sitestate["cache"] = {} + sitestate["state"] = "starting" + sitestate["heartbeat"] = { "since" : now, "channel" : None, "count" : 0} + sitestate["last_reload"] = now + sitestate["last_reset"] = now + for client in sitestate["clients"]: + if client["state"] == "wait_for_response": + client["state"] = "wait_for_channel" # request still stored there + + +def complete_connections(writable): + for sitename, sitestate in g_sites.items(): + for channel in sitestate["channels"]: + if channel["state"] == "connecting" and channel["socket"].fileno() in writable: + try: + channel["socket"].send("") + channel["socket"].setblocking(1) # avoid signals from interrupting us + log("Channel %s/%d successfully connected" % (sitename, channel["socket"].fileno())) + channel["state"] = "ready" + except Exception, e: + log("Failed to connect channel %s/%d: %s" % (sitename, channel["socket"].fileno(), e)) + channel["state"] = "error" + sitestate["last_failed_connect"] = time.time() + +# Master/Mega/Central poll(). We though we are the select() master. But select() +# is limited to 1024 filedescriptors in most Python versions. So we rather use +# poll(). If you have many sites and many users you will easily get more +# filedescriptors... +def do_select(timeout): + p = select.poll() + + for sitename, sitestate in g_sites.items(): + # outgoing connections currently building up + for channel in sitestate["channels"]: + sock = channel["socket"] + if channel["state"] == "connecting": + p.register(sock, select.POLLOUT) + + # new client connections + sock = sitestate["socket"] + p.register(sock, select.POLLIN) + + for client in sitestate["clients"]: + sock = client["socket"] + + # new requests from existing clients + if client["state"] == "idle": + p.register(sock, select.POLLIN) + + # clients ready to receive a response + elif client["state"] == "response": + p.register(sock, select.POLLOUT) + + # Responses from channels, also heartbeat responses + for channel in sitestate["channels"]: + if channel["state"] in [ "busy", "heartbeat" ]: + p.register(channel["socket"], select.POLLIN) + + try: + readylist = p.poll(timeout * 1000) + r_able = [] + w_able = [] + for fd, event in readylist: + if event & select.POLLIN: + r_able.append(fd) + if event & select.POLLOUT: + w_able.append(fd) + return r_able, w_able + + except select.error, e: + log("Error during poll(): %s" % e) + return [], [] + + + +def accept_new_clients(readable): + for sitename, sitestate in g_sites.items(): + if sitestate["socket"].fileno() in readable: + try: + s, addrinfo = sitestate["socket"].accept() + s.setblocking(1) + # log("Accepted new client %s/%d" % (sitename, s.fileno())) + sitestate["clients"].append({"socket" : s, "state" : "idle", "since" : time.time()}) + except Exception, e: + if opt_debug: + raise + log("Failed to accept new client for %s: %s" % (sitename, e)) + +def get_new_requests(readable): + for sitename, sitestate in g_sites.items(): + for client in sitestate["clients"]: + if client["state"] == "idle" and \ + (client["socket"].fileno() in readable or client.get("nextrequest")): + try: + request = receive_request(sitename, client) + if not request: + client["state"] = "closed" + else: + if sitestate["state"] == "starting": + respond_client_with_error(client, "Site is currently not reachable.") + elif not request.startswith("GET") and not request.startswith("COMMAND"): + log("Invalid request [%s] from client %s/%d" % ( + request.replace("\n", "\\n"), sitename, 
client["socket"].fileno())) + client["state"] = "closed" + elif not request.startswith("COMMAND") and "ResponseHeader: fixed16\n" not in request: + respond_client_with_error(client, + "Invalid request, you must specify ResponseHeader: fixed16.") + else: + # Try to find a matching response in our cache + if not respond_from_cache(sitename, sitestate, client, request): + client["state"] = "wait_for_channel" + client["since"] = time.time() + client["request"] = cache_regex.sub("", request) + except Exception, e: + if opt_debug: + raise + log("Cannot read request from client %s/%d: %s" % + (sitename, client["socket"].fileno(), e)) + +cache_regex = re.compile("\nCache: *([^\n]*)") +localtime_regex = re.compile("\nLocaltime:[^\n]*") + +def respond_from_cache(sitename, sitestate, client, request): + mo = cache_regex.search(request) + if not mo: + client["add_to_cache"] = False # do not cache the response + return False + + # Get the cache time option. It is either a number of + # seconds (not implemented) or the word "reload", which + # means: cache until the configuration of the site changed + # due to a reload. Currently we simply ignore this option, + # since we only know "reload". + # cache_time = mo.group(1) + + # Remove Localtime: header, since it contains a timestamp + # and therefore is always unique. + cleared_request = localtime_regex.sub("", request) + + # Now lets look into our cache if it has a cached response + response = sitestate["cache"].get(cleared_request) + if response: + client["response"] = response + client["response_offset"] = 0 + client["state"] = "response" + return True + else: + client["add_to_cache"] = cleared_request # get response into cache + return False + +def distribute_requests(): + for sitename, sitestate in g_sites.items(): + if sitestate["state"] != "ready": + continue + waiting_clients = [ client for client in sitestate["clients"] if client["state"] == "wait_for_channel"] + + # Sort after waiting time, we should be fair to all... 
+ waiting_clients.sort(cmp = lambda a, b: cmp(a["since"], b["since"])) + + # one channel must always be kept for heartbeat + allowed_channels = len([c for c in sitestate["channels"] if c["state"] in ["ready", "heartbeat"]]) + if allowed_channels <= 1: + sitestate["state"] = "busy" + else: + for channel in sitestate["channels"]: + if not waiting_clients: + break + if channel["state"] == "ready": + client = waiting_clients[0] + del waiting_clients[0] + forward_request(sitename, client, channel) + +def forward_request(sitename, client, channel): + cls = client["socket"] + chs = channel["socket"] + try: + chs.send(client["request"]) + if not client["request"].startswith("COMMAND"): + client["state"] = "wait_for_response" + channel["state"] = "busy" + client["channel"] = channel + channel["client"] = client + else: + client["request"] = "" + client["state"] = "idle" + except Exception, e: + if opt_debug: + raise + log("Error: %s" % e) + respond_client_with_error(client, str(e)) + + +def respond_client_with_error(client, message): + try: + while True: + try: + response = "400%12d\n%s\n" % (len(message) + 1, message) + client["socket"].send(response) + break + except socket.error, e: + if e.errno == 4: + continue # Interrupted system call + else: + raise + except Exception, e: + if opt_debug: + raise + log("Cannot send error message to client %d: %s" % ( + client["socket"].fileno(), e)) + client["state"] = "closed" + + if client.get("channel"): + channel = client["channel"] + del channel["client"] + del client["channel"] + + client["state"] = "idle" + client["request"] = "" + + + +# TODO: one malicious client can hang the whole proxy. In order +# to prevent this we'd need partial requests... +def receive_request(sitename, client): + # Note: Multisite can send several requests at once. For example + # a command and a wait query (reschedule button) + request = client.get("nextrequest", "") + client["nextrequest"] = "" + while "\n\n" not in request: + try: + chunk = client["socket"].recv(65536) + request += chunk + except socket.error, e: + if e.errno == 104: # Connection reset by peer + return None + elif e.errno != 4: # Interrupted system cal + raise + + if not chunk: + # log("Client %s/%d closed connection." % (sitename, client["socket"].fileno())) + return None + end = request.index("\n\n") + client["nextrequest"] = request[end+2:] + request = request[:end+2] + return request + + +def get_responses(readable): + for sitename, sitestate in g_sites.items(): + for channel in sitestate["channels"]: + if channel["socket"].fileno() in readable: + if channel["state"] == "busy": + receive_response(sitename, channel) + else: + receive_heartbeat(sitename, channel) + + + +def receive_response(sitename, channel): + client = channel.get("client") # None -> client timed out before response! + if not client: + log("Response from timed-out client arrived lately on channel %s/%d" % + (sitename, channel["socket"].fileno())) + # We always assume fixed16 as response header! 
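+    # For reference: a fixed16 header is exactly 16 bytes -- a 3-digit status
+    # code, the body length right-aligned in 12 characters, and a trailing
+    # newline, e.g. "200" + "%12d\n" % 123 for a 123 byte body. That is why
+    # the code below parses response[3:15] as the length and waits until
+    # len(response) == bodylength + 16 (respond_client_with_error() builds
+    # its header the same way).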
+    old_response = channel.get("response", "")
+    try:
+        chunk = channel["socket"].recv(65536)
+        if not chunk:
+            raise Exception("Connection closed by foreign host")
+
+        response = old_response + chunk
+        channel["response"] = response
+
+    except Exception, e:
+        log("Cannot read response from %s/%d: %s" %
+            (sitename, channel["socket"].fileno(), e))
+        if client:
+            client["channel"] = None
+            client["state"] = "wait_for_channel"
+        channel["state"] = "closed"
+        return
+
+    if len(response) < 16: # header not yet complete
+        return
+
+    try:
+        bodylength = int(response[3:15])
+    except Exception, e:
+        log("Malformed response header from channel %s/%d: [%s]" %
+            (sitename, channel["socket"].fileno(), response[:16]))
+        if client:
+            client["channel"] = None
+            client["state"] = "wait_for_channel"
+        channel["state"] = "error"
+        return
+
+    if len(response) > bodylength + 16:
+        log("Too large response on channel %s/%d (%d exceeding bytes: [%s])" % (
+            sitename, channel["socket"].fileno(), len(response) - bodylength - 16, response[bodylength + 16:]))
+        if client:
+            client["channel"] = None
+            client["state"] = "wait_for_channel"
+        channel["state"] = "error"
+        return
+
+    elif len(response) < bodylength + 16:
+        return
+
+    # Response complete
+    sitestatus = g_sites[sitename]
+    channel["state"] = "ready"
+    sitestatus["state"] = "ready" # at least one channel free
+    channel["response"] = ""
+
+    if client:
+        del channel["client"]
+        del client["channel"]
+        client["response"] = response
+        client["response_offset"] = 0
+        client["state"] = "response"
+        if client["add_to_cache"]:
+            cleared_request = client["add_to_cache"]
+            sitestatus["cache"][cleared_request] = response
+            del client["add_to_cache"]
+
+def send_responses(writable):
+    for sitename, sitestatus in g_sites.items():
+        for client in sitestatus["clients"]:
+            if client["state"] == "response" and client["socket"].fileno() in writable:
+
+                try:
+                    # CAUTION: sending to the client can block if the receiver
+                    # stalls us. We must not block here, though. We would need
+                    # a send queue and would have to wait in select(), etc.
+                    offset = client["response_offset"]
+                    chunk = client["response"][offset:offset + 8192]
+                    client["socket"].setblocking(0) # TEST TEST TEST
+                    bytes_sent = client["socket"].send(chunk)
+                    client["socket"].setblocking(1)
+                    if bytes_sent <= 0:
+                        raise Exception("Could not send any bytes of response to client")
+                    if offset + bytes_sent == len(client["response"]):
+                        client["state"] = "idle"
+                        del client["response"]
+                        del client["response_offset"]
+                    else:
+                        client["response_offset"] += bytes_sent
+
+                except Exception, e:
+                    if opt_debug:
+                        raise
+                    log("Cannot forward next %d bytes of response to client %s/%d: %s" %
+                        (len(chunk), sitename, client["socket"].fileno(), e))
+                    client["state"] = "error"
+                    del client["response"]
+                    del client["response_offset"]
+
+def receive_heartbeat(sitename, channel):
+    sitestate = g_sites[sitename]
+    try:
+        chunk = channel["socket"].recv(4096)
+        if not chunk:
+            log("Channel %s/%d closed by foreign host while reading heartbeat" %
+                (sitename, channel["socket"].fileno()))
+            disconnect_from_site(sitename)
+        else:
+            # We expect two timestamps (integer): the time of the last
+            # configuration change and the time of the last HW/SW-Inventory
+            # update. Example responses:
+            # 1414747788;0          -> no inventory has ever been done
+            # 1414747788;1414747545 -> last inventory was at 1414747545
+            if len(chunk) < 12 or not chunk[0].isdigit() or chunk[-1] != "\n" or chunk.count(";") != 1:
+                log("Channel %s/%d: invalid response \"%s\" to heartbeat" % (
+                    sitename, channel["socket"].fileno(), chunk))
+                disconnect_from_site(sitename)
+            else:
+                parts = chunk.split(";")
+                last_reload = int(parts[0])
+                mk_inventory_last = int(parts[1])
+                if mk_inventory_last: # Remote site has inventory data
+                    check_inventory_update(mk_inventory_last, sitename)
+
+                # Set site to ready if at least one other channel exists
+                if len(sitestate["channels"]) > 1:
+                    sitestate["state"] = "ready"
+                sitestate["state"] = "ready" # at least one connection is up
+                channel["state"] = "ready"
+                sitestate["heartbeat"]["since"] = time.time()
+                sitestate["heartbeat"]["channel"] = None
+                sitestate["heartbeat"]["count"] += 1
+                if sitestate["last_reload"] != last_reload:
+                    if sitestate["cache"]:
+                        log("Site %s might have new configuration. Dropping cache" % sitename)
+                        sitestate["cache"] = {}
+                    sitestate["last_reload"] = last_reload
+
+    except Exception, e:
+        if opt_debug:
+            raise
+        log("Exception while reading heartbeat from channel %s/%d: %s" %
+            (sitename, channel["socket"].fileno(), e))
+
+
+def handle_client_timeouts():
+    now = time.time()
+    for sitename, sitestatus in g_sites.items():
+        siteconf = sites[sitename]
+
+        for client in sitestatus["clients"]:
+            if client["state"] != "idle":
+                age = now - client["since"]
+                if client["state"] == "wait_for_channel" and \
+                    age >= siteconf["channel_timeout"]:
+                    respond_client_with_error(client, "Timeout while waiting for free Livestatus channel to site %s." %
+                                              sitename)
+                elif client["state"] == "wait_for_response" and \
+                    age > siteconf["query_timeout"]:
+                    respond_client_with_error(client, "Timeout while waiting for response from site %s."
% sitename) + + + + + +def open_client_sockets(): + if not os.path.exists(opt_socketdir): + os.makedirs(opt_socketdir) + + for sitename, siteconf in sites.items(): + create_unix_socket(sitename) + +def create_unix_socket(sitename): + path = opt_socketdir + "/" + sitename + if os.path.exists(path): + log("Removing left-over unix socket %s" % path) + os.remove(path) + try: + s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) + s.bind(path) + s.listen(10) + except Exception, e: + if opt_debug: + raise + bail_out("Cannot create unix socket %s: %s" % (path, e)) + + g_sites[sitename]["socket"] = s + +def garbage_collect_sockets(): + for sitename, sitestate in g_sites.items(): + sitestate["channels"] = [ + channel for channel in sitestate["channels"] + if channel["state"] not in [ "error", "closed" ]] + if len(sitestate["channels"]) == 0: + sitestate["state"] = "starting" + sitestate["clients"] = [ + channel for channel in sitestate["clients"] + if channel["state"] not in [ "error", "closed" ]] + +def format_time(t): + return "%s (%3d secs ago)" % ( + time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(t)), time.time() - t) + + +def dump_state(): + sf = file(opt_statedumppath + ".new", "w") + + sf.write("----------------------------------------------\n") + sf.write("Current state:\n") + now = time.time() + for sitename, sitestate in g_sites.items(): + sf.write("[%s]\n" % sitename) + sf.write(" State: %s\n" % sitestate["state"]) + sf.write(" Last Reset: %s\n" % format_time(sitestate["last_reset"])) + sf.write(" Site's last reload: %s\n" % format_time(sitestate["last_reload"])) + sf.write(" Last failed connect: %s\n" % format_time(sitestate["last_failed_connect"])) + sf.write(" Cached responses: %d\n" % len(sitestate["cache"])) + sf.write(" Last inventory update: %s\n" % format_time(sitestate["last_inventory_update"])) + sf.write(" PID of inventory update: %s\n" % sitestate["inventory_pid"]) + sf.write(" Channels:\n") + for channel in sitestate["channels"]: + sf.write(" %3d - %-18s- client: %4s - since: %s\n" % + (channel["socket"].fileno(), channel["state"], + channel.get("client") and channel["client"]["socket"].fileno() or "none", + format_time(channel["since"]))) + sf.write(" Clients:\n") + for client in sitestate["clients"]: + sf.write(" %3d - %-18s- channel: %4s - since: %s\n" % + (client["socket"].fileno(), client["state"], + client.get("channel") and client["channel"]["socket"].fileno() or "none", + format_time(client["since"]))) + + sf.write(" Heartbeat:\n") + hb = sitestate["heartbeat"] + sf.write(" heartbeats received: %d\n" % hb["count"]) + if hb["channel"]: + sf.write(" out since %.1fs\n" % (now - hb["since"])) + else: + sf.write(" next in %.1fs\n" % (sites[sitename]["heartbeat"][0] - (now - hb["since"]))) + + # import pprint + # sf.write("\n\n%s\n" % pprint.pformat(g_sites)) + os.rename(opt_statedumppath + ".new", opt_statedumppath) + + +#. +# .--HW/SW-Inventory-----------------------------------------------------. 
+# | ___ _ | +# | |_ _|_ ____ _____ _ __ | |_ ___ _ __ _ _ | +# | | || '_ \ \ / / _ \ '_ \| __/ _ \| '__| | | | | +# | | || | | \ V / __/ | | | || (_) | | | |_| | | +# | |___|_| |_|\_/ \___|_| |_|\__\___/|_| \__, | | +# | |___/ | +# +----------------------------------------------------------------------+ +# | Synchronization of distributed HW/SW-Inventory | +# '----------------------------------------------------------------------' + +def check_inventory_update(mk_inventory_last, sitename): + if not opt_inventory_dir: + return # only works if this is set + + if not os.path.exists(opt_inventory_dir): + os.makedirs(opt_inventory_dir) + + sitestatus = g_sites[sitename] + if sitestatus["inventory_pid"]: + return # Sync already running + + if mk_inventory_last <= sitestatus["last_inventory_update"]: + return # we are up-to-date + + # log("Site %s: new HW/SW-Inventory data since last update %d secs ago" % ( + # sitename, mk_inventory_last - sitestatus["last_inventory_update"])) + + # Inventory update goes in two steps: + # 1. Getting the list of all hosts that have new inventory + # 2. For each of those retriev the gzipped inventory data and store it + # locally + # We cannot do this here since we must not block our main process! + # Instead we spawn a helper process that inherits one of the channels, + # does the update and returns a status code. + + # Find a free channel + for cha in sitestatus["channels"]: + if cha["state"] == "ready": + channel = cha + break + else: + log("Currently no free channel, postponing until next heartbeat") + return + + channel["state"] = "inventory" + + # Now create helper process that inherits the filedescriptor of this + # channel. + pid = os.fork() + if pid: + sitestatus["inventory_pid"] = pid + sitestatus["inventory_new_last"] = mk_inventory_last + + else: + try: + do_inventory_update(sitename, channel, sitestatus["last_inventory_update"]) + sys.exit(0) + except Exception, e: + log("Inventory update of %s failed: %s" % (sitename, e)) + sys.exit(1) + + +def collect_inventory_updates(): + for sitename, sitestatus in g_sites.items(): + if sitestatus["inventory_pid"]: + pid, status = os.waitpid(sitestatus["inventory_pid"], os.WNOHANG) + if pid: + sitestatus["inventory_pid"] = None + for channel in sitestatus["channels"]: + if channel["state"] == "inventory": + channel["state"] = "ready" + if not status: # success + sitestatus["last_inventory_update"] = sitestatus["inventory_new_last"] + del sitestatus["inventory_new_last"] + + +def do_inventory_update(sitename, channel, since): + # Close all filedescriptors that we do not need. 
+    needed_fds = [
+        channel["socket"].fileno(),
+        g_logfile.fileno(),
+    ]
+
+    for fd in range(4096):
+        if fd not in needed_fds:
+            try:
+                os.close(fd)
+            except:
+                pass
+
+    # Get list of all hosts that have more recent inventory data
+    query = "GET hosts\n" \
+            "Columns: name mk_inventory_last\n" \
+            "Filter: mk_inventory_last > %d\n" \
+            "KeepAlive: on\n" \
+            "ResponseHeader: fixed16\n\n" % \
+            since
+
+    channel["socket"].send(query)
+    response = get_livestatus_response(channel["socket"])
+
+    entries = [ l.split(";") for l in response.split() ]
+
+    for host_name, timestamp in entries:
+        do_inventory_update_of(channel["socket"], sitename, host_name, int(timestamp))
+
+
+def get_livestatus_response(socket):
+    header = socket.recv(16)
+    if len(header) != 16:
+        raise Exception("Invalid response '%s' from remote site" % header)
+
+    status = header[:3]
+    length = int(header[3:].strip())
+    response = ""
+    while len(response) != length:
+        response += socket.recv(4096)
+
+    if status != "200":
+        raise Exception("Livestatus query failed: %s" % response)
+
+    return response
+
+
+# Update the inventory data of a host *if* its timestamp differs from
+# the file that we already have
+def do_inventory_update_of(socket, sitename, host_name, timestamp):
+    path = opt_inventory_dir + "/" + host_name
+    try:
+        last_mtime = int(os.stat(path).st_mtime)
+        if last_mtime == timestamp:
+            log("Inventory of %s is up to date" % host_name)
+            return
+    except:
+        pass
+
+    query = "GET hosts\n" \
+            "Columns: mk_inventory_gz\n" \
+            "Filter: name = %s\n" \
+            "KeepAlive: on\n" \
+            "ResponseHeader: fixed16\n\n" % host_name
+
+    socket.send(query)
+    response = get_livestatus_response(socket)
+    gzipped_invdata = response[:-1] # drop final linefeed
+    file(path + ".gz", "w").write(gzipped_invdata)
+    os.utime(path + ".gz", (timestamp, timestamp))
+    unzipped_invdata = gzip.GzipFile(fileobj=StringIO.StringIO(gzipped_invdata)).read()
+    file(path, "w").write(unzipped_invdata)
+    os.utime(path, (timestamp, timestamp))
+    log("Site %s: new HW/SW-Inventory data for %s (%d bytes)" % (
+        sitename, host_name, len(gzipped_invdata)))
+
+
+#.
+#   .--Daemon/main---------------------------------------------------------.
+#   |  ____                                      __               _        |
+#   | |  _ \  __ _  ___ _ __ ___   ___  _ __    / / __ ___   __ _(_)_ __   |
+#   | | | | |/ _` |/ _ \ '_ ` _ \ / _ \| '_ \  / / '_ ` _ \ / _` | | '_ \  |
+#   | | |_| | (_| |  __/ | | | | | (_) | | | |/ /| | | | | | (_| | | | | | |
+#   | |____/ \__,_|\___|_| |_| |_|\___/|_| |_/_/ |_| |_| |_|\__,_|_|_| |_| |
+#   |                                                                      |
+#   +----------------------------------------------------------------------+
+#   |  Global code, daemonize, getopt and usage                            |
+#   '----------------------------------------------------------------------'
+
+# Open logfile, fall back to stderr if this is not successful
+def open_log():
+    global g_logfile
+    try:
+        g_logfile = file(opt_logfilepath, "a")
+        g_logfile.flush()
+    except Exception, e:
+        if opt_debug:
+            raise
+        g_logfile = sys.stderr
+        log("Cannot open logfile %s: %s" % (opt_logfilepath, e))
+
+# Send message to logfile. In foreground mode we omit the timestamp
+def log(text):
+    if type(text) == unicode:
+        text = text.encode("utf-8")
+    try:
+        if not opt_foreground:
+            t = time.time()
+            g_logfile.write('%s.%06d ' % (time.strftime("%F %T", time.localtime(t)), int(t * 1000000) % 1000000))
+        g_logfile.write("%s\n" % text)
+        g_logfile.flush()
+    except:
+        sys.stderr.write("%s\n" % text)
+
+# Log a message, but only in verbose mode
+def verbose(text):
+    if opt_verbose:
+        log(text)
+
+
+def bail_out(reason):
+    log("FATAL ERROR: %s" % reason)
+    sys.exit(1)
+
+
+def usage():
+    sys.stdout.write("""Usage: liveproxyd [OPTIONS]
+
+  -v, --verbose        Enable verbose output
+  -g, --foreground     Do not daemonize, run in foreground
+  -c, --config CONFIG  Read configuration from file CONFIG
+      --debug          Enable debug mode (let exceptions through)
+  -h, --help           Show this help and exit
+  -V, --version        Print version and exit
+
+""")
+
+
+def daemonize(user=0, group=0):
+    # do the UNIX double-fork magic, see Stevens' "Advanced
+    # Programming in the UNIX Environment" for details (ISBN 0201563177)
+    try:
+        pid = os.fork()
+        if pid > 0:
+            # exit first parent
+            sys.exit(0)
+    except OSError, e:
+        sys.stderr.write("Fork failed (#1): %d (%s)\n" % (e.errno, e.strerror))
+        sys.exit(1)
+
+    # decouple from parent environment
+    # chdir -> don't prevent unmounting...
+    os.chdir("/")
+
+    # Create new process group with the process as leader
+    os.setsid()
+
+    # Set user/group depending on params
+    if group:
+        os.setregid(getgrnam(group)[2], getgrnam(group)[2])
+    if user:
+        os.setreuid(getpwnam(user)[2], getpwnam(user)[2])
+
+    # do second fork
+    try:
+        pid = os.fork()
+        if pid > 0:
+            sys.exit(0)
+    except OSError, e:
+        sys.stderr.write("Fork failed (#2): %d (%s)\n" % (e.errno, e.strerror))
+        sys.exit(1)
+
+    sys.stdout.flush()
+    sys.stderr.flush()
+
+    si = os.open("/dev/null", os.O_RDONLY)
+    so = os.open("/dev/null", os.O_WRONLY)
+    os.dup2(si, 0)
+    os.dup2(so, 1)
+    os.dup2(so, 2)
+    os.close(si)
+    os.close(so)
+
+    open_log()
+    log("Successfully daemonized with PID %d." % os.getpid())
+
+def signal_handler(signum, stack_frame):
+    global g_need_restart, g_need_reload
+    if signum in [ 2, 3, 15 ]:
+        log("Got signal %d. Good bye" % signum)
+        sys.exit(0)
+    elif signum == 10:
+        g_need_restart = True
+    elif signum == 12:
+        dump_state()
+    elif signum == 1:
+        g_need_reload = True
+
+def read_configuration():
+    if os.path.exists(opt_configfile):
+        try:
+            execfile(opt_configfile, globals())
+        except Exception, e:
+            if opt_debug:
+                raise
+            bail_out("Cannot read configuration file %s: %s" % (
+                opt_configfile, e))
+
+    # Create state object for all sites
+    now = time.time()
+    for sitename, siteconf in sites.items():
+        siteconf.setdefault("channels", 5)
+        siteconf.setdefault("heartbeat", (5, 2.0))
+        siteconf.setdefault("channel_timeout", 3.0)
+        siteconf.setdefault("query_timeout", 120.0)
+        siteconf.setdefault("connect_retry", 4.0)
+        g_sites[sitename] = {
+            "state": "starting",
+            "channels" : [],
+            "clients" : [],
+            "heartbeat" : {
+                "since" : time.time(),
+                "channel" : None,
+                "count" : 0,
+            },
+            "last_reset" : now,
+            "last_reload" : int(now),
+            "last_failed_connect" : 0,
+            "last_inventory_update" : 0,
+            "inventory_pid" : None,
+            "cache" : {},
+        }
+        if siteconf["channels"] <= 1:
+            bail_out("Invalid configuration for site %s: you need at least two channels" % sitename)
+
+def do_restart():
+    log("Restarting myself")
+    for fd in range(3, 8192):
+        try:
+            os.close(fd)
+        except:
+            pass
+    os.execvp("liveproxyd", sys.argv)
+
+def do_reload():
+    log("Reload not implemented.")
+
+os.unsetenv("LANG")
+
+opt_verbose = 0
+opt_debug = False
+opt_foreground = False
+g_logfile = sys.stderr
+
+# Please adapt this piece of code
+omd_root = os.getenv("OMD_ROOT")
+if omd_root:
+    opt_logfilepath   = omd_root + "/var/log/liveproxyd.log"
+    opt_statedumppath = omd_root + "/var/log/liveproxyd.state"
+    opt_pid_file      = omd_root + "/tmp/run/liveproxyd.pid"
+    opt_configfile    = omd_root + "/etc/check_mk/liveproxyd.mk"
+    opt_socketdir     = omd_root + "/tmp/run/liveproxy"
+    opt_inventory_dir = omd_root + "/var/check_mk/inventory"
+else:
+    curdir = os.path.abspath('.')
+    opt_logfilepath   = curdir + "/liveproxyd.log"
+    opt_statedumppath = curdir + "/liveproxyd.state"
+    opt_pid_file      = curdir + "/liveproxyd.pid"
+    opt_configfile    = curdir + "/liveproxyd.mk"
+    opt_socketdir     = curdir + "/liveproxy"
+    opt_inventory_dir = None # Not supported for non-OMD installations
+
+short_options = "hvVgc:"
+long_options = [ "help", "version", "verbose", "debug", "foreground", "config=" ]
+
+try:
+    opts, args = getopt.getopt(sys.argv[1:], short_options, long_options)
+
+    # first parse modifiers
+    for o, a in opts:
+        if o in [ '-v', '--verbose' ]:
+            opt_verbose += 1
+        elif o in [ '-d', '--debug' ]:
+            opt_debug = True
+        elif o in [ '-g', '--foreground' ]:
+            opt_foreground = True
+        elif o in [ '-c', '--config' ]:
+            opt_configfile = a
+
+    # now handle action options
+    for o, a in opts:
+        if o in [ '-h', '--help' ]:
+            usage()
+            sys.exit(0)
+        elif o in [ '-V', '--version' ]:
+            sys.stdout.write("liveproxyd version %s\n" % VERSION)
+            sys.exit(0)
+
+    if not opt_foreground:
+        daemonize()
+
+    log("----------------------------------------------------------")
+    log("Livestatus Proxy-Daemon starting...")
+
+    read_configuration()
+    log("Configured %d sites" % len(sites))
+
+    # Create PID file
+    file(opt_pid_file, "w").write("%d\n" % os.getpid())
+
+    # Install signal handler
+    signal.signal(1, signal_handler)   # HUP (--> reload)
+    signal.signal(2, signal_handler)   # INT
+    signal.signal(3, signal_handler)   # QUIT
+    signal.signal(15, signal_handler)  # TERM
+    signal.signal(10, signal_handler)  # USR1
+    signal.signal(12, signal_handler)  # USR2
+    signal.signal(13, signal.SIG_IGN)  # PIPE
+
+    # Now
let's go... + liveproxyd_run() + + # We reach this point, if the server has been killed by + # a signal or hitting Ctrl-C (in foreground mode) + + log("Successfully shut down.") + os.remove(opt_pid_file) + sys.exit(0) + +except Exception, e: + if opt_debug: + raise + bail_out(e) + diff -Nru check-mk-1.2.2p3/treasures/livestatus-to-nsca.sh check-mk-1.2.6p12/treasures/livestatus-to-nsca.sh --- check-mk-1.2.2p3/treasures/livestatus-to-nsca.sh 2013-11-05 09:22:37.000000000 +0000 +++ check-mk-1.2.6p12/treasures/livestatus-to-nsca.sh 1970-01-01 00:00:00.000000000 +0000 @@ -1,51 +0,0 @@ -#!/bin/sh -# +------------------------------------------------------------------+ -# | ____ _ _ __ __ _ __ | -# | / ___| |__ ___ ___| | __ | \/ | |/ / | -# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | -# | | |___| | | | __/ (__| < | | | | . \ | -# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | -# | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | -# +------------------------------------------------------------------+ -# -# This file is part of Check_MK. -# The official homepage is at http://mathias-kettner.de/check_mk. -# -# check_mk is free software; you can redistribute it and/or modify it -# under the terms of the GNU General Public License as published by -# the Free Software Foundation in version 2. check_mk is distributed -# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- -# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A -# PARTICULAR PURPOSE. See the GNU General Public License for more de- -# ails. You should have received a copy of the GNU General Public -# License along with GNU Make; see the file COPYING. If not, write -# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, -# Boston, MA 02110-1301 USA. - -# A helper script that build a bridge between modern livedump -# for state export while still using legacy NSCA as transport. -# if NSCA is not a strict requirement, walk on and try livedump! - -# Advantages: -# - automagic detection of NSCA-enabled devices -# - use one bulk transfer even with NCSA -# - can run every minute - -# Disadvantages: -# - still using NSCA -# - still have to maintain the config on the NSCA receiver. - - -# Edit these parameters and the livestatus path to match your submission config -# or source them from your submit script. -NagiosDir="/usr/local/nagios" -NscaBin="$NagiosDir/libexec/send_nsca" -NscaCfg="$NagiosDir/etc/send_nsca.cfg" -LiveStatusPipe="$NagiosDir/var/rw/livestatus.cmd" -NagiosHost="nagioshost" - -# Add obsess_over_host = 1 to your filter if you wish to supress superfluous hosts. -echo "GET services -Columns: host_name description state plugin_output -Filter: obsess_over_service = 1" | unixcat $LiveStatusPipe | tr \; "\t" | $NscaBin $NagiosHost -c $NscaCfg diff -Nru check-mk-1.2.2p3/treasures/localchecks/check_bi_local.py check-mk-1.2.6p12/treasures/localchecks/check_bi_local.py --- check-mk-1.2.2p3/treasures/localchecks/check_bi_local.py 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/treasures/localchecks/check_bi_local.py 2015-09-21 10:59:54.000000000 +0000 @@ -0,0 +1,116 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . 
\ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. + +# Example for creating real Nagios checks from BI aggregations. + +# Installation: +# 1. Put this file in /usr/lib/check_mk_agent/local +# 2. Make the file executable +# 3. Add a correct url_prefix (OMD site and slash) +# user with read access to Multisite. +# 4. Add password OR automation secret of this user + +url_prefix = "" # non-OMD installations +# url_prefix = "mysite/" # with OMD site name + +# Authentication credentials +user = "omdadmin" + +# use password OR automation_secret (you do not need both of them!!) +# set the other one to the empty string "" +# either: +password = "omd" +automation_secret="" + +# or: +# password = "" +# automation_secret = "LSEGRILPWQVLDBCYCKOC" + +# set "http" or "https" here +protocol = "http" +# protocol = "https" + +# Do you want to accept any Certificate when using HTTPS? 
+# You may set this to True or False +# You should leave it to False wherever possible +# You need to change this to True if using a self signed certificate +# Please note that this might be a security issue because then +# *every* SSL certificate is accepted +# If using http as protocol, this setting is ignored +accept_any_certificate = False +# accept_any_certificate = True + +# Do not change anything below + +import os, sys + +if protocol != "https": + protocol = "http" + +if protocol == "https" and accept_any_certificate == True: + cert_option = "--insecure" +else: + cert_option = "" + +if automation_secret != "": + url = '%s://localhost/%scheck_mk/view.py?view_name=aggr_summary&output_format=python' \ + '&_username=%s&_secret=%s' % (protocol, url_prefix, user, automation_secret) +elif password != "": + url = '%s://localhost/%scheck_mk/login.py?_login=1&_username=%s&_password=%s' \ + '&_origtarget=view.py%%3Fview_name=aggr_summary%%26output_format=python' % \ + (protocol, url_prefix, user, password) +else: + sys.stderr.write("You need to specify a password or an automation secret in the script source\n") + sys.exit(1) + + +try: + command = "curl -u \"%s:%s\" -b /dev/null -L --noproxy localhost %s --silent '%s'" % \ + (user, password, cert_option, url) + output = os.popen(command).read() + data = eval(output) +except: + sys.stderr.write("Invalid output from URL %s:\n" % url) + sys.stderr.write(output) + sys.stderr.write("Command was: %s\n" % command) + sys.exit(1) + +states = { + "OK" : 0, + "WARN" : 1, + "CRIT" : 2, + "UNKNOWN" : 3, +} + +for name, state, output in data[1:]: + state_nr = states.get(state, -1) + descr = "BI_Aggr_" + name.replace(" ", "_") + if state_nr != -1: + text = "%d %s - %s" % (state_nr, descr, state) + if output: + text += " - " + output + print text.encode("utf-8") + diff -Nru check-mk-1.2.2p3/treasures/localchecks/check_fstab_mounts check-mk-1.2.6p12/treasures/localchecks/check_fstab_mounts --- check-mk-1.2.2p3/treasures/localchecks/check_fstab_mounts 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/treasures/localchecks/check_fstab_mounts 2014-12-11 10:15:03.000000000 +0000 @@ -0,0 +1,38 @@ +#!/usr/bin/python +# This localcheck makes sure that every filesystem that is listed in /etc/fstab to +# be mounted automatically, also appears mounted in /proc/mounts + +# /dev/mapper/vg0-lv--root / ext4 errors=remount-ro 0 1 +# # /boot was on /dev/md0 during installation +# UUID=bec44dfa-7c70-4da7-857a-2e324cc230bd /boot ext4 defaults 0 2 +# /dev/mapper/vg0-lv--home /home ext4 defaults 0 2 +# tmpfs /omd/sites/aq/tmp tmpfs noauto,user,mode=755,uid=aq,gid=aq 0 0 +# tmpfs /opt/omd/sites/zentrale/tmp tmpfs noauto,user,mode=755,uid=zentrale,gid=zentrale 0 0 +# tmpfs /opt/omd/sites/hirn/tmp tmpfs noauto,user,mode=755,uid=hirn,gid=hirn 0 0 +# tmpfs /opt/omd/sites/heute/tmp tmpfs noauto,user,mode=755,uid=heute,gid=heute 0 0 + +mounted = [ l.split()[1] for l in file("/proc/mounts") ] + +missing = [] +count = 0 +for line in file("/etc/fstab"): + if not line.strip() or line.strip().startswith("#"): + continue + + device, mountpoint, fstype, options, rest = line.split(None, 4) + options = options.split(",") + expected = "noauto" not in options and fstype != "swap" + if expected and mountpoint not in mounted: + missing.append("%s is not mounted on %s" % (device, mountpoint)) + elif expected: + count += 1 + +if missing: + state = 2 + output = ", ".join(missing) + +else: + state = 0 + output = "All %d expected filesystems of /etc/fstab are mounted" % count + +print "%d 
Mounted_Filesystems - %s" % (state, output) diff -Nru check-mk-1.2.2p3/treasures/localchecks/check_mount_rw check-mk-1.2.6p12/treasures/localchecks/check_mount_rw --- check-mk-1.2.2p3/treasures/localchecks/check_mount_rw 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/treasures/localchecks/check_mount_rw 2014-07-04 17:50:27.000000000 +0000 @@ -0,0 +1,105 @@ +#!/bin/bash
+
+#The MIT License (MIT)
+#Henry Huang (http://blog.unicsolution.com)
+#Latest version: https://github.com/bbhenry/check_mk_local/edit/master/check_mount_rw
+
+PROGNAME=`basename $0`
+PROGPATH=`echo $0 | sed -e 's,[\\/][^\\/][^\\/]*$,,'`
+REVISION="1.0"
+FQDNNAME=`hostname -f`
+TESTFILE="test-rw-$FQDNNAME"
+RAND=$RANDOM
+
+print_usage() {
+    echo "Usage: $PROGNAME"
+}
+
+print_help() {
+    print_revision $PROGNAME $REVISION
+    echo ""
+    print_usage
+    echo ""
+    echo "This plugin writes a temporary file to all detected nfs and glusterfs directories and reads it back to verify that the network mount points are working"
+    echo ""
+    exit 0
+}
+
+check_mount_rw() {
+    MOUNTPOINTS=(`grep -E 'glusterfs|nfs' /proc/mounts | awk '{ gsub("fuse.glusterfs","glusterfs",$3); print $2":"toupper($3) }'`)
+
+    for a in "${MOUNTPOINTS[@]}"
+    do
+        DIRECTORY=`echo $a | cut -f1 -d:`
+        CONNECTIONTYPE=`echo $a | cut -f2 -d:`
+
+        timeout 1s ls $DIRECTORY/$TESTFILE
+        RET_V=$?
+        if [[ $RET_V -eq 124 || $RET_V -eq 137 ]]; then
+            echo "2 Mount_${CONNECTIONTYPE}_RW_:$DIRECTORY - Accessing mount point $DIRECTORY timed out"
+            continue
+        elif [[ $RET_V -eq 0 ]]; then
+            echo "2 Mount_${CONNECTIONTYPE}_RW_:$DIRECTORY - Test file $DIRECTORY/$TESTFILE already exists"
+            rm -f $DIRECTORY/$TESTFILE
+            continue
+        fi
+
+        timeout 1s echo $RAND > $DIRECTORY/$TESTFILE
+        RET_W=$?
+        if [[ $RET_W -eq 124 || $RET_W -eq 137 ]]; then
+            echo "2 Mount_${CONNECTIONTYPE}_RW_:$DIRECTORY - Writing test file to $DIRECTORY/$TESTFILE timed out"
+            continue
+        elif [[ $RET_W -ne 0 ]]; then
+            echo "3 Mount_${CONNECTIONTYPE}_RW_:$DIRECTORY - Unknown write error $RET_W has occurred while writing test file to $DIRECTORY"
+            continue
+        fi
+
+        TESTREAD=`timeout 1s cat $DIRECTORY/$TESTFILE`
+        RET_R=$?
+        if [[ $RET_R -eq 124 || $RET_R -eq 137 ]]; then
+            echo "2 Mount_${CONNECTIONTYPE}_RW_:$DIRECTORY - Reading test file from $DIRECTORY/$TESTFILE timed out"
+            continue
+        elif [[ $RET_R -ne 0 ]]; then
+            echo "3 Mount_${CONNECTIONTYPE}_RW_:$DIRECTORY - Unknown read error $RET_R has occurred while reading test file from $DIRECTORY/$TESTFILE"
+            continue
+        fi
+
+        if [[ $TESTREAD -eq $RAND ]]; then
+            timeout 1s rm -f $DIRECTORY/$TESTFILE
+            RET_RM=$?
+            if [[ $RET_RM -eq 124 || $RET_RM -eq 137 ]]; then
+                echo "2 Mount_${CONNECTIONTYPE}_RW_:$DIRECTORY - Removing the test file from $DIRECTORY/$TESTFILE timed out"
+                continue
+            elif [[ $RET_RM -ne 0 ]]; then
+                echo "2 Mount_${CONNECTIONTYPE}_RW_:$DIRECTORY - Failed to remove the test file from $DIRECTORY/$TESTFILE"
+                continue
+            else
+                echo "0 Mount_${CONNECTIONTYPE}_RW_:$DIRECTORY - Write and read test to the $DIRECTORY mount point was successful"
+                continue
+            fi
+        else
+            echo "2 Mount_${CONNECTIONTYPE}_RW_:$DIRECTORY - Content in the test file from $DIRECTORY/$TESTFILE does not match the source input"
+            continue
+        fi
+    done
+}
+
+while getopts "hv" opt; do
+    case "$opt" in
+        h)
+            print_help
+            exit 0
+            ;;
+        v)
+            echo "$PROGNAME version: $REVISION"
+            exit 0
+            ;;
+        \?)
+ print_help + exit 0 + ;; + esac +done + +check_mount_rw +exit 0 diff -Nru check-mk-1.2.2p3/treasures/localchecks/zombies check-mk-1.2.6p12/treasures/localchecks/zombies --- check-mk-1.2.2p3/treasures/localchecks/zombies 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/treasures/localchecks/zombies 2014-12-11 10:15:03.000000000 +0000 @@ -0,0 +1,5 @@ +#!/bin/bash + +zombies=$( ps aux | awk '{ print $8 " " $2 }' | grep -w Z | wc -l ) + +echo "P Zombies zombies=$zombies;1;3" diff -Nru check-mk-1.2.2p3/treasures/migrate_cpu_load.sh check-mk-1.2.6p12/treasures/migrate_cpu_load.sh --- check-mk-1.2.2p3/treasures/migrate_cpu_load.sh 2013-03-04 11:48:41.000000000 +0000 +++ check-mk-1.2.6p12/treasures/migrate_cpu_load.sh 2014-10-30 13:30:24.000000000 +0000 @@ -6,7 +6,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. diff -Nru check-mk-1.2.2p3/treasures/mknotifyd check-mk-1.2.6p12/treasures/mknotifyd --- check-mk-1.2.2p3/treasures/mknotifyd 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/treasures/mknotifyd 2015-09-21 10:59:54.000000000 +0000 @@ -0,0 +1,606 @@ +#!/usr/bin/python +# encoding: utf-8 +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2012 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. + + +import socket, os, time, sys, getopt, signal, thread, pprint, re, select, subprocess, stat, pickle, uuid +from pwd import getpwnam +from grp import getgrnam + + +VERSION="1.2.6p12" +# .--Helper functions----------------------------------------------------. 
+# | _ _ _ | +# | | | | | ___| |_ __ ___ _ __ ___ | +# | | |_| |/ _ \ | '_ \ / _ \ '__/ __| | +# | | _ | __/ | |_) | __/ | \__ \ | +# | |_| |_|\___|_| .__/ \___|_| |___/ | +# | |_| | +# +----------------------------------------------------------------------+ +# | Various helper functions | +# '----------------------------------------------------------------------' +def format_exception(): + import StringIO, traceback + txt = StringIO.StringIO() + t, v, tb = sys.exc_info() + traceback.print_exception(t, v, tb, None, txt) + return txt.getvalue() + +def bail_out(reason): + log("FATAL ERROR: %s" % reason) + log("%s" % format_exception()) + sys.exit(1) + +def make_parentdirs(file_path): + dir_path = os.path.dirname(file_path) + if not os.path.exists(dir_path): + os.makedirs(dir_path) + +def process_exists(pid): + try: + os.kill(pid, 0) + return True + except: + return False + +def open_logfile(): + global g_logfile + g_logfile = file(g_logfile_path, "a") + +def log(text): + global g_logfile + if type(text) == unicode: + text = text.encode("utf-8") + try: + g_logfile.write('%s %s\n' % (time.strftime("%F %T", time.localtime()), text)) + g_logfile.flush() + except: + sys.stderr.write("%s\n" % text) + +def verbose(text, level = 1): + if opt_verbose >= level: + log(text) + +#. +# .--Spoolfiles Handler--------------------------------------------------. +# | ____ _ __ _ _ | +# | / ___| _ __ ___ ___ | |/ _(_) | ___ ___ | +# | \___ \| '_ \ / _ \ / _ \| | |_| | |/ _ \/ __| | +# | ___) | |_) | (_) | (_) | | _| | | __/\__ \ | +# | |____/| .__/ \___/ \___/|_|_| |_|_|\___||___/ | +# | |_| | +# | _ _ _ _ | +# | | | | | __ _ _ __ __| | | ___ _ __ | +# | | |_| |/ _` | '_ \ / _` | |/ _ \ '__| | +# | | _ | (_| | | | | (_| | | __/ | | +# | |_| |_|\__,_|_| |_|\__,_|_|\___|_| | +# | | +# +----------------------------------------------------------------------+ +# | Processes the spoolfiles in the spool and deferred directories | +# | by processing them with 'cmk --notify spoolfile {name}' or | +# | forwarding them to another mknotifyd instance if applicable | +# '----------------------------------------------------------------------' + +class SpoolfilesHandler: + def __init__(self): + self._should_terminate = False + self._is_running = False + + def run(self): + log("Starting SpoolfilesHandler") + self._is_running = True + try: + while not self._should_terminate: + self.process_directory(g_spool_dir) + self.process_directory(g_deferred_dir, g_config["notification_deferred_retention_time"]) + time.sleep(1) + except Exception, e: + log("Error processing spoolfile %s" % format_exception()) + + log("Stopping SpoolfilesHandler") + self._is_running = False + + + def process_directory(self, dir_path, files_older_than = 3): + for root, dirs, files in os.walk(dir_path): + for spoolfile in files: + now = time.time() + spoolfile_process_result = -1 + # Check spoolfile type + # Spoolfiles with the key forward are handled locally + spoolfile_path = "%s/%s" % (root, spoolfile) + file_age = now - os.stat(spoolfile_path)[8] + + if file_age < files_older_than: + continue + + verbose("Processing spoolfile: %s" % spoolfile_path) + try: + content = eval(file(spoolfile_path).read()) + if not content.get("context"): + raise Exception("Unable to find key context") + except Exception, e: + log("Invalid spoolfile %s\n%s" % (spoolfile_path,e)) + now = time.time() + os.utime(spoolfile_path, (now, now)) + continue + + # Spool file for forwarding to remote host. 
Contains a raw context + if content.get("forward"): + response_text = "" + try: + sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + sock.settimeout(10) + host, port = content["forward"].split(':',1) + del content["forward"] + verbose("Forward notification to %s %s" % (host, port), 2) + sock.connect((host, int(port))) + data_to_send = pickle.dumps(content) + sock.send("%10d" % len(data_to_send)) + sock.send(data_to_send) + + # Wait for OK response + while True: + readable = select.select([sock], [], [], 5)[0] + data = None + try: + chunk = sock.recv(16) + response_text += chunk + if not chunk: + break + except: + break # Error while reading + except Exception, e: + # Connection problems + log("Failed to forward notification %s to %s:%s: %s" % (spoolfile_path, host, port, e)) + verbose(format_exception(), 2) + spoolfile_process_result = response_text != "OK" and 1 or 0 + + # Spool file for asynchronous local delivery. Contains a plugin context + else: + spoolfile_process_result = os.system("cmk --notify spoolfile %s" % spoolfile_path) + + verbose("process result <%d> of file %s " % (spoolfile_process_result, spoolfile_path), 2) + if spoolfile_process_result == 1: + # Moving logfile to deferred and retry later + deferredfile_path = "%s/%s" % (g_deferred_dir, spoolfile) + os.rename(spoolfile_path, deferredfile_path) + now = time.time() + os.utime(deferredfile_path, (now, now)) + else: + os.remove(spoolfile_path) + +#. +# .--TCP-Server----------------------------------------------------------. +# | _____ ____ ____ ____ | +# | |_ _/ ___| _ \ / ___| ___ _ ____ _____ _ __ | +# | | || | | |_) |___\___ \ / _ \ '__\ \ / / _ \ '__| | +# | | || |___| __/_____|__) | __/ | \ V / __/ | | +# | |_| \____|_| |____/ \___|_| \_/ \___|_| | +# | | +# +----------------------------------------------------------------------+ +# | Receives TCP Messages from foreign mknotifyd instances and creates | +# | spoolfiles out of it | +# '----------------------------------------------------------------------' + +class TcpServer: + def __init__(self): + self._tcp_socket = None + self._should_terminate = False + self._is_running = False + self._reopen_sockets = False + + def open_sockets(self): + listen_port = g_config["notification_daemon_listen_port"] + log("Listen for remote notifications at port %d" % listen_port) + try: + self._tcp_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + self._tcp_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) + self._tcp_socket.bind(("0.0.0.0", listen_port)) + self._tcp_socket.listen(200) + except: + log("Error opening socket.\n%s" % format_exception()) + self._should_terminate = True + + def close_sockets(self): + if self._tcp_socket: + self._tcp_socket.close() + self._tcp_socket = None + log("No longer listen for remote notificiations") + + def run(self): + self._should_terminate = False + self._is_running = True + log("Starting TcpServer") + self.open_sockets() + + handled_connections = 0 # Debug info + while not self._should_terminate: + readable = select.select([self._tcp_socket], [], [], 0.2)[0] + for s in readable: + client_data = "" + chunk_count = 0 + client_socket, addr_info = s.accept() + from_name = "%s:%s" % addr_info + data_ready = True + data_header = client_socket.recv(10) + data_size = int(data_header) + while True: + try: + chunk = client_socket.recv(data_size) + chunk_count += 1 + client_data += chunk + if len(client_data) >= data_size: + break + + # Break if not data was received for the last 5 seconds + data_ready = 
select.select([client_socket], [], [], 5)[0] + if not data_ready: + break + except: + break # Error while reading + + if opt_verbose > 1: + verbose("Received notification %d from %s in %d chunks" % ( handled_connections, from_name, chunk_count), 2) + handled_connections = handled_connections + 1 + + try: + content = pickle.loads(client_data) + if content: + if not os.path.exists(g_spool_dir): + os.makedirs(g_spool_dir) + spoolfile = "%s/%s" % (g_spool_dir, uuid.uuid1()) + file(spoolfile,"w").write(pprint.pformat(content)) + verbose("client data processed - sending OK", 2) + client_socket.send("OK") + + client_socket.close() + except Exception, e: + log("Error processing data from %s: %s\n%s" % (from_name, e, format_exception())) + try: + if client_socket: + client_socket.send("ERROR") + client_socket.close() + client_socket = None + except: + pass + client_socket = None + if self._reopen_sockets: + log("Reopen TCP socket") + self.close_sockets() + self.open_sockets() + self._reopen_sockets = False + + log("Stopping TcpServer") + self.close_sockets() + self._is_running = False + + +#. +# .--Daemonize-----------------------------------------------------------. +# | ____ _ | +# | | _ \ __ _ ___ _ __ ___ ___ _ __ (_)_______ | +# | | | | |/ _` |/ _ \ '_ ` _ \ / _ \| '_ \| |_ / _ \ | +# | | |_| | (_| | __/ | | | | | (_) | | | | |/ / __/ | +# | |____/ \__,_|\___|_| |_| |_|\___/|_| |_|_/___\___| | +# | | +# +----------------------------------------------------------------------+ +# | Code for daemonizing | +# '----------------------------------------------------------------------' + +def daemonize(user=0, group=0): + # do the UNIX double-fork magic, see Stevens' "Advanced + # Programming in the UNIX Environment" for details (ISBN 0201563177) + try: + pid = os.fork() + if pid > 0: + # exit first parent + sys.exit(0) + except OSError, e: + sys.stderr.write("Fork failed (#1): %d (%s)\n" % (e.errno, e.strerror)) + sys.exit(1) + + # decouple from parent environment + # chdir -> don't prevent unmounting... + os.chdir("/") + + # Create new process group with the process as leader + os.setsid() + + # Set user/group depending on params + if group: + os.setregid(getgrnam(group)[2], getgrnam(group)[2]) + if user: + os.setreuid(getpwnam(user)[2], getpwnam(user)[2]) + + # do second fork + try: + pid = os.fork() + if pid > 0: + sys.exit(0) + except OSError, e: + sys.stderr.write("Fork failed (#2): %d (%s)\n" % (e.errno, e.strerror)) + sys.exit(1) + + sys.stdout.flush() + sys.stderr.flush() + + si = os.open("/dev/null", os.O_RDONLY) + so = os.open("/dev/null", os.O_WRONLY) + os.dup2(si, 0) + os.dup2(so, 1) + os.dup2(so, 2) + os.close(si) + os.close(so) + + log("Daemonized with PID %d." % os.getpid()) + + +def load_configuration(): + global g_config, g_config_changed + last_config = g_config.copy() + config_dir = "%s/mknotifyd.d" % g_config_dir + + try: + list_of_files = reduce(lambda a,b: a+b, + [ [ "%s/%s" % (d, f) for f in fs if f.endswith(".mk")] + for d, sb, fs in os.walk(config_dir) ], []) + list_of_files.sort(cmp = cmp_config_paths) + for path in list_of_files: + verbose("Reading configuration file %s" % path, 3) + execfile(path, {}, g_config) + except: + g_config = last_config + + if last_config != g_config: + log("Configuration has changed") + g_config_changed = True + +# This function has been stolen from check_mk.py. +# Helper function that determines the sort order of the +# configuration files. The following two rules are implemented: +# 1. 
*.mk files in the same directory will be read
+#    according to their lexical order.
+# 2. subdirectories in the same directory will be
+#    scanned according to their lexical order.
+# 3. subdirectories of a directory will always be read *after*
+#    the *.mk files in that directory.
+def cmp_config_paths(a, b):
+    pa = a.split('/')
+    pb = b.split('/')
+    return cmp(pa[:-1], pb[:-1]) or \
+           cmp(len(pa), len(pb)) or \
+           cmp(pa, pb)
+
+def usage():
+    sys.stdout.write("""Usage: mknotifyd [OPTIONS]
+
+  -v, --verbose    Enable verbose output, twice for more details
+  -g, --foreground Do not daemonize, run in foreground
+  -s, --single     Single shot, exit after one cycle
+  -h, --help       Show this help and exit
+  -V, --version    Print version and exit
+
+""")
+    if os.getenv("OMD_ROOT"):
+        sys.stdout.write("""You are running OMD, which is generally a good idea.
+The following defaults are set:
+
+  Config dir: %(g_config_dir)s
+  Var dir:    %(g_var_dir)s
+  PID file:   %(g_pid_file)s
+  Log file:   %(g_logfile_path)s
+
+""" % globals())
+
+
+def run_thread(run_function, args=()):
+    return thread.start_new_thread(run_function, args)
+
+def run_notifyd():
+    global g_tcp_server, g_spoolfiles_handler
+    global g_spool_dir, g_deferred_dir
+    global g_config_changed
+
+    g_spool_dir    = "%s/notify/spool" % g_var_dir
+    g_deferred_dir = "%s/notify/deferred" % g_var_dir
+
+    if not os.path.exists(g_spool_dir):
+        os.makedirs(g_spool_dir)
+
+    if not os.path.exists(g_deferred_dir):
+        os.makedirs(g_deferred_dir)
+
+    # Start worker threads
+    if g_config["notification_daemon_listen_port"]:
+        run_thread(g_tcp_server.run)
+
+    run_thread(g_spoolfiles_handler.run)
+
+    while True:
+        try:
+            time.sleep(1)
+            # Read configuration again and check for changes.
+            # Our tcp server might need a restart after its config has changed
+            load_configuration()
+            if g_config_changed:
+                if g_config["notification_daemon_listen_port"]:
+                    if not g_tcp_server._is_running:
+                        run_thread(g_tcp_server.run)
+                    else:
+                        g_tcp_server._reopen_sockets = True
+                else:
+                    g_tcp_server._should_terminate = True
+                g_config_changed = False
+                time.sleep(0.2)
+
+            # Check if worker threads are still running
+            if g_config["notification_daemon_listen_port"] and not g_tcp_server._is_running:
+                log("TcpServer thread crashed. Restarting...")
+                run_thread(g_tcp_server.run)
+
+            if not g_spoolfiles_handler._is_running:
+                log("SpoolfilesHandler thread crashed. Restarting...")
+                run_thread(g_spoolfiles_handler.run)
+
+            if opt_single_cycle:
+                raise MKSignalException(1)
+
+        except MKSignalException, e:
+            # Initiate shutdown
+            g_tcp_server._should_terminate = True
+            g_spoolfiles_handler._should_terminate = True
+            now = time.time()
+            while (g_tcp_server._is_running or g_spoolfiles_handler._is_running) \
+                  and time.time() - now < 6:
+                time.sleep(0.1)
+
+            if g_tcp_server._is_running:
+                log("Error: Couldn't stop TcpServer thread")
+            if g_spoolfiles_handler._is_running:
+                log("Error: Couldn't stop SpoolfilesHandler thread")
+            break
+
+class MKSignalException(Exception):
+    def __init__(self, signum):
+        Exception.__init__(self, "Got signal %d" % signum)
+        self._signum = signum
+
+def signal_handler(signum, stack_frame):
+    log("Got signal %d" % signum)
+    raise MKSignalException(signum)
+
+
+#.
+# .--Main----------------------------------------------------------------.
+# |                     __  __       _                                   |
+# |                    |  \/  | __ _(_)_ __                              |
+# |                    | |\/| |/ _` | | '_ \                             |
+# |                    | |  | | (_| | | | | |                            |
+# |                    |_|  |_|\__,_|_|_| |_|                            |
+# |                                                                      |
+# +----------------------------------------------------------------------+
+# | Main entry and option parsing                                        |
+# '----------------------------------------------------------------------'
+
+os.unsetenv("LANG")
+opt_verbose = 0
+opt_foreground = False
+opt_single_cycle = False
+
+# Set default values for options
+omd_root = os.getenv("OMD_ROOT")
+if omd_root:
+    g_config_dir   = omd_root + "/etc/check_mk"
+    g_var_dir      = omd_root + "/var/check_mk"
+    g_pid_file     = omd_root + "/tmp/run/mknotifyd/pid"
+    g_logfile_path = omd_root + "/var/log/mknotifyd.log"
+else:
+    g_config_dir   = "/etc/check_mk"
+    g_var_dir      = "/var/check_mk"
+    g_pid_file     = "/var/run/mknotifyd.pid"
+    g_logfile_path = "/var/log/mknotifyd.log"
+
+
+g_config = {
+    "notification_daemon_listen_port": None,
+    "notification_deferred_retention_time": 180,
+    "notification_forward_to": "",
+}
+
+short_options = "hVvgs"
+long_options = [ "help", "version", "verbose", "foreground", "single" ]
+
+try:
+    opts, args = getopt.getopt(sys.argv[1:], short_options, long_options)
+
+    # first parse modifiers
+    for o, a in opts:
+        if o in [ '-v', '--verbose' ]:
+            opt_verbose += 1
+        elif o in [ '-g', '--foreground' ]:
+            opt_foreground = True
+        elif o in [ '-s', '--single' ]:
+            opt_single_cycle = True
+
+    # now handle action options
+    for o, a in opts:
+        if o in [ '-h', '--help' ]:
+            usage()
+            sys.exit(0)
+        elif o in [ '-V', '--version' ]:
+            sys.stdout.write("mknotifyd version %s\n" % VERSION)
+            sys.exit(0)
+
+    # Prepare logging if running in daemon mode
+    if not opt_foreground:
+        open_logfile()
+
+    log("-" * 65)
+    log("mknotifyd version %s starting" % VERSION)
+
+    load_configuration()
+    g_config_changed = False # Of course it's changed on startup...
+
+    if os.path.exists(g_pid_file):
+        old_pid = int(file(g_pid_file).read())
+        if process_exists(old_pid):
+            bail_out("Old PID file %s still existing and mknotifyd still running with PID %d." %
+                     (g_pid_file, old_pid))
+        os.remove(g_pid_file)
+        log("Removed orphaned PID file %s (process %d not running anymore)." % (g_pid_file, old_pid))
+
+    # Make sure paths exist
+    make_parentdirs(g_logfile_path)
+    make_parentdirs(g_pid_file)
+
+    # Create worker classes
+    g_tcp_server = TcpServer()
+    g_spoolfiles_handler = SpoolfilesHandler()
+
+    # Daemonize
+    if not opt_foreground:
+        make_parentdirs(g_pid_file)
+        daemonize()
+
+    # Create PID file
+    file(g_pid_file, "w").write("%d\n" % os.getpid())
+
+    # Install signal handler
+    signal.signal(1, signal_handler)   # HUP
+    signal.signal(2, signal_handler)   # INT
+    signal.signal(3, signal_handler)   # QUIT
+    signal.signal(15, signal_handler)  # TERM
+
+    # Now let's go...
+    run_notifyd()
+
+    # We reach this point, if the server has been killed by
+    # a signal or hitting Ctrl-C (in foreground mode)
+    os.remove(g_pid_file)
+    log("Successfully shut down.")
+    sys.exit(0)
+
+except Exception, e:
+    bail_out(e)
+
diff -Nru check-mk-1.2.2p3/treasures/mk_oracle.old check-mk-1.2.6p12/treasures/mk_oracle.old --- check-mk-1.2.2p3/treasures/mk_oracle.old 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/treasures/mk_oracle.old 2015-09-21 10:59:54.000000000 +0000 @@ -0,0 +1,223 @@ +#!/bin/bash
+# +------------------------------------------------------------------+
+# |             ____ _               _        __  __ _  __           |
+# |            / ___| |__   ___  ___| | __   |  \/  | |/ /           |
+# |            | |   | '_ \ / _ \/ __| |/ /   | |\/| | ' /           |
+# |            | |___| | | |  __/ (__|   <    | |  | | . 
\ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. + +# Check_MK agent plugin for monitoring ORACLE databases + +# Get list of all running databases +SIDS=$(UNIX95=true ps ax -o args | sed -n '/^\(ora\|xe\)_pmon_\([^ ]*\)/s//\2/p') +if [ -z "$SIDS" ] ; then + # If on this system we've already found a database + if [ -e "$MK_CONFDIR/mk_oracle.found" ] ; then + echo '<<>>' + echo '<<>>' + echo '<<>>' + echo '<<>>' + fi + exit 0 +fi + +touch $MK_CONFDIR/mk_oracle.found + +# Recreate data if cachefile is older than 120 seconds. +# If you set this to 0, then the cache file will be created +# as often as possible. If the database queries last longer +# then your check interval, caching will be active nevertheless. +CACHE_MAXAGE=120 + +# Source the optional configuration file for this agent plugin +if [ -e "$MK_CONFDIR/mk_oracle.cfg" ] +then + . $MK_CONFDIR/mk_oracle.cfg +fi + +# You can specify a list of SIDs to monitor. Those databases will +# only be handled, if they are found running, though! +# +# ONLY_SIDS="XE HIRN SEPP" +# +# It is possible to filter SIDS negatively. Just add the following to +# the mk_oracle.cfg file: +# +# EXCLUDE_="ALL" +# +# Another option is to filter single checks for SIDS. Just add +# lines as follows to the mk_oracle.cfg file. One service per +# line: +# +# EXCLUDE_="" +# +# For example skip oracle_sessions and oracle_logswitches checks +# for the instance "mysid". +# +# EXCLUDE_mysid="sessions logswitches" +# +# +# This check uses a cache file to prevent problems with long running +# SQL queries. It starts building a cache when +# a) no cache is present or the cache is too old and +# b) the cache is not currently being built +# The cache is used for $CACHE_MAXAGE seconds. The CACHE_MAXAGE +# option is pre-set to 120 seconds but can be changed in mk_oracle.cfg. + +function sqlplus () +{ + if OUTPUT=$({ echo 'set pages 0' ; echo 'whenever sqlerror exit 1'; echo 'set lines 8000' ; echo 'set feedback off'; cat ; } | $MK_CONFDIR/sqlplus.sh $1) + then + echo "${OUTPUT}" | sed -e 's/[[:space:]]\+/ /g' -e '/^[[:space:]]*$/d' -e "s/^/$1 /" + else + echo "${OUTPUT}" | sed "s/^/$1 FAILURE /" + fi +} + + +for SID in $SIDS; do + # Check if SID is listed in ONLY_SIDS if this is used + if [ "$ONLY_SIDS" ] ; then + SKIP=yes + for S in $ONLY_SIDS ; do + if [ "$S" = "$SID" ] ; then + SKIP= + break + fi + done + if [ "$SKIP" ] ; then continue ; fi + fi + + EXCLUDE=EXCLUDE_$SID + EXCLUDE=${!EXCLUDE} + # SID filtered totally? 
+ if [ "$EXCLUDE" = "ALL" ]; then + continue + fi + + # Do Version-Check (use as a general login check) without caching + if [ "$EXCLUDE" = "${EXCLUDE/version/}" ]; then + echo '<<>>' + echo "select banner from v\$version where banner like 'Oracle%';" | sqlplus "$SID" + fi + + CACHE_FILE=$MK_CONFDIR/oracle_$SID.cache + + # Check if file exists and recent enough + CACHE_FILE_UPTODATE= + if [ -s $CACHE_FILE ]; then + NOW=$(date +%s) + MTIME=$(stat -c %Y $CACHE_FILE) + if [ $(($NOW - $MTIME)) -le $CACHE_MAXAGE ]; then + CACHE_FILE_UPTODATE=1 + fi + fi + + # If the cache file exists, output it, regardless of its age. If it's outdated + # then it will be recreated *asynchronously*. It's new contents will not + # be available here anyway. + if [ -s "$CACHE_FILE" ] ; then cat "$CACHE_FILE" ; fi + + # When the cache file is not valid, we recreated it, but only if there is not + # yet a background process from a previous check still doing this! We see this + # because of the existance of the .new file + # When the cache is old and there is no *new file present, then start a query + # to update the information for this instance. + if [ -z "$CACHE_FILE_UPTODATE" -a ! -e "$CACHE_FILE.new" ] + then + setsid bash -c " + set -o noclobber + function sqlplus () + { + if OUTPUT=\$({ echo 'set pages 0' ; echo 'whenever sqlerror exit 1'; echo 'set lines 8000' ; echo 'set feedback off'; cat ; } | $MK_CONFDIR/sqlplus.sh \$1) + then + echo \"\${OUTPUT}\" | sed -e 's/[[:space:]]\+/ /g' -e '/^[[:space:]]*$/d' -e \"s/^/\$1 /\" + else + echo \"\${OUTPUT}\" | sed \"s/^/\$1 FAILURE /\" + fi + } + + { + # Only execute checks when not filtered + if [ '$EXCLUDE' = '${EXCLUDE/sessions/}' ]; then + echo '<<>>' + echo \"select count(1) from v\\\$session where status = 'ACTIVE';\" | sqlplus \"$SID\" + fi + + if [ '$EXCLUDE' = '${EXCLUDE/logswitches/}' ]; then + echo '<<>>' + echo \"select count(1) from v\\\$loghist where first_time > sysdate - 1/24;\" | sqlplus \"$SID\" + fi + + if [ '$EXCLUDE' = '${EXCLUDE/jobs/}' ]; then + echo '<<>>' + sqlplus "$SID" <>>' + sqlplus "$SID" <'RUNNING' GROUP BY input_type) b + WHERE a.COMMAND_ID = b.COMMAND_ID; +EOF + fi + + if [ '$EXCLUDE' = '${EXCLUDE/tablespaces/}' ]; then + echo '<<>>' + sqlplus "$SID" < $CACHE_FILE.new && mv $CACHE_FILE.new $CACHE_FILE || rm -f $CACHE_FILE* + " + fi +done Binary files /tmp/tmpz1J_py/xquijR6Ryw/check-mk-1.2.2p3/treasures/modbus/agent_modbus and /tmp/tmpz1J_py/GXS8RRKTxu/check-mk-1.2.6p12/treasures/modbus/agent_modbus differ diff -Nru check-mk-1.2.2p3/treasures/modbus/agent_modbus.cpp check-mk-1.2.6p12/treasures/modbus/agent_modbus.cpp --- check-mk-1.2.2p3/treasures/modbus/agent_modbus.cpp 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/treasures/modbus/agent_modbus.cpp 2013-12-20 16:56:15.000000000 +0000 @@ -0,0 +1,109 @@ +// agent_modbus 1.0 +// +// vincent.tacquet@gmail.com +// http://www.tacquet.be + +#include +#include +#include +#include +#include +#include +#include + +void print_usage(int exitcode) + +{ + printf("-----------------------------------------------------------------\n"); + printf("agent_modbus - Vincent Tacquet - 2013 - vincent.tacquet@gmail.com\n"); + printf("version 1.0\n\n"); + printf("usage: agent_modbus () ...\n"); + printf("example: agent_modbus 192.168.0.1 502 856:2:counter:active_energy 790:2:gauge:active_power\n"); + printf("-----------------------------------------------------------------\n\n"); + exit(exitcode); + +} + +int main(int argc, char *argv[]) +{ + modbus_t *mb; + uint16_t tab_reg[32]; + uint32_t 
mb_doubleword; + int rc; + int tcp_port, mb_address, mb_words; + char* mb_cg; + char* mb_name; + + if (argc < 4) + print_usage(2); + + tcp_port = atoi(argv[2]); + + mb = modbus_new_tcp(argv[1], tcp_port); + modbus_connect(mb); + + int args = 3; + + while (args < argc) + { + char* chk; + chk = strtok(argv[args],":"); + int counter = 0; + + while (chk != NULL) + { + counter++; + if (counter == 1) + mb_address = atoi(chk); + else if (counter == 2) + mb_words = atoi(chk); + else if (counter == 3) + mb_cg = chk; + else if (counter == 4) + mb_name = chk; + else + print_usage(2); + chk = strtok(NULL,":"); + } + + + if (counter == 4) + { + rc = modbus_read_registers(mb, mb_address, mb_words, tab_reg); + if (rc == -1) + { + fprintf(stderr, "error: %s\n", modbus_strerror(errno)); + return -1; + } + + if (args == 3) + { + printf("<<>>\n"); + } + + if (mb_words == 1) + { + printf("%d %d %s %s\n", mb_address, tab_reg[0], mb_cg, mb_name); + } + else if (mb_words == 2) + { + mb_doubleword = 0; + mb_doubleword = tab_reg[0] << 16; + mb_doubleword += tab_reg[1]; + printf("%d %d %s %s\n", mb_address, mb_doubleword, mb_cg, mb_name); + } + else + { + exit(2); + } + } + else + { + exit(2); + } + args++; + } + modbus_close(mb); + modbus_free(mb); + exit(0); +} diff -Nru check-mk-1.2.2p3/treasures/modbus/checkman/modbus_value check-mk-1.2.6p12/treasures/modbus/checkman/modbus_value --- check-mk-1.2.2p3/treasures/modbus/checkman/modbus_value 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/treasures/modbus/checkman/modbus_value 2013-12-20 16:56:15.000000000 +0000 @@ -0,0 +1,17 @@ +title: Modbus value +agents: modbus +catalog: hw/environment +license: GPL +distribution: check_mk +description: + This check is the first approach to monitor the Modbus. Modbus is an Industry Bus which + can be used to query information. To get that information, you will find agent_modbus in + the agent/special directory. This Check currently can handle gauge and counter data. + If you set no levels, only a pnp graph will be created and the check anytimes shows {OK}. + +inventory: + One service will be created for each value + +examples: +#Default levels 300, 350 +modbus_value_default_levels = ( 300, 350 ) diff -Nru check-mk-1.2.2p3/treasures/modbus/checks/agent_modbus check-mk-1.2.6p12/treasures/modbus/checks/agent_modbus --- check-mk-1.2.2p3/treasures/modbus/checks/agent_modbus 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/treasures/modbus/checks/agent_modbus 2013-12-20 16:56:15.000000000 +0000 @@ -0,0 +1,38 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. 
See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. + +# example: agent_modbus 192.168.0.1 502 856:2:counter:active_energy 790:2:gauge:active_power +def agent_modbus_arguments(params, hostname, ipaddress): + port, vars = params + args = quote_shell_string(ipaddress) + + args += ' ' + str(port) + for cid, ctype, words, name in vars: + args += " %s:%s:%s:%s" % ( cid, ctype, words, name.replace(" ", "_")) + + return args + +special_agent_info['modbus'] = agent_modbus_arguments diff -Nru check-mk-1.2.2p3/treasures/modbus/modbus_value check-mk-1.2.6p12/treasures/modbus/modbus_value --- check-mk-1.2.2p3/treasures/modbus/modbus_value 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/treasures/modbus/modbus_value 2013-12-20 16:56:15.000000000 +0000 @@ -0,0 +1,65 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. 
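
# A minimal sketch (not part of the shipped code) of what the argument builder
# above produces. It assumes agent_modbus_arguments() and Check_MK's
# quote_shell_string() are in scope; the host name and values are made up and
# mirror the example comment above. Note that the loop unpacks the rule tuples
# as (cid, ctype, words, name) although the WATO rule stores
# (counter id, number of words, value type, description) - the names "ctype"
# and "words" are swapped, even though the resulting id:words:type:name order
# on the command line is the intended one.

def _demo_agent_modbus_arguments():
    params = (502, [ (856, 2, "counter", "active energy"),
                     (790, 2, "gauge",   "active power") ])
    return agent_modbus_arguments(params, "modbushost", "192.168.0.1")
    # -> '192.168.0.1' 502 856:2:counter:active_energy 790:2:gauge:active_power
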
+
+# Example output from agent:
+# <<<modbus_value>>>
+# 856 1827893 counter active_energy
+# 790 11204 gauge active_power
+
+modbus_value_default_levels = ( None, None )
+def inventory_modbus_value(info):
+    return [ (x[-1], "modbus_value_default_levels") for x in info ]
+
+def check_modbus_value(item, params, info):
+    for cid, value, ctype, name in info:
+        if name == item:
+            value = saveint(value)
+            message_end = ""
+            if ctype == "gauge":
+                now = time.time()
+                timedif, value = get_counter( "modbus.value."+name, now, value )
+                message_end = "in last %.0fsec " % timedif
+            warn, crit = params
+            state = 0
+            if crit and value >= crit:
+                state = 2
+                message_end += "(Levels Warning/Critical at %s/%s)" % (warn, crit)
+            elif warn and value >= warn:
+                state = 1
+                message_end += "(Levels Warning/Critical at %s/%s)" % (warn, crit)
+            message = "Current: %.2f %s (%s)" % ( value, message_end, cid )
+            return state, message, [ (name, value, warn, crit ) ]
+
+    return 3, "Value not found in Agent output"
+
+check_info["modbus_value"] = {
+    "check_function"      : check_modbus_value,
+    "inventory_function"  : inventory_modbus_value,
+    "service_description" : "Value %s",
+    "has_perfdata"        : True,
+    "group"               : "modbus_value",
+}
+
diff -Nru check-mk-1.2.2p3/treasures/modbus/perfometer/modbus.py check-mk-1.2.6p12/treasures/modbus/perfometer/modbus.py --- check-mk-1.2.2p3/treasures/modbus/perfometer/modbus.py 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/treasures/modbus/perfometer/modbus.py 2014-10-30 13:30:24.000000000 +0000 @@ -0,0 +1,35 @@ +#!/usr/bin/python
+# -*- encoding: utf-8; py-indent-offset: 4 -*-
+# +------------------------------------------------------------------+
+# |             ____ _               _        __  __ _  __           |
+# |            / ___| |__   ___  ___| | __   |  \/  | |/ /           |
+# |            | |   | '_ \ / _ \/ __| |/ /   | |\/| | ' /           |
+# |            | |___| | | |  __/ (__|   <    | |  | | . \           |
+# |             \____|_| |_|\___|\___|_|\_\___|_|  |_|_|\_\          |
+# |                                                                  |
+# | Copyright Mathias Kettner 2014             mk@mathias-kettner.de |
+# +------------------------------------------------------------------+
+#
+# This file is part of Check_MK.
+# The official homepage is at http://mathias-kettner.de/check_mk.
+#
+# check_mk is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by
+# the Free Software Foundation in version 2.  check_mk is distributed
+# in the hope that it will be useful, but WITHOUT ANY WARRANTY;  with-
+# out even the implied warranty of  MERCHANTABILITY  or  FITNESS FOR A
+# PARTICULAR PURPOSE. See the  GNU General Public License for more de-
+# tails. You should have  received  a copy of the  GNU  General Public
+# License along with GNU Make; see the file  COPYING.  If  not,  write
+# to the Free Software Foundation, Inc., 51 Franklin St,  Fifth Floor,
+# Boston, MA 02110-1301 USA.
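
# For readers unfamiliar with the counter API used above: get_counter() keeps
# the last (time, value) pair per counter name and returns the elapsed time
# and the per-second rate since then. A self-contained sketch of the idea
# (an assumption - the real helper also persists its state on disk and
# handles counter wraps and the missing first sample):

_demo_counter_state = {}

def _demo_get_counter(countername, this_time, this_value):
    last_time, last_value = _demo_counter_state.get(countername,
                                                    (this_time, this_value))
    _demo_counter_state[countername] = (this_time, this_value)
    timedif = this_time - last_time
    if timedif <= 0:
        return 0.0, 0.0   # first sample: no rate can be computed yet
    return timedif, (this_value - last_value) / float(timedif)
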
+
+# Put this file into share/check_mk/web/plugins/perfometer
+
+def perfometer_modbus_value(row, check_command, perf_data):
+    value = int(perf_data[0][1])
+    return perf_data[0][1], perfometer_logarithmic(value, value*3, 2, '#3366cc')
+
+perfometers['check_mk-modbus_value'] = perfometer_modbus_value
+
+
diff -Nru check-mk-1.2.2p3/treasures/modbus/wato/modbus.py check-mk-1.2.6p12/treasures/modbus/wato/modbus.py --- check-mk-1.2.2p3/treasures/modbus/wato/modbus.py 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/treasures/modbus/wato/modbus.py 2014-10-30 13:30:24.000000000 +0000 @@ -0,0 +1,79 @@ +#!/usr/bin/python
+# -*- encoding: utf-8; py-indent-offset: 4 -*-
+# +------------------------------------------------------------------+
+# |             ____ _               _        __  __ _  __           |
+# |            / ___| |__   ___  ___| | __   |  \/  | |/ /           |
+# |            | |   | '_ \ / _ \/ __| |/ /   | |\/| | ' /           |
+# |            | |___| | | |  __/ (__|   <    | |  | | . \           |
+# |             \____|_| |_|\___|\___|_|\_\___|_|  |_|_|\_\          |
+# |                                                                  |
+# | Copyright Mathias Kettner 2014             mk@mathias-kettner.de |
+# +------------------------------------------------------------------+
+#
+# This file is part of Check_MK.
+# The official homepage is at http://mathias-kettner.de/check_mk.
+#
+# check_mk is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by
+# the Free Software Foundation in version 2.  check_mk is distributed
+# in the hope that it will be useful, but WITHOUT ANY WARRANTY;  with-
+# out even the implied warranty of  MERCHANTABILITY  or  FITNESS FOR A
+# PARTICULAR PURPOSE. See the  GNU General Public License for more de-
+# tails. You should have  received  a copy of the  GNU  General Public
+# License along with GNU Make; see the file  COPYING.  If  not,  write
+# to the Free Software Foundation, Inc., 51 Franklin St,  Fifth Floor,
+# Boston, MA 02110-1301 USA.
+
+# Put this file into share/check_mk/web/plugins/wato. It will create a rule
+# for modbus checks and a rule in the configuration of the special agents.
+
+register_check_parameters(
+    subgroup_environment,
+    "modbus_value",
+    _("Modbus Performance Values"),
+    Tuple(
+        elements = [
+            Integer(title = _("Warning if above")),
+            Integer(title = _("Critical if above"))
+        ]
+    ),
+    TextAscii( title = _("Value Name") ),
+    None
+)
+
+
+register_rule(group,
+    "special_agents:modbus",
+    Tuple(
+        title = _("Check Modbus devices"),
+        help = _( "Configure the server address and the ids you want to query from the device. "
+                  "Please refer to the documentation of the device to find out which ids you want"),
+        elements = [
+            Integer( title = _("Port Number"), default_value=502 ),
+            ListOf(
+                Tuple(
+                    elements = [
+                        Integer( title=_("Counter ID") ),
+                        DropdownChoice(
+                            title = _("Number of words"),
+                            choices = [
+                                ( 1 , _("1 Word") ),
+                                ( 2, _("2 Words") ),
+                            ]
+                        ),
+                        DropdownChoice(
+                            title = _("Value type"),
+                            choices = [
+                                ( "counter" , _("It is a counter value") ),
+                                ( "gauge", _("It is a gauge value") ),
+                            ]
+                        ),
+                        TextAscii( title = _("Counter Description")),
+                    ]
+                )
+            )
+        ]
+    ),
+    match = "first")
+
+
diff -Nru check-mk-1.2.2p3/treasures/msexchange/README check-mk-1.2.6p12/treasures/msexchange/README --- check-mk-1.2.2p3/treasures/msexchange/README 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/treasures/msexchange/README 2014-07-04 17:50:27.000000000 +0000 @@ -0,0 +1,26 @@ +Exchange Monitoring:
+
+How to get the counter IDs
+regedit: HKEY_LOCAL_MACHINE\SOFTWARE\Microsoft\Windows NT\CurrentVersion\Perflib\009\Counter
+The id is the number before the name.
+or with lodctr + +---------------------------------------------------------------------- +counters = 111111:msx_dc_access +MSExchange ADAccess Domain Controllers +---------------------------------------------------------------------- +counters = 11111:msx_dumpster +MSExchangeTransport Dumpster +---------------------------------------------------------------------- +counters = 68312:msx_queued_mailbox +MSExchangeIS Mailbox +---------------------------------------------------------------------- +counters = 64848:msx_activesync +MSExchange ActiveSync +---------------------------------------------------------------------- +counters = 66210:msx_rpc_clientaccess +MSExchange RpcClientAccess +---------------------------------------------------------------------- +counters = 66804:msx_db_reads_avg_latency +MSExchange Database +---------------------------------------------------------------------- diff -Nru check-mk-1.2.2p3/treasures/msexchange/winperf_msx_activesync check-mk-1.2.6p12/treasures/msexchange/winperf_msx_activesync --- check-mk-1.2.2p3/treasures/msexchange/winperf_msx_activesync 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/treasures/msexchange/winperf_msx_activesync 2014-07-04 17:50:27.000000000 +0000 @@ -0,0 +1,50 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. 
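
# Background for the README above (an assumption based on the Windows agent's
# [winperf] feature): an entry like "counters = 64848:msx_activesync" in
# check_mk.ini makes the agent emit a section <<<winperf_msx_activesync>>>.
# Its first row is "timestamp base_id", for multi-instance objects the second
# row lists the instance names, and every further row holds a counter offset,
# one value per instance and the counter type. A rough sketch of turning such
# a table into an {offset: values} mapping:

def _demo_parse_winperf(info):
    counters = {}
    for row in info[2:]:
        counters[int(row[0])] = [ int(v) for v in row[1:-1] ]
    return counters
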
+ +# Example output from agent: + +def inventory_winperf_msx_activesync(info): + return [ (None, None) ] + +def check_winperf_msx_activesync(_no_item, _no_params, info): + ping_commands = info[72][1] + sync_commands = info[84][1] + + perf = [ + ( "ping", ping_commands ), + ( "sync", sync_commands ), + ] + return 0, "%s Ping-/ %s Sync Commands are pending" % ( ping_commands, sync_commands ), perf + + + +check_info["winperf_msx_activesync"] = { + "check_function" : check_winperf_msx_activesync, + "inventory_function" : inventory_winperf_msx_activesync, + "service_description" : "MSX Active Sync", + "has_perfdata" : True, +} + diff -Nru check-mk-1.2.2p3/treasures/msexchange/winperf_msx_db_reads_avg_latency check-mk-1.2.6p12/treasures/msexchange/winperf_msx_db_reads_avg_latency --- check-mk-1.2.2p3/treasures/msexchange/winperf_msx_db_reads_avg_latency 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/treasures/msexchange/winperf_msx_db_reads_avg_latency 2014-07-04 17:50:27.000000000 +0000 @@ -0,0 +1,83 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. + +# Example output from agent: +#<<>> +#1387274514.76 66804 +#3 instances: Information_Store msexchangerepl edgetransport +#2 0 0 0 nodata +#4 0 0 0 nodata +#6 0 0 0 nodata +#8 0 0 0 nodata +#10 0 0 0 rawcount +#12 0 0 0 rawcount +#14 0 0 0 nodata +#16 36126 0 0 nodata +#18 36126 0 0 nodata +#20 0 0 0 nodata +#22 0 0 0 nodata +#24 0 0 0 nodata +#26 0 0 0 nodata +#28 0 0 0 nodata +#30 0 0 0 nodata +#32 0 0 0 nodata +#34 0 0 0 nodata +#... 
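
# The converter that follows relies on the latency rows beginning at the
# absolute row index 274 of the section. A more defensive variant (a sketch
# only, not the shipped code) would locate a row by the counter offset in its
# first column instead:

def _demo_find_counter_row(info, offset):
    for row in info[2:]:
        if row[0] == str(offset):
            return row
    return None
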
+ +def winperf_msx_db_reads_avg_latency_convert(info): + i = 274 + data = {} + instances = info[1][2:] + for instance in instances: + data[instance] = int(info[i][2]) + i += 1 + return data + +winperf_msx_db_reads_avg_latency_default = ( 20, 25 ) +def inventory_winperf_msx_db_reads_avg_latency(info): + return [ (x, 'winperf_msx_db_reads_avg_latency_default') for x in info[1][2:] ] + +def check_winperf_msx_db_reads_avg_latency(item, params, info): + info = winperf_msx_db_reads_avg_latency_convert(info) + for instance, counter in info.items(): + state = 0 + if instance == item: + warn, crit = params + if counter >= crit: + state = 2 + elif counter >= warn: + state = 1 + perf = [('avg', counter, warn, crit )] + return state, "%sms AVG Database Reads Latency" % counter, perf + return 3, "Configuration problem" + +check_info["winperf_msx_db_reads_avg_latency"] = { + "check_function" : check_winperf_msx_db_reads_avg_latency, + "inventory_function" : inventory_winperf_msx_db_reads_avg_latency, + "service_description" : "MSX DB Read AVG %s", + "has_perfdata" : True, +} + diff -Nru check-mk-1.2.2p3/treasures/msexchange/winperf_msx_dc_access check-mk-1.2.6p12/treasures/msexchange/winperf_msx_dc_access --- check-mk-1.2.2p3/treasures/msexchange/winperf_msx_dc_access 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/treasures/msexchange/winperf_msx_dc_access 2014-07-04 17:50:27.000000000 +0000 @@ -0,0 +1,96 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. 
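
# The string 'winperf_msx_db_reads_avg_latency_default' returned by the
# inventory function above is resolved as a variable name at check time, so
# the thresholds can be overridden centrally, e.g. with a line like the
# following in main.mk (the values are only an illustration):

winperf_msx_db_reads_avg_latency_default = ( 30, 40 )   # warn / crit in ms
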
+
+# Example output from agent:
+#<<<winperf_msx_dc_access>>>
+#1377600100.83 40156
+#2 instances: srvc1dc02.ft.de srvc1dc01.ft.de
+#2 1043618 792918 counter
+#4 898494 654026 counter
+#6 0 0 rawcount
+#8 0 0 rawcount
+#10 0 0 rawcount
+#12 0 0 rawcount
+
+def winperf_msx_dc_access_convert(info):
+    i = 2
+    data = {}
+    servers = info[1][2:]
+    num_srv = len(servers)
+    for server in servers:
+        data[server] = (int(info[i][2]), int(info[i+num_srv][2]))
+        i += 1
+    return data
+
+winperf_msx_dc_access_default_levels = {
+    "read_time"   : ( 50, 60 ),
+    "search_time" : ( 50, 60 )
+}
+
+def inventory_winperf_msx_dc_access(info):
+    return [ (x, 'winperf_msx_dc_access_default_levels') for x in info[1][2:] ]
+
+def check_winperf_msx_dc_access(item, params, info):
+    info = winperf_msx_dc_access_convert(info)
+    for server, counter in info.items():
+        if server == item:
+            act = {}
+            act['read_time'], act['search_time'] = counter
+            now = time.time()
+            data = {}
+            wrapped = False
+            state = 0
+            message = []
+            perf = []
+            for what in ['read_time', 'search_time']:
+                try:
+                    timediff, data[what] = get_counter("msx_dc_access_%s_%s" % (item, what), now, act[what])
+                except:
+                    wrapped = True
+                    continue
+                warn, crit = params[what]
+                msg = "%s: AVG %dms for last %ds" % ( what, data[what], timediff )
+                if data[what] >= crit:
+                    state = 2
+                    msg += "(!!) (Levels at %d/%d)" % (warn, crit)
+                elif data[what] >= warn:
+                    msg += "(!) (Levels at %d/%d)" % (warn, crit)
+                    state = max(1, state)
+                message.append(msg)
+                perf.append((what, data[what], warn, crit ))
+
+            if wrapped:
+                return 3, "Initializing counters"
+            return state, ", ".join(message), perf
+    return 3, "Configuration problem"
+
+check_info["winperf_msx_dc_access"] = {
+    "check_function"      : check_winperf_msx_dc_access,
+    "inventory_function"  : inventory_winperf_msx_dc_access,
+    "service_description" : "MSX DC %s Access",
+    "has_perfdata"        : True,
+}
+
diff -Nru check-mk-1.2.2p3/treasures/msexchange/winperf_msx_dumpster check-mk-1.2.6p12/treasures/msexchange/winperf_msx_dumpster
--- check-mk-1.2.2p3/treasures/msexchange/winperf_msx_dumpster 1970-01-01 00:00:00.000000000 +0000
+++ check-mk-1.2.6p12/treasures/msexchange/winperf_msx_dumpster 2014-07-04 17:50:27.000000000 +0000
@@ -0,0 +1,78 @@
+#!/usr/bin/python
+# -*- encoding: utf-8; py-indent-offset: 4 -*-
+# +------------------------------------------------------------------+
+# | ____ _ _ __ __ _ __ |
+# | / ___| |__ ___ ___| | __ | \/ | |/ / |
+# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / |
+# | | |___| | | | __/ (__| < | | | | . \ |
+# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ |
+# | |
+# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de |
+# +------------------------------------------------------------------+
+#
+# This file is part of Check_MK.
+# The official homepage is at http://mathias-kettner.de/check_mk.
+#
+# check_mk is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by
+# the Free Software Foundation in version 2. check_mk is distributed
+# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with-
+# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE. See the GNU General Public License for more de-
+# ails. You should have received a copy of the GNU General Public
+# License along with GNU Make; see the file COPYING. If not, write
+# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
+# Boston, MA 02110-1301 USA.
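+
+# What the check in this file does (derived from the sample output that
+# follows): it scans the counter table for offset 2 (number of items in the
+# dumpster) and offset 4 (its size, rendered as bytes) and reports both;
+# all other counter rows are ignored.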
+
+# Example output from agent:
+#<<<winperf_msx_dumpster>>>
+#1387196179.28 62694
+#2 6 rawcount
+#4 130270 rawcount
+#6 46917 counter
+#8 46911 counter
+#10 0 rawcount
+#12 57 large_rawcount
+#14 2 rawcount
+#16 0 average_timer
+#18 0 average_base
+#20 0 average_timer
+#22 0 average_base
+#24 0 average_bulk
+#26 0 average_base
+#28 0 rawcount
+#30 0 rawcount
+
+
+def inventory_winperf_msx_dumpster(info):
+    return [ (None, None) ]
+
+def check_winperf_msx_dumpster(_no_item, _no_params, info):
+    counters = []
+
+    # find the values
+    for line in info:
+        if line[0] == '2':
+            counters.append(saveint(line[1]))
+        if line[0] == '4':
+            counters.append(saveint(line[1]))
+            break
+
+    if len(counters) == 2:
+        count, size = counters
+        perf = [
+            ( 'size', size ),
+            ( 'items', count),
+        ]
+        return 0, "Size is %s (%d Items)" % ( get_bytes_human_readable(size), count ), perf
+
+    return 3, "Counter not found"
+
+
+check_info["winperf_msx_dumpster"] = {
+    "check_function"      : check_winperf_msx_dumpster,
+    "inventory_function"  : inventory_winperf_msx_dumpster,
+    "service_description" : "MSX Dumpster",
+    "has_perfdata"        : True,
+}
+
diff -Nru check-mk-1.2.2p3/treasures/msexchange/winperf_msx_queued_mailbox check-mk-1.2.6p12/treasures/msexchange/winperf_msx_queued_mailbox
--- check-mk-1.2.2p3/treasures/msexchange/winperf_msx_queued_mailbox 1970-01-01 00:00:00.000000000 +0000
+++ check-mk-1.2.6p12/treasures/msexchange/winperf_msx_queued_mailbox 2014-07-04 17:50:27.000000000 +0000
@@ -0,0 +1,63 @@
+#!/usr/bin/python
+# -*- encoding: utf-8; py-indent-offset: 4 -*-
+# +------------------------------------------------------------------+
+# | ____ _ _ __ __ _ __ |
+# | / ___| |__ ___ ___| | __ | \/ | |/ / |
+# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / |
+# | | |___| | | | __/ (__| < | | | | . \ |
+# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ |
+# | |
+# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de |
+# +------------------------------------------------------------------+
+#
+# This file is part of Check_MK.
+# The official homepage is at http://mathias-kettner.de/check_mk.
+#
+# check_mk is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by
+# the Free Software Foundation in version 2. check_mk is distributed
+# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with-
+# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE. See the GNU General Public License for more de-
+# ails. You should have received a copy of the GNU General Public
+# License along with GNU Make; see the file COPYING. If not, write
+# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
+# Boston, MA 02110-1301 USA.
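+
+# No sample agent output survives in this file. Judging from the converter
+# below, the section is assumed to look like the other winperf_msx_* sections:
+# an instance header in line 2 listing the servers, and the queued-mailbox
+# counter starting at row offset 84, one row per server.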
+
+# Example output from agent:
+
+def winperf_msx_queued_mailbox_convert(info):
+    i = 84
+    data = {}
+    servers = info[1][2:]
+    for server in servers:
+        data[server] = int(info[i][2])
+        i += 1
+    return data
+
+winperf_msx_queued_mailbox_default = ( 40, 50 )
+
+def inventory_winperf_msx_queued_mailbox(info):
+    return [ (x, 'winperf_msx_queued_mailbox_default') for x in info[1][2:] ]
+
+def check_winperf_msx_queued_mailbox(item, params, info):
+    try:
+        value = winperf_msx_queued_mailbox_convert(info)[item]
+        warn, crit = params
+        message = "%s entries" % value
+        perf = [ ('queued', value )]
+        if value >= crit:
+            return 2, message, perf
+        if value >= warn:
+            return 1, message, perf
+        return 0, message, perf
+    except:
+        return 3, "Instance not found in agent output"
+
+check_info["winperf_msx_queued_mailbox"] = {
+    "check_function"      : check_winperf_msx_queued_mailbox,
+    "inventory_function"  : inventory_winperf_msx_queued_mailbox,
+    "service_description" : "Queue Submission %s",
+    "has_perfdata"        : True,
+}
+
diff -Nru check-mk-1.2.2p3/treasures/msexchange/winperf_msx_rpc_clientaccess check-mk-1.2.6p12/treasures/msexchange/winperf_msx_rpc_clientaccess
--- check-mk-1.2.2p3/treasures/msexchange/winperf_msx_rpc_clientaccess 1970-01-01 00:00:00.000000000 +0000
+++ check-mk-1.2.6p12/treasures/msexchange/winperf_msx_rpc_clientaccess 2014-07-04 17:50:27.000000000 +0000
@@ -0,0 +1,77 @@
+#!/usr/bin/python
+# -*- encoding: utf-8; py-indent-offset: 4 -*-
+# +------------------------------------------------------------------+
+# | ____ _ _ __ __ _ __ |
+# | / ___| |__ ___ ___| | __ | \/ | |/ / |
+# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / |
+# | | |___| | | | __/ (__| < | | | | . \ |
+# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ |
+# | |
+# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de |
+# +------------------------------------------------------------------+
+#
+# This file is part of Check_MK.
+# The official homepage is at http://mathias-kettner.de/check_mk.
+#
+# check_mk is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by
+# the Free Software Foundation in version 2. check_mk is distributed
+# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with-
+# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE. See the GNU General Public License for more de-
+# ails. You should have received a copy of the GNU General Public
+# License along with GNU Make; see the file COPYING. If not, write
+# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
+# Boston, MA 02110-1301 USA.
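+
+# The check in this file only evaluates the first counter row (info[1],
+# offset 2 in the sample output that follows) and treats its value as the
+# current number of RPC client connections; all remaining rows are ignored.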
+
+# Example output from agent:
+#<<<winperf_msx_rpc_clientaccess>>>
+#1387273178.72 66210
+#2 1 rawcount
+#4 8758106 counter
+#6 18468322 counter
+#8 1 rawcount
+#10 1641652247 large_rawcount
+#12 1341174309 large_rawcount
+#14 1682095824 large_rawcount
+#16 3135531663 large_rawcount
+#18 287 large_rawcount
+#20 92 large_rawcount
+#22 202 large_rawcount
+#24 10271890 large_rawcount
+#26 10269732 large_rawcount
+#28 10269728 large_rawcount
+#30 4 large_rawcount
+#32 2158 large_rawcount
+#34 2158 large_rawcount
+#36 0 large_rawcount
+#38 2233 large_rawcount
+#40 460 large_rawcount
+#42 223 large_rawcount
+winperf_msx_rpc_clientaccess_default = ( 40, 45 )
+
+def inventory_winperf_msx_rpc_clientaccess(info):
+    return [ (None, "winperf_msx_rpc_clientaccess_default") ]
+
+def check_winperf_msx_rpc_clientaccess(_no_item, params, info):
+    try:
+        value = saveint(info[1][1])
+        warn, crit = params
+        state = 0
+        if value >= crit:
+            state = 2
+        elif value >= warn:
+            state = 1
+        perf = [('connections', value, warn, crit )]
+        return state, "Current: %s Client connections" % ( value ), perf
+    except IndexError:
+        return 3, "Counter not found"
+
+
+check_info["winperf_msx_rpc_clientaccess"] = {
+    "check_function"      : check_winperf_msx_rpc_clientaccess,
+    "inventory_function"  : inventory_winperf_msx_rpc_clientaccess,
+    "service_description" : "MSX RPC Client Access",
+    "has_perfdata"        : True,
+}
+
diff -Nru check-mk-1.2.2p3/treasures/multisite_to_mrpe check-mk-1.2.6p12/treasures/multisite_to_mrpe
--- check-mk-1.2.2p3/treasures/multisite_to_mrpe 2013-03-04 11:48:41.000000000 +0000
+++ check-mk-1.2.6p12/treasures/multisite_to_mrpe 2015-09-21 10:59:54.000000000 +0000
@@ -190,7 +190,7 @@
         state = { "OK" : 0, "WARN" : 1, "CRIT" : 2, "UNKNOWN" : 3}.get(service['service_state'])
         if state != None: # skip pending services
             sys.stdout.write("(%s) %s %d %s|%s\n" % (
-                service["svc_check_command"],
+                service["svc_check_command"].replace(" ", "_"),
                 service["service_description"].replace(" ", "_"),
                 state,
                 service["svc_plugin_output"],
diff -Nru check-mk-1.2.2p3/treasures/nagvis_icon/nagvis_icon.mk check-mk-1.2.6p12/treasures/nagvis_icon/nagvis_icon.mk
--- check-mk-1.2.2p3/treasures/nagvis_icon/nagvis_icon.mk 1970-01-01 00:00:00.000000000 +0000
+++ check-mk-1.2.6p12/treasures/nagvis_icon/nagvis_icon.mk 2014-07-04 17:50:27.000000000 +0000
@@ -0,0 +1,19 @@
+#This config file adds the name of each nagvis map containing a host as a custom macro.
+#The information is used by nagvis_icon.py to show a NagVis icon in the GUI
+
+# Just place this file into check_mk/conf.d
+
+_path = '/omd/sites/%s/etc/nagvis/maps/*.cfg' % omd_site
+_hosts = {}
+for _nm in glob.glob(_path):
+    _mapname = _nm.split("/")[-1].split('.')[0]
+    for _nhost in [ _l for _l in file(_nm).readlines() if _l.startswith('host_name')]:
+        _nhost = _nhost.split('=')[-1].strip()
+        _hosts.setdefault(_nhost, [])
+        if _mapname not in _hosts[_nhost]:
+            _hosts[_nhost].append(_mapname)
+
+extra_host_conf['_nagvismaps'] = []
+for _nhost, _maps in _hosts.items():
+    extra_host_conf['_nagvismaps'].append( ( ",".join(_maps), [_nhost] ) )
+
diff -Nru check-mk-1.2.2p3/treasures/nagvis_icon/nagvis_icon.py check-mk-1.2.6p12/treasures/nagvis_icon/nagvis_icon.py
--- check-mk-1.2.2p3/treasures/nagvis_icon/nagvis_icon.py 1970-01-01 00:00:00.000000000 +0000
+++ check-mk-1.2.6p12/treasures/nagvis_icon/nagvis_icon.py 2014-10-30 13:30:24.000000000 +0000
@@ -0,0 +1,43 @@
+#!/usr/bin/python
+# -*- encoding: utf-8; py-indent-offset: 4 -*-
+# +------------------------------------------------------------------+
+# | ____ _ _ __ __ _ __ |
+# | / ___| |__ ___ ___| | __ | \/ | |/ / |
+# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / |
+# | | |___| | | | __/ (__| < | | | | . \ |
+# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ |
+# | |
+# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de |
+# +------------------------------------------------------------------+
+#
+# This file is part of Check_MK.
+# The official homepage is at http://mathias-kettner.de/check_mk.
+#
+# check_mk is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by
+# the Free Software Foundation in version 2. check_mk is distributed
+# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with-
+# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE. See the GNU General Public License for more de-
+# ails. You should have received a copy of the GNU General Public
+# License along with GNU Make; see the file COPYING. If not, write
+# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
+# Boston, MA 02110-1301 USA.
+
+# Please refer to nagvis_icon.mk for one way to add the nagvismaps custom macro
+
+# copy me to ~/local/share/check_mk/web/plugins/icon and restart the site apache
+
+def paint_nagvis_image(what, row, tags, custom_vars):
+    if what != 'host' or not custom_vars.get('NAGVISMAPS'):
+        return
+    h = ""
+    for nagvis_map in custom_vars['NAGVISMAPS'].split(','):
+        # adjust the map link and icon file name here if your NagVis setup differs
+        h += '<a href="../nagvis/frontend/nagvis-js/index.php?mod=Map&act=view&show=%s">' \
+             '<img class=icon src="images/icon_nagvis.png" title="%s" /></a>' \
+             % ( nagvis_map, nagvis_map )
+
+    return h
+
+multisite_icons.append({
+    'paint': paint_nagvis_image,
+})
diff -Nru check-mk-1.2.2p3/treasures/notification_report.sh check-mk-1.2.6p12/treasures/notification_report.sh
--- check-mk-1.2.2p3/treasures/notification_report.sh 2013-03-04 11:48:41.000000000 +0000
+++ check-mk-1.2.6p12/treasures/notification_report.sh 2014-10-30 13:30:24.000000000 +0000
@@ -6,7 +6,7 @@
 # | | |___| | | | __/ (__| < | | | | . \ |
 # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ |
 # | |
-# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de |
+# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de |
 # +------------------------------------------------------------------+
 #
 # This file is part of Check_MK.
diff -Nru check-mk-1.2.2p3/treasures/notifications/mobilant check-mk-1.2.6p12/treasures/notifications/mobilant
--- check-mk-1.2.2p3/treasures/notifications/mobilant 1970-01-01 00:00:00.000000000 +0000
+++ check-mk-1.2.6p12/treasures/notifications/mobilant 2014-10-30 13:30:24.000000000 +0000
@@ -0,0 +1,97 @@
+#!/usr/bin/python
+# -*- encoding: utf-8; py-indent-offset: 4 -*-
+# +------------------------------------------------------------------+
+# | ____ _ _ __ __ _ __ |
+# | / ___| |__ ___ ___| | __ | \/ | |/ / |
+# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / |
+# | | |___| | | | __/ (__| < | | | | . \ |
+# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ |
+# | |
+# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de |
+# +------------------------------------------------------------------+
+#
+# This file is part of Check_MK.
+# The official homepage is at http://mathias-kettner.de/check_mk.
+#
+# check_mk is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by
+# the Free Software Foundation in version 2. check_mk is distributed
+# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with-
+# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE. See the GNU General Public License for more de-
+# ails. You should have received a copy of the GNU General Public
+# License along with GNU Make; see the file COPYING. If not, write
+# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
+# Boston, MA 02110-1301 USA.
+
+# Send SMS via Mobilant
+#
+# This notification script can be put below share/check_mk/notifications. It sends
+# SMS via mobilant.com. Please add your personal configuration directly in this
+# script. The target phone number is taken from the contact's pager address.
+# You can override this by specifying it as a parameter.
+
+import sys, os, urllib
+
+key = "8F37ksjf8kJ8k37f729Btlllsw8" # Enter your mobilant web key here!
+
+# This does not need to be changed
+to = os.environ.get("NOTIFY_CONTACTPAGER")
+fromname = "Check_MK"
+
+
+if len(sys.argv) > 1:
+    to = sys.argv[1]
+
+if not to:
+    sys.stderr.write("NOTIFY_CONTACTPAGER is not set.\n")
+    sys.exit(1)
+
+
+max_len = 160
+message = os.environ['NOTIFY_HOSTNAME'] + " "
+
+if os.environ['NOTIFY_WHAT'] == 'SERVICE':
+    message += os.environ['NOTIFY_SERVICESTATE'][:2] + " "
+    avail_len = max_len - len(message)
+    message += os.environ['NOTIFY_SERVICEDESC'][:avail_len] + " "
+    avail_len = max_len - len(message)
+    message += os.environ['NOTIFY_SERVICEOUTPUT'][:avail_len]
+
+else:
+    message += "is " + os.environ['NOTIFY_HOSTSTATE']
+
+
+url = "http://gw.mobilant.com/?" + urllib.urlencode([
+    ( "key", key ),
+    ( "to", to ),
+    ( "message", message ),
+    ( "route", "gold" ),
+    ( "from", fromname )
+])
+
+exitcodes = {
+    "10"  : u"Recipient number not correct, parameter: to",
+    "20"  : u"Sender ID not correct, parameter: from",
+    "30"  : u"Message text not correct, parameter: message",
+    "31"  : u"Message type not correct, parameter: messagetype",
+    "40"  : u"SMS route not correct, parameter: route",
+    "50"  : u"Identification failed, parameter: key",
+    "60"  : u"Not enough credit",
+    "70"  : u"Network not covered, parameter: route",
+    "71"  : u"Feature not possible, parameter: route",
+    "80"  : u"Handover to SMS-C failed",
+    "100" : u"SMS accepted and sent",
+}
+
+try:
+    handle = urllib.urlopen(url)
+    response = handle.read().strip()
+    if response == "100":
+        sys.stdout.write("Successfully sent SMS to %s\n" % to)
+    else:
+        sys.stderr.write("Error sending SMS to %s: %s\n" % (to, exitcodes.get(response, "Invalid exit code %s" % response)))
+        sys.stderr.write("URL was %s\n" % url)
+except Exception, e:
+    sys.stderr.write("Error sending SMS to %s. Exception: %s\n" % (to, e))
+
diff -Nru check-mk-1.2.2p3/treasures/notifications/multitech check-mk-1.2.6p12/treasures/notifications/multitech
--- check-mk-1.2.2p3/treasures/notifications/multitech 1970-01-01 00:00:00.000000000 +0000
+++ check-mk-1.2.6p12/treasures/notifications/multitech 2014-10-30 13:30:24.000000000 +0000
@@ -0,0 +1,133 @@
+#!/usr/bin/python
+# -*- encoding: utf-8; py-indent-offset: 4 -*-
+# +------------------------------------------------------------------+
+# | ____ _ _ __ __ _ __ |
+# | / ___| |__ ___ ___| | __ | \/ | |/ / |
+# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / |
+# | | |___| | | | __/ (__| < | | | | . \ |
+# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ |
+# | |
+# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de |
+# +------------------------------------------------------------------+
+#
+# This file is part of Check_MK.
+# The official homepage is at http://mathias-kettner.de/check_mk.
+#
+# check_mk is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by
+# the Free Software Foundation in version 2. check_mk is distributed
+# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with-
+# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE. See the GNU General Public License for more de-
+# ails. You should have received a copy of the GNU General Public
+# License along with GNU Make; see the file COPYING. If not, write
+# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
+# Boston, MA 02110-1301 USA.
+
+# Send SMS via MultiTech SMS-Gateway
+#
+# This notification script can be put below share/check_mk/notifications. It sends
+# SMS via a MultiTech SMS-Gateway
+# (http://www.multitech.com/en_US/PRODUCTS/Families/MultiModemiSMS/)
+# Please add your personal configuration directly in this
+# script. The target phone number is taken from the contact's pager address.
+# You can override this by specifying it as a parameter.
+#
+# Some hints for setup of the MultiTech SMS-Gateway:
+#
+# * Please use at least firmware version 1.51.9; earlier versions caused a lot
+# of trouble. The devices are not yet delivered with this version, so an upgrade is
+# required. You can get SF100-u-v1.51.9-16Jan2013.bin.zip e.g. at
+# https://shop.netways.de/attachment.php?id_attachment=64
+#
+# * Deactivate the PIN of the SIM card. This can be done most easily by inserting
+# the SIM into a mobile phone.
+#
+# * By default, the device has IP 192.168.2.1, user admin, password admin.
+# You can change these in the admin interface by browser (http).
+#
+# * Look into the status information in the web interface to make sure the
+# SIM card is displayed as enabled there.
+# If not: make sure you inserted the SIM card with the contacts to the bottom
+# and the cut-off corner to the front right.
+#
+# * Under
+# Administration > Admin Access > Allowed Networks
+# you can restrict access to the device. Make sure the IPs of the sending
+# Check_MK machines are included there.
+#
+# * Under
+# SMS Services > Send API
+# enable HTTP Status, set port to 80
+#
+# * Under
+# SMS Services > International Number
+# clear the check box "Disable International Number"
+#
+# * Under
+# SMS Services > Send SMS Users
+# create a user for Check_MK. This user needs to be entered below.
+# Make sure you choose a password that is not longer than 8 characters.
+# On the device it is possible to set a longer password, but authentication
+# with it is impossible!! :-(
+#
+# * Do not forget to go to the "Save & Restart" tab and click "save" there.
+# This writes your changes into the flash memory of the device. Otherwise
+# they will be lost on the next reboot.
+#
+
+
+import sys, os, urllib
+
+# This does not need to be changed
+to = os.environ.get("NOTIFY_CONTACTPAGER")
+fromname = "Check_MK"
+user = "nagios"
+passwd = "test123"
+url = "http://isms.example.com/sendmsg?"
+
+
+if len(sys.argv) > 1:
+    to = sys.argv[1]
+
+if not to:
+    sys.stderr.write("NOTIFY_CONTACTPAGER is not set.\n")
+    sys.exit(1)
+
+
+max_len = 160
+message = os.environ['NOTIFY_HOSTNAME'] + " "
+
+if os.environ['NOTIFY_WHAT'] == 'SERVICE':
+    message += os.environ['NOTIFY_SERVICESTATE'][:2] + " "
+    avail_len = max_len - len(message)
+    message += os.environ['NOTIFY_SERVICEDESC'][:avail_len] + " "
+    avail_len = max_len - len(message)
+    message += os.environ['NOTIFY_SERVICEOUTPUT'][:avail_len]
+
+else:
+    message += "is " + os.environ['NOTIFY_HOSTSTATE']
+
+# constructing a url like
+# http://isms.example.com/sendmsg?user=nagios&passwd=test123&cat=1&to=017012345678&text=sample
+url += urllib.urlencode([
+    ( "user", user ),
+    ( "passwd", passwd ),
+    ( "cat", "1" ),
+    ( "to", to ),
+    ( "text", message )
+])
+
+
+try:
+    handle = urllib.urlopen(url)
+    response = handle.read().strip()
+    sys.stdout.write("%s\n" % response)
+    if response.startswith("ID:"):
+        sys.stdout.write("Successfully sent SMS to %s\n" % to)
+    else:
+        sys.stderr.write("Error sending SMS to %s: %s\n" % (to, response))
+        sys.stderr.write("URL was %s\n" % url)
+except Exception, e:
+    sys.stderr.write("Error sending SMS to %s. Exception: %s\n" % (to, e))
+
diff -Nru check-mk-1.2.2p3/treasures/notifications/pushover check-mk-1.2.6p12/treasures/notifications/pushover
--- check-mk-1.2.2p3/treasures/notifications/pushover 1970-01-01 00:00:00.000000000 +0000
+++ check-mk-1.2.6p12/treasures/notifications/pushover 2015-09-21 10:59:54.000000000 +0000
@@ -0,0 +1,67 @@
+#!/usr/bin/python
+# -*- encoding: utf-8; py-indent-offset: 4 -*-
+# +------------------------------------------------------------------+
+# | ____ _ _ __ __ _ __ |
+# | / ___| |__ ___ ___| | __ | \/ | |/ / |
+# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / |
+# | | |___| | | | __/ (__| < | | | | . \ |
+# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ |
+# | |
+# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de |
+# +------------------------------------------------------------------+
+#
+# This file is part of Check_MK.
+# The official homepage is at http://mathias-kettner.de/check_mk.
+#
+# check_mk is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by
+# the Free Software Foundation in version 2. check_mk is distributed
+# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with-
+# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE. See the GNU General Public License for more de-
+# ails. You should have received a copy of the GNU General Public
+# License along with GNU Make; see the file COPYING. If not, write
+# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
+# Boston, MA 02110-1301 USA.
+
+# Send notification to Pushover
+#
+# This notification script can be put below share/check_mk/notifications. It sends
+# notifications to Pushover. Note: this is currently just a simple test. The device
+# should probably be set to a user name or similar.
+
+import sys, os, urllib, httplib
+
+# Enter your keys and tokens here. For details about what that's
+# about please refer to the Pushover documentation.
+user_key = "XXXXXXXXXXXXXXXXXXXXXXXXXXX"
+app_token = "XXXXXXXXXXXXXXXXXXXXXXXXXXX"
+device = "XXXXX"
+
+max_len = 160
+message = os.environ['NOTIFY_HOSTNAME'] + " "
+
+if os.environ['NOTIFY_WHAT'] == 'SERVICE':
+    message += os.environ['NOTIFY_SERVICESTATE'][:2] + " "
+    avail_len = max_len - len(message)
+    message += os.environ['NOTIFY_SERVICEDESC'][:avail_len] + " "
+    avail_len = max_len - len(message)
+    message += os.environ['NOTIFY_SERVICEOUTPUT'][:avail_len]
+
+else:
+    message += "is " + os.environ['NOTIFY_HOSTSTATE']
+
+
+conn = httplib.HTTPSConnection("api.pushover.net:443")
+conn.request("POST", "/1/messages.json",
+urllib.urlencode({
+    "token"   : app_token,
+    "user"    : user_key,
+    "message" : message,
+}), { "Content-type": "application/x-www-form-urlencoded" })
+response = conn.getresponse()
+headers = dict(response.getheaders())
+status = headers.get('status')
+code, explanation = status.split(None, 1)
+if code != "200":
+    sys.stdout.write("Failed to notify via Pushover: %s, HTTP status is %s\n" % (explanation, status))
+    sys.exit(1)
diff -Nru check-mk-1.2.2p3/treasures/notifications/README check-mk-1.2.6p12/treasures/notifications/README
--- check-mk-1.2.2p3/treasures/notifications/README 1970-01-01 00:00:00.000000000 +0000
+++ check-mk-1.2.6p12/treasures/notifications/README 2015-09-21 10:59:54.000000000 +0000
@@ -0,0 +1,8 @@
+These are notification plugins for Check_MK. You can put them
+in the notifications/ directory of your Check_MK installation.
+OMD-based users put them into their site under
+local/share/check_mk/notifications/
+
+These scripts do not yet have a WATO integration. You currently
+need to update keys, passwords, etc. directly within these
+scripts.
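+
+Example (an OMD site; substitute the plugin you actually want to install):
+
+  cp pushover ~/local/share/check_mk/notifications/
+  chmod +x ~/local/share/check_mk/notifications/pushover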
diff -Nru check-mk-1.2.2p3/treasures/notifications/snmp_trap check-mk-1.2.6p12/treasures/notifications/snmp_trap
--- check-mk-1.2.2p3/treasures/notifications/snmp_trap 1970-01-01 00:00:00.000000000 +0000
+++ check-mk-1.2.6p12/treasures/notifications/snmp_trap 2014-12-11 10:14:57.000000000 +0000
@@ -0,0 +1,26 @@
+#!/bin/bash
+# Create ticket via SNMP trap
+# Creates tickets in HP Service Center
+# Author: Bastian Kuhn
+
+
+RECEIVER=""
+SENDER=""
+SNMP_COMMUNITY=""
+
+TIMESTAMP=$(date +%s)
+
+# Case: service down
+if [ "$NOTIFY_WHAT" == "SERVICE" ]; then
+    snmptrap -v 1 -c $SNMP_COMMUNITY $RECEIVER 1.3.6.1.4.1.791.2.9.2.2 $SENDER 6 \
+    12 00:00:00 1.3.6.1.4.1.791.2.9.2.1 s \
+    "$TIMESTAMP^$NOTIFY_HOSTNAME^$NOTIFY_CONTACTNAME^$NOTIFY_SHORTDATETIME^SERVER^!^$NOTIFY_SERVICEOUTPUT^!^$NOTIFY_SERVICEDESC"
+# Case: host down
+else
+    snmptrap -v 1 -c $SNMP_COMMUNITY $RECEIVER 1.3.6.1.4.1.791.2.9.2.2 $SENDER 6 \
+    12 00:00:00 1.3.6.1.4.1.791.2.9.2.1 s \
+    "$TIMESTAMP^$NOTIFY_HOSTNAME^$NOTIFY_CONTACTNAME^$NOTIFY_SHORTDATETIME^SERVER^!^$NOTIFY_HOSTOUTPUT^!^HOSTSTATE"
+fi
+
+
+
diff -Nru check-mk-1.2.2p3/treasures/notify_trap check-mk-1.2.6p12/treasures/notify_trap
--- check-mk-1.2.2p3/treasures/notify_trap 2013-11-05 09:22:37.000000000 +0000
+++ check-mk-1.2.6p12/treasures/notify_trap 1970-01-01 00:00:00.000000000 +0000
@@ -1,26 +0,0 @@
-#!/bin/bash
-# Create Ticket via SNMP Trap
-# Create Tickets in HP Service Center,
-# Author Bastian Kuhn,
-
-
-RECEIVER=""
-SENDER=""
-SNMP_COMMUNITY=""
-
-TIMESTAMP=$(date +%s)
-
-#Fall fuer Serivces Down
-if [ $NOTIFY_SERVICEOUTPUT ]; then
-    /usr/bin/snmptrap -v 1 -c $SNMP_COMMUNITY $RECEIVER 1.3.6.1.4.1.791.2.9.2.2 $SENDER 6 \
-    12 00:00:00 1.3.6.1.4.1.791.2.9.2.1 s \
-    "$TIMESTAMP^$NOTIFY_HOSTNAME^$NOTIFY_CONTACTNAME^$NOTIFY_SHORTDATETIME^SERVER^!^$NOTIFY_HOSTOUTPUT^!^HOSTSTATE"
-#Fall fuer Host Down
-else
-    /usr/bin/snmptrap -v 1 -c $SNMP_COMMUNITY $RECEIVER 1.3.6.1.4.1.791.2.9.2.2 $SENDER 6 \
-    12 00:00:00 1.3.6.1.4.1.791.2.9.2.1 s \
-    "$TIMESTAMP^$NOTIFY_HOSTNAME^$NOTIFY_CONTACTNAME^$NOTIFY_SHORTDATETIME^SERVER^!^$NOTIFY_SERVICEOUTPUT^!^$NOTIFY_SERVICEDESC"
-fi
-
-
-
diff -Nru check-mk-1.2.2p3/treasures/opcmsg check-mk-1.2.6p12/treasures/opcmsg
--- check-mk-1.2.2p3/treasures/opcmsg 1970-01-01 00:00:00.000000000 +0000
+++ check-mk-1.2.6p12/treasures/opcmsg 2015-09-21 10:59:54.000000000 +0000
@@ -0,0 +1,69 @@
+#!/usr/bin/python
+# Create message for HPOpenView
+# This notification plugin forwards the notification to the
+# local HPOpenView instance
+#
+# Note: Some paths are still hardcoded here.
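+#
+# Like the other notification scripts here, this plugin reads its whole
+# notification context from the NOTIFY_* environment variables set by
+# Check_MK (e.g. NOTIFY_WHAT, NOTIFY_HOSTNAME, NOTIFY_SERVICEDESC); the code
+# below strips the NOTIFY_ prefix and maps the values onto opcmsg arguments
+# (severity, application, object, msg_grp, msg_t, node).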
+
+import os, sys, re
+
+def substitute_context(template, context):
+    # First replace all known variables
+    for varname, value in context.items():
+        template = template.replace('$'+varname+'$', value)
+
+    # Remove the rest of the variables and make them empty
+    template = re.sub("\$[A-Z]+\$", "", template)
+    return template
+
+def main():
+    try:
+        opcmsg_bin = "/opt/OV/bin/opcmsg"
+
+        # gather all options from env
+        context = dict([
+            (var[7:], value.decode("utf-8"))
+            for (var, value)
+            in os.environ.items()
+            if var.startswith("NOTIFY_")])
+
+
+        # Severity and message text
+        if context["WHAT"] == "HOST":
+            msg_t = context["HOSTOUTPUT"]
+            severity = context["HOSTSTATEID"] == "0" and "ok" or "critical"
+        else:
+            msg_t = context["SERVICEOUTPUT"]
+            state_map = { "0": "normal", "1": "warning", "2": "critical", "3": "warning" }
+            try:
+                severity = state_map[context["SERVICESTATEID"]]
+            except:
+                severity = "normal"
+
+        # application
+        application = "RWWS4.0"
+
+        # object
+        # Important: double quotes have to be escaped here
+        the_object = context["HOSTNAME"]
+        if context["WHAT"] == "SERVICE":
+            the_object += ":" + context["SERVICEDESC"]
+
+        # msg_grp
+        msg_grp = context['CONTACTNAME']
+
+        # node
+        node = context['MONITORING_HOST']
+
+        # Assemble the command
+        command = "%s severity=%s application=%s object=\"%s\" msg_grp=\"%s\" msg_t=\"%s\" node=%s" % (
+            opcmsg_bin, severity, application, the_object, msg_grp, msg_t, node)
+
+        # Execute the command
+        print "executing command", command
+        os.system(command)
+    except Exception, e:
+        sys.stdout.write("ERROR %r" % e)
+        sys.exit(1)
+
+main()
diff -Nru check-mk-1.2.2p3/treasures/README.livedump check-mk-1.2.6p12/treasures/README.livedump
--- check-mk-1.2.2p3/treasures/README.livedump 2013-11-05 09:22:37.000000000 +0000
+++ check-mk-1.2.6p12/treasures/README.livedump 1970-01-01 00:00:00.000000000 +0000
@@ -1,69 +0,0 @@
-LIVEDUMP
----------------
-
-livedump is a small utility that can dump configuration and status
-information from a running Nagios core in order to transport that
-data to another core - effectively replicating the data.
-
-This can be used as a very efficient replacement for NSCA.
-
-Advantages:
-- Much more performant
-- Creates your Nagios configuration for the passive services
-- No obsess_over.. configuration neccessary on the source host
-
-Disadvantages:
-- Introduces some latency
-
-How to setup livedump
----------------------
-Copy the file livedump to a convenient place. Make sure that
-../livestatus/api/python/livestatus.py is either in the same
-directory as livedump or somewhere in the Python path.
-
-Using livedump
---------------
-The first step is to extract the configuration from the source system using. This step
-is needed whenever your configuration of hosts or services changes.
-NSCA users will now this. This is easily done by:
-
-./livedump -TC > some_file.cfg
-
-and then copying that file to your objects or conf.d directory on
-your target nagios. Note: The option -C will add some templates
-to the configuration that are used by the created host and service
-definitions. If you import data from more than on source those
-template will be duplicate. Use -T in that case in order to avoid
-duplication.
-
-Now restart your target system and new hosts and services will appear
-and be in pending state.
-
-Now create a cronjob that does the following every 1 or 5 minutes:
-
-1. ./livedump > ca1b2c3
-2. copy that file to the target system into the checkresults directory
-   (in OMD this is ~/tmp/nagios/checkresults)
-3.
After a correct copy touch the file ca1b2c3.ok in that - directory - -Nagios will now read in that file (which is containing all current -host and service states). The filename is arbitrary, but has to be -7 characters in length, starting with a "c". - -Filtering ---------- -In each operation mode livedump allows to filter the number of data -to be dumped. This is done by adding options -H and -S for host -and service livestatus headers. The following example will only dump -hosts and services of the host group "foo". Please make sure that -all hosts that are needed by the dumped services are also dumped: - -./livedump -H "Filter: host_groups > foo" - -Note: The -H headers will also be added to the service queries. -Make sure that all columns are prefixed with host_ in these. - -If you are using this is in conjunction with NSCA transport and do not -wish to transfer the templated configuration, you can use the filter -to only export services that have "obsess_over_service = 1" set. diff -Nru check-mk-1.2.2p3/treasures/SLAviews/README.sla_view check-mk-1.2.6p12/treasures/SLAviews/README.sla_view --- check-mk-1.2.2p3/treasures/SLAviews/README.sla_view 2013-11-05 09:22:37.000000000 +0000 +++ check-mk-1.2.6p12/treasures/SLAviews/README.sla_view 1970-01-01 00:00:00.000000000 +0000 @@ -1,3 +0,0 @@ -Put the file reporting.py into the omd-site directory local/share/check_mk/web/plugins/views -This file registers a datasource which queries the livestatus table statehist. -Additionally some painters and filters are registered for the new view "Availability statistics" which is visible to all users. diff -Nru check-mk-1.2.2p3/treasures/SLAviews/reporting.py check-mk-1.2.6p12/treasures/SLAviews/reporting.py --- check-mk-1.2.2p3/treasures/SLAviews/reporting.py 2013-11-05 09:22:37.000000000 +0000 +++ check-mk-1.2.6p12/treasures/SLAviews/reporting.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,324 +0,0 @@ -#!/usr/bin/python -# -*- encoding: utf-8; py-indent-offset: 4 -*- -# +------------------------------------------------------------------+ -# | ____ _ _ __ __ _ __ | -# | / ___| |__ ___ ___| | __ | \/ | |/ / | -# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | -# | | |___| | | | __/ (__| < | | | | . \ | -# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | -# | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | -# +------------------------------------------------------------------+ -# -# This file is part of Check_MK. -# The official homepage is at http://mathias-kettner.de/check_mk. -# -# check_mk is free software; you can redistribute it and/or modify it -# under the terms of the GNU General Public License as published by -# the Free Software Foundation in version 2. check_mk is distributed -# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- -# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A -# PARTICULAR PURPOSE. See the GNU General Public License for more de- -# ails. You should have received a copy of the GNU General Public -# License along with GNU Make; see the file COPYING. If not, write -# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, -# Boston, MA 02110-1301 USA. 
- -import time - -def paint_state_statehistory(row): - if row["statehist_state"] == -1: - return "state svcstate statep", "UNMON" - is_host = row["service_description"] == "" - - state = row["statehist_state"] - if is_host: - if state in nagios_short_host_state_names: - name = nagios_short_state_names[row["statehist_state"]] - return "state hstate hstate%s" % state, name - else: - if state in nagios_short_state_names: - name = nagios_short_state_names[row["statehist_state"]] - return "state svcstate state%s" % state, name - return "state svcstate statep", "PEND" - -def paint_state_duration(duration): - days = int(duration / 86400) - days_text = days > 0 and "%s days " % days or "" - - hours = int(duration % 86400 / 3600) - hours_text = ((days > 0 or hours > 0) and "%s hrs " % hours) or "" - - minutes = int(duration % 86400 % 3600 / 60) - minutes_text = ((days > 0 or hours > 0 or minutes > 0) and "%s min " % minutes) or "" - - seconds = int(duration % 86400 % 3600 % 60) - seconds_text = (days > 0 or hours > 0 or minutes > 0) and " " or "%s sec" % seconds - - return "number", "%s%s%s%s" % (days_text, hours_text, minutes_text, seconds_text) - -def paint_float_to_percent(value): - return "number", "%.2f %%" % (100 * float(value)), - -# From / Until -multisite_painters["statehist_from"] = { - "title" : _("State history: interval start"), - "short" : _("From"), - "columns" : [ "statehist_from", "statehist_time" ], - "options" : [ "ts_format", "ts_date" ], - "paint" : lambda row: paint_age(row["statehist_from"], row["statehist_time"], 0), -} - -multisite_painters["statehist_until"] = { - "title" : _("State history: interval end"), - "short" : _("Until"), - "columns" : ["statehist_until", "statehist_time"], - "options" : ["ts_format", "ts_date"], - "paint" : lambda row: paint_age(row["statehist_until"], row["statehist_time"], 0), -} - -# check_output -multisite_painters["statehist_check_output"] = { - "title" : _("State history: log output"), - "short" : _("Log output"), - "columns" : [ "statehist_log_output" ], - "paint" : lambda row: ("", row["log_output"]), -} - -multisite_painters["statehist_trigger"] = { - "title" : _("State history: debug information (triggered by)"), - "short" : _("Trigger"), - "columns" : [ "statehist_debug_info" ], - "paint" : lambda row: ("", row["statehist_debug_info"]), -} - -# states -multisite_painters["statehist_state"] = { - "title" : _("State history: state"), - "short" : _("State"), - "columns" : [ "statehist_state" ], - "paint" : paint_state_statehistory -} - -multisite_painters["statehist_in_downtime"] = { - "title" : _("State history: host or service in downtime"), - "short" : _("Downtime"), - "columns" : [ "statehist_in_downtime" ], - "paint" : lambda row: ("", row["statehist_in_downtime"]), -} - -multisite_painters["statehist_in_host_downtime"] = { - "title" : _("State history: host in downtime"), - "short" : _("Host downtime"), - "columns" : [ "statehist_in_host_downtime" ], - "paint" : lambda row: ("", row["statehist_in_host_downtime"]), -} - -multisite_painters["statehist_host_down"] = { - "title" : _("State history: host down"), - "short" : _("Host down"), - "columns" : [ "statehist_host_down" ], - "paint" : lambda row: ("", row["statehist_host_down"]), -} - -multisite_painters["statehist_is_flapping"] = { - "title" : _("State history: host or service flapping"), - "short" : _("Flapping"), - "columns" : [ "statehist_is_flapping" ], - "paint" : lambda row: ("", row["statehist_is_flapping"]), -} - -multisite_painters["statehist_in_notification_period"] = { 
- "title" : _("State history: host or service in notification period"), - "short" : _("In notification"), - "columns" : [ "statehist_in_notification_period" ], - "paint" : lambda row: ("", row["statehist_in_notification_period"]), -} - -multisite_painter_options["statehist_duration_format"] = { - "title" : _("State duration format"), - "default" : "percent", - "values" : [ - ("percent", _("Percent of query interval")), - ("seconds", _("Seconds")), - ("timestamp", _("Timestamp")), - ] -} - -def paint_statehist_duration(in_seconds, in_part): - mode = get_painter_option("statehist_duration_format") - if mode == "seconds": - return "number", in_seconds - if mode == "timestamp": - return paint_state_duration(in_seconds) - if mode == "percent": - return paint_float_to_percent(in_part) - - -# duration -multisite_painters["statehist_duration"] = { - "title" : _("State history: state duration"), - "short" : _("Duration"), - "columns" : [ "statehist_duration", "statehist_duration_part" ], - "options" : [ "statehist_duration_format" ], - "paint" : lambda row: paint_statehist_duration(row["statehist_duration"], row["statehist_duration_part"]) -} - -#multisite_painters["statehist_duration_ok"] = { -# "title" : _("State history: state duration OK"), -# "short" : _("Duration OK"), -# "columns" : ["statehist_duration_ok", "statehist_duration_part_ok"], -# "options" : ["statehist_duration_format"], -# "paint" : lambda row: paint_statehist_duration(row["statehist_duration_ok"], row["statehist_duration_part_ok"]) -#} -#multisite_painters["statehist_duration_warning"] = { -# "title" : _("State history: state duration WARNING"), -# "short" : _("Duration WARN"), -# "columns" : ["statehist_duration_warning", "statehist_duration_part_warning"], -# "options" : ["statehist_duration_format"], -# "paint" : lambda row: paint_statehist_duration(row["statehist_duration_warning"], row["statehist_duration_part_warning"]) -#} -#multisite_painters["statehist_duration_critical"] = { -# "title" : _("State history: state duration CRITICAL"), -# "short" : _("Duration CRIT"), -# "columns" : ["statehist_duration_critical", "statehist_duration_part_critical"], -# "options" : ["statehist_duration_format"], -# "paint" : lambda row: paint_statehist_duration(row["statehist_duration_critical"], row["statehist_duration_part_critical"]) -#} -#multisite_painters["statehist_duration_unknown"] = { -# "title" : _("State history: state duration UNKNOWN"), -# "short" : _("Duration UNKNOWN"), -# "columns" : ["statehist_duration_unknown", "statehist_duration_part_unknown"], -# "options" : ["statehist_duration_format"], -# "paint" : lambda row: paint_statehist_duration(row["statehist_duration_unknown"], row["statehist_duration_part_unknown"]) -#} -#multisite_painters["statehist_duration_unmonitored"] = { -# "title" : _("State history: state duration UNMONITORED"), -# "short" : _("Duration UNMONITORED"), -# "columns" : ["statehist_duration_unmonitored", "statehist_duration_part_unmonitored"], -# "options" : ["statehist_duration_format"], -# "paint" : lambda row: paint_statehist_duration(row["statehist_duration_unmonitored"], row["statehist_duration_part_unmonitored"]) -#} - -# stats duration ( sum duration ) -multisite_painters["statehist_stats_duration_ok"] = { - "title" : _("State history: sum of duration OK"), - "short" : _("OK"), - "columns" : ["stats_ok", "stats_part_ok"], - "options" : ["statehist_duration_format"], - "paint" : lambda row: paint_statehist_duration(row["stats_ok"], row["stats_part_ok"]) -} - 
-multisite_painters["statehist_stats_duration_warning"] = { - "title" : _("State history: sum of duration WARNING"), - "short" : _("WARN"), - "columns" : ["stats_warning", "stats_part_warning"], - "options" : ["statehist_duration_format"], - "paint" : lambda row: paint_statehist_duration(row["stats_warning"], row["stats_part_warning"]) -} - -multisite_painters["statehist_stats_duration_critical"] = { - "title" : _("State history: sum of duration CRITICAL"), - "short" : _("CRIT"), - "columns" : [ "stats_critical", "stats_part_critical"], - "options" : [ "statehist_duration_format" ], - "paint" : lambda row: paint_statehist_duration(row["stats_critical"], row["stats_part_critical"]) -} - -multisite_painters["statehist_stats_duration_unknown"] = { - "title" : _("State history: sum of duration UNKNOWN"), - "short" : _("UNKNOWN"), - "columns" : ["stats_unknown", "stats_part_unknown"], - "options" : ["statehist_duration_format"], - "paint" : lambda row: paint_statehist_duration(row["stats_unknown"], row["stats_part_unknown"]) -} - -multisite_painters["statehist_stats_duration_unmonitored"] = { - "title" : _("State history: sum of duration UNMONITORED"), - "short" : _("UNMONITORED"), - "columns" : ["stats_unmonitored", "stats_part_unmonitored"], - "options" : ["statehist_duration_format"], - "paint" : lambda row: paint_statehist_duration(row["stats_unmonitored"], row["stats_part_unmonitored"]) -} - -# datasources -multisite_datasources["statehist"] = { - "title" : _("State history"), - "table" : "statehist", - "infos" : [ "statehist", "statehist_time", "host", "service", "log" ], - "keys" : [], - "idkeys" : [], - "ignore_limit": True -} - -multisite_datasources["statehist_stats"] = { - "title" : _("State history statistics"), - "table" : "statehist", - "infos" : [ "statehist_time", "host", "service", "log" ], - "add_headers" : "Stats: sum duration_ok\nStats: sum duration_part_ok\n" - "Stats: sum duration_warning\nStats: sum duration_part_warning\n" - "Stats: sum duration_critical\nStats: sum duration_part_critical\n" - "Stats: sum duration_unknown\nStats: sum duration_part_unknown\n" - "Stats: sum duration_unmonitored\nStats: sum duration_part_unmonitored\n", - "add_columns" : [ "stats_ok", "stats_part_ok", "stats_warning", "stats_part_warning", - "stats_critical", "stats_part_critical", "stats_unknown", "stats_part_unknown", - "stats_unmonitored", "stats_part_unmonitored"], - "keys" : [], - "idkeys" : [], - "ignore_limit": True -} - -# filters -declare_filter(251, FilterTime("statehist_time", "filter_statehist_time", _("Statehistory query interval"), "time")) - -multisite_builtin_views.update({'availability_stats': {'browser_reload': 0, - 'column_headers': 'off', - 'datasource': 'statehist_stats', - 'description': u'', - 'group_painters': [], - 'hard_filters': [], - 'hard_filtervars': [('filter_statehist_time_from', - '24'), - ('filter_statehist_time_from_range', - '3600'), - ('filter_statehist_time_until', - ''), - ('filter_statehist_time_until_range', - '3600'), - ('host', ''), - ('service', '')], - 'hidden': False, - 'hide_filters': [], - 'hidebutton': False, - 'icon': None, - 'layout': 'boxed', - 'linktitle': u'Availabliltiy Statistics', - 'mobile': False, - 'mustsearch': True, - 'name': 'availability_stats', - 'num_columns': 1, - 'owner': 'demo123', - 'painters': [('host', None, ''), - ('service_description', None, ''), - ('statehist_stats_duration_ok', - None, - ''), - ('statehist_stats_duration_warning', - None, - ''), - ('statehist_stats_duration_critical', - None, - ''), - 
('statehist_stats_duration_unmonitored', - None, - '')], - 'play_sounds': False, - 'public': False, - 'show_checkboxes': None, - 'show_filters': ['filter_statehist_time', - 'hostregex', - 'serviceregex'], - 'sorters': [], - 'title': u'Availabliltiy Statistics', - 'topic': u'Other', - 'user_sortable': 'on'}}) diff -Nru check-mk-1.2.2p3/treasures/solaris_cache_plugins.sh check-mk-1.2.6p12/treasures/solaris_cache_plugins.sh --- check-mk-1.2.2p3/treasures/solaris_cache_plugins.sh 2013-11-05 09:22:37.000000000 +0000 +++ check-mk-1.2.6p12/treasures/solaris_cache_plugins.sh 1970-01-01 00:00:00.000000000 +0000 @@ -1,54 +0,0 @@ -#!/usr/bin/python -# -*- encoding: utf-8; py-indent-offset: 4 -*- -# +------------------------------------------------------------------+ -# | ____ _ _ __ __ _ __ | -# | / ___| |__ ___ ___| | __ | \/ | |/ / | -# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | -# | | |___| | | | __/ (__| < | | | | . \ | -# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | -# | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | -# +------------------------------------------------------------------+ -# -# This file is part of Check_MK. -# The official homepage is at http://mathias-kettner.de/check_mk. -# -# check_mk is free software; you can redistribute it and/or modify it -# under the terms of the GNU General Public License as published by -# the Free Software Foundation in version 2. check_mk is distributed -# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- -# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A -# PARTICULAR PURPOSE. See the GNU General Public License for more de- -# ails. You should have received a copy of the GNU General Public -# License along with GNU Make; see the file COPYING. If not, write -# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, -# Boston, MA 02110-1301 USA. - -MK_CONFDIR=/etc/check_mk -CACHE_FILE=$MK_CONFDIR/db2-logs.cache -if [ ! -d $MK_CONFDIR ]; then - mkdir -p $MK_CONFDIR -fi - -# Do not use cache file after 20 minutes -MAXAGE=1200 - -# Check if file exists and is recent enough -if [ -s $CACHE_FILE ] -then - MTIME=$(perl -e 'if (! -f $ARGV[0]){die "0000000"};$mtime=(stat($ARGV[0]))[9];print ($^T-$mtime);' $CACHE_FILE ) - if [ $MTIME -le $MAXAGE ] ; then - USE_CACHE_FILE=1 - fi -fi - -if [ -s "$CACHE_FILE" ] -then - cat $CACHE_FILE -fi - -if [ -z "$USE_CACHE_FILE" -a ! -e "$CACHE_FILE.new" ] -then - nohup bash -c "COMMAND | grep -v 'mail'" > $CACHE_FILE.new 2> /dev/null && mv $CACHE_FILE.new $CACHE_FILE & -fi - diff -Nru check-mk-1.2.2p3/treasures/unix_cache_plugins.sh check-mk-1.2.6p12/treasures/unix_cache_plugins.sh --- check-mk-1.2.2p3/treasures/unix_cache_plugins.sh 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/treasures/unix_cache_plugins.sh 2014-10-30 13:30:24.000000000 +0000 @@ -0,0 +1,45 @@ +#!/bin/sh +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. 
+#
+# check_mk is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by
+# the Free Software Foundation in version 2. check_mk is distributed
+# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with-
+# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE. See the GNU General Public License for more de-
+# ails. You should have received a copy of the GNU General Public
+# License along with GNU Make; see the file COPYING. If not, write
+# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
+# Boston, MA 02110-1301 USA.
+
+CACHE_FILE=/tmp/XXXXX.cache
+# Do not use cache file after 20 minutes
+MAXAGE=1200
+USE_CACHE_FILE=""
+# Check if file exists and is recent enough
+if [ -s $CACHE_FILE ]
+then
+    MTIME=$(/usr/bin/perl -e 'if (! -f $ARGV[0]){die "0000000"};$mtime=(stat($ARGV[0]))[9];print ($^T-$mtime);' $CACHE_FILE )
+    if [ $MTIME -lt $MAXAGE ] ; then
+        USE_CACHE_FILE=1
+    fi
+fi
+if [ -s "$CACHE_FILE" ]
+then
+    cat $CACHE_FILE
+fi
+if [ -z "$USE_CACHE_FILE" -a ! -e "$CACHE_FILE.new" ]
+then
+    nohup sh -c "XXXXXX" > $CACHE_FILE.new 2> /dev/null && mv $CACHE_FILE.new $CACHE_FILE &
+fi
diff -Nru check-mk-1.2.2p3/treasures/wato_geo_fields.py check-mk-1.2.6p12/treasures/wato_geo_fields.py
--- check-mk-1.2.2p3/treasures/wato_geo_fields.py 1970-01-01 00:00:00.000000000 +0000
+++ check-mk-1.2.6p12/treasures/wato_geo_fields.py 2014-10-30 13:30:24.000000000 +0000
@@ -0,0 +1,50 @@
+#!/usr/bin/python
+# -*- encoding: utf-8; py-indent-offset: 4 -*-
+# +------------------------------------------------------------------+
+# | ____ _ _ __ __ _ __ |
+# | / ___| |__ ___ ___| | __ | \/ | |/ / |
+# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / |
+# | | |___| | | | __/ (__| < | | | | . \ |
+# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ |
+# | |
+# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de |
+# +------------------------------------------------------------------+
+#
+# This file is part of Check_MK.
+# The official homepage is at http://mathias-kettner.de/check_mk.
+#
+# check_mk is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by
+# the Free Software Foundation in version 2. check_mk is distributed
+# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with-
+# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE. See the GNU General Public License for more de-
+# ails. You should have received a copy of the GNU General Public
+# License along with GNU Make; see the file COPYING. If not, write
+# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
+# Boston, MA 02110-1301 USA.
+
+# place this file into ~/local/share/check_mk/web/plugins/wato to get two new fields in the WATO host properties.
+# these fields can be used to add Latitude and Longitude information. Useful for the Nagvis Geomap
+
+declare_host_attribute(
+    NagiosTextAttribute(
+        "lat",
+        "_LAT",
+        "Latitude",
+        "Latitude",
+    ),
+    show_in_table = False,
+    show_in_folder = False,
+)
+
+declare_host_attribute(
+    NagiosTextAttribute(
+        "long",
+        "_LONG",
+        "Longitude",
+        "Longitude",
+    ),
+    show_in_table = False,
+    show_in_folder = False,
+)
diff -Nru check-mk-1.2.2p3/treasures/wato_hook_cleanup_folders.py check-mk-1.2.6p12/treasures/wato_hook_cleanup_folders.py
--- check-mk-1.2.2p3/treasures/wato_hook_cleanup_folders.py 1970-01-01 00:00:00.000000000 +0000
+++ check-mk-1.2.6p12/treasures/wato_hook_cleanup_folders.py 2014-10-30 13:30:24.000000000 +0000
@@ -0,0 +1,49 @@
+#!/usr/bin/python
+# -*- encoding: utf-8; py-indent-offset: 4 -*-
+# +------------------------------------------------------------------+
+# | ____ _ _ __ __ _ __ |
+# | / ___| |__ ___ ___| | __ | \/ | |/ / |
+# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / |
+# | | |___| | | | __/ (__| < | | | | . \ |
+# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ |
+# | |
+# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de |
+# +------------------------------------------------------------------+
+#
+# This file is part of Check_MK.
+# The official homepage is at http://mathias-kettner.de/check_mk.
+#
+# check_mk is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by
+# the Free Software Foundation in version 2. check_mk is distributed
+# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with-
+# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE. See the GNU General Public License for more de-
+# ails. You should have received a copy of the GNU General Public
+# License along with GNU Make; see the file COPYING. If not, write
+# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
+# Boston, MA 02110-1301 USA.
+
+# put this script into local/share/check_mk/web/plugins/wato
+# of a slave site
+# this deletes all WATO folders immediately after sync except one
+# folder with the same name as the site name of the slave site
+#
+# it can be used to prevent a customer from seeing the config of other customers
+# for this to work you need to have one folder per customer on the top
+# level and one site per customer with exactly the same name
+def pre_activate_changes_cleanup(_unused):
+    log = open('%s/tmp/hook.log' % defaults.omd_root,'w')
+    log.write('omd_site: %s, omd_root: %s\n' % (defaults.omd_site, defaults.omd_root))
+    confd = "%s/etc/check_mk/conf.d/wato/" % defaults.omd_root
+    for dirname, dirnames, filenames in os.walk(confd):
+        for subdirname in dirnames:
+            if subdirname == defaults.omd_site:
+                log.write("keeping subdir: %s\n" % subdirname)
+            else:
+                log.write("deleting subdir: %s\n" % subdirname)
+                shutil.rmtree(confd + subdirname)
+        break
+    log.close()
+
+api.register_hook('pre-activate-changes', pre_activate_changes_cleanup)
diff -Nru check-mk-1.2.2p3/treasures/wato_host_svc_groups.py check-mk-1.2.6p12/treasures/wato_host_svc_groups.py
--- check-mk-1.2.2p3/treasures/wato_host_svc_groups.py 2013-03-04 11:48:41.000000000 +0000
+++ check-mk-1.2.6p12/treasures/wato_host_svc_groups.py 2014-10-30 13:30:24.000000000 +0000
@@ -7,7 +7,7 @@
 # | | |___| | | | __/ (__| < | | | | . 
diff -Nru check-mk-1.2.2p3/treasures/wato_host_svc_groups.py check-mk-1.2.6p12/treasures/wato_host_svc_groups.py
--- check-mk-1.2.2p3/treasures/wato_host_svc_groups.py	2013-03-04 11:48:41.000000000 +0000
+++ check-mk-1.2.6p12/treasures/wato_host_svc_groups.py	2014-10-30 13:30:24.000000000 +0000
@@ -7,7 +7,7 @@
 # |           | |___| | | |  __/ (__|   <    | |  | | . \            |
 # |            \____|_| |_|\___|\___|_|\_\___|_|  |_|_|\_\           |
 # |                                                                  |
-# | Copyright Mathias Kettner 2013             mk@mathias-kettner.de |
+# | Copyright Mathias Kettner 2014             mk@mathias-kettner.de |
 # +------------------------------------------------------------------+
 #
 # This file is part of Check_MK.
diff -Nru check-mk-1.2.2p3/treasures/wato_import.py check-mk-1.2.6p12/treasures/wato_import.py
--- check-mk-1.2.2p3/treasures/wato_import.py	2013-11-05 09:22:37.000000000 +0000
+++ check-mk-1.2.6p12/treasures/wato_import.py	2015-09-21 10:59:54.000000000 +0000
@@ -7,7 +7,7 @@
 # |           | |___| | | |  __/ (__|   <    | |  | | . \            |
 # |            \____|_| |_|\___|\___|_|\_\___|_|  |_|_|\_\           |
 # |                                                                  |
-# | Copyright Mathias Kettner 2013             mk@mathias-kettner.de |
+# | Copyright Mathias Kettner 2014             mk@mathias-kettner.de |
 # +------------------------------------------------------------------+
 #
 # This file is part of Check_MK.
@@ -29,6 +29,7 @@
 import os
 import sys
 try:
+    path = os.environ.pop('OMD_ROOT')
     pathlokal = "~/etc/check_mk/conf.d/wato/"
     pathlokal = os.path.expanduser(pathlokal)
     datei = open(sys.argv[1],'r')
@@ -36,29 +37,38 @@
     print """Run this script inside an OMD site
     Usage: ./wato_import.py csvfile.csv
     CSV Example:
-    wato_foldername;hostname;host_alias"""
+    wato_foldername;hostname;host_alias;ipaddress|None"""
     sys.exit()
-path = path + "/etc/check_mk/wato/"
 folders = {}
 for line in datei:
-    ordner, name, alias = line.split(';')[:3]
+    if line.startswith('#'):
+        continue
+    ordner, name, alias, ipaddress = line.split(';')[:4]
     if ordner:
         try:
             os.makedirs(pathlokal+ordner)
         except os.error:
             pass
     folders.setdefault(ordner,[])
-
-    folders[ordner].append((name,alias))
+    ipaddress = ipaddress.strip()
+    if ipaddress == "None":
+        ipaddress = False
+    folders[ordner].append((name,alias,ipaddress))
 datei.close()
 
 for folder in folders:
     all_hosts = ""
     host_attributes = ""
-    for name, alias in folders[folder]:
+    ips = ""
+    for name, alias, ipaddress in folders[folder]:
         all_hosts += "'%s',\n" % (name)
-        host_attributes += "'%s' : {'alias' : u'%s' },\n" % (name, alias)
+        if ipaddress:
+            host_attributes += "'%s' : {'alias' : u'%s', 'ipaddress' : '%s' },\n" % (name, alias, ipaddress)
+            ips += "'%s' : '%s'," % ( name, ipaddress )
+        else:
+            host_attributes += "'%s' : {'alias' : u'%s' },\n" % (name, alias)
+
     ziel = open(pathlokal + folder + '/hosts.mk','w')
     ziel.write('all_hosts += [')
@@ -66,5 +76,9 @@
     ziel.write(']\n\n')
     ziel.write('host_attributes.update({')
     ziel.write(host_attributes)
-    ziel.write('})')
+    ziel.write('})\n\n')
+    if len(ips) > 0:
+        ziel.write('ipaddresses.update({')
+        ziel.write(ips)
+        ziel.write('})\n\n')
     ziel.close()
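For illustration, a hypothetical input file in the extended CSV format (the folder name, host names, aliases and addresses are made up):

customer1;host01;Host 01;10.1.1.10
customer1;host02;Host 02;None

Given that input, the script would write ~/etc/check_mk/conf.d/wato/customer1/hosts.mk roughly as follows; only hosts with a real address end up in ipaddresses:

all_hosts += ['host01',
'host02',
]

host_attributes.update({'host01' : {'alias' : u'Host 01', 'ipaddress' : '10.1.1.10' },
'host02' : {'alias' : u'Host 02' },
})

ipaddresses.update({'host01' : '10.1.1.10',})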
diff -Nru check-mk-1.2.2p3/treasures/wato_include_hosts check-mk-1.2.6p12/treasures/wato_include_hosts
--- check-mk-1.2.2p3/treasures/wato_include_hosts	1970-01-01 00:00:00.000000000 +0000
+++ check-mk-1.2.6p12/treasures/wato_include_hosts	2014-07-04 17:50:27.000000000 +0000
@@ -0,0 +1,90 @@
+
+# This file needs to be appended to the existing hosts.mk file.
+# When the hosts.mk file is parsed, the include dir is evaluated.
+# Within the include dir there are host definition files with the format
+#
+# ipaddress:1.2.3.4
+# tag_agent:cmk-agent
+# tag_criticality:critical
+# tag_networking:lan
+# alias:Alias of Host A
+#
+# When the WATO folder is saved, the already existing hosts are merged
+# with the hosts from the included files. After hosts.mk has been newly
+# written, this script appendix is removed again.
+
+# Configuration options
+_include_dir = ".devops"
+_remove_unknown_hosts = True
+
+# TODO: add the complete include dir from a shadow path so they do not
+#       interfere with the rest of the configuration
+
+# TODO: exit if this script is appended multiple times to a hosts.mk file
+import os, inspect
+def add_host_data(_filename):
+    global all_hosts, host_attributes, ipaddresses, extra_host_conf
+
+    try:
+        _host_ip         = None
+        _tags_plain      = []
+        _host_attributes = {}
+        _alias           = None
+
+        _lines    = file(_filename).readlines()
+        _hostname = os.path.basename(_filename)
+        # Parse data
+        for _line in _lines:
+            _what, _data = _line.split(":",1)
+            _data = _data[:-1]
+            if _what.startswith("tag_"):
+                _tags_plain.append(_data)
+            elif _what == "ipaddress":
+                _host_ip = _data
+            elif _what == "alias":
+                _alias = _data
+            _host_attributes.update({_what: _data})
+
+        # Add data to config
+        all_hosts += [ _hostname + "|" + "|".join(_tags_plain) + "|/" + FOLDER_PATH + "/" ]
+        if _host_ip:
+            ipaddresses.update({_hostname: _host_ip})
+
+        if _alias:
+            extra_host_conf.setdefault('alias', []).extend([(_alias, [_hostname])])
+
+        host_attributes.update({_hostname: _host_attributes})
+    except Exception, e:
+        pass
+
+_hosts_mk_path = os.path.dirname(inspect.getsourcefile(lambda _: None))
+for _dirpath, _dirname, _filenames in os.walk(_hosts_mk_path + "/" + _include_dir):
+    for _filename in _filenames:
+        if _filename.startswith("."):
+            continue
+        for _hh in all_hosts:
+            if _hh.startswith(_filename + "|"):
+                # Host already in config
+                break
+        else:
+            # Add host to config
+            add_host_data("%s/%s" % (_dirpath, _filename))
+
+
+# Remove any hosts with no available include files
+if _remove_unknown_hosts:
+    _hosts_to_remove = []
+    for _idx, _hh in enumerate(all_hosts):
+        if _hh.endswith("|/%s/" % FOLDER_PATH):
+            _hostname = _hh.split("|",1)[0]
+            if _hostname not in _filenames:
+                _hosts_to_remove.append( (_hostname, _idx) )
+
+    for _hostname, _idx in _hosts_to_remove[::-1]:
+        del all_hosts[_idx]
+        if _hostname in ipaddresses:
+            del ipaddresses[_hostname]
+        if _hostname in host_attributes:
+            del host_attributes[_hostname]
+
Binary files /tmp/tmpz1J_py/xquijR6Ryw/check-mk-1.2.2p3/treasures/webapps/cmk_nagios_webapps-1.1.mkp and /tmp/tmpz1J_py/GXS8RRKTxu/check-mk-1.2.6p12/treasures/webapps/cmk_nagios_webapps-1.1.mkp differ
diff -Nru check-mk-1.2.2p3/treasures/webapps/README check-mk-1.2.6p12/treasures/webapps/README
--- check-mk-1.2.2p3/treasures/webapps/README	1970-01-01 00:00:00.000000000 +0000
+++ check-mk-1.2.6p12/treasures/webapps/README	2014-07-04 17:50:27.000000000 +0000
@@ -0,0 +1,17 @@
+Installing this mkp package results in the following new features:
+
+* INAG support (iPhone Nagios app)
+  Only works with basic auth activated
+
+* Nagstatus support (Windows sidebar / desktop gadget)
+  http://monitoringhost/site/check_mk/nagios_webapps.py?cmd=nagstatus.xml&_username=XXXXXX&_secret=MAWSYCJAMAICCFBADFDS
+
+* Get logfile of the last 24 hours
+  http://monitoringhost/site/check_mk/nagios_webapps.py?cmd=nagios.log&_username=XXXXXX&_secret=MAWSYCJAMAICCFBADFDS
+
+* Get Nagios status.dat
+  http://monitoringhost/site/check_mk/nagios_webapps.py?cmd=status.dat&_username=XXXXXX&_secret=MAWSYCJAMAICCFBADFDS
+
+Note:
+All of these features also work in distributed setups,
+so you can acquire one big status.dat file which reflects the state of several sites.
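A minimal sketch of fetching status.dat from the last URL above with Python; the host, site name, user and automation secret are placeholders exactly as in the README:

import urllib2

URL = ("http://monitoringhost/site/check_mk/nagios_webapps.py"
       "?cmd=status.dat&_username=XXXXXX&_secret=MAWSYCJAMAICCFBADFDS")

# Requires basic auth / the automation secret to be set up as described above
data = urllib2.urlopen(URL).read()
open("status.dat", "w").write(data)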
diff -Nru check-mk-1.2.2p3/treasures/wiki_painter.py check-mk-1.2.6p12/treasures/wiki_painter.py
--- check-mk-1.2.2p3/treasures/wiki_painter.py	1970-01-01 00:00:00.000000000 +0000
+++ check-mk-1.2.6p12/treasures/wiki_painter.py	2014-10-30 13:30:24.000000000 +0000
@@ -0,0 +1,56 @@
+#!/usr/bin/python
+# -*- encoding: utf-8; py-indent-offset: 4 -*-
+# +------------------------------------------------------------------+
+# |             ____ _               _        __  __ _  __           |
+# |            / ___| |__   ___  ___| | __   |  \/  | |/ /           |
+# |           | |   | '_ \ / _ \/ __| |/ /   | |\/| | ' /            |
+# |           | |___| | | |  __/ (__|   <    | |  | | . \            |
+# |            \____|_| |_|\___|\___|_|\_\___|_|  |_|_|\_\           |
+# |                                                                  |
+# | Copyright Mathias Kettner 2014             mk@mathias-kettner.de |
+# +------------------------------------------------------------------+
+#
+# This file is part of Check_MK.
+# The official homepage is at http://mathias-kettner.de/check_mk.
+#
+# check_mk is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by
+# the Free Software Foundation in version 2.  check_mk is distributed
+# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with-
+# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE. See the GNU General Public License for more de-
+# tails. You should have received a copy of the GNU General Public
+# License along with GNU Make; see the file COPYING. If not, write
+# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
+# Boston, MA 02110-1301 USA.
+
+def paint_wiki_notes(row):
+    host = row["host_name"]
+    svc = row.get("service_description")
+    svc = svc.replace(':','')
+    svc = svc.replace('/','')
+    svc = svc.replace('\\','')
+    svc = svc.replace(' ','_')
+    svc = svc.lower()
+    host = host.lower()
+    filename = defaults.omd_root + '/var/dokuwiki/data/pages/docu/%s/%s.txt' % (host, svc)
+    if not os.path.isfile(filename):
+        filename = defaults.omd_root + '/var/dokuwiki/data/pages/docu/default/%s.txt' % (svc,)
+
+    text  = u"<a href='../wiki/doku.php?id=docu:default:%s' target='_blank'>Edit Default Instructions</a> - " % svc
+    text += u"<a href='../wiki/doku.php?id=docu:%s:%s' target='_blank'>Edit Host Instructions</a><br>" % (host, svc)
+
+    try:
+        import codecs
+        text += codecs.open(filename, "r", "utf-8").read()
+    except IOError:
+        text += "No instructions found in " + filename
+
+    return "", text + "<br>"
+
+multisite_painters["svc_wiki_notes"] = {
+    "title"   : _("Instructions"),
+    "short"   : _("Instr"),
+    "columns" : [ "host_name", "service_description" ],
+    "paint"   : paint_wiki_notes,
+}
diff -Nru check-mk-1.2.2p3/treasures/wiki_snapin.py check-mk-1.2.6p12/treasures/wiki_snapin.py
--- check-mk-1.2.2p3/treasures/wiki_snapin.py	2013-11-05 09:22:37.000000000 +0000
+++ check-mk-1.2.6p12/treasures/wiki_snapin.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,125 +0,0 @@
-#!/usr/bin/python
-# -*- encoding: utf-8; py-indent-offset: 4 -*-
-# +------------------------------------------------------------------+
-# |             ____ _               _        __  __ _  __           |
-# |            / ___| |__   ___  ___| | __   |  \/  | |/ /           |
-# |           | |   | '_ \ / _ \/ __| |/ /   | |\/| | ' /            |
-# |           | |___| | | |  __/ (__|   <    | |  | | . \            |
-# |            \____|_| |_|\___|\___|_|\_\___|_|  |_|_|\_\           |
-# |                                                                  |
-# | Copyright Mathias Kettner 2013             mk@mathias-kettner.de |
-# +------------------------------------------------------------------+
-#
-# This file is part of Check_MK.
-# The official homepage is at http://mathias-kettner.de/check_mk.
-#
-# check_mk is free software; you can redistribute it and/or modify it
-# under the terms of the GNU General Public License as published by
-# the Free Software Foundation in version 2.  check_mk is distributed
-# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with-
-# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A
-# PARTICULAR PURPOSE. See the GNU General Public License for more de-
-# tails. You should have received a copy of the GNU General Public
-# License along with GNU Make; see the file COPYING. If not, write
-# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
-# Boston, MA 02110-1301 USA.
-
-#!/usr/bin/python
-#Author Bastian Kuhn
-
-#This file is a Multisite snapin which shows
-#the dokuwiki navigation menu if one is set.
-#Place the file in ~/local/share/check_mk/web/plugins/sidebar
-#and restart Apache.
-
-def render_wiki():
-    import re
-    filename = defaults.omd_root + '/var/dokuwiki/data/pages/sidebar.txt'
-    start_ul = True
-    ul_started = False
-    try:
-        for line in file(filename).readlines():
-            line = line.strip()
-            if line == "":
-                if ul_started == True:
-                    html.write("</ul>")
-                    start_ul = True
-                    ul_started = False
-
-            elif line == "----":
-                html.write("<hr>")
-
-            elif line.startswith("*"):
-                if start_ul == True:
-                    html.write("<ul>")
-                    start_ul = False
-                    ul_started = True
-
-                erg = re.findall('\[\[(.*)\]\]', line)
-                if len(erg) == 0:
-                    continue
-                erg = erg[0].split('|')
-                if len(erg) > 1:
-                    link = erg[0]
-                    name = erg[1]
-                else:
-                    link = erg[0]
-                    name = erg[0]
-
-
-                if link.startswith("http://") or link.startswith("https://"):
-                    html.write('<li>')
-                    simplelink(name, link, "_blank")
-                    html.write('</li>')
-                else:
-                    erg = name.split(':')
-                    if len(erg) > 0:
-                        name = erg[-1]
-                    else:
-                        name = erg[0]
-                    bulletlink(name, "/%s/wiki/doku.php?id=%s" % (defaults.omd_site, link))
-
-            else:
-                html.write(line)
-
-        if ul_started == True:
-            html.write("</ul>")
-    except IOError:
-        html.write("You have to create a sidebar first")
-
-    html.write("<br>")
-    html.javascript("""
-    function wiki_search()
-    {
-        var oInput = document.getElementById('wikisearch_input');
-        top.frames["main"].location.href =
-            "/%s/wiki/doku.php?do=search&id=" + escape(oInput.value);
-    }
-    """ % defaults.omd_site)
-    html.begin_form("wikisearch", onsubmit="wiki_search();")
-    html.text_input("search", "", id="wikisearch_input", )
-    html.end_form()
-
-
-sidebar_snapins["wiki"] = {
-    "title"       : _("Wiki"),
-    "description" : _("Shows the Wiki Navigation of the OMD Site"),
-    "render"      : render_wiki,
-    "allowed"     : [ "admin", "user", "guest" ],
-    "styles"      : """
-    input#wikisearch_input {
-        margin-top: 3px;
-        width: 222px;
-    }
-    #snapin_container_wiki hr {
-        margin: 2px;
-        margin-bottom: 2.5px;
-    }
-    #snapin_container_wiki ul {
-        margin: 1px;
-    }
-    """
-}
-
diff -Nru check-mk-1.2.2p3/treasures/windows_msi/build_msi.bat check-mk-1.2.6p12/treasures/windows_msi/build_msi.bat
--- check-mk-1.2.2p3/treasures/windows_msi/build_msi.bat	1970-01-01 00:00:00.000000000 +0000
+++ check-mk-1.2.6p12/treasures/windows_msi/build_msi.bat	2015-09-21 10:59:54.000000000 +0000
@@ -0,0 +1,5 @@
+"C:\Program Files (x86)\wix\candle.exe" -ext WixUtilExtension C:\mkmsi\cmk_WixUI_InstallDir.wxs
+"C:\Program Files (x86)\wix\candle.exe" -ext WixUtilExtension C:\mkmsi\cmk_InstallDirDlg.wxs
+"C:\Program Files (x86)\wix\candle.exe" -ext WixUtilExtension C:\mkmsi\check_mk_agent.wxs
+"C:\Program Files (x86)\wix\light.exe" -ext WixUIExtension -ext WixUtilExtension -sval -o check_mk_agent.msi C:\mkmsi\check_mk_agent.wixobj C:\mkmsi\cmk_WixUI_InstallDir.wixobj C:\mkmsi\cmk_InstallDirDlg.wixobj
+@pause
diff -Nru check-mk-1.2.2p3/treasures/windows_msi/check_mk_agent_baked.wxs check-mk-1.2.6p12/treasures/windows_msi/check_mk_agent_baked.wxs
--- check-mk-1.2.2p3/treasures/windows_msi/check_mk_agent_baked.wxs	1970-01-01 00:00:00.000000000 +0000
+++ check-mk-1.2.6p12/treasures/windows_msi/check_mk_agent_baked.wxs	2015-09-21 10:59:54.000000000 +0000
@@ -0,0 +1,141 @@
+ NOT REMOVE="ALL" AND INSTALLSERVICE
+ NOT REMOVE="ALL" AND INSTALLSERVICE
+ REMOVE="ALL"
+ REMOVE="ALL"
+ NOT VersionNT64
+ VersionNT64
diff -Nru check-mk-1.2.2p3/treasures/windows_msi/check_mk_agent.wixobj check-mk-1.2.6p12/treasures/windows_msi/check_mk_agent.wixobj
--- check-mk-1.2.2p3/treasures/windows_msi/check_mk_agent.wixobj	1970-01-01 00:00:00.000000000 +0000
+++ check-mk-1.2.6p12/treasures/windows_msi/check_mk_agent.wixobj	2014-07-28 09:51:39.000000000 +0000
@@ -0,0 +1 @@
+
    112522Installation Database3Check_MK 32/64 bit MSI installer4Mathias Kettner GmbH5Installer6This installer database contains the logic and data required to install Check_MK Agent MSI.7Intel;10339*14200152192
    EXISTINGINSTALLDIRLocate_EXISTINGINSTALLDIR
    agent32{60D6BA87-1F56-4D24-817F-A5966EB8B116}INSTALLDIR0NOT VersionNT64check_mk_agent.exeagent64{7C2E82EC-C80E-436E-9A29-FE7D0E8F3589}INSTALLDIR0VersionNT64check_mk_agent64.execheck_mk.example.ini{1383472D-D59E-4C7D-BECA-02574758212A}INSTALLDIR0check_mk.example.inilocalFolder{F0CDD975-2DAD-45C5-B089-D566D7418AE7}LocalName0pluginFolder{2384DA34-A212-4EDA-AC1B-365D2F2686AB}PluginName0RegistryEntries{C1FEAA52-851D-4C68-BD3E-6725BBDF6370}INSTALLDIR4reg822B5E6295CC5C9F38BF0DFB7CB71EEC
    LocalNamelocalFolderPluginNamepluginFolder
    Set_INSTALLDIR307INSTALLDIR[EXISTINGINSTALLDIR]install_service98INSTALLDIR[INSTALLDIR]check_mk_agent.exe installuninstall_service98INSTALLDIR[INSTALLDIR]check_mk_agent.exe removestart_service98INSTALLDIRnet start check_mk_agentstop_service98INSTALLDIRnet stop check_mk_agent
    INSTALLDIRAPPLICATIONROOTDIRECTORY.APPLICATIONROOTDIRECTORYProgramFilesFoldercheck_mkProgramFilesFolderTARGETDIR.TARGETDIRSourceDirLocalNameINSTALLDIRlocalPluginNameINSTALLDIRplugin
    Agent210
    check_mk_agent.exeagent32l4xjiod0.exe|check_mk_agent.exe01536check_mk_agent64.exeagent64zing7kys.exe|check_mk_agent.exe01536check_mk.example.inicheck_mk.example.inimfvjh0bd.ini|check_mk.example.ini0512
    10#product.cab
    EXISTINGINSTALLDIRINSTALLSERVICE1WIXUI_INSTALLDIRINSTALLDIR
    reg822B5E6295CC5C9F38BF0DFB7CB71EEC2Software\check_mk_agentInstall_Dir[INSTALLDIR]RegistryEntries
    Locate_EXISTINGINSTALLDIR2Software\check_mk_agentInstall_Dir0
    {854BB2C1-F4AA-4C5C-89E4-8FB0BDFB9EE4}1.2.5256NEWERVERSIONDETECTED{854BB2C1-F4AA-4C5C-89E4-8FB0BDFB9EE4}0.0.01.2.5256OLDERVERSIONBEINGUPGRADED
    InstallExecuteSequenceSet_INSTALLDIRNOT Installed AND (NOT INSTALLDIR) AND EXISTINGINSTALLDIRCostFinalize0InstallUISequenceSet_INSTALLDIRNOT Installed AND (NOT INSTALLDIR) AND EXISTINGINSTALLDIRCostFinalize0InstallExecuteSequenceinstall_serviceNOT REMOVE="ALL" AND INSTALLSERVICEInstallFinalize0InstallExecuteSequencestart_serviceNOT REMOVE="ALL" AND INSTALLSERVICEInstallFinalize0InstallExecuteSequencestop_serviceREMOVE="ALL"InstallInitialize0InstallExecuteSequenceuninstall_serviceREMOVE="ALL"InstallInitialize0InstallExecuteSequenceRemoveExistingProductsInstallInitialize0
    Agent1agent3210Agent1agent6410Agent1check_mk.example.ini10Agent1localFolder10Agent1pluginFolder10Agent1RegistryEntries10*5Agent20
    check_mk_agent.exeINSTALLDIR1sources\check_mk_agent.exe-110check_mk_agent64.exeINSTALLDIR1sources\check_mk_agent-64.exe-110check_mk.example.iniINSTALLDIR1sources\check_mk.example.ini-110
    AgentFeatureagent32ComponentAgentFeatureagent64ComponentAgentFeaturecheck_mk.example.iniComponentAgentFeaturelocalFolderComponentAgentFeaturepluginFolderComponentAgentFeatureRegistryEntriesComponent*ProductAgentFeature
    1none
    EXISTINGINSTALLDIR4
    PropertyManufacturerPropertyProductCodePropertyProductLanguagePropertyProductNamePropertyProductVersionPropertyUpgradeCodeWixActionInstallExecuteSequence/RemoveExistingProductsWixActionInstallExecuteSequence/RemoveExistingProductsCustomActionSet_INSTALLDIRWixActionInstallExecuteSequence/CostFinalizeCustomActionSet_INSTALLDIRWixActionInstallUISequence/CostFinalizeWixUICmkWixUI_InstallDirDirectoryINSTALLDIRDirectoryINSTALLDIRDirectoryINSTALLDIRDirectoryINSTALLDIRCustomActioninstall_serviceWixActionInstallExecuteSequence/InstallFinalizeCustomActionstart_serviceWixActionInstallExecuteSequence/InstallFinalizeCustomActionstop_serviceWixActionInstallExecuteSequence/InstallInitializeCustomActionuninstall_serviceWixActionInstallExecuteSequence/InstallInitializeDirectoryINSTALLDIRMedia1Media1Media1DirectoryLocalNameDirectoryPluginNameWixActionInstallExecuteSequence/InstallInitializeComponentagent32Componentagent64Componentcheck_mk.example.iniComponentlocalFolderComponentpluginFolderComponentRegistryEntries
    WixUILicenseRtfsources\GPL-V2.rtf0
    ManufacturerMathias Kettner GmbH
    ProductCode*
    ProductLanguage1033
    ProductNameCheck_MK Agent MSI
    ProductVersion1.2.5
    UpgradeCode{854BB2C1-F4AA-4C5C-89E4-8FB0BDFB9EE4}
    \ No newline at end of file Binary files /tmp/tmpz1J_py/xquijR6Ryw/check-mk-1.2.2p3/treasures/windows_msi/check_mk_agent.wixpdb and /tmp/tmpz1J_py/GXS8RRKTxu/check-mk-1.2.6p12/treasures/windows_msi/check_mk_agent.wixpdb differ diff -Nru check-mk-1.2.2p3/treasures/windows_msi/check_mk_agent.wxs check-mk-1.2.6p12/treasures/windows_msi/check_mk_agent.wxs --- check-mk-1.2.2p3/treasures/windows_msi/check_mk_agent.wxs 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/treasures/windows_msi/check_mk_agent.wxs 2015-09-21 10:59:54.000000000 +0000 @@ -0,0 +1,137 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + NOT REMOVE="ALL" AND INSTALLSERVICE + NOT REMOVE="ALL" AND INSTALLSERVICE + + + + REMOVE="ALL" + REMOVE="ALL" + + + + + + NOT VersionNT64 + + + + VersionNT64 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru check-mk-1.2.2p3/treasures/windows_msi/cmk_InstallDirDlg.wixobj check-mk-1.2.6p12/treasures/windows_msi/cmk_InstallDirDlg.wixobj --- check-mk-1.2.2p3/treasures/windows_msi/cmk_InstallDirDlg.wixobj 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/treasures/windows_msi/cmk_InstallDirDlg.wixobj 2014-07-28 09:51:39.000000000 +0000 @@ -0,0 +1 @@ +
    INSTALLSERVICE1
    CmkInstallDirDlgNextPushButton23624356173!(loc.WixUINext)BackCmkInstallDirDlgBackPushButton18024356173!(loc.WixUIBack)CancelCmkInstallDirDlgCancelPushButton30424356173!(loc.WixUICancel)BannerBitmapCmkInstallDirDlgDescriptionText252328015196611!(loc.InstallDirDlgDescription)CmkInstallDirDlgTitleText15620015196611!(loc.InstallDirDlgTitle)CmkInstallDirDlgBannerBitmapBitmap00370441!(loc.InstallDirDlgBannerBitmap)FolderCmkInstallDirDlgBannerLineLine04437001CmkInstallDirDlgBottomLineLine023437001CmkInstallDirDlgFolderLabelText206029030131075!(loc.InstallDirDlgFolderLabel)CmkInstallDirDlgFolderPathEdit201003201811WIXUI_INSTALLDIRChangeFolderCmkInstallDirDlgChangeFolderPushButton2012056173!(loc.InstallDirDlgChange)InstallServiceCheckboxCmkInstallDirDlgInstallServiceCheckboxCheckBox20160290173INSTALLSERVICEInstall and start service.Next
    CmkInstallDirDlgCancelSpawnDialogCancelDlg11
    CmkInstallDirDlg50503702707!(loc.InstallDirDlg_Title)NextNextCancel
    DialogCancelDlg
    \ No newline at end of file diff -Nru check-mk-1.2.2p3/treasures/windows_msi/cmk_InstallDirDlg.wxs check-mk-1.2.6p12/treasures/windows_msi/cmk_InstallDirDlg.wxs --- check-mk-1.2.2p3/treasures/windows_msi/cmk_InstallDirDlg.wxs 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/treasures/windows_msi/cmk_InstallDirDlg.wxs 2014-07-28 09:51:39.000000000 +0000 @@ -0,0 +1,33 @@ + + + + + + + + + + 1 + + + + + + + + + + + + + + + + diff -Nru check-mk-1.2.2p3/treasures/windows_msi/cmk_WixUI_InstallDir.wixobj check-mk-1.2.6p12/treasures/windows_msi/cmk_WixUI_InstallDir.wixobj --- check-mk-1.2.2p3/treasures/windows_msi/cmk_WixUI_InstallDir.wixobj 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/treasures/windows_msi/cmk_WixUI_InstallDir.wixobj 2014-07-28 09:51:39.000000000 +0000 @@ -0,0 +1 @@ +
    BrowseDlgOKDoActionWixUIValidatePath13BrowseDlgOKSpawnDialogInvalidDirDlgWIXUI_INSTALLDIR_VALID<>"1"4ExitDialogFinishEndDialogReturn1999WelcomeDlgNextNewDialogLicenseAgreementDlgNOT Installed1WelcomeDlgNextNewDialogVerifyReadyDlgInstalled AND PATCH1LicenseAgreementDlgBackNewDialogWelcomeDlg11LicenseAgreementDlgNextNewDialogCmkInstallDirDlgLicenseAccepted = "1"1CmkInstallDirDlgBackNewDialogLicenseAgreementDlg11CmkInstallDirDlgNextSetTargetPath[WIXUI_INSTALLDIR]11CmkInstallDirDlgNextDoActionWixUIValidatePathNOT WIXUI_DONTVALIDATEPATH2CmkInstallDirDlgNextSpawnDialogInvalidDirDlgNOT WIXUI_DONTVALIDATEPATH AND WIXUI_INSTALLDIR_VALID<>"1"3CmkInstallDirDlgNextNewDialogVerifyReadyDlgWIXUI_DONTVALIDATEPATH OR WIXUI_INSTALLDIR_VALID="1"4CmkInstallDirDlgChangeFolder[_BrowseProperty][WIXUI_INSTALLDIR]11CmkInstallDirDlgChangeFolderSpawnDialogBrowseDlg12VerifyReadyDlgBackNewDialogCmkInstallDirDlgNOT Installed1VerifyReadyDlgBackNewDialogMaintenanceTypeDlgInstalled AND NOT PATCH2VerifyReadyDlgBackNewDialogWelcomeDlgInstalled AND PATCH2MaintenanceWelcomeDlgNextNewDialogMaintenanceTypeDlg11MaintenanceTypeDlgRepairButtonNewDialogVerifyReadyDlg11MaintenanceTypeDlgRemoveButtonNewDialogVerifyReadyDlg11MaintenanceTypeDlgBackNewDialogMaintenanceWelcomeDlg11
    DefaultUIFontWixUI_Font_NormalWixUI_ModeInstallDirARPNOMODIFYyes
    WixUI_Font_NormalTahoma8WixUI_Font_BiggerTahoma12WixUI_Font_TitleTahoma91
    ARPNOMODIFY4
    DialogBrowseDlgDialogDiskCostDlgDialogErrorDlgDialogFatalErrorDialogFilesInUseDialogMsiRMFilesInUseDialogPrepareDlgDialogProgressDlgDialogResumeDlgDialogUserExitDialogBrowseDlgCustomActionWixUIValidatePathDialogBrowseDlgDialogInvalidDirDlgDialogExitDialogDialogWelcomeDlgDialogLicenseAgreementDlgDialogWelcomeDlgDialogVerifyReadyDlgDialogLicenseAgreementDlgDialogWelcomeDlgDialogLicenseAgreementDlgDialogCmkInstallDirDlgDialogCmkInstallDirDlgDialogLicenseAgreementDlgDialogCmkInstallDirDlgDialogCmkInstallDirDlgCustomActionWixUIValidatePathDialogCmkInstallDirDlgDialogInvalidDirDlgDialogCmkInstallDirDlgDialogVerifyReadyDlgDialogCmkInstallDirDlgDialogCmkInstallDirDlgDialogBrowseDlgDialogVerifyReadyDlgDialogCmkInstallDirDlgDialogVerifyReadyDlgDialogMaintenanceTypeDlgDialogVerifyReadyDlgDialogWelcomeDlgDialogMaintenanceWelcomeDlgDialogMaintenanceTypeDlgDialogMaintenanceTypeDlgDialogVerifyReadyDlgDialogMaintenanceTypeDlgDialogVerifyReadyDlgDialogMaintenanceTypeDlgDialogMaintenanceWelcomeDlgWixUIWixUI_Common
    CmkWixUI_InstallDir
\ No newline at end of file
diff -Nru check-mk-1.2.2p3/treasures/windows_msi/cmk_WixUI_InstallDir.wxs check-mk-1.2.6p12/treasures/windows_msi/cmk_WixUI_InstallDir.wxs
--- check-mk-1.2.2p3/treasures/windows_msi/cmk_WixUI_InstallDir.wxs	1970-01-01 00:00:00.000000000 +0000
+++ check-mk-1.2.6p12/treasures/windows_msi/cmk_WixUI_InstallDir.wxs	2014-07-28 09:51:39.000000000 +0000
@@ -0,0 +1,86 @@
+ 1
+ "1"]]>
+ 1
+ NOT Installed
+ Installed AND PATCH
+ 1
+ LicenseAccepted = "1"
+ 1
+ 1
+ NOT WIXUI_DONTVALIDATEPATH
+ "1"]]>
+ WIXUI_DONTVALIDATEPATH OR WIXUI_INSTALLDIR_VALID="1"
+ 1
+ 1
+ NOT Installed
+ Installed AND NOT PATCH
+ Installed AND PATCH
+ 1
+ 1
+ 1
+ 1
diff -Nru check-mk-1.2.2p3/treasures/windows_msi/README check-mk-1.2.6p12/treasures/windows_msi/README
--- check-mk-1.2.2p3/treasures/windows_msi/README	1970-01-01 00:00:00.000000000 +0000
+++ check-mk-1.2.6p12/treasures/windows_msi/README	2015-09-21 10:59:54.000000000 +0000
@@ -0,0 +1,35 @@
+BUILD MSI PACKAGE
+=================
+
+These scripts allow you to build an initial msi package for the Check_MK windows agent.
+The package contains both versions (32/64 bit) of the windows agent.
+Upon installation the correct agent is automatically chosen and registered as a service.
+This msi package is only built once. Afterwards its contents are exchanged by a different
+mechanism (msibuild) found in the windows agent directory.
+
+Requires
+---------------------------------
+- A wine environment with the program packages Mono and Wix-Toolset
+  http://www.go-mono.com/mono-downloads/download.html
+  http://wixtoolset.org/
+
+- A working directory where the msi package is built, containing the files
+  cmk_InstallDirDlg.wxs             # Modified install dialog with "install service" checkbox
+  cmk_WixUI_InstallDir.wxs          # Modified install dialog with "install service" checkbox
+  check_mk_agent.wxs                # The msi package description
+  build_msi.bat                     # The batch script calling various commands to create the msi
+  sourceFiles/check_mk_agent.exe    # 32 bit binary
+  sourceFiles/check_mk_agent-64.exe # 64 bit binary
+  sourceFiles/check_mk.example.ini  # example configuration
+  sourceFiles/GPL-V2.rtf            # GPL-V2 license displayed within the installer
+
+  The default path for this directory is C:/mkmsi.
+  If you want to change this you need to update the file build_msi.bat.
+
+
+Build steps:
+---------------------------------
+1) Copy the 32/64 bit agent binaries and the check_mk.example.ini into the sources folder
+2) Switch to the mkmsi directory and type: "wine cmd < build_msi.bat"
+
+
Binary files /tmp/tmpz1J_py/xquijR6Ryw/check-mk-1.2.2p3/treasures/windows_msi/sources/check_mk_agent-64.exe and /tmp/tmpz1J_py/GXS8RRKTxu/check-mk-1.2.6p12/treasures/windows_msi/sources/check_mk_agent-64.exe differ
Binary files /tmp/tmpz1J_py/xquijR6Ryw/check-mk-1.2.2p3/treasures/windows_msi/sources/check_mk_agent.exe and /tmp/tmpz1J_py/GXS8RRKTxu/check-mk-1.2.6p12/treasures/windows_msi/sources/check_mk_agent.exe differ
diff -Nru check-mk-1.2.2p3/treasures/windows_msi/sources/check_mk.example.ini check-mk-1.2.6p12/treasures/windows_msi/sources/check_mk.example.ini
--- check-mk-1.2.2p3/treasures/windows_msi/sources/check_mk.example.ini	1970-01-01 00:00:00.000000000 +0000
+++ check-mk-1.2.6p12/treasures/windows_msi/sources/check_mk.example.ini	2014-07-28 09:51:39.000000000 +0000
@@ -0,0 +1,92 @@
+[global]
+    # Restrict access to certain IP addresses
+    # only_from = 127.0.0.1 192.168.56.0/24
+
+    # Change the port the agent is listening on (default 6556)
+    # port = 6556
+
+    # Only execute programs and scripts with
+    # the following suffixes (in local and plugins).
+    # By default all extensions except txt and dir
+    # are executed.
+    # execute = exe bat vbs
+
+    # Restrict the following configuration variables
+    # in [global] to the following hosts
+    # host = winsrv* zab???ad
+
+    # Just output certain sections (e.g. for upper hosts)
+    # sections = check_mk winperf
+
+    # Maximum timeout for all plugins combined
+    # timeout_plugins_total = 60
+
+    # Write a logfile for tracking down crashes of the agent
+    # crash_debug = yes
+
+
+[winperf]
+    # Select counters to extract. The following counters
+    # are needed by checks shipped with check_mk.
+    # counters = 10332:msx_queues
+    # counters = 638:tcp_conn
+
+
+[logfiles]
+    # # Define textfiles to be monitored, separated by |
+    # textfile = C:\tmp logfiles\message_*.log|D:\log\sample.txt
+    # # Set patterns for defined textfiles
+    # ok = Successfully logged in*
+    # crit = Error in*
+    # warn = Unable to contact*
+    # ignore = Backup * saved
+
+    # # Define additional textfiles with different patterns
+    # textfile = C:\tmp\memo.udf
+    # # Set patterns for defined textfile
+    # warn = *overdue*
+    # ok = *mail sent*
+
+[logwatch]
+    # Testing: output *all* messages from the eventlogs
+    # sendall = yes
+
+    # From the application log send only critical messages
+    # logfile application = crit
+
+    # From the system log send only warning/critical messages,
+    # but suppress any context messages
+    # logfile system = warn nocontext
+
+    # From the security log send all messages
+    # logfile security = all
+
+    # Switch all other logfiles off. Default is warn:
+    # send messages of type warn or crit
+    # logfile * = off
+
+[mrpe]
+    # Run classical Nagios plugins. The word before the command
+    # line is the service description for Nagios. Use backslashes
+    # in Windows paths.
+    # check = Dummy mrpe\check_crit
+    # check = IP_Configuration mrpe\check_ipconfig 1.2.3.4
+    # check = Whatever c:\myplugins\check_whatever -w 10 -c 20
+
+[fileinfo]
+    # path = C:\Programs\Foo\*.log
+    # path = M:\Bar Test\*.*
+
+[local]
+    # define timeouts for local scripts matching
+    # specific patterns - first match wins
+    # timeout *.vbs = 20
+    # timeout *.bat = 10
+    # timeout * = 30
+
+[plugins]
+    # define timeouts for plugin scripts matching
+    # specific patterns - first match wins
+    # timeout windows_updates.vbs = 20
+    # timeout *.vbs = 10
+    # timeout * = 30
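The agent simply writes all its sections to whoever connects on the configured port (subject to the only_from restriction above), so its output can be fetched with a plain TCP read. A minimal sketch, assuming an example host and the default port 6556:

import socket

s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect(("192.168.56.10", 6556))   # example host, default agent port

output = ""
while True:
    chunk = s.recv(4096)
    if not chunk:                    # agent closes the connection when done
        break
    output += chunk
s.close()
print output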
diff -Nru check-mk-1.2.2p3/treasures/windows_msi/sources/gpl_v2.rtf check-mk-1.2.6p12/treasures/windows_msi/sources/gpl_v2.rtf
--- check-mk-1.2.2p3/treasures/windows_msi/sources/gpl_v2.rtf	1970-01-01 00:00:00.000000000 +0000
+++ check-mk-1.2.6p12/treasures/windows_msi/sources/gpl_v2.rtf	2014-07-28 09:51:39.000000000 +0000
@@ -0,0 +1,848 @@
    {\rtf1\adeflang1025\ansi\ansicpg1252\uc1\adeff0\deff0\stshfdbch0\stshfloch0\stshfhich0\stshfbi0\deflang1033\deflangfe1033\themelang1033\themelangfe0\themelangcs0{\fonttbl{\f0\fbidi \froman\fcharset0\fprq2{\*\panose 02020603050405020304}Times New Roman;}{\f2\fbidi \fmodern\fcharset0\fprq1{\*\panose 02070309020205020404}Courier New;}
    +{\f2\fbidi \fmodern\fcharset0\fprq1{\*\panose 02070309020205020404}Courier New;}{\f160\fbidi \fmodern\fcharset0\fprq1{\*\panose 020b0609020204030204}Consolas;}{\flomajor\f31500\fbidi \froman\fcharset0\fprq2{\*\panose 02020603050405020304}Times New Roman;}
    +{\fdbmajor\f31501\fbidi \froman\fcharset0\fprq2{\*\panose 02020603050405020304}Times New Roman;}{\fhimajor\f31502\fbidi \fswiss\fcharset0\fprq2{\*\panose 020f0302020204030204}Calibri Light;}
    +{\fbimajor\f31503\fbidi \froman\fcharset0\fprq2{\*\panose 02020603050405020304}Times New Roman;}{\flominor\f31504\fbidi \froman\fcharset0\fprq2{\*\panose 02020603050405020304}Times New Roman;}
    +{\fdbminor\f31505\fbidi \froman\fcharset0\fprq2{\*\panose 02020603050405020304}Times New Roman;}{\fhiminor\f31506\fbidi \fswiss\fcharset0\fprq2{\*\panose 020f0502020204030204}Calibri;}
    +{\fbiminor\f31507\fbidi \froman\fcharset0\fprq2{\*\panose 02020603050405020304}Times New Roman;}{\f317\fbidi \froman\fcharset238\fprq2 Times New Roman CE;}{\f318\fbidi \froman\fcharset204\fprq2 Times New Roman Cyr;}
    +{\f320\fbidi \froman\fcharset161\fprq2 Times New Roman Greek;}{\f321\fbidi \froman\fcharset162\fprq2 Times New Roman Tur;}{\f322\fbidi \froman\fcharset177\fprq2 Times New Roman (Hebrew);}{\f323\fbidi \froman\fcharset178\fprq2 Times New Roman (Arabic);}
    +{\f324\fbidi \froman\fcharset186\fprq2 Times New Roman Baltic;}{\f325\fbidi \froman\fcharset163\fprq2 Times New Roman (Vietnamese);}{\f337\fbidi \fmodern\fcharset238\fprq1 Courier New CE;}{\f338\fbidi \fmodern\fcharset204\fprq1 Courier New Cyr;}
    +{\f340\fbidi \fmodern\fcharset161\fprq1 Courier New Greek;}{\f341\fbidi \fmodern\fcharset162\fprq1 Courier New Tur;}{\f342\fbidi \fmodern\fcharset177\fprq1 Courier New (Hebrew);}{\f343\fbidi \fmodern\fcharset178\fprq1 Courier New (Arabic);}
    +{\f344\fbidi \fmodern\fcharset186\fprq1 Courier New Baltic;}{\f345\fbidi \fmodern\fcharset163\fprq1 Courier New (Vietnamese);}{\f337\fbidi \fmodern\fcharset238\fprq1 Courier New CE;}{\f338\fbidi \fmodern\fcharset204\fprq1 Courier New Cyr;}
    +{\f340\fbidi \fmodern\fcharset161\fprq1 Courier New Greek;}{\f341\fbidi \fmodern\fcharset162\fprq1 Courier New Tur;}{\f342\fbidi \fmodern\fcharset177\fprq1 Courier New (Hebrew);}{\f343\fbidi \fmodern\fcharset178\fprq1 Courier New (Arabic);}
    +{\f344\fbidi \fmodern\fcharset186\fprq1 Courier New Baltic;}{\f345\fbidi \fmodern\fcharset163\fprq1 Courier New (Vietnamese);}{\f1917\fbidi \fmodern\fcharset238\fprq1 Consolas CE;}{\f1918\fbidi \fmodern\fcharset204\fprq1 Consolas Cyr;}
    +{\f1920\fbidi \fmodern\fcharset161\fprq1 Consolas Greek;}{\f1921\fbidi \fmodern\fcharset162\fprq1 Consolas Tur;}{\f1924\fbidi \fmodern\fcharset186\fprq1 Consolas Baltic;}{\f1925\fbidi \fmodern\fcharset163\fprq1 Consolas (Vietnamese);}
    +{\flomajor\f31508\fbidi \froman\fcharset238\fprq2 Times New Roman CE;}{\flomajor\f31509\fbidi \froman\fcharset204\fprq2 Times New Roman Cyr;}{\flomajor\f31511\fbidi \froman\fcharset161\fprq2 Times New Roman Greek;}
    +{\flomajor\f31512\fbidi \froman\fcharset162\fprq2 Times New Roman Tur;}{\flomajor\f31513\fbidi \froman\fcharset177\fprq2 Times New Roman (Hebrew);}{\flomajor\f31514\fbidi \froman\fcharset178\fprq2 Times New Roman (Arabic);}
    +{\flomajor\f31515\fbidi \froman\fcharset186\fprq2 Times New Roman Baltic;}{\flomajor\f31516\fbidi \froman\fcharset163\fprq2 Times New Roman (Vietnamese);}{\fdbmajor\f31518\fbidi \froman\fcharset238\fprq2 Times New Roman CE;}
    +{\fdbmajor\f31519\fbidi \froman\fcharset204\fprq2 Times New Roman Cyr;}{\fdbmajor\f31521\fbidi \froman\fcharset161\fprq2 Times New Roman Greek;}{\fdbmajor\f31522\fbidi \froman\fcharset162\fprq2 Times New Roman Tur;}
    +{\fdbmajor\f31523\fbidi \froman\fcharset177\fprq2 Times New Roman (Hebrew);}{\fdbmajor\f31524\fbidi \froman\fcharset178\fprq2 Times New Roman (Arabic);}{\fdbmajor\f31525\fbidi \froman\fcharset186\fprq2 Times New Roman Baltic;}
    +{\fdbmajor\f31526\fbidi \froman\fcharset163\fprq2 Times New Roman (Vietnamese);}{\fhimajor\f31528\fbidi \fswiss\fcharset238\fprq2 Calibri Light CE;}{\fhimajor\f31529\fbidi \fswiss\fcharset204\fprq2 Calibri Light Cyr;}
    +{\fhimajor\f31531\fbidi \fswiss\fcharset161\fprq2 Calibri Light Greek;}{\fhimajor\f31532\fbidi \fswiss\fcharset162\fprq2 Calibri Light Tur;}{\fhimajor\f31535\fbidi \fswiss\fcharset186\fprq2 Calibri Light Baltic;}
    +{\fhimajor\f31536\fbidi \fswiss\fcharset163\fprq2 Calibri Light (Vietnamese);}{\fbimajor\f31538\fbidi \froman\fcharset238\fprq2 Times New Roman CE;}{\fbimajor\f31539\fbidi \froman\fcharset204\fprq2 Times New Roman Cyr;}
    +{\fbimajor\f31541\fbidi \froman\fcharset161\fprq2 Times New Roman Greek;}{\fbimajor\f31542\fbidi \froman\fcharset162\fprq2 Times New Roman Tur;}{\fbimajor\f31543\fbidi \froman\fcharset177\fprq2 Times New Roman (Hebrew);}
    +{\fbimajor\f31544\fbidi \froman\fcharset178\fprq2 Times New Roman (Arabic);}{\fbimajor\f31545\fbidi \froman\fcharset186\fprq2 Times New Roman Baltic;}{\fbimajor\f31546\fbidi \froman\fcharset163\fprq2 Times New Roman (Vietnamese);}
    +{\flominor\f31548\fbidi \froman\fcharset238\fprq2 Times New Roman CE;}{\flominor\f31549\fbidi \froman\fcharset204\fprq2 Times New Roman Cyr;}{\flominor\f31551\fbidi \froman\fcharset161\fprq2 Times New Roman Greek;}
    +{\flominor\f31552\fbidi \froman\fcharset162\fprq2 Times New Roman Tur;}{\flominor\f31553\fbidi \froman\fcharset177\fprq2 Times New Roman (Hebrew);}{\flominor\f31554\fbidi \froman\fcharset178\fprq2 Times New Roman (Arabic);}
    +{\flominor\f31555\fbidi \froman\fcharset186\fprq2 Times New Roman Baltic;}{\flominor\f31556\fbidi \froman\fcharset163\fprq2 Times New Roman (Vietnamese);}{\fdbminor\f31558\fbidi \froman\fcharset238\fprq2 Times New Roman CE;}
    +{\fdbminor\f31559\fbidi \froman\fcharset204\fprq2 Times New Roman Cyr;}{\fdbminor\f31561\fbidi \froman\fcharset161\fprq2 Times New Roman Greek;}{\fdbminor\f31562\fbidi \froman\fcharset162\fprq2 Times New Roman Tur;}
    +{\fdbminor\f31563\fbidi \froman\fcharset177\fprq2 Times New Roman (Hebrew);}{\fdbminor\f31564\fbidi \froman\fcharset178\fprq2 Times New Roman (Arabic);}{\fdbminor\f31565\fbidi \froman\fcharset186\fprq2 Times New Roman Baltic;}
    +{\fdbminor\f31566\fbidi \froman\fcharset163\fprq2 Times New Roman (Vietnamese);}{\fhiminor\f31568\fbidi \fswiss\fcharset238\fprq2 Calibri CE;}{\fhiminor\f31569\fbidi \fswiss\fcharset204\fprq2 Calibri Cyr;}
    +{\fhiminor\f31571\fbidi \fswiss\fcharset161\fprq2 Calibri Greek;}{\fhiminor\f31572\fbidi \fswiss\fcharset162\fprq2 Calibri Tur;}{\fhiminor\f31575\fbidi \fswiss\fcharset186\fprq2 Calibri Baltic;}
    +{\fhiminor\f31576\fbidi \fswiss\fcharset163\fprq2 Calibri (Vietnamese);}{\fbiminor\f31578\fbidi \froman\fcharset238\fprq2 Times New Roman CE;}{\fbiminor\f31579\fbidi \froman\fcharset204\fprq2 Times New Roman Cyr;}
    +{\fbiminor\f31581\fbidi \froman\fcharset161\fprq2 Times New Roman Greek;}{\fbiminor\f31582\fbidi \froman\fcharset162\fprq2 Times New Roman Tur;}{\fbiminor\f31583\fbidi \froman\fcharset177\fprq2 Times New Roman (Hebrew);}
    +{\fbiminor\f31584\fbidi \froman\fcharset178\fprq2 Times New Roman (Arabic);}{\fbiminor\f31585\fbidi \froman\fcharset186\fprq2 Times New Roman Baltic;}{\fbiminor\f31586\fbidi \froman\fcharset163\fprq2 Times New Roman (Vietnamese);}}
    +{\colortbl;\red0\green0\blue0;\red0\green0\blue255;\red0\green255\blue255;\red0\green255\blue0;\red255\green0\blue255;\red255\green0\blue0;\red255\green255\blue0;\red255\green255\blue255;\red0\green0\blue128;\red0\green128\blue128;\red0\green128\blue0;
    +\red128\green0\blue128;\red128\green0\blue0;\red128\green128\blue0;\red128\green128\blue128;\red192\green192\blue192;\caccentone\ctint255\cshade127\red31\green77\blue120;}{\*\defchp }{\*\defpap 
    +\ql \li0\ri0\widctlpar\wrapdefault\aspalpha\aspnum\faauto\adjustright\rin0\lin0\itap0 }\noqfpromote {\stylesheet{\ql \li0\ri0\widctlpar\wrapdefault\aspalpha\aspnum\faauto\adjustright\rin0\lin0\itap0 \rtlch\fcs1 \af0\afs24\alang1025 \ltrch\fcs0 
    +\fs24\lang1033\langfe1033\loch\f0\hich\af0\dbch\af31505\cgrid\langnp1033\langfenp1033 \snext0 \sqformat \spriority0 Normal;}{
    +\s3\ql \li0\ri0\sb100\sa100\sbauto1\saauto1\widctlpar\wrapdefault\aspalpha\aspnum\faauto\outlinelevel2\adjustright\rin0\lin0\itap0 \rtlch\fcs1 \ab\af0\afs27\alang1025 \ltrch\fcs0 
    +\b\fs27\lang1033\langfe1033\loch\f0\hich\af0\dbch\af31505\cgrid\langnp1033\langfenp1033 \sbasedon0 \snext3 \slink15 \sqformat \spriority9 heading 3;}{\*\cs10 \additive \ssemihidden \sunhideused \spriority1 Default Paragraph Font;}{\*
    +\ts11\tsrowd\trftsWidthB3\trpaddl108\trpaddr108\trpaddfl3\trpaddft3\trpaddfb3\trpaddfr3\tblind0\tblindtype3\tsvertalt\tsbrdrt\tsbrdrl\tsbrdrb\tsbrdrr\tsbrdrdgl\tsbrdrdgr\tsbrdrh\tsbrdrv 
    +\ql \li0\ri0\widctlpar\wrapdefault\aspalpha\aspnum\faauto\adjustright\rin0\lin0\itap0 \rtlch\fcs1 \af0\afs20\alang1025 \ltrch\fcs0 \fs20\lang1033\langfe1033\cgrid\langnp1033\langfenp1033 \snext11 \ssemihidden \sunhideused Normal Table;}{\*\cs15 \additive 
    +\rtlch\fcs1 \af31503\afs24 \ltrch\fcs0 \fs24\cf17\loch\f31502\hich\af31502\dbch\af31501 \sbasedon10 \slink3 \slocked \ssemihidden \spriority9 Heading 3 Char;}{
    +\s16\ql \li0\ri0\sb100\sa100\sbauto1\saauto1\widctlpar\wrapdefault\aspalpha\aspnum\faauto\adjustright\rin0\lin0\itap0 \rtlch\fcs1 \af0\afs24\alang1025 \ltrch\fcs0 \fs24\lang1033\langfe1033\loch\f0\hich\af0\dbch\af31505\cgrid\langnp1033\langfenp1033 
    +\sbasedon0 \snext16 \ssemihidden \sunhideused Normal (Web);}{\s17\ql \li0\ri0\widctlpar
    +\tx916\tx1832\tx2748\tx3664\tx4580\tx5496\tx6412\tx7328\tx8244\tx9160\tx10076\tx10992\tx11908\tx12824\tx13740\tx14656\wrapdefault\aspalpha\aspnum\faauto\adjustright\rin0\lin0\itap0 \rtlch\fcs1 \af2\afs20\alang1025 \ltrch\fcs0 
    +\fs20\lang1033\langfe1033\loch\f2\hich\af2\dbch\af31505\cgrid\langnp1033\langfenp1033 \sbasedon0 \snext17 \slink18 \ssemihidden \sunhideused HTML Preformatted;}{\*\cs18 \additive \rtlch\fcs1 \af160 \ltrch\fcs0 \loch\f160\hich\af160\dbch\af31505 
    +\sbasedon10 \slink17 \slocked \ssemihidden HTML Preformatted Char;}{\*\cs19 \additive \rtlch\fcs1 \ab\af0 \ltrch\fcs0 \b \sbasedon10 \sqformat \spriority22 Strong;}{\*\cs20 \additive \rtlch\fcs1 \ai\af0 \ltrch\fcs0 \i 
    +\sbasedon10 \ssemihidden \sunhideused HTML Variable;}{\*\cs21 \additive \rtlch\fcs1 \af2 \ltrch\fcs0 \loch\f2\hich\af2\dbch\af31505 \sbasedon10 \ssemihidden \sunhideused HTML Sample;}{\*\cs22 \additive \rtlch\fcs1 \af0 \ltrch\fcs0 \ul\cf2 
    +\sbasedon10 \ssemihidden \sunhideused Hyperlink;}{\*\cs23 \additive \rtlch\fcs1 \af0 \ltrch\fcs0 \ul\cf12 \sbasedon10 \ssemihidden \sunhideused FollowedHyperlink;}}{\*\rsidtbl \rsid741713\rsid7281809}{\mmathPr\mmathFont34\mbrkBin0\mbrkBinSub0\msmallFrac0
    +\mdispDef1\mlMargin0\mrMargin0\mdefJc1\mwrapIndent1440\mintLim0\mnaryLim1}{\info{\title GNU General Public License v2.0 - GNU Project - Free Software Foundation (FSF)}{\author Bryan Jones}{\operator Bryan Jones}{\creatim\yr2013\mo12\dy2\hr10\min36}
    +{\revtim\yr2013\mo12\dy2\hr10\min36}{\version2}{\edmins1}{\nofpages7}{\nofwords2637}{\nofchars15031}{\*\company Mississippi State University}{\nofcharsws17633}{\vern57435}}{\*\xmlnstbl {\xmlns1 http://schemas.microsoft.com/office/word/2003/wordml}}
    +\paperw12240\paperh15840\margl1440\margr1440\margt1440\margb1440\gutter0\ltrsect 
    +\widowctrl\ftnbj\aenddoc\trackmoves0\trackformatting1\donotembedsysfont1\relyonvml0\donotembedlingdata0\grfdocevents0\validatexml1\showplaceholdtext0\ignoremixedcontent0\saveinvalidxml0\showxmlerrors1\noxlattoyen
    +\expshrtn\noultrlspc\dntblnsbdb\nospaceforul\formshade\horzdoc\dgmargin\dghspace180\dgvspace180\dghorigin1440\dgvorigin1440\dghshow1\dgvshow1
    +\jexpand\viewkind1\viewscale100\pgbrdrhead\pgbrdrfoot\splytwnine\ftnlytwnine\htmautsp\nolnhtadjtbl\useltbaln\alntblind\lytcalctblwd\lyttblrtgr\lnbrkrule\nobrkwrptbl\allowfieldendsel\nojkernpunct\rsidroot7281809
    +\newtblstyruls\usenormstyforlist\noindnmbrts\felnbrelev\nocxsptable\indrlsweleven\noafcnsttbl\afelev\utinl\hwelev\spltpgpar\notcvasp\notbrkcnstfrctbl\notvatxbx\krnprsnet\cachedcolbal \nouicompat \fet0{\*\wgrffmtfilter 2450}\nofeaturethrottle1
    +\ilfomacatclnup0\ltrpar \sectd \ltrsect\linex0\endnhere\pgbrdropt32\sectlinegrid360\sectdefaultcl\sftnbj {\*\pnseclvl1\pnucrm\pnstart1\pnindent720\pnhang {\pntxta .}}{\*\pnseclvl2\pnucltr\pnstart1\pnindent720\pnhang {\pntxta .}}{\*\pnseclvl3
    +\pndec\pnstart1\pnindent720\pnhang {\pntxta .}}{\*\pnseclvl4\pnlcltr\pnstart1\pnindent720\pnhang {\pntxta )}}{\*\pnseclvl5\pndec\pnstart1\pnindent720\pnhang {\pntxtb (}{\pntxta )}}{\*\pnseclvl6\pnlcltr\pnstart1\pnindent720\pnhang {\pntxtb (}{\pntxta )}}
    +{\*\pnseclvl7\pnlcrm\pnstart1\pnindent720\pnhang {\pntxtb (}{\pntxta )}}{\*\pnseclvl8\pnlcltr\pnstart1\pnindent720\pnhang {\pntxtb (}{\pntxta )}}{\*\pnseclvl9\pnlcrm\pnstart1\pnindent720\pnhang {\pntxtb (}{\pntxta )}}\pard\plain \ltrpar
    +\s3\ql \li0\ri0\sb100\sa100\sbauto1\saauto1\widctlpar\wrapdefault\aspalpha\aspnum\faauto\outlinelevel2\adjustright\rin0\lin0\itap0 \rtlch\fcs1 \ab\af0\afs27\alang1025 \ltrch\fcs0 
    +\b\fs27\lang1033\langfe1033\loch\af0\hich\af0\dbch\af31505\cgrid\langnp1033\langfenp1033 {\rtlch\fcs1 \af0 \ltrch\fcs0 \dbch\af0\insrsid741713 GNU GENERAL PUBLIC LICENSE
    +\par }\pard\plain \ltrpar\s16\ql \li0\ri0\sb100\sa100\sbauto1\saauto1\widctlpar\wrapdefault\aspalpha\aspnum\faauto\adjustright\rin0\lin0\itap0 \rtlch\fcs1 \af0\afs24\alang1025 \ltrch\fcs0 
    +\fs24\lang1033\langfe1033\loch\af0\hich\af0\dbch\af31505\cgrid\langnp1033\langfenp1033 {\rtlch\fcs1 \af0 \ltrch\fcs0 \insrsid741713 \hich\af0\dbch\af31505\loch\f0 Version 2, June 1991 
    +\par }\pard\plain \ltrpar\s17\ql \li0\ri0\widctlpar\tx916\tx1832\tx2748\tx3664\tx4580\tx5496\tx6412\tx7328\tx8244\tx9160\tx10076\tx10992\tx11908\tx12824\tx13740\tx14656\wrapdefault\aspalpha\aspnum\faauto\adjustright\rin0\lin0\itap0 \rtlch\fcs1 
    +\af2\afs20\alang1025 \ltrch\fcs0 \fs20\lang1033\langfe1033\loch\af2\hich\af2\dbch\af31505\cgrid\langnp1033\langfenp1033 {\rtlch\fcs1 \af2 \ltrch\fcs0 \insrsid741713 
    +\par \hich\af2\dbch\af31505\loch\f2 Copyright (C) 1989, 1991 Free Software Foundation, Inc.  
    +\par \hich\af2\dbch\af31505\loch\f2 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA
    +\par 
    +\par \hich\af2\dbch\af31505\loch\f2 Everyone is permitted to copy and distribute verbatim copies
    +\par \hich\af2\dbch\af31505\loch\f2 of this license document, but changing it is not allowed.}{\rtlch\fcs1 \af2 \ltrch\fcs0 \insrsid741713 
    +\par }\pard\plain \ltrpar\s3\ql \li0\ri0\sb100\sa100\sbauto1\saauto1\widctlpar\wrapdefault\aspalpha\aspnum\faauto\outlinelevel2\adjustright\rin0\lin0\itap0 \rtlch\fcs1 \ab\af0\afs27\alang1025 \ltrch\fcs0 
    +\b\fs27\lang1033\langfe1033\loch\af0\hich\af0\dbch\af31505\cgrid\langnp1033\langfenp1033 {\rtlch\fcs1 \af0 \ltrch\fcs0 \dbch\af0\insrsid741713 Preamble
    +\par }\pard\plain \ltrpar\s16\ql \li0\ri0\sb100\sa100\sbauto1\saauto1\widctlpar\wrapdefault\aspalpha\aspnum\faauto\adjustright\rin0\lin0\itap0 \rtlch\fcs1 \af0\afs24\alang1025 \ltrch\fcs0 
    +\fs24\lang1033\langfe1033\loch\af0\hich\af0\dbch\af31505\cgrid\langnp1033\langfenp1033 {\rtlch\fcs1 \af0 \ltrch\fcs0 \insrsid741713 \hich\af0\dbch\af31505\loch\f0 
    +The licenses for most software are designed to take away your freedom to share and change it. By contrast, the GNU General Public License is intended to guarantee your freedom to share and \hich\af0\dbch\af31505\loch\f0 
    +change free software--to make sure the software is free for all its users. This General Public License applies to most of the Free Software Foundation's software and to any other program whose authors commit to using it. (Some other Free Software Foundati
    +\hich\af0\dbch\af31505\loch\f0 o\hich\af0\dbch\af31505\loch\f0 n software is covered by the GNU Lesser General Public License instead.) You can apply it to your programs, too. 
    +\par \hich\af0\dbch\af31505\loch\f0 When we speak of free software, we are referring to freedom, not price. Our General Public Licenses are designed to make sure that you have th\hich\af0\dbch\af31505\loch\f0 
    +e freedom to distribute copies of free software (and charge for this service if you wish), that you receive source code or can get it if you want it, that you can change the software or use pieces of it in new free programs; and that you know you can do t
    +\hich\af0\dbch\af31505\loch\f0 h\hich\af0\dbch\af31505\loch\f0 ese things. 
    +\par \hich\af0\dbch\af31505\loch\f0 
    +To protect your rights, we need to make restrictions that forbid anyone to deny you these rights or to ask you to surrender the rights. These restrictions translate to certain responsibilities for you if you distribute copies of the software, 
    +\hich\af0\dbch\af31505\loch\f0 or if you modify it. 
    +\par \hich\af0\dbch\af31505\loch\f0 
    +For example, if you distribute copies of such a program, whether gratis or for a fee, you must give the recipients all the rights that you have. You must make sure that they, too, receive or can get the source code. And you must show 
    +\hich\af0\dbch\af31505\loch\f0 them these terms so they know their rights. 
    +\par \hich\af0\dbch\af31505\loch\f0 We protect your rights with two steps: (1) copyright the software, and (2) offer you this license which gives you legal permission to copy, distribute and/or modify the software. 
    +\par \hich\af0\dbch\af31505\loch\f0 Also, for each author's protect\hich\af0\dbch\af31505\loch\f0 
    +ion and ours, we want to make certain that everyone understands that there is no warranty for this free software. If the software is modified by someone else and passed on, we want its recipients to know that what they have is not the original, so that an
    +\hich\af0\dbch\af31505\loch\f0 y\hich\af0\dbch\af31505\loch\f0  problems introduced by others will not reflect on the original authors' reputations. 
    +\par \hich\af0\dbch\af31505\loch\f0 Finally, any free program is threatened constantly by software patents. We wish to avoid the danger that redistributors of a free program will individually obtain patent\hich\af0\dbch\af31505\loch\f0 
    + licenses, in effect making the program proprietary. To prevent this, we have made it clear that any patent must be licensed for everyone's free use or not licensed at all. 
    +\par \hich\af0\dbch\af31505\loch\f0 The precise terms and conditions for copying, distribution and modification follow\hich\af0\dbch\af31505\loch\f0 . }{\rtlch\fcs1 \af0 \ltrch\fcs0 \insrsid741713 
    +\par }\pard\plain \ltrpar\s3\ql \li0\ri0\sb100\sa100\sbauto1\saauto1\widctlpar\wrapdefault\aspalpha\aspnum\faauto\outlinelevel2\adjustright\rin0\lin0\itap0 \rtlch\fcs1 \ab\af0\afs27\alang1025 \ltrch\fcs0 
    +\b\fs27\lang1033\langfe1033\loch\af0\hich\af0\dbch\af31505\cgrid\langnp1033\langfenp1033 {\rtlch\fcs1 \af0 \ltrch\fcs0 \dbch\af0\insrsid741713 TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION}{\rtlch\fcs1 \af0 \ltrch\fcs0 
    +\dbch\af0\insrsid741713 
    +\par }\pard\plain \ltrpar\s16\ql \li0\ri0\sb100\sa100\sbauto1\saauto1\widctlpar\wrapdefault\aspalpha\aspnum\faauto\adjustright\rin0\lin0\itap0 \rtlch\fcs1 \af0\afs24\alang1025 \ltrch\fcs0 
    +\fs24\lang1033\langfe1033\loch\af0\hich\af0\dbch\af31505\cgrid\langnp1033\langfenp1033 {\rtlch\fcs1 \ab\af0 \ltrch\fcs0 \cs19\b\insrsid741713 0.}{\rtlch\fcs1 \af0 \ltrch\fcs0 \insrsid741713 \hich\af0\dbch\af31505\loch\f0 
    + This License applies to any program or other work which contains a notice placed by the copyright holder saying it may be distributed under the terms of this General Public License. The \hich\af0\dbch\af31505\loch\f0 
    +"Program", below, refers to any such program or work, and a "work based on the Program" means either the Program or any derivative work under copyright law: that is to say, a work containing the Program or a portion of it, either verbatim or with modifica
    +\hich\af0\dbch\af31505\loch\f0 t\hich\af0\dbch\af31505\loch\f0 ions and/or translated into another language. (Hereinafter, translation is included without limitation in the term "modification".) Each licensee is addressed as "you". 
    +\par \hich\af0\dbch\af31505\loch\f0 Activities other than copying, distribution and modification are not covered by this L\hich\af0\dbch\af31505\loch\f0 
    +icense; they are outside its scope. The act of running the Program is not restricted, and the output from the Program is covered only if its contents constitute a work based on the Program (independent of having been made by running the Program). Whether 
    +\hich\af0\dbch\af31505\loch\f0 t\hich\af0\dbch\af31505\loch\f0 hat is true depends on what the Program does. }{\rtlch\fcs1 \af0 \ltrch\fcs0 \insrsid741713 
    +\par }{\rtlch\fcs1 \ab\af0 \ltrch\fcs0 \cs19\b\insrsid741713 \hich\af0\dbch\af31505\loch\f0 1.}{\rtlch\fcs1 \af0 \ltrch\fcs0 \insrsid741713 \hich\af0\dbch\af31505\loch\f0 
    + You may copy and distribute verbatim copies of the Program's source code as you receive it, in any medium, provided that you conspicuously and appropriately publish on each copy an appropriate copyright not\hich\af0\dbch\af31505\loch\f0 
    +ice and disclaimer of warranty; keep intact all the notices that refer to this License and to the absence of any warranty; and give any other recipients of the Program a copy of this License along with the Program. 
    +\par \hich\af0\dbch\af31505\loch\f0 You may charge a fee for the physical ac\hich\af0\dbch\af31505\loch\f0 t of transferring a copy, and you may at your option offer warranty protection in exchange for a fee. }{\rtlch\fcs1 \af0 \ltrch\fcs0 \insrsid741713 
    +
    +\par }{\rtlch\fcs1 \ab\af0 \ltrch\fcs0 \cs19\b\insrsid741713 \hich\af0\dbch\af31505\loch\f0 2.}{\rtlch\fcs1 \af0 \ltrch\fcs0 \insrsid741713 \hich\af0\dbch\af31505\loch\f0 
    + You may modify your copy or copies of the Program or any portion of it, thus forming a work based on the Program, and copy and distribute such modific\hich\af0\dbch\af31505\loch\f0 
    +ations or work under the terms of Section 1 above, provided that you also meet all of these conditions: 
    +\par }\pard\plain \ltrpar\ql \li720\ri0\widctlpar\wrapdefault\aspalpha\aspnum\faauto\adjustright\rin0\lin720\itap0 \rtlch\fcs1 \af0\afs24\alang1025 \ltrch\fcs0 \fs24\lang1033\langfe1033\loch\af0\hich\af0\dbch\af31505\cgrid\langnp1033\langfenp1033 {\rtlch\fcs1 
    +\ab\af0 \ltrch\fcs0 \cs19\b\dbch\af0\insrsid741713 a)}{\rtlch\fcs1 \af0 \ltrch\fcs0 \dbch\af0\insrsid741713  You must cause the modified files to carry prominent notices stating that you changed the files and the date of any change. 
    +\par }{\rtlch\fcs1 \ab\af0 \ltrch\fcs0 \cs19\b\dbch\af0\insrsid741713 b)}{\rtlch\fcs1 \af0 \ltrch\fcs0 \dbch\af0\insrsid741713  You must cause any w
    +ork that you distribute or publish, that in whole or in part contains or is derived from the Program or any part thereof, to be licensed as a whole at no charge to all third parties under the terms of this License. 
    +\par }{\rtlch\fcs1 \ab\af0 \ltrch\fcs0 \cs19\b\dbch\af0\insrsid741713 c)}{\rtlch\fcs1 \af0 \ltrch\fcs0 \dbch\af0\insrsid741713  If the modified program normally read
    +s commands interactively when run, you must cause it, when started running for such interactive use in the most ordinary way, to print or display an announcement including an appropriate copyright notice and a notice that there is no warranty (or else, sa
    +y
    +ing that you provide a warranty) and that users may redistribute the program under these conditions, and telling the user how to view a copy of this License. (Exception: if the Program itself is interactive but does not normally print such an announcement
    +, your work based on the Program is not required to print an announcement.) 
    +\par }\pard\plain \ltrpar\s16\ql \li0\ri0\sb100\sa100\sbauto1\saauto1\widctlpar\wrapdefault\aspalpha\aspnum\faauto\adjustright\rin0\lin0\itap0 \rtlch\fcs1 \af0\afs24\alang1025 \ltrch\fcs0 
    +\fs24\lang1033\langfe1033\loch\af0\hich\af0\dbch\af31505\cgrid\langnp1033\langfenp1033 {\rtlch\fcs1 \af0 \ltrch\fcs0 \insrsid741713 \hich\af0\dbch\af31505\loch\f0 
    +These requirements apply to the modified work as a whole. If identifiable sections of that work are not derived from the Program, and can be reasonably considered independent and s\hich\af0\dbch\af31505\loch\f0 
    +eparate works in themselves, then this License, and its terms, do not apply to those sections when you distribute them as separate works. But when you distribute the same sections as part of a whole which is a work based on the Program, the distribution o
    +\hich\af0\dbch\af31505\loch\f0 f\hich\af0\dbch\af31505\loch\f0  the whole must be on the terms of this License, whose permissions for other licensees extend to the entire whole, and thus to each and every part regardless of who wrote it. 
    +\par \hich\af0\dbch\af31505\loch\f0 Thus, it is not the intent of this section to claim rights or contest your right\hich\af0\dbch\af31505\loch\f0 
    +s to work written entirely by you; rather, the intent is to exercise the right to control the distribution of derivative or collective works based on the Program. 
    +\par \hich\af0\dbch\af31505\loch\f0 In addition, mere aggregation of another work not based on the Program \hich\af0\dbch\af31505\loch\f0 
    +with the Program (or with a work based on the Program) on a volume of a storage or distribution medium does not bring the other work under the scope of this License. }{\rtlch\fcs1 \af0 \ltrch\fcs0 \insrsid741713 
    +\par }{\rtlch\fcs1 \ab\af0 \ltrch\fcs0 \cs19\b\insrsid741713 \hich\af0\dbch\af31505\loch\f0 3.}{\rtlch\fcs1 \af0 \ltrch\fcs0 \insrsid741713 \hich\af0\dbch\af31505\loch\f0  You may copy and distribute the Program (or a work based on it, under Section 2) in ob
    +\hich\af0\dbch\af31505\loch\f0 ject code or executable form under the terms of Sections 1 and 2 above provided that you also do one of the following: }{\rtlch\fcs1 \af0 \ltrch\fcs0 \insrsid741713 
    +\par }\pard\plain \ltrpar\ql \li720\ri0\widctlpar\wrapdefault\aspalpha\aspnum\faauto\adjustright\rin0\lin720\itap0 \rtlch\fcs1 \af0\afs24\alang1025 \ltrch\fcs0 \fs24\lang1033\langfe1033\loch\af0\hich\af0\dbch\af31505\cgrid\langnp1033\langfenp1033 {\rtlch\fcs1 
    +\ab\af0 \ltrch\fcs0 \cs19\b\dbch\af0\insrsid741713 a)}{\rtlch\fcs1 \af0 \ltrch\fcs0 \dbch\af0\insrsid741713  Accompany it with the complete corresponding machine-readable source code, which must be distributed under the terms of Sections 1 an
    +d 2 above on a medium customarily used for software interchange; or, 
    +\par }{\rtlch\fcs1 \ab\af0 \ltrch\fcs0 \cs19\b\dbch\af0\insrsid741713 b)}{\rtlch\fcs1 \af0 \ltrch\fcs0 \dbch\af0\insrsid741713 
    + Accompany it with a written offer, valid for at least three years, to give any third party, for a charge no more than your cost of physically performing source distribution, a complet
    +e machine-readable copy of the corresponding source code, to be distributed under the terms of Sections 1 and 2 above on a medium customarily used for software interchange; or, 
    +\par }{\rtlch\fcs1 \ab\af0 \ltrch\fcs0 \cs19\b\dbch\af0\insrsid741713 c)}{\rtlch\fcs1 \af0 \ltrch\fcs0 \dbch\af0\insrsid741713  Accompany it with the information you received as to the offer to distribut
    +e corresponding source code. (This alternative is allowed only for noncommercial distribution and only if you received the program in object code or executable form with such an offer, in accord with Subsection b above.) 
    +\par }\pard\plain \ltrpar\s16\ql \li0\ri0\sb100\sa100\sbauto1\saauto1\widctlpar\wrapdefault\aspalpha\aspnum\faauto\adjustright\rin0\lin0\itap0 \rtlch\fcs1 \af0\afs24\alang1025 \ltrch\fcs0 
    +\fs24\lang1033\langfe1033\loch\af0\hich\af0\dbch\af31505\cgrid\langnp1033\langfenp1033 {\rtlch\fcs1 \af0 \ltrch\fcs0 \insrsid741713 \hich\af0\dbch\af31505\loch\f0 The source code for a work means t\hich\af0\dbch\af31505\loch\f0 
    +he preferred form of the work for making modifications to it. For an executable work, complete source code means all the source code for all modules it contains, plus any associated interface definition files, plus the scripts used to control compilation 
    +\hich\af0\dbch\af31505\loch\f0 a\hich\af0\dbch\af31505\loch\f0 
    +nd installation of the executable. However, as a special exception, the source code distributed need not include anything that is normally distributed (in either source or binary form) with the major components (compiler, kernel, and so on) of the operati
    +\hich\af0\dbch\af31505\loch\f0 n\hich\af0\dbch\af31505\loch\f0 g system on which the executable runs, unless that component itself accompanies the executable. 
    +\par \hich\af0\dbch\af31505\loch\f0 If distribution of executable or object code is made by offering access to copy from a designated place, then offering equivalent access to copy the source cod\hich\af0\dbch\af31505\loch\f0 
    +e from the same place counts as distribution of the source code, even though third parties are not compelled to copy the source along with the object code. }{\rtlch\fcs1 \af0 \ltrch\fcs0 \insrsid741713 
    +\par }{\rtlch\fcs1 \ab\af0 \ltrch\fcs0 \cs19\b\insrsid741713 \hich\af0\dbch\af31505\loch\f0 4.}{\rtlch\fcs1 \af0 \ltrch\fcs0 \insrsid741713 \hich\af0\dbch\af31505\loch\f0 
    + You may not copy, modify, sublicense, or distribute the Program except as expressly provided und\hich\af0\dbch\af31505\loch\f0 
    +er this License. Any attempt otherwise to copy, modify, sublicense or distribute the Program is void, and will automatically terminate your rights under this License. However, parties who have received copies, or rights, from you under this License will n
    +\hich\af0\dbch\af31505\loch\f0 o\hich\af0\dbch\af31505\loch\f0 t have their licenses terminated so long as such parties remain in full compliance. }{\rtlch\fcs1 \af0 \ltrch\fcs0 \insrsid741713 
    +\par }{\rtlch\fcs1 \ab\af0 \ltrch\fcs0 \cs19\b\insrsid741713 \hich\af0\dbch\af31505\loch\f0 5.}{\rtlch\fcs1 \af0 \ltrch\fcs0 \insrsid741713 \hich\af0\dbch\af31505\loch\f0 
    + You are not required to accept this License, since you have not signed it. However, nothing else grants you permission to modify or distribute the Program or its deriva\hich\af0\dbch\af31505\loch\f0 
    +tive works. These actions are prohibited by law if you do not accept this License. Therefore, by modifying or distributing the Program (or any work based on the Program), you indicate your acceptance of this License to do so, and all its terms and conditi
    +\hich\af0\dbch\af31505\loch\f0 o\hich\af0\dbch\af31505\loch\f0 ns for copying, distributing or modifying the Program or works based on it. }{\rtlch\fcs1 \af0 \ltrch\fcs0 \insrsid741713 
    +\par }{\rtlch\fcs1 \ab\af0 \ltrch\fcs0 \cs19\b\insrsid741713 \hich\af0\dbch\af31505\loch\f0 6.}{\rtlch\fcs1 \af0 \ltrch\fcs0 \insrsid741713 \hich\af0\dbch\af31505\loch\f0 
    + Each time you redistribute the Program (or any work based on the Program), the recipient automatically receives a license from the original licensor to copy, distribute or modi\hich\af0\dbch\af31505\loch\f0 
    +fy the Program subject to these terms and conditions. You may not impose any further restrictions on the recipients' exercise of the rights granted herein. You are not responsible for enforcing compliance by third parties to this License. }{\rtlch\fcs1 
    +\af0 \ltrch\fcs0 \insrsid741713 
    +\par }{\rtlch\fcs1 \ab\af0 \ltrch\fcs0 \cs19\b\insrsid741713 \hich\af0\dbch\af31505\loch\f0 7.}{\rtlch\fcs1 \af0 \ltrch\fcs0 \insrsid741713 \hich\af0\dbch\af31505\loch\f0  If, as a cons\hich\af0\dbch\af31505\loch\f0 
    +equence of a court judgment or allegation of patent infringement or for any other reason (not limited to patent issues), conditions are imposed on you (whether by court order, agreement or otherwise) that contradict the conditions of this License, they do
    +\hich\af0\dbch\af31505\loch\f0  \hich\af0\dbch\af31505\loch\f0 
    +not excuse you from the conditions of this License. If you cannot distribute so as to satisfy simultaneously your obligations under this License and any other pertinent obligations, then as a consequence you may not distribute the Program at all. For exam
    +\hich\af0\dbch\af31505\loch\f0 p\hich\af0\dbch\af31505\loch\f0 
    +le, if a patent license would not permit royalty-free redistribution of the Program by all those who receive copies directly or indirectly through you, then the only way you could satisfy both it and this License would be to refrain entirely from distribu
    +\hich\af0\dbch\af31505\loch\f0 t\hich\af0\dbch\af31505\loch\f0 ion of the Program. 
    +\par \hich\af0\dbch\af31505\loch\f0 If any portion of this section is held invalid or unenforceable under any particular circumstance, the balance of the section is intended to apply and the section as a whole is intended to apply in other circumstances. 
    +
    +\par \hich\af0\dbch\af31505\loch\f0 It is not the p\hich\af0\dbch\af31505\loch\f0 
    +urpose of this section to induce you to infringe any patents or other property right claims or to contest validity of any such claims; this section has the sole purpose of protecting the integrity of the free software distribution system, which is impleme
    +\hich\af0\dbch\af31505\loch\f0 n\hich\af0\dbch\af31505\loch\f0 
    +ted by public license practices. Many people have made generous contributions to the wide range of software distributed through that system in reliance on consistent application of that system; it is up to the author/donor to decide if he or she is willin
    +\hich\af0\dbch\af31505\loch\f0 g\hich\af0\dbch\af31505\loch\f0  to distribute software through any other system and a licensee cannot impose that choice. 
    +\par \hich\af0\dbch\af31505\loch\f0 This section is intended to make thoroughly clear what is believed to be a consequence of the rest of this License. }{\rtlch\fcs1 \af0 \ltrch\fcs0 \insrsid741713 
    +\par }{\rtlch\fcs1 \ab\af0 \ltrch\fcs0 \cs19\b\insrsid741713 \hich\af0\dbch\af31505\loch\f0 8.}{\rtlch\fcs1 \af0 \ltrch\fcs0 \insrsid741713 \hich\af0\dbch\af31505\loch\f0  If the distribution and/or use of the Progra\hich\af0\dbch\af31505\loch\f0 
    +m is restricted in certain countries either by patents or by copyrighted interfaces, the original copyright holder who places the Program under this License may add an explicit geographical distribution limitation excluding those countries, so that distri
    +\hich\af0\dbch\af31505\loch\f0 b\hich\af0\dbch\af31505\loch\f0 ution is permitted only in or among countries not thus excluded. In such case, this License incorporates the limitation as if written in the body of this License. }{\rtlch\fcs1 \af0 
    +\ltrch\fcs0 \insrsid741713 
    +\par }{\rtlch\fcs1 \ab\af0 \ltrch\fcs0 \cs19\b\insrsid741713 \hich\af0\dbch\af31505\loch\f0 9.}{\rtlch\fcs1 \af0 \ltrch\fcs0 \insrsid741713 \hich\af0\dbch\af31505\loch\f0 
    + The Free Software Foundation may publish revised and/or new versions of the General Publi\hich\af0\dbch\af31505\loch\f0 
    +c License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns. 
    +\par \hich\af0\dbch\af31505\loch\f0 Each version is given a distinguishing version number. If the Program specifies a version number\hich\af0\dbch\af31505\loch\f0 
    + of this License which applies to it and "any later version", you have the option of following the terms and conditions either of that version or of any later version published by the Free Software Foundation. If the Program does not specify a version num
    +\hich\af0\dbch\af31505\loch\f0 b\hich\af0\dbch\af31505\loch\f0 er of this License, you may choose any version ever published by the Free Software Foundation. }{\rtlch\fcs1 \af0 \ltrch\fcs0 \insrsid741713 
    +\par }{\rtlch\fcs1 \ab\af0 \ltrch\fcs0 \cs19\b\insrsid741713 \hich\af0\dbch\af31505\loch\f0 10.}{\rtlch\fcs1 \af0 \ltrch\fcs0 \insrsid741713 \hich\af0\dbch\af31505\loch\f0 
    + If you wish to incorporate parts of the Program into other free programs whose distribution conditions are different, write to the author to ask for permiss\hich\af0\dbch\af31505\loch\f0 
    +ion. For software which is copyrighted by the Free Software Foundation, write to the Free Software Foundation; we sometimes make exceptions for this. Our decision will be guided by the two goals of preserving the free status of all derivatives of our free
    +\hich\af0\dbch\af31505\loch\f0  \hich\af0\dbch\af31505\loch\f0 software and of promoting the sharing and reuse of software generally. }{\rtlch\fcs1 \af0 \ltrch\fcs0 \insrsid741713 
    +\par }{\rtlch\fcs1 \ab\af0 \ltrch\fcs0 \cs19\b\insrsid741713 \hich\af0\dbch\af31505\loch\f0 NO WARRANTY}{\rtlch\fcs1 \af0 \ltrch\fcs0 \insrsid741713 
    +\par }{\rtlch\fcs1 \ab\af0 \ltrch\fcs0 \cs19\b\insrsid741713 \hich\af0\dbch\af31505\loch\f0 11.}{\rtlch\fcs1 \af0 \ltrch\fcs0 \insrsid741713 \hich\af0\dbch\af31505\loch\f0 
    + BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING\hich\af0\dbch\af31505\loch\f0 
    + THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RI
    +\hich\af0\dbch\af31505\loch\f0 S\hich\af0\dbch\af31505\loch\f0 K AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. }{\rtlch\fcs1 \af0 
    +\ltrch\fcs0 \insrsid741713 
    +\par }{\rtlch\fcs1 \ab\af0 \ltrch\fcs0 \cs19\b\insrsid741713 \hich\af0\dbch\af31505\loch\f0 12.}{\rtlch\fcs1 \af0 \ltrch\fcs0 \insrsid741713 \hich\af0\dbch\af31505\loch\f0  IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL ANY CO
    +\hich\af0\dbch\af31505\loch\f0 
    +PYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM 
    +\hich\af0\dbch\af31505\loch\f0 (\hich\af0\dbch\af31505\loch\f0 
    +INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY
    +\hich\af0\dbch\af31505\loch\f0  \hich\af0\dbch\af31505\loch\f0 OF SUCH DAMAGES. 
    +\par }\pard\plain \ltrpar\s3\ql \li0\ri0\sb100\sa100\sbauto1\saauto1\widctlpar\wrapdefault\aspalpha\aspnum\faauto\outlinelevel2\adjustright\rin0\lin0\itap0 \rtlch\fcs1 \ab\af0\afs27\alang1025 \ltrch\fcs0 
    +\b\fs27\lang1033\langfe1033\loch\af0\hich\af0\dbch\af31505\cgrid\langnp1033\langfenp1033 {\rtlch\fcs1 \af0 \ltrch\fcs0 \dbch\af0\insrsid741713 END OF TERMS AND CONDITIONS}{\rtlch\fcs1 \af0 \ltrch\fcs0 \dbch\af0\insrsid741713 
    +\par }{\rtlch\fcs1 \af0 \ltrch\fcs0 \dbch\af0\insrsid741713 How to Apply These Terms to Your New Programs
    +\par }\pard\plain \ltrpar\s16\ql \li0\ri0\sb100\sa100\sbauto1\saauto1\widctlpar\wrapdefault\aspalpha\aspnum\faauto\adjustright\rin0\lin0\itap0 \rtlch\fcs1 \af0\afs24\alang1025 \ltrch\fcs0 
    +\fs24\lang1033\langfe1033\loch\af0\hich\af0\dbch\af31505\cgrid\langnp1033\langfenp1033 {\rtlch\fcs1 \af0 \ltrch\fcs0 \insrsid741713 \hich\af0\dbch\af31505\loch\f0 
    +If you develop a new program, and you want it to be of the greatest possible use to the public, the best way to achieve this is to make it free software which every\hich\af0\dbch\af31505\loch\f0 one can redistribute and change under these terms. 
    +\par \hich\af0\dbch\af31505\loch\f0 To do so, attach the following notices to the program. It is safest to attach them to the start of each source file to most effectively convey the exclusion of warranty; and each file should have at least
    +\hich\af0\dbch\af31505\loch\f0  the "copyright" line and a pointer to where the full notice is found. 
    +\par }\pard\plain \ltrpar\s17\ql \li0\ri0\widctlpar\tx916\tx1832\tx2748\tx3664\tx4580\tx5496\tx6412\tx7328\tx8244\tx9160\tx10076\tx10992\tx11908\tx12824\tx13740\tx14656\wrapdefault\aspalpha\aspnum\faauto\adjustright\rin0\lin0\itap0 \rtlch\fcs1 
    +\af2\afs20\alang1025 \ltrch\fcs0 \fs20\lang1033\langfe1033\loch\af2\hich\af2\dbch\af31505\cgrid\langnp1033\langfenp1033 {\rtlch\fcs1 \af2 \ltrch\fcs0 \insrsid741713 
    +\par }{\rtlch\fcs1 \ai\af2 \ltrch\fcs0 \cs20\i\insrsid741713 \hich\af2\dbch\af31505\loch\f2 one line to give the program's name and an idea of what it does.}{\rtlch\fcs1 \af2 \ltrch\fcs0 \insrsid741713 
    +\par \hich\af2\dbch\af31505\loch\f2 Copyright (C) }{\rtlch\fcs1 \ai\af2 \ltrch\fcs0 \cs20\i\insrsid741713 \hich\af2\dbch\af31505\loch\f2 yyyy}{\rtlch\fcs1 \af2 \ltrch\fcs0 \insrsid741713 \hich\af2\dbch\af31505\loch\f2   }{\rtlch\fcs1 \ai\af2 \ltrch\fcs0 
    +\cs20\i\insrsid741713 \hich\af2\dbch\af31505\loch\f2 name of author}{\rtlch\fcs1 \af2 \ltrch\fcs0 \insrsid741713 
    +\par 
    +\par \hich\af2\dbch\af31505\loch\f2 This program is free software; you can redistribute it and/or
    +\par \hich\af2\dbch\af31505\loch\f2 modify it under the terms of the GNU General Public License
    +\par \hich\af2\dbch\af31505\loch\f2 as published by the Free Software Foundation; either version 2
    +\par \hich\af2\dbch\af31505\loch\f2 of the License, or (at your option) any later version.
    +\par 
    +\par \hich\af2\dbch\af31505\loch\f2 This program is distributed in the hope that it will be useful,
    +\par \hich\af2\dbch\af31505\loch\f2 but WITHOUT A\hich\af2\dbch\af31505\loch\f2 NY WARRANTY; without even the implied warranty of
    +\par \hich\af2\dbch\af31505\loch\f2 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    +\par \hich\af2\dbch\af31505\loch\f2 GNU General Public License for more details.
    +\par 
    +\par \hich\af2\dbch\af31505\loch\f2 You should have received a copy of the GNU General Public License
    +\par \hich\af2\dbch\af31505\loch\f2 along with this program; if not,\hich\af2\dbch\af31505\loch\f2  write to the Free Software
    +\par \hich\af2\dbch\af31505\loch\f2 Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
    +\par }\pard\plain \ltrpar\s16\ql \li0\ri0\sb100\sa100\sbauto1\saauto1\widctlpar\wrapdefault\aspalpha\aspnum\faauto\adjustright\rin0\lin0\itap0 \rtlch\fcs1 \af0\afs24\alang1025 \ltrch\fcs0 
    +\fs24\lang1033\langfe1033\loch\af0\hich\af0\dbch\af31505\cgrid\langnp1033\langfenp1033 {\rtlch\fcs1 \af0 \ltrch\fcs0 \insrsid741713 \hich\af0\dbch\af31505\loch\f0 Also add information on how to contact you by electronic and paper mail. 
    +\par \hich\af0\dbch\af31505\loch\f0 If the program is interactive, make it output a short notice like this whe\hich\af0\dbch\af31505\loch\f0 n it starts in an interactive mode: 
    +\par }\pard\plain \ltrpar\s17\ql \li0\ri0\widctlpar\tx916\tx1832\tx2748\tx3664\tx4580\tx5496\tx6412\tx7328\tx8244\tx9160\tx10076\tx10992\tx11908\tx12824\tx13740\tx14656\wrapdefault\aspalpha\aspnum\faauto\adjustright\rin0\lin0\itap0 \rtlch\fcs1 
    +\af2\afs20\alang1025 \ltrch\fcs0 \fs20\lang1033\langfe1033\loch\af2\hich\af2\dbch\af31505\cgrid\langnp1033\langfenp1033 {\rtlch\fcs1 \af2 \ltrch\fcs0 \insrsid741713 
    +\par \hich\af2\dbch\af31505\loch\f2 Gnomovision version 69, Copyright (C) }{\rtlch\fcs1 \ai\af2 \ltrch\fcs0 \cs20\i\insrsid741713 \hich\af2\dbch\af31505\loch\f2 year}{\rtlch\fcs1 \af2 \ltrch\fcs0 \insrsid741713 \hich\af2\dbch\af31505\loch\f2  }{\rtlch\fcs1 
    +\ai\af2 \ltrch\fcs0 \cs20\i\insrsid741713 \hich\af2\dbch\af31505\loch\f2 name of author}{\rtlch\fcs1 \af2 \ltrch\fcs0 \insrsid741713 
    +\par \hich\af2\dbch\af31505\loch\f2 Gnomovision comes with ABSOLUTELY NO WARRANTY; for details
    +\par \hich\af2\dbch\af31505\loch\f2 type `show w'.  This is free software, and you are welcome
    +\par \hich\af2\dbch\af31505\loch\f2 to redistribute it under certain condition\hich\af2\dbch\af31505\loch\f2 s; type `show c' 
    +\par \hich\af2\dbch\af31505\loch\f2 for details.
    +\par }\pard\plain \ltrpar\s16\ql \li0\ri0\sb100\sa100\sbauto1\saauto1\widctlpar\wrapdefault\aspalpha\aspnum\faauto\adjustright\rin0\lin0\itap0 \rtlch\fcs1 \af0\afs24\alang1025 \ltrch\fcs0 
    +\fs24\lang1033\langfe1033\loch\af0\hich\af0\dbch\af31505\cgrid\langnp1033\langfenp1033 {\rtlch\fcs1 \af0 \ltrch\fcs0 \insrsid741713 \hich\af0\dbch\af31505\loch\f0 The hypothetical commands }{\rtlch\fcs1 \af2 \ltrch\fcs0 \cs21\f2\insrsid741713 
    +\hich\af2\dbch\af31505\loch\f2 `show w'}{\rtlch\fcs1 \af0 \ltrch\fcs0 \insrsid741713 \hich\af0\dbch\af31505\loch\f0  and }{\rtlch\fcs1 \af2 \ltrch\fcs0 \cs21\f2\insrsid741713 \hich\af2\dbch\af31505\loch\f2 `show c'}{\rtlch\fcs1 \af0 \ltrch\fcs0 
    +\insrsid741713 \hich\af0\dbch\af31505\loch\f0  should show the appropriate parts of the General Public License. Of course, the commands you use may be called something other than }{\rtlch\fcs1 \af2 \ltrch\fcs0 \cs21\f2\insrsid741713 
    +\hich\af2\dbch\af31505\loch\f2 `show w'}{\rtlch\fcs1 \af0 \ltrch\fcs0 \insrsid741713 \hich\af0\dbch\af31505\loch\f0  and }{\rtlch\fcs1 \af2 \ltrch\fcs0 \cs21\f2\insrsid741713 \hich\af2\dbch\af31505\loch\f2 `show c'}{\rtlch\fcs1 \af0 \ltrch\fcs0 
    +\insrsid741713 \hich\af0\dbch\af31505\loch\f0 ; they could even be mou\hich\af0\dbch\af31505\loch\f0 se-clicks or menu items--whatever suits your program. 
    +\par \hich\af0\dbch\af31505\loch\f0 You should also get your employer (if you work as a programmer) or your school, if any, to sign a "copyright disclaimer" for the program, if necessary. Here is a sample; alter the names: 
    +\par }\pard\plain \ltrpar\s17\ql \li0\ri0\widctlpar\tx916\tx1832\tx2748\tx3664\tx4580\tx5496\tx6412\tx7328\tx8244\tx9160\tx10076\tx10992\tx11908\tx12824\tx13740\tx14656\wrapdefault\aspalpha\aspnum\faauto\adjustright\rin0\lin0\itap0 \rtlch\fcs1 
    +\af2\afs20\alang1025 \ltrch\fcs0 \fs20\lang1033\langfe1033\loch\af2\hich\af2\dbch\af31505\cgrid\langnp1033\langfenp1033 {\rtlch\fcs1 \af2 \ltrch\fcs0 \insrsid741713 
    +\par \hich\af2\dbch\af31505\loch\f2 Yoyodyne, In\hich\af2\dbch\af31505\loch\f2 c., hereby disclaims all copyright
    +\par \hich\af2\dbch\af31505\loch\f2 interest in the program `Gnomovision'
    +\par \hich\af2\dbch\af31505\loch\f2 (which makes passes at compilers) written 
    +\par \hich\af2\dbch\af31505\loch\f2 by James Hacker.
    +\par 
    +\par }{\rtlch\fcs1 \ai\af2 \ltrch\fcs0 \cs20\i\insrsid741713 \hich\af2\dbch\af31505\loch\f2 signature of Ty Coon}{\rtlch\fcs1 \af2 \ltrch\fcs0 \insrsid741713 \hich\af2\dbch\af31505\loch\f2 , 1 April 1989
    +\par \hich\af2\dbch\af31505\loch\f2 Ty Coon, President of Vice
    +\par }\pard\plain \ltrpar\s16\ql \li0\ri0\sb100\sa100\sbauto1\saauto1\widctlpar\wrapdefault\aspalpha\aspnum\faauto\adjustright\rin0\lin0\itap0 \rtlch\fcs1 \af0\afs24\alang1025 \ltrch\fcs0 
    +\fs24\lang1033\langfe1033\loch\af0\hich\af0\dbch\af31505\cgrid\langnp1033\langfenp1033 {\rtlch\fcs1 \af0 \ltrch\fcs0 \insrsid741713 \hich\af0\dbch\af31505\loch\f0 
    +This General Public License does not permit incorporating your program into proprietary programs. If your program is a subroutine library, you may consider it more useful to permit linking proprietary applications with the library. If this is what you wan
    +\hich\af0\dbch\af31505\loch\f0 t\hich\af0\dbch\af31505\loch\f0  to do, use the }{\field\fldedit{\*\fldinst {\rtlch\fcs1 \af0 \ltrch\fcs0 \insrsid741713 \hich\af0\dbch\af31505\loch\f0  \hich\af0\dbch\af31505\loch\f0 
    +HYPERLINK "http://www.gnu.org/licenses/lgpl.html"\hich\af0\dbch\af31505\loch\f0  }}{\fldrslt {\rtlch\fcs1 \af0 \ltrch\fcs0 \cs22\ul\cf2\insrsid741713 \hich\af0\dbch\af31505\loch\f0 GNU Lesser General Public License}}}\sectd \ltrsect
    +\linex0\endnhere\pgbrdropt32\sectlinegrid360\sectdefaultcl\sftnbj {\rtlch\fcs1 \af0 \ltrch\fcs0 \insrsid741713 \hich\af0\dbch\af31505\loch\f0  instead of this License. }{\rtlch\fcs1 \af0 \ltrch\fcs0 \insrsid741713 
    +\par }{\*\themedata 504b030414000600080000002100e9de0fbfff0000001c020000130000005b436f6e74656e745f54797065735d2e786d6cac91cb4ec3301045f748fc83e52d4a
    +9cb2400825e982c78ec7a27cc0c8992416c9d8b2a755fbf74cd25442a820166c2cd933f79e3be372bd1f07b5c3989ca74aaff2422b24eb1b475da5df374fd9ad
    +5689811a183c61a50f98f4babebc2837878049899a52a57be670674cb23d8e90721f90a4d2fa3802cb35762680fd800ecd7551dc18eb899138e3c943d7e503b6
    +b01d583deee5f99824e290b4ba3f364eac4a430883b3c092d4eca8f946c916422ecab927f52ea42b89a1cd59c254f919b0e85e6535d135a8de20f20b8c12c3b0
    +0c895fcf6720192de6bf3b9e89ecdbd6596cbcdd8eb28e7c365ecc4ec1ff1460f53fe813d3cc7f5b7f020000ffff0300504b030414000600080000002100a5d6
    +a7e7c0000000360100000b0000005f72656c732f2e72656c73848fcf6ac3300c87ef85bd83d17d51d2c31825762fa590432fa37d00e1287f68221bdb1bebdb4f
    +c7060abb0884a4eff7a93dfeae8bf9e194e720169aaa06c3e2433fcb68e1763dbf7f82c985a4a725085b787086a37bdbb55fbc50d1a33ccd311ba548b6309512
    +0f88d94fbc52ae4264d1c910d24a45db3462247fa791715fd71f989e19e0364cd3f51652d73760ae8fa8c9ffb3c330cc9e4fc17faf2ce545046e37944c69e462
    +a1a82fe353bd90a865aad41ed0b5b8f9d6fd010000ffff0300504b0304140006000800000021006b799616830000008a0000001c0000007468656d652f746865
    +6d652f7468656d654d616e616765722e786d6c0ccc4d0ac3201040e17da17790d93763bb284562b2cbaebbf600439c1a41c7a0d29fdbd7e5e38337cedf14d59b
    +4b0d592c9c070d8a65cd2e88b7f07c2ca71ba8da481cc52c6ce1c715e6e97818c9b48d13df49c873517d23d59085adb5dd20d6b52bd521ef2cdd5eb9246a3d8b
    +4757e8d3f729e245eb2b260a0238fd010000ffff0300504b030414000600080000002100aa5225dfc60600008b1a0000160000007468656d652f7468656d652f
    +7468656d65312e786d6cec595d8bdb46147d2ff43f08bd3bfe92fcb1c41b6cd9ceb6d94d42eca4e4716c8fadc98e344633de8d0981923c160aa569e943037deb
    +43691b48a02fe9afd936a54d217fa17746b63c638fbb9b2585a5640d8b343af7ce997bafce1d4997afdc8fa87384134e58dc708b970aae83e3211b9178d2706f
    +f7bbb99aeb7081e211a22cc60d778eb97b65f7c30f2ea31d11e2083b601ff31dd4704321a63bf93c1fc230e297d814c7706dcc920809384d26f951828ec16f44
    +f3a542a1928f10895d274611b8bd311e932176fad2a5bbbb74dea1701a0b2e078634e949d7d8b050d8d1615122f89c0734718e106db830cf881df7f17de13a14
    +7101171a6e41fdb9f9ddcb79b4b330a2628bad66d7557f0bbb85c1e8b0a4e64c26836c52cff3bd4a33f3af00546ce23ad54ea553c9fc29001a0e61a52917dda7
    +dfaab7dafe02ab81d2438bef76b55d2e1a78cd7f798373d3973f03af40a97f6f03dfed06104503af4029dedfc07b5eb51478065e81527c65035f2d34db5ed5c0
    +2b5048497cb8812ef89572b05c6d061933ba6785d77daf5b2d2d9caf50500d5975c929c62c16db6a2d42f758d2058004522448ec88f9148fd110aa3840940c12
    +e2ec93490885374531e3305c2815ba8532fc973f4f1da988a01d8c346bc90b98f08d21c9c7e1c3844c45c3fd18bcba1ae4cdcb1fdfbc7cee9c3c7a71f2e89793
    +c78f4f1efd9c3a32acf6503cd1ad5e7fffc5df4f3f75fe7afeddeb275fd9f15cc7fffed367bffdfaa51d082b5d85e0d5d7cffe78f1ecd5379ffff9c3130bbc99
    +a0810eef930873e73a3e766eb10816a6426032c783e4ed2cfa2122ba45339e701423398bc57f478406fafa1c5164c1b5b019c13b09488c0d787576cf20dc0b93
    +9920168fd7c2c8001e30465b2cb146e19a9c4b0b737f164fec9327331d770ba123dbdc018a8dfc766653d05662731984d8a07993a258a0098eb170e4357688b1
    +6575770931e27a408609e36c2c9cbbc46921620d499f0c8c6a5a19ed9108f232b711847c1bb139b8e3b418b5adba8d8f4c24dc15885ac8f73135c27815cd048a
    +6c2efb28a27ac0f791086d247bf364a8e33a5c40a6279832a733c29cdb6c6e24b05e2de9d7405eec693fa0f3c84426821cda7cee23c674649b1d06218aa6366c
    +8fc4a18efd881f428922e7261336f80133ef10790e7940f1d674df21d848f7e96a701b9455a7b42a107965965872791533a37e7b733a4658490d08bfa1e71189
    +4f15f73559f7ff5b5907217df5ed53cbaa2eaaa0371362bda3f6d6647c1b6e5dbc03968cc8c5d7ee369ac53731dc2e9b0decbd74bf976ef77f2fdddbeee7772f
    +d82b8d06f9965bc574abae36eed1d67dfb9850da13738af7b9daba73e84ca32e0c4a3bf5cc8ab3e7b8690887f24e86090cdc2441cac64998f88488b017a229ec
    +ef8bae7432e10bd713ee4c19876dbf1ab6fa96783a8b0ed8287d5c2d16e5a3692a1e1c89d578c1cfc6e15143a4e84a75f50896b9576c27ea51794940dabe0d09
    +6d329344d942a2ba1c9441520fe610340b09b5b277c2a26e615193ee97a9da6001d4b2acc0d6c9810d57c3f53d30012378a242148f649ed2542fb3ab92f92e33
    +bd2d984605c03e625901ab4cd725d7adcb93ab4b4bed0c99364868e566925091513d8c87688417d52947cf42e36d735d5fa5d4a02743a1e683d25ad1a8d6fe8d
    +c579730d76ebda40635d2968ec1c37dc4ad9879219a269c31dc3633f1c4653a81d2eb7bc884ee0ddd95024e90d7f1e6599265cb4110fd3802bd149d520220227
    +0e2551c395cbcfd24063a5218a5bb104827061c9d541562e1a3948ba99643c1ee3a1d0d3ae8dc848a7a7a0f0a95658af2af3f383a5259b41ba7be1e8d819d059
    +720b4189f9d5a20ce0887078fb534ca33922f03a3313b255fdad35a685eceaef13550da5e3884e43b4e828ba98a77025e5191d7596c5403b5bac1902aa8564d1
    +080713d960f5a01add34eb1a2987ad5df7742319394d34573dd35015d935ed2a66ccb06c036bb13c5f93d7582d430c9aa677f854bad725b7bed4bab57d42d625
    +20e059fc2c5df70c0d41a3b69acca026196fcab0d4ecc5a8d93b960b3c85da599a84a6fa95a5dbb5b8653dc23a1d0c9eabf383dd7ad5c2d078b9af549156df3d
    +f44f136c700fc4a30d2f81675470954af8f09020d810f5d49e24950db845ee8bc5ad0147ce2c210df741c16f7a41c90f72859adfc97965af90abf9cd72aee9fb
    +e562c72f16daadd243682c228c8a7efacda50bafa2e87cf1e5458d6f7c7d89966fdb2e0d599467eaeb4a5e11575f5f8aa5ed5f5f1c02a2f3a052ead6cbf55625
    +572f37bb39afddaae5ea41a5956b57826abbdb0efc5abdfbd0758e14d86b9603afd2a9e52ac520c8799582a45fabe7aa5ea9d4f4aacd5ac76b3e5c6c6360e5a9
    +7c2c6201e155bc76ff010000ffff0300504b0304140006000800000021000dd1909fb60000001b010000270000007468656d652f7468656d652f5f72656c732f
    +7468656d654d616e616765722e786d6c2e72656c73848f4d0ac2301484f78277086f6fd3ba109126dd88d0add40384e4350d363f2451eced0dae2c082e8761be
    +9969bb979dc9136332de3168aa1a083ae995719ac16db8ec8e4052164e89d93b64b060828e6f37ed1567914b284d262452282e3198720e274a939cd08a54f980
    +ae38a38f56e422a3a641c8bbd048f7757da0f19b017cc524bd62107bd5001996509affb3fd381a89672f1f165dfe514173d9850528a2c6cce0239baa4c04ca5b
    +babac4df000000ffff0300504b01022d0014000600080000002100e9de0fbfff0000001c0200001300000000000000000000000000000000005b436f6e74656e
    +745f54797065735d2e786d6c504b01022d0014000600080000002100a5d6a7e7c0000000360100000b00000000000000000000000000300100005f72656c732f
    +2e72656c73504b01022d00140006000800000021006b799616830000008a0000001c00000000000000000000000000190200007468656d652f7468656d652f74
    +68656d654d616e616765722e786d6c504b01022d0014000600080000002100aa5225dfc60600008b1a00001600000000000000000000000000d6020000746865
    +6d652f7468656d652f7468656d65312e786d6c504b01022d00140006000800000021000dd1909fb60000001b0100002700000000000000000000000000d00900007468656d652f7468656d652f5f72656c732f7468656d654d616e616765722e786d6c2e72656c73504b050600000000050005005d010000cb0a00000000}
    +{\*\colorschememapping 3c3f786d6c2076657273696f6e3d22312e302220656e636f64696e673d225554462d3822207374616e64616c6f6e653d22796573223f3e0d0a3c613a636c724d
    +617020786d6c6e733a613d22687474703a2f2f736368656d61732e6f70656e786d6c666f726d6174732e6f72672f64726177696e676d6c2f323030362f6d6169
    +6e22206267313d226c743122207478313d22646b3122206267323d226c743222207478323d22646b322220616363656e74313d22616363656e74312220616363
    +656e74323d22616363656e74322220616363656e74333d22616363656e74332220616363656e74343d22616363656e74342220616363656e74353d22616363656e74352220616363656e74363d22616363656e74362220686c696e6b3d22686c696e6b2220666f6c486c696e6b3d22666f6c486c696e6b222f3e}
    +{\*\latentstyles\lsdstimax371\lsdlockeddef0\lsdsemihiddendef0\lsdunhideuseddef0\lsdqformatdef0\lsdprioritydef99{\lsdlockedexcept \lsdqformat1 \lsdpriority0 \lsdlocked0 Normal;\lsdqformat1 \lsdpriority9 \lsdlocked0 heading 1;
    +\lsdsemihidden1 \lsdunhideused1 \lsdqformat1 \lsdpriority9 \lsdlocked0 heading 2;\lsdqformat1 \lsdpriority9 \lsdlocked0 heading 3;\lsdsemihidden1 \lsdunhideused1 \lsdqformat1 \lsdpriority9 \lsdlocked0 heading 4;
    +\lsdsemihidden1 \lsdunhideused1 \lsdqformat1 \lsdpriority9 \lsdlocked0 heading 5;\lsdsemihidden1 \lsdunhideused1 \lsdqformat1 \lsdpriority9 \lsdlocked0 heading 6;\lsdsemihidden1 \lsdunhideused1 \lsdqformat1 \lsdpriority9 \lsdlocked0 heading 7;
    +\lsdsemihidden1 \lsdunhideused1 \lsdqformat1 \lsdpriority9 \lsdlocked0 heading 8;\lsdsemihidden1 \lsdunhideused1 \lsdqformat1 \lsdpriority9 \lsdlocked0 heading 9;\lsdsemihidden1 \lsdunhideused1 \lsdlocked0 index 1;
    +\lsdsemihidden1 \lsdunhideused1 \lsdlocked0 index 2;\lsdsemihidden1 \lsdunhideused1 \lsdlocked0 index 3;\lsdsemihidden1 \lsdunhideused1 \lsdlocked0 index 4;\lsdsemihidden1 \lsdunhideused1 \lsdlocked0 index 5;
    +\lsdsemihidden1 \lsdunhideused1 \lsdlocked0 index 6;\lsdsemihidden1 \lsdunhideused1 \lsdlocked0 index 7;\lsdsemihidden1 \lsdunhideused1 \lsdlocked0 index 8;\lsdsemihidden1 \lsdunhideused1 \lsdlocked0 index 9;
    +\lsdsemihidden1 \lsdunhideused1 \lsdpriority39 \lsdlocked0 toc 1;\lsdsemihidden1 \lsdunhideused1 \lsdpriority39 \lsdlocked0 toc 2;\lsdsemihidden1 \lsdunhideused1 \lsdpriority39 \lsdlocked0 toc 3;
    +\lsdsemihidden1 \lsdunhideused1 \lsdpriority39 \lsdlocked0 toc 4;\lsdsemihidden1 \lsdunhideused1 \lsdpriority39 \lsdlocked0 toc 5;\lsdsemihidden1 \lsdunhideused1 \lsdpriority39 \lsdlocked0 toc 6;
    +\lsdsemihidden1 \lsdunhideused1 \lsdpriority39 \lsdlocked0 toc 7;\lsdsemihidden1 \lsdunhideused1 \lsdpriority39 \lsdlocked0 toc 8;\lsdsemihidden1 \lsdunhideused1 \lsdpriority39 \lsdlocked0 toc 9;\lsdsemihidden1 \lsdunhideused1 \lsdlocked0 Normal Indent;
    +\lsdsemihidden1 \lsdunhideused1 \lsdlocked0 footnote text;\lsdsemihidden1 \lsdunhideused1 \lsdlocked0 annotation text;\lsdsemihidden1 \lsdunhideused1 \lsdlocked0 header;\lsdsemihidden1 \lsdunhideused1 \lsdlocked0 footer;
    +\lsdsemihidden1 \lsdunhideused1 \lsdlocked0 index heading;\lsdsemihidden1 \lsdunhideused1 \lsdqformat1 \lsdpriority35 \lsdlocked0 caption;\lsdsemihidden1 \lsdunhideused1 \lsdlocked0 table of figures;
    +\lsdsemihidden1 \lsdunhideused1 \lsdlocked0 envelope address;\lsdsemihidden1 \lsdunhideused1 \lsdlocked0 envelope return;\lsdsemihidden1 \lsdunhideused1 \lsdlocked0 footnote reference;\lsdsemihidden1 \lsdunhideused1 \lsdlocked0 annotation reference;
    +\lsdsemihidden1 \lsdunhideused1 \lsdlocked0 line number;\lsdsemihidden1 \lsdunhideused1 \lsdlocked0 page number;\lsdsemihidden1 \lsdunhideused1 \lsdlocked0 endnote reference;\lsdsemihidden1 \lsdunhideused1 \lsdlocked0 endnote text;
    +\lsdsemihidden1 \lsdunhideused1 \lsdlocked0 table of authorities;\lsdsemihidden1 \lsdunhideused1 \lsdlocked0 macro;\lsdsemihidden1 \lsdunhideused1 \lsdlocked0 toa heading;\lsdsemihidden1 \lsdunhideused1 \lsdlocked0 List;
    +\lsdsemihidden1 \lsdunhideused1 \lsdlocked0 List Bullet;\lsdsemihidden1 \lsdunhideused1 \lsdlocked0 List Number;\lsdsemihidden1 \lsdunhideused1 \lsdlocked0 List 2;\lsdsemihidden1 \lsdunhideused1 \lsdlocked0 List 3;
    +\lsdsemihidden1 \lsdunhideused1 \lsdlocked0 List 4;\lsdsemihidden1 \lsdunhideused1 \lsdlocked0 List 5;\lsdsemihidden1 \lsdunhideused1 \lsdlocked0 List Bullet 2;\lsdsemihidden1 \lsdunhideused1 \lsdlocked0 List Bullet 3;
    +\lsdsemihidden1 \lsdunhideused1 \lsdlocked0 List Bullet 4;\lsdsemihidden1 \lsdunhideused1 \lsdlocked0 List Bullet 5;\lsdsemihidden1 \lsdunhideused1 \lsdlocked0 List Number 2;\lsdsemihidden1 \lsdunhideused1 \lsdlocked0 List Number 3;
    +\lsdsemihidden1 \lsdunhideused1 \lsdlocked0 List Number 4;\lsdsemihidden1 \lsdunhideused1 \lsdlocked0 List Number 5;\lsdqformat1 \lsdpriority10 \lsdlocked0 Title;\lsdsemihidden1 \lsdunhideused1 \lsdlocked0 Closing;
    +\lsdsemihidden1 \lsdunhideused1 \lsdlocked0 Signature;\lsdsemihidden1 \lsdunhideused1 \lsdpriority1 \lsdlocked0 Default Paragraph Font;\lsdsemihidden1 \lsdunhideused1 \lsdlocked0 Body Text;\lsdsemihidden1 \lsdunhideused1 \lsdlocked0 Body Text Indent;
    +\lsdsemihidden1 \lsdunhideused1 \lsdlocked0 List Continue;\lsdsemihidden1 \lsdunhideused1 \lsdlocked0 List Continue 2;\lsdsemihidden1 \lsdunhideused1 \lsdlocked0 List Continue 3;\lsdsemihidden1 \lsdunhideused1 \lsdlocked0 List Continue 4;
    +\lsdsemihidden1 \lsdunhideused1 \lsdlocked0 List Continue 5;\lsdsemihidden1 \lsdunhideused1 \lsdlocked0 Message Header;\lsdqformat1 \lsdpriority11 \lsdlocked0 Subtitle;\lsdsemihidden1 \lsdunhideused1 \lsdlocked0 Salutation;
    +\lsdsemihidden1 \lsdunhideused1 \lsdlocked0 Date;\lsdsemihidden1 \lsdunhideused1 \lsdlocked0 Body Text First Indent;\lsdsemihidden1 \lsdunhideused1 \lsdlocked0 Body Text First Indent 2;\lsdsemihidden1 \lsdunhideused1 \lsdlocked0 Note Heading;
    +\lsdsemihidden1 \lsdunhideused1 \lsdlocked0 Body Text 2;\lsdsemihidden1 \lsdunhideused1 \lsdlocked0 Body Text 3;\lsdsemihidden1 \lsdunhideused1 \lsdlocked0 Body Text Indent 2;\lsdsemihidden1 \lsdunhideused1 \lsdlocked0 Body Text Indent 3;
    +\lsdsemihidden1 \lsdunhideused1 \lsdlocked0 Block Text;\lsdsemihidden1 \lsdunhideused1 \lsdlocked0 Hyperlink;\lsdsemihidden1 \lsdunhideused1 \lsdlocked0 FollowedHyperlink;\lsdqformat1 \lsdpriority22 \lsdlocked0 Strong;
    +\lsdqformat1 \lsdpriority20 \lsdlocked0 Emphasis;\lsdsemihidden1 \lsdunhideused1 \lsdlocked0 Document Map;\lsdsemihidden1 \lsdunhideused1 \lsdlocked0 Plain Text;\lsdsemihidden1 \lsdunhideused1 \lsdlocked0 E-mail Signature;
    +\lsdsemihidden1 \lsdunhideused1 \lsdlocked0 HTML Top of Form;\lsdsemihidden1 \lsdunhideused1 \lsdlocked0 HTML Bottom of Form;\lsdsemihidden1 \lsdunhideused1 \lsdlocked0 Normal (Web);\lsdsemihidden1 \lsdunhideused1 \lsdlocked0 HTML Acronym;
    +\lsdsemihidden1 \lsdunhideused1 \lsdlocked0 HTML Address;\lsdsemihidden1 \lsdunhideused1 \lsdlocked0 HTML Cite;\lsdsemihidden1 \lsdunhideused1 \lsdlocked0 HTML Code;\lsdsemihidden1 \lsdunhideused1 \lsdlocked0 HTML Definition;
    +\lsdsemihidden1 \lsdunhideused1 \lsdlocked0 HTML Keyboard;\lsdsemihidden1 \lsdunhideused1 \lsdlocked0 HTML Preformatted;\lsdsemihidden1 \lsdunhideused1 \lsdlocked0 HTML Sample;\lsdsemihidden1 \lsdunhideused1 \lsdlocked0 HTML Typewriter;
    +\lsdsemihidden1 \lsdunhideused1 \lsdlocked0 HTML Variable;\lsdsemihidden1 \lsdunhideused1 \lsdlocked0 Normal Table;\lsdsemihidden1 \lsdunhideused1 \lsdlocked0 annotation subject;\lsdsemihidden1 \lsdunhideused1 \lsdlocked0 No List;
    +\lsdsemihidden1 \lsdunhideused1 \lsdlocked0 Outline List 1;\lsdsemihidden1 \lsdunhideused1 \lsdlocked0 Outline List 2;\lsdsemihidden1 \lsdunhideused1 \lsdlocked0 Outline List 3;\lsdsemihidden1 \lsdunhideused1 \lsdlocked0 Table Simple 1;
    +\lsdsemihidden1 \lsdunhideused1 \lsdlocked0 Table Simple 2;\lsdsemihidden1 \lsdunhideused1 \lsdlocked0 Table Simple 3;\lsdsemihidden1 \lsdunhideused1 \lsdlocked0 Table Classic 1;\lsdsemihidden1 \lsdunhideused1 \lsdlocked0 Table Classic 2;
    +\lsdsemihidden1 \lsdunhideused1 \lsdlocked0 Table Classic 3;\lsdsemihidden1 \lsdunhideused1 \lsdlocked0 Table Classic 4;\lsdsemihidden1 \lsdunhideused1 \lsdlocked0 Table Colorful 1;\lsdsemihidden1 \lsdunhideused1 \lsdlocked0 Table Colorful 2;
    +\lsdsemihidden1 \lsdunhideused1 \lsdlocked0 Table Colorful 3;\lsdsemihidden1 \lsdunhideused1 \lsdlocked0 Table Columns 1;\lsdsemihidden1 \lsdunhideused1 \lsdlocked0 Table Columns 2;\lsdsemihidden1 \lsdunhideused1 \lsdlocked0 Table Columns 3;
    +\lsdsemihidden1 \lsdunhideused1 \lsdlocked0 Table Columns 4;\lsdsemihidden1 \lsdunhideused1 \lsdlocked0 Table Columns 5;\lsdsemihidden1 \lsdunhideused1 \lsdlocked0 Table Grid 1;\lsdsemihidden1 \lsdunhideused1 \lsdlocked0 Table Grid 2;
    +\lsdsemihidden1 \lsdunhideused1 \lsdlocked0 Table Grid 3;\lsdsemihidden1 \lsdunhideused1 \lsdlocked0 Table Grid 4;\lsdsemihidden1 \lsdunhideused1 \lsdlocked0 Table Grid 5;\lsdsemihidden1 \lsdunhideused1 \lsdlocked0 Table Grid 6;
    +\lsdsemihidden1 \lsdunhideused1 \lsdlocked0 Table Grid 7;\lsdsemihidden1 \lsdunhideused1 \lsdlocked0 Table Grid 8;\lsdsemihidden1 \lsdunhideused1 \lsdlocked0 Table List 1;\lsdsemihidden1 \lsdunhideused1 \lsdlocked0 Table List 2;
    +\lsdsemihidden1 \lsdunhideused1 \lsdlocked0 Table List 3;\lsdsemihidden1 \lsdunhideused1 \lsdlocked0 Table List 4;\lsdsemihidden1 \lsdunhideused1 \lsdlocked0 Table List 5;\lsdsemihidden1 \lsdunhideused1 \lsdlocked0 Table List 6;
    +\lsdsemihidden1 \lsdunhideused1 \lsdlocked0 Table List 7;\lsdsemihidden1 \lsdunhideused1 \lsdlocked0 Table List 8;\lsdsemihidden1 \lsdunhideused1 \lsdlocked0 Table 3D effects 1;\lsdsemihidden1 \lsdunhideused1 \lsdlocked0 Table 3D effects 2;
    +\lsdsemihidden1 \lsdunhideused1 \lsdlocked0 Table 3D effects 3;\lsdsemihidden1 \lsdunhideused1 \lsdlocked0 Table Contemporary;\lsdsemihidden1 \lsdunhideused1 \lsdlocked0 Table Elegant;\lsdsemihidden1 \lsdunhideused1 \lsdlocked0 Table Professional;
    +\lsdsemihidden1 \lsdunhideused1 \lsdlocked0 Table Subtle 1;\lsdsemihidden1 \lsdunhideused1 \lsdlocked0 Table Subtle 2;\lsdsemihidden1 \lsdunhideused1 \lsdlocked0 Table Web 1;\lsdsemihidden1 \lsdunhideused1 \lsdlocked0 Table Web 2;
    +\lsdsemihidden1 \lsdunhideused1 \lsdlocked0 Table Web 3;\lsdsemihidden1 \lsdunhideused1 \lsdlocked0 Balloon Text;\lsdpriority39 \lsdlocked0 Table Grid;\lsdsemihidden1 \lsdunhideused1 \lsdlocked0 Table Theme;\lsdsemihidden1 \lsdlocked0 Placeholder Text;
    +\lsdqformat1 \lsdpriority1 \lsdlocked0 No Spacing;\lsdpriority60 \lsdlocked0 Light Shading;\lsdpriority61 \lsdlocked0 Light List;\lsdpriority62 \lsdlocked0 Light Grid;\lsdpriority63 \lsdlocked0 Medium Shading 1;\lsdpriority64 \lsdlocked0 Medium Shading 2;
    +\lsdpriority65 \lsdlocked0 Medium List 1;\lsdpriority66 \lsdlocked0 Medium List 2;\lsdpriority67 \lsdlocked0 Medium Grid 1;\lsdpriority68 \lsdlocked0 Medium Grid 2;\lsdpriority69 \lsdlocked0 Medium Grid 3;\lsdpriority70 \lsdlocked0 Dark List;
    +\lsdpriority71 \lsdlocked0 Colorful Shading;\lsdpriority72 \lsdlocked0 Colorful List;\lsdpriority73 \lsdlocked0 Colorful Grid;\lsdpriority60 \lsdlocked0 Light Shading Accent 1;\lsdpriority61 \lsdlocked0 Light List Accent 1;
    +\lsdpriority62 \lsdlocked0 Light Grid Accent 1;\lsdpriority63 \lsdlocked0 Medium Shading 1 Accent 1;\lsdpriority64 \lsdlocked0 Medium Shading 2 Accent 1;\lsdpriority65 \lsdlocked0 Medium List 1 Accent 1;\lsdsemihidden1 \lsdlocked0 Revision;
    +\lsdqformat1 \lsdpriority34 \lsdlocked0 List Paragraph;\lsdqformat1 \lsdpriority29 \lsdlocked0 Quote;\lsdqformat1 \lsdpriority30 \lsdlocked0 Intense Quote;\lsdpriority66 \lsdlocked0 Medium List 2 Accent 1;\lsdpriority67 \lsdlocked0 Medium Grid 1 Accent 1;
    +\lsdpriority68 \lsdlocked0 Medium Grid 2 Accent 1;\lsdpriority69 \lsdlocked0 Medium Grid 3 Accent 1;\lsdpriority70 \lsdlocked0 Dark List Accent 1;\lsdpriority71 \lsdlocked0 Colorful Shading Accent 1;\lsdpriority72 \lsdlocked0 Colorful List Accent 1;
    +\lsdpriority73 \lsdlocked0 Colorful Grid Accent 1;\lsdpriority60 \lsdlocked0 Light Shading Accent 2;\lsdpriority61 \lsdlocked0 Light List Accent 2;\lsdpriority62 \lsdlocked0 Light Grid Accent 2;\lsdpriority63 \lsdlocked0 Medium Shading 1 Accent 2;
    +\lsdpriority64 \lsdlocked0 Medium Shading 2 Accent 2;\lsdpriority65 \lsdlocked0 Medium List 1 Accent 2;\lsdpriority66 \lsdlocked0 Medium List 2 Accent 2;\lsdpriority67 \lsdlocked0 Medium Grid 1 Accent 2;\lsdpriority68 \lsdlocked0 Medium Grid 2 Accent 2;
    +\lsdpriority69 \lsdlocked0 Medium Grid 3 Accent 2;\lsdpriority70 \lsdlocked0 Dark List Accent 2;\lsdpriority71 \lsdlocked0 Colorful Shading Accent 2;\lsdpriority72 \lsdlocked0 Colorful List Accent 2;\lsdpriority73 \lsdlocked0 Colorful Grid Accent 2;
    +\lsdpriority60 \lsdlocked0 Light Shading Accent 3;\lsdpriority61 \lsdlocked0 Light List Accent 3;\lsdpriority62 \lsdlocked0 Light Grid Accent 3;\lsdpriority63 \lsdlocked0 Medium Shading 1 Accent 3;\lsdpriority64 \lsdlocked0 Medium Shading 2 Accent 3;
    +\lsdpriority65 \lsdlocked0 Medium List 1 Accent 3;\lsdpriority66 \lsdlocked0 Medium List 2 Accent 3;\lsdpriority67 \lsdlocked0 Medium Grid 1 Accent 3;\lsdpriority68 \lsdlocked0 Medium Grid 2 Accent 3;\lsdpriority69 \lsdlocked0 Medium Grid 3 Accent 3;
    +\lsdpriority70 \lsdlocked0 Dark List Accent 3;\lsdpriority71 \lsdlocked0 Colorful Shading Accent 3;\lsdpriority72 \lsdlocked0 Colorful List Accent 3;\lsdpriority73 \lsdlocked0 Colorful Grid Accent 3;\lsdpriority60 \lsdlocked0 Light Shading Accent 4;
    +\lsdpriority61 \lsdlocked0 Light List Accent 4;\lsdpriority62 \lsdlocked0 Light Grid Accent 4;\lsdpriority63 \lsdlocked0 Medium Shading 1 Accent 4;\lsdpriority64 \lsdlocked0 Medium Shading 2 Accent 4;\lsdpriority65 \lsdlocked0 Medium List 1 Accent 4;
    +\lsdpriority66 \lsdlocked0 Medium List 2 Accent 4;\lsdpriority67 \lsdlocked0 Medium Grid 1 Accent 4;\lsdpriority68 \lsdlocked0 Medium Grid 2 Accent 4;\lsdpriority69 \lsdlocked0 Medium Grid 3 Accent 4;\lsdpriority70 \lsdlocked0 Dark List Accent 4;
    +\lsdpriority71 \lsdlocked0 Colorful Shading Accent 4;\lsdpriority72 \lsdlocked0 Colorful List Accent 4;\lsdpriority73 \lsdlocked0 Colorful Grid Accent 4;\lsdpriority60 \lsdlocked0 Light Shading Accent 5;\lsdpriority61 \lsdlocked0 Light List Accent 5;
    +\lsdpriority62 \lsdlocked0 Light Grid Accent 5;\lsdpriority63 \lsdlocked0 Medium Shading 1 Accent 5;\lsdpriority64 \lsdlocked0 Medium Shading 2 Accent 5;\lsdpriority65 \lsdlocked0 Medium List 1 Accent 5;\lsdpriority66 \lsdlocked0 Medium List 2 Accent 5;
    +\lsdpriority67 \lsdlocked0 Medium Grid 1 Accent 5;\lsdpriority68 \lsdlocked0 Medium Grid 2 Accent 5;\lsdpriority69 \lsdlocked0 Medium Grid 3 Accent 5;\lsdpriority70 \lsdlocked0 Dark List Accent 5;\lsdpriority71 \lsdlocked0 Colorful Shading Accent 5;
    +\lsdpriority72 \lsdlocked0 Colorful List Accent 5;\lsdpriority73 \lsdlocked0 Colorful Grid Accent 5;\lsdpriority60 \lsdlocked0 Light Shading Accent 6;\lsdpriority61 \lsdlocked0 Light List Accent 6;\lsdpriority62 \lsdlocked0 Light Grid Accent 6;
    +\lsdpriority63 \lsdlocked0 Medium Shading 1 Accent 6;\lsdpriority64 \lsdlocked0 Medium Shading 2 Accent 6;\lsdpriority65 \lsdlocked0 Medium List 1 Accent 6;\lsdpriority66 \lsdlocked0 Medium List 2 Accent 6;
    +\lsdpriority67 \lsdlocked0 Medium Grid 1 Accent 6;\lsdpriority68 \lsdlocked0 Medium Grid 2 Accent 6;\lsdpriority69 \lsdlocked0 Medium Grid 3 Accent 6;\lsdpriority70 \lsdlocked0 Dark List Accent 6;\lsdpriority71 \lsdlocked0 Colorful Shading Accent 6;
    +\lsdpriority72 \lsdlocked0 Colorful List Accent 6;\lsdpriority73 \lsdlocked0 Colorful Grid Accent 6;\lsdqformat1 \lsdpriority19 \lsdlocked0 Subtle Emphasis;\lsdqformat1 \lsdpriority21 \lsdlocked0 Intense Emphasis;
    +\lsdqformat1 \lsdpriority31 \lsdlocked0 Subtle Reference;\lsdqformat1 \lsdpriority32 \lsdlocked0 Intense Reference;\lsdqformat1 \lsdpriority33 \lsdlocked0 Book Title;\lsdsemihidden1 \lsdunhideused1 \lsdpriority37 \lsdlocked0 Bibliography;
    +\lsdsemihidden1 \lsdunhideused1 \lsdqformat1 \lsdpriority39 \lsdlocked0 TOC Heading;\lsdpriority41 \lsdlocked0 Plain Table 1;\lsdpriority42 \lsdlocked0 Plain Table 2;\lsdpriority43 \lsdlocked0 Plain Table 3;\lsdpriority44 \lsdlocked0 Plain Table 4;
    +\lsdpriority45 \lsdlocked0 Plain Table 5;\lsdpriority40 \lsdlocked0 Grid Table Light;\lsdpriority46 \lsdlocked0 Grid Table 1 Light;\lsdpriority47 \lsdlocked0 Grid Table 2;\lsdpriority48 \lsdlocked0 Grid Table 3;\lsdpriority49 \lsdlocked0 Grid Table 4;
    +\lsdpriority50 \lsdlocked0 Grid Table 5 Dark;\lsdpriority51 \lsdlocked0 Grid Table 6 Colorful;\lsdpriority52 \lsdlocked0 Grid Table 7 Colorful;\lsdpriority46 \lsdlocked0 Grid Table 1 Light Accent 1;\lsdpriority47 \lsdlocked0 Grid Table 2 Accent 1;
    +\lsdpriority48 \lsdlocked0 Grid Table 3 Accent 1;\lsdpriority49 \lsdlocked0 Grid Table 4 Accent 1;\lsdpriority50 \lsdlocked0 Grid Table 5 Dark Accent 1;\lsdpriority51 \lsdlocked0 Grid Table 6 Colorful Accent 1;
    +\lsdpriority52 \lsdlocked0 Grid Table 7 Colorful Accent 1;\lsdpriority46 \lsdlocked0 Grid Table 1 Light Accent 2;\lsdpriority47 \lsdlocked0 Grid Table 2 Accent 2;\lsdpriority48 \lsdlocked0 Grid Table 3 Accent 2;
    +\lsdpriority49 \lsdlocked0 Grid Table 4 Accent 2;\lsdpriority50 \lsdlocked0 Grid Table 5 Dark Accent 2;\lsdpriority51 \lsdlocked0 Grid Table 6 Colorful Accent 2;\lsdpriority52 \lsdlocked0 Grid Table 7 Colorful Accent 2;
    +\lsdpriority46 \lsdlocked0 Grid Table 1 Light Accent 3;\lsdpriority47 \lsdlocked0 Grid Table 2 Accent 3;\lsdpriority48 \lsdlocked0 Grid Table 3 Accent 3;\lsdpriority49 \lsdlocked0 Grid Table 4 Accent 3;
    +\lsdpriority50 \lsdlocked0 Grid Table 5 Dark Accent 3;\lsdpriority51 \lsdlocked0 Grid Table 6 Colorful Accent 3;\lsdpriority52 \lsdlocked0 Grid Table 7 Colorful Accent 3;\lsdpriority46 \lsdlocked0 Grid Table 1 Light Accent 4;
    +\lsdpriority47 \lsdlocked0 Grid Table 2 Accent 4;\lsdpriority48 \lsdlocked0 Grid Table 3 Accent 4;\lsdpriority49 \lsdlocked0 Grid Table 4 Accent 4;\lsdpriority50 \lsdlocked0 Grid Table 5 Dark Accent 4;
    +\lsdpriority51 \lsdlocked0 Grid Table 6 Colorful Accent 4;\lsdpriority52 \lsdlocked0 Grid Table 7 Colorful Accent 4;\lsdpriority46 \lsdlocked0 Grid Table 1 Light Accent 5;\lsdpriority47 \lsdlocked0 Grid Table 2 Accent 5;
    +\lsdpriority48 \lsdlocked0 Grid Table 3 Accent 5;\lsdpriority49 \lsdlocked0 Grid Table 4 Accent 5;\lsdpriority50 \lsdlocked0 Grid Table 5 Dark Accent 5;\lsdpriority51 \lsdlocked0 Grid Table 6 Colorful Accent 5;
    +\lsdpriority52 \lsdlocked0 Grid Table 7 Colorful Accent 5;\lsdpriority46 \lsdlocked0 Grid Table 1 Light Accent 6;\lsdpriority47 \lsdlocked0 Grid Table 2 Accent 6;\lsdpriority48 \lsdlocked0 Grid Table 3 Accent 6;
    +\lsdpriority49 \lsdlocked0 Grid Table 4 Accent 6;\lsdpriority50 \lsdlocked0 Grid Table 5 Dark Accent 6;\lsdpriority51 \lsdlocked0 Grid Table 6 Colorful Accent 6;\lsdpriority52 \lsdlocked0 Grid Table 7 Colorful Accent 6;
    +\lsdpriority46 \lsdlocked0 List Table 1 Light;\lsdpriority47 \lsdlocked0 List Table 2;\lsdpriority48 \lsdlocked0 List Table 3;\lsdpriority49 \lsdlocked0 List Table 4;\lsdpriority50 \lsdlocked0 List Table 5 Dark;
    +\lsdpriority51 \lsdlocked0 List Table 6 Colorful;\lsdpriority52 \lsdlocked0 List Table 7 Colorful;\lsdpriority46 \lsdlocked0 List Table 1 Light Accent 1;\lsdpriority47 \lsdlocked0 List Table 2 Accent 1;\lsdpriority48 \lsdlocked0 List Table 3 Accent 1;
    +\lsdpriority49 \lsdlocked0 List Table 4 Accent 1;\lsdpriority50 \lsdlocked0 List Table 5 Dark Accent 1;\lsdpriority51 \lsdlocked0 List Table 6 Colorful Accent 1;\lsdpriority52 \lsdlocked0 List Table 7 Colorful Accent 1;
    +\lsdpriority46 \lsdlocked0 List Table 1 Light Accent 2;\lsdpriority47 \lsdlocked0 List Table 2 Accent 2;\lsdpriority48 \lsdlocked0 List Table 3 Accent 2;\lsdpriority49 \lsdlocked0 List Table 4 Accent 2;
    +\lsdpriority50 \lsdlocked0 List Table 5 Dark Accent 2;\lsdpriority51 \lsdlocked0 List Table 6 Colorful Accent 2;\lsdpriority52 \lsdlocked0 List Table 7 Colorful Accent 2;\lsdpriority46 \lsdlocked0 List Table 1 Light Accent 3;
    +\lsdpriority47 \lsdlocked0 List Table 2 Accent 3;\lsdpriority48 \lsdlocked0 List Table 3 Accent 3;\lsdpriority49 \lsdlocked0 List Table 4 Accent 3;\lsdpriority50 \lsdlocked0 List Table 5 Dark Accent 3;
    +\lsdpriority51 \lsdlocked0 List Table 6 Colorful Accent 3;\lsdpriority52 \lsdlocked0 List Table 7 Colorful Accent 3;\lsdpriority46 \lsdlocked0 List Table 1 Light Accent 4;\lsdpriority47 \lsdlocked0 List Table 2 Accent 4;
    +\lsdpriority48 \lsdlocked0 List Table 3 Accent 4;\lsdpriority49 \lsdlocked0 List Table 4 Accent 4;\lsdpriority50 \lsdlocked0 List Table 5 Dark Accent 4;\lsdpriority51 \lsdlocked0 List Table 6 Colorful Accent 4;
    +\lsdpriority52 \lsdlocked0 List Table 7 Colorful Accent 4;\lsdpriority46 \lsdlocked0 List Table 1 Light Accent 5;\lsdpriority47 \lsdlocked0 List Table 2 Accent 5;\lsdpriority48 \lsdlocked0 List Table 3 Accent 5;
    +\lsdpriority49 \lsdlocked0 List Table 4 Accent 5;\lsdpriority50 \lsdlocked0 List Table 5 Dark Accent 5;\lsdpriority51 \lsdlocked0 List Table 6 Colorful Accent 5;\lsdpriority52 \lsdlocked0 List Table 7 Colorful Accent 5;
    +\lsdpriority46 \lsdlocked0 List Table 1 Light Accent 6;\lsdpriority47 \lsdlocked0 List Table 2 Accent 6;\lsdpriority48 \lsdlocked0 List Table 3 Accent 6;\lsdpriority49 \lsdlocked0 List Table 4 Accent 6;
    +\lsdpriority50 \lsdlocked0 List Table 5 Dark Accent 6;\lsdpriority51 \lsdlocked0 List Table 6 Colorful Accent 6;\lsdpriority52 \lsdlocked0 List Table 7 Colorful Accent 6;}}{\*\datastore 010500000200000018000000
    +4d73786d6c322e534158584d4c5265616465722e362e3000000000000000000000060000
    +d0cf11e0a1b11ae1000000000000000000000000000000003e000300feff090006000000000000000000000001000000010000000000000000100000feffffff00000000feffffff0000000000000000ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
    +ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
    +ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
    +ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
    +fffffffffffffffffdfffffffeffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
    +ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
    +ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
    +ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
    +ffffffffffffffffffffffffffffffff52006f006f007400200045006e00740072007900000000000000000000000000000000000000000000000000000000000000000000000000000000000000000016000500ffffffffffffffffffffffff0c6ad98892f1d411a65f0040963251e5000000000000000000000000e086
    +13997cefce01feffffff00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000ffffffffffffffffffffffff00000000000000000000000000000000000000000000000000000000
    +00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000ffffffffffffffffffffffff0000000000000000000000000000000000000000000000000000
    +000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000ffffffffffffffffffffffff000000000000000000000000000000000000000000000000
    +0000000000000000000000000000000000000000000000000105000000000000}}
    +
diff -Nru check-mk-1.2.2p3/treasures/windows_msi/sources/GPL-V2.rtf check-mk-1.2.6p12/treasures/windows_msi/sources/GPL-V2.rtf
--- check-mk-1.2.2p3/treasures/windows_msi/sources/GPL-V2.rtf 1970-01-01 00:00:00.000000000 +0000
+++ check-mk-1.2.6p12/treasures/windows_msi/sources/GPL-V2.rtf 2014-07-28 09:51:39.000000000 +0000
@@ -0,0 +1,342 @@
+{\rtf1\ansi\ansicpg1252\deff0\deflang1031{\fonttbl{\f0\fswiss\fcharset0 Arial;}}
+{\*\generator Msftedit 5.41.15.1515;}\viewkind4\uc1\pard\f0\fs16\tab\tab GNU GENERAL PUBLIC LICENSE\par
+\tab\tab Version 2, June 1991\par
+\par
+ Copyright (C) 1989, 1991 Free Software Foundation, Inc.,\par
+ 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA\par
+ Everyone is permitted to copy and distribute verbatim copies\par
+ of this license document, but changing it is not allowed.\par
+\par
+\tab\tab\tab Preamble\par
+\par
+ The licenses for most software are designed to take away your\par
+freedom to share and change it. By contrast, the GNU General Public\par
+License is intended to guarantee your freedom to share and change free\par
+software--to make sure the software is free for all its users. This\par
+General Public License applies to most of the Free Software\par
+Foundation's software and to any other program whose authors commit to\par
+using it. (Some other Free Software Foundation software is covered by\par
+the GNU Lesser General Public License instead.) You can apply it to\par
+your programs, too.\par
+\par
+ When we speak of free software, we are referring to freedom, not\par
+price. Our General Public Licenses are designed to make sure that you\par
+have the freedom to distribute copies of free software (and charge for\par
+this service if you wish), that you receive source code or can get it\par
+if you want it, that you can change the software or use pieces of it\par
+in new free programs; and that you know you can do these things.\par
+\par
+ To protect your rights, we need to make restrictions that forbid\par
+anyone to deny you these rights or to ask you to surrender the rights.\par
+These restrictions translate to certain responsibilities for you if you\par
+distribute copies of the software, or if you modify it.\par
+\par
+ For example, if you distribute copies of such a program, whether\par
+gratis or for a fee, you must give the recipients all the rights that\par
+you have. You must make sure that they, too, receive or can get the\par
+source code. And you must show them these terms so they know their\par
+rights.\par
+\par
+ We protect your rights with two steps: (1) copyright the software, and\par
+(2) offer you this license which gives you legal permission to copy,\par
+distribute and/or modify the software.\par
+\par
+ Also, for each author's protection and ours, we want to make certain\par
+that everyone understands that there is no warranty for this free\par
+software. If the software is modified by someone else and passed on, we\par
+want its recipients to know that what they have is not the original, so\par
+that any problems introduced by others will not reflect on the original\par
+authors' reputations.\par
+\par
+ Finally, any free program is threatened constantly by software\par
+patents. We wish to avoid the danger that redistributors of a free\par
+program will individually obtain patent licenses, in effect making the\par
+program proprietary. To prevent this, we have made it clear that any\par
+patent must be licensed for everyone's free use or not licensed at all.\par
+\par
+ The precise terms and conditions for copying, distribution and\par
+modification follow.\par
+\par
+\tab\tab GNU GENERAL PUBLIC LICENSE\par
+ TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION\par
+\par
+ 0. This License applies to any program or other work which contains\par
+a notice placed by the copyright holder saying it may be distributed\par
+under the terms of this General Public License. The "Program", below,\par
+refers to any such program or work, and a "work based on the Program"\par
+means either the Program or any derivative work under copyright law:\par
+that is to say, a work containing the Program or a portion of it,\par
+either verbatim or with modifications and/or translated into another\par
+language. (Hereinafter, translation is included without limitation in\par
+the term "modification".) Each licensee is addressed as "you".\par
+\par
+Activities other than copying, distribution and modification are not\par
+covered by this License; they are outside its scope. The act of\par
+running the Program is not restricted, and the output from the Program\par
+is covered only if its contents constitute a work based on the\par
+Program (independent of having been made by running the Program).\par
+Whether that is true depends on what the Program does.\par
+\par
+ 1. You may copy and distribute verbatim copies of the Program's\par
+source code as you receive it, in any medium, provided that you\par
+conspicuously and appropriately publish on each copy an appropriate\par
+copyright notice and disclaimer of warranty; keep intact all the\par
+notices that refer to this License and to the absence of any warranty;\par
+and give any other recipients of the Program a copy of this License\par
+along with the Program.\par
+\par
+You may charge a fee for the physical act of transferring a copy, and\par
+you may at your option offer warranty protection in exchange for a fee.\par
+\par
+ 2. You may modify your copy or copies of the Program or any portion\par
+of it, thus forming a work based on the Program, and copy and\par
+distribute such modifications or work under the terms of Section 1\par
+above, provided that you also meet all of these conditions:\par
+\par
+ a) You must cause the modified files to carry prominent notices\par
+ stating that you changed the files and the date of any change.\par
+\par
+ b) You must cause any work that you distribute or publish, that in\par
+ whole or in part contains or is derived from the Program or any\par
+ part thereof, to be licensed as a whole at no charge to all third\par
+ parties under the terms of this License.\par
+\par
+ c) If the modified program normally reads commands interactively\par
+ when run, you must cause it, when started running for such\par
+ interactive use in the most ordinary way, to print or display an\par
+ announcement including an appropriate copyright notice and a\par
+ notice that there is no warranty (or else, saying that you provide\par
+ a warranty) and that users may redistribute the program under\par
+ these conditions, and telling the user how to view a copy of this\par
+ License. (Exception: if the Program itself is interactive but\par
+ does not normally print such an announcement, your work based on\par
+ the Program is not required to print an announcement.)\par
+\par
+These requirements apply to the modified work as a whole. If\par
+identifiable sections of that work are not derived from the Program,\par
+and can be reasonably considered independent and separate works in\par
+themselves, then this License, and its terms, do not apply to those\par
+sections when you distribute them as separate works. But when you\par
+distribute the same sections as part of a whole which is a work based\par
+on the Program, the distribution of the whole must be on the terms of\par
+this License, whose permissions for other licensees extend to the\par
+entire whole, and thus to each and every part regardless of who wrote it.\par
+\par
+Thus, it is not the intent of this section to claim rights or contest\par
+your rights to work written entirely by you; rather, the intent is to\par
+exercise the right to control the distribution of derivative or\par
+collective works based on the Program.\par
+\par
+In addition, mere aggregation of another work not based on the Program\par
+with the Program (or with a work based on the Program) on a volume of\par
+a storage or distribution medium does not bring the other work under\par
+the scope of this License.\par
+\par
+ 3. You may copy and distribute the Program (or a work based on it,\par
+under Section 2) in object code or executable form under the terms of\par
+Sections 1 and 2 above provided that you also do one of the following:\par
+\par
+ a) Accompany it with the complete corresponding machine-readable\par
+ source code, which must be distributed under the terms of Sections\par
+ 1 and 2 above on a medium customarily used for software interchange; or,\par
+\par
+ b) Accompany it with a written offer, valid for at least three\par
+ years, to give any third party, for a charge no more than your\par
+ cost of physically performing source distribution, a complete\par
+ machine-readable copy of the corresponding source code, to be\par
+ distributed under the terms of Sections 1 and 2 above on a medium\par
+ customarily used for software interchange; or,\par
+\par
+ c) Accompany it with the information you received as to the offer\par
+ to distribute corresponding source code. (This alternative is\par
+ allowed only for noncommercial distribution and only if you\par
+ received the program in object code or executable form with such\par
+ an offer, in accord with Subsection b above.)\par
+\par
+The source code for a work means the preferred form of the work for\par
+making modifications to it. For an executable work, complete source\par
+code means all the source code for all modules it contains, plus any\par
+associated interface definition files, plus the scripts used to\par
+control compilation and installation of the executable. However, as a\par
+special exception, the source code distributed need not include\par
+anything that is normally distributed (in either source or binary\par
+form) with the major components (compiler, kernel, and so on) of the\par
+operating system on which the executable runs, unless that component\par
+itself accompanies the executable.\par
+\par
+If distribution of executable or object code is made by offering\par
+access to copy from a designated place, then offering equivalent\par
+access to copy the source code from the same place counts as\par
+distribution of the source code, even though third parties are not\par
+compelled to copy the source along with the object code.\par
+\par
+ 4. You may not copy, modify, sublicense, or distribute the Program\par
+except as expressly provided under this License. 
Any attempt\par +otherwise to copy, modify, sublicense or distribute the Program is\par +void, and will automatically terminate your rights under this License.\par +However, parties who have received copies, or rights, from you under\par +this License will not have their licenses terminated so long as such\par +parties remain in full compliance.\par +\par + 5. You are not required to accept this License, since you have not\par +signed it. However, nothing else grants you permission to modify or\par +distribute the Program or its derivative works. These actions are\par +prohibited by law if you do not accept this License. Therefore, by\par +modifying or distributing the Program (or any work based on the\par +Program), you indicate your acceptance of this License to do so, and\par +all its terms and conditions for copying, distributing or modifying\par +the Program or works based on it.\par +\par + 6. Each time you redistribute the Program (or any work based on the\par +Program), the recipient automatically receives a license from the\par +original licensor to copy, distribute or modify the Program subject to\par +these terms and conditions. You may not impose any further\par +restrictions on the recipients' exercise of the rights granted herein.\par +You are not responsible for enforcing compliance by third parties to\par +this License.\par +\par + 7. If, as a consequence of a court judgment or allegation of patent\par +infringement or for any other reason (not limited to patent issues),\par +conditions are imposed on you (whether by court order, agreement or\par +otherwise) that contradict the conditions of this License, they do not\par +excuse you from the conditions of this License. If you cannot\par +distribute so as to satisfy simultaneously your obligations under this\par +License and any other pertinent obligations, then as a consequence you\par +may not distribute the Program at all. For example, if a patent\par +license would not permit royalty-free redistribution of the Program by\par +all those who receive copies directly or indirectly through you, then\par +the only way you could satisfy both it and this License would be to\par +refrain entirely from distribution of the Program.\par +\par +If any portion of this section is held invalid or unenforceable under\par +any particular circumstance, the balance of the section is intended to\par +apply and the section as a whole is intended to apply in other\par +circumstances.\par +\par +It is not the purpose of this section to induce you to infringe any\par +patents or other property right claims or to contest validity of any\par +such claims; this section has the sole purpose of protecting the\par +integrity of the free software distribution system, which is\par +implemented by public license practices. Many people have made\par +generous contributions to the wide range of software distributed\par +through that system in reliance on consistent application of that\par +system; it is up to the author/donor to decide if he or she is willing\par +to distribute software through any other system and a licensee cannot\par +impose that choice.\par +\par +This section is intended to make thoroughly clear what is believed to\par +be a consequence of the rest of this License.\par +\par + 8. 
If the distribution and/or use of the Program is restricted in\par +certain countries either by patents or by copyrighted interfaces, the\par +original copyright holder who places the Program under this License\par +may add an explicit geographical distribution limitation excluding\par +those countries, so that distribution is permitted only in or among\par +countries not thus excluded. In such case, this License incorporates\par +the limitation as if written in the body of this License.\par +\par + 9. The Free Software Foundation may publish revised and/or new versions\par +of the General Public License from time to time. Such new versions will\par +be similar in spirit to the present version, but may differ in detail to\par +address new problems or concerns.\par +\par +Each version is given a distinguishing version number. If the Program\par +specifies a version number of this License which applies to it and "any\par +later version", you have the option of following the terms and conditions\par +either of that version or of any later version published by the Free\par +Software Foundation. If the Program does not specify a version number of\par +this License, you may choose any version ever published by the Free Software\par +Foundation.\par +\par + 10. If you wish to incorporate parts of the Program into other free\par +programs whose distribution conditions are different, write to the author\par +to ask for permission. For software which is copyrighted by the Free\par +Software Foundation, write to the Free Software Foundation; we sometimes\par +make exceptions for this. Our decision will be guided by the two goals\par +of preserving the free status of all derivatives of our free software and\par +of promoting the sharing and reuse of software generally.\par +\par +\tab\tab\tab NO WARRANTY\par +\par + 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY\par +FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN\par +OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES\par +PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED\par +OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF\par +MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS\par +TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE\par +PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING,\par +REPAIR OR CORRECTION.\par +\par + 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING\par +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR\par +REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES,\par +INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING\par +OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED\par +TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY\par +YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER\par +PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE\par +POSSIBILITY OF SUCH DAMAGES.\par +\par +\tab\tab END OF TERMS AND CONDITIONS\par +\par +\tab How to Apply These Terms to Your New Programs\par +\par + If you develop a new program, and you want it to be of the greatest\par +possible use to the public, the best way to achieve this is to make it\par +free software which everyone can redistribute and change under these terms.\par +\par + To do so, attach the following notices to the program. 
It is safest\par +to attach them to the start of each source file to most effectively\par +convey the exclusion of warranty; and each file should have at least\par +the "copyright" line and a pointer to where the full notice is found.\par +\par + \par + Copyright (C) \par +\par + This program is free software; you can redistribute it and/or modify\par + it under the terms of the GNU General Public License as published by\par + the Free Software Foundation; either version 2 of the License, or\par + (at your option) any later version.\par +\par + This program is distributed in the hope that it will be useful,\par + but WITHOUT ANY WARRANTY; without even the implied warranty of\par + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\par + GNU General Public License for more details.\par +\par + You should have received a copy of the GNU General Public License along\par + with this program; if not, write to the Free Software Foundation, Inc.,\par + 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.\par +\par +Also add information on how to contact you by electronic and paper mail.\par +\par +If the program is interactive, make it output a short notice like this\par +when it starts in an interactive mode:\par +\par + Gnomovision version 69, Copyright (C) year name of author\par + Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'.\par + This is free software, and you are welcome to redistribute it\par + under certain conditions; type `show c' for details.\par +\par +The hypothetical commands `show w' and `show c' should show the appropriate\par +parts of the General Public License. Of course, the commands you use may\par +be called something other than `show w' and `show c'; they could even be\par +mouse-clicks or menu items--whatever suits your program.\par +\par +You should also get your employer (if you work as a programmer) or your\par +school, if any, to sign a "copyright disclaimer" for the program, if\par +necessary. Here is a sample; alter the names:\par +\par + Yoyodyne, Inc., hereby disclaims all copyright interest in the program\par + `Gnomovision' (which makes passes at compilers) written by James Hacker.\par +\par + , 1 April 1989\par + Ty Coon, President of Vice\par +\par +This General Public License does not permit incorporating your program into\par +proprietary programs. If your program is a subroutine library, you may\par +consider it more useful to permit linking proprietary applications with the\par +library. 
If this is what you want to do, use the GNU Lesser General\par
+Public License instead of this License.\par
+}
+
\ No newline at end of file
diff -Nru check-mk-1.2.2p3/treasures/workplace/screenrc check-mk-1.2.6p12/treasures/workplace/screenrc
--- check-mk-1.2.2p3/treasures/workplace/screenrc	1970-01-01 00:00:00.000000000 +0000
+++ check-mk-1.2.6p12/treasures/workplace/screenrc	2015-09-21 10:59:54.000000000 +0000
@@ -0,0 +1,14 @@
+startup_message off    # No welcome message
+vbell off              # No visual bell
+msgwait 1              # Show messages for 1 second
+defutf8 on             # utf8 for new windows
+shelltitle 'bash'      # Window title
+hardcopydir $HOME      # Screenshots go into the home directory
+defscrollback 10000    # Buffer
+defmonitor off         # Do not write activity into the window title
+
+screen -t bash
+screen -t bash
+
+caption always "%{= wk} %-w%{= KW} [%n %t] %{-}%+w %= | @%H | %l | %Y-%m-%d %c "
+
diff -Nru check-mk-1.2.2p3/treasures/workplace/vimrc check-mk-1.2.6p12/treasures/workplace/vimrc
--- check-mk-1.2.2p3/treasures/workplace/vimrc	1970-01-01 00:00:00.000000000 +0000
+++ check-mk-1.2.6p12/treasures/workplace/vimrc	2015-09-21 10:59:54.000000000 +0000
@@ -0,0 +1,74 @@
+set ignorecase
+
+" The just-one-space function from Emacs, right on the space key :-)
+nmap :s/[[:space:]]\+\%#[[:space:]]\+/ /e
+
+
+syntax on
+
+
+" Python indentation
+" set cindent
+set smartindent cinwords=if,elif,else,for,while,try,except,finally,def,class
+inoremap # X#
+
+set cino=g0
+map \ :wa:make -j 8 -z 2>/dev/null ; f12
+map :wa:make -j 8 -z 2>/dev/null ; f12
+imap :wa:make -j 8 -z 2>/dev/null ; f12
+
+set fileencodings=ucs-bom,utf-8,latin1
+set ls=2
+set ruler
+set showmode
+set nocompatible
+set sidescroll=1
+set sidescrolloff=10
+set nolinebreak
+set nobackup
+
+set incsearch
+set hlsearch
+set tabstop=8
+set softtabstop=4
+set shiftwidth=4
+set expandtab
+set autoindent
+set ignorecase
+
+" Re-wrap the paragraph to ___ columns
+nmap }!{fmt -80 -u
+
+map! d$
+map! dw
+
+" Detect Check_MK files
+autocmd BufRead *.mk set syntax=python filetype=python
+autocmd BufRead *.wsgi set syntax=python filetype=python
+autocmd BufRead *.lektion set syntax=tex filetype=tex
+
+" Status bar
+set laststatus=2
+set statusline=
+set statusline+=%-3.3n\                      " buffer number
+set statusline+=%f\                          " file name
+set statusline+=%h%m%r%w                     " flags
+set statusline+=\[%{strlen(&ft)?&ft:'none'}, " filetype
+set statusline+=%{&encoding},                " encoding
+set statusline+=%{&fileformat}]              " file format
+set statusline+=%=                           " right align
+set statusline+=0x%-8B\                      " current char
+set statusline+=%-10.(%l,%c%V%)\ %<%P        " offset
+
+" Folding for wato.py
+set foldenable
+map :set foldlevel=0
+map :set foldlevel=1
+set foldmethod=marker
+set foldmarker=#\ \ \ .,#.
+ +" Zeile vom letzten mal merken +autocmd BufReadPost * + \ if line("'\"") > 0 && line("'\"") <= line("$") | + \ exe "normal g`\"" | + \ endif diff -Nru check-mk-1.2.2p3/treasures/zombies.mk check-mk-1.2.6p12/treasures/zombies.mk --- check-mk-1.2.2p3/treasures/zombies.mk 2012-10-04 13:12:37.000000000 +0000 +++ check-mk-1.2.6p12/treasures/zombies.mk 2015-09-21 10:59:54.000000000 +0000 @@ -13,8 +13,10 @@ for _z in range(0, _num_zombies): _name = "zombie%04d" % _z all_hosts.append(_name + "|zombie") - ipaddresses[_name] = "127.0.0.1" + _x = _z % 255 + 1 + _y = (_z / 255) % 255 + 1 + ipaddresses[_name] = "127.0.%d.%d" % (_y, _x) datasource_programs = [ - ( "cat /var/lib/check_mk/cache/localhost", [ "zombie" ], ALL_HOSTS), + ( "cat ~/tmp/check_mk/cache/localhost", [ "zombie" ], ALL_HOSTS), ] diff -Nru check-mk-1.2.2p3/tsm_drives check-mk-1.2.6p12/tsm_drives --- check-mk-1.2.2p3/tsm_drives 2013-11-05 09:23:08.000000000 +0000 +++ check-mk-1.2.6p12/tsm_drives 2015-06-24 09:48:37.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. @@ -88,8 +88,8 @@ monstate = 2 infotext += "(!!)" - return (monstate, nagios_state_names[monstate] + " - " + infotext) - return (3, "UNKNOWN - drive not found") + return (monstate, infotext) + return (3, "drive not found") diff -Nru check-mk-1.2.2p3/tsm_paths check-mk-1.2.6p12/tsm_paths --- check-mk-1.2.2p3/tsm_paths 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/tsm_paths 2015-06-24 09:48:37.000000000 +0000 @@ -0,0 +1,43 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. 
diff -Nru check-mk-1.2.2p3/tsm_drives check-mk-1.2.6p12/tsm_drives
--- check-mk-1.2.2p3/tsm_drives	2013-11-05 09:23:08.000000000 +0000
+++ check-mk-1.2.6p12/tsm_drives	2015-06-24 09:48:37.000000000 +0000
@@ -7,7 +7,7 @@
 # |            | |___| | | |  __/ (__|   <    | |  | | . \            |
 # |             \____|_| |_|\___|\___|_|\_\___|_|  |_|_|\_\           |
 # |                                                                  |
-# | Copyright Mathias Kettner 2013             mk@mathias-kettner.de |
+# | Copyright Mathias Kettner 2014             mk@mathias-kettner.de |
 # +------------------------------------------------------------------+
 #
 # This file is part of Check_MK.
@@ -88,8 +88,8 @@
                 monstate = 2
                 infotext += "(!!)"
 
-            return (monstate, nagios_state_names[monstate] + " - " + infotext)
-    return (3, "UNKNOWN - drive not found")
+            return (monstate, infotext)
+    return (3, "drive not found")
diff -Nru check-mk-1.2.2p3/tsm_paths check-mk-1.2.6p12/tsm_paths
--- check-mk-1.2.2p3/tsm_paths	1970-01-01 00:00:00.000000000 +0000
+++ check-mk-1.2.6p12/tsm_paths	2015-06-24 09:48:37.000000000 +0000
@@ -0,0 +1,43 @@
+#!/usr/bin/python
+# -*- encoding: utf-8; py-indent-offset: 4 -*-
+# +------------------------------------------------------------------+
+# |             ____ _               _        __  __ _  __           |
+# |            / ___| |__   ___  ___| | __   |  \/  | |/ /           |
+# |           | |   | '_ \ / _ \/ __| |/ /   | |\/| | ' /            |
+# |           | |___| | | |  __/ (__|   <    | |  | | . \            |
+# |            \____|_| |_|\___|\___|_|\_\___|_|  |_|_|\_\           |
+# |                                                                  |
+# | Copyright Mathias Kettner 2014             mk@mathias-kettner.de |
+# +------------------------------------------------------------------+
+#
+# This file is part of Check_MK.
+# The official homepage is at http://mathias-kettner.de/check_mk.
+#
+# check_mk is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by
+# the Free Software Foundation in version 2. check_mk is distributed
+# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with-
+# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE. See the GNU General Public License for more de-
+# ails. You should have received a copy of the GNU General Public
+# License along with GNU Make; see the file COPYING. If not, write
+# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
+# Boston, MA 02110-1301 USA.
+
+def inventory_tsm_paths(info):
+    return [(None, None)]
+
+def check_tsm_paths(item, _no_params, info):
+    count_paths = len(info)
+    error_paths = [ x[1] for x in info if x[2] != 'YES' ]
+    if len(error_paths) > 0:
+        return 2, "Paths with errors: %s" % ", ".join(error_paths)
+    return 0, "%d paths OK" % count_paths
+
+check_info["tsm_paths"] = {
+    "check_function"      : check_tsm_paths,
+    "inventory_function"  : inventory_tsm_paths,
+    "service_description" : "TSM Paths",
+    "has_perfdata"        : False,
+}
+
diff -Nru check-mk-1.2.2p3/tsm_scratch check-mk-1.2.6p12/tsm_scratch
--- check-mk-1.2.2p3/tsm_scratch	1970-01-01 00:00:00.000000000 +0000
+++ check-mk-1.2.6p12/tsm_scratch	2015-06-24 09:48:37.000000000 +0000
@@ -0,0 +1,69 @@
+#!/usr/bin/python
+# -*- encoding: utf-8; py-indent-offset: 4 -*-
+# +------------------------------------------------------------------+
+# |             ____ _               _        __  __ _  __           |
+# |            / ___| |__   ___  ___| | __   |  \/  | |/ /           |
+# |           | |   | '_ \ / _ \/ __| |/ /   | |\/| | ' /            |
+# |           | |___| | | |  __/ (__|   <    | |  | | . \            |
+# |            \____|_| |_|\___|\___|_|\_\___|_|  |_|_|\_\           |
+# |                                                                  |
+# | Copyright Mathias Kettner 2014             mk@mathias-kettner.de |
+# +------------------------------------------------------------------+
+#
+# This file is part of Check_MK.
+# The official homepage is at http://mathias-kettner.de/check_mk.
+#
+# check_mk is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by
+# the Free Software Foundation in version 2. check_mk is distributed
+# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with-
+# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE. See the GNU General Public License for more de-
+# ails. You should have received a copy of the GNU General Public
+# License along with GNU Make; see the file COPYING. If not, write
+# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
+# Boston, MA 02110-1301 USA.
+
+# Example output from agent:
+# Put here the example output from your TCP-Based agent. If the
+# check is SNMP-Based, then remove this section
+
+tsm_scratch_default_levels = ( 5, 7 )
+def inventory_tsm_scratch(info):
+    inventory = []
+    for inst, tapes, library in info:
+        if inst != 'default':
+            item = "%s / %s" % ( inst, library )
+        else:
+            item = library
+        inventory.append((item, "tsm_scratch_default_levels"))
+
+    return inventory
+
+def check_tsm_scratch(item, params, info):
+    crit, warn = params
+    for inst, tapes, library in info:
+        if inst != 'default':
+            found_item = "%s / %s" % ( inst, library )
+        else:
+            found_item = library
+        if found_item == item:
+            tapes = saveint(tapes)
+            state = 0
+            if tapes <= crit:
+                state = 2
+            elif tapes <= warn:
+                state = 1
+
+            return state, "Found %d tapes" % tapes
+
+
+    return 3, "Scratch pool %s not found in agent output" % item
+
+check_info["tsm_scratch"] = {
+    "check_function"      : check_tsm_scratch,
+    "inventory_function"  : inventory_tsm_scratch,
+    "service_description" : "Scratch Pool %s",
+    "has_perfdata"        : False,
+}
+
diff -Nru check-mk-1.2.2p3/tsm_sessions check-mk-1.2.6p12/tsm_sessions
--- check-mk-1.2.2p3/tsm_sessions	1970-01-01 00:00:00.000000000 +0000
+++ check-mk-1.2.6p12/tsm_sessions	2015-06-24 09:48:37.000000000 +0000
@@ -0,0 +1,61 @@
+#!/usr/bin/python
+# -*- encoding: utf-8; py-indent-offset: 4 -*-
+# +------------------------------------------------------------------+
+# |             ____ _               _        __  __ _  __           |
+# |            / ___| |__   ___  ___| | __   |  \/  | |/ /           |
+# |           | |   | '_ \ / _ \/ __| |/ /   | |\/| | ' /            |
+# |           | |___| | | |  __/ (__|   <    | |  | | . \            |
+# |            \____|_| |_|\___|\___|_|\_\___|_|  |_|_|\_\           |
+# |                                                                  |
+# | Copyright Mathias Kettner 2014             mk@mathias-kettner.de |
+# +------------------------------------------------------------------+
+#
+# This file is part of Check_MK.
+# The official homepage is at http://mathias-kettner.de/check_mk.
+#
+# check_mk is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by
+# the Free Software Foundation in version 2. check_mk is distributed
+# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with-
+# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE. See the GNU General Public License for more de-
+# ails. You should have received a copy of the GNU General Public
+# License along with GNU Make; see the file COPYING. If not, write
+# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
+# Boston, MA 02110-1301 USA.
+
+# Example output from agent:
+# Put here the example output from your TCP-Based agent. If the
+# check is SNMP-Based, then remove this section
+tsm_session_default_levels = ( 300, 600 )
+
+def inventory_tsm_sessions(info):
+    return [(None, tsm_session_default_levels)]
+
+def check_tsm_sessions(item, params, info):
+    state = 0
+    warn, crit = params
+    count = 0
+    for entry in info:
+        if len(entry) == 4:
+            sid, client_name, proc_state, wait = entry
+        else:
+            sid, proc_state, wait = entry
+        if proc_state in ['RecvW', 'MediaW']:
+            wait = saveint(wait)
+            if wait >= crit:
+                state = 2
+                count += 1
+            elif wait >= warn:
+                state = max(state, 1)
+                count += 1
+    return state, "%d sessions too long in RecvW or MediaW state" % count
+
+
+check_info["tsm_sessions"] = {
+    "check_function"      : check_tsm_sessions,
+    "inventory_function"  : inventory_tsm_sessions,
+    "service_description" : "tsm_sessions",
+    "has_perfdata"        : False,
+}
+
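To make the tsm_sessions thresholds above concrete, here is the same classification reduced to a standalone sketch (the session rows are fabricated; warn/crit default to the 300/600 second levels from the check):

    def sessions_state(info, params=(300, 600)):
        warn, crit = params
        state, count = 0, 0
        for sid, proc_state, wait in info:
            if proc_state in ("RecvW", "MediaW"):
                if wait >= crit:
                    state, count = 2, count + 1
                elif wait >= warn:
                    state, count = max(state, 1), count + 1
        return state, "%d sessions too long in RecvW or MediaW state" % count

    print(sessions_state([("1", "RecvW", 700), ("2", "Run", 0)]))
    # -> (2, '1 sessions too long in RecvW or MediaW state')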
diff -Nru check-mk-1.2.2p3/tsm_stagingpools check-mk-1.2.6p12/tsm_stagingpools
--- check-mk-1.2.2p3/tsm_stagingpools	2013-11-05 09:23:08.000000000 +0000
+++ check-mk-1.2.6p12/tsm_stagingpools	2015-06-24 09:48:37.000000000 +0000
@@ -7,7 +7,7 @@
 # |            | |___| | | |  __/ (__|   <    | |  | | . \            |
 # |             \____|_| |_|\___|\___|_|\_\___|_|  |_|_|\_\           |
 # |                                                                  |
-# | Copyright Mathias Kettner 2013             mk@mathias-kettner.de |
+# | Copyright Mathias Kettner 2014             mk@mathias-kettner.de |
 # +------------------------------------------------------------------+
 #
 # This file is part of Check_MK.
@@ -87,7 +87,7 @@
                  ("free", num_free_tapes, warn, crit),
                  ("util", utilization) ]
 
-    return (state, nagios_state_names[state] + " - " + infotext, perfdata)
+    return (state, infotext, perfdata)
 
 check_info['tsm_stagingpools'] = {
diff -Nru check-mk-1.2.2p3/tsm_stgpool check-mk-1.2.6p12/tsm_stgpool
--- check-mk-1.2.2p3/tsm_stgpool	2013-11-05 09:23:08.000000000 +0000
+++ check-mk-1.2.6p12/tsm_stgpool	1970-01-01 00:00:00.000000000 +0000
@@ -1,54 +0,0 @@
-#!/usr/bin/python
-# -*- encoding: utf-8; py-indent-offset: 4 -*-
-# +------------------------------------------------------------------+
-# |             ____ _               _        __  __ _  __           |
-# |            / ___| |__   ___  ___| | __   |  \/  | |/ /           |
-# |           | |   | '_ \ / _ \/ __| |/ /   | |\/| | ' /            |
-# |           | |___| | | |  __/ (__|   <    | |  | | . \            |
-# |            \____|_| |_|\___|\___|_|\_\___|_|  |_|_|\_\           |
-# |                                                                  |
-# | Copyright Mathias Kettner 2013             mk@mathias-kettner.de |
-# +------------------------------------------------------------------+
-#
-# This file is part of Check_MK.
-# The official homepage is at http://mathias-kettner.de/check_mk.
-#
-# check_mk is free software; you can redistribute it and/or modify it
-# under the terms of the GNU General Public License as published by
-# the Free Software Foundation in version 2. check_mk is distributed
-# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with-
-# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A
-# PARTICULAR PURPOSE. See the GNU General Public License for more de-
-# ails. You should have received a copy of the GNU General Public
-# License along with GNU Make; see the file COPYING. If not, write
-# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
-# Boston, MA 02110-1301 USA.
-
-
-inventory_tsm_stgpool_check_params = 'tsm_stgpool_default_levels'
-tsm_stgpool_default_levels = (90, 95)
-
-def inventory_tsm_stgpool(info):
-    return [ (line[0], '%s perc used' % line[1], inventory_tsm_stgpool_check_params ) for line in info ]
-
-def check_tsm_stgpool(item, params, info):
-    for line in info:
-        if line[0] == item:
-            current = float(line[1])
-            warn, crit = params
-            infotext = "%.1f%% used" % current
-            perfdata = [ ("percused", "%.1f%%" % current, warn, crit, 0, 100.0) ]
-            if current >= crit:
-                return (2, "CRIT - %s (critical at %.1f%%)" % (infotext, crit), perfdata)
-            elif current >= warn:
-                return (1, "WARN - %s (warning at %.1f%%)" % (infotext, warn), perfdata)
-            else:
-                return (0, "OK - %s" % (infotext,), perfdata)
-    return (3, "No such storage pool found")
-
-
-check_info['tsm_stgpool'] = (
-    check_tsm_stgpool,
-    "Storage Pool %s",
-    1,
-    inventory_tsm_stgpool)
diff -Nru check-mk-1.2.2p3/tsm_storagepools check-mk-1.2.6p12/tsm_storagepools
--- check-mk-1.2.2p3/tsm_storagepools	2013-11-05 09:23:08.000000000 +0000
+++ check-mk-1.2.6p12/tsm_storagepools	2015-06-24 09:48:37.000000000 +0000
@@ -7,7 +7,7 @@
 # |            | |___| | | |  __/ (__|   <    | |  | | . \            |
 # |             \____|_| |_|\___|\___|_|\_\___|_|  |_|_|\_\           |
 # |                                                                  |
-# | Copyright Mathias Kettner 2013             mk@mathias-kettner.de |
+# | Copyright Mathias Kettner 2014             mk@mathias-kettner.de |
 # +------------------------------------------------------------------+
 #
 # This file is part of Check_MK.
@@ -42,9 +42,9 @@
 def check_tsm_storagepools(item, _no_params, info):
     for inst, stype, name, size in info:
         if item == name or item == inst + " / " + name:
-            return (0, "OK - %s used - %s" %
+            return (0, "%s used - %s" %
                 (get_bytes_human_readable(int(float(size) * 1024 * 1024)), stype))
-    return (3, "UNKNOWN - no such storage pool", [ ("used", float(size)) ])
+    return (3, "no such storage pool")
 
 check_info['tsm_storagepools'] = {
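The tsm_stgpool removal above also illustrates the API migration running through this whole diff: the old 4-tuple check_info registration gives way to a dictionary. Schematically (the check and function names here are placeholders, not from the source):

    def inventory_my_check(info):            # placeholder inventory function
        return []

    def check_my_check(item, params, info):  # placeholder check function
        return 0, "OK"

    check_info = {}
    checkgroup_of = {}

    # Old style, removed throughout this release: a 4-tuple of
    # (check_function, service_description, has_perfdata_flag, inventory_function)
    check_info['my_check'] = (check_my_check, "My Service %s", 1, inventory_my_check)
    checkgroup_of['my_check'] = "my_rule_group"

    # New style: one explicit dictionary; the 0/1 flag becomes 'has_perfdata'
    # and the separate checkgroup_of[] entry becomes 'group'.
    check_info["my_check"] = {
        'check_function':      check_my_check,
        'inventory_function':  inventory_my_check,
        'service_description': 'My Service %s',
        'has_perfdata':        True,
        'group':               'my_rule_group',
    }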
diff -Nru check-mk-1.2.2p3/ucd_cpu_load check-mk-1.2.6p12/ucd_cpu_load
--- check-mk-1.2.2p3/ucd_cpu_load	2013-11-05 09:23:08.000000000 +0000
+++ check-mk-1.2.6p12/ucd_cpu_load	2015-09-16 14:25:30.000000000 +0000
@@ -7,7 +7,7 @@
 # |            | |___| | | |  __/ (__|   <    | |  | | . \            |
 # |             \____|_| |_|\___|\___|_|\_\___|_|  |_|_|\_\           |
 # |                                                                  |
-# | Copyright Mathias Kettner 2013             mk@mathias-kettner.de |
+# | Copyright Mathias Kettner 2014             mk@mathias-kettner.de |
 # +------------------------------------------------------------------+
 #
 # This file is part of Check_MK.
@@ -25,33 +25,32 @@
 # Boston, MA 02110-1301 USA.
 
-cpuload_default_levels = (5, 10)
+cpuload_default_levels = (5.0, 10.0)
 
 def inventory_ucd_cpu_load(info):
     if len(info) == 3:
         return [(None, "cpuload_default_levels")]
 
 def check_ucd_cpu_load(item, params, info):
-    load = [ float(line[0]) for line in info ]
-    warn, crit = params # apply on 15min average, relative to number of CPUs
-    perfdata = [ ('load' + str(z), l, warn, crit, 0 ) for (z,l) in [ (1,load[0]), (5,load[1]), (15, load[2]) ] ]
-
-    if load[2] >= crit:
-        return (2, "CRIT - 15min Load %.2f (critical at %.2f)" % (load[2], crit), perfdata)
-    elif load[2] >= warn:
-        return (1, "WARN - 15min Load %.2f (warning at %.2f)" % (load[2], warn), perfdata)
-    else:
-        return (0, "OK - 15min Load %.2f" % load[2], perfdata)
+    # Note: Some dumb devices send 12,540000 instead of 12.540000
+    return check_cpu_load_generic(params, [ float(l[0].replace(",", ".")) for l in info ])
 
-check_info['ucd_cpu_load'] = (check_ucd_cpu_load, "CPU load", 1, inventory_ucd_cpu_load )
-checkgroup_of['ucd_cpu_load'] = "cpu_load"
-
-snmp_info['ucd_cpu_load'] = ( ".1.3.6.1.4.1.2021.10.1", [ 6 ] )
 
 # We are not sure how to safely detect the UCD SNMP Daemon. We know that
 # it is mainly used on Linux, but not only. But fetching an OID outside
 # of the info area for scanning is not a good idea. It will slow down
 # scans for *all* hosts.
-snmp_scan_functions['ucd_cpu_load'] = \
-    lambda oid: "linux" in oid(".1.3.6.1.2.1.1.1.0").lower() or \
-                "CMC-TC" in oid(".1.3.6.1.2.1.1.1.0")
+
+check_info["ucd_cpu_load"] = {
+    'check_function':      check_ucd_cpu_load,
+    'inventory_function':  inventory_ucd_cpu_load,
+    'service_description': 'CPU load',
+    'has_perfdata':        True,
+    'snmp_info':           ('.1.3.6.1.4.1.2021.10.1', [6]),
+    'snmp_scan_function':  \
+     lambda oid: "linux" in oid(".1.3.6.1.2.1.1.1.0").lower() or \
+                 "CMC-TC" in oid(".1.3.6.1.2.1.1.1.0") or \
+                 "HP Onboard Administrator" in oid(".1.3.6.1.2.1.1.1.0"),
+    'group':               'cpu_load',
+    "includes":            ["cpu_load.include"],
+}
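The rewritten ucd_cpu_load check above also normalizes decimal commas before parsing; isolated, the conversion looks like this (the sample agent rows are invented):

    info = [["0,71"], ["0.52"], ["12,540000"]]
    loads = [float(row[0].replace(",", ".")) for row in info]
    assert loads == [0.71, 0.52, 12.54]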
diff -Nru check-mk-1.2.2p3/ucd_cpu_util check-mk-1.2.6p12/ucd_cpu_util
--- check-mk-1.2.2p3/ucd_cpu_util	2013-11-05 09:23:08.000000000 +0000
+++ check-mk-1.2.6p12/ucd_cpu_util	2015-06-24 09:48:37.000000000 +0000
@@ -7,7 +7,7 @@
 # |            | |___| | | |  __/ (__|   <    | |  | | . \            |
 # |             \____|_| |_|\___|\___|_|\_\___|_|  |_|_|\_\           |
 # |                                                                  |
-# | Copyright Mathias Kettner 2013             mk@mathias-kettner.de |
+# | Copyright Mathias Kettner 2014             mk@mathias-kettner.de |
 # +------------------------------------------------------------------+
 #
 # This file is part of Check_MK.
@@ -42,21 +42,15 @@
     counters = map(saveint, info[0])
     rates = []
     this_time = time.time()
-    some_counter_wrapped = False
+
     for n, c in enumerate(counters):
         name = "cpu.util.%d" % n
-        try:
-            timedif, rate = get_counter(name, this_time, c)
-            rates.append(rate)
-        except:
-            some_counter_wrapped = True
-
-    if some_counter_wrapped:
-        return (0, "OK - first check, no counter data yet")
+        rate = get_rate(name, this_time, c)
+        rates.append(rate)
 
     total = sum(rates)
     if total == 0:
-        return (3, "UNKNOWN - counters have not moved since last check, looks like state fake data")
+        return (3, "counters have not moved since last check, looks like stale fake data")
 
     parts = [ r/total for r in rates ]
     user = 100 * (parts[0] + parts[1])
@@ -68,28 +62,37 @@
                  ( "system", "%.3f" % system ),
                  ( "wait", "%.3f" % wait ) ]
 
-    result = (0, "OK")
+    status = 0
+    status_txt = ''
     try:
         warn, crit = params
         if wait >= crit:
-            result = (2, "CRIT - (wait too large)")
+            status = 2
+            status_txt = ' wait too large (!!)'
         elif wait >= warn:
-            result = (1, "WARN - (wait too large)")
+            status = 1
+            status_txt = ' wait too large (!)'
     except:
         pass
 
-    return (result[0], result[1] + " - user: %2.1f%%, system: %2.1f%%, wait: %2.1f%%" %
-            (user, system, wait), perfdata)
+    return (status, "user: %2.1f%%, system: %2.1f%%, wait: %2.1f%%%s" %
+            (user, system, wait, status_txt), perfdata)
 
-check_info['ucd_cpu_util'] = (check_ucd_cpu_util, "CPU utilization", 1, inventory_ucd_cpu_util)
-snmp_info['ucd_cpu_util'] = ( ".1.3.6.1.4.1.2021.11", [ 50, 51, 52, 53, 54, 55, 56 ])
 
 # We are not sure how to safely detect the UCD SNMP Daemon. We know that
 # it is mainly used on Linux, but not only. But fetching an OID outside
 # of the info area for scanning is not a good idea. It will slow down
 # scans for *all* hosts.
-snmp_scan_functions['ucd_cpu_util'] = \
-    lambda oid: "linux" in oid(".1.3.6.1.2.1.1.1.0").lower() or \
-                "CMC-TC" in oid(".1.3.6.1.2.1.1.1.0")
-checkgroup_of['ucd_cpu_util'] = "cpu_iowait"
+
+check_info["ucd_cpu_util"] = {
+    'check_function':      check_ucd_cpu_util,
+    'inventory_function':  inventory_ucd_cpu_util,
+    'service_description': 'CPU utilization',
+    'has_perfdata':        True,
+    'snmp_info':           ('.1.3.6.1.4.1.2021.11', [50, 51, 52, 53, 54, 55, 56]),
+    'snmp_scan_function':  \
+     lambda oid: "linux" in oid(".1.3.6.1.2.1.1.1.0").lower() or \
+                 "CMC-TC" in oid(".1.3.6.1.2.1.1.1.0") or \
+                 "HP Onboard Administrator" in oid(".1.3.6.1.2.1.1.1.0"),
+    'group':               'cpu_iowait',
+}
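The utilization math above, reduced to a standalone sketch: each raw counter becomes a rate and is expressed as a share of the total. The two counter snapshots below are fabricated and their differences stand in for what Check_MK's get_rate() would return; the user line is taken from the check, while the index of the wait counter is an assumption for illustration:

    # Fabricated snapshots of the seven UCD raw CPU counters.
    prev = [1000, 10, 500, 8000, 200, 50, 40]
    cur  = [1060, 12, 520, 8880, 220, 52, 41]

    rates = [c - p for p, c in zip(prev, cur)]   # stand-in for get_rate()
    total = float(sum(rates))
    parts = [r / total for r in rates]

    user = 100 * (parts[0] + parts[1])           # as in the check: user + nice
    wait = 100 * parts[4]                        # assumed position of the wait counter
    print("user: %2.1f%%, wait: %2.1f%%" % (user, wait))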
diff -Nru check-mk-1.2.2p3/ucs_bladecenter_fans check-mk-1.2.6p12/ucs_bladecenter_fans
--- check-mk-1.2.2p3/ucs_bladecenter_fans	1970-01-01 00:00:00.000000000 +0000
+++ check-mk-1.2.6p12/ucs_bladecenter_fans	2015-06-24 09:48:37.000000000 +0000
@@ -0,0 +1,141 @@
+#!/usr/bin/python
+# -*- encoding: utf-8; py-indent-offset: 4 -*-
+# +------------------------------------------------------------------+
+# |             ____ _               _        __  __ _  __           |
+# |            / ___| |__   ___  ___| | __   |  \/  | |/ /           |
+# |           | |   | '_ \ / _ \/ __| |/ /   | |\/| | ' /            |
+# |           | |___| | | |  __/ (__|   <    | |  | | . \            |
+# |            \____|_| |_|\___|\___|_|\_\___|_|  |_|_|\_\           |
+# |                                                                  |
+# | Copyright Mathias Kettner 2013             mk@mathias-kettner.de |
+# +------------------------------------------------------------------+
+#
+# This file is part of Check_MK.
+# The official homepage is at http://mathias-kettner.de/check_mk.
+#
+# check_mk is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by
+# the Free Software Foundation in version 2. check_mk is distributed
+# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with-
+# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE. See the GNU General Public License for more de-
+# ails. You should have received a copy of the GNU General Public
+# License along with GNU Make; see the file COPYING. If not, write
+# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
+# Boston, MA 02110-1301 USA.
+
+# <<<ucs_bladecenter_fans>>>
+# equipmentNetworkElementFanStats Dn sys/switch-A/fan-module-1-1/fan-1/stats SpeedAvg 8542
+# equipmentFanModuleStats Dn sys/chassis-2/fan-module-1-1/stats AmbientTemp 29.000000
+# equipmentFan Dn sys/chassis-1/fan-module-1-1/fan-1 Model N20-FAN5 OperState operable
+# equipmentFanStats Dn sys/chassis-2/fan-module-1-1/fan-1/stats SpeedAvg 3652
+
+def parse_ucs_bladecenter_fans(info):
+    data = ucs_bladecenter_convert_info(info)
+    fans = {}
+
+    def get_item_name(key):
+        tokens = key.split("/")
+        tokens[1] = tokens[1].replace("fan-module-", "Module ").replace("-", ".")
+        tokens = map(lambda x: x[0].upper() + x[1:], tokens)
+        if len(tokens) > 2:
+            tokens[2] = tokens[2].replace("fan-", ".")
+        return " ".join(tokens).replace("-", " ")
+
+    for component, key_low, key_high in [
+        ("equipmentNetworkElementFanStats", 4, -6),
+        ("equipmentFanModuleStats",         4, -6),
+        ("equipmentFan",                    4, 100),
+        ("equipmentFanStats",               4, -6),
+    ]:
+        for key, values in data.get(component, {}).items():
+            fan = key[key_low:key_high]
+            del values["Dn"]
+            name = get_item_name(fan)
+            fans.setdefault(name, {}).update(values)
+
+    return fans
+
+# .--Fans----------------------------------------------------------------.
+# |                      _____                                           |
+# |                     |  ___|_ _ _ __  ___                             |
+# |                     | |_ / _` | '_ \/ __|                            |
+# |                     |  _| (_| | | | \__ \                            |
+# |                     |_|  \__,_|_| |_|___/                            |
+# |                                                                      |
+# '----------------------------------------------------------------------'
+
+def inventory_ucs_bladecenter_fans(parsed):
+    items = set()
+    for key, values in parsed.items():
+        if "SpeedAvg" in values:
+            yield " ".join(key.split()[:2]), None
+
+
+def check_ucs_bladecenter_fans(item, _no_params, parsed):
+    my_fans = {}
+    for key, values in parsed.items():
+        if key.startswith(item) and "OperState" in values:
+            my_fans[key] = values
+
+    if not my_fans:
+        yield 3, "Fan statistics not available"
+        return
+
+    yield 0, "%d Fans" % len(my_fans)
+    for key, fan in sorted(my_fans.items()):
+        if fan["OperState"] != "operable":
+            yield 2, "Fan %s %s: average speed %s RPM" % \
+                (key.split()[-1][2:], fan["OperState"], fan.get("SpeedAvg"))
+
+
+check_info["ucs_bladecenter_fans"] = {
+    'parse_function':      parse_ucs_bladecenter_fans,
+    'inventory_function':  inventory_ucs_bladecenter_fans,
+    'check_function':      check_ucs_bladecenter_fans,
+    'service_description': 'Fans %s',
+    'includes':            [ 'ucs_bladecenter.include' ],
+}
+
+#.
+# .--Temperature---------------------------------------------------------.
+# |      _____                                   _                       |
+# |     |_   _|__ _ __ ___  _ __   ___ _ __ __ _| |_ _   _ _ __ ___      |
+# |       | |/ _ \ '_ ` _ \| '_ \ / _ \ '__/ _` | __| | | | '__/ _ \     |
+# |       | |  __/ | | | | | |_) |  __/ | | (_| | |_| |_| | | |  __/     |
+# |       |_|\___|_| |_| |_| .__/ \___|_|  \__,_|\__|\__,_|_|  \___|     |
+# |                        |_|                                           |
+# '----------------------------------------------------------------------'
+
+ucs_bladecenter_fans_temp_default_levels = {
+    "levels" : (40, 50),
+}
+
+# Fans are grouped per module, usually 8 components
+def inventory_ucs_bladecenter_fans_temp(parsed):
+    for key, values in parsed.items():
+        if "AmbientTemp" in values:
+            yield "Ambient " + " ".join(key.split()[:2]), {}
+
+
+def check_ucs_bladecenter_fans_temp(item, params, parsed):
+    sensor_item = item[8:] # drop "Ambient "
+    sensor_list = []
+    for key, values in parsed.items():
+        if key.startswith(sensor_item) and "AmbientTemp" in values:
+            loc = key.split()[-1].split(".")
+            sensor_list.append((
+                "Module %s Fan %s" % (loc[0], loc[1]),
+                float(values.get("AmbientTemp")),
+            ))
+    return check_temperature_list(sensor_list, params)
+
+
+check_info["ucs_bladecenter_fans.temp"] = {
+    'inventory_function'      : inventory_ucs_bladecenter_fans_temp,
+    'check_function'          : check_ucs_bladecenter_fans_temp,
+    'service_description'     : 'Temperature %s FAN',
+    'has_perfdata'            : True,
+    'includes'                : [ 'ucs_bladecenter.include', 'temperature.include' ],
+    'default_levels_variable' : 'ucs_bladecenter_fans_temp_default_levels'
+}
diff -Nru check-mk-1.2.2p3/ucs_bladecenter_fans.temp check-mk-1.2.6p12/ucs_bladecenter_fans.temp
--- check-mk-1.2.2p3/ucs_bladecenter_fans.temp	1970-01-01 00:00:00.000000000 +0000
+++ check-mk-1.2.6p12/ucs_bladecenter_fans.temp	2015-06-24 09:48:36.000000000 +0000
@@ -0,0 +1,38 @@
+title: UCS Bladecenter Fans: Temperature information
+agents: agent_ucs_bladecenter
+catalog: hw/server/cisco
+license: GPL
+distribution: check_mk
+description:
+ This check monitors the temperature of the fans of the various
+ components inside a UCS bladecenter.
+ For each chassis a temperature summary check is created.
+ You can configure levels which apply to all of the sensors.
+ If one sensor breaches the temperature limits, the summary
+ check will point out its location.
+
+item:
+ The component where the fans are located, prefixed by "Ambient", e.g. "Ambient Chassis 1"
+
+perfdata:
+ One variable: the temperature
+
+inventory:
+ One summary check for each component is created
+
+[parameters]
+parameters(dict): This check's parameters are a dictionary with the
+ following (optional) keys:
+
+ {"levels"}: A tuple (warn, crit) containing the upper levels. No defaults.
+
+ {"levels_lower"}: A tuple (warn, crit) containing the lower levels. No defaults.
+
+ {"output_unit"}: "c", "f" or "k", the check will output the temperature in the
+ specified unit. If this is not set, output is in degrees Celsius.
+
+ {"input_unit"}: "c", "f" or "k". By default, the check interprets the sensor value
+ according to the unit sent by the device. This key allows overriding that. Tread
+ lightly, as this may lead to a misinterpreted temperature. Should only be used if
+ the device reports its unit incorrectly.
+
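In configuration terms, the parameters documented above form an ordinary dictionary; an illustrative (not prescriptive) example pinning upper and lower levels:

    params = {
        "levels":       (40, 50),  # upper warn/crit, degrees Celsius
        "levels_lower": (5, 0),    # lower warn/crit (values chosen for illustration)
        "output_unit":  "c",       # display unit
    }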
diff -Nru check-mk-1.2.2p3/ucs_bladecenter_if check-mk-1.2.6p12/ucs_bladecenter_if
--- check-mk-1.2.2p3/ucs_bladecenter_if	1970-01-01 00:00:00.000000000 +0000
+++ check-mk-1.2.6p12/ucs_bladecenter_if	2015-06-24 09:48:37.000000000 +0000
@@ -0,0 +1,325 @@
+#!/usr/bin/python
+# -*- encoding: utf-8; py-indent-offset: 4 -*-
+# +------------------------------------------------------------------+
+# |             ____ _               _        __  __ _  __           |
+# |            / ___| |__   ___  ___| | __   |  \/  | |/ /           |
+# |           | |   | '_ \ / _ \/ __| |/ /   | |\/| | ' /            |
+# |           | |___| | | |  __/ (__|   <    | |  | | . \            |
+# |            \____|_| |_|\___|\___|_|\_\___|_|  |_|_|\_\           |
+# |                                                                  |
+# | Copyright Mathias Kettner 2013             mk@mathias-kettner.de |
+# +------------------------------------------------------------------+
+#
+# This file is part of Check_MK.
+# The official homepage is at http://mathias-kettner.de/check_mk.
+#
+# check_mk is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by
+# the Free Software Foundation in version 2. check_mk is distributed
+# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with-
+# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE. See the GNU General Public License for more de-
+# ails. You should have received a copy of the GNU General Public
+# License along with GNU Make; see the file COPYING. If not, write
+# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
+# Boston, MA 02110-1301 USA.
+
+# <<<ucs_bladecenter_if>>>
+# fcStats Dn sys/switch-A/slot-1/switch-fc/port-37/stats BytesRx 2411057759048 BytesTx 1350394110752 Suspect no
+# fcStats Dn sys/switch-A/slot-1/switch-fc/port-40/stats BytesRx 0 BytesTx 0 Suspect no
+# fcErrStats Dn sys/switch-B/slot-1/switch-fc/port-47/err-stats CrcRx 0 DiscardRx 0 DiscardTx 0
+# fcErrStats Dn sys/switch-B/slot-1/switch-fc/port-48/err-stats CrcRx 0 DiscardRx 0 DiscardTx 0
+# fabricFcSanEp Dn fabric/san/A/phys-slot-1-port-40 EpDn sys/switch-A/slot-1/switch-fc/port-40 AdminState disabled OperState up
+# fabricFcSanEp Dn fabric/san/A/phys-slot-1-port-41 EpDn sys/switch-A/slot-1/switch-fc/port-41 AdminState disabled OperState up
+
+def parse_ucs_bladecenter_if(info):
+    data = ucs_bladecenter_convert_info(info)
+
+    # ==== Fibrechannels ====
+    fc_interfaces = {}
+    for key, values in data.get("fabricFcSanEp", {}).items():
+        fc_interfaces.setdefault(values["EpDn"], {}).update(values)
+
+    # TODO: fabricFcSanPc
+    # TODO: fabricFcSanPcEp
+    for what, cut in [ ("fcStats",    6),
+                       ("fcErrStats", 10) ]:
+        if what in data:
+            for key, values in data[what].items():
+                fc_name = key[:-cut]
+                if fc_name in fc_interfaces:
+                    fc_interfaces[fc_name].setdefault(what, {})
+                    fc_interfaces[key[:-cut]][what].update(values)
+
+
+    # ==== Ethernet ====
+    eth_interfaces = {}
+    for key, values in data.get("fabricEthLanEp", {}).items():
+        eth_interfaces.setdefault(values["EpDn"], {}).update(values)
+
+    # Get info for each portchannel
+    eth_pc_info = {}
+    for key, values in data.get("fabricEthLanPc", {}).items():
+        eth_pc_info[key] = values
+
+    # Ethernet-Portchannel Members
+    for key, values in data.get("fabricEthLanPcEp", {}).items():
+        pc_name = "/".join(values.get("Dn").split("/")[:-1])
+        values["portchannel"] = eth_pc_info[pc_name]
+        eth_pc_info[pc_name].setdefault("members", 0)
+        eth_pc_info[pc_name]["members"] += 1
+        eth_interfaces.setdefault(values["EpDn"], {}).update(values)
+
+    for what, cut in [ ("etherRxStats",  9),
+                       ("etherTxStats",  9),
+                       ("etherErrStats", 10) ]:
+        if what in data:
+            for key, values in
data[what].items(): + eth_name = key[:-cut] + if eth_name in eth_interfaces: + eth_interfaces[eth_name].setdefault(what, {}) + eth_interfaces[key[:-cut]][what].update(values) + + # ==== Interconnects ==== + icnt_interfaces = {} + for key, values in data.get("fabricDceSwEp", {}).items(): + icnt_interfaces.setdefault(values["EpDn"], {}).update(values) + + # Get info for each portchannel + icnt_pc_info = {} + for key, values in data.get("fabricDceSwSrvPc", {}).items(): + icnt_pc_info[key] = values + + # Interconnect-Portchannel Members + for key, values in data.get("fabricDceSwSrvPcEp", {}).items(): + pc_name = "/".join(values.get("Dn").split("/")[:-1]) + values["portchannel"] = icnt_pc_info[pc_name] + icnt_pc_info[pc_name].setdefault("members", 0) + icnt_pc_info[pc_name]["members"] += 1 + icnt_interfaces.setdefault(values["EpDn"], {}).update(values) + + + for what, cut in [ ("etherRxStats", 9), + ("etherTxStats", 9), + ("etherErrStats", 10) ]: + if what in data: + for key, values in data[what].items(): + eth_name = key[:-cut] + if eth_name in icnt_interfaces: + icnt_interfaces[eth_name].setdefault(what, {}) + icnt_interfaces[key[:-cut]][what].update(values) + + + # Example interfaces + # fibrechannel: + # 'sys/switch-A/slot-1/switch-fc/port-38': + # {'AdminState': 'enabled', + # 'BytesRx': '51789849113704', + # 'BytesTx': '15914991789936', + # 'CrcRx': '1', + # 'DiscardRx': '0', + # 'DiscardTx': '0', + # 'EpDn': 'sys/switch-A/slot-1/switch-fc/port-38', + # 'OperState': 'up', + # 'PacketsRx': '26771306796', + # 'PacketsTx': '8735571946', + # 'PortId': '38', + # 'Rx': '1', + # 'SlotId': '1', + # 'Suspect': 'no', + # 'SwitchId': 'A', + # 'Tx': '0'}, + # ethernet: + # 'sys/switch-A/slot-1/switch-ether/port-18': + # {'AdminState': 'enabled', + # 'Dn': 'fabric/lan/A/pc-1/ep-slot-1-port-18', + # 'EpDn': 'sys/switch-A/slot-1/switch-ether/port-18', + # 'OperState': 'up', + # 'PortId': '18', + # 'SlotId': '1', + # 'SwitchId': 'A', + # 'etherRxStats': {'BroadcastPackets': '116544272', + # 'Dn': 'sys/switch-A/slot-1/switch-ether/port-18/rx-stats', + # 'MulticastPackets': '560456841', + # 'TotalBytes': '53066141169147', + # 'UnicastPackets': '138412352259'}, + # 'etherTxStats': {'BroadcastPackets': '4922247', + # 'Dn': 'sys/switch-A/slot-1/switch-ether/port-18/tx-stats', + # 'MulticastPackets': '82743790', + # 'TotalBytes': '79420242621595', + # 'UnicastPackets': '135007642584'}, + # interconnect: + # 'sys/switch-A/slot-1/switch-ether/port-2': + # {'AdminState': 'enabled', + # 'Dn': 'fabric/server/sw-A/pc-1025/ep-slot-1-port-2', + # 'EpDn': 'sys/switch-A/slot-1/switch-ether/port-2', + # 'OperState': 'up', + # 'PortId': '2', + # 'SlotId': '1', + # 'SwitchId': 'A', + # 'etherErrStats': {'Dn': 'sys/switch-A/slot-1/switch-ether/port-2/err-stats', + # 'OutDiscard': '0', + # 'Rcv': '0'}, + # 'etherRxStats': {'BroadcastPackets': '50432549', + # 'Dn': 'sys/switch-A/slot-1/switch-ether/port-2/rx-stats', + # 'MulticastPackets': '80349542', + # 'TotalBytes': '50633308808192', + # 'UnicastPackets': '53535107978'}, + # 'etherTxStats': {'BroadcastPackets': '4892153', + # 'Dn': 'sys/switch-A/slot-1/switch-ether/port-2/tx-stats', + # 'MulticastPackets': '328878878', + # 'TotalBytes': '97004901202254', + # 'UnicastPackets': '79555260499'}, + # 'portchannel': {'AdminState': 'enabled', + # 'Dn': 'fabric/server/sw-A/pc-1025', + # 'OperSpeed': '10gbps', + # 'OperState': 'up', + # 'PortId': '1025', + # 'members': 4}}, + + + # We need to fill this structure + # converted = [ + # [], # 0 ifIndex 0 + # [], # 1 ifDescr 1 + # [], 
# 2  ifType                    2
+    #     [],     # 3  ifHighSpeed               .. 1000 means 1Gbit
+    #     [],     # 4  ifOperStatus              4
+    #     [],     # 5  ifHCInOctets              5
+    #     [],     # 6  ifHCInUcastPkts           6
+    #     [],     # 7  ifHCInMulticastPkts       7
+    #     [],     # 8  ifHCInBroadcastPkts       8
+    #     [],     # 9  ifInDiscards              9
+    #     [],     # 10 ifInErrors                10
+    #     [],     # 11 ifHCOutOctets             11
+    #     [],     # 12 ifHCOutUcastPkts          12
+    #     [],     # 13 ifHCOutMulticastPkts      13
+    #     [],     # 14 ifHCOutBroadcastPkts      14
+    #     [],     # 15 ifOutDiscards             15
+    #     [],     # 16 ifOutErrors               16
+    #     [],     # 17 ifOutQLen                 17
+    #     [],     # 18 ifAlias                   18
+    #     [],     # 19 ifPhysAddress             19
+    # ]
+
+
+    # Specify which values are to be put into the resulting interface table
+    tableindex = {
+        "fibrechannel": {
+            # a list of class and field
+            2:  "6", # This means Ethernet. We should set the real type here
+                     # if.include does not support the interface type 56
+            5:  [ ("fcStats",    "BytesRx") ],
+            6:  [ ("fcStats",    "PacketsRx") ],
+            9:  [ ("fcErrStats", "DiscardRx") ],
+            10: [ ("fcErrStats", "Rx"), ("fcErrStats", "CrcRx") ],
+
+            11: [ ("fcStats",    "BytesTx") ],
+            12: [ ("fcStats",    "PacketsTx") ],
+            15: [ ("fcErrStats", "DiscardTx") ],
+            16: [ ("fcErrStats", "Tx") ]
+        },
+        "ethernet": {
+            2:  "6",
+            5:  [ ("etherRxStats",  "TotalBytes") ],
+            6:  [ ("etherRxStats",  "UnicastPackets") ],
+            7:  [ ("etherRxStats",  "MulticastPackets") ],
+            8:  [ ("etherRxStats",  "BroadcastPackets") ],
+            10: [ ("etherErrStats", "Rcv") ],
+
+            11: [ ("etherTxStats",  "TotalBytes") ],
+            12: [ ("etherTxStats",  "UnicastPackets") ],
+            13: [ ("etherTxStats",  "MulticastPackets") ],
+            14: [ ("etherTxStats",  "BroadcastPackets") ],
+            15: [ ("etherErrStats", "OutDiscard") ],
+        },
+        "interconnect": {
+            2:  "6",
+            5:  [ ("etherRxStats",  "TotalBytes") ],
+            6:  [ ("etherRxStats",  "UnicastPackets") ],
+            7:  [ ("etherRxStats",  "MulticastPackets") ],
+            8:  [ ("etherRxStats",  "BroadcastPackets") ],
+            10: [ ("etherErrStats", "Rcv") ],
+
+            11: [ ("etherTxStats",  "TotalBytes") ],
+            12: [ ("etherTxStats",  "UnicastPackets") ],
+            13: [ ("etherTxStats",  "MulticastPackets") ],
+            14: [ ("etherTxStats",  "BroadcastPackets") ],
+            15: [ ("etherErrStats", "OutDiscard") ],
+        }
+    }
+
+
+    # Ethernet
+    converted = []
+    last_index = 0
+    for what, group_prefix, interfaces, item_template in [
+        ("fibrechannel", "Fibrechannel-Group", fc_interfaces,   "Slot %s FC-Switch %s Port %s"),
+        ("ethernet",     "Ethernet-Group",     eth_interfaces,  "Slot %s Switch %s Port %s"),
+        ("interconnect", "Interconnect-Group", icnt_interfaces, "Slot %s IC-Switch %s Port %s"),
+    ]:
+        for index, (name, values) in enumerate(interfaces.items()):
+            item = item_template % ( values.get("SlotId"),
+                                     values.get("SwitchId"),
+                                     values.get("PortId") )
+
+            entry = ['0'] * 20
+            converted.append(entry)
+
+            # Interfaces in portchannels are automatically grouped within if.include
+            # Grouped interfaces are identified when the type of the ifIndex field is a tuple
+            if "portchannel" in values:
+                entry[0] = (group_prefix + " " + values["portchannel"].get("Name", values["portchannel"].get("PortId", "")),\
+                            str(last_index + index))
+            else:
+                entry[0] = str(last_index + index)
+            entry[1] = item
+
+            # Speed and OperState
+            if values.get("portchannel"):
+                speed = values["portchannel"].get("AdminSpeed") or values["portchannel"].get("OperSpeed")
+                # It looks like that the AdminSpeed of a portchannel is the speed of one member
+                # speed = str(int(float(speed.replace("gbps", "000000000")) / values["portchannel"]["members"]))
+                speed = speed.replace("gbps", "000000000")
+                operStatus = values["portchannel"].get("AdminState", "disabled") == "enabled" and \
+                             values["portchannel"].get("OperState", "down") == "up"
"down") == "up" + else: + speed = values.get("AdminSpeed", "").replace("gbps", "000000000") + operStatus = values.get("AdminState", "disabled") == "enabled" and \ + values.get("OperState", "down") == "up" + + entry[3] = speed + entry[4] = operStatus and "1" or "2" + + entry[18] = item # ifAlias + entry[19] = '' # MAC address not known here + for table_index , ctr_keys in tableindex.get(what).items(): + ctr_value = 0 + # On summing keys there is a possiblility to overlook some counter wraps. + # Right now, it's only Recv-Errors (therefore unlikely). We can live with that + if type(ctr_keys) != list: # fixed value + ctr_value = ctr_keys + else: + for ctr_class, ctr_key in ctr_keys: # compute value from data + if ctr_class: + ctr_value += int(values[ctr_class].get(ctr_key, "0")) + else: + ctr_value += int(values.get(ctr_key, "0")) + entry[table_index] = str(ctr_value) + else: + index = 0 + last_index += index + 1 + + return converted + + +check_info["ucs_bladecenter_if"] = { + 'parse_function' : parse_ucs_bladecenter_if, + 'inventory_function' : lambda info: inventory_if_common(info), + 'check_function' : check_if_common, + 'service_description' : 'Interface %s', + 'has_perfdata' : True, + 'group' : 'if', + 'includes' : [ 'ucs_bladecenter.include', 'if.include' ], + 'default_levels_variable' : 'if_default_levels', +} diff -Nru check-mk-1.2.2p3/ucs_bladecenter.include check-mk-1.2.6p12/ucs_bladecenter.include --- check-mk-1.2.2p3/ucs_bladecenter.include 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/ucs_bladecenter.include 2015-06-24 09:48:37.000000000 +0000 @@ -0,0 +1,36 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. 
+
+def ucs_bladecenter_convert_info(info):
+    result = {}
+    for line in info:
+        module = line[0]
+        elements = dict(map(lambda x: x.split(" ", 1), line[1:]))
+        if elements.get("Dn"):
+            result.setdefault(module, {}).update({elements.get("Dn"): elements})
+
+    return result
+
diff -Nru check-mk-1.2.2p3/ucs_bladecenter_psu check-mk-1.2.6p12/ucs_bladecenter_psu
--- check-mk-1.2.2p3/ucs_bladecenter_psu 1970-01-01 00:00:00.000000000 +0000
+++ check-mk-1.2.6p12/ucs_bladecenter_psu 2015-09-21 10:59:54.000000000 +0000
@@ -0,0 +1,196 @@
+#!/usr/bin/python
+# -*- encoding: utf-8; py-indent-offset: 4 -*-
+# +------------------------------------------------------------------+
+# |             ____ _               _        __  __ _  __           |
+# |            / ___| |__   ___  ___| | __   |  \/  | |/ /           |
+# |           | |   | '_ \ / _ \/ __| |/ /   | |\/| | ' /            |
+# |           | |___| | | |  __/ (__|   <    | |  | | . \            |
+# |            \____|_| |_|\___|\___|_|\_\___|_|  |_|_|\_\           |
+# |                                                                  |
+# | Copyright Mathias Kettner 2013             mk@mathias-kettner.de |
+# +------------------------------------------------------------------+
+#
+# This file is part of Check_MK.
+# The official homepage is at http://mathias-kettner.de/check_mk.
+#
+# check_mk is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by
+# the Free Software Foundation in version 2. check_mk is distributed
+# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with-
+# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE. See the GNU General Public License for more de-
+# tails. You should have received a copy of the GNU General Public
+# License along with GNU Make; see the file COPYING. If not, write
+# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
+# Boston, MA 02110-1301 USA.
+
+# <<<ucs_bladecenter_psu:sep(9)>>>
+# equipmentPsuInputStats Dn sys/switch-A/psu-2/input-stats Current 0.656250 PowerAvg 153.335938 Voltage 231.500000
+# equipmentPsuStats Dn sys/chassis-1/psu-1/stats AmbientTemp 17.000000 Output12vAvg 12.008000 Output3v3Avg 3.336000
+
+def ucs_bladecenter_psu_parse(info):
+    data = ucs_bladecenter_convert_info(info)
+    psu = {}
+
+    def get_item_name(key):
+        tokens = key.split("/")
+        tokens[1] = tokens[1].replace("psu-", " Module ")
+        tokens = map(lambda x: x[0].upper() + x[1:], tokens)
+        return "".join(tokens).replace("-", " ")
+
+    for component, key_low, key_high in [
+        ("equipmentPsuInputStats", 4, -12),
+        ("equipmentPsuStats",      4, -6),
+    ]:
+        for key, values in data.get(component, {}).items():
+            name = get_item_name(key[key_low:key_high])
+            del values["Dn"]
+            psu.setdefault(name, {}).update(values)
+
+    return psu
+
+
+#.
+# .--Chassis Volt.-------------------------------------------------------.
+# |       ____ _                    _      __     __    _ _            |
+# |      / ___| |__   __ _ ___ ___ (_)___  \ \   / /__ | | |_          |
+# |     | |   | '_ \ / _` / __/ __|| / __|  \ \ / / _ \| | __|         |
+# |     | |___| | | | (_| \__ \__ \| \__ \   \ V / (_) | | |_ _        |
+# |      \____|_| |_|\__,_|___/___/|_|___/    \_/ \___/|_|\__(_)       |
+# |                                                                    |
+# '----------------------------------------------------------------------'
+
+factory_settings["ucs_bladecenter_psu_default_levels"] = {
+    "levels_3v_lower"  : (3.25, 3.20),
+    "levels_3v_upper"  : (3.4,  3.45),
+    "levels_12v_lower" : (11.9, 11.8),
+    "levels_12v_upper" : (12.1, 12.2)
+}
+
+def inventory_ucs_bladecenter_psu(parsed):
+    for key, values in parsed.items():
+        if key.startswith("Chassis"):
+            yield key, {}
+
+def check_ucs_bladecenter_psu(item, params, parsed):
+    psu = parsed.get(item)
+    if not psu:
+        yield 3, "Chassis voltage info not available"
+        return
+
+    for param_key, perfname, key, text in [ ("levels_3v_",  "3_3v", "Output3v3Avg", "Output 3.3V-Average"),
+                                            ("levels_12v_", "12v",  "Output12vAvg", "Output 12V-Average") ]:
+        if key not in psu:
+            continue
+        voltage = float(psu[key])
+        yield 0, "%s: %.2f V" % (text, voltage), [(perfname, voltage)]
+
+        warn_lower, crit_lower = params[param_key + "lower"]
+        if voltage <= crit_lower:
+            yield 2, "too low (levels at %.2f/%.2f V)" % params[param_key + "lower"]
+        elif voltage <= warn_lower:
+            yield 1, "too low (levels at %.2f/%.2f V)" % params[param_key + "lower"]
+
+        warn_upper, crit_upper = params[param_key + "upper"]
+        if voltage >= crit_upper:
+            yield 2, "too high (levels at %.2f/%.2f V)" % params[param_key + "upper"]
+        elif voltage >= warn_upper:
+            yield 1, "too high (levels at %.2f/%.2f V)" % params[param_key + "upper"]
+
+
+check_info["ucs_bladecenter_psu"] = {
+    'parse_function'          : ucs_bladecenter_psu_parse,
+    'inventory_function'      : inventory_ucs_bladecenter_psu,
+    'check_function'          : check_ucs_bladecenter_psu,
+    'service_description'     : 'Voltage %s',
+    'group'                   : 'ucs_bladecenter_chassis_voltage',
+    'includes'                : [ 'ucs_bladecenter.include' ],
+    'has_perfdata'            : True,
+    'default_levels_variable' : "ucs_bladecenter_psu_default_levels",
+}
+
+#.
+# .--Power Supply--------------------------------------------------------.
+# |  ____                        ____                    _               |
+# | |  _ \ _____      _____ _ __/ ___| _   _ _ __  _ __ | |_   _         |
+# | | |_) / _ \ \ /\ / / _ \ '__\___ \| | | | '_ \| '_ \| | | | |        |
+# | |  __/ (_) \ V  V /  __/ |   ___) | |_| | |_) | |_) | | |_| |        |
+# | |_|   \___/ \_/\_/ \___|_|  |____/ \__,_| .__/| .__/|_|\__, |        |
+# |                                         |_|   |_|      |___/         |
+# '----------------------------------------------------------------------'
+
+def inventory_ucs_bladecenter_psu_switch_power(parsed):
+    for key, values in parsed.items():
+        if key.startswith("Switch"):
+            yield key, {}
+
+
+def check_ucs_bladecenter_psu_switch_power(item, params, parsed):
+    psu = parsed.get(item)
+    if not psu:
+        return 3, "Switch power info not available"
+
+    # Convert fields to the lowercase keys expected by check_elphase
+    for old, new in [ ("Current", "current"),
+                      ("PowerAvg", "power"),
+                      ("Voltage", "voltage") ]:
+        psu[new] = (float(psu[old]), None)
+        del psu[old]
+
+    return check_elphase(item, params, {item: psu})
+
+check_info["ucs_bladecenter_psu.switch_power"] = {
+    'inventory_function'  : inventory_ucs_bladecenter_psu_switch_power,
+    'check_function'      : check_ucs_bladecenter_psu_switch_power,
+    'service_description' : 'Power Supply %s',
+    'includes'            : [ 'ucs_bladecenter.include', 'elphase.include' ],
+    'has_perfdata'        : True,
+    'group'               : "el_inphase"
+}
+
+
+#.
+# .--Temperature---------------------------------------------------------.
+# |      _____                                   _                     |
+# |     |_   _|__ _ __ ___  _ __   ___ _ __ __ _| |_ _   _ _ __ ___    |
+# |       | |/ _ \ '_ ` _ \| '_ \ / _ \ '__/ _` | __| | | | '__/ _ \   |
+# |       | |  __/ | | | | | |_) |  __/ | | (_| | |_| |_| | | |  __/   |
+# |       |_|\___|_| |_| |_| .__/ \___|_|  \__,_|\__|\__,_|_|  \___|   |
+# |                        |_|                                         |
+# '----------------------------------------------------------------------'
+
+factory_settings["ucs_bladecenter_psu_chassis_temp_default_levels"] = {
+    "levels" : (35, 40),
+}
+
+def inventory_ucs_bladecenter_psu_chassis_temp(parsed):
+    for key, values in parsed.items():
+        if key.startswith("Chassis") and values.get("AmbientTemp"):
+            yield "Ambient " + " ".join(key.split()[:2]), {}
+
+
+def check_ucs_bladecenter_psu_chassis_temp(item, params, parsed):
+    sensor_item = item[8:]  # drop "Ambient "
+    sensor_list = []
+
+    for key, values in sorted(parsed.items()):
+        if key.startswith(sensor_item) and "AmbientTemp" in values:
+            sensor_list.append((
+                "Module %s" % key.split()[-1],
+                float(values.get("AmbientTemp")),
+            ))
+    return check_temperature_list(sensor_list, params)
+
+
+check_info["ucs_bladecenter_psu.chassis_temp"] = {
+    'inventory_function'      : inventory_ucs_bladecenter_psu_chassis_temp,
+    'check_function'          : check_ucs_bladecenter_psu_chassis_temp,
+    'service_description'     : 'Temperature %s',
+    'group'                   : 'temperature',
+    'has_perfdata'            : True,
+    'default_levels_variable' : "ucs_bladecenter_psu_chassis_temp_default_levels",
+    'includes'                : [ 'ucs_bladecenter.include', 'temperature.include' ],
+}
+
diff -Nru check-mk-1.2.2p3/ucs_bladecenter_psu.chassis_temp check-mk-1.2.6p12/ucs_bladecenter_psu.chassis_temp
--- check-mk-1.2.2p3/ucs_bladecenter_psu.chassis_temp 1970-01-01 00:00:00.000000000 +0000
+++ check-mk-1.2.6p12/ucs_bladecenter_psu.chassis_temp 2015-06-24 09:48:36.000000000 +0000
@@ -0,0 +1,37 @@
+title: UCS Bladecenter: Chassis Temperature
+agents: agent_ucs_bladecenter
+catalog: hw/server/cisco
+license: GPL
+distribution: check_mk
+description:
+ This check monitors the temperature levels for each chassis
+ power supply module of a UCS bladecenter. Each module may
+ have multiple sensors, which are all checked together.
+ You can configure the upper and lower levels for the temperature
+ sensors. If one of the sensors breaches the configured limits,
+ the check will point out its location.
+
+item:
+ The chassis description, e.g. Chassis 1
+
+perfdata:
+ One variable: the temperature
+
+inventory:
+ Creates one service per chassis module
+
+[parameters]
+parameters(dict): This check's parameters are a dictionary with the
+ following (optional) keys:
+
+ {"levels"}: A tuple (warn, crit) containing the upper levels. No defaults.
+
+ {"levels_lower"}: A tuple (warn, crit) containing the lower levels. No defaults.
+
+ {"output_unit"}: "c", "f" or "k", the check will output the temperature in the
+ specified unit. If this is not set, output is in degrees Celsius.
+
+ {"input_unit"}: "c", "f" or "k". By default, the check interprets the sensor value
+ according to the unit sent by the device. This key allows overriding that. Tread
+ lightly, as this may lead to a misinterpreted temperature. Should only be used if
+ the device reports its unit incorrectly.
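The man page above describes "levels" and "levels_lower" as (warn, crit) tuples. A minimal sketch of how such a pair is conventionally evaluated — an illustration only, not the actual temperature.include implementation:

    def levels_state(value, levels_upper=None, levels_lower=None):
        # Returns a Nagios-style state: 0 = OK, 1 = WARN, 2 = CRIT
        state = 0
        if levels_upper:
            warn, crit = levels_upper
            if value >= crit:
                state = 2
            elif value >= warn:
                state = max(state, 1)
        if levels_lower:
            warn, crit = levels_lower
            if value <= crit:
                state = 2
            elif value <= warn:
                state = max(state, 1)
        return state

    # With the factory default (35, 40) used by the chassis_temp check:
    # levels_state(36, (35, 40)) == 1, levels_state(41, (35, 40)) == 2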
diff -Nru check-mk-1.2.2p3/ucs_bladecenter_psu.switch check-mk-1.2.6p12/ucs_bladecenter_psu.switch
--- check-mk-1.2.2p3/ucs_bladecenter_psu.switch 1970-01-01 00:00:00.000000000 +0000
+++ check-mk-1.2.6p12/ucs_bladecenter_psu.switch 2015-06-24 09:48:36.000000000 +0000
@@ -0,0 +1,28 @@
+title: UCS Bladecenter: Power Supply Switch
+agents: agent_ucs_bladecenter
+catalog: hw/server/cisco
+license: GPL
+distribution: check_mk
+description:
+ This check monitors the power supply of the switches in a UCS bladecenter.
+ For each switch module a service with information on voltage, current and
+ power is created.
+
+item:
+ The component where the power supply is located, e.g. chassis or switch
+
+perfdata:
+ Three values: voltage, current and power
+
+inventory:
+ Creates one service per switch module
+
+[parameters]
+parameters (dict): This check supports three types of parameters,
+none of which have default values. It is a dictionary with the following keys:
+
+ {"voltage"}: Warning and critical levels for voltage as ints, denoting the minimum required voltage in Volt.
+
+ {"current"}: Warning and critical levels for current as ints, denoted in Ampere.
+
+ {"power"}: Warning and critical levels for power as ints, denoted in Watt.
diff -Nru check-mk-1.2.2p3/unitrends_backup check-mk-1.2.6p12/unitrends_backup
--- check-mk-1.2.2p3/unitrends_backup 1970-01-01 00:00:00.000000000 +0000
+++ check-mk-1.2.6p12/unitrends_backup 2015-06-24 09:48:37.000000000 +0000
@@ -0,0 +1,76 @@
+#!/usr/bin/python
+# -*- encoding: utf-8; py-indent-offset: 4 -*-
+# +------------------------------------------------------------------+
+# |             ____ _               _        __  __ _  __           |
+# |            / ___| |__   ___  ___| | __   |  \/  | |/ /           |
+# |           | |   | '_ \ / _ \/ __| |/ /   | |\/| | ' /            |
+# |           | |___| | | |  __/ (__|   <    | |  | | . \            |
+# |            \____|_| |_|\___|\___|_|\_\___|_|  |_|_|\_\           |
+# |                                                                  |
+# | Copyright Mathias Kettner 2014             mk@mathias-kettner.de |
+# +------------------------------------------------------------------+
+#
+# This file is part of Check_MK.
+# The official homepage is at http://mathias-kettner.de/check_mk.
+#
+# check_mk is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by
+# the Free Software Foundation in version 2. check_mk is distributed
+# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with-
+# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE. See the GNU General Public License for more de-
+# tails. You should have received a copy of the GNU General Public
+# License along with GNU Make; see the file COPYING. If not, write
+# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
+# Boston, MA 02110-1301 USA.
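The unitrends_backup check introduced here consumes a pipe-separated agent section (its layout is shown in the comment just below): one HEADER row per schedule, followed by one row per backup run. A condensed sketch of how those rows map to services, using the sample rows from that comment:

    info = [
        ["HEADER", "DMZ-SR01", "Hyper-V 2012", "DMZ-HV01", "0"],
        ["rodc2", "18761", "Incremental", "Successful"],
        ["owncloud-test", "18762", "Incremental", "Successful"],
    ]

    # Inventory: one "Schedule %s" service per HEADER row, keyed by field 1
    schedules = [line[1] for line in info if line[0] == "HEADER"]   # ["DMZ-SR01"]

    # Check: the failures counter is the HEADER's last field ("0" -> OK, else CRIT);
    # every row up to the next HEADER becomes a detail line of the service output.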
+
+# Header: Schedule Name, Application Name, Schedule Description, Failures
+#<<<unitrends_backup:sep(124)>>>
+#HEADER|DMZ-SR01|Hyper-V 2012|DMZ-HV01|0
+#rodc2|18761|Incremental|Successful
+#rodc2|18761|Incremental|Successful
+#owncloud-test|18762|Incremental|Successful
+
+
+def inventory_unitrends_backup(info):
+    inventory = []
+    for line in info:
+        if line[0] == "HEADER":
+            inventory.append( (line[1], None) )
+    return inventory
+
+def check_unitrends_backup(item, _no_params, info):
+    found = False
+    details = []
+    for line in info:
+        if line[0] == "HEADER" and found:
+            # We are finished collecting detail information
+            break
+
+        if found:
+            # Collecting backup details
+            app_type, bid, backup_type, status = line
+            details.append("Application Type: %s (%s), %s: %s" % \
+                ( app_type, bid, backup_type, status))
+            continue
+
+        if line[0] == "HEADER" and line[1] == item:
+            found = True
+            head, sched_name, app_name, sched_desc, failures = line
+            message = "%s Errors in the last 24h for Application %s (%s) " % \
+                ( failures, app_name, sched_desc )
+
+    if found:
+        message += "\n" + "\n".join(details)
+        if failures == '0':
+            return 0, message
+        else:
+            return 2, message
+    return 3, "Schedule not found in agent output"
+
+check_info["unitrends_backup"] = {
+    "check_function"      : check_unitrends_backup,
+    "inventory_function"  : inventory_unitrends_backup,
+    "service_description" : "Schedule %s",
+}
+
diff -Nru check-mk-1.2.2p3/unitrends_replication check-mk-1.2.6p12/unitrends_replication
--- check-mk-1.2.2p3/unitrends_replication 1970-01-01 00:00:00.000000000 +0000
+++ check-mk-1.2.6p12/unitrends_replication 2015-06-24 09:48:37.000000000 +0000
@@ -0,0 +1,54 @@
+#!/usr/bin/python
+# -*- encoding: utf-8; py-indent-offset: 4 -*-
+# +------------------------------------------------------------------+
+# |             ____ _               _        __  __ _  __           |
+# |            / ___| |__   ___  ___| | __   |  \/  | |/ /           |
+# |           | |   | '_ \ / _ \/ __| |/ /   | |\/| | ' /            |
+# |           | |___| | | |  __/ (__|   <    | |  | | . \            |
+# |            \____|_| |_|\___|\___|_|\_\___|_|  |_|_|\_\           |
+# |                                                                  |
+# | Copyright Mathias Kettner 2014             mk@mathias-kettner.de |
+# +------------------------------------------------------------------+
+#
+# This file is part of Check_MK.
+# The official homepage is at http://mathias-kettner.de/check_mk.
+#
+# check_mk is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by
+# the Free Software Foundation in version 2. check_mk is distributed
+# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with-
+# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE. See the GNU General Public License for more de-
+# tails. You should have received a copy of the GNU General Public
+# License along with GNU Make; see the file COPYING. If not, write
+# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
+# Boston, MA 02110-1301 USA.
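The companion unitrends_replication check, whose body follows, keys its services by the replication target (field 3 of each row) and goes critical if any entry from the last 24 hours is not "Success". A minimal sketch of that filtering; the rows here are hypothetical sample data, not real agent output:

    info = [
        # application, result, complete, target, instance  (hypothetical rows)
        ["app1", "Success", "1", "target1", "inst1"],
        ["app2", "Failed",  "0", "target1", "inst2"],
    ]
    bad = [row for row in info if row[3] == "target1" and row[1] != "Success"]
    # bad is non-empty -> state CRIT (2), listing target, result and instance
    # of each failed replication; empty -> state OK (0)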
+
+
+def inventory_unitrends_replication(info):
+    inventory = []
+    for application, result, complete, target, instance in info:
+        if target not in [ x[0] for x in inventory ]:
+            inventory.append( (target, None) )
+    return inventory
+
+def check_unitrends_replication(item, _no_params, info):
+    # this is never going to be a blessed check :)
+    replications = [ x for x in info if x[3] == item ]
+    if len(replications) == 0:
+        return 3, "No entries found"
+    not_successful = [ x for x in replications if x[1] != "Success" ]
+    if len(not_successful) == 0:
+        return 0, "All replications in the last 24 hours successful"
+    messages = []
+    for application, result, complete, target, instance in not_successful:
+        messages.append( "Target: %s, Result: %s, Instance: %s " % ( target, result, instance ))
+    # TODO: Maybe a good place to use multiline output here
+    return 2, "Errors from the last 24 hours: " + "/ ".join(messages)
+
+check_info["unitrends_replication"] = {
+    "check_function"      : check_unitrends_replication,
+    "inventory_function"  : inventory_unitrends_replication,
+    "service_description" : "Replication %s",
+}
+
diff -Nru check-mk-1.2.2p3/=unpacked-tar10=/api/c++/demo.cc check-mk-1.2.6p12/=unpacked-tar10=/api/c++/demo.cc
--- check-mk-1.2.2p3/=unpacked-tar10=/api/c++/demo.cc 2013-03-04 11:48:41.000000000 +0000
+++ check-mk-1.2.6p12/=unpacked-tar10=/api/c++/demo.cc 1970-01-01 00:00:00.000000000 +0000
@@ -1,59 +0,0 @@
-// +------------------------------------------------------------------+
-// |             ____ _               _        __  __ _  __           |
-// |            / ___| |__   ___  ___| | __   |  \/  | |/ /           |
-// |           | |   | '_ \ / _ \/ __| |/ /   | |\/| | ' /            |
-// |           | |___| | | |  __/ (__|   <    | |  | | . \            |
-// |            \____|_| |_|\___|\___|_|\_\___|_|  |_|_|\_\           |
-// |                                                                  |
-// | Copyright Mathias Kettner 2013             mk@mathias-kettner.de |
-// +------------------------------------------------------------------+
-//
-// This file is part of Check_MK.
-// The official homepage is at http://mathias-kettner.de/check_mk.
-//
-// check_mk is free software; you can redistribute it and/or modify it
-// under the terms of the GNU General Public License as published by
-// the Free Software Foundation in version 2. check_mk is distributed
-// in the hope that it will be useful, but WITHOUT ANY WARRANTY; with-
-// out even the implied warranty of MERCHANTABILITY or FITNESS FOR A
-// PARTICULAR PURPOSE. See the GNU General Public License for more de-
-// tails. You should have received a copy of the GNU General Public
-// License along with GNU Make; see the file COPYING. If not, write
-// to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
-// Boston, MA 02110-1301 USA.
-
-#include <stdio.h>
-#include "Livestatus.h"
-
-const char *query = "GET status\nColumns: livestatus_version program_version\nColumnHeaders: on\n";
-#define MAX_LINE_SIZE 8192
-
-int main(int argc, char **argv)
-{
-    if (argc != 2) {
-        fprintf(stderr, "Usage: %s SOCKETPATH\n", argv[0]);
-        return 1;
-    }
-
-    const char *socket_path = argv[1];
-    Livestatus live;
-    live.connectUNIX(socket_path);
-    if (live.isConnected()) {
-        live.sendQuery(query);
-        std::vector<std::string> *row;
-        while (0 != (row = live.nextRow()))
-        {
-            printf("Line:\n");
-            for (int i=0; i<row->size(); i++)
-                printf("%s\n", (*row)[i].c_str());
-            delete row;
-        }
-        live.disconnect();
-    }
-    else {
-        fprintf(stderr, "Couldn't connect to socket '%s'\n", socket_path);
-        return 1;
-    }
-    return 0;
-}
-
diff -Nru check-mk-1.2.2p3/=unpacked-tar10=/api/c++/Livestatus.cc check-mk-1.2.6p12/=unpacked-tar10=/api/c++/Livestatus.cc
--- check-mk-1.2.2p3/=unpacked-tar10=/api/c++/Livestatus.cc 2013-03-04 11:48:41.000000000 +0000
+++ check-mk-1.2.6p12/=unpacked-tar10=/api/c++/Livestatus.cc 1970-01-01 00:00:00.000000000 +0000
@@ -1,98 +0,0 @@
-// +------------------------------------------------------------------+
-// |             ____ _               _        __  __ _  __           |
-// |            / ___| |__   ___  ___| | __   |  \/  | |/ /           |
-// |           | |   | '_ \ / _ \/ __| |/ /   | |\/| | ' /            |
-// |           | |___| | | |  __/ (__|   <    | |  | | . \            |
-// |            \____|_| |_|\___|\___|_|\_\___|_|  |_|_|\_\           |
-// |                                                                  |
-// | Copyright Mathias Kettner 2013             mk@mathias-kettner.de |
-// +------------------------------------------------------------------+
-//
-// This file is part of Check_MK.
-// The official homepage is at http://mathias-kettner.de/check_mk.
-//
-// check_mk is free software; you can redistribute it and/or modify it
-// under the terms of the GNU General Public License as published by
-// the Free Software Foundation in version 2. check_mk is distributed
-// in the hope that it will be useful, but WITHOUT ANY WARRANTY; with-
-// out even the implied warranty of MERCHANTABILITY or FITNESS FOR A
-// PARTICULAR PURPOSE. See the GNU General Public License for more de-
-// tails. You should have received a copy of the GNU General Public
-// License along with GNU Make; see the file COPYING. If not, write
-// to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
-// Boston, MA 02110-1301 USA.
-
-#include <string.h>
-#include <unistd.h>
-#include <stdio.h>
-#include <sys/socket.h>
-#include <sys/un.h>
-#include "Livestatus.h"
-
-#define SEPARATORS "Separators: 10 1 2 3\n"
-
-void Livestatus::connectUNIX(const char *socket_path)
-{
-    _connection = socket(PF_LOCAL, SOCK_STREAM, 0);
-    struct sockaddr_un sa;
-    sa.sun_family = AF_LOCAL;
-    strncpy(sa.sun_path, socket_path, sizeof(sa.sun_path));
-    if (0 > connect(_connection, (const struct sockaddr *)&sa, sizeof(sockaddr_un))) {
-        close(_connection);
-        _connection = -1;
-    }
-    else
-        _file = fdopen(_connection, "r");
-}
-
-
-Livestatus::~Livestatus()
-{
-    disconnect();
-}
-
-void Livestatus::disconnect()
-{
-    if (isConnected()) {
-        if (_file)
-            fclose(_file);
-        else
-            close(_connection);
-    }
-    _connection = -1;
-    _file = 0;
-}
-
-void Livestatus::sendQuery(const char *query)
-{
-    write(_connection, query, strlen(query));
-    write(_connection, SEPARATORS, strlen(SEPARATORS));
-    shutdown(_connection, SHUT_WR);
-}
-
-
-std::vector<std::string> *Livestatus::nextRow()
-{
-    char line[65536];
-    if (0 != fgets(line, sizeof(line), _file)) {
-        // strip trailing linefeed
-        char *end = strlen(line) + line;
-        if (end > line && *(end-1) == '\n') {
-            *(end-1) = 0;
-            --end;
-        }
-        std::vector<std::string> *row = new std::vector<std::string>;
-        char *scan = line;
-        while (scan < end) {
-            char *zero = scan;
-            while (zero < end && *zero != '\001') zero++;
-            *zero = 0;
-            row->push_back(std::string(scan));
-            scan = zero + 1;
-        }
-        return row;
-    }
-    else
-        return 0;
-}
-
diff -Nru check-mk-1.2.2p3/=unpacked-tar10=/api/c++/Livestatus.h check-mk-1.2.6p12/=unpacked-tar10=/api/c++/Livestatus.h
--- check-mk-1.2.2p3/=unpacked-tar10=/api/c++/Livestatus.h 2013-03-04 11:48:41.000000000 +0000
+++ check-mk-1.2.6p12/=unpacked-tar10=/api/c++/Livestatus.h 1970-01-01 00:00:00.000000000 +0000
@@ -1,54 +0,0 @@
-// +------------------------------------------------------------------+
-// |             ____ _               _        __  __ _  __           |
-// |            / ___| |__   ___  ___| | __   |  \/  | |/ /           |
-// |           | |   | '_ \ / _ \/ __| |/ /   | |\/| | ' /            |
-// |           | |___| | | |  __/ (__|   <    | |  | | . \            |
-// |            \____|_| |_|\___|\___|_|\_\___|_|  |_|_|\_\           |
-// |                                                                  |
-// | Copyright Mathias Kettner 2013             mk@mathias-kettner.de |
-// +------------------------------------------------------------------+
-//
-// This file is part of Check_MK.
-// The official homepage is at http://mathias-kettner.de/check_mk.
-//
-// check_mk is free software; you can redistribute it and/or modify it
-// under the terms of the GNU General Public License as published by
-// the Free Software Foundation in version 2. check_mk is distributed
-// in the hope that it will be useful, but WITHOUT ANY WARRANTY; with-
-// out even the implied warranty of MERCHANTABILITY or FITNESS FOR A
-// PARTICULAR PURPOSE. See the GNU General Public License for more de-
-// tails. You should have received a copy of the GNU General Public
-// License along with GNU Make; see the file COPYING. If not, write
-// to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
-// Boston, MA 02110-1301 USA.
-
-#ifndef Livestatus_h
-#define Livestatus_h
-
-#include <stdio.h>
-#include <string>
-#include <vector>
-
-// simple C++ API for accessing Livestatus from C++,
-// currently supports only UNIX sockets, no TCP. But
-// this is only a simple enhancement.
-
-class Livestatus
-{
-    int _connection;
-    FILE *_file;
-
-public:
-    Livestatus() : _connection(-1), _file(0) {};
-    ~Livestatus();
-    void connectUNIX(const char *socketpath);
-    bool isConnected() const { return _connection >= 0; };
-    void disconnect();
-    void sendQuery(const char *query);
-    std::vector<std::string> *nextRow();
-};
-
-
-
-#endif // Livestatus_h
-
diff -Nru check-mk-1.2.2p3/=unpacked-tar10=/api/c++/Makefile check-mk-1.2.6p12/=unpacked-tar10=/api/c++/Makefile
--- check-mk-1.2.2p3/=unpacked-tar10=/api/c++/Makefile 2013-11-05 09:23:08.000000000 +0000
+++ check-mk-1.2.6p12/=unpacked-tar10=/api/c++/Makefile 1970-01-01 00:00:00.000000000 +0000
@@ -1,42 +0,0 @@
-# +------------------------------------------------------------------+
-# |             ____ _               _        __  __ _  __           |
-# |            / ___| |__   ___  ___| | __   |  \/  | |/ /           |
-# |           | |   | '_ \ / _ \/ __| |/ /   | |\/| | ' /            |
-# |           | |___| | | |  __/ (__|   <    | |  | | . \            |
-# |            \____|_| |_|\___|\___|_|\_\___|_|  |_|_|\_\           |
-# |                                                                  |
-# | Copyright Mathias Kettner 2013             mk@mathias-kettner.de |
-# +------------------------------------------------------------------+
-#
-# This file is part of Check_MK.
-# The official homepage is at http://mathias-kettner.de/check_mk.
-#
-# check_mk is free software; you can redistribute it and/or modify it
-# under the terms of the GNU General Public License as published by
-# the Free Software Foundation in version 2. check_mk is distributed
-# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with-
-# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A
-# PARTICULAR PURPOSE. See the GNU General Public License for more de-
-# tails. You should have received a copy of the GNU General Public
-# License along with GNU Make; see the file COPYING. If not, write
-# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
-# Boston, MA 02110-1301 USA.
-
-ifneq ($(DEBUG),)
-    CXXFLAGS += -g -DDEBUG
-    LDFLAGS  += -g
-endif
-
-all: demo
-
-demo.o: demo.cc Livestatus.h
-	g++ $(CXXFLAGS) -c -o $@ $<
-
-Livestatus.o: Livestatus.cc Livestatus.h
-	g++ $(CXXFLAGS) -c -o $@ $<
-
-demo: demo.o Livestatus.o
-	g++ $(CXXFLAGS) $(LDFLAGS) -o $@ $^
-
-clean:
-	rm -f demo.o demo Livetatus.o
diff -Nru check-mk-1.2.2p3/=unpacked-tar10=/api/perl/Changes check-mk-1.2.6p12/=unpacked-tar10=/api/perl/Changes
--- check-mk-1.2.2p3/=unpacked-tar10=/api/perl/Changes 2012-10-04 13:12:37.000000000 +0000
+++ check-mk-1.2.6p12/=unpacked-tar10=/api/perl/Changes 1970-01-01 00:00:00.000000000 +0000
@@ -1,164 +0,0 @@
-Revision history for Perl extension Monitoring::Livestatus.
- - -0.74 Fri Apr 22 00:16:37 CEST 2011 - - fixed problem with bulk commands - -0.72 Tue Apr 19 15:38:34 CEST 2011 - - fixed problem with inet timeout - -0.70 Sat Apr 16 16:43:57 CEST 2011 - - fixed tests using english - -0.68 Wed Mar 23 23:16:22 CET 2011 - - fixed typo - -0.66 Tue Mar 22 23:19:23 CET 2011 - - added support for additonal headers - -0.64 Fri Nov 5 11:02:51 CET 2010 - - removed useless test dependecies - -0.62 Wed Nov 3 15:20:02 CET 2010 - - fixed tests with threads > 1.79 - -0.60 Wed Aug 25 15:04:22 CEST 2010 - - fixed package and made author tests optional - -0.58 Wed Aug 11 09:30:30 CEST 2010 - - added callback support - -0.56 Tue Aug 10 09:45:28 CEST 2010 - - changed parser from csv to JSON::XS - -0.54 Wed Jun 23 16:43:11 CEST 2010 - - fixed utf8 support - -0.52 Mon May 17 15:54:42 CEST 2010 - - fixed connection timeout - -0.50 Mon May 17 12:29:20 CEST 2010 - - fixed test requirements - -0.48 Sun May 16 15:16:12 CEST 2010 - - added retry option for better core restart handling - - added new columns from livestatus 1.1.4 - -0.46 Tue Mar 16 15:19:08 CET 2010 - - error code have been changed in livestatus (1.1.3) - - fixed threads support - -0.44 Sun Feb 28 12:19:56 CET 2010 - - fixed bug when disabling backends and using threads - -0.42 Thu Feb 25 21:32:37 CET 2010 - - added possibility to disable specific backends - -0.41 Sat Feb 20 20:37:36 CET 2010 - - fixed tests on windows - -0.40 Thu Feb 11 01:00:20 CET 2010 - - fixed timeout for inet sockets - -0.38 Fri Jan 29 20:54:50 CET 2010 - - added limit option - -0.37 Thu Jan 28 21:23:19 CET 2010 - - removed inc from repository - -0.36 Sun Jan 24 00:14:13 CET 2010 - - added more backend tests - - fixed problem with summing up non numbers - -0.35 Mon Jan 11 15:37:51 CET 2010 - - added TCP_NODELAY option for inet sockets - - fixed undefined values - -0.34 Sun Jan 10 12:29:57 CET 2010 - - fixed return code with multi backend and different errors - -0.32 Sat Jan 9 16:12:48 CET 2010 - - added deepcopy option - -0.31 Thu Jan 7 08:56:48 CET 2010 - - added generic tests for livestatus backend - - fixed problem when selecting specific backend - -0.30 Wed Jan 6 16:05:33 CET 2010 - - renamed project to Monitoring::Livestatus - -0.29 Mon Dec 28 00:11:53 CET 2009 - - retain order of backends when merge outut - - renamed select_scalar_value to selectscalar_value - - fixed sums for selectscalar_value - - fixed missing META.yml - -0.28 Sat Dec 19 19:19:13 CET 2009 - - fixed bug in column alias - - added support for multiple peers - - changed to Module::Install - -0.26 Fri Dec 4 08:25:07 CET 2009 - - added peer name - - added peer arg (can be socket or server) - -0.24 Wed Dec 2 23:41:34 CET 2009 - - added support for StatsAnd: and StatsOr: queries - - table alias support for selectall_hashref and selectrow_hashref - - added support for Stats: ... as alias - - added support for StatsAnd:... as alias - - added support for StatsOr: ... 
as alias - - added support for StatsGroupBy: (with alias) - - added support column aliases for Column: header - -0.22 Fri Nov 27 01:04:16 CET 2009 - - fixed errors on socket problems - - fixed sending commands - -0.20 Sun Nov 22 12:41:39 CET 2009 - - added keepalive support - - added support for ResponseHeader: fixed16 - - added error handling - - added pod test - - added tests with real socket / server - - added column aliases - - added timeout option - - implemented select_scalar_value() - - fixed perl::critic tests - -0.18 Sat Nov 14 2009 08:58:02 GMT - - fixed requirements - - fixed typos - -0.17 Fri Nov 13 17:15:44 CET 2009 - - added support for tcp connections - -0.16 Sun Nov 8 23:17:35 CET 2009 - - added support for stats querys - -0.15 Sat Nov 7 21:28:33 CET 2009 - - fixed typos in doc - - minor bugfixes - -0.14 Fri Nov 6 09:39:56 CET 2009 - - implemented selectcol_arrayref - - implemented selectrow_array - - implemented selectrow_hashref - -0.13 Fri Nov 6 00:03:38 CET 2009 - - fixed tests on solaris - - implemented selectall_hashref() - -0.12 Thu Nov 5 09:34:59 CET 2009 - - fixed tests with thread support - - added more tests - -0.11 Wed Nov 4 23:12:16 2009 - - inital working version - -0.10 Tue Nov 3 17:13:16 2009 - - renamed to Nagios::MKLivestatus - -0.01 Tue Nov 3 00:07:46 2009 - - original version; created by h2xs 1.23 with options - -A -X -n Nagios::Livestatus diff -Nru check-mk-1.2.2p3/=unpacked-tar10=/api/perl/examples/dump.pl check-mk-1.2.6p12/=unpacked-tar10=/api/perl/examples/dump.pl --- check-mk-1.2.2p3/=unpacked-tar10=/api/perl/examples/dump.pl 2012-10-04 13:12:37.000000000 +0000 +++ check-mk-1.2.6p12/=unpacked-tar10=/api/perl/examples/dump.pl 1970-01-01 00:00:00.000000000 +0000 @@ -1,104 +0,0 @@ -#!/usr/bin/env perl - -=head1 NAME - -dump.pl - print some information from a socket - -=head1 SYNOPSIS - -./dump.pl [ -h ] [ -v ] - -=head1 DESCRIPTION - -this script print some information from a given livestatus socket or server - -=head1 ARGUMENTS - -script has the following arguments - -=over 4 - -=item help - - -h - -print help and exit - -=item verbose - - -v - -verbose output - -=item socket/server - - server local socket file or - - server remote address of livestatus - -=back - -=head1 EXAMPLE - -./dump.pl /tmp/live.sock - -=head1 AUTHOR - -2009, Sven Nierlein, - -=cut - -use warnings; -use strict; -use Data::Dumper; -use Getopt::Long; -use Pod::Usage; -use lib 'lib'; -use lib '../lib'; -use Monitoring::Livestatus; - -$Data::Dumper::Sortkeys = 1; - -######################################################################### -# parse and check cmd line arguments -my ($opt_h, $opt_v, $opt_f); -Getopt::Long::Configure('no_ignore_case'); -if(!GetOptions ( - "h" => \$opt_h, - "v" => \$opt_v, - "<>" => \&add_file, -)) { - pod2usage( { -verbose => 1, -message => 'error in options' } ); - exit 3; -} - -if(defined $opt_h) { - pod2usage( { -verbose => 1 } ); - exit 3; -} -my $verbose = 0; -if(defined $opt_v) { - $verbose = 1; -} - -if(!defined $opt_f) { - pod2usage( { -verbose => 1, -message => 'socket/server is a required option' } ); - exit 3; -} - -######################################################################### -my $nl = Monitoring::Livestatus->new( peer => $opt_f, verbose => $opt_v ); - -######################################################################### -#my $hosts = $nl->selectall_hashref('GET hosts', 'name'); -#print Dumper($hosts); - -######################################################################### -my $services = $nl->selectall_arrayref("GET 
services\nColumns: description host_name state\nLimit: 2", { Slice => {}}); -print Dumper($services); - -######################################################################### -sub add_file { - my $file = shift; - $opt_f = $file; -} diff -Nru check-mk-1.2.2p3/=unpacked-tar10=/api/perl/examples/test.pl check-mk-1.2.6p12/=unpacked-tar10=/api/perl/examples/test.pl --- check-mk-1.2.2p3/=unpacked-tar10=/api/perl/examples/test.pl 2012-10-04 13:12:37.000000000 +0000 +++ check-mk-1.2.6p12/=unpacked-tar10=/api/perl/examples/test.pl 1970-01-01 00:00:00.000000000 +0000 @@ -1,143 +0,0 @@ -#!/usr/bin/env perl - -=head1 NAME - -test.pl - print some information from a socket - -=head1 SYNOPSIS - -./test.pl [ -h ] [ -v ] - -=head1 DESCRIPTION - -this script print some information from a given livestatus socket or server - -=head1 ARGUMENTS - -script has the following arguments - -=over 4 - -=item help - - -h - -print help and exit - -=item verbose - - -v - -verbose output - -=item socket/server - - server local socket file or - - server remote address of livestatus - -=back - -=head1 EXAMPLE - -./test.pl /tmp/live.sock - -=head1 AUTHOR - -2009, Sven Nierlein, - -=cut - -use warnings; -use strict; -use Data::Dumper; -use Getopt::Long; -use Pod::Usage; -use Time::HiRes qw( gettimeofday tv_interval ); -use Log::Log4perl qw(:easy); -use lib 'lib'; -use lib '../lib'; -use Monitoring::Livestatus; - -$Data::Dumper::Sortkeys = 1; - -######################################################################### -# parse and check cmd line arguments -my ($opt_h, $opt_v, @opt_f); -Getopt::Long::Configure('no_ignore_case'); -if(!GetOptions ( - "h" => \$opt_h, - "v" => \$opt_v, - "<>" => \&add_file, -)) { - pod2usage( { -verbose => 1, -message => 'error in options' } ); - exit 3; -} - -if(defined $opt_h) { - pod2usage( { -verbose => 1 } ); - exit 3; -} -my $verbose = 0; -if(defined $opt_v) { - $verbose = 1; -} - -if(scalar @opt_f == 0) { - pod2usage( { -verbose => 1, -message => 'socket/server is a required option' } ); - exit 3; -} - -######################################################################### -Log::Log4perl->easy_init($DEBUG); -my $nl = Monitoring::Livestatus->new( - peer => \@opt_f, - verbose => $opt_v, - timeout => 5, - keepalive => 1, - logger => get_logger(), - ); -my $log = get_logger(); - -######################################################################### -my $querys = [ - { 'query' => "GET hostgroups\nColumns: members\nFilter: name = flap\nFilter: name = down\nOr: 2", - 'sub' => "selectall_arrayref", - 'opt' => {Slice => 1 } - }, -# { 'query' => "GET comments", -# 'sub' => "selectall_arrayref", -# 'opt' => {Slice => 1 } -# }, -# { 'query' => "GET downtimes", -# 'sub' => "selectall_arrayref", -# 'opt' => {Slice => 1, Sum => 1} -# }, -# { 'query' => "GET log\nFilter: time > ".(time() - 600)."\nLimit: 1", -# 'sub' => "selectall_arrayref", -# 'opt' => {Slice => 1, AddPeer => 1} -# }, -# { 'query' => "GET services\nFilter: contacts >= test\nFilter: host_contacts >= test\nOr: 2\nColumns: host_name description contacts host_contacts", -# 'sub' => "selectall_arrayref", -# 'opt' => {Slice => 1, AddPeer => 0} -# }, -# { 'query' => "GET services\nFilter: host_name = test_host_00\nFilter: description = test_flap_02\nOr: 2\nColumns: host_name description contacts host_contacts", -# 'sub' => "selectall_arrayref", -# 'opt' => {Slice => 1, AddPeer => 0} -# }, -]; -for my $query (@{$querys}) { - my $sub = $query->{'sub'}; - my $t0 = [gettimeofday]; - my $stats = $nl->$sub($query->{'query'}, 
$query->{'opt'}); - my $elapsed = tv_interval($t0); - print Dumper($stats); - print "Query took ".($elapsed)." seconds\n"; -} - - -######################################################################### -sub add_file { - my $file = shift; - push @opt_f, $file; -} diff -Nru check-mk-1.2.2p3/=unpacked-tar10=/api/perl/inc/Module/AutoInstall.pm check-mk-1.2.6p12/=unpacked-tar10=/api/perl/inc/Module/AutoInstall.pm --- check-mk-1.2.2p3/=unpacked-tar10=/api/perl/inc/Module/AutoInstall.pm 2012-10-04 13:12:37.000000000 +0000 +++ check-mk-1.2.6p12/=unpacked-tar10=/api/perl/inc/Module/AutoInstall.pm 1970-01-01 00:00:00.000000000 +0000 @@ -1,820 +0,0 @@ -#line 1 -package Module::AutoInstall; - -use strict; -use Cwd (); -use ExtUtils::MakeMaker (); - -use vars qw{$VERSION}; -BEGIN { - $VERSION = '1.03'; -} - -# special map on pre-defined feature sets -my %FeatureMap = ( - '' => 'Core Features', # XXX: deprecated - '-core' => 'Core Features', -); - -# various lexical flags -my ( @Missing, @Existing, %DisabledTests, $UnderCPAN, $HasCPANPLUS ); -my ( - $Config, $CheckOnly, $SkipInstall, $AcceptDefault, $TestOnly, $AllDeps -); -my ( $PostambleActions, $PostambleUsed ); - -# See if it's a testing or non-interactive session -_accept_default( $ENV{AUTOMATED_TESTING} or ! -t STDIN ); -_init(); - -sub _accept_default { - $AcceptDefault = shift; -} - -sub missing_modules { - return @Missing; -} - -sub do_install { - __PACKAGE__->install( - [ - $Config - ? ( UNIVERSAL::isa( $Config, 'HASH' ) ? %{$Config} : @{$Config} ) - : () - ], - @Missing, - ); -} - -# initialize various flags, and/or perform install -sub _init { - foreach my $arg ( - @ARGV, - split( - /[\s\t]+/, - $ENV{PERL_AUTOINSTALL} || $ENV{PERL_EXTUTILS_AUTOINSTALL} || '' - ) - ) - { - if ( $arg =~ /^--config=(.*)$/ ) { - $Config = [ split( ',', $1 ) ]; - } - elsif ( $arg =~ /^--installdeps=(.*)$/ ) { - __PACKAGE__->install( $Config, @Missing = split( /,/, $1 ) ); - exit 0; - } - elsif ( $arg =~ /^--default(?:deps)?$/ ) { - $AcceptDefault = 1; - } - elsif ( $arg =~ /^--check(?:deps)?$/ ) { - $CheckOnly = 1; - } - elsif ( $arg =~ /^--skip(?:deps)?$/ ) { - $SkipInstall = 1; - } - elsif ( $arg =~ /^--test(?:only)?$/ ) { - $TestOnly = 1; - } - elsif ( $arg =~ /^--all(?:deps)?$/ ) { - $AllDeps = 1; - } - } -} - -# overrides MakeMaker's prompt() to automatically accept the default choice -sub _prompt { - goto &ExtUtils::MakeMaker::prompt unless $AcceptDefault; - - my ( $prompt, $default ) = @_; - my $y = ( $default =~ /^[Yy]/ ); - - print $prompt, ' [', ( $y ? 'Y' : 'y' ), '/', ( $y ? 'n' : 'N' ), '] '; - print "$default\n"; - return $default; -} - -# the workhorse -sub import { - my $class = shift; - my @args = @_ or return; - my $core_all; - - print "*** $class version " . $class->VERSION . "\n"; - print "*** Checking for Perl dependencies...\n"; - - my $cwd = Cwd::cwd(); - - $Config = []; - - my $maxlen = length( - ( - sort { length($b) <=> length($a) } - grep { /^[^\-]/ } - map { - ref($_) - ? ( ( ref($_) eq 'HASH' ) ? 
keys(%$_) : @{$_} ) - : '' - } - map { +{@args}->{$_} } - grep { /^[^\-]/ or /^-core$/i } keys %{ +{@args} } - )[0] - ); - - # We want to know if we're under CPAN early to avoid prompting, but - # if we aren't going to try and install anything anyway then skip the - # check entirely since we don't want to have to load (and configure) - # an old CPAN just for a cosmetic message - - $UnderCPAN = _check_lock(1) unless $SkipInstall; - - while ( my ( $feature, $modules ) = splice( @args, 0, 2 ) ) { - my ( @required, @tests, @skiptests ); - my $default = 1; - my $conflict = 0; - - if ( $feature =~ m/^-(\w+)$/ ) { - my $option = lc($1); - - # check for a newer version of myself - _update_to( $modules, @_ ) and return if $option eq 'version'; - - # sets CPAN configuration options - $Config = $modules if $option eq 'config'; - - # promote every features to core status - $core_all = ( $modules =~ /^all$/i ) and next - if $option eq 'core'; - - next unless $option eq 'core'; - } - - print "[" . ( $FeatureMap{ lc($feature) } || $feature ) . "]\n"; - - $modules = [ %{$modules} ] if UNIVERSAL::isa( $modules, 'HASH' ); - - unshift @$modules, -default => &{ shift(@$modules) } - if ( ref( $modules->[0] ) eq 'CODE' ); # XXX: bugward combatability - - while ( my ( $mod, $arg ) = splice( @$modules, 0, 2 ) ) { - if ( $mod =~ m/^-(\w+)$/ ) { - my $option = lc($1); - - $default = $arg if ( $option eq 'default' ); - $conflict = $arg if ( $option eq 'conflict' ); - @tests = @{$arg} if ( $option eq 'tests' ); - @skiptests = @{$arg} if ( $option eq 'skiptests' ); - - next; - } - - printf( "- %-${maxlen}s ...", $mod ); - - if ( $arg and $arg =~ /^\D/ ) { - unshift @$modules, $arg; - $arg = 0; - } - - # XXX: check for conflicts and uninstalls(!) them. - my $cur = _load($mod); - if (_version_cmp ($cur, $arg) >= 0) - { - print "loaded. ($cur" . ( $arg ? " >= $arg" : '' ) . ")\n"; - push @Existing, $mod => $arg; - $DisabledTests{$_} = 1 for map { glob($_) } @skiptests; - } - else { - if (not defined $cur) # indeed missing - { - print "missing." . ( $arg ? " (would need $arg)" : '' ) . "\n"; - } - else - { - # no need to check $arg as _version_cmp ($cur, undef) would satisfy >= above - print "too old. ($cur < $arg)\n"; - } - - push @required, $mod => $arg; - } - } - - next unless @required; - - my $mandatory = ( $feature eq '-core' or $core_all ); - - if ( - !$SkipInstall - and ( - $CheckOnly - or ($mandatory and $UnderCPAN) - or $AllDeps - or _prompt( - qq{==> Auto-install the } - . ( @required / 2 ) - . ( $mandatory ? ' mandatory' : ' optional' ) - . qq{ module(s) from CPAN?}, - $default ? 'y' : 'n', - ) =~ /^[Yy]/ - ) - ) - { - push( @Missing, @required ); - $DisabledTests{$_} = 1 for map { glob($_) } @skiptests; - } - - elsif ( !$SkipInstall - and $default - and $mandatory - and - _prompt( qq{==> The module(s) are mandatory! Really skip?}, 'n', ) - =~ /^[Nn]/ ) - { - push( @Missing, @required ); - $DisabledTests{$_} = 1 for map { glob($_) } @skiptests; - } - - else { - $DisabledTests{$_} = 1 for map { glob($_) } @tests; - } - } - - if ( @Missing and not( $CheckOnly or $UnderCPAN ) ) { - require Config; - print -"*** Dependencies will be installed the next time you type '$Config::Config{make}'.\n"; - - # make an educated guess of whether we'll need root permission. 
- print " (You may need to do that as the 'root' user.)\n" - if eval '$>'; - } - print "*** $class configuration finished.\n"; - - chdir $cwd; - - # import to main:: - no strict 'refs'; - *{'main::WriteMakefile'} = \&Write if caller(0) eq 'main'; - - return (@Existing, @Missing); -} - -sub _running_under { - my $thing = shift; - print <<"END_MESSAGE"; -*** Since we're running under ${thing}, I'll just let it take care - of the dependency's installation later. -END_MESSAGE - return 1; -} - -# Check to see if we are currently running under CPAN.pm and/or CPANPLUS; -# if we are, then we simply let it taking care of our dependencies -sub _check_lock { - return unless @Missing or @_; - - my $cpan_env = $ENV{PERL5_CPAN_IS_RUNNING}; - - if ($ENV{PERL5_CPANPLUS_IS_RUNNING}) { - return _running_under($cpan_env ? 'CPAN' : 'CPANPLUS'); - } - - require CPAN; - - if ($CPAN::VERSION > '1.89') { - if ($cpan_env) { - return _running_under('CPAN'); - } - return; # CPAN.pm new enough, don't need to check further - } - - # last ditch attempt, this -will- configure CPAN, very sorry - - _load_cpan(1); # force initialize even though it's already loaded - - # Find the CPAN lock-file - my $lock = MM->catfile( $CPAN::Config->{cpan_home}, ".lock" ); - return unless -f $lock; - - # Check the lock - local *LOCK; - return unless open(LOCK, $lock); - - if ( - ( $^O eq 'MSWin32' ? _under_cpan() : == getppid() ) - and ( $CPAN::Config->{prerequisites_policy} || '' ) ne 'ignore' - ) { - print <<'END_MESSAGE'; - -*** Since we're running under CPAN, I'll just let it take care - of the dependency's installation later. -END_MESSAGE - return 1; - } - - close LOCK; - return; -} - -sub install { - my $class = shift; - - my $i; # used below to strip leading '-' from config keys - my @config = ( map { s/^-// if ++$i; $_ } @{ +shift } ); - - my ( @modules, @installed ); - while ( my ( $pkg, $ver ) = splice( @_, 0, 2 ) ) { - - # grep out those already installed - if ( _version_cmp( _load($pkg), $ver ) >= 0 ) { - push @installed, $pkg; - } - else { - push @modules, $pkg, $ver; - } - } - - return @installed unless @modules; # nothing to do - return @installed if _check_lock(); # defer to the CPAN shell - - print "*** Installing dependencies...\n"; - - return unless _connected_to('cpan.org'); - - my %args = @config; - my %failed; - local *FAILED; - if ( $args{do_once} and open( FAILED, '.#autoinstall.failed' ) ) { - while () { chomp; $failed{$_}++ } - close FAILED; - - my @newmod; - while ( my ( $k, $v ) = splice( @modules, 0, 2 ) ) { - push @newmod, ( $k => $v ) unless $failed{$k}; - } - @modules = @newmod; - } - - if ( _has_cpanplus() and not $ENV{PERL_AUTOINSTALL_PREFER_CPAN} ) { - _install_cpanplus( \@modules, \@config ); - } else { - _install_cpan( \@modules, \@config ); - } - - print "*** $class installation finished.\n"; - - # see if we have successfully installed them - while ( my ( $pkg, $ver ) = splice( @modules, 0, 2 ) ) { - if ( _version_cmp( _load($pkg), $ver ) >= 0 ) { - push @installed, $pkg; - } - elsif ( $args{do_once} and open( FAILED, '>> .#autoinstall.failed' ) ) { - print FAILED "$pkg\n"; - } - } - - close FAILED if $args{do_once}; - - return @installed; -} - -sub _install_cpanplus { - my @modules = @{ +shift }; - my @config = _cpanplus_config( @{ +shift } ); - my $installed = 0; - - require CPANPLUS::Backend; - my $cp = CPANPLUS::Backend->new; - my $conf = $cp->configure_object; - - return unless $conf->can('conf') # 0.05x+ with "sudo" support - or _can_write($conf->_get_build('base')); # 0.04x - - # if we're root, 
set UNINST=1 to avoid trouble unless user asked for it. - my $makeflags = $conf->get_conf('makeflags') || ''; - if ( UNIVERSAL::isa( $makeflags, 'HASH' ) ) { - # 0.03+ uses a hashref here - $makeflags->{UNINST} = 1 unless exists $makeflags->{UNINST}; - - } else { - # 0.02 and below uses a scalar - $makeflags = join( ' ', split( ' ', $makeflags ), 'UNINST=1' ) - if ( $makeflags !~ /\bUNINST\b/ and eval qq{ $> eq '0' } ); - - } - $conf->set_conf( makeflags => $makeflags ); - $conf->set_conf( prereqs => 1 ); - - - - while ( my ( $key, $val ) = splice( @config, 0, 2 ) ) { - $conf->set_conf( $key, $val ); - } - - my $modtree = $cp->module_tree; - while ( my ( $pkg, $ver ) = splice( @modules, 0, 2 ) ) { - print "*** Installing $pkg...\n"; - - MY::preinstall( $pkg, $ver ) or next if defined &MY::preinstall; - - my $success; - my $obj = $modtree->{$pkg}; - - if ( $obj and _version_cmp( $obj->{version}, $ver ) >= 0 ) { - my $pathname = $pkg; - $pathname =~ s/::/\\W/; - - foreach my $inc ( grep { m/$pathname.pm/i } keys(%INC) ) { - delete $INC{$inc}; - } - - my $rv = $cp->install( modules => [ $obj->{module} ] ); - - if ( $rv and ( $rv->{ $obj->{module} } or $rv->{ok} ) ) { - print "*** $pkg successfully installed.\n"; - $success = 1; - } else { - print "*** $pkg installation cancelled.\n"; - $success = 0; - } - - $installed += $success; - } else { - print << "."; -*** Could not find a version $ver or above for $pkg; skipping. -. - } - - MY::postinstall( $pkg, $ver, $success ) if defined &MY::postinstall; - } - - return $installed; -} - -sub _cpanplus_config { - my @config = (); - while ( @_ ) { - my ($key, $value) = (shift(), shift()); - if ( $key eq 'prerequisites_policy' ) { - if ( $value eq 'follow' ) { - $value = CPANPLUS::Internals::Constants::PREREQ_INSTALL(); - } elsif ( $value eq 'ask' ) { - $value = CPANPLUS::Internals::Constants::PREREQ_ASK(); - } elsif ( $value eq 'ignore' ) { - $value = CPANPLUS::Internals::Constants::PREREQ_IGNORE(); - } else { - die "*** Cannot convert option $key = '$value' to CPANPLUS version.\n"; - } - } else { - die "*** Cannot convert option $key to CPANPLUS version.\n"; - } - } - return @config; -} - -sub _install_cpan { - my @modules = @{ +shift }; - my @config = @{ +shift }; - my $installed = 0; - my %args; - - _load_cpan(); - require Config; - - if (CPAN->VERSION < 1.80) { - # no "sudo" support, probe for writableness - return unless _can_write( MM->catfile( $CPAN::Config->{cpan_home}, 'sources' ) ) - and _can_write( $Config::Config{sitelib} ); - } - - # if we're root, set UNINST=1 to avoid trouble unless user asked for it. 
- my $makeflags = $CPAN::Config->{make_install_arg} || ''; - $CPAN::Config->{make_install_arg} = - join( ' ', split( ' ', $makeflags ), 'UNINST=1' ) - if ( $makeflags !~ /\bUNINST\b/ and eval qq{ $> eq '0' } ); - - # don't show start-up info - $CPAN::Config->{inhibit_startup_message} = 1; - - # set additional options - while ( my ( $opt, $arg ) = splice( @config, 0, 2 ) ) { - ( $args{$opt} = $arg, next ) - if $opt =~ /^force$/; # pseudo-option - $CPAN::Config->{$opt} = $arg; - } - - local $CPAN::Config->{prerequisites_policy} = 'follow'; - - while ( my ( $pkg, $ver ) = splice( @modules, 0, 2 ) ) { - MY::preinstall( $pkg, $ver ) or next if defined &MY::preinstall; - - print "*** Installing $pkg...\n"; - - my $obj = CPAN::Shell->expand( Module => $pkg ); - my $success = 0; - - if ( $obj and _version_cmp( $obj->cpan_version, $ver ) >= 0 ) { - my $pathname = $pkg; - $pathname =~ s/::/\\W/; - - foreach my $inc ( grep { m/$pathname.pm/i } keys(%INC) ) { - delete $INC{$inc}; - } - - my $rv = $args{force} ? CPAN::Shell->force( install => $pkg ) - : CPAN::Shell->install($pkg); - $rv ||= eval { - $CPAN::META->instance( 'CPAN::Distribution', $obj->cpan_file, ) - ->{install} - if $CPAN::META; - }; - - if ( $rv eq 'YES' ) { - print "*** $pkg successfully installed.\n"; - $success = 1; - } - else { - print "*** $pkg installation failed.\n"; - $success = 0; - } - - $installed += $success; - } - else { - print << "."; -*** Could not find a version $ver or above for $pkg; skipping. -. - } - - MY::postinstall( $pkg, $ver, $success ) if defined &MY::postinstall; - } - - return $installed; -} - -sub _has_cpanplus { - return ( - $HasCPANPLUS = ( - $INC{'CPANPLUS/Config.pm'} - or _load('CPANPLUS::Shell::Default') - ) - ); -} - -# make guesses on whether we're under the CPAN installation directory -sub _under_cpan { - require Cwd; - require File::Spec; - - my $cwd = File::Spec->canonpath( Cwd::cwd() ); - my $cpan = File::Spec->canonpath( $CPAN::Config->{cpan_home} ); - - return ( index( $cwd, $cpan ) > -1 ); -} - -sub _update_to { - my $class = __PACKAGE__; - my $ver = shift; - - return - if _version_cmp( _load($class), $ver ) >= 0; # no need to upgrade - - if ( - _prompt( "==> A newer version of $class ($ver) is required. Install?", - 'y' ) =~ /^[Nn]/ - ) - { - die "*** Please install $class $ver manually.\n"; - } - - print << "."; -*** Trying to fetch it from CPAN... -. - - # install ourselves - _load($class) and return $class->import(@_) - if $class->install( [], $class, $ver ); - - print << '.'; exit 1; - -*** Cannot bootstrap myself. :-( Installation terminated. -. -} - -# check if we're connected to some host, using inet_aton -sub _connected_to { - my $site = shift; - - return ( - ( _load('Socket') and Socket::inet_aton($site) ) or _prompt( - qq( -*** Your host cannot resolve the domain name '$site', which - probably means the Internet connections are unavailable. -==> Should we try to install the required module(s) anyway?), 'n' - ) =~ /^[Yy]/ - ); -} - -# check if a directory is writable; may create it on demand -sub _can_write { - my $path = shift; - mkdir( $path, 0755 ) unless -e $path; - - return 1 if -w $path; - - print << "."; -*** You are not allowed to write to the directory '$path'; - the installation may fail due to insufficient permissions. -. - - if ( - eval '$>' and lc(`sudo -V`) =~ /version/ and _prompt( - qq( -==> Should we try to re-execute the autoinstall process with 'sudo'?), - ((-t STDIN) ? 
'y' : 'n') - ) =~ /^[Yy]/ - ) - { - - # try to bootstrap ourselves from sudo - print << "."; -*** Trying to re-execute the autoinstall process with 'sudo'... -. - my $missing = join( ',', @Missing ); - my $config = join( ',', - UNIVERSAL::isa( $Config, 'HASH' ) ? %{$Config} : @{$Config} ) - if $Config; - - return - unless system( 'sudo', $^X, $0, "--config=$config", - "--installdeps=$missing" ); - - print << "."; -*** The 'sudo' command exited with error! Resuming... -. - } - - return _prompt( - qq( -==> Should we try to install the required module(s) anyway?), 'n' - ) =~ /^[Yy]/; -} - -# load a module and return the version it reports -sub _load { - my $mod = pop; # class/instance doesn't matter - my $file = $mod; - - $file =~ s|::|/|g; - $file .= '.pm'; - - local $@; - return eval { require $file; $mod->VERSION } || ( $@ ? undef: 0 ); -} - -# Load CPAN.pm and it's configuration -sub _load_cpan { - return if $CPAN::VERSION and $CPAN::Config and not @_; - require CPAN; - - # CPAN-1.82+ adds CPAN::Config::AUTOLOAD to redirect to - # CPAN::HandleConfig->load. CPAN reports that the redirection - # is deprecated in a warning printed at the user. - - # CPAN-1.81 expects CPAN::HandleConfig->load, does not have - # $CPAN::HandleConfig::VERSION but cannot handle - # CPAN::Config->load - - # Which "versions expect CPAN::Config->load? - - if ( $CPAN::HandleConfig::VERSION - || CPAN::HandleConfig->can('load') - ) { - # Newer versions of CPAN have a HandleConfig module - CPAN::HandleConfig->load; - } else { - # Older versions had the load method in Config directly - CPAN::Config->load; - } -} - -# compare two versions, either use Sort::Versions or plain comparison -# return values same as <=> -sub _version_cmp { - my ( $cur, $min ) = @_; - return -1 unless defined $cur; # if 0 keep comparing - return 1 unless $min; - - $cur =~ s/\s+$//; - - # check for version numbers that are not in decimal format - if ( ref($cur) or ref($min) or $cur =~ /v|\..*\./ or $min =~ /v|\..*\./ ) { - if ( ( $version::VERSION or defined( _load('version') )) and - version->can('new') - ) { - - # use version.pm if it is installed. - return version->new($cur) <=> version->new($min); - } - elsif ( $Sort::Versions::VERSION or defined( _load('Sort::Versions') ) ) - { - - # use Sort::Versions as the sorting algorithm for a.b.c versions - return Sort::Versions::versioncmp( $cur, $min ); - } - - warn "Cannot reliably compare non-decimal formatted versions.\n" - . "Please install version.pm or Sort::Versions.\n"; - } - - # plain comparison - local $^W = 0; # shuts off 'not numeric' bugs - return $cur <=> $min; -} - -# nothing; this usage is deprecated. -sub main::PREREQ_PM { return {}; } - -sub _make_args { - my %args = @_; - - $args{PREREQ_PM} = { %{ $args{PREREQ_PM} || {} }, @Existing, @Missing } - if $UnderCPAN or $TestOnly; - - if ( $args{EXE_FILES} and -e 'MANIFEST' ) { - require ExtUtils::Manifest; - my $manifest = ExtUtils::Manifest::maniread('MANIFEST'); - - $args{EXE_FILES} = - [ grep { exists $manifest->{$_} } @{ $args{EXE_FILES} } ]; - } - - $args{test}{TESTS} ||= 't/*.t'; - $args{test}{TESTS} = join( ' ', - grep { !exists( $DisabledTests{$_} ) } - map { glob($_) } split( /\s+/, $args{test}{TESTS} ) ); - - my $missing = join( ',', @Missing ); - my $config = - join( ',', UNIVERSAL::isa( $Config, 'HASH' ) ? %{$Config} : @{$Config} ) - if $Config; - - $PostambleActions = ( - ($missing and not $UnderCPAN) - ? 
"\$(PERL) $0 --config=$config --installdeps=$missing" - : "\$(NOECHO) \$(NOOP)" - ); - - return %args; -} - -# a wrapper to ExtUtils::MakeMaker::WriteMakefile -sub Write { - require Carp; - Carp::croak "WriteMakefile: Need even number of args" if @_ % 2; - - if ($CheckOnly) { - print << "."; -*** Makefile not written in check-only mode. -. - return; - } - - my %args = _make_args(@_); - - no strict 'refs'; - - $PostambleUsed = 0; - local *MY::postamble = \&postamble unless defined &MY::postamble; - ExtUtils::MakeMaker::WriteMakefile(%args); - - print << "." unless $PostambleUsed; -*** WARNING: Makefile written with customized MY::postamble() without - including contents from Module::AutoInstall::postamble() -- - auto installation features disabled. Please contact the author. -. - - return 1; -} - -sub postamble { - $PostambleUsed = 1; - - return <<"END_MAKE"; - -config :: installdeps -\t\$(NOECHO) \$(NOOP) - -checkdeps :: -\t\$(PERL) $0 --checkdeps - -installdeps :: -\t$PostambleActions - -END_MAKE - -} - -1; - -__END__ - -#line 1071 diff -Nru check-mk-1.2.2p3/=unpacked-tar10=/api/perl/inc/Module/Install/AutoInstall.pm check-mk-1.2.6p12/=unpacked-tar10=/api/perl/inc/Module/Install/AutoInstall.pm --- check-mk-1.2.2p3/=unpacked-tar10=/api/perl/inc/Module/Install/AutoInstall.pm 2012-10-04 13:12:37.000000000 +0000 +++ check-mk-1.2.6p12/=unpacked-tar10=/api/perl/inc/Module/Install/AutoInstall.pm 1970-01-01 00:00:00.000000000 +0000 @@ -1,82 +0,0 @@ -#line 1 -package Module::Install::AutoInstall; - -use strict; -use Module::Install::Base (); - -use vars qw{$VERSION @ISA $ISCORE}; -BEGIN { - $VERSION = '1.00'; - @ISA = 'Module::Install::Base'; - $ISCORE = 1; -} - -sub AutoInstall { $_[0] } - -sub run { - my $self = shift; - $self->auto_install_now(@_); -} - -sub write { - my $self = shift; - $self->auto_install(@_); -} - -sub auto_install { - my $self = shift; - return if $self->{done}++; - - # Flatten array of arrays into a single array - my @core = map @$_, map @$_, grep ref, - $self->build_requires, $self->requires; - - my @config = @_; - - # We'll need Module::AutoInstall - $self->include('Module::AutoInstall'); - require Module::AutoInstall; - - my @features_require = Module::AutoInstall->import( - (@config ? (-config => \@config) : ()), - (@core ? (-core => \@core) : ()), - $self->features, - ); - - my %seen; - my @requires = map @$_, map @$_, grep ref, $self->requires; - while (my ($mod, $ver) = splice(@requires, 0, 2)) { - $seen{$mod}{$ver}++; - } - my @build_requires = map @$_, map @$_, grep ref, $self->build_requires; - while (my ($mod, $ver) = splice(@build_requires, 0, 2)) { - $seen{$mod}{$ver}++; - } - my @configure_requires = map @$_, map @$_, grep ref, $self->configure_requires; - while (my ($mod, $ver) = splice(@configure_requires, 0, 2)) { - $seen{$mod}{$ver}++; - } - - my @deduped; - while (my ($mod, $ver) = splice(@features_require, 0, 2)) { - push @deduped, $mod => $ver unless $seen{$mod}{$ver}++; - } - - $self->requires(@deduped); - - $self->makemaker_args( Module::AutoInstall::_make_args() ); - - my $class = ref($self); - $self->postamble( - "# --- $class section:\n" . 
- Module::AutoInstall::postamble() - ); -} - -sub auto_install_now { - my $self = shift; - $self->auto_install(@_); - Module::AutoInstall::do_install(); -} - -1; diff -Nru check-mk-1.2.2p3/=unpacked-tar10=/api/perl/inc/Module/Install/Base.pm check-mk-1.2.6p12/=unpacked-tar10=/api/perl/inc/Module/Install/Base.pm --- check-mk-1.2.2p3/=unpacked-tar10=/api/perl/inc/Module/Install/Base.pm 2012-10-04 13:12:37.000000000 +0000 +++ check-mk-1.2.6p12/=unpacked-tar10=/api/perl/inc/Module/Install/Base.pm 1970-01-01 00:00:00.000000000 +0000 @@ -1,83 +0,0 @@ -#line 1 -package Module::Install::Base; - -use strict 'vars'; -use vars qw{$VERSION}; -BEGIN { - $VERSION = '1.00'; -} - -# Suspend handler for "redefined" warnings -BEGIN { - my $w = $SIG{__WARN__}; - $SIG{__WARN__} = sub { $w }; -} - -#line 42 - -sub new { - my $class = shift; - unless ( defined &{"${class}::call"} ) { - *{"${class}::call"} = sub { shift->_top->call(@_) }; - } - unless ( defined &{"${class}::load"} ) { - *{"${class}::load"} = sub { shift->_top->load(@_) }; - } - bless { @_ }, $class; -} - -#line 61 - -sub AUTOLOAD { - local $@; - my $func = eval { shift->_top->autoload } or return; - goto &$func; -} - -#line 75 - -sub _top { - $_[0]->{_top}; -} - -#line 90 - -sub admin { - $_[0]->_top->{admin} - or - Module::Install::Base::FakeAdmin->new; -} - -#line 106 - -sub is_admin { - ! $_[0]->admin->isa('Module::Install::Base::FakeAdmin'); -} - -sub DESTROY {} - -package Module::Install::Base::FakeAdmin; - -use vars qw{$VERSION}; -BEGIN { - $VERSION = $Module::Install::Base::VERSION; -} - -my $fake; - -sub new { - $fake ||= bless(\@_, $_[0]); -} - -sub AUTOLOAD {} - -sub DESTROY {} - -# Restore warning handler -BEGIN { - $SIG{__WARN__} = $SIG{__WARN__}->(); -} - -1; - -#line 159 diff -Nru check-mk-1.2.2p3/=unpacked-tar10=/api/perl/inc/Module/Install/Can.pm check-mk-1.2.6p12/=unpacked-tar10=/api/perl/inc/Module/Install/Can.pm --- check-mk-1.2.2p3/=unpacked-tar10=/api/perl/inc/Module/Install/Can.pm 2012-10-04 13:12:37.000000000 +0000 +++ check-mk-1.2.6p12/=unpacked-tar10=/api/perl/inc/Module/Install/Can.pm 1970-01-01 00:00:00.000000000 +0000 @@ -1,81 +0,0 @@ -#line 1 -package Module::Install::Can; - -use strict; -use Config (); -use File::Spec (); -use ExtUtils::MakeMaker (); -use Module::Install::Base (); - -use vars qw{$VERSION @ISA $ISCORE}; -BEGIN { - $VERSION = '1.00'; - @ISA = 'Module::Install::Base'; - $ISCORE = 1; -} - -# check if we can load some module -### Upgrade this to not have to load the module if possible -sub can_use { - my ($self, $mod, $ver) = @_; - $mod =~ s{::|\\}{/}g; - $mod .= '.pm' unless $mod =~ /\.pm$/i; - - my $pkg = $mod; - $pkg =~ s{/}{::}g; - $pkg =~ s{\.pm$}{}i; - - local $@; - eval { require $mod; $pkg->VERSION($ver || 0); 1 }; -} - -# check if we can run some command -sub can_run { - my ($self, $cmd) = @_; - - my $_cmd = $cmd; - return $_cmd if (-x $_cmd or $_cmd = MM->maybe_command($_cmd)); - - for my $dir ((split /$Config::Config{path_sep}/, $ENV{PATH}), '.') { - next if $dir eq ''; - my $abs = File::Spec->catfile($dir, $_[1]); - return $abs if (-x $abs or $abs = MM->maybe_command($abs)); - } - - return; -} - -# can we locate a (the) C compiler -sub can_cc { - my $self = shift; - my @chunks = split(/ /, $Config::Config{cc}) or return; - - # $Config{cc} may contain args; try to find out the program part - while (@chunks) { - return $self->can_run("@chunks") || (pop(@chunks), next); - } - - return; -} - -# Fix Cygwin bug on maybe_command(); -if ( $^O eq 'cygwin' ) { - require ExtUtils::MM_Cygwin; - require 
ExtUtils::MM_Win32; - if ( ! defined(&ExtUtils::MM_Cygwin::maybe_command) ) { - *ExtUtils::MM_Cygwin::maybe_command = sub { - my ($self, $file) = @_; - if ($file =~ m{^/cygdrive/}i and ExtUtils::MM_Win32->can('maybe_command')) { - ExtUtils::MM_Win32->maybe_command($file); - } else { - ExtUtils::MM_Unix->maybe_command($file); - } - } - } -} - -1; - -__END__ - -#line 156 diff -Nru check-mk-1.2.2p3/=unpacked-tar10=/api/perl/inc/Module/Install/Fetch.pm check-mk-1.2.6p12/=unpacked-tar10=/api/perl/inc/Module/Install/Fetch.pm --- check-mk-1.2.2p3/=unpacked-tar10=/api/perl/inc/Module/Install/Fetch.pm 2012-10-04 13:12:37.000000000 +0000 +++ check-mk-1.2.6p12/=unpacked-tar10=/api/perl/inc/Module/Install/Fetch.pm 1970-01-01 00:00:00.000000000 +0000 @@ -1,93 +0,0 @@ -#line 1 -package Module::Install::Fetch; - -use strict; -use Module::Install::Base (); - -use vars qw{$VERSION @ISA $ISCORE}; -BEGIN { - $VERSION = '1.00'; - @ISA = 'Module::Install::Base'; - $ISCORE = 1; -} - -sub get_file { - my ($self, %args) = @_; - my ($scheme, $host, $path, $file) = - $args{url} =~ m|^(\w+)://([^/]+)(.+)/(.+)| or return; - - if ( $scheme eq 'http' and ! eval { require LWP::Simple; 1 } ) { - $args{url} = $args{ftp_url} - or (warn("LWP support unavailable!\n"), return); - ($scheme, $host, $path, $file) = - $args{url} =~ m|^(\w+)://([^/]+)(.+)/(.+)| or return; - } - - $|++; - print "Fetching '$file' from $host... "; - - unless (eval { require Socket; Socket::inet_aton($host) }) { - warn "'$host' resolve failed!\n"; - return; - } - - return unless $scheme eq 'ftp' or $scheme eq 'http'; - - require Cwd; - my $dir = Cwd::getcwd(); - chdir $args{local_dir} or return if exists $args{local_dir}; - - if (eval { require LWP::Simple; 1 }) { - LWP::Simple::mirror($args{url}, $file); - } - elsif (eval { require Net::FTP; 1 }) { eval { - # use Net::FTP to get past firewall - my $ftp = Net::FTP->new($host, Passive => 1, Timeout => 600); - $ftp->login("anonymous", 'anonymous@example.com'); - $ftp->cwd($path); - $ftp->binary; - $ftp->get($file) or (warn("$!\n"), return); - $ftp->quit; - } } - elsif (my $ftp = $self->can_run('ftp')) { eval { - # no Net::FTP, fallback to ftp.exe - require FileHandle; - my $fh = FileHandle->new; - - local $SIG{CHLD} = 'IGNORE'; - unless ($fh->open("|$ftp -n")) { - warn "Couldn't open ftp: $!\n"; - chdir $dir; return; - } - - my @dialog = split(/\n/, <<"END_FTP"); -open $host -user anonymous anonymous\@example.com -cd $path -binary -get $file $file -quit -END_FTP - foreach (@dialog) { $fh->print("$_\n") } - $fh->close; - } } - else { - warn "No working 'ftp' program available!\n"; - chdir $dir; return; - } - - unless (-f $file) { - warn "Fetching failed: $@\n"; - chdir $dir; return; - } - - return if exists $args{size} and -s $file != $args{size}; - system($args{run}) if exists $args{run}; - unlink($file) if $args{remove}; - - print(((!exists $args{check_for} or -e $args{check_for}) - ? "done!" : "failed! 
($!)"), "\n"); - chdir $dir; return !$?; -} - -1; diff -Nru check-mk-1.2.2p3/=unpacked-tar10=/api/perl/inc/Module/Install/Include.pm check-mk-1.2.6p12/=unpacked-tar10=/api/perl/inc/Module/Install/Include.pm --- check-mk-1.2.2p3/=unpacked-tar10=/api/perl/inc/Module/Install/Include.pm 2012-10-04 13:12:37.000000000 +0000 +++ check-mk-1.2.6p12/=unpacked-tar10=/api/perl/inc/Module/Install/Include.pm 1970-01-01 00:00:00.000000000 +0000 @@ -1,34 +0,0 @@ -#line 1 -package Module::Install::Include; - -use strict; -use Module::Install::Base (); - -use vars qw{$VERSION @ISA $ISCORE}; -BEGIN { - $VERSION = '1.00'; - @ISA = 'Module::Install::Base'; - $ISCORE = 1; -} - -sub include { - shift()->admin->include(@_); -} - -sub include_deps { - shift()->admin->include_deps(@_); -} - -sub auto_include { - shift()->admin->auto_include(@_); -} - -sub auto_include_deps { - shift()->admin->auto_include_deps(@_); -} - -sub auto_include_dependent_dists { - shift()->admin->auto_include_dependent_dists(@_); -} - -1; diff -Nru check-mk-1.2.2p3/=unpacked-tar10=/api/perl/inc/Module/Install/Makefile.pm check-mk-1.2.6p12/=unpacked-tar10=/api/perl/inc/Module/Install/Makefile.pm --- check-mk-1.2.2p3/=unpacked-tar10=/api/perl/inc/Module/Install/Makefile.pm 2012-10-04 13:12:37.000000000 +0000 +++ check-mk-1.2.6p12/=unpacked-tar10=/api/perl/inc/Module/Install/Makefile.pm 1970-01-01 00:00:00.000000000 +0000 @@ -1,415 +0,0 @@ -#line 1 -package Module::Install::Makefile; - -use strict 'vars'; -use ExtUtils::MakeMaker (); -use Module::Install::Base (); -use Fcntl qw/:flock :seek/; - -use vars qw{$VERSION @ISA $ISCORE}; -BEGIN { - $VERSION = '1.00'; - @ISA = 'Module::Install::Base'; - $ISCORE = 1; -} - -sub Makefile { $_[0] } - -my %seen = (); - -sub prompt { - shift; - - # Infinite loop protection - my @c = caller(); - if ( ++$seen{"$c[1]|$c[2]|$_[0]"} > 3 ) { - die "Caught an potential prompt infinite loop ($c[1]|$c[2]|$_[0])"; - } - - # In automated testing or non-interactive session, always use defaults - if ( ($ENV{AUTOMATED_TESTING} or -! -t STDIN) and ! $ENV{PERL_MM_USE_DEFAULT} ) { - local $ENV{PERL_MM_USE_DEFAULT} = 1; - goto &ExtUtils::MakeMaker::prompt; - } else { - goto &ExtUtils::MakeMaker::prompt; - } -} - -# Store a cleaned up version of the MakeMaker version, -# since we need to behave differently in a variety of -# ways based on the MM version. -my $makemaker = eval $ExtUtils::MakeMaker::VERSION; - -# If we are passed a param, do a "newer than" comparison. -# Otherwise, just return the MakeMaker version. -sub makemaker { - ( @_ < 2 or $makemaker >= eval($_[1]) ) ? $makemaker : 0 -} - -# Ripped from ExtUtils::MakeMaker 6.56, and slightly modified -# as we only need to know here whether the attribute is an array -# or a hash or something else (which may or may not be appendable). 
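-# (The strategies below are interpreted by makemaker_args() and
-# makemaker_append(): ARRAY attributes are merged by pushing elements,
-# HASH attributes are merged key-wise, APPENDABLE attributes are joined
-# into one space-separated string, and anything else simply overwrites
-# the previous value, with a warning if one was already set.)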
-my %makemaker_argtype = ( - C => 'ARRAY', - CONFIG => 'ARRAY', -# CONFIGURE => 'CODE', # ignore - DIR => 'ARRAY', - DL_FUNCS => 'HASH', - DL_VARS => 'ARRAY', - EXCLUDE_EXT => 'ARRAY', - EXE_FILES => 'ARRAY', - FUNCLIST => 'ARRAY', - H => 'ARRAY', - IMPORTS => 'HASH', - INCLUDE_EXT => 'ARRAY', - LIBS => 'ARRAY', # ignore '' - MAN1PODS => 'HASH', - MAN3PODS => 'HASH', - META_ADD => 'HASH', - META_MERGE => 'HASH', - PL_FILES => 'HASH', - PM => 'HASH', - PMLIBDIRS => 'ARRAY', - PMLIBPARENTDIRS => 'ARRAY', - PREREQ_PM => 'HASH', - CONFIGURE_REQUIRES => 'HASH', - SKIP => 'ARRAY', - TYPEMAPS => 'ARRAY', - XS => 'HASH', -# VERSION => ['version',''], # ignore -# _KEEP_AFTER_FLUSH => '', - - clean => 'HASH', - depend => 'HASH', - dist => 'HASH', - dynamic_lib=> 'HASH', - linkext => 'HASH', - macro => 'HASH', - postamble => 'HASH', - realclean => 'HASH', - test => 'HASH', - tool_autosplit => 'HASH', - - # special cases where you can use makemaker_append - CCFLAGS => 'APPENDABLE', - DEFINE => 'APPENDABLE', - INC => 'APPENDABLE', - LDDLFLAGS => 'APPENDABLE', - LDFROM => 'APPENDABLE', -); - -sub makemaker_args { - my ($self, %new_args) = @_; - my $args = ( $self->{makemaker_args} ||= {} ); - foreach my $key (keys %new_args) { - if ($makemaker_argtype{$key}) { - if ($makemaker_argtype{$key} eq 'ARRAY') { - $args->{$key} = [] unless defined $args->{$key}; - unless (ref $args->{$key} eq 'ARRAY') { - $args->{$key} = [$args->{$key}] - } - push @{$args->{$key}}, - ref $new_args{$key} eq 'ARRAY' - ? @{$new_args{$key}} - : $new_args{$key}; - } - elsif ($makemaker_argtype{$key} eq 'HASH') { - $args->{$key} = {} unless defined $args->{$key}; - foreach my $skey (keys %{ $new_args{$key} }) { - $args->{$key}{$skey} = $new_args{$key}{$skey}; - } - } - elsif ($makemaker_argtype{$key} eq 'APPENDABLE') { - $self->makemaker_append($key => $new_args{$key}); - } - } - else { - if (defined $args->{$key}) { - warn qq{MakeMaker attribute "$key" is overriden; use "makemaker_append" to append values\n}; - } - $args->{$key} = $new_args{$key}; - } - } - return $args; -} - -# For mm args that take multiple space-seperated args, -# append an argument to the current list. -sub makemaker_append { - my $self = shift; - my $name = shift; - my $args = $self->makemaker_args; - $args->{$name} = defined $args->{$name} - ? join( ' ', $args->{$name}, @_ ) - : join( ' ', @_ ); -} - -sub build_subdirs { - my $self = shift; - my $subdirs = $self->makemaker_args->{DIR} ||= []; - for my $subdir (@_) { - push @$subdirs, $subdir; - } -} - -sub clean_files { - my $self = shift; - my $clean = $self->makemaker_args->{clean} ||= {}; - %$clean = ( - %$clean, - FILES => join ' ', grep { length $_ } ($clean->{FILES} || (), @_), - ); -} - -sub realclean_files { - my $self = shift; - my $realclean = $self->makemaker_args->{realclean} ||= {}; - %$realclean = ( - %$realclean, - FILES => join ' ', grep { length $_ } ($realclean->{FILES} || (), @_), - ); -} - -sub libs { - my $self = shift; - my $libs = ref $_[0] ? 
shift : [ shift ]; - $self->makemaker_args( LIBS => $libs ); -} - -sub inc { - my $self = shift; - $self->makemaker_args( INC => shift ); -} - -sub _wanted_t { -} - -sub tests_recursive { - my $self = shift; - my $dir = shift || 't'; - unless ( -d $dir ) { - die "tests_recursive dir '$dir' does not exist"; - } - my %tests = map { $_ => 1 } split / /, ($self->tests || ''); - require File::Find; - File::Find::find( - sub { /\.t$/ and -f $_ and $tests{"$File::Find::dir/*.t"} = 1 }, - $dir - ); - $self->tests( join ' ', sort keys %tests ); -} - -sub write { - my $self = shift; - die "&Makefile->write() takes no arguments\n" if @_; - - # Check the current Perl version - my $perl_version = $self->perl_version; - if ( $perl_version ) { - eval "use $perl_version; 1" - or die "ERROR: perl: Version $] is installed, " - . "but we need version >= $perl_version"; - } - - # Make sure we have a new enough MakeMaker - require ExtUtils::MakeMaker; - - if ( $perl_version and $self->_cmp($perl_version, '5.006') >= 0 ) { - # MakeMaker can complain about module versions that include - # an underscore, even though its own version may contain one! - # Hence the funny regexp to get rid of it. See RT #35800 - # for details. - my $v = $ExtUtils::MakeMaker::VERSION =~ /^(\d+\.\d+)/; - $self->build_requires( 'ExtUtils::MakeMaker' => $v ); - $self->configure_requires( 'ExtUtils::MakeMaker' => $v ); - } else { - # Allow legacy-compatibility with 5.005 by depending on the - # most recent EU:MM that supported 5.005. - $self->build_requires( 'ExtUtils::MakeMaker' => 6.42 ); - $self->configure_requires( 'ExtUtils::MakeMaker' => 6.42 ); - } - - # Generate the MakeMaker params - my $args = $self->makemaker_args; - $args->{DISTNAME} = $self->name; - $args->{NAME} = $self->module_name || $self->name; - $args->{NAME} =~ s/-/::/g; - $args->{VERSION} = $self->version or die <<'EOT'; -ERROR: Can't determine distribution version. Please specify it -explicitly via 'version' in Makefile.PL, or set a valid $VERSION -in a module, and provide its file path via 'version_from' (or -'all_from' if you prefer) in Makefile.PL. -EOT - - $DB::single = 1; - if ( $self->tests ) { - my @tests = split ' ', $self->tests; - my %seen; - $args->{test} = { - TESTS => (join ' ', grep {!$seen{$_}++} @tests), - }; - } elsif ( $Module::Install::ExtraTests::use_extratests ) { - # Module::Install::ExtraTests doesn't set $self->tests and does its own tests via harness. - # So, just ignore our xt tests here. 
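-		# (This branch intentionally does nothing.)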
- } elsif ( -d 'xt' and ($Module::Install::AUTHOR or $ENV{RELEASE_TESTING}) ) { - $args->{test} = { - TESTS => join( ' ', map { "$_/*.t" } grep { -d $_ } qw{ t xt } ), - }; - } - if ( $] >= 5.005 ) { - $args->{ABSTRACT} = $self->abstract; - $args->{AUTHOR} = join ', ', @{$self->author || []}; - } - if ( $self->makemaker(6.10) ) { - $args->{NO_META} = 1; - #$args->{NO_MYMETA} = 1; - } - if ( $self->makemaker(6.17) and $self->sign ) { - $args->{SIGN} = 1; - } - unless ( $self->is_admin ) { - delete $args->{SIGN}; - } - if ( $self->makemaker(6.31) and $self->license ) { - $args->{LICENSE} = $self->license; - } - - my $prereq = ($args->{PREREQ_PM} ||= {}); - %$prereq = ( %$prereq, - map { @$_ } # flatten [module => version] - map { @$_ } - grep $_, - ($self->requires) - ); - - # Remove any reference to perl, PREREQ_PM doesn't support it - delete $args->{PREREQ_PM}->{perl}; - - # Merge both kinds of requires into BUILD_REQUIRES - my $build_prereq = ($args->{BUILD_REQUIRES} ||= {}); - %$build_prereq = ( %$build_prereq, - map { @$_ } # flatten [module => version] - map { @$_ } - grep $_, - ($self->configure_requires, $self->build_requires) - ); - - # Remove any reference to perl, BUILD_REQUIRES doesn't support it - delete $args->{BUILD_REQUIRES}->{perl}; - - # Delete bundled dists from prereq_pm, add it to Makefile DIR - my $subdirs = ($args->{DIR} || []); - if ($self->bundles) { - my %processed; - foreach my $bundle (@{ $self->bundles }) { - my ($mod_name, $dist_dir) = @$bundle; - delete $prereq->{$mod_name}; - $dist_dir = File::Basename::basename($dist_dir); # dir for building this module - if (not exists $processed{$dist_dir}) { - if (-d $dist_dir) { - # List as sub-directory to be processed by make - push @$subdirs, $dist_dir; - } - # Else do nothing: the module is already present on the system - $processed{$dist_dir} = undef; - } - } - } - - unless ( $self->makemaker('6.55_03') ) { - %$prereq = (%$prereq,%$build_prereq); - delete $args->{BUILD_REQUIRES}; - } - - if ( my $perl_version = $self->perl_version ) { - eval "use $perl_version; 1" - or die "ERROR: perl: Version $] is installed, " - . "but we need version >= $perl_version"; - - if ( $self->makemaker(6.48) ) { - $args->{MIN_PERL_VERSION} = $perl_version; - } - } - - if ($self->installdirs) { - warn qq{old INSTALLDIRS (probably set by makemaker_args) is overriden by installdirs\n} if $args->{INSTALLDIRS}; - $args->{INSTALLDIRS} = $self->installdirs; - } - - my %args = map { - ( $_ => $args->{$_} ) } grep {defined($args->{$_} ) - } keys %$args; - - my $user_preop = delete $args{dist}->{PREOP}; - if ( my $preop = $self->admin->preop($user_preop) ) { - foreach my $key ( keys %$preop ) { - $args{dist}->{$key} = $preop->{$key}; - } - } - - my $mm = ExtUtils::MakeMaker::WriteMakefile(%args); - $self->fix_up_makefile($mm->{FIRST_MAKEFILE} || 'Makefile'); -} - -sub fix_up_makefile { - my $self = shift; - my $makefile_name = shift; - my $top_class = ref($self->_top) || ''; - my $top_version = $self->_top->VERSION || ''; - - my $preamble = $self->preamble - ? "# Preamble by $top_class $top_version\n" - . $self->preamble - : ''; - my $postamble = "# Postamble by $top_class $top_version\n" - . 
($self->postamble || '');
-
-	local *MAKEFILE;
-	open MAKEFILE, "+< $makefile_name" or die "fix_up_makefile: Couldn't open $makefile_name: $!";
-	eval { flock MAKEFILE, LOCK_EX };
-	my $makefile = do { local $/; <MAKEFILE> };
-
-	$makefile =~ s/\b(test_harness\(\$\(TEST_VERBOSE\), )/$1'inc', /;
-	$makefile =~ s/( -I\$\(INST_ARCHLIB\))/ -Iinc$1/g;
-	$makefile =~ s/( "-I\$\(INST_LIB\)")/ "-Iinc"$1/g;
-	$makefile =~ s/^(FULLPERL = .*)/$1 "-Iinc"/m;
-	$makefile =~ s/^(PERL = .*)/$1 "-Iinc"/m;
-
-	# Module::Install will never be used to build the Core Perl
-	# Sometimes PERL_LIB and PERL_ARCHLIB get written anyway, which breaks
-	# PREFIX/PERL5LIB, and thus, install_share. Blank them if they exist
-	$makefile =~ s/^PERL_LIB = .+/PERL_LIB =/m;
-	#$makefile =~ s/^PERL_ARCHLIB = .+/PERL_ARCHLIB =/m;
-
-	# Perl 5.005 mentions PERL_LIB explicitly, so we have to remove that as well.
-	$makefile =~ s/(\"?)-I\$\(PERL_LIB\)\1//g;
-
-	# XXX - This is currently unused; not sure if it breaks other MM-users
-	# $makefile =~ s/^pm_to_blib\s+:\s+/pm_to_blib :: /mg;
-
-	seek MAKEFILE, 0, SEEK_SET;
-	truncate MAKEFILE, 0;
-	print MAKEFILE "$preamble$makefile$postamble" or die $!;
-	close MAKEFILE or die $!;
-
-	1;
-}
-
-sub preamble {
-	my ($self, $text) = @_;
-	$self->{preamble} = $text . $self->{preamble} if defined $text;
-	$self->{preamble};
-}
-
-sub postamble {
-	my ($self, $text) = @_;
-	$self->{postamble} ||= $self->admin->postamble;
-	$self->{postamble} .= $text if defined $text;
-	$self->{postamble}
-}
-
-1;
-
-__END__
-
-#line 541
diff -Nru check-mk-1.2.2p3/=unpacked-tar10=/api/perl/inc/Module/Install/Metadata.pm check-mk-1.2.6p12/=unpacked-tar10=/api/perl/inc/Module/Install/Metadata.pm
--- check-mk-1.2.2p3/=unpacked-tar10=/api/perl/inc/Module/Install/Metadata.pm 2012-10-04 13:12:37.000000000 +0000
+++ check-mk-1.2.6p12/=unpacked-tar10=/api/perl/inc/Module/Install/Metadata.pm 1970-01-01 00:00:00.000000000 +0000
@@ -1,715 +0,0 @@
-#line 1
-package Module::Install::Metadata;
-
-use strict 'vars';
-use Module::Install::Base ();
-
-use vars qw{$VERSION @ISA $ISCORE};
-BEGIN {
-	$VERSION = '1.00';
-	@ISA     = 'Module::Install::Base';
-	$ISCORE  = 1;
-}
-
-my @boolean_keys = qw{
-	sign
-};
-
-my @scalar_keys = qw{
-	name
-	module_name
-	abstract
-	version
-	distribution_type
-	tests
-	installdirs
-};
-
-my @tuple_keys = qw{
-	configure_requires
-	build_requires
-	requires
-	recommends
-	bundles
-	resources
-};
-
-my @resource_keys = qw{
-	homepage
-	bugtracker
-	repository
-};
-
-my @array_keys = qw{
-	keywords
-	author
-};
-
-*authors = \&author;
-
-sub Meta              { shift          }
-sub Meta_BooleanKeys  { @boolean_keys  }
-sub Meta_ScalarKeys   { @scalar_keys   }
-sub Meta_TupleKeys    { @tuple_keys    }
-sub Meta_ResourceKeys { @resource_keys }
-sub Meta_ArrayKeys    { @array_keys    }
-
-foreach my $key ( @boolean_keys ) {
-	*$key = sub {
-		my $self = shift;
-		if ( defined wantarray and not @_ ) {
-			return $self->{values}->{$key};
-		}
-		$self->{values}->{$key} = ( @_ ?
$_[0] : 1 ); - return $self; - }; -} - -foreach my $key ( @scalar_keys ) { - *$key = sub { - my $self = shift; - return $self->{values}->{$key} if defined wantarray and !@_; - $self->{values}->{$key} = shift; - return $self; - }; -} - -foreach my $key ( @array_keys ) { - *$key = sub { - my $self = shift; - return $self->{values}->{$key} if defined wantarray and !@_; - $self->{values}->{$key} ||= []; - push @{$self->{values}->{$key}}, @_; - return $self; - }; -} - -foreach my $key ( @resource_keys ) { - *$key = sub { - my $self = shift; - unless ( @_ ) { - return () unless $self->{values}->{resources}; - return map { $_->[1] } - grep { $_->[0] eq $key } - @{ $self->{values}->{resources} }; - } - return $self->{values}->{resources}->{$key} unless @_; - my $uri = shift or die( - "Did not provide a value to $key()" - ); - $self->resources( $key => $uri ); - return 1; - }; -} - -foreach my $key ( grep { $_ ne "resources" } @tuple_keys) { - *$key = sub { - my $self = shift; - return $self->{values}->{$key} unless @_; - my @added; - while ( @_ ) { - my $module = shift or last; - my $version = shift || 0; - push @added, [ $module, $version ]; - } - push @{ $self->{values}->{$key} }, @added; - return map {@$_} @added; - }; -} - -# Resource handling -my %lc_resource = map { $_ => 1 } qw{ - homepage - license - bugtracker - repository -}; - -sub resources { - my $self = shift; - while ( @_ ) { - my $name = shift or last; - my $value = shift or next; - if ( $name eq lc $name and ! $lc_resource{$name} ) { - die("Unsupported reserved lowercase resource '$name'"); - } - $self->{values}->{resources} ||= []; - push @{ $self->{values}->{resources} }, [ $name, $value ]; - } - $self->{values}->{resources}; -} - -# Aliases for build_requires that will have alternative -# meanings in some future version of META.yml. -sub test_requires { shift->build_requires(@_) } -sub install_requires { shift->build_requires(@_) } - -# Aliases for installdirs options -sub install_as_core { $_[0]->installdirs('perl') } -sub install_as_cpan { $_[0]->installdirs('site') } -sub install_as_site { $_[0]->installdirs('site') } -sub install_as_vendor { $_[0]->installdirs('vendor') } - -sub dynamic_config { - my $self = shift; - unless ( @_ ) { - warn "You MUST provide an explicit true/false value to dynamic_config\n"; - return $self; - } - $self->{values}->{dynamic_config} = $_[0] ? 1 : 0; - return 1; -} - -sub perl_version { - my $self = shift; - return $self->{values}->{perl_version} unless @_; - my $version = shift or die( - "Did not provide a value to perl_version()" - ); - - # Normalize the version - $version = $self->_perl_version($version); - - # We don't support the reall old versions - unless ( $version >= 5.005 ) { - die "Module::Install only supports 5.005 or newer (use ExtUtils::MakeMaker)\n"; - } - - $self->{values}->{perl_version} = $version; -} - -sub all_from { - my ( $self, $file ) = @_; - - unless ( defined($file) ) { - my $name = $self->name or die( - "all_from called with no args without setting name() first" - ); - $file = join('/', 'lib', split(/-/, $name)) . '.pm'; - $file =~ s{.*/}{} unless -e $file; - unless ( -e $file ) { - die("all_from cannot find $file from $name"); - } - } - unless ( -f $file ) { - die("The path '$file' does not exist, or is not a file"); - } - - $self->{values}{all_from} = $file; - - # Some methods pull from POD instead of code. 
- # If there is a matching .pod, use that instead - my $pod = $file; - $pod =~ s/\.pm$/.pod/i; - $pod = $file unless -e $pod; - - # Pull the different values - $self->name_from($file) unless $self->name; - $self->version_from($file) unless $self->version; - $self->perl_version_from($file) unless $self->perl_version; - $self->author_from($pod) unless @{$self->author || []}; - $self->license_from($pod) unless $self->license; - $self->abstract_from($pod) unless $self->abstract; - - return 1; -} - -sub provides { - my $self = shift; - my $provides = ( $self->{values}->{provides} ||= {} ); - %$provides = (%$provides, @_) if @_; - return $provides; -} - -sub auto_provides { - my $self = shift; - return $self unless $self->is_admin; - unless (-e 'MANIFEST') { - warn "Cannot deduce auto_provides without a MANIFEST, skipping\n"; - return $self; - } - # Avoid spurious warnings as we are not checking manifest here. - local $SIG{__WARN__} = sub {1}; - require ExtUtils::Manifest; - local *ExtUtils::Manifest::manicheck = sub { return }; - - require Module::Build; - my $build = Module::Build->new( - dist_name => $self->name, - dist_version => $self->version, - license => $self->license, - ); - $self->provides( %{ $build->find_dist_packages || {} } ); -} - -sub feature { - my $self = shift; - my $name = shift; - my $features = ( $self->{values}->{features} ||= [] ); - my $mods; - - if ( @_ == 1 and ref( $_[0] ) ) { - # The user used ->feature like ->features by passing in the second - # argument as a reference. Accomodate for that. - $mods = $_[0]; - } else { - $mods = \@_; - } - - my $count = 0; - push @$features, ( - $name => [ - map { - ref($_) ? ( ref($_) eq 'HASH' ) ? %$_ : @$_ : $_ - } @$mods - ] - ); - - return @$features; -} - -sub features { - my $self = shift; - while ( my ( $name, $mods ) = splice( @_, 0, 2 ) ) { - $self->feature( $name, @$mods ); - } - return $self->{values}->{features} - ? @{ $self->{values}->{features} } - : (); -} - -sub no_index { - my $self = shift; - my $type = shift; - push @{ $self->{values}->{no_index}->{$type} }, @_ if $type; - return $self->{values}->{no_index}; -} - -sub read { - my $self = shift; - $self->include_deps( 'YAML::Tiny', 0 ); - - require YAML::Tiny; - my $data = YAML::Tiny::LoadFile('META.yml'); - - # Call methods explicitly in case user has already set some values. 
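-	# (Keys without a matching accessor are skipped; hash values are
-	# expanded pairwise as module => version, scalars are passed through.)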
- while ( my ( $key, $value ) = each %$data ) { - next unless $self->can($key); - if ( ref $value eq 'HASH' ) { - while ( my ( $module, $version ) = each %$value ) { - $self->can($key)->($self, $module => $version ); - } - } else { - $self->can($key)->($self, $value); - } - } - return $self; -} - -sub write { - my $self = shift; - return $self unless $self->is_admin; - $self->admin->write_meta; - return $self; -} - -sub version_from { - require ExtUtils::MM_Unix; - my ( $self, $file ) = @_; - $self->version( ExtUtils::MM_Unix->parse_version($file) ); - - # for version integrity check - $self->makemaker_args( VERSION_FROM => $file ); -} - -sub abstract_from { - require ExtUtils::MM_Unix; - my ( $self, $file ) = @_; - $self->abstract( - bless( - { DISTNAME => $self->name }, - 'ExtUtils::MM_Unix' - )->parse_abstract($file) - ); -} - -# Add both distribution and module name -sub name_from { - my ($self, $file) = @_; - if ( - Module::Install::_read($file) =~ m/ - ^ \s* - package \s* - ([\w:]+) - \s* ; - /ixms - ) { - my ($name, $module_name) = ($1, $1); - $name =~ s{::}{-}g; - $self->name($name); - unless ( $self->module_name ) { - $self->module_name($module_name); - } - } else { - die("Cannot determine name from $file\n"); - } -} - -sub _extract_perl_version { - if ( - $_[0] =~ m/ - ^\s* - (?:use|require) \s* - v? - ([\d_\.]+) - \s* ; - /ixms - ) { - my $perl_version = $1; - $perl_version =~ s{_}{}g; - return $perl_version; - } else { - return; - } -} - -sub perl_version_from { - my $self = shift; - my $perl_version=_extract_perl_version(Module::Install::_read($_[0])); - if ($perl_version) { - $self->perl_version($perl_version); - } else { - warn "Cannot determine perl version info from $_[0]\n"; - return; - } -} - -sub author_from { - my $self = shift; - my $content = Module::Install::_read($_[0]); - if ($content =~ m/ - =head \d \s+ (?:authors?)\b \s* - ([^\n]*) - | - =head \d \s+ (?:licen[cs]e|licensing|copyright|legal)\b \s* - .*? copyright .*? \d\d\d[\d.]+ \s* (?:\bby\b)? \s* - ([^\n]*) - /ixms) { - my $author = $1 || $2; - - # XXX: ugly but should work anyway... - if (eval "require Pod::Escapes; 1") { - # Pod::Escapes has a mapping table. - # It's in core of perl >= 5.9.3, and should be installed - # as one of the Pod::Simple's prereqs, which is a prereq - # of Pod::Text 3.x (see also below). - $author =~ s{ E<( (\d+) | ([A-Za-z]+) )> } - { - defined $2 - ? chr($2) - : defined $Pod::Escapes::Name2character_number{$1} - ? chr($Pod::Escapes::Name2character_number{$1}) - : do { - warn "Unknown escape: E<$1>"; - "E<$1>"; - }; - }gex; - } - elsif (eval "require Pod::Text; 1" && $Pod::Text::VERSION < 3) { - # Pod::Text < 3.0 has yet another mapping table, - # though the table name of 2.x and 1.x are different. - # (1.x is in core of Perl < 5.6, 2.x is in core of - # Perl < 5.9.3) - my $mapping = ($Pod::Text::VERSION < 2) - ? \%Pod::Text::HTML_Escapes - : \%Pod::Text::ESCAPES; - $author =~ s{ E<( (\d+) | ([A-Za-z]+) )> } - { - defined $2 - ? chr($2) - : defined $mapping->{$1} - ? 
$mapping->{$1} - : do { - warn "Unknown escape: E<$1>"; - "E<$1>"; - }; - }gex; - } - else { - $author =~ s{E}{<}g; - $author =~ s{E}{>}g; - } - $self->author($author); - } else { - warn "Cannot determine author info from $_[0]\n"; - } -} - -#Stolen from M::B -my %license_urls = ( - perl => 'http://dev.perl.org/licenses/', - apache => 'http://apache.org/licenses/LICENSE-2.0', - apache_1_1 => 'http://apache.org/licenses/LICENSE-1.1', - artistic => 'http://opensource.org/licenses/artistic-license.php', - artistic_2 => 'http://opensource.org/licenses/artistic-license-2.0.php', - lgpl => 'http://opensource.org/licenses/lgpl-license.php', - lgpl2 => 'http://opensource.org/licenses/lgpl-2.1.php', - lgpl3 => 'http://opensource.org/licenses/lgpl-3.0.html', - bsd => 'http://opensource.org/licenses/bsd-license.php', - gpl => 'http://opensource.org/licenses/gpl-license.php', - gpl2 => 'http://opensource.org/licenses/gpl-2.0.php', - gpl3 => 'http://opensource.org/licenses/gpl-3.0.html', - mit => 'http://opensource.org/licenses/mit-license.php', - mozilla => 'http://opensource.org/licenses/mozilla1.1.php', - open_source => undef, - unrestricted => undef, - restrictive => undef, - unknown => undef, -); - -sub license { - my $self = shift; - return $self->{values}->{license} unless @_; - my $license = shift or die( - 'Did not provide a value to license()' - ); - $license = __extract_license($license) || lc $license; - $self->{values}->{license} = $license; - - # Automatically fill in license URLs - if ( $license_urls{$license} ) { - $self->resources( license => $license_urls{$license} ); - } - - return 1; -} - -sub _extract_license { - my $pod = shift; - my $matched; - return __extract_license( - ($matched) = $pod =~ m/ - (=head \d \s+ L(?i:ICEN[CS]E|ICENSING)\b.*?) - (=head \d.*|=cut.*|)\z - /xms - ) || __extract_license( - ($matched) = $pod =~ m/ - (=head \d \s+ (?:C(?i:OPYRIGHTS?)|L(?i:EGAL))\b.*?) 
- (=head \d.*|=cut.*|)\z - /xms - ); -} - -sub __extract_license { - my $license_text = shift or return; - my @phrases = ( - '(?:under )?the same (?:terms|license) as (?:perl|the perl (?:\d )?programming language)' => 'perl', 1, - '(?:under )?the terms of (?:perl|the perl programming language) itself' => 'perl', 1, - 'Artistic and GPL' => 'perl', 1, - 'GNU general public license' => 'gpl', 1, - 'GNU public license' => 'gpl', 1, - 'GNU lesser general public license' => 'lgpl', 1, - 'GNU lesser public license' => 'lgpl', 1, - 'GNU library general public license' => 'lgpl', 1, - 'GNU library public license' => 'lgpl', 1, - 'GNU Free Documentation license' => 'unrestricted', 1, - 'GNU Affero General Public License' => 'open_source', 1, - '(?:Free)?BSD license' => 'bsd', 1, - 'Artistic license' => 'artistic', 1, - 'Apache (?:Software )?license' => 'apache', 1, - 'GPL' => 'gpl', 1, - 'LGPL' => 'lgpl', 1, - 'BSD' => 'bsd', 1, - 'Artistic' => 'artistic', 1, - 'MIT' => 'mit', 1, - 'Mozilla Public License' => 'mozilla', 1, - 'Q Public License' => 'open_source', 1, - 'OpenSSL License' => 'unrestricted', 1, - 'SSLeay License' => 'unrestricted', 1, - 'zlib License' => 'open_source', 1, - 'proprietary' => 'proprietary', 0, - ); - while ( my ($pattern, $license, $osi) = splice(@phrases, 0, 3) ) { - $pattern =~ s#\s+#\\s+#gs; - if ( $license_text =~ /\b$pattern\b/i ) { - return $license; - } - } - return ''; -} - -sub license_from { - my $self = shift; - if (my $license=_extract_license(Module::Install::_read($_[0]))) { - $self->license($license); - } else { - warn "Cannot determine license info from $_[0]\n"; - return 'unknown'; - } -} - -sub _extract_bugtracker { - my @links = $_[0] =~ m#L<( - \Qhttp://rt.cpan.org/\E[^>]+| - \Qhttp://github.com/\E[\w_]+/[\w_]+/issues| - \Qhttp://code.google.com/p/\E[\w_\-]+/issues/list - )>#gx; - my %links; - @links{@links}=(); - @links=keys %links; - return @links; -} - -sub bugtracker_from { - my $self = shift; - my $content = Module::Install::_read($_[0]); - my @links = _extract_bugtracker($content); - unless ( @links ) { - warn "Cannot determine bugtracker info from $_[0]\n"; - return 0; - } - if ( @links > 1 ) { - warn "Found more than one bugtracker link in $_[0]\n"; - return 0; - } - - # Set the bugtracker - bugtracker( $links[0] ); - return 1; -} - -sub requires_from { - my $self = shift; - my $content = Module::Install::_readperl($_[0]); - my @requires = $content =~ m/^use\s+([^\W\d]\w*(?:::\w+)*)\s+([\d\.]+)/mg; - while ( @requires ) { - my $module = shift @requires; - my $version = shift @requires; - $self->requires( $module => $version ); - } -} - -sub test_requires_from { - my $self = shift; - my $content = Module::Install::_readperl($_[0]); - my @requires = $content =~ m/^use\s+([^\W\d]\w*(?:::\w+)*)\s+([\d\.]+)/mg; - while ( @requires ) { - my $module = shift @requires; - my $version = shift @requires; - $self->test_requires( $module => $version ); - } -} - -# Convert triple-part versions (eg, 5.6.1 or 5.8.9) to -# numbers (eg, 5.006001 or 5.008009). -# Also, convert double-part versions (eg, 5.8) -sub _perl_version { - my $v = $_[-1]; - $v =~ s/^([1-9])\.([1-9]\d?\d?)$/sprintf("%d.%03d",$1,$2)/e; - $v =~ s/^([1-9])\.([1-9]\d?\d?)\.(0|[1-9]\d?\d?)$/sprintf("%d.%03d%03d",$1,$2,$3 || 0)/e; - $v =~ s/(\.\d\d\d)000$/$1/; - $v =~ s/_.+$//; - if ( ref($v) ) { - # Numify - $v = $v + 0; - } - return $v; -} - -sub add_metadata { - my $self = shift; - my %hash = @_; - for my $key (keys %hash) { - warn "add_metadata: $key is not prefixed with 'x_'.\n" . 
- "Use appopriate function to add non-private metadata.\n" unless $key =~ /^x_/; - $self->{values}->{$key} = $hash{$key}; - } -} - - -###################################################################### -# MYMETA Support - -sub WriteMyMeta { - die "WriteMyMeta has been deprecated"; -} - -sub write_mymeta_yaml { - my $self = shift; - - # We need YAML::Tiny to write the MYMETA.yml file - unless ( eval { require YAML::Tiny; 1; } ) { - return 1; - } - - # Generate the data - my $meta = $self->_write_mymeta_data or return 1; - - # Save as the MYMETA.yml file - print "Writing MYMETA.yml\n"; - YAML::Tiny::DumpFile('MYMETA.yml', $meta); -} - -sub write_mymeta_json { - my $self = shift; - - # We need JSON to write the MYMETA.json file - unless ( eval { require JSON; 1; } ) { - return 1; - } - - # Generate the data - my $meta = $self->_write_mymeta_data or return 1; - - # Save as the MYMETA.yml file - print "Writing MYMETA.json\n"; - Module::Install::_write( - 'MYMETA.json', - JSON->new->pretty(1)->canonical->encode($meta), - ); -} - -sub _write_mymeta_data { - my $self = shift; - - # If there's no existing META.yml there is nothing we can do - return undef unless -f 'META.yml'; - - # We need Parse::CPAN::Meta to load the file - unless ( eval { require Parse::CPAN::Meta; 1; } ) { - return undef; - } - - # Merge the perl version into the dependencies - my $val = $self->Meta->{values}; - my $perl = delete $val->{perl_version}; - if ( $perl ) { - $val->{requires} ||= []; - my $requires = $val->{requires}; - - # Canonize to three-dot version after Perl 5.6 - if ( $perl >= 5.006 ) { - $perl =~ s{^(\d+)\.(\d\d\d)(\d*)}{join('.', $1, int($2||0), int($3||0))}e - } - unshift @$requires, [ perl => $perl ]; - } - - # Load the advisory META.yml file - my @yaml = Parse::CPAN::Meta::LoadFile('META.yml'); - my $meta = $yaml[0]; - - # Overwrite the non-configure dependency hashs - delete $meta->{requires}; - delete $meta->{build_requires}; - delete $meta->{recommends}; - if ( exists $val->{requires} ) { - $meta->{requires} = { map { @$_ } @{ $val->{requires} } }; - } - if ( exists $val->{build_requires} ) { - $meta->{build_requires} = { map { @$_ } @{ $val->{build_requires} } }; - } - - return $meta; -} - -1; diff -Nru check-mk-1.2.2p3/=unpacked-tar10=/api/perl/inc/Module/Install/Win32.pm check-mk-1.2.6p12/=unpacked-tar10=/api/perl/inc/Module/Install/Win32.pm --- check-mk-1.2.2p3/=unpacked-tar10=/api/perl/inc/Module/Install/Win32.pm 2012-10-04 13:12:37.000000000 +0000 +++ check-mk-1.2.6p12/=unpacked-tar10=/api/perl/inc/Module/Install/Win32.pm 1970-01-01 00:00:00.000000000 +0000 @@ -1,64 +0,0 @@ -#line 1 -package Module::Install::Win32; - -use strict; -use Module::Install::Base (); - -use vars qw{$VERSION @ISA $ISCORE}; -BEGIN { - $VERSION = '1.00'; - @ISA = 'Module::Install::Base'; - $ISCORE = 1; -} - -# determine if the user needs nmake, and download it if needed -sub check_nmake { - my $self = shift; - $self->load('can_run'); - $self->load('get_file'); - - require Config; - return unless ( - $^O eq 'MSWin32' and - $Config::Config{make} and - $Config::Config{make} =~ /^nmake\b/i and - ! 
$self->can_run('nmake') - ); - - print "The required 'nmake' executable not found, fetching it...\n"; - - require File::Basename; - my $rv = $self->get_file( - url => 'http://download.microsoft.com/download/vc15/Patch/1.52/W95/EN-US/Nmake15.exe', - ftp_url => 'ftp://ftp.microsoft.com/Softlib/MSLFILES/Nmake15.exe', - local_dir => File::Basename::dirname($^X), - size => 51928, - run => 'Nmake15.exe /o > nul', - check_for => 'Nmake.exe', - remove => 1, - ); - - die <<'END_MESSAGE' unless $rv; - -------------------------------------------------------------------------------- - -Since you are using Microsoft Windows, you will need the 'nmake' utility -before installation. It's available at: - - http://download.microsoft.com/download/vc15/Patch/1.52/W95/EN-US/Nmake15.exe - or - ftp://ftp.microsoft.com/Softlib/MSLFILES/Nmake15.exe - -Please download the file manually, save it to a directory in %PATH% (e.g. -C:\WINDOWS\COMMAND\), then launch the MS-DOS command line shell, "cd" to -that directory, and run "Nmake15.exe" from there; that will create the -'nmake.exe' file needed by this module. - -You may then resume the installation process described in README. - -------------------------------------------------------------------------------- -END_MESSAGE - -} - -1; diff -Nru check-mk-1.2.2p3/=unpacked-tar10=/api/perl/inc/Module/Install/WriteAll.pm check-mk-1.2.6p12/=unpacked-tar10=/api/perl/inc/Module/Install/WriteAll.pm --- check-mk-1.2.2p3/=unpacked-tar10=/api/perl/inc/Module/Install/WriteAll.pm 2012-10-04 13:12:37.000000000 +0000 +++ check-mk-1.2.6p12/=unpacked-tar10=/api/perl/inc/Module/Install/WriteAll.pm 1970-01-01 00:00:00.000000000 +0000 @@ -1,63 +0,0 @@ -#line 1 -package Module::Install::WriteAll; - -use strict; -use Module::Install::Base (); - -use vars qw{$VERSION @ISA $ISCORE}; -BEGIN { - $VERSION = '1.00'; - @ISA = qw{Module::Install::Base}; - $ISCORE = 1; -} - -sub WriteAll { - my $self = shift; - my %args = ( - meta => 1, - sign => 0, - inline => 0, - check_nmake => 1, - @_, - ); - - $self->sign(1) if $args{sign}; - $self->admin->WriteAll(%args) if $self->is_admin; - - $self->check_nmake if $args{check_nmake}; - unless ( $self->makemaker_args->{PL_FILES} ) { - # XXX: This still may be a bit over-defensive... - unless ($self->makemaker(6.25)) { - $self->makemaker_args( PL_FILES => {} ) if -f 'Build.PL'; - } - } - - # Until ExtUtils::MakeMaker support MYMETA.yml, make sure - # we clean it up properly ourself. - $self->realclean_files('MYMETA.yml'); - - if ( $args{inline} ) { - $self->Inline->write; - } else { - $self->Makefile->write; - } - - # The Makefile write process adds a couple of dependencies, - # so write the META.yml files after the Makefile. - if ( $args{meta} ) { - $self->Meta->write; - } - - # Experimental support for MYMETA - if ( $ENV{X_MYMETA} ) { - if ( $ENV{X_MYMETA} eq 'JSON' ) { - $self->Meta->write_mymeta_json; - } else { - $self->Meta->write_mymeta_yaml; - } - } - - return 1; -} - -1; diff -Nru check-mk-1.2.2p3/=unpacked-tar10=/api/perl/inc/Module/Install.pm check-mk-1.2.6p12/=unpacked-tar10=/api/perl/inc/Module/Install.pm --- check-mk-1.2.2p3/=unpacked-tar10=/api/perl/inc/Module/Install.pm 2012-10-04 13:12:37.000000000 +0000 +++ check-mk-1.2.6p12/=unpacked-tar10=/api/perl/inc/Module/Install.pm 1970-01-01 00:00:00.000000000 +0000 @@ -1,470 +0,0 @@ -#line 1 -package Module::Install; - -# For any maintainers: -# The load order for Module::Install is a bit magic. -# It goes something like this... 
-# -# IF ( host has Module::Install installed, creating author mode ) { -# 1. Makefile.PL calls "use inc::Module::Install" -# 2. $INC{inc/Module/Install.pm} set to installed version of inc::Module::Install -# 3. The installed version of inc::Module::Install loads -# 4. inc::Module::Install calls "require Module::Install" -# 5. The ./inc/ version of Module::Install loads -# } ELSE { -# 1. Makefile.PL calls "use inc::Module::Install" -# 2. $INC{inc/Module/Install.pm} set to ./inc/ version of Module::Install -# 3. The ./inc/ version of Module::Install loads -# } - -use 5.005; -use strict 'vars'; -use Cwd (); -use File::Find (); -use File::Path (); - -use vars qw{$VERSION $MAIN}; -BEGIN { - # All Module::Install core packages now require synchronised versions. - # This will be used to ensure we don't accidentally load old or - # different versions of modules. - # This is not enforced yet, but will be some time in the next few - # releases once we can make sure it won't clash with custom - # Module::Install extensions. - $VERSION = '1.00'; - - # Storage for the pseudo-singleton - $MAIN = undef; - - *inc::Module::Install::VERSION = *VERSION; - @inc::Module::Install::ISA = __PACKAGE__; - -} - -sub import { - my $class = shift; - my $self = $class->new(@_); - my $who = $self->_caller; - - #------------------------------------------------------------- - # all of the following checks should be included in import(), - # to allow "eval 'require Module::Install; 1' to test - # installation of Module::Install. (RT #51267) - #------------------------------------------------------------- - - # Whether or not inc::Module::Install is actually loaded, the - # $INC{inc/Module/Install.pm} is what will still get set as long as - # the caller loaded module this in the documented manner. - # If not set, the caller may NOT have loaded the bundled version, and thus - # they may not have a MI version that works with the Makefile.PL. This would - # result in false errors or unexpected behaviour. And we don't want that. - my $file = join( '/', 'inc', split /::/, __PACKAGE__ ) . '.pm'; - unless ( $INC{$file} ) { die <<"END_DIE" } - -Please invoke ${\__PACKAGE__} with: - - use inc::${\__PACKAGE__}; - -not: - - use ${\__PACKAGE__}; - -END_DIE - - # This reportedly fixes a rare Win32 UTC file time issue, but - # as this is a non-cross-platform XS module not in the core, - # we shouldn't really depend on it. See RT #24194 for detail. - # (Also, this module only supports Perl 5.6 and above). - eval "use Win32::UTCFileTime" if $^O eq 'MSWin32' && $] >= 5.006; - - # If the script that is loading Module::Install is from the future, - # then make will detect this and cause it to re-run over and over - # again. This is bad. Rather than taking action to touch it (which - # is unreliable on some platforms and requires write permissions) - # for now we should catch this and refuse to run. - if ( -f $0 ) { - my $s = (stat($0))[9]; - - # If the modification time is only slightly in the future, - # sleep briefly to remove the problem. - my $a = $s - time; - if ( $a > 0 and $a < 5 ) { sleep 5 } - - # Too far in the future, throw an error. - my $t = time; - if ( $s > $t ) { die <<"END_DIE" } - -Your installer $0 has a modification time in the future ($s > $t). - -This is known to create infinite loops in make. - -Please correct this, then run $0 again. - -END_DIE - } - - - # Build.PL was formerly supported, but no longer is due to excessive - # difficulty in implementing every single feature twice. 
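-	# Refuse to run at all when invoked as a Build.PL.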
- if ( $0 =~ /Build.PL$/i ) { die <<"END_DIE" } - -Module::Install no longer supports Build.PL. - -It was impossible to maintain duel backends, and has been deprecated. - -Please remove all Build.PL files and only use the Makefile.PL installer. - -END_DIE - - #------------------------------------------------------------- - - # To save some more typing in Module::Install installers, every... - # use inc::Module::Install - # ...also acts as an implicit use strict. - $^H |= strict::bits(qw(refs subs vars)); - - #------------------------------------------------------------- - - unless ( -f $self->{file} ) { - foreach my $key (keys %INC) { - delete $INC{$key} if $key =~ /Module\/Install/; - } - - local $^W; - require "$self->{path}/$self->{dispatch}.pm"; - File::Path::mkpath("$self->{prefix}/$self->{author}"); - $self->{admin} = "$self->{name}::$self->{dispatch}"->new( _top => $self ); - $self->{admin}->init; - @_ = ($class, _self => $self); - goto &{"$self->{name}::import"}; - } - - local $^W; - *{"${who}::AUTOLOAD"} = $self->autoload; - $self->preload; - - # Unregister loader and worker packages so subdirs can use them again - delete $INC{'inc/Module/Install.pm'}; - delete $INC{'Module/Install.pm'}; - - # Save to the singleton - $MAIN = $self; - - return 1; -} - -sub autoload { - my $self = shift; - my $who = $self->_caller; - my $cwd = Cwd::cwd(); - my $sym = "${who}::AUTOLOAD"; - $sym->{$cwd} = sub { - my $pwd = Cwd::cwd(); - if ( my $code = $sym->{$pwd} ) { - # Delegate back to parent dirs - goto &$code unless $cwd eq $pwd; - } - unless ($$sym =~ s/([^:]+)$//) { - # XXX: it looks like we can't retrieve the missing function - # via $$sym (usually $main::AUTOLOAD) in this case. - # I'm still wondering if we should slurp Makefile.PL to - # get some context or not ... - my ($package, $file, $line) = caller; - die <<"EOT"; -Unknown function is found at $file line $line. -Execution of $file aborted due to runtime errors. - -If you're a contributor to a project, you may need to install -some Module::Install extensions from CPAN (or other repository). -If you're a user of a module, please contact the author. -EOT - } - my $method = $1; - if ( uc($method) eq $method ) { - # Do nothing - return; - } elsif ( $method =~ /^_/ and $self->can($method) ) { - # Dispatch to the root M:I class - return $self->$method(@_); - } - - # Dispatch to the appropriate plugin - unshift @_, ( $self, $1 ); - goto &{$self->can('call')}; - }; -} - -sub preload { - my $self = shift; - unless ( $self->{extensions} ) { - $self->load_extensions( - "$self->{prefix}/$self->{path}", $self - ); - } - - my @exts = @{$self->{extensions}}; - unless ( @exts ) { - @exts = $self->{admin}->load_all_extensions; - } - - my %seen; - foreach my $obj ( @exts ) { - while (my ($method, $glob) = each %{ref($obj) . '::'}) { - next unless $obj->can($method); - next if $method =~ /^_/; - next if $method eq uc($method); - $seen{$method}++; - } - } - - my $who = $self->_caller; - foreach my $name ( sort keys %seen ) { - local $^W; - *{"${who}::$name"} = sub { - ${"${who}::AUTOLOAD"} = "${who}::$name"; - goto &{"${who}::AUTOLOAD"}; - }; - } -} - -sub new { - my ($class, %args) = @_; - - delete $INC{'FindBin.pm'}; - { - # to suppress the redefine warning - local $SIG{__WARN__} = sub {}; - require FindBin; - } - - # ignore the prefix on extension modules built from top level. 
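-	# (Compare the directory the installer lives in against the current
-	# directory; if they differ, this is a subdir build and the caller's
-	# prefix must not be reused.)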
- my $base_path = Cwd::abs_path($FindBin::Bin); - unless ( Cwd::abs_path(Cwd::cwd()) eq $base_path ) { - delete $args{prefix}; - } - return $args{_self} if $args{_self}; - - $args{dispatch} ||= 'Admin'; - $args{prefix} ||= 'inc'; - $args{author} ||= ($^O eq 'VMS' ? '_author' : '.author'); - $args{bundle} ||= 'inc/BUNDLES'; - $args{base} ||= $base_path; - $class =~ s/^\Q$args{prefix}\E:://; - $args{name} ||= $class; - $args{version} ||= $class->VERSION; - unless ( $args{path} ) { - $args{path} = $args{name}; - $args{path} =~ s!::!/!g; - } - $args{file} ||= "$args{base}/$args{prefix}/$args{path}.pm"; - $args{wrote} = 0; - - bless( \%args, $class ); -} - -sub call { - my ($self, $method) = @_; - my $obj = $self->load($method) or return; - splice(@_, 0, 2, $obj); - goto &{$obj->can($method)}; -} - -sub load { - my ($self, $method) = @_; - - $self->load_extensions( - "$self->{prefix}/$self->{path}", $self - ) unless $self->{extensions}; - - foreach my $obj (@{$self->{extensions}}) { - return $obj if $obj->can($method); - } - - my $admin = $self->{admin} or die <<"END_DIE"; -The '$method' method does not exist in the '$self->{prefix}' path! -Please remove the '$self->{prefix}' directory and run $0 again to load it. -END_DIE - - my $obj = $admin->load($method, 1); - push @{$self->{extensions}}, $obj; - - $obj; -} - -sub load_extensions { - my ($self, $path, $top) = @_; - - my $should_reload = 0; - unless ( grep { ! ref $_ and lc $_ eq lc $self->{prefix} } @INC ) { - unshift @INC, $self->{prefix}; - $should_reload = 1; - } - - foreach my $rv ( $self->find_extensions($path) ) { - my ($file, $pkg) = @{$rv}; - next if $self->{pathnames}{$pkg}; - - local $@; - my $new = eval { local $^W; require $file; $pkg->can('new') }; - unless ( $new ) { - warn $@ if $@; - next; - } - $self->{pathnames}{$pkg} = - $should_reload ? delete $INC{$file} : $INC{$file}; - push @{$self->{extensions}}, &{$new}($pkg, _top => $top ); - } - - $self->{extensions} ||= []; -} - -sub find_extensions { - my ($self, $path) = @_; - - my @found; - File::Find::find( sub { - my $file = $File::Find::name; - return unless $file =~ m!^\Q$path\E/(.+)\.pm\Z!is; - my $subpath = $1; - return if lc($subpath) eq lc($self->{dispatch}); - - $file = "$self->{path}/$subpath.pm"; - my $pkg = "$self->{name}::$subpath"; - $pkg =~ s!/!::!g; - - # If we have a mixed-case package name, assume case has been preserved - # correctly. Otherwise, root through the file to locate the case-preserved - # version of the package name. - if ( $subpath eq lc($subpath) || $subpath eq uc($subpath) ) { - my $content = Module::Install::_read($subpath . '.pm'); - my $in_pod = 0; - foreach ( split //, $content ) { - $in_pod = 1 if /^=\w/; - $in_pod = 0 if /^=cut/; - next if ($in_pod || /^=cut/); # skip pod text - next if /^\s*#/; # and comments - if ( m/^\s*package\s+($pkg)\s*;/i ) { - $pkg = $1; - last; - } - } - } - - push @found, [ $file, $pkg ]; - }, $path ) if -d $path; - - @found; -} - - - - - -##################################################################### -# Common Utility Functions - -sub _caller { - my $depth = 0; - my $call = caller($depth); - while ( $call eq __PACKAGE__ ) { - $depth++; - $call = caller($depth); - } - return $call; -} - -# Done in evals to avoid confusing Perl::MinimumVersion -eval( $] >= 5.006 ? 
<<'END_NEW' : <<'END_OLD' ); die $@ if $@;
-sub _read {
-	local *FH;
-	open( FH, '<', $_[0] ) or die "open($_[0]): $!";
-	my $string = do { local $/; <FH> };
-	close FH or die "close($_[0]): $!";
-	return $string;
-}
-END_NEW
-sub _read {
-	local *FH;
-	open( FH, "< $_[0]" ) or die "open($_[0]): $!";
-	my $string = do { local $/; <FH> };
-	close FH or die "close($_[0]): $!";
-	return $string;
-}
-END_OLD
-
-sub _readperl {
-	my $string = Module::Install::_read($_[0]);
-	$string =~ s/(?:\015{1,2}\012|\015|\012)/\n/sg;
-	$string =~ s/(\n)\n*__(?:DATA|END)__\b.*\z/$1/s;
-	$string =~ s/\n\n=\w+.+?\n\n=cut\b.+?\n+/\n\n/sg;
-	return $string;
-}
-
-sub _readpod {
-	my $string = Module::Install::_read($_[0]);
-	$string =~ s/(?:\015{1,2}\012|\015|\012)/\n/sg;
-	return $string if $_[0] =~ /\.pod\z/;
-	$string =~ s/(^|\n=cut\b.+?\n+)[^=\s].+?\n(\n=\w+|\z)/$1$2/sg;
-	$string =~ s/\n*=pod\b[^\n]*\n+/\n\n/sg;
-	$string =~ s/\n*=cut\b[^\n]*\n+/\n\n/sg;
-	$string =~ s/^\n+//s;
-	return $string;
-}
-
-# Done in evals to avoid confusing Perl::MinimumVersion
-eval( $] >= 5.006 ? <<'END_NEW' : <<'END_OLD' ); die $@ if $@;
-sub _write {
-	local *FH;
-	open( FH, '>', $_[0] ) or die "open($_[0]): $!";
-	foreach ( 1 .. $#_ ) {
-		print FH $_[$_] or die "print($_[0]): $!";
-	}
-	close FH or die "close($_[0]): $!";
-}
-END_NEW
-sub _write {
-	local *FH;
-	open( FH, "> $_[0]" ) or die "open($_[0]): $!";
-	foreach ( 1 .. $#_ ) {
-		print FH $_[$_] or die "print($_[0]): $!";
-	}
-	close FH or die "close($_[0]): $!";
-}
-END_OLD
-
-# _version is for processing module versions (eg, 1.03_05) not
-# Perl versions (eg, 5.8.1).
-sub _version ($) {
-	my $s = shift || 0;
-	my $d =()= $s =~ /(\.)/g;
-	if ( $d >= 2 ) {
-		# Normalise multipart versions
-		$s =~ s/(\.)(\d{1,3})/sprintf("$1%03d",$2)/eg;
-	}
-	$s =~ s/^(\d+)\.?//;
-	my $l = $1 || 0;
-	my @v = map {
-		$_ . '0' x (3 - length $_)
-	} $s =~ /(\d{1,3})\D?/g;
-	$l = $l . '.' . join '', @v if @v;
-	return $l + 0;
-}
-
-sub _cmp ($$) {
-	_version($_[0]) <=> _version($_[1]);
-}
-
-# Cloned from Params::Util::_CLASS
-sub _CLASS ($) {
-	(
-		defined $_[0]
-		and
-		! ref $_[0]
-		and
-		$_[0] =~ m/^[^\W\d]\w*(?:::\w+)*\z/s
-	) ? $_[0] : undef;
-}
-
-1;
-
-# Copyright 2008 - 2010 Adam Kennedy.
diff -Nru check-mk-1.2.2p3/=unpacked-tar10=/api/perl/lib/Monitoring/Livestatus/INET.pm check-mk-1.2.6p12/=unpacked-tar10=/api/perl/lib/Monitoring/Livestatus/INET.pm
--- check-mk-1.2.2p3/=unpacked-tar10=/api/perl/lib/Monitoring/Livestatus/INET.pm 2012-10-04 13:12:37.000000000 +0000
+++ check-mk-1.2.6p12/=unpacked-tar10=/api/perl/lib/Monitoring/Livestatus/INET.pm 1970-01-01 00:00:00.000000000 +0000
@@ -1,121 +0,0 @@
-package Monitoring::Livestatus::INET;
-
-use 5.000000;
-use strict;
-use warnings;
-use IO::Socket::INET;
-use Socket qw(IPPROTO_TCP TCP_NODELAY);
-use Carp;
-use base "Monitoring::Livestatus";
-
-=head1 NAME
-
-Monitoring::Livestatus::INET - connector with tcp sockets
-
-=head1 SYNOPSIS
-
-    use Monitoring::Livestatus;
-    my $nl = Monitoring::Livestatus::INET->new( 'localhost:9999' );
-    my $hosts = $nl->selectall_arrayref("GET hosts");
-
-=head1 CONSTRUCTOR
-
-=head2 new ( [ARGS] )
-
-Creates an C<Monitoring::Livestatus::INET> object. C<new> takes at least the server.
-Arguments are the same as in C<Monitoring::Livestatus>.
-If the constructor is only passed a single argument, it is assumed to
-be the C<server> specification. Use either socket OR server.
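-
-For example (an illustrative sketch; the option names follow the
-surrounding code, not any further documented API):
-
-    # single argument: taken as the peer specification
-    my $ml = Monitoring::Livestatus::INET->new( 'localhost:9999' );
-
-    # explicit options as understood by Monitoring::Livestatus->new()
-    my $ml2 = Monitoring::Livestatus::INET->new(
-        peer            => 'localhost:9999',
-        connect_timeout => 5,
-    );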
-
-=cut
-
-sub new {
-    my $class = shift;
-    unshift(@_, "peer") if scalar @_ == 1;
-    my(%options) = @_;
-    $options{'name'} = $options{'peer'} unless defined $options{'name'};
-
-    $options{'backend'} = $class;
-    my $self = Monitoring::Livestatus->new(%options);
-    bless $self, $class;
-    confess('not a scalar') if ref $self->{'peer'} ne '';
-
-    return $self;
-}
-
-
-########################################
-
-=head1 METHODS
-
-=cut
-
-sub _open {
-    my $self = shift;
-    my $sock;
-
-    eval {
-        local $SIG{'ALRM'} = sub { die("connection timeout"); };
-        alarm($self->{'connect_timeout'});
-        $sock = IO::Socket::INET->new(
-            PeerAddr => $self->{'peer'},
-            Type     => SOCK_STREAM,
-            Timeout  => $self->{'connect_timeout'},
-        );
-        if(!defined $sock or !$sock->connected()) {
-            my $msg = "failed to connect to $self->{'peer'} :$!";
-            if($self->{'errors_are_fatal'}) {
-                croak($msg);
-            }
-            $Monitoring::Livestatus::ErrorCode    = 500;
-            $Monitoring::Livestatus::ErrorMessage = $msg;
-            alarm(0);
-            return;
-        }
-
-        if(defined $self->{'query_timeout'}) {
-            # set timeout
-            $sock->timeout($self->{'query_timeout'});
-        }
-
-        setsockopt($sock, IPPROTO_TCP, TCP_NODELAY, 1);
-
-    };
-    alarm(0);
-
-    if($@) {
-        $Monitoring::Livestatus::ErrorCode    = 500;
-        $Monitoring::Livestatus::ErrorMessage = $@;
-        return;
-    }
-
-    return($sock);
-}
-
-
-########################################
-
-sub _close {
-    my $self = shift;
-    my $sock = shift;
-    return unless defined $sock;
-    return close($sock);
-}
-
-
-1;
-
-=head1 AUTHOR
-
-Sven Nierlein, E<lt>nierlein@cpan.orgE<gt>
-
-=head1 COPYRIGHT AND LICENSE
-
-Copyright (C) 2009 by Sven Nierlein
-
-This library is free software; you can redistribute it and/or modify
-it under the same terms as Perl itself.
-
-=cut
-
-__END__
diff -Nru check-mk-1.2.2p3/=unpacked-tar10=/api/perl/lib/Monitoring/Livestatus/MULTI.pm check-mk-1.2.6p12/=unpacked-tar10=/api/perl/lib/Monitoring/Livestatus/MULTI.pm
--- check-mk-1.2.2p3/=unpacked-tar10=/api/perl/lib/Monitoring/Livestatus/MULTI.pm 2012-10-04 13:12:37.000000000 +0000
+++ check-mk-1.2.6p12/=unpacked-tar10=/api/perl/lib/Monitoring/Livestatus/MULTI.pm 1970-01-01 00:00:00.000000000 +0000
@@ -1,922 +0,0 @@
-package Monitoring::Livestatus::MULTI;
-
-use 5.000000;
-use strict;
-use warnings;
-use Carp;
-use Data::Dumper;
-use Config;
-use Time::HiRes qw/gettimeofday tv_interval/;
-use Scalar::Util qw/looks_like_number/;
-use Monitoring::Livestatus;
-use base "Monitoring::Livestatus";
-
-=head1 NAME
-
-Monitoring::Livestatus::MULTI - connector with multiple peers
-
-=head1 SYNOPSIS
-
-    use Monitoring::Livestatus;
-    my $nl = Monitoring::Livestatus::MULTI->new( qw{nagioshost1:9999 nagioshost2:9999 /var/spool/nagios/live.socket} );
-    my $hosts = $nl->selectall_arrayref("GET hosts");
-
-=head1 CONSTRUCTOR
-
-=head2 new ( [ARGS] )
-
-Creates an C<Monitoring::Livestatus::MULTI> object. C<new> takes at least the server.
-Arguments are the same as in L<Monitoring::Livestatus>.
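-
-For example (an illustrative sketch; C<peer> may mix TCP and UNIX
-socket addresses, as in the SYNOPSIS above):
-
-    my $ml = Monitoring::Livestatus::MULTI->new(
-        peer    => [ 'nagioshost1:9999', '/var/spool/nagios/live.socket' ],
-        verbose => 0,
-    );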
-
-=cut
-
-sub new {
-    my $class = shift;
-    unshift(@_, "peer") if scalar @_ == 1;
-    my(%options) = @_;
-
-    $options{'backend'} = $class;
-    my $self = Monitoring::Livestatus->new(%options);
-    bless $self, $class;
-
-    if(!defined $self->{'peers'}) {
-        $self->{'peer'} = $self->_get_peers();
-
-        # set our peer(s) from the options
-        my %peer_options;
-        my $peers;
-        for my $opt_key (keys %options) {
-            $peer_options{$opt_key} = $options{$opt_key};
-        }
-        $peer_options{'errors_are_fatal'} = 0;
-        for my $peer (@{$self->{'peer'}}) {
-            $peer_options{'name'} = $peer->{'name'};
-            $peer_options{'peer'} = $peer->{'peer'};
-            delete $peer_options{'socket'};
-            delete $peer_options{'server'};
-
-            if($peer->{'type'} eq 'UNIX') {
-                push @{$peers}, new Monitoring::Livestatus::UNIX(%peer_options);
-            }
-            elsif($peer->{'type'} eq 'INET') {
-                push @{$peers}, new Monitoring::Livestatus::INET(%peer_options);
-            }
-        }
-        $self->{'peers'} = $peers;
-        delete $self->{'socket'};
-        delete $self->{'server'};
-    }
-
-    if(!defined $self->{'peers'}) {
-        croak('please specify at least one peer, socket or server');
-    }
-
-    # dont use threads with only one peer
-    if(scalar @{$self->{'peers'}} == 1) { $self->{'use_threads'} = 0; }
-
-    # check for threads support
-    if(!defined $self->{'use_threads'}) {
-        $self->{'use_threads'} = 0;
-        if($Config{useithreads}) {
-            $self->{'use_threads'} = 1;
-        }
-    }
-    if($self->{'use_threads'}) {
-        eval {
-            require threads;
-            require Thread::Queue;
-        };
-        if($@) {
-            $self->{'use_threads'} = 0;
-            $self->{'logger'}->debug('error initializing threads: '.$@) if defined $self->{'logger'};
-        } else {
-            $self->_start_worker;
-        }
-    }
-
-    # initialize peer keys
-    $self->{'peer_by_key'}  = {};
-    $self->{'peer_by_addr'} = {};
-    for my $peer (@{$self->{'peers'}}) {
-        $self->{'peer_by_key'}->{$peer->peer_key}   = $peer;
-        $self->{'peer_by_addr'}->{$peer->peer_addr} = $peer;
-    }
-
-    $self->{'name'} = 'multiple connector' unless defined $self->{'name'};
-    $self->{'logger'}->debug('initialized Monitoring::Livestatus::MULTI '.($self->{'use_threads'} ? 'with' : 'without' ).' threads') if $self->{'verbose'};
-
-    return $self;
-}
-
-
-########################################
-
-=head1 METHODS
-
-=head2 do
-
-See L<Monitoring::Livestatus> for more information.
-
-=cut
-
-sub do {
-    my $self = shift;
-    my $opts = $self->_lowercase_and_verify_options($_[1]);
-    my $t0   = [gettimeofday];
-
-    $self->_do_on_peers("do", $opts->{'backends'}, @_);
-    my $elapsed = tv_interval ( $t0 );
-    $self->{'logger'}->debug(sprintf('%.4f', $elapsed).' sec for do('.$_[0].') in total') if $self->{'verbose'};
-    return 1;
-}
-
-
-########################################
-
-=head2 selectall_arrayref
-
-See L<Monitoring::Livestatus> for more information.
-
-=cut
-
-sub selectall_arrayref {
-    my $self = shift;
-    my $opts = $self->_lowercase_and_verify_options($_[1]);
-    my $t0   = [gettimeofday];
-
-    $self->_log_statement($_[0], $opts, 0) if $self->{'verbose'};
-
-    my $return = $self->_merge_answer($self->_do_on_peers("selectall_arrayref", $opts->{'backends'}, @_));
-    my $elapsed = tv_interval ( $t0 );
-    if($self->{'verbose'}) {
-        my $total_results = 0;
-        $total_results = scalar @{$return} if defined $return;
-        $self->{'logger'}->debug(sprintf('%.4f', $elapsed).' sec for selectall_arrayref() in total, results: '.$total_results);
-    }
-
-    return $return;
-}
-
-
-########################################
-
-=head2 selectall_hashref
-
-See L<Monitoring::Livestatus> for more information.
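The wrappers above fan a query out to every peer and merge the answers; a sketch of iterating the combined rows (assumes $ml is the MULTI object from the previous example):

    # rows from all peers come back as one flat list, in peer order
    my $rows = $ml->selectall_arrayref("GET hosts\nColumns: name state",
                                       { Slice => {} });
    for my $row (@{$rows}) {
        printf "%s has state %d\n", $row->{'name'}, $row->{'state'};
    }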
-
-=cut
-
-sub selectall_hashref {
-    my $self = shift;
-    my $opts = $self->_lowercase_and_verify_options($_[2]);
-    my $t0   = [gettimeofday];
-
-    my $return = $self->_merge_answer($self->_do_on_peers("selectall_hashref", $opts->{'backends'}, @_));
-    my $elapsed = tv_interval ( $t0 );
-    $self->{'logger'}->debug(sprintf('%.4f', $elapsed).' sec for selectall_hashref() in total') if $self->{'verbose'};
-
-    return $return;
-}
-
-
-########################################
-
-=head2 selectcol_arrayref
-
-See L<Monitoring::Livestatus> for more information.
-
-=cut
-
-sub selectcol_arrayref {
-    my $self = shift;
-    my $opts = $self->_lowercase_and_verify_options($_[1]);
-    my $t0   = [gettimeofday];
-
-    my $return = $self->_merge_answer($self->_do_on_peers("selectcol_arrayref", $opts->{'backends'}, @_));
-    my $elapsed = tv_interval ( $t0 );
-    $self->{'logger'}->debug(sprintf('%.4f', $elapsed).' sec for selectcol_arrayref() in total') if $self->{'verbose'};
-
-    return $return;
-}
-
-
-########################################
-
-=head2 selectrow_array
-
-See L<Monitoring::Livestatus> for more information.
-
-=cut
-
-sub selectrow_array {
-    my $self      = shift;
-    my $statement = $_[0];
-    my $opts      = $self->_lowercase_and_verify_options($_[1]);
-    my $t0        = [gettimeofday];
-    my @return;
-
-    if((defined $opts->{'sum'} and $opts->{'sum'} == 1) or (!defined $opts->{'sum'} and $statement =~ m/^Stats:/mx)) {
-        @return = @{$self->_sum_answer($self->_do_on_peers("selectrow_arrayref", $opts->{'backends'}, @_))};
-    } else {
-        if($self->{'warnings'}) {
-            carp("selectrow_arrayref without Stats on multi backend will not work as expected!");
-        }
-        my $rows = $self->_merge_answer($self->_do_on_peers("selectrow_arrayref", $opts->{'backends'}, @_));
-        @return = @{$rows} if defined $rows;
-    }
-
-    my $elapsed = tv_interval ( $t0 );
-    $self->{'logger'}->debug(sprintf('%.4f', $elapsed).' sec for selectrow_array() in total') if $self->{'verbose'};
-
-    return @return;
-}
-
-
-########################################
-
-=head2 selectrow_arrayref
-
-See L<Monitoring::Livestatus> for more information.
-
-=cut
-
-sub selectrow_arrayref {
-    my $self      = shift;
-    my $statement = $_[0];
-    my $opts      = $self->_lowercase_and_verify_options($_[1]);
-    my $t0        = [gettimeofday];
-    my $return;
-
-    if((defined $opts->{'sum'} and $opts->{'sum'} == 1) or (!defined $opts->{'sum'} and $statement =~ m/^Stats:/mx)) {
-        $return = $self->_sum_answer($self->_do_on_peers("selectrow_arrayref", $opts->{'backends'}, @_));
-    } else {
-        if($self->{'warnings'}) {
-            carp("selectrow_arrayref without Stats on multi backend will not work as expected!");
-        }
-        my $rows = $self->_merge_answer($self->_do_on_peers("selectrow_arrayref", $opts->{'backends'}, @_));
-        $return = $rows->[0] if defined $rows->[0];
-    }
-
-    my $elapsed = tv_interval ( $t0 );
-    $self->{'logger'}->debug(sprintf('%.4f', $elapsed).' sec for selectrow_arrayref() in total') if $self->{'verbose'};
-
-    return $return;
-}
-
-
-########################################
-
-=head2 selectrow_hashref
-
-See L<Monitoring::Livestatus> for more information.
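As the Stats branches above show, counter queries are summed over all peers instead of merged; a sketch (assumes a connected MULTI object):

    # one summed row across all backends
    my($up, $down, $unreach) = $ml->selectrow_array(
        "GET hosts\nStats: state = 0\nStats: state = 1\nStats: state = 2"
    );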
-
-=cut
-
-sub selectrow_hashref {
-    my $self      = shift;
-    my $statement = $_[0];
-    my $opts      = $self->_lowercase_and_verify_options($_[1]);
-
-    my $t0 = [gettimeofday];
-
-    my $return;
-
-    if((defined $opts->{'sum'} and $opts->{'sum'} == 1) or (!defined $opts->{'sum'} and $statement =~ m/^Stats:/mx)) {
-        $return = $self->_sum_answer($self->_do_on_peers("selectrow_hashref", $opts->{'backends'}, @_));
-    } else {
-        if($self->{'warnings'}) {
-            carp("selectrow_hashref without Stats on multi backend will not work as expected!");
-        }
-        $return = $self->_merge_answer($self->_do_on_peers("selectrow_hashref", $opts->{'backends'}, @_));
-    }
-
-    my $elapsed = tv_interval ( $t0 );
-    $self->{'logger'}->debug(sprintf('%.4f', $elapsed).' sec for selectrow_hashref() in total') if $self->{'verbose'};
-
-    return $return;
-}
-
-
-########################################
-
-=head2 selectscalar_value
-
-See L<Monitoring::Livestatus> for more information.
-
-=cut
-
-sub selectscalar_value {
-    my $self      = shift;
-    my $statement = $_[0];
-    my $opts      = $self->_lowercase_and_verify_options($_[1]);
-
-    my $t0 = [gettimeofday];
-
-    my $return;
-
-    if((defined $opts->{'sum'} and $opts->{'sum'} == 1) or (!defined $opts->{'sum'} and $statement =~ m/^Stats:/mx)) {
-        return $self->_sum_answer($self->_do_on_peers("selectscalar_value", $opts->{'backends'}, @_));
-    } else {
-        if($self->{'warnings'}) {
-            carp("selectscalar_value without Stats on multi backend will not work as expected!");
-        }
-        my $rows = $self->_merge_answer($self->_do_on_peers("selectscalar_value", $opts->{'backends'}, @_));
-
-        $return = $rows->[0] if defined $rows->[0];
-    }
-
-    my $elapsed = tv_interval ( $t0 );
-    $self->{'logger'}->debug(sprintf('%.4f', $elapsed).' sec for selectscalar_value() in total') if $self->{'verbose'};
-
-    return $return;
-}
-
-
-########################################
-
-=head2 errors_are_fatal
-
-See L<Monitoring::Livestatus> for more information.
-
-=cut
-
-sub errors_are_fatal {
-    my $self  = shift;
-    my $value = shift;
-    return $self->_change_setting('errors_are_fatal', $value);
-}
-
-
-########################################
-
-=head2 warnings
-
-See L<Monitoring::Livestatus> for more information.
-
-=cut
-
-sub warnings {
-    my $self  = shift;
-    my $value = shift;
-    return $self->_change_setting('warnings', $value);
-}
-
-
-########################################
-
-=head2 verbose
-
-See L<Monitoring::Livestatus> for more information.
-
-=cut
-
-sub verbose {
-    my $self  = shift;
-    my $value = shift;
-    return $self->_change_setting('verbose', $value);
-}
-
-
-########################################
-
-=head2 peer_addr
-
-See L<Monitoring::Livestatus> for more information.
-
-=cut
-
-sub peer_addr {
-    my $self = shift;
-
-    my @addrs;
-    for my $peer (@{$self->{'peers'}}) {
-        push @addrs, $peer->peer_addr;
-    }
-
-    return wantarray ? @addrs : undef;
-}
-
-
-########################################
-
-=head2 peer_name
-
-See L<Monitoring::Livestatus> for more information.
-
-=cut
-
-sub peer_name {
-    my $self = shift;
-
-    my @names;
-    for my $peer (@{$self->{'peers'}}) {
-        push @names, $peer->peer_name;
-    }
-
-    return wantarray ? @names : $self->{'name'};
-}
-
-
-########################################
-
-=head2 peer_key
-
-See L<Monitoring::Livestatus> for more information.
-
-=cut
-
-sub peer_key {
-    my $self = shift;
-
-    my @keys;
-    for my $peer (@{$self->{'peers'}}) {
-        push @keys, $peer->peer_key;
-    }
-
-    return wantarray ? @keys : $self->{'key'};
-}
-
-
-########################################
-
-=head2 disable
-
-    $ml->disable()
-
-disables this connection, returns the last state.
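A sketch of taking a single backend out of rotation and putting it back; peer keys come from peer_key in list context:

    my @keys = $ml->peer_key;
    $ml->disable($keys[0]);                              # returns the previous state
    my $hosts = $ml->selectall_arrayref("GET hosts");    # queries the remaining peers
    $ml->enable($keys[0]);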
- -=cut -sub disable { - my $self = shift; - my $peer_key = shift; - if(!defined $peer_key) { - for my $peer (@{$self->{'peers'}}) { - $peer->disable(); - } - return 1; - } else { - my $peer = $self->_get_peer_by_key($peer_key); - my $prev = $peer->{'disabled'}; - $peer->{'disabled'} = 1; - return $prev; - } -} - - -######################################## - -=head2 enable - - $ml->enable() - -enables this connection, returns the last state. - -=cut -sub enable { - my $self = shift; - my $peer_key = shift; - if(!defined $peer_key) { - for my $peer (@{$self->{'peers'}}) { - $peer->enable(); - } - return 1; - } else { - my $peer = $self->_get_peer_by_key($peer_key); - my $prev = $peer->{'disabled'}; - $peer->{'disabled'} = 0; - return $prev; - } -} - -######################################## -# INTERNAL SUBS -######################################## - -sub _change_setting { - my $self = shift; - my $key = shift; - my $value = shift; - my $old = $self->{$key}; - - # set new value - if(defined $value) { - $self->{$key} = $value; - for my $peer (@{$self->{'peers'}}) { - $peer->{$key} = $value; - } - - # restart workers - if($self->{'use_threads'}) { - _stop_worker(); - $self->_start_worker(); - } - } - - return $old; -} - - -######################################## -sub _start_worker { - my $self = shift; - - # create job transports - $self->{'WorkQueue'} = Thread::Queue->new; - $self->{'WorkResults'} = Thread::Queue->new; - - # set signal handler before thread is started - # otherwise they would be killed when started - # and stopped immediately after start - $SIG{'USR1'} = sub { threads->exit(); }; - - # start worker threads - our %threads; - my $threadcount = scalar @{$self->{'peers'}}; - for(my $x = 0; $x < $threadcount; $x++) { - $self->{'threads'}->[$x] = threads->new(\&_worker_thread, $self->{'peers'}, $self->{'WorkQueue'}, $self->{'WorkResults'}, $self->{'logger'}); - } - - # restore sig handler as it was only for the threads - $SIG{'USR1'} = 'DEFAULT'; - return; -} - - -######################################## -sub _stop_worker { - # try to kill our threads safely - eval { - for my $thr (threads->list()) { - $thr->kill('USR1')->detach(); - } - }; - return; -} - - -######################################## -sub _worker_thread { - local $SIG{'USR1'} = sub { threads->exit(); }; - - my $peers = shift; - my $workQueue = shift; - my $workResults = shift; - my $logger = shift; - - while (my $job = $workQueue->dequeue) { - my $erg; - eval { - $erg = _do_wrapper($peers->[$job->{'peer'}], $job->{'sub'}, $logger, @{$job->{'opts'}}); - }; - if($@) { - warn("Error in Thread ".$job->{'peer'}." :".$@); - $job->{'logger'}->error("Error in Thread ".$job->{'peer'}." :".$@) if defined $job->{'logger'}; - }; - $workResults->enqueue({ peer => $job->{'peer'}, result => $erg }); - } - return; -} - - -######################################## -sub _do_wrapper { - my $peer = shift; - my $sub = shift; - my $logger = shift; - my @opts = @_; - - my $t0 = [gettimeofday]; - - my $data = $peer->$sub(@opts); - - my $elapsed = tv_interval ( $t0 ); - $logger->debug(sprintf('%.4f', $elapsed).' sec for fetching data on '.$peer->peer_name.' 
('.$peer->peer_addr.')') if defined $logger; - - $Monitoring::Livestatus::ErrorCode = 0 unless defined $Monitoring::Livestatus::ErrorCode; - $Monitoring::Livestatus::ErrorMessage = '' unless defined $Monitoring::Livestatus::ErrorMessage; - my $return = { - 'msg' => $Monitoring::Livestatus::ErrorMessage, - 'code' => $Monitoring::Livestatus::ErrorCode, - 'data' => $data, - }; - return $return; -} - - -######################################## -sub _do_on_peers { - my $self = shift; - my $sub = shift; - my $backends = shift; - my @opts = @_; - my $statement = $opts[0]; - my $use_threads = $self->{'use_threads'}; - my $t0 = [gettimeofday]; - - my $return; - my %codes; - my %messages; - my $query_options; - if($sub eq 'selectall_hashref') { - $query_options = $self->_lowercase_and_verify_options($opts[2]); - } else { - $query_options = $self->_lowercase_and_verify_options($opts[1]); - } - - # which peers affected? - my @peers; - if(defined $backends) { - my @backends; - if(ref $backends eq '') { - push @backends, $backends; - } - elsif(ref $backends eq 'ARRAY') { - @backends = @{$backends}; - } else { - croak("unsupported type for backend: ".ref($backends)); - } - - for my $key (@backends) { - my $backend = $self->_get_peer_by_key($key); - push @peers, $backend unless $backend->{'disabled'}; - } - } else { - # use all backends - @peers = @{$self->{'peers'}}; - } - - # its faster without threads for only one peer - if(scalar @peers <= 1) { $use_threads = 0; } - - # if we have limits set, we cannot use threads - if(defined $query_options->{'limit_start'}) { $use_threads = 0; } - - if($use_threads) { - # use the threaded variant - $self->{'logger'}->debug('using threads') if $self->{'verbose'}; - - my $peers_to_use; - for my $peer (@peers) { - if($peer->{'disabled'}) { - # dont send any query - } - elsif($peer->marked_bad) { - warn($peer->peer_name.' ('.$peer->peer_key.') is marked bad') if $self->{'verbose'}; - } - else { - $peers_to_use->{$peer->peer_key} = 1; - } - } - my $x = 0; - for my $peer (@{$self->{'peers'}}) { - if(defined $peers_to_use->{$peer->peer_key}) { - my $job = { - 'peer' => $x, - 'sub' => $sub, - 'opts' => \@opts, - }; - $self->{'WorkQueue'}->enqueue($job); - } - $x++; - } - - for(my $x = 0; $x < scalar keys %{$peers_to_use}; $x++) { - my $result = $self->{'WorkResults'}->dequeue; - my $peer = $self->{'peers'}->[$result->{'peer'}]; - if(defined $result->{'result'}) { - push @{$codes{$result->{'result'}->{'code'}}}, { 'peer' => $peer->peer_key, 'msg' => $result->{'result'}->{'msg'} }; - $return->{$peer->peer_key} = $result->{'result'}->{'data'}; - } else { - warn("undefined result for: $statement"); - } - } - } else { - $self->{'logger'}->debug('not using threads') if $self->{'verbose'}; - for my $peer (@peers) { - if($peer->{'disabled'}) { - # dont send any query - } - elsif($peer->marked_bad) { - warn($peer->peer_name.' 
('.$peer->peer_key.') is marked bad') if $self->{'verbose'}; - } else { - my $erg = _do_wrapper($peer, $sub, $self->{'logger'}, @opts); - $return->{$peer->peer_key} = $erg->{'data'}; - push @{$codes{$erg->{'code'}}}, { 'peer' => $peer, 'msg' => $erg->{'msg'} }; - - # compute limits - if(defined $query_options->{'limit_length'} and $peer->{'meta_data'}->{'result_count'}) { - last; - } - # set a new start if we had rows already - if(defined $query_options->{'limit_start'}) { - $query_options->{'limit_start'} = $query_options->{'limit_start'} - $peer->{'meta_data'}->{'row_count'}; - } - } - } - } - - - # check if we different result stati - undef $Monitoring::Livestatus::ErrorMessage; - $Monitoring::Livestatus::ErrorCode = 0; - my @codes = sort keys %codes; - if(scalar @codes > 1) { - # got different results for our backends - if($self->{'verbose'}) { - $self->{'logger'}->warn("got different result stati: ".Dumper(\%codes)); - } - } else { - # got same result codes for all backend - } - - my $failed = 0; - my $code = $codes[0]; - if(defined $code and $code >= 300) { - $failed = 1; - } - - if($failed) { - my $msg = $codes{$code}->[0]->{'msg'}; - $self->{'logger'}->debug("same: $code -> $msg") if $self->{'verbose'}; - $Monitoring::Livestatus::ErrorMessage = $msg; - $Monitoring::Livestatus::ErrorCode = $code; - if($self->{'errors_are_fatal'}) { - croak("ERROR ".$code." - ".$Monitoring::Livestatus::ErrorMessage." in query:\n'".$statement."'\n"); - } - return; - } - - my $elapsed = tv_interval ( $t0 ); - $self->{'logger'}->debug(sprintf('%.4f', $elapsed).' sec for fetching all data') if $self->{'verbose'}; - - # deep copy result? - if($use_threads - and ( - (defined $query_options->{'deepcopy'} and $query_options->{'deepcopy'} == 1) - or - (defined $self->{'deepcopy'} and $self->{'deepcopy'} == 1) - ) - ) { - # result has to be cloned to avoid "Invalid value for shared scalar" error - - $return = $self->_clone($return, $self->{'logger'}); - } - - return($return); -} - - -######################################## -sub _merge_answer { - my $self = shift; - my $data = shift; - my $return; - - my $t0 = [gettimeofday]; - - # iterate over original peers to retain order - for my $peer (@{$self->{'peers'}}) { - my $key = $peer->peer_key; - next if !defined $data->{$key}; - - if(ref $data->{$key} eq 'ARRAY') { - $return = [] unless defined $return; - $return = [ @{$return}, @{$data->{$key}} ]; - } elsif(ref $data->{$key} eq 'HASH') { - $return = {} unless defined $return; - $return = { %{$return}, %{$data->{$key}} }; - } else { - push @{$return}, $data->{$key}; - } - } - - my $elapsed = tv_interval ( $t0 ); - $self->{'logger'}->debug(sprintf('%.4f', $elapsed).' 
sec for merging data') if $self->{'verbose'};
-
-    return($return);
-}
-
-
-########################################
-sub _sum_answer {
-    my $self = shift;
-    my $data = shift;
-    my $return;
-    my $t0 = [gettimeofday];
-    for my $peername (keys %{$data}) {
-        if(ref $data->{$peername} eq 'HASH') {
-            for my $key (keys %{$data->{$peername}}) {
-                if(!defined $return->{$key}) {
-                    $return->{$key} = $data->{$peername}->{$key};
-                } elsif(looks_like_number($data->{$peername}->{$key})) {
-                    $return->{$key} += $data->{$peername}->{$key};
-                }
-            }
-        }
-        elsif(ref $data->{$peername} eq 'ARRAY') {
-            my $x = 0;
-            for my $val (@{$data->{$peername}}) {
-                if(!defined $return->[$x]) {
-                    $return->[$x] = $data->{$peername}->[$x];
-                } else {
-                    $return->[$x] += $data->{$peername}->[$x];
-                }
-                $x++;
-            }
-        } elsif(defined $data->{$peername}) {
-            $return = 0 unless defined $return;
-            next unless defined $data->{$peername};
-            $return += $data->{$peername};
-        }
-    }
-
-    my $elapsed = tv_interval ( $t0 );
-    $self->{'logger'}->debug(sprintf('%.4f', $elapsed).' sec for summarizing data') if $self->{'verbose'};
-
-    return $return;
-}
-
-
-########################################
-sub _clone {
-    my $self   = shift;
-    my $data   = shift;
-    my $logger = shift;
-    my $t0     = [gettimeofday];
-
-    my $return;
-    if(ref $data eq '') {
-        $return = $data;
-    }
-    elsif(ref $data eq 'ARRAY') {
-        $return = [];
-        for my $dat (@{$data}) {
-            push @{$return}, $self->_clone($dat);
-        }
-    }
-    elsif(ref $data eq 'HASH') {
-        $return = {};
-        for my $key (keys %{$data}) {
-            $return->{$key} = $self->_clone($data->{$key});
-        }
-    }
-    else {
-        croak("cant clone: ".(ref $data));
-    }
-
-    my $elapsed = tv_interval ( $t0 );
-    $logger->debug(sprintf('%.4f', $elapsed).' sec for cloning data') if defined $logger;
-
-    return $return;
-}
-
-
-########################################
-sub _get_peer_by_key {
-    my $self = shift;
-    my $key  = shift;
-
-    return unless defined $key;
-    return unless defined $self->{'peer_by_key'}->{$key};
-
-    return $self->{'peer_by_key'}->{$key};
-}
-
-
-########################################
-sub _get_peer_by_addr {
-    my $self = shift;
-    my $addr = shift;
-
-    return unless defined $addr;
-    return unless defined $self->{'peer_by_addr'}->{$addr};
-
-    return $self->{'peer_by_addr'}->{$addr};
-}
-
-
-########################################
-
-END {
-    # try to kill our threads safely
-    _stop_worker();
-}
-
-########################################
-
-1;
-
-=head1 AUTHOR
-
-Sven Nierlein, E<lt>nierlein@cpan.orgE<gt>
-
-=head1 COPYRIGHT AND LICENSE
-
-Copyright (C) 2009 by Sven Nierlein
-
-This library is free software; you can redistribute it and/or modify
-it under the same terms as Perl itself.
-
-=cut
-
-__END__
diff -Nru check-mk-1.2.2p3/=unpacked-tar10=/api/perl/lib/Monitoring/Livestatus/UNIX.pm check-mk-1.2.6p12/=unpacked-tar10=/api/perl/lib/Monitoring/Livestatus/UNIX.pm
--- check-mk-1.2.2p3/=unpacked-tar10=/api/perl/lib/Monitoring/Livestatus/UNIX.pm 2012-10-04 13:12:37.000000000 +0000
+++ check-mk-1.2.6p12/=unpacked-tar10=/api/perl/lib/Monitoring/Livestatus/UNIX.pm 1970-01-01 00:00:00.000000000 +0000
@@ -1,112 +0,0 @@
-package Monitoring::Livestatus::UNIX;
-
-use 5.000000;
-use strict;
-use warnings;
-use IO::Socket::UNIX;
-use Carp;
-use base "Monitoring::Livestatus";
-
-=head1 NAME
-
-Monitoring::Livestatus::UNIX - connector with unix sockets
-
-=head1 SYNOPSIS
-
-    use Monitoring::Livestatus;
-    my $nl = Monitoring::Livestatus::UNIX->new( '/var/lib/livestatus/livestatus.sock' );
-    my $hosts = $nl->selectall_arrayref("GET hosts");
-
-=head1 CONSTRUCTOR
-
-=head2 new ( [ARGS] )
-
-Creates an C<Monitoring::Livestatus::UNIX> object. C<new> takes at least the socketpath.
-Arguments are the same as in C<Monitoring::Livestatus>.
-If the constructor is only passed a single argument, it is assumed to
-be the C<peer> specification. Use either socket OR server.
-
-=cut
-
-sub new {
-    my $class = shift;
-    unshift(@_, "peer") if scalar @_ == 1;
-    my(%options) = @_;
-    $options{'name'} = $options{'peer'} unless defined $options{'name'};
-
-    $options{'backend'} = $class;
-    my $self = Monitoring::Livestatus->new(%options);
-    bless $self, $class;
-    confess('not a scalar') if ref $self->{'peer'} ne '';
-
-    return $self;
-}
-
-
-########################################
-
-=head1 METHODS
-
-=cut
-
-sub _open {
-    my $self = shift;
-
-    if(!-S $self->{'peer'}) {
-        my $msg = "failed to open socket $self->{'peer'}: $!";
-        if($self->{'errors_are_fatal'}) {
-            croak($msg);
-        }
-        $Monitoring::Livestatus::ErrorCode    = 500;
-        $Monitoring::Livestatus::ErrorMessage = $msg;
-        return;
-    }
-    my $sock = IO::Socket::UNIX->new(
-        Peer => $self->{'peer'},
-        Type => SOCK_STREAM,
-    );
-    if(!defined $sock or !$sock->connected()) {
-        my $msg = "failed to connect to $self->{'peer'} :$!";
-        if($self->{'errors_are_fatal'}) {
-            croak($msg);
-        }
-        $Monitoring::Livestatus::ErrorCode    = 500;
-        $Monitoring::Livestatus::ErrorMessage = $msg;
-        return;
-    }
-
-    if(defined $self->{'query_timeout'}) {
-        # set timeout
-        $sock->timeout($self->{'query_timeout'});
-    }
-
-    return($sock);
-}
-
-
-########################################
-
-sub _close {
-    my $self = shift;
-    my $sock = shift;
-    return unless defined $sock;
-    return close($sock);
-}
-
-
-1;
-
-=head1 AUTHOR
-
-Sven Nierlein, E<lt>nierlein@cpan.orgE<gt>
-
-=head1 COPYRIGHT AND LICENSE
-
-Copyright (C) 2009 by Sven Nierlein
-
-This library is free software; you can redistribute it and/or modify
-it under the same terms as Perl itself.
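The UNIX connector mirrors the INET one over a local socket; a sketch with a placeholder socket path (again, normally reached through Monitoring::Livestatus->new with a socket argument):

    use Monitoring::Livestatus;

    my $ml = Monitoring::Livestatus::UNIX->new( '/var/lib/livestatus/livestatus.sock' );
    my $services = $ml->selectall_arrayref(
        "GET services\nColumns: host_name description state"
    );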
-
-=cut
-
-__END__
diff -Nru check-mk-1.2.2p3/=unpacked-tar10=/api/perl/lib/Monitoring/Livestatus.pm check-mk-1.2.6p12/=unpacked-tar10=/api/perl/lib/Monitoring/Livestatus.pm
--- check-mk-1.2.2p3/=unpacked-tar10=/api/perl/lib/Monitoring/Livestatus.pm 2012-10-04 13:12:37.000000000 +0000
+++ check-mk-1.2.6p12/=unpacked-tar10=/api/perl/lib/Monitoring/Livestatus.pm 1970-01-01 00:00:00.000000000 +0000
@@ -1,1564 +0,0 @@
-package Monitoring::Livestatus;
-
-use 5.006;
-use strict;
-use warnings;
-use Data::Dumper;
-use Carp;
-use Digest::MD5 qw(md5_hex);
-use Monitoring::Livestatus::INET;
-use Monitoring::Livestatus::UNIX;
-use Monitoring::Livestatus::MULTI;
-use Encode;
-use JSON::XS;
-
-our $VERSION = '0.74';
-
-
-=head1 NAME
-
-Monitoring::Livestatus - Perl API for check_mk livestatus to access runtime
-data from Nagios and Icinga
-
-=head1 SYNOPSIS
-
-    use Monitoring::Livestatus;
-    my $ml = Monitoring::Livestatus->new(
-        socket => '/var/lib/livestatus/livestatus.sock'
-    );
-    my $hosts = $ml->selectall_arrayref("GET hosts");
-
-=head1 DESCRIPTION
-
-This module connects via socket/tcp to the check_mk livestatus addon for Nagios
-and Icinga. You first have to install and activate the mklivestatus addon in your
-monitoring installation.
-
-=head1 CONSTRUCTOR
-
-=head2 new ( [ARGS] )
-
-Creates an C<Monitoring::Livestatus> object. C<new> takes at least the
-socketpath. Arguments are in key-value pairs.
-See L<Monitoring::Livestatus::MULTI> for more complex variants.
-
-=over 4
-
-=item socket
-
-path to the UNIX socket of check_mk livestatus
-
-=item server
-
-use this server for a TCP connection
-
-=item peer
-
-alternative way to set socket or server, if value contains ':' server is used,
-else socket
-
-=item name
-
-human readable name for this connection, defaults to the socket/server
-address
-
-=item verbose
-
-verbose mode
-
-=item line_seperator
-
-ascii code of the line seperator, defaults to 10, (newline)
-
-=item column_seperator
-
-ascii code of the column seperator, defaults to 0 (null byte)
-
-=item list_seperator
-
-ascii code of the list seperator, defaults to 44 (comma)
-
-=item host_service_seperator
-
-ascii code of the host/service seperator, defaults to 124 (pipe)
-
-=item keepalive
-
-enable keepalive. Default is off
-
-=item errors_are_fatal
-
-errors will die with an error message. Default: on
-
-=item warnings
-
-show warnings
-currently only querys without Columns: Header will result in a warning
-
-=item timeout
-
-set a general timeout. Used for connect and querys, no default
-
-=item query_timeout
-
-set a query timeout. Used for retrieving querys, Default 60sec
-
-=item connect_timeout
-
-set a connect timeout. Used for initial connections, default 5sec
-
-=item use_threads
-
-only used with multiple backend connections.
-Default is to use threads where available, but as threads in perl
-cause problems with tied resultsets and use more memory,
-querys are usually faster without threads, except for very slow backend
-connections.
-
-=back
-
-If the constructor is only passed a single argument, it is assumed to
-be the C<peer> specification. Use either socket OR server.
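A sketch combining several of the constructor options documented above; all values are illustrative:

    my $ml = Monitoring::Livestatus->new(
        socket    => '/var/lib/livestatus/livestatus.sock',
        name      => 'local site',
        keepalive => 1,     # reuse the socket between queries
        timeout   => 10,    # shorthand that sets query_timeout and connect_timeout
        warnings  => 0,     # no carp() for queries without a Columns: header
    );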
- -=cut - -sub new { - my $class = shift; - unshift(@_, "peer") if scalar @_ == 1; - my(%options) = @_; - - my $self = { - "verbose" => 0, # enable verbose output - "socket" => undef, # use unix sockets - "server" => undef, # use tcp connections - "peer" => undef, # use for socket / server connections - "name" => undef, # human readable name - "line_seperator" => 10, # defaults to newline - "column_seperator" => 0, # defaults to null byte - "list_seperator" => 44, # defaults to comma - "host_service_seperator" => 124, # defaults to pipe - "keepalive" => 0, # enable keepalive? - "errors_are_fatal" => 1, # die on errors - "backend" => undef, # should be keept undef, used internally - "timeout" => undef, # timeout for tcp connections - "query_timeout" => 60, # query timeout for tcp connections - "connect_timeout" => 5, # connect timeout for tcp connections - "timeout" => undef, # timeout for tcp connections - "use_threads" => undef, # use threads, default is to use threads where available - "warnings" => 1, # show warnings, for example on querys without Column: Header - "logger" => undef, # logger object used for statistical informations and errors / warnings - "deepcopy" => undef, # copy result set to avoid errors with tied structures - "disabled" => 0, # if disabled, this peer will not receive any query - "retries_on_connection_error" => 3, # retry x times to connect - "retry_interval" => 1, # retry after x seconds - }; - - for my $opt_key (keys %options) { - if(exists $self->{$opt_key}) { - $self->{$opt_key} = $options{$opt_key}; - } - else { - croak("unknown option: $opt_key"); - } - } - - if($self->{'verbose'} and !defined $self->{'logger'}) { - croak('please specify a logger object when using verbose mode'); - $self->{'verbose'} = 0; - } - - # setting a general timeout? - if(defined $self->{'timeout'}) { - $self->{'query_timeout'} = $self->{'timeout'}; - $self->{'connect_timeout'} = $self->{'timeout'}; - } - - bless $self, $class; - - # set our peer(s) from the options - my $peers = $self->_get_peers(); - - if(!defined $peers) { - croak('please specify at least one peer, socket or server'); - } - - if(!defined $self->{'backend'}) { - if(scalar @{$peers} == 1) { - my $peer = $peers->[0]; - $options{'name'} = $peer->{'name'}; - $options{'peer'} = $peer->{'peer'}; - if($peer->{'type'} eq 'UNIX') { - $self->{'CONNECTOR'} = new Monitoring::Livestatus::UNIX(%options); - } - elsif($peer->{'type'} eq 'INET') { - $self->{'CONNECTOR'} = new Monitoring::Livestatus::INET(%options); - } - $self->{'peer'} = $peer->{'peer'}; - } - else { - $options{'peer'} = $peers; - return new Monitoring::Livestatus::MULTI(%options); - } - } - - # set names and peer for non multi backends - if(defined $self->{'CONNECTOR'}->{'name'} and !defined $self->{'name'}) { - $self->{'name'} = $self->{'CONNECTOR'}->{'name'}; - } - if(defined $self->{'CONNECTOR'}->{'peer'} and !defined $self->{'peer'}) { - $self->{'peer'} = $self->{'CONNECTOR'}->{'peer'}; - } - - if($self->{'verbose'} and (!defined $self->{'backend'} or $self->{'backend'} ne 'Monitoring::Livestatus::MULTI')) { - $self->{'logger'}->debug('initialized Monitoring::Livestatus ('.$self->peer_name.')'); - } - - return $self; -} - - -######################################## - -=head1 METHODS - -=head2 do - - do($statement) - do($statement, %opts) - -Send a single statement without fetching the result. -Always returns true. 
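Since do() never fetches a result, it is the natural call for external commands; a sketch (host name and times are placeholders, the command syntax is standard Nagios):

    my $now = time();
    $ml->do(sprintf("COMMAND [%d] SCHEDULE_HOST_DOWNTIME;localhost;%d;%d;1;0;3600;admin;maintenance",
                    $now, $now, $now + 3600));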
-
-=cut
-
-sub do {
-    my $self      = shift;
-    my $statement = shift;
-    return if $self->{'disabled'};
-    $self->_send($statement);
-    return(1);
-}
-
-
-########################################
-
-=head2 selectall_arrayref
-
-    selectall_arrayref($statement)
-    selectall_arrayref($statement, %opts)
-    selectall_arrayref($statement, %opts, $limit )
-
-Sends a query and returns an array reference of arrays
-
-    my $arr_refs = $ml->selectall_arrayref("GET hosts");
-
-to get an array of hash references do something like
-
-    my $hash_refs = $ml->selectall_arrayref(
-        "GET hosts", { Slice => {} }
-    );
-
-to get an array of hash references from the first 2 returned rows only
-
-    my $hash_refs = $ml->selectall_arrayref(
-        "GET hosts", { Slice => {} }, 2
-    );
-
-use limit to limit the result to this number of rows
-
-column aliases can be defined with a rename hash
-
-    my $hash_refs = $ml->selectall_arrayref(
-        "GET hosts", {
-            Slice => {},
-            rename => {
-                'name' => 'host_name'
-            }
-        }
-    );
-
-=cut
-
-sub selectall_arrayref {
-    my $self      = shift;
-    my $statement = shift;
-    my $opt       = shift;
-    my $limit     = shift || 0;
-    return if $self->{'disabled'};
-    my $result;
-
-    # make opt hash keys lowercase
-    $opt = $self->_lowercase_and_verify_options($opt);
-
-    $self->_log_statement($statement, $opt, $limit) if $self->{'verbose'};
-
-    $result = $self->_send($statement, $opt);
-
-    if(!defined $result) {
-        return unless $self->{'errors_are_fatal'};
-        croak("got undef result for: $statement");
-    }
-
-    # trim result set down to expected row count
-    if(defined $limit and $limit >= 1) {
-        if(scalar @{$result->{'result'}} > $limit) {
-            @{$result->{'result'}} = @{$result->{'result'}}[0..$limit-1];
-        }
-    }
-
-    if($opt->{'slice'}) {
-        # make an array of hashes
-        my @hash_refs;
-        for my $res (@{$result->{'result'}}) {
-            my $hash_ref;
-            for(my $x=0;$x<scalar @{$res};$x++) {
-                my $key = $result->{'keys'}->[$x];
-                if(exists $opt->{'rename'} and defined $opt->{'rename'}->{$key}) {
-                    $key = $opt->{'rename'}->{$key};
-                }
-                $hash_ref->{$key} = $res->[$x];
-            }
-            # add callbacks
-            if(exists $opt->{'callbacks'}) {
-                for my $key (keys %{$opt->{'callbacks'}}) {
-                    $hash_ref->{$key} = $opt->{'callbacks'}->{$key}->($hash_ref);
-                }
-            }
-            push @hash_refs, $hash_ref;
-        }
-        return(\@hash_refs);
-    }
-    elsif(exists $opt->{'callbacks'}) {
-        for my $res (@{$result->{'result'}}) {
-            # add callbacks
-            if(exists $opt->{'callbacks'}) {
-                for my $key (keys %{$opt->{'callbacks'}}) {
-                    push @{$res}, $opt->{'callbacks'}->{$key}->($res);
-                }
-            }
-        }
-    }
-
-    if(exists $opt->{'callbacks'}) {
-        for my $key (keys %{$opt->{'callbacks'}}) {
-            push @{$result->{'keys'}}, $key;
-        }
-    }
-
-    return($result->{'result'});
-}
-
-
-########################################
-
-=head2 selectall_hashref
-
-    selectall_hashref($statement, $key_field)
-    selectall_hashref($statement, $key_field, %opts)
-
-Sends a query and returns a hashref with the given key
-
-    my $hashrefs = $ml->selectall_hashref("GET hosts", "name");
-
-=cut
-
-sub selectall_hashref {
-    my $self      = shift;
-    my $statement = shift;
-    my $key_field = shift;
-    my $opt       = shift;
-
-    $opt = $self->_lowercase_and_verify_options($opt);
-
-    $opt->{'slice'} = 1;
-
-    croak("key is required for selectall_hashref") if !defined $key_field;
-
-    my $result = $self->selectall_arrayref($statement, $opt);
-
-    my %indexed;
-    for my $row (@{$result}) {
-        if($key_field eq '$peername') {
-            $indexed{$self->peer_name} = $row;
-        }
-        elsif(!defined $row->{$key_field}) {
-            my %possible_keys = keys %{$row};
-            croak("key $key_field not found in result set, possible keys are: ".join(', ', sort keys %possible_keys));
-        } else {
-            $indexed{$row->{$key_field}} = $row;
-        }
-    }
-    return(\%indexed);
-}
-
-
-########################################
-
-=head2 selectcol_arrayref
-
-    selectcol_arrayref($statement)
-    selectcol_arrayref($statement, %opt )
-
-Sends a query and returns an arrayref for the first column
-
-    my $array_ref = $ml->selectcol_arrayref("GET hosts\nColumns: name");
-
-    $VAR1 = [
-        'localhost',
-        'gateway',
-    ];
-
-returns an empty array if nothing was found
-
-to get a different column use this
-
-    my $array_ref = $ml->selectcol_arrayref(
-        "GET hosts\nColumns: name contacts",
-        { Columns => [2] }
-    );
-
-you can link 2 columns in a hash result set
-
-    my %hash = @{
-        $ml->selectcol_arrayref(
-            "GET hosts\nColumns: name contacts",
-            { Columns => [1,2] }
-        )
-    };
-
-produces a hash with the host/contact association
-
-    $VAR1 = {
-        'localhost' => 'user1',
-        'gateway'   => 'user2'
-    };
-
-=cut
-
-sub selectcol_arrayref {
-    my $self      = shift;
-    my $statement = shift;
-    my $opt       = shift;
-
-    # make opt hash keys lowercase
-    $opt = $self->_lowercase_and_verify_options($opt);
-
-    # if no columns are set, use just the first one
-    if(!defined $opt->{'columns'} or ref $opt->{'columns'} ne 'ARRAY') {
-        @{$opt->{'columns'}} = qw{1};
-    }
-
-    my $result = $self->selectall_arrayref($statement);
-
-    my @column;
-    for my $row (@{$result}) {
-        for my $nr (@{$opt->{'columns'}}) {
-            push @column, $row->[$nr-1];
-        }
-    }
-    return(\@column);
-}
-
-
-########################################
-
-=head2 selectrow_array
-
-    selectrow_array($statement)
-    selectrow_array($statement, %opts)
-
-Sends a query and returns an array for the first row
-
-    my @array = $ml->selectrow_array("GET hosts");
-
-returns undef if nothing was found
-
-=cut
-sub selectrow_array {
-    my $self      = shift;
-    my $statement = shift;
-    my $opt       = shift;
-
-    # make opt hash keys lowercase
-    $opt = $self->_lowercase_and_verify_options($opt);
-
-    my @result = @{$self->selectall_arrayref($statement, $opt, 1)};
-    return @{$result[0]} if scalar @result > 0;
-    return;
-}
-
-
-########################################
-
-=head2 selectrow_arrayref
-
-    selectrow_arrayref($statement)
-    selectrow_arrayref($statement, %opts)
-
-Sends a query and returns an array reference for the first row
-
-    my $arrayref = $ml->selectrow_arrayref("GET hosts");
-
-returns undef if nothing was found
-
-=cut
-sub selectrow_arrayref {
-    my $self      = shift;
-    my $statement = shift;
-    my $opt       = shift;
-
-    # make opt hash keys lowercase
-    $opt = $self->_lowercase_and_verify_options($opt);
-
-    my $result = $self->selectall_arrayref($statement, $opt, 1);
-    return if !defined $result;
-    return $result->[0] if scalar @{$result} > 0;
-    return;
-}
-
-
-########################################
-
-=head2 selectrow_hashref
-
-    selectrow_hashref($statement)
-    selectrow_hashref($statement, %opt)
-
-Sends a query and returns a hash reference for the first row
-
-    my $hashref = $ml->selectrow_hashref("GET hosts");
-
-returns undef if nothing was found
-
-=cut
-sub selectrow_hashref {
-    my $self      = shift;
-    my $statement = shift;
-    my $opt       = shift;
-
-    # make opt hash keys lowercase
-    $opt = $self->_lowercase_and_verify_options($opt);
-    $opt->{slice} = 1;
-
-    my $result = $self->selectall_arrayref($statement, $opt, 1);
-    return if !defined $result;
-    return $result->[0] if scalar @{$result} > 0;
-    return;
-}
-
-
-########################################
-
-=head2 selectscalar_value
-
-    selectscalar_value($statement)
-    selectscalar_value($statement, %opt)
-
-Sends a query and
returns a single scalar - - my $count = $ml->selectscalar_value("GET hosts\nStats: state = 0"); - -returns undef if nothing was found - -=cut -sub selectscalar_value { - my $self = shift; - my $statement = shift; - my $opt = shift; - - # make opt hash keys lowercase - $opt = $self->_lowercase_and_verify_options($opt); - - my $row = $self->selectrow_arrayref($statement); - return if !defined $row; - return $row->[0] if scalar @{$row} > 0; - return; -} - -######################################## - -=head2 errors_are_fatal - - errors_are_fatal() - errors_are_fatal($value) - -Enable or disable fatal errors. When enabled the module will croak on any error. - -returns the current setting if called without new value - -=cut -sub errors_are_fatal { - my $self = shift; - my $value = shift; - my $old = $self->{'errors_are_fatal'}; - - $self->{'errors_are_fatal'} = $value; - $self->{'CONNECTOR'}->{'errors_are_fatal'} = $value if defined $self->{'CONNECTOR'}; - - return $old; -} - -######################################## - -=head2 warnings - - warnings() - warnings($value) - -Enable or disable warnings. When enabled the module will carp on warnings. - -returns the current setting if called without new value - -=cut -sub warnings { - my $self = shift; - my $value = shift; - my $old = $self->{'warnings'}; - - $self->{'warnings'} = $value; - $self->{'CONNECTOR'}->{'warnings'} = $value if defined $self->{'CONNECTOR'}; - - return $old; -} - - - -######################################## - -=head2 verbose - - verbose() - verbose($values) - -Enable or disable verbose output. When enabled the module will dump out debug output - -returns the current setting if called without new value - -=cut -sub verbose { - my $self = shift; - my $value = shift; - my $old = $self->{'verbose'}; - - $self->{'verbose'} = $value; - $self->{'CONNECTOR'}->{'verbose'} = $value if defined $self->{'CONNECTOR'}; - - return $old; -} - - -######################################## - -=head2 peer_addr - - $ml->peer_addr() - -returns the current peer address - -when using multiple backends, a list of all addresses is returned in list context - -=cut -sub peer_addr { - my $self = shift; - - return "".$self->{'peer'}; -} - - -######################################## - -=head2 peer_name - - $ml->peer_name() - $ml->peer_name($string) - -if new value is set, name is set to this value - -always returns the current peer name - -when using multiple backends, a list of all names is returned in list context - -=cut -sub peer_name { - my $self = shift; - my $value = shift; - - if(defined $value and $value ne '') { - $self->{'name'} = $value; - } - - return "".$self->{'name'}; -} - - -######################################## - -=head2 peer_key - - $ml->peer_key() - -returns a uniq key for this peer - -when using multiple backends, a list of all keys is returned in list context - -=cut -sub peer_key { - my $self = shift; - - if(!defined $self->{'key'}) { $self->{'key'} = md5_hex($self->peer_addr." ".$self->peer_name); } - - return $self->{'key'}; -} - - -######################################## - -=head2 marked_bad - - $ml->marked_bad() - -returns true if the current connection is marked down - -=cut -sub marked_bad { - my $self = shift; - - return 0; -} - - -######################################## - -=head2 disable - - $ml->disable() - -disables this connection, returns the last state. 
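These accessors return the previous value, so a setting can be toggled around a single query; a sketch of the non-fatal pattern spelled out in the ERROR HANDLING section further down:

    my $old   = $ml->errors_are_fatal(0);
    my $hosts = $ml->selectall_arrayref("GET hosts");
    warn "query failed: $Monitoring::Livestatus::ErrorMessage"
        if $Monitoring::Livestatus::ErrorCode;
    $ml->errors_are_fatal($old);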
- -=cut -sub disable { - my $self = shift; - my $prev = $self->{'disabled'}; - $self->{'disabled'} = 1; - return $prev; -} - - -######################################## - -=head2 enable - - $ml->enable() - -enables this connection, returns the last state. - -=cut -sub enable { - my $self = shift; - my $prev = $self->{'disabled'}; - $self->{'disabled'} = 0; - return $prev; -} - -######################################## -# INTERNAL SUBS -######################################## -sub _send { - my $self = shift; - my $statement = shift; - my $opt = shift; - - delete $self->{'meta_data'}; - - my $header = ""; - my $keys; - - my $with_peers = 0; - if(defined $opt->{'addpeer'} and $opt->{'addpeer'}) { - $with_peers = 1; - } - - $Monitoring::Livestatus::ErrorCode = 0; - undef $Monitoring::Livestatus::ErrorMessage; - - return(490, $self->_get_error(490), undef) if !defined $statement; - chomp($statement); - - my($status,$msg,$body); - if($statement =~ m/^Separators:/mx) { - $status = 492; - $msg = $self->_get_error($status); - } - - elsif($statement =~ m/^KeepAlive:/mx) { - $status = 496; - $msg = $self->_get_error($status); - } - - elsif($statement =~ m/^ResponseHeader:/mx) { - $status = 495; - $msg = $self->_get_error($status); - } - - elsif($statement =~ m/^ColumnHeaders:/mx) { - $status = 494; - $msg = $self->_get_error($status); - } - - elsif($statement =~ m/^OuputFormat:/mx) { - $status = 493; - $msg = $self->_get_error($status); - } - - # should be cought in mlivestatus directly - elsif($statement =~ m/^Limit:\ (.*)$/mx and $1 !~ m/^\d+$/mx) { - $status = 403; - $msg = $self->_get_error($status); - } - elsif($statement =~ m/^GET\ (.*)$/mx and $1 =~ m/^\s*$/mx) { - $status = 403; - $msg = $self->_get_error($status); - } - - elsif($statement =~ m/^Columns:\ (.*)$/mx and ($1 =~ m/,/mx or $1 =~ /^\s*$/mx)) { - $status = 405; - $msg = $self->_get_error($status); - } - elsif($statement !~ m/^GET\ /mx and $statement !~ m/^COMMAND\ /mx) { - $status = 401; - $msg = $self->_get_error($status); - } - - else { - - # Add Limits header - if(defined $opt->{'limit_start'}) { - $statement .= "\nLimit: ".($opt->{'limit_start'} + $opt->{'limit_length'}); - } - - # for querys with column header, no seperate columns will be returned - if($statement =~ m/^Columns:\ (.*)$/mx) { - ($statement,$keys) = $self->_extract_keys_from_columns_header($statement); - } elsif($statement =~ m/^Stats:\ (.*)$/mx or $statement =~ m/^StatsGroupBy:\ (.*)$/mx) { - ($statement,$keys) = $self->_extract_keys_from_stats_statement($statement); - } - - # Commands need no additional header - if($statement !~ m/^COMMAND/mx) { - $header .= "OutputFormat: json\n"; - $header .= "ResponseHeader: fixed16\n"; - if($self->{'keepalive'}) { - $header .= "KeepAlive: on\n"; - } - # remove empty lines from statement - $statement =~ s/\n+/\n/gmx; - } - - # add additional headers - if(defined $opt->{'header'} and ref $opt->{'header'} eq 'HASH') { - for my $key ( keys %{$opt->{'header'}}) { - $header .= $key.": ".$opt->{'header'}->{$key}."\n"; - } - } - - chomp($statement); - my $send = "$statement\n$header"; - $self->{'logger'}->debug("> ".Dumper($send)) if $self->{'verbose'}; - ($status,$msg,$body) = $self->_send_socket($send); - if($self->{'verbose'}) { - #$self->{'logger'}->debug("got:"); - #$self->{'logger'}->debug(Dumper(\@erg)); - $self->{'logger'}->debug("status: ".Dumper($status)); - $self->{'logger'}->debug("msg: ".Dumper($msg)); - $self->{'logger'}->debug("< ".Dumper($body)); - } - } - - if($status >= 300) { - $body = '' if !defined $body; - 
chomp($body); - $Monitoring::Livestatus::ErrorCode = $status; - if(defined $body and $body ne '') { - $Monitoring::Livestatus::ErrorMessage = $body; - } else { - $Monitoring::Livestatus::ErrorMessage = $msg; - } - $self->{'logger'}->error($status." - ".$Monitoring::Livestatus::ErrorMessage." in query:\n'".$statement) if $self->{'verbose'}; - if($self->{'errors_are_fatal'}) { - croak("ERROR ".$status." - ".$Monitoring::Livestatus::ErrorMessage." in query:\n'".$statement."'\n"); - } - return; - } - - # return a empty result set if nothing found - return({ keys => [], result => []}) if !defined $body; - - my $line_seperator = chr($self->{'line_seperator'}); - my $col_seperator = chr($self->{'column_seperator'}); - - my $peer_name = $self->peer_name; - my $peer_addr = $self->peer_addr; - my $peer_key = $self->peer_key; - - my $limit_start = 0; - if(defined $opt->{'limit_start'}) { $limit_start = $opt->{'limit_start'}; } - my $result; - # fix json output - $body =~ s/\],\n\]\n$/]]/mx; - eval { - $result = decode_json($body); - }; - if($@) { - my $message = "ERROR ".$@." in text: '".$body."'\" for statement: '$statement'\n"; - $self->{'logger'}->error($message) if $self->{'verbose'}; - if($self->{'errors_are_fatal'}) { - croak($message); - } - } - - # for querys with column header, no separate columns will be returned - if(!defined $keys) { - $self->{'logger'}->warn("got statement without Columns: header!") if $self->{'verbose'}; - if($self->{'warnings'}) { - carp("got statement without Columns: header! -> ".$statement); - } - $keys = shift @{$result}; - } - - # add peer information? - if(defined $with_peers and $with_peers == 1) { - unshift @{$keys}, 'peer_name'; - unshift @{$keys}, 'peer_addr'; - unshift @{$keys}, 'peer_key'; - - for my $row (@{$result}) { - unshift @{$row}, $peer_name; - unshift @{$row}, $peer_addr; - unshift @{$row}, $peer_key; - } - } - - # set some metadata - $self->{'meta_data'} = { - 'result_count' => scalar @${result}, - }; - - return({ keys => $keys, result => $result }); -} - -######################################## -sub _open { - my $self = shift; - my $statement = shift; - - # return the current socket in keep alive mode - if($self->{'keepalive'} and defined $self->{'sock'} and $self->{'sock'}->connected) { - $self->{'logger'}->debug("reusing old connection") if $self->{'verbose'}; - return($self->{'sock'}); - } - - my $sock = $self->{'CONNECTOR'}->_open(); - - # store socket for later retrieval - if($self->{'keepalive'}) { - $self->{'sock'} = $sock; - } - - $self->{'logger'}->debug("using new connection") if $self->{'verbose'}; - return($sock); -} - -######################################## -sub _close { - my $self = shift; - my $sock = shift; - undef $self->{'sock'}; - return($self->{'CONNECTOR'}->_close($sock)); -} - - -######################################## - -=head1 QUERY OPTIONS - -In addition to the normal query syntax from the livestatus addon, it is -possible to set column aliases in various ways. - -=head2 AddPeer - -adds the peers name, addr and key to the result set: - - my $hosts = $ml->selectall_hashref( - "GET hosts\nColumns: name alias state", - "name", - { AddPeer => 1 } - ); - -=head2 Backend - -send the query only to some specific backends. Only -useful when using multiple backends. 
-
-    my $hosts = $ml->selectall_arrayref(
-        "GET hosts\nColumns: name alias state",
-        { Backends => [ 'key1', 'key4' ] }
-    );
-
-=head2 Columns
-
-only return the given column indexes
-
-    my $array_ref = $ml->selectcol_arrayref(
-        "GET hosts\nColumns: name contacts",
-        { Columns => [2] }
-    );
-
-see L</selectcol_arrayref> for more examples
-
-=head2 Deepcopy
-
-deep copy/clone the result set.
-
-Only effective when using multiple backends and threads.
-This can be safely turned off if you don't change the
-result set.
-If you get an error like "Invalid value for shared scalar", this
-should be turned on.
-
-    my $array_ref = $ml->selectcol_arrayref(
-        "GET hosts\nColumns: name contacts",
-        { Deepcopy => 1 }
-    );
-
-=head2 Limit
-
-Just like the Limit: option from livestatus itself.
-In addition you can add a start,length limit.
-
-    my $array_ref = $ml->selectcol_arrayref(
-        "GET hosts\nColumns: name contacts",
-        { Limit => "10,20" }
-    );
-
-This example will return 20 rows starting at row 10. You will
-get rows 10-30.
-
-Cannot be combined with a Limit inside the query,
-because a Limit will be added automatically.
-
-Adding a limit this way will greatly increase performance and
-reduce memory usage.
-
-This option is multibackend safe, contrary to the "Limit: " part of a statement.
-Sending a statement like "GET...Limit: 10" with 3 backends will result in 30 rows.
-Using this option, you will receive only the first 10 rows.
-
-=head2 Rename
-
-see L</selectall_arrayref> for a detailed explanation
-
-=head2 Slice
-
-see L</selectall_arrayref> for a detailed explanation
-
-=head2 Sum
-
-The Sum option only applies when using multiple backends.
-The values from all backends will be summed up to a total.
-
-    my $stats = $ml->selectrow_hashref(
-        "GET hosts\nStats: state = 0\nStats: state = 1",
-        { Sum => 1 }
-    );
-
-=cut
-
-
-########################################
-# wrapper around _send_socket_do
-sub _send_socket {
-    my $self      = shift;
-    my $statement = shift;
-
-    my $retries = 0;
-    my($status, $msg, $recv);
-
-
-    # try to avoid connection errors
-    eval {
-        local $SIG{PIPE} = sub {
-            die("broken pipe");
-            $self->{'logger'}->debug("broken pipe, closing socket") if $self->{'verbose'};
-            $self->_close($self->{'sock'});
-        };
-
-        if($self->{'retries_on_connection_error'} <= 0) {
-            ($status, $msg, $recv) = $self->_send_socket_do($statement);
-            return;
-        }
-
-        while((!defined $status or ($status == 491 or $status == 497 or $status == 500)) and $retries < $self->{'retries_on_connection_error'}) {
-            $retries++;
-            ($status, $msg, $recv) = $self->_send_socket_do($statement);
-            $self->{'logger'}->debug('query status '.$status) if $self->{'verbose'};
-            if($status == 491 or $status == 497 or $status == 500) {
-                $self->{'logger'}->debug('got status '.$status.' retrying in '.$self->{'retry_interval'}.'
seconds') if $self->{'verbose'}; - $self->_close(); - sleep($self->{'retry_interval'}) if $retries < $self->{'retries_on_connection_error'}; - } - } - }; - if($@) { - $self->{'logger'}->debug("try 1 failed: $@") if $self->{'verbose'}; - if(defined $@ and $@ =~ /broken\ pipe/mx) { - return $self->_send_socket_do($statement); - } - croak($@) if $self->{'errors_are_fatal'}; - } - - croak($msg) if($status >= 400 and $self->{'errors_are_fatal'}); - - return($status, $msg, $recv); -} - -######################################## -sub _send_socket_do { - my $self = shift; - my $statement = shift; - my($recv,$header); - - my $sock = $self->_open() or return(491, $self->_get_error(491), $!); - utf8::decode($statement); - print $sock encode('utf-8' => $statement) or return($self->_socket_error($statement, $sock, 'write to socket failed: '.$!)); - - print $sock "\n"; - - # COMMAND statements never return something - if($statement =~ m/^COMMAND/mx) { - return('201', $self->_get_error(201), undef); - } - - $sock->read($header, 16) or return($self->_socket_error($statement, $sock, 'reading header from socket failed, check your livestatus logfile: '.$!)); - $self->{'logger'}->debug("header: $header") if $self->{'verbose'}; - my($status, $msg, $content_length) = $self->_parse_header($header, $sock); - return($status, $msg, undef) if !defined $content_length; - if($content_length > 0) { - $sock->read($recv, $content_length) or return($self->_socket_error($statement, $sock, 'reading body from socket failed')); - } - - $self->_close($sock) unless $self->{'keepalive'}; - return($status, $msg, $recv); -} - -######################################## -sub _socket_error { - my $self = shift; - my $statement = shift; - my $sock = shift; - my $body = shift; - - my $message = "\n"; - $message .= "peer ".Dumper($self->peer_name); - $message .= "statement ".Dumper($statement); - $message .= "message ".Dumper($body); - - $self->{'logger'}->error($message) if $self->{'verbose'}; - - if($self->{'retries_on_connection_error'} <= 0) { - if($self->{'errors_are_fatal'}) { - croak($message); - } - else { - carp($message); - } - } - $self->_close(); - return(500, $self->_get_error(500), $message); -} - -######################################## -sub _parse_header { - my $self = shift; - my $header = shift; - my $sock = shift; - - if(!defined $header) { - return(497, $self->_get_error(497), undef); - } - - my $headerlength = length($header); - if($headerlength != 16) { - return(498, $self->_get_error(498)."\ngot: ".$header.<$sock>, undef); - } - chomp($header); - - my $status = substr($header,0,3); - my $content_length = substr($header,5); - if($content_length !~ m/^\s*(\d+)$/mx) { - return(499, $self->_get_error(499)."\ngot: ".$header.<$sock>, undef); - } else { - $content_length = $1; - } - - return($status, $self->_get_error($status), $content_length); -} - -######################################## - -=head1 COLUMN ALIAS - -In addition to the normal query syntax from the livestatus addon, it is -possible to set column aliases in various ways. 
-
-A valid Columns: Header could look like this:
-
- my $hosts = $ml->selectall_arrayref(
- "GET hosts\nColumns: state as status"
- );
-
-Stats queries can be aliased too:
-
- my $stats = $ml->selectall_arrayref(
- "GET hosts\nStats: state = 0 as up"
- );
-
-This syntax is available for: Stats, StatsAnd, StatsOr and StatsGroupBy
-
-
-An alternative way to set column aliases is to define rename option key/value
-pairs:
-
- my $hosts = $ml->selectall_arrayref(
- "GET hosts\nColumns: name", {
- rename => { 'name' => 'hostname' }
- }
- );
-
-=cut
-
-########################################
-sub _extract_keys_from_stats_statement {
- my $self = shift;
- my $statement = shift;
-
- my(@header, $new_statement);
-
- for my $line (split/\n/mx, $statement) {
- if($line =~ m/^Stats:\ (.*)\s+as\s+(.*)$/mxi) {
- push @header, $2;
- $line = 'Stats: '.$1;
- }
- elsif($line =~ m/^Stats:\ (.*)$/mx) {
- push @header, $1;
- }
-
- if($line =~ m/^StatsAnd:\ (\d+)\s+as\s+(.*)$/mx) {
- for(my $x = 0; $x < $1; $x++) {
- pop @header;
- }
- $line = 'StatsAnd: '.$1;
- push @header, $2;
- }
- elsif($line =~ m/^StatsAnd:\ (\d+)$/mx) {
- my @to_join;
- for(my $x = 0; $x < $1; $x++) {
- unshift @to_join, pop @header;
- }
- push @header, join(' && ', @to_join);
- }
-
- if($line =~ m/^StatsOr:\ (\d+)\s+as\s+(.*)$/mx) {
- for(my $x = 0; $x < $1; $x++) {
- pop @header;
- }
- $line = 'StatsOr: '.$1;
- push @header, $2;
- }
- elsif($line =~ m/^StatsOr:\ (\d+)$/mx) {
- my @to_join;
- for(my $x = 0; $x < $1; $x++) {
- unshift @to_join, pop @header;
- }
- push @header, join(' || ', @to_join);
- }
-
- # StatsGroupBy headers are always sent first
- if($line =~ m/^StatsGroupBy:\ (.*)\s+as\s+(.*)$/mxi) {
- unshift @header, $2;
- $line = 'StatsGroupBy: '.$1;
- }
- elsif($line =~ m/^StatsGroupBy:\ (.*)$/mx) {
- unshift @header, $1;
- }
- $new_statement .= $line."\n";
- }
-
- return($new_statement, \@header);
-}
-
-########################################
-sub _extract_keys_from_columns_header {
- my $self = shift;
- my $statement = shift;
-
- my(@header, $new_statement);
- for my $line (split/\n/mx, $statement) {
- if($line =~ m/^Columns:\s+(.*)$/mx) {
- for my $column (split/\s+/mx, $1) {
- if($column eq 'as') {
- pop @header;
- } else {
- push @header, $column;
- }
- }
- $line =~ s/\s+as\s+([^\s]+)/\ /gmx;
- }
- $new_statement .= $line."\n";
- }
-
- return($new_statement, \@header);
-}
-
-########################################
-
-=head1 ERROR HANDLING
-
-Error handling can be done like this:
-
- use Monitoring::Livestatus;
- my $ml = Monitoring::Livestatus->new(
- socket => '/var/lib/livestatus/livestatus.sock'
- );
- $ml->errors_are_fatal(0);
- my $hosts = $ml->selectall_arrayref("GET hosts");
- if($Monitoring::Livestatus::ErrorCode) {
- croak($Monitoring::Livestatus::ErrorMessage);
- }
-
-=cut
-sub _get_error {
- my $self = shift;
- my $code = shift;
-
- my $codes = {
- '200' => 'OK. Response contains the queried data.',
- '201' => 'COMMANDs never return something',
- '400' => 'The request contains an invalid header.',
- '401' => 'The request contains an invalid header.',
- '402' => 'The request is completely invalid.',
- '403' => 'The request is incomplete.',
- '404' => 'The target of the GET has not been found (e.g. the table).',
- '405' => 'A non-existing column was being referred to',
- '490' => 'no query',
- '491' => 'failed to connect',
- '492' => 'Separators not allowed in statement. Please use the separator options in new()',
- '493' => 'OutputFormat not allowed in statement. Header will be set automatically',
- '494' => 'ColumnHeaders not allowed in statement. Header will be set automatically',
- '495' => 'ResponseHeader not allowed in statement. Header will be set automatically',
- '496' => 'Keepalive not allowed in statement. Please use the keepalive option in new()',
- '497' => 'got no header',
- '498' => 'header is not exactly 16 bytes long',
- '499' => 'not a valid header (no content-length)',
- '500' => 'socket error',
- };
-
- confess('non-existent error code: '.$code) if !defined $codes->{$code};
-
- return($codes->{$code});
-}
-
-########################################
-sub _get_peers {
- my $self = shift;
-
- # set options for our peer(s)
- my %options;
- for my $opt_key (keys %{$self}) {
- $options{$opt_key} = $self->{$opt_key};
- }
-
- my $peers = [];
-
- # check if the supplied peer is a socket or a server address
- if(defined $self->{'peer'}) {
- if(ref $self->{'peer'} eq '') {
- my $name = $self->{'name'} || "".$self->{'peer'};
- if(index($self->{'peer'}, ':') > 0) {
- push @{$peers}, { 'peer' => "".$self->{'peer'}, type => 'INET', name => $name };
- } else {
- push @{$peers}, { 'peer' => "".$self->{'peer'}, type => 'UNIX', name => $name };
- }
- }
- elsif(ref $self->{'peer'} eq 'ARRAY') {
- for my $peer (@{$self->{'peer'}}) {
- if(ref $peer eq 'HASH') {
- next if !defined $peer->{'peer'};
- $peer->{'name'} = "".$peer->{'peer'} unless defined $peer->{'name'};
- if(!defined $peer->{'type'}) {
- $peer->{'type'} = 'UNIX';
- if(index($peer->{'peer'}, ':') >= 0) {
- $peer->{'type'} = 'INET';
- }
- }
- push @{$peers}, $peer;
- } else {
- my $type = 'UNIX';
- if(index($peer, ':') >= 0) {
- $type = 'INET';
- }
- push @{$peers}, { 'peer' => "".$peer, type => $type, name => "".$peer };
- }
- }
- }
- elsif(ref $self->{'peer'} eq 'HASH') {
- for my $peer (keys %{$self->{'peer'}}) {
- my $name = $self->{'peer'}->{$peer};
- my $type = 'UNIX';
- if(index($peer, ':') >= 0) {
- $type = 'INET';
- }
- push @{$peers}, { 'peer' => "".$peer, type => $type, name => "".$name };
- }
- } else {
- confess("type ".(ref $self->{'peer'})." is not supported for peer option");
- }
- }
- if(defined $self->{'socket'}) {
- my $name = $self->{'name'} || "".$self->{'socket'};
- push @{$peers}, { 'peer' => "".$self->{'socket'}, type => 'UNIX', name => $name };
- }
- if(defined $self->{'server'}) {
- my $name = $self->{'name'} || "".$self->{'server'};
- push @{$peers}, { 'peer' => "".$self->{'server'}, type => 'INET', name => $name };
- }
-
- # check if we got a peer
- if(scalar @{$peers} == 0) {
- croak('please specify at least one peer, socket or server');
- }
-
- # clean up
- delete $options{'peer'};
- delete $options{'socket'};
- delete $options{'server'};
-
- return $peers;
-}
-
-
-########################################
-sub _lowercase_and_verify_options {
- my $self = shift;
- my $opts = shift;
- my $return = {};
-
- # list of allowed options
- my $allowed_options = {
- 'addpeer' => 1,
- 'backend' => 1,
- 'columns' => 1,
- 'deepcopy' => 1,
- 'header' => 1,
- 'limit' => 1,
- 'limit_start' => 1,
- 'limit_length' => 1,
- 'rename' => 1,
- 'slice' => 1,
- 'sum' => 1,
- 'callbacks' => 1,
- };
-
- for my $key (keys %{$opts}) {
- if($self->{'warnings'} and !defined $allowed_options->{lc $key}) {
- carp("unknown option used: $key - please use only: ".join(", ", keys %{$allowed_options}));
- }
- $return->{lc $key} = $opts->{$key};
- }
-
- # set limits
- if(defined $return->{'limit'}) {
- if(index($return->{'limit'}, ',') != -1) {
- my($limit_start,$limit_length) = split /,/mx, $return->{'limit'};
- $return->{'limit_start'} = $limit_start;
- $return->{'limit_length'} = $limit_length;
- }
- else {
- $return->{'limit_start'} = 0;
- $return->{'limit_length'} = $return->{'limit'};
- }
- delete $return->{'limit'};
- }
-
- return($return);
-}
-
-########################################
-sub _log_statement {
- my $self = shift;
- my $statement = shift;
- my $opt = shift;
- my $limit = shift;
- my $d = Data::Dumper->new([$opt]);
- $d->Indent(0);
- my $optstring = $d->Dump;
- $optstring =~ s/^\$VAR1\s+=\s+//mx;
- $optstring =~ s/;$//mx;
-
- # remove empty lines from statement
- $statement =~ s/\n+/\n/gmx;
-
- my $cleanstatement = $statement;
- $cleanstatement =~ s/\n/\\n/gmx;
- $self->{'logger'}->debug('selectall_arrayref("'.$cleanstatement.'", '.$optstring.', '.$limit.')');
- return 1;
-}
-
-########################################
-
-1;
-
-=head1 EXAMPLES
-
-=head2 Multibackend Configuration
-
- use Monitoring::Livestatus;
- my $ml = Monitoring::Livestatus->new(
- name => 'multiple connector',
- verbose => 0,
- keepalive => 1,
- peer => [
- {
- name => 'DMZ Monitoring',
- peer => '50.50.50.50:9999',
- },
- {
- name => 'Local Monitoring',
- peer => '/tmp/livestatus.socket',
- },
- {
- name => 'Special Monitoring',
- peer => '100.100.100.100:9999',
- }
- ],
- );
- my $hosts = $ml->selectall_arrayref("GET hosts");
-
-=head1 SEE ALSO
-
-For more information about the query syntax and the livestatus plugin installation
-see the Livestatus page: http://mathias-kettner.de/checkmk_livestatus.html
-
-=head1 AUTHOR
-
-Sven Nierlein, E<lt>nierlein@cpan.orgE<gt>
-
-=head1 COPYRIGHT AND LICENSE
-
-Copyright (C) 2009 by Sven Nierlein
-
-This library is free software; you can redistribute it and/or modify
-it under the same terms as Perl itself.
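A small sketch combining the non-fatal error handling from the ERROR HANDLING
section with the multibackend configuration above; the peer addresses are the
placeholder values from that example and would need to be adjusted to a real
environment:

    use Monitoring::Livestatus;
    my $ml = Monitoring::Livestatus->new(
        keepalive => 1,
        peer      => [ '50.50.50.50:9999', '/tmp/livestatus.socket' ],
    );
    $ml->errors_are_fatal(0);
    my $hosts = $ml->selectall_arrayref("GET hosts\nColumns: name state");
    if($Monitoring::Livestatus::ErrorCode) {
        warn 'query failed: '.$Monitoring::Livestatus::ErrorMessage;
    }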
-
-=cut
-
-__END__
diff -Nru check-mk-1.2.2p3/=unpacked-tar10=/api/perl/Makefile.PL check-mk-1.2.6p12/=unpacked-tar10=/api/perl/Makefile.PL
--- check-mk-1.2.2p3/=unpacked-tar10=/api/perl/Makefile.PL 2012-10-04 13:12:37.000000000 +0000
+++ check-mk-1.2.6p12/=unpacked-tar10=/api/perl/Makefile.PL 1970-01-01 00:00:00.000000000 +0000
@@ -1,41 +0,0 @@
-# IMPORTANT: if you delete this file your app will not work as
-# expected. you have been warned
-use inc::Module::Install;
-
-name 'Monitoring-Livestatus';
-all_from 'lib/Monitoring/Livestatus.pm';
-perl_version '5.006';
-license 'perl';
-
-resources(
- 'homepage', => 'http://search.cpan.org/dist/Monitoring-Livestatus/',
- 'bugtracker' => 'http://github.com/sni/Monitoring-Livestatus/issues',
- 'repository', => 'http://github.com/sni/Monitoring-Livestatus',
-);
-
-
-requires 'IO::Socket::UNIX';
-requires 'IO::Socket::INET';
-requires 'Digest::MD5';
-requires 'Scalar::Util';
-requires 'Test::More' => '0.87';
-requires 'Thread::Queue' => '2.11';
-requires 'utf8';
-requires 'Encode';
-requires 'JSON::XS';
-
-# test requirements
-# these requirements still make it into the META.yml, so they are commented so far
-#feature ('authortests',
-# -default => 0,
-# 'File::Copy::Recursive' => 0,
-# 'Test::Pod' => 1.14,
-# 'Test::Perl::Critic' => 0,
-# 'Test::Pod::Coverage' => 0,
-# 'Perl::Critic::Policy::Dynamic::NoIndirect' => 0,
-# 'Perl::Critic::Policy::NamingConventions::ProhibitMixedCaseSubs' => 0,
-# 'Perl::Critic::Policy::ValuesAndExpressions::ProhibitAccessOfPrivateData' => 0,
-#);
-
-auto_install;
-WriteAll;
diff -Nru check-mk-1.2.2p3/=unpacked-tar10=/api/perl/MANIFEST check-mk-1.2.6p12/=unpacked-tar10=/api/perl/MANIFEST
--- check-mk-1.2.2p3/=unpacked-tar10=/api/perl/MANIFEST 2012-10-04 13:12:37.000000000 +0000
+++ check-mk-1.2.6p12/=unpacked-tar10=/api/perl/MANIFEST 1970-01-01 00:00:00.000000000 +0000
@@ -1,38 +0,0 @@
-Changes
-examples/dump.pl
-examples/test.pl
-inc/Module/AutoInstall.pm
-inc/Module/Install.pm
-inc/Module/Install/AutoInstall.pm
-inc/Module/Install/Base.pm
-inc/Module/Install/Can.pm
-inc/Module/Install/Fetch.pm
-inc/Module/Install/Include.pm
-inc/Module/Install/Makefile.pm
-inc/Module/Install/Metadata.pm
-inc/Module/Install/Win32.pm
-inc/Module/Install/WriteAll.pm
-lib/Monitoring/Livestatus.pm
-lib/Monitoring/Livestatus/INET.pm
-lib/Monitoring/Livestatus/MULTI.pm
-lib/Monitoring/Livestatus/UNIX.pm
-Makefile.PL
-MANIFEST This list of files
-META.yml
-README
-t/01-Monitoring-Livestatus-basic_tests.t
-t/02-Monitoring-Livestatus-internals.t
-t/03-Monitoring-Livestatus-MULTI-internals.t
-t/20-Monitoring-Livestatus-test_socket.t
-t/21-Monitoring-Livestatus-INET.t
-t/22-Monitoring-Livestatus-UNIX.t
-t/30-Monitoring-Livestatus-live-test.t
-t/31-Monitoring-Livestatus-MULTI-live-test.t
-t/32-Monitoring-Livestatus-backend-test.t
-t/33-Monitoring-Livestatus-test_socket_timeout.t
-t/34-Monitoring-Livestatus-utf8_support.t
-t/35-Monitoring-Livestatus-callbacks_support.t
-t/97-Pod.t
-t/98-Pod-Coverage.t
-t/99-Perl-Critic.t
-t/perlcriticrc
diff -Nru check-mk-1.2.2p3/=unpacked-tar10=/api/perl/META.yml check-mk-1.2.6p12/=unpacked-tar10=/api/perl/META.yml
--- check-mk-1.2.2p3/=unpacked-tar10=/api/perl/META.yml 2012-10-04 13:12:37.000000000 +0000
+++ check-mk-1.2.6p12/=unpacked-tar10=/api/perl/META.yml 1970-01-01 00:00:00.000000000 +0000
@@ -1,37 +0,0 @@
----
-abstract: 'Perl API for check_mk livestatus to access runtime'
-author:
- - 'Sven Nierlein, <nierlein@cpan.org>'
-build_requires:
- ExtUtils::MakeMaker: 6.42
-configure_requires:
- ExtUtils::MakeMaker: 6.42
-distribution_type: module -generated_by: 'Module::Install version 1.00' -license: perl -meta-spec: - url: http://module-build.sourceforge.net/META-spec-v1.4.html - version: 1.4 -name: Monitoring-Livestatus -no_index: - directory: - - examples - - inc - - t -requires: - Digest::MD5: 0 - Encode: 0 - IO::Socket::INET: 0 - IO::Socket::UNIX: 0 - JSON::XS: 0 - Scalar::Util: 0 - Test::More: 0.87 - Thread::Queue: 2.11 - perl: 5.6.0 - utf8: 0 -resources: - bugtracker: http://github.com/sni/Monitoring-Livestatus/issues - homepage: http://search.cpan.org/dist/Monitoring-Livestatus/ - license: http://dev.perl.org/licenses/ - repository: http://github.com/sni/Monitoring-Livestatus -version: 0.74 diff -Nru check-mk-1.2.2p3/=unpacked-tar10=/api/perl/README check-mk-1.2.6p12/=unpacked-tar10=/api/perl/README --- check-mk-1.2.2p3/=unpacked-tar10=/api/perl/README 2012-10-04 13:12:37.000000000 +0000 +++ check-mk-1.2.6p12/=unpacked-tar10=/api/perl/README 1970-01-01 00:00:00.000000000 +0000 @@ -1,32 +0,0 @@ -Monitoring-Livestatus -===================== - - Monitoring::Livestatus can be used to access the data of the check_mk - Livestatus Addon for Nagios and Icinga. - -INSTALLATION - - To install this module type the following: - - perl Makefile.PL - make - make test - make install - -DEPENDENCIES - - This module requires no other modules. - -SYNOPSIS - my $ml = Monitoring::Livestatus->new( socket => '/var/lib/livestatus/livestatus.sock' ); - my $hosts = $ml->selectall_arrayref("GET hosts"); - -AUTHOR - Sven Nierlein - -COPYRIGHT AND LICENCE - - Copyright (C) 2009 by Sven Nierlein - - This library is free software; you can redistribute it and/or modify - it under the same terms as Perl itself. diff -Nru check-mk-1.2.2p3/=unpacked-tar10=/api/perl/t/01-Monitoring-Livestatus-basic_tests.t check-mk-1.2.6p12/=unpacked-tar10=/api/perl/t/01-Monitoring-Livestatus-basic_tests.t --- check-mk-1.2.2p3/=unpacked-tar10=/api/perl/t/01-Monitoring-Livestatus-basic_tests.t 2012-10-04 13:12:37.000000000 +0000 +++ check-mk-1.2.6p12/=unpacked-tar10=/api/perl/t/01-Monitoring-Livestatus-basic_tests.t 1970-01-01 00:00:00.000000000 +0000 @@ -1,149 +0,0 @@ -#!/usr/bin/env perl - -######################### - -use strict; -use Test::More; -use File::Temp; -use Data::Dumper; -use IO::Socket::UNIX qw( SOCK_STREAM SOMAXCONN ); -use_ok('Monitoring::Livestatus'); - -BEGIN { - if( $^O eq 'MSWin32' ) { - plan skip_all => 'no sockets on windows'; - } - else { - plan tests => 35; - } -} - -######################### -# get a temp file from File::Temp and replace it with our socket -my $fh = File::Temp->new(UNLINK => 0); -my $socket_path = $fh->filename; -unlink($socket_path); -my $listener = IO::Socket::UNIX->new( - Type => SOCK_STREAM, - Listen => SOMAXCONN, - Local => $socket_path, - ) or die("failed to open $socket_path as test socket: $!"); -######################### -# create object with single arg -my $ml = Monitoring::Livestatus->new( $socket_path ); -isa_ok($ml, 'Monitoring::Livestatus', 'single args'); -is($ml->peer_name(), $socket_path, 'get peer_name()'); -is($ml->peer_addr(), $socket_path, 'get peer_addr()'); - -######################### -# create object with hash args -my $line_seperator = 10; -my $column_seperator = 0; -$ml = Monitoring::Livestatus->new( - verbose => 0, - socket => $socket_path, - line_seperator => $line_seperator, - column_seperator => $column_seperator, - ); -isa_ok($ml, 'Monitoring::Livestatus', 'new hash args'); -is($ml->peer_name(), $socket_path, 'get peer_name()'); -is($ml->peer_addr(), $socket_path, 'get 
peer_addr()'); - -######################### -# create object with peer arg -$ml = Monitoring::Livestatus->new( - peer => $socket_path, - ); -isa_ok($ml, 'Monitoring::Livestatus', 'peer hash arg socket'); -is($ml->peer_name(), $socket_path, 'get peer_name()'); -is($ml->peer_addr(), $socket_path, 'get peer_addr()'); -isa_ok($ml->{'CONNECTOR'}, 'Monitoring::Livestatus::UNIX', 'peer backend UNIX'); - -######################### -# create object with peer arg -my $server = 'localhost:12345'; -$ml = Monitoring::Livestatus->new( - peer => $server, - ); -isa_ok($ml, 'Monitoring::Livestatus', 'peer hash arg server'); -is($ml->peer_name(), $server, 'get peer_name()'); -is($ml->peer_addr(), $server, 'get peer_addr()'); -isa_ok($ml->{'CONNECTOR'}, 'Monitoring::Livestatus::INET', 'peer backend INET'); - -######################### -# create multi object with peers -$ml = Monitoring::Livestatus->new( - peer => [ $server, $socket_path ], - ); -isa_ok($ml, 'Monitoring::Livestatus', 'peer hash arg multi'); -my @names = $ml->peer_name(); -my @addrs = $ml->peer_addr(); -my $name = $ml->peer_name(); -my $expect = [ $server, $socket_path ]; -is_deeply(\@names, $expect, 'list context get peer_name()') or diag("got peer names: ".Dumper(\@names)."but expected: ".Dumper($expect)); -is($name, 'multiple connector', 'scalar context get peer_name()') or diag("got peer name: ".Dumper($name)."but expected: ".Dumper('multiple connector')); -is_deeply(\@addrs, $expect, 'list context get peer_addr()') or diag("got peer addrs: ".Dumper(\@addrs)."but expected: ".Dumper($expect)); - -######################### -# create multi object with peers and name -$ml = Monitoring::Livestatus->new( - peer => [ $server, $socket_path ], - name => 'test multi', - ); -isa_ok($ml, 'Monitoring::Livestatus', 'peer hash arg multi with name'); -$name = $ml->peer_name(); -is($name, 'test multi', 'peer_name()'); - -######################### -$ml = Monitoring::Livestatus->new( - peer => [ $socket_path ], - verbose => 0, - keepalive => 1, - logger => undef, - ); -isa_ok($ml, 'Monitoring::Livestatus', 'peer hash arg multi with keepalive'); -is($ml->peer_name(), $socket_path, 'get peer_name()'); -is($ml->peer_addr(), $socket_path, 'get peer_addr()'); - -######################### -# timeout checks -$ml = Monitoring::Livestatus->new( - peer => [ $socket_path ], - verbose => 0, - timeout => 13, - logger => undef, - ); -isa_ok($ml, 'Monitoring::Livestatus', 'peer hash arg multi with general timeout'); -is($ml->peer_name(), $socket_path, 'get peer_name()'); -is($ml->peer_addr(), $socket_path, 'get peer_addr()'); -is($ml->{'connect_timeout'}, 13, 'connect_timeout'); -is($ml->{'query_timeout'}, 13, 'query_timeout'); - -$ml = Monitoring::Livestatus->new( - peer => [ $socket_path ], - verbose => 0, - query_timeout => 14, - connect_timeout => 17, - logger => undef, - ); -isa_ok($ml, 'Monitoring::Livestatus', 'peer hash arg multi with general timeout'); -is($ml->peer_name(), $socket_path, 'get peer_name()'); -is($ml->peer_addr(), $socket_path, 'get peer_addr()'); -is($ml->{'connect_timeout'}, 17, 'connect_timeout'); -is($ml->{'query_timeout'}, 14, 'query_timeout'); - - -######################### -# error retry -$ml = Monitoring::Livestatus->new( - peer => [ $socket_path ], - verbose => 0, - retries_on_connection_error => 3, - retry_interval => 1, - logger => undef, - ); -isa_ok($ml, 'Monitoring::Livestatus', 'peer hash arg multi with error retry'); - -######################### -# cleanup -unlink($socket_path); diff -Nru 
check-mk-1.2.2p3/=unpacked-tar10=/api/perl/t/02-Monitoring-Livestatus-internals.t check-mk-1.2.6p12/=unpacked-tar10=/api/perl/t/02-Monitoring-Livestatus-internals.t --- check-mk-1.2.2p3/=unpacked-tar10=/api/perl/t/02-Monitoring-Livestatus-internals.t 2012-10-04 13:12:37.000000000 +0000 +++ check-mk-1.2.6p12/=unpacked-tar10=/api/perl/t/02-Monitoring-Livestatus-internals.t 1970-01-01 00:00:00.000000000 +0000 @@ -1,148 +0,0 @@ -#!/usr/bin/env perl - -######################### - -use strict; -use Test::More; -use File::Temp; -use Data::Dumper; -use IO::Socket::UNIX qw( SOCK_STREAM SOMAXCONN ); -use_ok('Monitoring::Livestatus'); - -BEGIN { - if( $^O eq 'MSWin32' ) { - plan skip_all => 'no sockets on windows'; - } - else { - plan tests => 14; - } -} - -######################### -# get a temp file from File::Temp and replace it with our socket -my $fh = File::Temp->new(UNLINK => 0); -my $socket_path = $fh->filename; -unlink($socket_path); -my $listener = IO::Socket::UNIX->new( - Type => SOCK_STREAM, - Listen => SOMAXCONN, - Local => $socket_path, - ) or die("failed to open $socket_path as test socket: $!"); - -######################### -# create object with single arg -my $ml = Monitoring::Livestatus->new( 'localhost:12345' ); -isa_ok($ml, 'Monitoring::Livestatus', 'single args server'); -isa_ok($ml->{'CONNECTOR'}, 'Monitoring::Livestatus::INET', 'single args server peer'); -is($ml->{'CONNECTOR'}->peer_name, 'localhost:12345', 'single args server peer name'); -is($ml->{'CONNECTOR'}->peer_addr, 'localhost:12345', 'single args server peer addr'); - -######################### -# create object with single arg -$ml = Monitoring::Livestatus->new( $socket_path ); -isa_ok($ml, 'Monitoring::Livestatus', 'single args socket'); -isa_ok($ml->{'CONNECTOR'}, 'Monitoring::Livestatus::UNIX', 'single args socket peer'); -is($ml->{'CONNECTOR'}->peer_name, $socket_path, 'single args socket peer name'); -is($ml->{'CONNECTOR'}->peer_addr, $socket_path, 'single args socket peer addr'); - -my $header = "404 43\n"; -my($error,$error_msg) = $ml->_parse_header($header); -is($error, '404', 'error code 404'); -isnt($error_msg, undef, 'error code 404 message'); - -######################### -my $stats_query1 = "GET services -Stats: state = 0 -Stats: state = 1 -Stats: state = 2 -Stats: state = 3 -Stats: state = 4 -Stats: host_state != 0 -Stats: state = 1 -StatsAnd: 2 -Stats: host_state != 0 -Stats: state = 2 -StatsAnd: 2 -Stats: host_state != 0 -Stats: state = 3 -StatsAnd: 2 -Stats: host_state != 0 -Stats: state = 3 -Stats: active_checks = 1 -StatsAnd: 3 -Stats: state = 3 -Stats: active_checks = 1 -StatsOr: 2"; -my @expected_keys1 = ( - 'state = 0', - 'state = 1', - 'state = 2', - 'state = 3', - 'state = 4', - 'host_state != 0 && state = 1', - 'host_state != 0 && state = 2', - 'host_state != 0 && state = 3', - 'host_state != 0 && state = 3 && active_checks = 1', - 'state = 3 || active_checks = 1', - ); -my @got_keys1 = @{$ml->_extract_keys_from_stats_statement($stats_query1)}; -is_deeply(\@got_keys1, \@expected_keys1, 'statsAnd, statsOr query keys') - or ( diag('got keys: '.Dumper(\@got_keys1)) ); - - -######################### -my $stats_query2 = "GET services -Stats: state = 0 as all_ok -Stats: state = 1 as all_warning -Stats: state = 2 as all_critical -Stats: state = 3 as all_unknown -Stats: state = 4 as all_pending -Stats: host_state != 0 -Stats: state = 1 -StatsAnd: 2 as all_warning_on_down_hosts -Stats: host_state != 0 -Stats: state = 2 -StatsAnd: 2 as all_critical_on_down_hosts -Stats: host_state != 0 -Stats: state = 3 
-StatsAnd: 2 as all_unknown_on_down_hosts -Stats: host_state != 0 -Stats: state = 3 -Stats: active_checks_enabled = 1 -StatsAnd: 3 as all_unknown_active_on_down_hosts -Stats: state = 3 -Stats: active_checks_enabled = 1 -StatsOr: 2 as all_active_or_unknown"; -my @expected_keys2 = ( - 'all_ok', - 'all_warning', - 'all_critical', - 'all_unknown', - 'all_pending', - 'all_warning_on_down_hosts', - 'all_critical_on_down_hosts', - 'all_unknown_on_down_hosts', - 'all_unknown_active_on_down_hosts', - 'all_active_or_unknown', - ); -my @got_keys2 = @{$ml->_extract_keys_from_stats_statement($stats_query2)}; -is_deeply(\@got_keys2, \@expected_keys2, 'stats query keys2') - or ( diag('got keys: '.Dumper(\@got_keys2)) ); - - -######################### -my $normal_query1 = "GET services -Columns: host_name as host is_flapping description as name state -"; -my @expected_keys3 = ( - 'host', - 'is_flapping', - 'name', - 'state', - ); -my @got_keys3 = @{$ml->_extract_keys_from_columns_header($normal_query1)}; -is_deeply(\@got_keys3, \@expected_keys3, 'normal query keys') - or ( diag('got keys: '.Dumper(\@got_keys3)) ); - -######################### -unlink($socket_path); diff -Nru check-mk-1.2.2p3/=unpacked-tar10=/api/perl/t/03-Monitoring-Livestatus-MULTI-internals.t check-mk-1.2.6p12/=unpacked-tar10=/api/perl/t/03-Monitoring-Livestatus-MULTI-internals.t --- check-mk-1.2.2p3/=unpacked-tar10=/api/perl/t/03-Monitoring-Livestatus-MULTI-internals.t 2012-10-04 13:12:37.000000000 +0000 +++ check-mk-1.2.6p12/=unpacked-tar10=/api/perl/t/03-Monitoring-Livestatus-MULTI-internals.t 1970-01-01 00:00:00.000000000 +0000 @@ -1,215 +0,0 @@ -#!/usr/bin/env perl - -######################### - -use strict; -use Test::More; -use Data::Dumper; -use File::Temp; -use IO::Socket::UNIX qw( SOCK_STREAM SOMAXCONN ); -use_ok('Monitoring::Livestatus::MULTI'); - -BEGIN { - if( $^O eq 'MSWin32' ) { - plan skip_all => 'no sockets on windows'; - } - else { - plan tests => 57; - } -} - -######################### -# create 2 test sockets -# get a temp file from File::Temp and replace it with our socket -my $fh = File::Temp->new(UNLINK => 0); -my $socket_path1 = $fh->filename; -unlink($socket_path1); -my $listener1 = IO::Socket::UNIX->new( - Type => SOCK_STREAM, - Listen => SOMAXCONN, - Local => $socket_path1, - ) or die("failed to open $socket_path1 as test socket: $!"); - -$fh = File::Temp->new(UNLINK => 0); -my $socket_path2 = $fh->filename; -unlink($socket_path2); -my $listener2 = IO::Socket::UNIX->new( - Type => SOCK_STREAM, - Listen => SOMAXCONN, - Local => $socket_path2, - ) or die("failed to open $socket_path2 as test socket: $!"); - -######################### -# test the _merge_answer -my $mergetests = [ - { # simple test for sliced selectall_arrayref - in => { '820e03551b95b42ec037c87aed9b8f4a' => [ { 'description' => 'test_flap_07', 'host_name' => 'test_host_000', 'state' => '0' }, { 'description' => 'test_flap_11', 'host_name' => 'test_host_000', 'state' => '0' } ], - '35bbb11a888f66131d429efd058fb141' => [ { 'description' => 'test_ok_00', 'host_name' => 'test_host_000', 'state' => '0' }, { 'description' => 'test_ok_01', 'host_name' => 'test_host_000', 'state' => '0' } ], - '70ea8fa14abb984761bdd45ef27685b0' => [ { 'description' => 'test_critical_00', 'host_name' => 'test_host_000', 'state' => '2' }, { 'description' => 'test_critical_19', 'host_name' => 'test_host_000', 'state' => '2' } ] - }, - exp => [ - { 'description' => 'test_flap_07', 'host_name' => 'test_host_000', 'state' => '0' }, - { 'description' => 'test_flap_11', 
'host_name' => 'test_host_000', 'state' => '0' }, - { 'description' => 'test_ok_00', 'host_name' => 'test_host_000', 'state' => '0' }, - { 'description' => 'test_ok_01', 'host_name' => 'test_host_000', 'state' => '0' }, - { 'description' => 'test_critical_00', 'host_name' => 'test_host_000', 'state' => '2' }, - { 'description' => 'test_critical_19', 'host_name' => 'test_host_000', 'state' => '2' }, - ] - }, -]; - -######################### -# test object creation -my $ml = Monitoring::Livestatus::MULTI->new( [ $socket_path1, $socket_path2 ] ); -isa_ok($ml, 'Monitoring::Livestatus', 'single args sockets'); -for my $peer (@{$ml->{'peers'}}) { - isa_ok($peer, 'Monitoring::Livestatus::UNIX', 'single args sockets peer'); -} - -$ml = Monitoring::Livestatus::MULTI->new( [$socket_path1] ); -isa_ok($ml, 'Monitoring::Livestatus', 'single array args socket'); -for my $peer (@{$ml->{'peers'}}) { - isa_ok($peer, 'Monitoring::Livestatus::UNIX', 'single array args socket peer'); - is($peer->peer_addr, $socket_path1, 'single arrays args socket peer addr'); - is($peer->peer_name, $socket_path1, 'single arrays args socket peer name'); -} - -$ml = Monitoring::Livestatus::MULTI->new( 'localhost:5001' ); -isa_ok($ml, 'Monitoring::Livestatus', 'single args server'); -for my $peer (@{$ml->{'peers'}}) { - isa_ok($peer, 'Monitoring::Livestatus::INET', 'single args server peer'); - like($peer->peer_addr, qr/^localhost/, 'single args servers peer addr'); - like($peer->peer_name, qr/^localhost/, 'single args servers peer name'); -} - -$ml = Monitoring::Livestatus::MULTI->new( ['localhost:5001'] ); -isa_ok($ml, 'Monitoring::Livestatus', 'single array args server'); -for my $peer (@{$ml->{'peers'}}) { - isa_ok($peer, 'Monitoring::Livestatus::INET', 'single arrays args server peer'); - like($peer->peer_addr, qr/^localhost/, 'single arrays args servers peer addr'); - like($peer->peer_name, qr/^localhost/, 'single arrays args servers peer name'); -} - -$ml = Monitoring::Livestatus::MULTI->new( [ 'localhost:5001', 'localhost:5002' ] ); -isa_ok($ml, 'Monitoring::Livestatus', 'single args servers'); -for my $peer (@{$ml->{'peers'}}) { - isa_ok($peer, 'Monitoring::Livestatus::INET', 'single args servers peer'); - like($peer->peer_addr, qr/^localhost/, 'single args servers peer addr'); - like($peer->peer_name, qr/^localhost/, 'single args servers peer name'); -} - -$ml = Monitoring::Livestatus::MULTI->new( peer => [ 'localhost:5001', 'localhost:5002' ] ); -isa_ok($ml, 'Monitoring::Livestatus', 'hash args servers'); -for my $peer (@{$ml->{'peers'}}) { - isa_ok($peer, 'Monitoring::Livestatus::INET', 'hash args servers peer'); - like($peer->peer_addr, qr/^localhost/, 'hash args servers peer addr'); - like($peer->peer_name, qr/^localhost/, 'hash args servers peer name'); -} - -$ml = Monitoring::Livestatus::MULTI->new( peer => [ $socket_path1, $socket_path2 ] ); -isa_ok($ml, 'Monitoring::Livestatus', 'hash args sockets'); -for my $peer (@{$ml->{'peers'}}) { - isa_ok($peer, 'Monitoring::Livestatus::UNIX', 'hash args sockets peer'); -} - -$ml = Monitoring::Livestatus::MULTI->new( peer => { $socket_path1 => 'Location 1', $socket_path2 => 'Location2' } ); -isa_ok($ml, 'Monitoring::Livestatus', 'hash args hashed sockets'); -for my $peer (@{$ml->{'peers'}}) { - isa_ok($peer, 'Monitoring::Livestatus::UNIX', 'hash args hashed sockets peer'); - like($peer->peer_name, qr/^Location/, 'hash args hashed sockets peer name'); -} - -$ml = Monitoring::Livestatus::MULTI->new( peer => { 'localhost:5001' => 'Location 1', 'localhost:5002' => 
'Location2' } ); -isa_ok($ml, 'Monitoring::Livestatus', 'hash args hashed servers'); -for my $peer (@{$ml->{'peers'}}) { - isa_ok($peer, 'Monitoring::Livestatus::INET', 'hash args hashed servers peer'); - like($peer->peer_addr, qr/^localhost/, 'hash args hashed servers peer addr'); - like($peer->peer_name, qr/^Location/, 'hash args hashed servers peer name'); -} - -$ml = Monitoring::Livestatus::MULTI->new( $socket_path1 ); -isa_ok($ml, 'Monitoring::Livestatus', 'single args socket'); -for my $peer (@{$ml->{'peers'}}) { - isa_ok($peer, 'Monitoring::Livestatus::UNIX', 'single args socket peer'); -} - -######################### -# test internal subs -$ml = Monitoring::Livestatus::MULTI->new('peer' => ['192.168.123.2:9996', '192.168.123.2:9997', '192.168.123.2:9998' ] ); - -my $x = 0; -for my $test (@{$mergetests}) { - my $got = $ml->_merge_answer($test->{'in'}); - is_deeply($got, $test->{'exp'}, '_merge_answer test '.$x) - or diag("got: ".Dumper($got)."\nbut expected ".Dumper($test->{'exp'})); - $x++; -} - -######################### -# test the _sum_answer -my $sumtests = [ - { # hashes - in => { '192.168.123.2:9996' => { 'ok' => '12', 'warning' => '8' }, - '192.168.123.2:9997' => { 'ok' => '17', 'warning' => '7' }, - '192.168.123.2:9998' => { 'ok' => '13', 'warning' => '2' } - }, - exp => { 'ok' => '42', 'warning' => '17' } - }, - { # hashes, undefs - in => { '192.168.123.2:9996' => { 'ok' => '12', 'warning' => '8' }, - '192.168.123.2:9997' => undef, - '192.168.123.2:9998' => { 'ok' => '13', 'warning' => '2' } - }, - exp => { 'ok' => '25', 'warning' => '10' } - }, - { # hashes, undefs - in => { '192.168.123.2:9996' => { 'ok' => '12', 'warning' => '8' }, - '192.168.123.2:9997' => {}, - '192.168.123.2:9998' => { 'ok' => '13', 'warning' => '2' } - }, - exp => { 'ok' => '25', 'warning' => '10' } - }, - { # arrays - in => { '192.168.123.2:9996' => [ '3302', '235' ], - '192.168.123.2:9997' => [ '3324', '236' ], - '192.168.123.2:9998' => [ '3274', '236' ] - }, - exp => [ 9900, 707 ] - }, - { # undefs / scalars - in => { 'e69322abf0352888e598da3e2514df4a' => undef, - 'f42530d7e8c2b52732ba427b1e5e0a8e' => '1' - }, - exp => 1, - }, - { # arrays, undefs - in => { '192.168.123.2:9996' => [ '2', '5' ], - '192.168.123.2:9997' => [ ], - '192.168.123.2:9998' => [ '4', '6' ] - }, - exp => [ 6, 11 ] - }, - { # arrays, undefs - in => { '192.168.123.2:9996' => [ '2', '5' ], - '192.168.123.2:9997' => undef, - '192.168.123.2:9998' => [ '4', '6' ] - }, - exp => [ 6, 11 ] - }, -]; - -$x = 1; -for my $test (@{$sumtests}) { - my $got = $ml->_sum_answer($test->{'in'}); - is_deeply($got, $test->{'exp'}, '_sum_answer test '.$x) - or diag("got: ".Dumper($got)."\nbut expected ".Dumper($test->{'exp'})); - $x++; -} - -######################### -# clone test -my $clone = $ml->_clone($mergetests); -is_deeply($clone, $mergetests, 'merge test clone'); - -$clone = $ml->_clone($sumtests); -is_deeply($clone, $sumtests, 'sum test clone'); diff -Nru check-mk-1.2.2p3/=unpacked-tar10=/api/perl/t/20-Monitoring-Livestatus-test_socket.t check-mk-1.2.6p12/=unpacked-tar10=/api/perl/t/20-Monitoring-Livestatus-test_socket.t --- check-mk-1.2.2p3/=unpacked-tar10=/api/perl/t/20-Monitoring-Livestatus-test_socket.t 2012-10-04 13:12:37.000000000 +0000 +++ check-mk-1.2.6p12/=unpacked-tar10=/api/perl/t/20-Monitoring-Livestatus-test_socket.t 1970-01-01 00:00:00.000000000 +0000 @@ -1,329 +0,0 @@ -#!/usr/bin/env perl - -######################### - -use strict; -use Test::More; -use IO::Socket::UNIX qw( SOCK_STREAM SOMAXCONN ); -use Data::Dumper; -use 
JSON::XS; - -BEGIN { - eval {require threads;}; - if ( $@ ) { - plan skip_all => 'need threads support for testing a real socket' - } - elsif( $^O eq 'MSWin32' ) { - plan skip_all => 'no sockets on windows'; - } - else{ - plan tests => 109 - } -} - -use File::Temp; -BEGIN { use_ok('Monitoring::Livestatus') }; - -######################### -# Normal Querys -######################### -my $line_seperator = 10; -my $column_seperator = 0; -my $test_data = [ ["alias","name","contacts"], # table header - ["alias1","host1","contact1"], # row 1 - ["alias2","host2","contact2"], # row 2 - ["alias3","host3","contact3"], # row 3 - ]; -my $test_hostgroups = [['']]; # test one row with no data - -# expected results -my $selectall_arrayref1 = [ [ 'alias1', 'host1', 'contact1' ], - [ 'alias2', 'host2', 'contact2' ], - [ 'alias3', 'host3', 'contact3' ] - ]; -my $selectall_arrayref2 = [ - { 'contacts' => 'contact1', 'name' => 'host1', 'alias' => 'alias1' }, - { 'contacts' => 'contact2', 'name' => 'host2', 'alias' => 'alias2' }, - { 'contacts' => 'contact3', 'name' => 'host3', 'alias' => 'alias3' } - ]; -my $selectall_hashref = { - 'host1' => { 'contacts' => 'contact1', 'name' => 'host1', 'alias' => 'alias1' }, - 'host2' => { 'contacts' => 'contact2', 'name' => 'host2', 'alias' => 'alias2' }, - 'host3' => { 'contacts' => 'contact3', 'name' => 'host3', 'alias' => 'alias3' } - }; -my $selectcol_arrayref1 = [ 'alias1', 'alias2', 'alias3' ]; -my $selectcol_arrayref2 = [ 'alias1', 'host1', 'alias2', 'host2', 'alias3', 'host3' ]; -my $selectcol_arrayref3 = [ 'alias1', 'host1', 'contact1', 'alias2', 'host2', 'contact2', 'alias3', 'host3', 'contact3' ]; -my @selectrow_array = ( 'alias1', 'host1', 'contact1' ); -my $selectrow_arrayref = [ 'alias1', 'host1', 'contact1' ]; -my $selectrow_hashref = { 'contacts' => 'contact1', 'name' => 'host1', 'alias' => 'alias1' }; - -######################### -# Single Querys -######################### -my $single_statement = "GET hosts\nColumns: alias\nFilter: name = host1"; -my $selectscalar_value = 'alias1'; - -######################### -# Stats Querys -######################### -my $stats_statement = "GET services\nStats: state = 0\nStats: state = 1\nStats: state = 2\nStats: state = 3"; -my $stats_data = [[4297,13,9,0]]; - -# expected results -my $stats_selectall_arrayref1 = [ [4297,13,9,0] ]; -my $stats_selectall_arrayref2 = [ { 'state = 0' => '4297', 'state = 1' => '13', 'state = 2' => '9', 'state = 3' => 0 } ]; -my $stats_selectcol_arrayref = [ '4297' ]; -my @stats_selectrow_array = ( '4297', '13', '9', '0' ); -my $stats_selectrow_arrayref = [ '4297', '13', '9', '0' ]; -my $stats_selectrow_hashref = { 'state = 0' => '4297', 'state = 1' => '13', 'state = 2' => '9', 'state = 3' => 0 }; - -######################### -# Empty Querys -######################### -my $empty_statement = "GET services\nFilter: description = empty"; - -# expected results -my $empty_selectall_arrayref = []; -my $empty_selectcol_arrayref = []; -my @empty_selectrow_array; -my $empty_selectrow_arrayref; -my $empty_selectrow_hashref; - - -######################### -# get a temp file from File::Temp and replace it with our socket -my $fh = File::Temp->new(UNLINK => 0); -my $socket_path = $fh->filename; -unlink($socket_path); -my $thr1 = threads->create('create_socket', 'unix'); -######################### -# get a temp file from File::Temp and replace it with our socket -my $server = 'localhost:32987'; -my $thr2 = threads->create('create_socket', 'inet'); -sleep(1); - -######################### -my 
$objects_to_test = { - # create unix object with hash args - 'unix_hash_args' => Monitoring::Livestatus->new( - verbose => 0, - socket => $socket_path, - line_seperator => $line_seperator, - column_seperator => $column_seperator, - ), - - # create unix object with a single arg - 'unix_single_arg' => Monitoring::Livestatus::UNIX->new( $socket_path ), - - # create inet object with hash args - 'inet_hash_args' => Monitoring::Livestatus->new( - verbose => 0, - server => $server, - line_seperator => $line_seperator, - column_seperator => $column_seperator, - ), - - # create inet object with a single arg - 'inet_single_arg' => Monitoring::Livestatus::INET->new( $server ), - -}; - -for my $key (keys %{$objects_to_test}) { - my $ml = $objects_to_test->{$key}; - isa_ok($ml, 'Monitoring::Livestatus'); - - # we dont need warnings for testing - $ml->warnings(0); - - ################################################## - # test settings - my $rt = $ml->verbose(1); - is($rt, '0', 'enable verbose'); - $rt = $ml->verbose(0); - is($rt, '1', 'disable verbose'); - - $rt = $ml->errors_are_fatal(0); - is($rt, '1', 'disable errors_are_fatal'); - $rt = $ml->errors_are_fatal(1); - is($rt, '0', 'enable errors_are_fatal'); - - ################################################## - # do some sample querys - my $statement = "GET hosts"; - - ######################### - my $ary_ref = $ml->selectall_arrayref($statement); - is_deeply($ary_ref, $selectall_arrayref1, 'selectall_arrayref($statement)') - or diag("got: ".Dumper($ary_ref)."\nbut expected ".Dumper($selectall_arrayref1)); - - ######################### - $ary_ref = $ml->selectall_arrayref($statement, { Slice => {} }); - is_deeply($ary_ref, $selectall_arrayref2, 'selectall_arrayref($statement, { Slice => {} })') - or diag("got: ".Dumper($ary_ref)."\nbut expected ".Dumper($selectall_arrayref2)); - - ######################### - my $hash_ref = $ml->selectall_hashref($statement, 'name'); - is_deeply($hash_ref, $selectall_hashref, 'selectall_hashref($statement, "name")') - or diag("got: ".Dumper($hash_ref)."\nbut expected ".Dumper($selectall_hashref)); - - ######################### - $ary_ref = $ml->selectcol_arrayref($statement); - is_deeply($ary_ref, $selectcol_arrayref1, 'selectcol_arrayref($statement)') - or diag("got: ".Dumper($ary_ref)."\nbut expected ".Dumper($selectcol_arrayref1)); - - ######################### - $ary_ref = $ml->selectcol_arrayref($statement, { Columns=>[1,2] }); - is_deeply($ary_ref, $selectcol_arrayref2, 'selectcol_arrayref($statement, { Columns=>[1,2] })') - or diag("got: ".Dumper($ary_ref)."\nbut expected ".Dumper($selectcol_arrayref2)); - - $ary_ref = $ml->selectcol_arrayref($statement, { Columns=>[1,2,3] }); - is_deeply($ary_ref, $selectcol_arrayref3, 'selectcol_arrayref($statement, { Columns=>[1,2,3] })') - or diag("got: ".Dumper($ary_ref)."\nbut expected ".Dumper($selectcol_arrayref3)); - - ######################### - my @row_ary = $ml->selectrow_array($statement); - is_deeply(\@row_ary, \@selectrow_array, 'selectrow_array($statement)') - or diag("got: ".Dumper(\@row_ary)."\nbut expected ".Dumper(\@selectrow_array)); - - ######################### - $ary_ref = $ml->selectrow_arrayref($statement); - is_deeply($ary_ref, $selectrow_arrayref, 'selectrow_arrayref($statement)') - or diag("got: ".Dumper($ary_ref)."\nbut expected ".Dumper($selectrow_arrayref)); - - ######################### - $hash_ref = $ml->selectrow_hashref($statement); - is_deeply($hash_ref, $selectrow_hashref, 'selectrow_hashref($statement)') - or diag("got: 
".Dumper($hash_ref)."\nbut expected ".Dumper($selectrow_hashref)); - - ################################################## - # stats querys - ################################################## - $ary_ref = $ml->selectall_arrayref($stats_statement); - is_deeply($ary_ref, $stats_selectall_arrayref1, 'selectall_arrayref($stats_statement)') - or diag("got: ".Dumper($ary_ref)."\nbut expected ".Dumper($stats_selectall_arrayref1)); - - $ary_ref = $ml->selectall_arrayref($stats_statement, { Slice => {} }); - is_deeply($ary_ref, $stats_selectall_arrayref2, 'selectall_arrayref($stats_statement, { Slice => {} })') - or diag("got: ".Dumper($ary_ref)."\nbut expected ".Dumper($stats_selectall_arrayref2)); - - $ary_ref = $ml->selectcol_arrayref($stats_statement); - is_deeply($ary_ref, $stats_selectcol_arrayref, 'selectcol_arrayref($stats_statement)') - or diag("got: ".Dumper($ary_ref)."\nbut expected ".Dumper($stats_selectcol_arrayref)); - - @row_ary = $ml->selectrow_array($stats_statement); - is_deeply(\@row_ary, \@stats_selectrow_array, 'selectrow_arrayref($stats_statement)') - or diag("got: ".Dumper(\@row_ary)."\nbut expected ".Dumper(\@stats_selectrow_array)); - - $ary_ref = $ml->selectrow_arrayref($stats_statement); - is_deeply($ary_ref, $stats_selectrow_arrayref, 'selectrow_arrayref($stats_statement)') - or diag("got: ".Dumper($ary_ref)."\nbut expected ".Dumper($stats_selectrow_arrayref)); - - $hash_ref = $ml->selectrow_hashref($stats_statement); - is_deeply($hash_ref, $stats_selectrow_hashref, 'selectrow_hashref($stats_statement)') - or diag("got: ".Dumper($hash_ref)."\nbut expected ".Dumper($stats_selectrow_hashref)); - - my $scal = $ml->selectscalar_value($single_statement); - is($scal, $selectscalar_value, 'selectscalar_value($single_statement)') - or diag("got: ".Dumper($scal)."\nbut expected ".Dumper($selectscalar_value)); - - ################################################## - # empty querys - ################################################## - $ary_ref = $ml->selectall_arrayref($empty_statement); - is_deeply($ary_ref, $empty_selectall_arrayref, 'selectall_arrayref($empty_statement)') - or diag("got: ".Dumper($ary_ref)."\nbut expected ".Dumper($empty_selectall_arrayref)); - - $ary_ref = $ml->selectcol_arrayref($empty_statement); - is_deeply($ary_ref, $empty_selectcol_arrayref, 'selectcol_arrayref($empty_statement)') - or diag("got: ".Dumper($ary_ref)."\nbut expected ".Dumper($empty_selectcol_arrayref)); - - @row_ary = $ml->selectrow_array($empty_statement); - is_deeply(\@row_ary, \@empty_selectrow_array, 'selectrow_arrayref($empty_statement)') - or diag("got: ".Dumper(\@row_ary)."\nbut expected ".Dumper(\@empty_selectrow_array)); - - $ary_ref = $ml->selectrow_arrayref($empty_statement); - is_deeply($ary_ref, $empty_selectrow_arrayref, 'selectrow_arrayref($empty_statement)') - or diag("got: ".Dumper($ary_ref)."\nbut expected ".Dumper($empty_selectrow_arrayref)); - - $hash_ref = $ml->selectrow_hashref($empty_statement); - is_deeply($hash_ref, $empty_selectrow_hashref, 'selectrow_hashref($empty_statement)') - or diag("got: ".Dumper($hash_ref)."\nbut expected ".Dumper($empty_selectrow_hashref)); - - ################################################## - # empty rows and columns - ################################################## - my $empty_hostgroups_stm = "GET hostgroups\nColumns: members"; - $ary_ref = $ml->selectall_arrayref($empty_hostgroups_stm); - is_deeply($ary_ref, $test_hostgroups, 'selectall_arrayref($empty_hostgroups_stm)') - or diag("got: ".Dumper($ary_ref)."\nbut expected 
".Dumper($test_hostgroups)); - -} - -################################################## -# exit threads -$thr1->kill('KILL')->detach(); -$thr2->kill('KILL')->detach(); -exit; - - -######################### -# SUBS -######################### -# test socket server -sub create_socket { - my $type = shift; - my $listener; - - $SIG{'KILL'} = sub { threads->exit(); }; - - if($type eq 'unix') { - print "creating unix socket\n"; - $listener = IO::Socket::UNIX->new( - Type => SOCK_STREAM, - Listen => SOMAXCONN, - Local => $socket_path, - ) or die("failed to open $socket_path as test socket: $!"); - } - elsif($type eq 'inet') { - print "creating tcp socket\n"; - $listener = IO::Socket::INET->new( - LocalAddr => $server, - Proto => 'tcp', - Listen => 1, - Reuse => 1, - ) or die("failed to listen on $server: $!"); - } else { - die("unknown type"); - } - while( my $socket = $listener->accept() or die('cannot accept: $!') ) { - my $recv = ""; - while(<$socket>) { $recv .= $_; last if $_ eq "\n" } - my $data; - my $status = 200; - if($recv =~ m/^GET .*?\s+Filter:.*?empty/m) { - $data = ''; - } - elsif($recv =~ m/^GET hosts\s+Columns: alias/m) { - my @data = @{$test_data}[1..3]; - $data = encode_json(\@data)."\n"; - } - elsif($recv =~ m/^GET hosts\s+Columns: name/m) { - $data = encode_json(\@{$test_data}[1..3])."\n"; - } - elsif($recv =~ m/^GET hosts/) { - $data = encode_json($test_data)."\n"; - } - elsif($recv =~ m/^GET hostgroups/) { - $data = encode_json(\@{$test_hostgroups})."\n"; - } - elsif($recv =~ m/^GET services/ and $recv =~ m/Stats:/m) { - $data = encode_json(\@{$stats_data})."\n"; - } - my $content_length = sprintf("%11s", length($data)); - print $socket $status." ".$content_length."\n"; - print $socket $data; - close($socket); - } - unlink($socket_path); -} diff -Nru check-mk-1.2.2p3/=unpacked-tar10=/api/perl/t/21-Monitoring-Livestatus-INET.t check-mk-1.2.6p12/=unpacked-tar10=/api/perl/t/21-Monitoring-Livestatus-INET.t --- check-mk-1.2.2p3/=unpacked-tar10=/api/perl/t/21-Monitoring-Livestatus-INET.t 2012-10-04 13:12:37.000000000 +0000 +++ check-mk-1.2.6p12/=unpacked-tar10=/api/perl/t/21-Monitoring-Livestatus-INET.t 1970-01-01 00:00:00.000000000 +0000 @@ -1,30 +0,0 @@ -#!/usr/bin/env perl - -######################### - -use strict; -use Test::More tests => 3; -use IO::Socket::INET; -BEGIN { use_ok('Monitoring::Livestatus::INET') }; - -######################### -# create a tmp listener -my $server = 'localhost:9999'; -my $listener = IO::Socket::INET->new( - ) or die("failed to open port as test listener: $!"); -######################### -# create object with single arg -my $ml = Monitoring::Livestatus::INET->new( $server ); -isa_ok($ml, 'Monitoring::Livestatus', 'Monitoring::Livestatus::INET->new()'); - -######################### -# create object with hash args -my $line_seperator = 10; -my $column_seperator = 0; -$ml = Monitoring::Livestatus::INET->new( - verbose => 0, - server => $server, - line_seperator => $line_seperator, - column_seperator => $column_seperator, - ); -isa_ok($ml, 'Monitoring::Livestatus', 'Monitoring::Livestatus::INET->new(%args)'); diff -Nru check-mk-1.2.2p3/=unpacked-tar10=/api/perl/t/22-Monitoring-Livestatus-UNIX.t check-mk-1.2.6p12/=unpacked-tar10=/api/perl/t/22-Monitoring-Livestatus-UNIX.t --- check-mk-1.2.2p3/=unpacked-tar10=/api/perl/t/22-Monitoring-Livestatus-UNIX.t 2012-10-04 13:12:37.000000000 +0000 +++ check-mk-1.2.6p12/=unpacked-tar10=/api/perl/t/22-Monitoring-Livestatus-UNIX.t 1970-01-01 00:00:00.000000000 +0000 @@ -1,26 +0,0 @@ -#!/usr/bin/env perl - 
-######################### - -use strict; -use Test::More tests => 3; -use IO::Socket::INET; -BEGIN { use_ok('Monitoring::Livestatus::UNIX') }; - -######################### -# create object with single arg -my $socket = "/tmp/blah.socket"; -my $ml = Monitoring::Livestatus::UNIX->new( $socket ); -isa_ok($ml, 'Monitoring::Livestatus', 'Monitoring::Livestatus::UNIX->new()'); - -######################### -# create object with hash args -my $line_seperator = 10; -my $column_seperator = 0; -$ml = Monitoring::Livestatus::UNIX->new( - verbose => 0, - socket => $socket, - line_seperator => $line_seperator, - column_seperator => $column_seperator, - ); -isa_ok($ml, 'Monitoring::Livestatus', 'Monitoring::Livestatus::UNIX->new(%args)'); diff -Nru check-mk-1.2.2p3/=unpacked-tar10=/api/perl/t/30-Monitoring-Livestatus-live-test.t check-mk-1.2.6p12/=unpacked-tar10=/api/perl/t/30-Monitoring-Livestatus-live-test.t --- check-mk-1.2.2p3/=unpacked-tar10=/api/perl/t/30-Monitoring-Livestatus-live-test.t 2012-10-04 13:12:37.000000000 +0000 +++ check-mk-1.2.6p12/=unpacked-tar10=/api/perl/t/30-Monitoring-Livestatus-live-test.t 1970-01-01 00:00:00.000000000 +0000 @@ -1,472 +0,0 @@ -#!/usr/bin/env perl - -######################### - -use strict; -use Test::More; -use Data::Dumper; - -if ( ! defined $ENV{TEST_SOCKET} or !defined $ENV{TEST_SERVER} ) { - my $msg = 'Author test. Set $ENV{TEST_SOCKET} and $ENV{TEST_SERVER} to run'; - plan( skip_all => $msg ); -} else { - plan( tests => 727 ); -} - -# set an alarm -my $lastquery; -$SIG{ALRM} = sub { - my @caller = caller; - print STDERR 'last query: '.$lastquery if defined $lastquery; - die "timeout reached:".Dumper(\@caller)."\n" -}; -alarm(120); - -use_ok('Monitoring::Livestatus'); - -######################### -my $line_seperator = 10; -my $column_seperator = 0; -my $objects_to_test = { - # UNIX - # create unix object with a single arg -# '01 unix_single_arg' => Monitoring::Livestatus::UNIX->new( $ENV{TEST_SOCKET} ), - - # create unix object with hash args - '02 unix_few_args' => Monitoring::Livestatus->new( - #verbose => 1, - socket => $ENV{TEST_SOCKET}, - line_seperator => $line_seperator, - column_seperator => $column_seperator, - ), - - # create unix object with hash args - '03 unix_keepalive' => Monitoring::Livestatus->new( - verbose => 0, - socket => $ENV{TEST_SOCKET}, - keepalive => 1, - ), - - # TCP - # create inet object with a single arg - '04 inet_single_arg' => Monitoring::Livestatus::INET->new( $ENV{TEST_SERVER} ), - - # create inet object with hash args - '05 inet_few_args' => Monitoring::Livestatus->new( - verbose => 0, - server => $ENV{TEST_SERVER}, - line_seperator => $line_seperator, - column_seperator => $column_seperator, - ), - - - # create inet object with keepalive - '06 inet_keepalive' => Monitoring::Livestatus->new( - verbose => 0, - server => $ENV{TEST_SERVER}, - keepalive => 1, - ), - - # create multi single args - '07 multi_keepalive' => Monitoring::Livestatus->new( [ $ENV{TEST_SERVER}, $ENV{TEST_SOCKET} ] ), - - # create multi object with keepalive - '08 multi_keepalive_hash_args' => Monitoring::Livestatus->new( - verbose => 0, - peer => [ $ENV{TEST_SERVER}, $ENV{TEST_SOCKET} ], - keepalive => 1, - ), - - # create multi object without keepalive - '09 multi_no_keepalive' => Monitoring::Livestatus->new( - peer => [ $ENV{TEST_SERVER}, $ENV{TEST_SOCKET} ], - keepalive => 0, - ), - - # create multi object without threads - '10 multi_no_threads' => Monitoring::Livestatus->new( - peer => [ $ENV{TEST_SERVER}, $ENV{TEST_SOCKET} ], - use_threads => 0, 
- ), - - # create multi object with only one peer - '11 multi_one_peer' => Monitoring::Livestatus::MULTI->new( - peer => $ENV{TEST_SERVER}, - ), - - # create multi object without threads - '12 multi_two_peers' => Monitoring::Livestatus::MULTI->new( - peer => [ $ENV{TEST_SERVER}, $ENV{TEST_SOCKET} ], - ), -}; - -my $expected_keys = { - 'columns' => [ - 'description','name','table','type' - ], - 'commands' => [ - 'line','name' - ], - 'comments' => [ - '__all_from_hosts__', '__all_from_services__', - 'author','comment','entry_time','entry_type','expire_time','expires', 'id','persistent', - 'source','type' - ], - 'contacts' => [ - 'address1','address2','address3','address4','address5','address6','alias', - 'can_submit_commands','custom_variable_names','custom_variable_values','email', - 'host_notification_period','host_notifications_enabled','in_host_notification_period', - 'in_service_notification_period','name','modified_attributes','modified_attributes_list', - 'pager','service_notification_period','service_notifications_enabled' - ], - 'contactgroups' => [ 'name', 'alias', 'members' ], - 'downtimes' => [ - '__all_from_hosts__', '__all_from_services__', - 'author','comment','duration','end_time','entry_time','fixed','id','start_time', - 'triggered_by','type' - ], - 'hostgroups' => [ - 'action_url','alias','members','name','members_with_state','notes','notes_url','num_hosts','num_hosts_down', - 'num_hosts_pending','num_hosts_unreach','num_hosts_up','num_services','num_services_crit', - 'num_services_hard_crit','num_services_hard_ok','num_services_hard_unknown', - 'num_services_hard_warn','num_services_ok','num_services_pending','num_services_unknown', - 'num_services_warn','worst_host_state','worst_service_hard_state','worst_service_state' - ], - 'hosts' => [ - 'accept_passive_checks','acknowledged','acknowledgement_type','action_url','action_url_expanded', - 'active_checks_enabled','address','alias','check_command','check_freshness','check_interval', - 'check_options','check_period','check_type','checks_enabled','childs','comments','comments_with_info', - 'contacts','current_attempt','current_notification_number','custom_variable_names', - 'custom_variable_values','display_name','downtimes','downtimes_with_info','event_handler_enabled', - 'execution_time','first_notification_delay','flap_detection_enabled','groups','hard_state','has_been_checked', - 'high_flap_threshold','icon_image','icon_image_alt','icon_image_expanded','in_check_period', - 'in_notification_period','initial_state','is_executing','is_flapping','last_check','last_hard_state', - 'last_hard_state_change','last_notification','last_state','last_state_change','latency','last_time_down', - 'last_time_unreachable','last_time_up','long_plugin_output','low_flap_threshold','max_check_attempts','name', - 'modified_attributes','modified_attributes_list','next_check', - 'next_notification','notes','notes_expanded','notes_url','notes_url_expanded','notification_interval', - 'notification_period','notifications_enabled','num_services','num_services_crit','num_services_hard_crit', - 'num_services_hard_ok','num_services_hard_unknown','num_services_hard_warn','num_services_ok', - 'num_services_pending','num_services_unknown','num_services_warn','obsess_over_host','parents', - 'pending_flex_downtime','percent_state_change','perf_data','plugin_output', - 'process_performance_data','retry_interval','scheduled_downtime_depth','services','services_with_state', - 
'state','state_type','statusmap_image','total_services','worst_service_hard_state','worst_service_state', - 'x_3d','y_3d','z_3d' - ], - 'hostsbygroup' => [ - '__all_from_hosts__', '__all_from_hostgroups__' - ], - 'log' => [ - '__all_from_hosts__','__all_from_services__','__all_from_contacts__','__all_from_commands__', - 'attempt','class','command_name','comment','contact_name','host_name','lineno','message','options', - 'plugin_output','service_description','state','state_type','time','type' - ], - 'servicegroups' => [ - 'action_url','alias','members','name','members_with_state','notes','notes_url','num_services','num_services_crit', - 'num_services_hard_crit','num_services_hard_ok','num_services_hard_unknown', - 'num_services_hard_warn','num_services_ok','num_services_pending','num_services_unknown', - 'num_services_warn','worst_service_state' - ], - 'servicesbygroup' => [ - '__all_from_services__', '__all_from_hosts__', '__all_from_servicegroups__' - ], - 'services' => [ - '__all_from_hosts__', - 'accept_passive_checks','acknowledged','acknowledgement_type','action_url','action_url_expanded', - 'active_checks_enabled','check_command','check_interval','check_options','check_period', - 'check_type','checks_enabled','comments','comments_with_info','contacts','current_attempt', - 'current_notification_number','custom_variable_names','custom_variable_values', - 'description','display_name','downtimes','downtimes_with_info','event_handler','event_handler_enabled', - 'execution_time','first_notification_delay','flap_detection_enabled','groups', - 'has_been_checked','high_flap_threshold','icon_image','icon_image_alt','icon_image_expanded','in_check_period', - 'in_notification_period','initial_state','is_executing','is_flapping','last_check', - 'last_hard_state','last_hard_state_change','last_notification','last_state', - 'last_state_change','latency','last_time_critical','last_time_ok','last_time_unknown','last_time_warning', - 'long_plugin_output','low_flap_threshold','max_check_attempts','modified_attributes','modified_attributes_list', - 'next_check','next_notification','notes','notes_expanded','notes_url','notes_url_expanded', - 'notification_interval','notification_period','notifications_enabled','obsess_over_service', - 'percent_state_change','perf_data','plugin_output','process_performance_data','retry_interval', - 'scheduled_downtime_depth','state','state_type' - ], - 'servicesbyhostgroup' => [ - '__all_from_services__', '__all_from_hosts__', '__all_from_hostgroups__' - ], - 'status' => [ - 'accept_passive_host_checks','accept_passive_service_checks','cached_log_messages', - 'check_external_commands','check_host_freshness','check_service_freshness','connections', - 'connections_rate','enable_event_handlers','enable_flap_detection','enable_notifications', - 'execute_host_checks','execute_service_checks','forks','forks_rate','host_checks','host_checks_rate','interval_length', - 'last_command_check','last_log_rotation','livestatus_version','log_messages','log_messages_rate','nagios_pid','neb_callbacks', - 'neb_callbacks_rate','obsess_over_hosts','obsess_over_services','process_performance_data', - 'program_start','program_version','requests','requests_rate','service_checks','service_checks_rate' - ], - 'timeperiods' => [ 'in', 'name', 'alias' ], -}; - -my $author = 'Monitoring::Livestatus test'; -for my $key (sort keys %{$objects_to_test}) { - my $ml = $objects_to_test->{$key}; - isa_ok($ml, 'Monitoring::Livestatus') or BAIL_OUT("no need to continue without a proper Monitoring::Livestatus 
object: ".$key); - - # dont die on errors - $ml->errors_are_fatal(0); - $ml->warnings(0); - - ######################### - # set downtime for a host and service - my $downtimes = $ml->selectall_arrayref("GET downtimes\nColumns: id"); - my $num_downtimes = 0; - $num_downtimes = scalar @{$downtimes} if defined $downtimes; - my $firsthost = $ml->selectscalar_value("GET hosts\nColumns: name\nLimit: 1"); - isnt($firsthost, undef, 'get test hostname') or BAIL_OUT($key.': got not test hostname'); - $ml->do('COMMAND ['.time().'] SCHEDULE_HOST_DOWNTIME;'.$firsthost.';'.time().';'.(time()+300).';1;0;300;'.$author.';perl test: '.$0); - my $firstservice = $ml->selectscalar_value("GET services\nColumns: description\nFilter: host_name = $firsthost\nLimit: 1"); - isnt($firstservice, undef, 'get test servicename') or BAIL_OUT('got not test servicename'); - $ml->do('COMMAND ['.time().'] SCHEDULE_SVC_DOWNTIME;'.$firsthost.';'.$firstservice.';'.time().';'.(time()+300).';1;0;300;'.$author.';perl test: '.$0); - # sometimes it takes while till the downtime is accepted - my $waited = 0; - while(scalar @{$ml->selectall_arrayref("GET downtimes\nColumns: id")} < $num_downtimes + 2) { - print "waiting for the downtime...\n"; - sleep(1); - $waited++; - BAIL_OUT('waited 30 seconds for the downtime...') if $waited > 30; - } - ######################### - - ######################### - # check tables - my $data = $ml->selectall_hashref("GET columns\nColumns: table", 'table'); - my @tables = sort keys %{$data}; - my @expected_tables = sort keys %{$expected_keys}; - is_deeply(\@tables, \@expected_tables, $key.' tables') or BAIL_OUT("got tables:\n".join(', ', @tables)."\nbut expected\n".join(', ', @expected_tables)); - - ######################### - # check keys - for my $type (keys %{$expected_keys}) { - my $filter = ""; - $filter = "Filter: time > ".(time() - 86400)."\n" if $type eq 'log'; - $filter .= "Filter: time < ".(time())."\n" if $type eq 'log'; - my $expected_keys = get_expected_keys($type); - my $statement = "GET $type\n".$filter."Limit: 1"; - $lastquery = $statement; - my $hash_ref = $ml->selectrow_hashref($statement ); - undef $lastquery; - is(ref $hash_ref, 'HASH', $type.' keys are a hash') or BAIL_OUT($type.'keys are not in hash format, got '.Dumper($hash_ref)); - my @keys = sort keys %{$hash_ref}; - is_deeply(\@keys, $expected_keys, $key.' '.$type.' table columns') or BAIL_OUT("got $type keys:\n".join(', ', @keys)."\nbut expected\n".join(', ', @{$expected_keys})); - } - - my $statement = "GET hosts\nColumns: name as hostname state\nLimit: 1"; - $lastquery = $statement; - my $hash_ref = $ml->selectrow_hashref($statement); - undef $lastquery; - isnt($hash_ref, undef, $key.' test column alias'); - is($Monitoring::Livestatus::ErrorCode, 0, $key.' test column alias') or - diag('got error: '.$Monitoring::Livestatus::ErrorMessage); - - ######################### - # send a test command - # commands still dont work and breaks livestatus - my $rt = $ml->do('COMMAND ['.time().'] SAVE_STATE_INFORMATION'); - is($rt, '1', $key.' test command'); - - ######################### - # check for errors - #$ml->{'verbose'} = 1; - $statement = "GET hosts\nLimit: 1"; - $lastquery = $statement; - $hash_ref = $ml->selectrow_hashref($statement ); - undef $lastquery; - isnt($hash_ref, undef, $key.' test error 200 body'); - is($Monitoring::Livestatus::ErrorCode, 0, $key.' 
test error 200 status') or - diag('got error: '.$Monitoring::Livestatus::ErrorMessage); - - $statement = "BLAH hosts"; - $lastquery = $statement; - $hash_ref = $ml->selectrow_hashref($statement ); - undef $lastquery; - is($hash_ref, undef, $key.' test error 401 body'); - is($Monitoring::Livestatus::ErrorCode, '401', $key.' test error 401 status') or - diag('got error: '.$Monitoring::Livestatus::ErrorMessage); - - $statement = "GET hosts\nLimit: "; - $lastquery = $statement; - $hash_ref = $ml->selectrow_hashref($statement ); - undef $lastquery; - is($hash_ref, undef, $key.' test error 403 body'); - is($Monitoring::Livestatus::ErrorCode, '403', $key.' test error 403 status') or - diag('got error: '.$Monitoring::Livestatus::ErrorMessage); - - $statement = "GET unknowntable\nLimit: 1"; - $lastquery = $statement; - $hash_ref = $ml->selectrow_hashref($statement ); - undef $lastquery; - is($hash_ref, undef, $key.' test error 404 body'); - is($Monitoring::Livestatus::ErrorCode, '404', $key.' test error 404 status') or - diag('got error: '.$Monitoring::Livestatus::ErrorMessage); - - $statement = "GET hosts\nColumns: unknown"; - $lastquery = $statement; - $hash_ref = $ml->selectrow_hashref($statement ); - undef $lastquery; - is($hash_ref, undef, $key.' test error 405 body'); - TODO: { - local $TODO = 'livestatus returns wrong status'; - is($Monitoring::Livestatus::ErrorCode, '405', $key.' test error 405 status') or - diag('got error: '.$Monitoring::Livestatus::ErrorMessage); - }; - - ######################### - # some more broken statements - $statement = "GET "; - $lastquery = $statement; - $hash_ref = $ml->selectrow_hashref($statement); - undef $lastquery; - is($hash_ref, undef, $key.' test error 403 body'); - is($Monitoring::Livestatus::ErrorCode, '403', $key.' test error 403 status: GET ') or - diag('got error: '.$Monitoring::Livestatus::ErrorMessage); - - $statement = "GET hosts\nColumns: name, name"; - $lastquery = $statement; - $hash_ref = $ml->selectrow_hashref($statement ); - undef $lastquery; - is($hash_ref, undef, $key.' test error 405 body'); - is($Monitoring::Livestatus::ErrorCode, '405', $key.' test error 405 status: GET hosts\nColumns: name, name') or - diag('got error: '.$Monitoring::Livestatus::ErrorMessage); - - $statement = "GET hosts\nColumns: "; - $lastquery = $statement; - $hash_ref = $ml->selectrow_hashref($statement ); - undef $lastquery; - is($hash_ref, undef, $key.' test error 405 body'); - is($Monitoring::Livestatus::ErrorCode, '405', $key.' test error 405 status: GET hosts\nColumns: ') or - diag('got error: '.$Monitoring::Livestatus::ErrorMessage); - - ######################### - # some forbidden headers - $statement = "GET hosts\nKeepAlive: on"; - $lastquery = $statement; - $hash_ref = $ml->selectrow_hashref($statement ); - undef $lastquery; - is($hash_ref, undef, $key.' test error 496 body'); - is($Monitoring::Livestatus::ErrorCode, '496', $key.' test error 496 status: KeepAlive: on') or - diag('got error: '.$Monitoring::Livestatus::ErrorMessage); - - $statement = "GET hosts\nResponseHeader: fixed16"; - $lastquery = $statement; - $hash_ref = $ml->selectrow_hashref($statement ); - undef $lastquery; - is($hash_ref, undef, $key.' test error 495 body'); - is($Monitoring::Livestatus::ErrorCode, '495', $key.' 
test error 495 status: ResponseHeader: fixed16') or
- diag('got error: '.$Monitoring::Livestatus::ErrorMessage);
- - $statement = "GET hosts\nColumnHeaders: on";
- $lastquery = $statement;
- $hash_ref = $ml->selectrow_hashref($statement );
- undef $lastquery;
- is($hash_ref, undef, $key.' test error 494 body');
- is($Monitoring::Livestatus::ErrorCode, '494', $key.' test error 494 status: ColumnHeaders: on') or
- diag('got error: '.$Monitoring::Livestatus::ErrorMessage);
- - $statement = "GET hosts\nOutputFormat: json";
- $lastquery = $statement;
- $hash_ref = $ml->selectrow_hashref($statement );
- undef $lastquery;
- is($hash_ref, undef, $key.' test error 493 body');
- is($Monitoring::Livestatus::ErrorCode, '493', $key.' test error 493 status: OutputFormat: json') or
- diag('got error: '.$Monitoring::Livestatus::ErrorMessage);
- - $statement = "GET hosts\nSeparators: 0 1 2 3";
- $lastquery = $statement;
- $hash_ref = $ml->selectrow_hashref($statement );
- undef $lastquery;
- is($hash_ref, undef, $key.' test error 492 body');
- is($Monitoring::Livestatus::ErrorCode, '492', $key.' test error 492 status: Separators: 0 1 2 3') or
- diag('got error: '.$Monitoring::Livestatus::ErrorMessage);
- - - #########################
- # check some fancy stats queries
- my $stats_query = "GET services
-Stats: state = 0 as all_ok
-Stats: state = 1 as all_warning
-Stats: state = 2 as all_critical
-Stats: state = 3 as all_unknown
-Stats: state = 4 as all_pending
-Stats: host_state != 0
-Stats: state = 1
-StatsAnd: 2 as all_warning_on_down_hosts
-Stats: host_state != 0
-Stats: state = 2
-StatsAnd: 2 as all_critical_on_down_hosts
-Stats: host_state != 0
-Stats: state = 3
-StatsAnd: 2 as all_unknown_on_down_hosts
-Stats: host_state != 0
-Stats: state = 3
-Stats: active_checks_enabled = 1
-StatsAnd: 3 as all_unknown_active_on_down_hosts
-Stats: state = 3
-Stats: active_checks_enabled = 1
-StatsOr: 2 as all_active_or_unknown";
- $lastquery = $stats_query;
- $hash_ref = $ml->selectrow_hashref($stats_query );
- undef $lastquery;
- isnt($hash_ref, undef, $key.' 
test fancy stats query') or - diag('got error: '.Dumper($hash_ref)); -} - - - -# generate expected keys -sub get_expected_keys { - my $type = shift; - my $skip = shift; - my @keys = @{$expected_keys->{$type}}; - - my @new_keys; - for my $key (@keys) { - my $replaced = 0; - for my $replace_with (keys %{$expected_keys}) { - if($key eq '__all_from_'.$replace_with.'__') { - $replaced = 1; - next if $skip; - my $prefix = $replace_with.'_'; - if($replace_with eq "hosts") { $prefix = 'host_'; } - if($replace_with eq "services") { $prefix = 'service_'; } - if($replace_with eq "commands") { $prefix = 'command_'; } - if($replace_with eq "contacts") { $prefix = 'contact_'; } - if($replace_with eq "servicegroups") { $prefix = 'servicegroup_'; } - if($replace_with eq "hostgroups") { $prefix = 'hostgroup_'; } - - if($type eq "log") { $prefix = 'current_'.$prefix; } - - if($type eq "servicesbygroup" and $replace_with eq 'services') { $prefix = ''; } - if($type eq "servicesbyhostgroup" and $replace_with eq 'services') { $prefix = ''; } - if($type eq "hostsbygroup" and $replace_with eq 'hosts') { $prefix = ''; } - - my $replace_keys = get_expected_keys($replace_with, 1); - for my $key2 (@{$replace_keys}) { - push @new_keys, $prefix.$key2; - } - } - } - if($replaced == 0) { - push @new_keys, $key; - } - } - - # has been fixed in 1.1.1rc - #if($type eq 'log') { - # my %keys = map { $_ => 1 } @new_keys; - # delete $keys{'current_contact_can_submit_commands'}; - # delete $keys{'current_contact_host_notifications_enabled'}; - # delete $keys{'current_contact_in_host_notification_period'}; - # delete $keys{'current_contact_in_service_notification_period'}; - # delete $keys{'current_contact_service_notifications_enabled'}; - # @new_keys = keys %keys; - #} - - my @return = sort @new_keys; - return(\@return); -} diff -Nru check-mk-1.2.2p3/=unpacked-tar10=/api/perl/t/31-Monitoring-Livestatus-MULTI-live-test.t check-mk-1.2.6p12/=unpacked-tar10=/api/perl/t/31-Monitoring-Livestatus-MULTI-live-test.t --- check-mk-1.2.2p3/=unpacked-tar10=/api/perl/t/31-Monitoring-Livestatus-MULTI-live-test.t 2012-10-04 13:12:37.000000000 +0000 +++ check-mk-1.2.6p12/=unpacked-tar10=/api/perl/t/31-Monitoring-Livestatus-MULTI-live-test.t 1970-01-01 00:00:00.000000000 +0000 @@ -1,95 +0,0 @@ -#!/usr/bin/env perl - -######################### - -use strict; -use Test::More; -use Data::Dumper; - -if ( ! defined $ENV{TEST_SOCKET} or !defined $ENV{TEST_SERVER} ) { - my $msg = 'Author test. 
Set $ENV{TEST_SOCKET} and $ENV{TEST_SERVER} to run';
- plan( skip_all => $msg );
-} else {
- plan( tests => 22 );
-}
- -use_ok('Monitoring::Livestatus::MULTI');
- -#########################
-# create new test object
-my $objects_to_test = {
- 'multi_one' => Monitoring::Livestatus::MULTI->new( peer => [ $ENV{TEST_SERVER} ], warnings => 0 ),
- 'multi_two' => Monitoring::Livestatus::MULTI->new( peer => [ $ENV{TEST_SERVER}, $ENV{TEST_SOCKET} ], warnings => 0 ),
- 'multi_three' => Monitoring::Livestatus::MULTI->new(
- 'verbose' => '0',
- 'warnings' => '0',
- 'timeout' => '10',
- 'peer' => [
- { 'name' => 'Mon 1', 'peer' => $ENV{TEST_SERVER} },
- { 'name' => 'Mon 2', 'peer' => $ENV{TEST_SOCKET} },
- ],
- 'keepalive' => '1'
- ),
-};
- -# don't die on errors
-#$ml->errors_are_fatal(0);
- -for my $key (keys %{$objects_to_test}) {
- my $ml = $objects_to_test->{$key};
- isa_ok($ml, 'Monitoring::Livestatus::MULTI') or BAIL_OUT("no need to continue without a proper Monitoring::Livestatus::MULTI object");
- - #########################
- # DATA INTEGRITY
- #########################
- - my $statement = "GET hosts\nColumns: state name alias\nLimit: 1";
- my $data1 = $ml->selectall_arrayref($statement, {Slice => 1});
- my $data2 = $ml->selectall_arrayref($statement, {Slice => 1, AddPeer => 1});
- for my $data (@{$data2}) {
- delete $data->{'peer_name'};
- delete $data->{'peer_addr'};
- delete $data->{'peer_key'};
- }
- is_deeply($data1, $data2, "data integrity with peers added and Columns");
- - $statement = "GET hosts\nLimit: 1";
- $data1 = $ml->selectall_arrayref($statement, {Slice => 1, Deepcopy => 1});
- $data2 = $ml->selectall_arrayref($statement, {Slice => 1, AddPeer => 1, Deepcopy => 1});
- for my $data (@{$data2}) {
- delete $data->{'peer_name'};
- delete $data->{'peer_addr'};
- delete $data->{'peer_key'};
- }
- is_deeply($data1, $data2, "data integrity with peers added without Columns");
- - #########################
- # try to change result set to scalar
- for my $data (@{$data1}) { $data->{'peer_name'} = 1; }
- for my $data (@{$data2}) { $data->{'peer_name'} = 1; }
- is_deeply($data1, $data2, "data integrity with changed result set");
- - #########################
- # try to change result set to hash
- for my $data (@{$data1}) { $data->{'peer_name'} = {}; }
- for my $data (@{$data2}) { $data->{'peer_name'} = {}; }
- is_deeply($data1, $data2, "data integrity with changed result set");
- - #########################
- # BACKENDS
- #########################
- my @backends = $ml->peer_key();
- $data1 = $ml->selectall_arrayref($statement, {Slice => 1});
- $data2 = $ml->selectall_arrayref($statement, {Slice => 1, Backend => \@backends });
- is_deeply($data1, $data2, "data integrity with backends");
- - #########################
- # BUGS
- #########################
- - #########################
- # Bug: Can't use string ("flap") as an ARRAY ref while "strict refs" in use at Monitoring/Livestatus/MULTI.pm line 206. 
- $statement = "GET servicegroups\nColumns: name alias\nFilter: name = flap\nLimit: 1"; - $data1 = $ml->selectrow_array($statement); - isnt($data1, undef, "bug check: Can't use string (\"group\")..."); -} diff -Nru check-mk-1.2.2p3/=unpacked-tar10=/api/perl/t/32-Monitoring-Livestatus-backend-test.t check-mk-1.2.6p12/=unpacked-tar10=/api/perl/t/32-Monitoring-Livestatus-backend-test.t --- check-mk-1.2.2p3/=unpacked-tar10=/api/perl/t/32-Monitoring-Livestatus-backend-test.t 2012-10-04 13:12:37.000000000 +0000 +++ check-mk-1.2.6p12/=unpacked-tar10=/api/perl/t/32-Monitoring-Livestatus-backend-test.t 1970-01-01 00:00:00.000000000 +0000 @@ -1,106 +0,0 @@ -#!/usr/bin/env perl - -######################### - -use strict; -use Carp; -use Test::More; -use Data::Dumper; - -if ( ! defined $ENV{TEST_SOCKET} or !defined $ENV{TEST_SERVER} or !defined $ENV{TEST_BACKEND} ) { - my $msg = 'Author test. Set $ENV{TEST_SOCKET} and $ENV{TEST_SERVER} and $ENV{TEST_BACKEND} to run'; - plan( skip_all => $msg ); -} else { - # we dont know yet how many tests we got - plan( tests => 55237 ); -} - -# set an alarm -my $lastquery; -$SIG{ALRM} = sub { - my @caller = caller; - $lastquery =~ s/\n+/\n/g; - print STDERR 'last query: '.$lastquery."\n" if defined $lastquery; - confess "timeout reached:".Dumper(\@caller)."\n" -}; - -use_ok('Monitoring::Livestatus'); - -######################### -my $objects_to_test = { - # UNIX - '01 unix_single_arg' => Monitoring::Livestatus::UNIX->new( $ENV{TEST_SOCKET} ), - - # TCP - '02 inet_single_arg' => Monitoring::Livestatus::INET->new( $ENV{TEST_SERVER} ), - - # MULTI - '03 multi_keepalive' => Monitoring::Livestatus->new( [ $ENV{TEST_SERVER}, $ENV{TEST_SOCKET} ] ), -}; - -for my $key (sort keys %{$objects_to_test}) { - my $ml = $objects_to_test->{$key}; - isa_ok($ml, 'Monitoring::Livestatus') or BAIL_OUT("no need to continue without a proper Monitoring::Livestatus object: ".$key); - - # dont die on errors - $ml->errors_are_fatal(0); - $ml->warnings(0); - - ######################### - # get tables - my $data = $ml->selectall_hashref("GET columns\nColumns: table", 'table'); - my @tables = sort keys %{$data}; - - ######################### - # check keys - for my $type (@tables) { - alarm(120); - my $filter = ""; - $filter = "Filter: time > ".(time() - 86400)."\n" if $type eq 'log'; - $filter .= "Filter: time < ".(time())."\n" if $type eq 'log'; - my $statement = "GET $type\n".$filter."Limit: 1"; - $lastquery = $statement; - my $keys = $ml->selectrow_hashref($statement ); - undef $lastquery; - is(ref $keys, 'HASH', $type.' keys are a hash');# or BAIL_OUT('keys are not in hash format, got '.Dumper($keys)); - - # status has no filter implemented - next if $type eq 'status'; - - for my $key (keys %{$keys}) { - my $value = $keys->{$key}; - if(index($value, ',') > 0) { my @vals = split /,/, $value; $value = $vals[0]; } - my $typefilter = "Filter: $key >= $value\n"; - if($value eq '') { - $typefilter = "Filter: $key =\n"; - } - my $statement = "GET $type\n".$filter.$typefilter."Limit: 1"; - $lastquery = $statement; - my $hash_ref = $ml->selectrow_hashref($statement ); - undef $lastquery; - is($Monitoring::Livestatus::ErrorCode, 0, "GET ".$type." Filter: ".$key." >= ".$value) or BAIL_OUT("query failed: ".$statement); - #isnt($hash_ref, undef, "GET ".$type." Filter: ".$key." >= ".$value);# or BAIL_OUT("got undef for ".$statement); - - # send test stats query - my $stats_query = [ $key.' 
= '.$value, 'std '.$key, 'min '.$key, 'max '.$key, 'avg '.$key, 'sum '.$key ];
- for my $stats_part (@{$stats_query}) {
- my $statement = "GET $type\n".$filter.$typefilter."\nStats: $stats_part";
- $lastquery = $statement;
- my $hash_ref = $ml->selectrow_hashref($statement );
- undef $lastquery;
- is($Monitoring::Livestatus::ErrorCode, 0, "GET ".$type." Filter: ".$key." >= ".$value." Stats: $stats_part") or BAIL_OUT("query failed:\n".$statement);
- - $statement = "GET $type\n".$filter.$typefilter."\nStats: $stats_part\nStatsGroupBy: $key";
- $lastquery = $statement;
- $hash_ref = $ml->selectrow_hashref($statement );
- undef $lastquery;
- is($Monitoring::Livestatus::ErrorCode, 0, "GET ".$type." Filter: ".$key." >= ".$value." Stats: $stats_part StatsGroupBy: $key") or BAIL_OUT("query failed:\n".$statement);
- }
- - # wait till backend is started up again
- if(!defined $hash_ref and $Monitoring::Livestatus::ErrorCode > 200) {
- sleep(2);
- }
- }
- }
-} diff -Nru check-mk-1.2.2p3/=unpacked-tar10=/api/perl/t/33-Monitoring-Livestatus-test_socket_timeout.t check-mk-1.2.6p12/=unpacked-tar10=/api/perl/t/33-Monitoring-Livestatus-test_socket_timeout.t --- check-mk-1.2.2p3/=unpacked-tar10=/api/perl/t/33-Monitoring-Livestatus-test_socket_timeout.t 2012-10-04 13:12:37.000000000 +0000 +++ check-mk-1.2.6p12/=unpacked-tar10=/api/perl/t/33-Monitoring-Livestatus-test_socket_timeout.t 1970-01-01 00:00:00.000000000 +0000 @@ -1,74 +0,0 @@ -#!/usr/bin/env perl
- -#########################
- -use strict;
-use Test::More;
-use Data::Dumper;
- -if ( !defined $ENV{TEST_SERVER} ) {
- my $msg = 'Author test. Set $ENV{TEST_SOCKET} and $ENV{TEST_SERVER} to run';
- plan( skip_all => $msg );
-} else {
- plan( tests => 7 );
-}
- -# set an alarm
-my $lastquery;
-$SIG{ALRM} = sub {
- my @caller = caller;
- print STDERR 'last query: '.$lastquery if defined $lastquery;
- die "timeout reached:".Dumper(\@caller)."\n"
-};
-alarm(30);
- -use_ok('Monitoring::Livestatus');
- -#use Log::Log4perl qw(:easy);
-#Log::Log4perl->easy_init($DEBUG);
- -#########################
-# Test Query
-#########################
-my $statement = "GET hosts\nColumns: alias\nFilter: name = host1";
- -#########################
-my $objects_to_test = {
- # create inet object with hash args
- '01 inet_hash_args' => Monitoring::Livestatus->new(
- verbose => 0,
- server => $ENV{TEST_SERVER},
- keepalive => 1,
- timeout => 3,
- retries_on_connection_error => 0,
-# logger => get_logger(),
- ),
- - # create inet object with a single arg
- '02 inet_single_arg' => Monitoring::Livestatus::INET->new( $ENV{TEST_SERVER} ),
- -};
- -for my $key (sort keys %{$objects_to_test}) {
- my $ml = $objects_to_test->{$key};
- isa_ok($ml, 'Monitoring::Livestatus');
- - # we don't need warnings for testing
- $ml->warnings(0);
- - #########################
- my $ary_ref = $ml->selectall_arrayref($statement);
- is($Monitoring::Livestatus::ErrorCode, 0, 'Query Status 0');
- #is_deeply($ary_ref, $selectall_arrayref1, 'selectall_arrayref($statement)')
- # or diag("got: ".Dumper($ary_ref)."\nbut expected ".Dumper($selectall_arrayref1));
- - sleep(10);
- - $ary_ref = $ml->selectall_arrayref($statement);
- is($Monitoring::Livestatus::ErrorCode, 0, 'Query Status 0');
- #is_deeply($ary_ref, $selectall_arrayref1, 'selectall_arrayref($statement)')
- # or diag("got: ".Dumper($ary_ref)."\nbut expected ".Dumper($selectall_arrayref1));
- - #print Dumper($Monitoring::Livestatus::ErrorCode);
- #print Dumper($Monitoring::Livestatus::ErrorMessage);
-} diff -Nru 
check-mk-1.2.2p3/=unpacked-tar10=/api/perl/t/34-Monitoring-Livestatus-utf8_support.t check-mk-1.2.6p12/=unpacked-tar10=/api/perl/t/34-Monitoring-Livestatus-utf8_support.t --- check-mk-1.2.2p3/=unpacked-tar10=/api/perl/t/34-Monitoring-Livestatus-utf8_support.t 2012-10-04 13:12:37.000000000 +0000 +++ check-mk-1.2.6p12/=unpacked-tar10=/api/perl/t/34-Monitoring-Livestatus-utf8_support.t 1970-01-01 00:00:00.000000000 +0000 @@ -1,78 +0,0 @@ -#!/usr/bin/env perl
- -#########################
- -use strict;
-use Encode;
-use Test::More;
-use Data::Dumper;
- -if ( !defined $ENV{TEST_SERVER} ) {
- my $msg = 'Author test. Set $ENV{TEST_SOCKET} and $ENV{TEST_SERVER} to run';
- plan( skip_all => $msg );
-} else {
- plan( tests => 9 );
-}
- -use_ok('Monitoring::Livestatus');
- -#use Log::Log4perl qw(:easy);
-#Log::Log4perl->easy_init($DEBUG);
- -#########################
-my $objects_to_test = {
- # create inet object with hash args
- '01 inet_hash_args' => Monitoring::Livestatus->new(
- verbose => 0,
- server => $ENV{TEST_SERVER},
- keepalive => 1,
- timeout => 3,
- retries_on_connection_error => 0,
-# logger => get_logger(),
- ),
- - # create inet object with a single arg
- '02 inet_single_arg' => Monitoring::Livestatus::INET->new( $ENV{TEST_SERVER} ),
-};
- -my $author = 'Monitoring::Livestatus test';
-for my $key (sort keys %{$objects_to_test}) {
- my $ml = $objects_to_test->{$key};
- isa_ok($ml, 'Monitoring::Livestatus');
- - # we don't need warnings for testing
- $ml->warnings(0);
- - #########################
- my $downtimes = $ml->selectall_arrayref("GET downtimes\nColumns: id");
- my $num_downtimes = 0;
- $num_downtimes = scalar @{$downtimes} if defined $downtimes;
- - #########################
- # get a test host
- my $firsthost = $ml->selectscalar_value("GET hosts\nColumns: name\nLimit: 1");
- isnt($firsthost, undef, 'get test hostname') or BAIL_OUT($key.': got no test hostname');
- - my $expect = "aa ²&é\"'''(§è!çà)- %s ''%s'' aa ~ € bb";
- #my $expect = "öäüß";
- my $teststrings = [
- $expect,
- "aa \x{c2}\x{b2}&\x{c3}\x{a9}\"'''(\x{c2}\x{a7}\x{c3}\x{a8}!\x{c3}\x{a7}\x{c3}\x{a0})- %s ''%s'' aa ~ \x{e2}\x{82}\x{ac} bb",
- ];
- for my $string (@{$teststrings}) {
- $ml->do('COMMAND ['.time().'] SCHEDULE_HOST_DOWNTIME;'.$firsthost.';'.time().';'.(time()+300).';1;0;300;'.$author.';'.$string);
- - # sometimes it takes a while until the downtime is accepted
- my $waited = 0;
- while($downtimes = $ml->selectall_arrayref("GET downtimes\nColumns: id comment", { Slice => 1 }) and scalar @{$downtimes} < $num_downtimes + 1) {
- print "waiting for the downtime...\n";
- sleep(1);
- $waited++;
- BAIL_OUT('waited 30 seconds for the downtime...') if $waited > 30;
- }
- - my $last_downtime = pop @{$downtimes};
- #utf8::decode($expect);
- is($last_downtime->{'comment'}, $expect, 'get same utf8 comment: got '.Dumper($last_downtime));
- }
-} diff -Nru check-mk-1.2.2p3/=unpacked-tar10=/api/perl/t/35-Monitoring-Livestatus-callbacks_support.t check-mk-1.2.6p12/=unpacked-tar10=/api/perl/t/35-Monitoring-Livestatus-callbacks_support.t --- check-mk-1.2.2p3/=unpacked-tar10=/api/perl/t/35-Monitoring-Livestatus-callbacks_support.t 2012-10-04 13:12:37.000000000 +0000 +++ check-mk-1.2.6p12/=unpacked-tar10=/api/perl/t/35-Monitoring-Livestatus-callbacks_support.t 1970-01-01 00:00:00.000000000 +0000 @@ -1,53 +0,0 @@ -#!/usr/bin/env perl
- -#########################
- -use strict;
-use Encode;
-use Test::More;
-use Data::Dumper;
- -if ( !defined $ENV{TEST_SERVER} ) {
- my $msg = 'Author test. 
Set $ENV{TEST_SOCKET} and $ENV{TEST_SERVER} to run'; - plan( skip_all => $msg ); -} else { - plan( tests => 15 ); -} - -use_ok('Monitoring::Livestatus'); - -#use Log::Log4perl qw(:easy); -#Log::Log4perl->easy_init($DEBUG); - -######################### -my $objects_to_test = { - # create inet object with hash args - '01 inet_hash_args' => Monitoring::Livestatus->new( - verbose => 0, - server => $ENV{TEST_SERVER}, - keepalive => 1, - timeout => 3, - retries_on_connection_error => 0, -# logger => get_logger(), - ), - - # create inet object with a single arg - '02 inet_single_arg' => Monitoring::Livestatus::INET->new( $ENV{TEST_SERVER} ), -}; - -for my $key (sort keys %{$objects_to_test}) { - my $ml = $objects_to_test->{$key}; - isa_ok($ml, 'Monitoring::Livestatus'); - - my $got = $ml->selectall_arrayref("GET hosts\nColumns: name alias state\nLimit: 1", { Slice => 1, callbacks => { 'c1' => sub { return $_[0]->{'alias'}; } } }); - isnt($got->[0]->{'alias'}, undef, 'got a test host'); - is($got->[0]->{'alias'}, $got->[0]->{'c1'}, 'callback for sliced results'); - - $got = $ml->selectall_arrayref("GET hosts\nColumns: name alias state\nLimit: 1", { Slice => 1, callbacks => { 'name' => sub { return $_[0]->{'alias'}; } } }); - isnt($got->[0]->{'alias'}, undef, 'got a test host'); - is($got->[0]->{'alias'}, $got->[0]->{'name'}, 'callback for sliced results which overwrites key'); - - $got = $ml->selectall_arrayref("GET hosts\nColumns: name alias state\nLimit: 1", { callbacks => { 'c1' => sub { return $_[0]->[1]; } } }); - isnt($got->[0]->[1], undef, 'got a test host'); - is($got->[0]->[1], $got->[0]->[3], 'callback for non sliced results'); -} diff -Nru check-mk-1.2.2p3/=unpacked-tar10=/api/perl/t/97-Pod.t check-mk-1.2.6p12/=unpacked-tar10=/api/perl/t/97-Pod.t --- check-mk-1.2.2p3/=unpacked-tar10=/api/perl/t/97-Pod.t 2012-10-04 13:12:37.000000000 +0000 +++ check-mk-1.2.6p12/=unpacked-tar10=/api/perl/t/97-Pod.t 1970-01-01 00:00:00.000000000 +0000 @@ -1,9 +0,0 @@ -use strict; -use warnings; -use Test::More; - -eval "use Test::Pod 1.14"; -plan skip_all => 'Test::Pod 1.14 required' if $@; -plan skip_all => 'Author test. Set $ENV{TEST_AUTHOR} to a true value to run.' unless $ENV{TEST_AUTHOR}; - -all_pod_files_ok(); diff -Nru check-mk-1.2.2p3/=unpacked-tar10=/api/perl/t/98-Pod-Coverage.t check-mk-1.2.6p12/=unpacked-tar10=/api/perl/t/98-Pod-Coverage.t --- check-mk-1.2.2p3/=unpacked-tar10=/api/perl/t/98-Pod-Coverage.t 2012-10-04 13:12:37.000000000 +0000 +++ check-mk-1.2.6p12/=unpacked-tar10=/api/perl/t/98-Pod-Coverage.t 1970-01-01 00:00:00.000000000 +0000 @@ -1,23 +0,0 @@ -#!/usr/bin/env perl -# -# $Id$ -# -use strict; -use warnings; -use File::Spec; -use Test::More; - -if ( not $ENV{TEST_AUTHOR} ) { - my $msg = 'Author test. 
Set $ENV{TEST_AUTHOR} to a true value to run.'; - plan( skip_all => $msg ); -} - -eval { require Test::Pod::Coverage; }; - -if ( $@ ) { - my $msg = 'Test::Pod::Coverage required to criticise pod'; - plan( skip_all => $msg ); -} - -eval "use Test::Pod::Coverage 1.00"; -all_pod_coverage_ok(); diff -Nru check-mk-1.2.2p3/=unpacked-tar10=/api/perl/t/99-Perl-Critic.t check-mk-1.2.6p12/=unpacked-tar10=/api/perl/t/99-Perl-Critic.t --- check-mk-1.2.2p3/=unpacked-tar10=/api/perl/t/99-Perl-Critic.t 2012-10-04 13:12:37.000000000 +0000 +++ check-mk-1.2.6p12/=unpacked-tar10=/api/perl/t/99-Perl-Critic.t 1970-01-01 00:00:00.000000000 +0000 @@ -1,24 +0,0 @@ -#!/usr/bin/env perl -# -# $Id$ -# -use strict; -use warnings; -use File::Spec; -use Test::More; - -if ( not $ENV{TEST_AUTHOR} ) { - my $msg = 'Author test. Set $ENV{TEST_AUTHOR} to a true value to run.'; - plan( skip_all => $msg ); -} - -eval { require Test::Perl::Critic; }; - -if ( $@ ) { - my $msg = 'Test::Perl::Critic required to criticise code'; - plan( skip_all => $msg ); -} - -my $rcfile = File::Spec->catfile( 't', 'perlcriticrc' ); -Test::Perl::Critic->import( -profile => $rcfile ); -all_critic_ok(); diff -Nru check-mk-1.2.2p3/=unpacked-tar10=/api/perl/t/perlcriticrc check-mk-1.2.6p12/=unpacked-tar10=/api/perl/t/perlcriticrc --- check-mk-1.2.2p3/=unpacked-tar10=/api/perl/t/perlcriticrc 2012-10-04 13:12:37.000000000 +0000 +++ check-mk-1.2.6p12/=unpacked-tar10=/api/perl/t/perlcriticrc 1970-01-01 00:00:00.000000000 +0000 @@ -1,286 +0,0 @@ -############################################################################## -# This Perl::Critic configuration file sets the Policy severity levels -# according to Damian Conway's own personal recommendations. Feel free to -# use this as your own, or make modifications. 
-############################################################################## - -[Perl::Critic::Policy::ValuesAndExpressions::ProhibitAccessOfPrivateData] -severity = 3 - -[Perl::Critic::Policy::BuiltinFunctions::ProhibitLvalueSubstr] -severity = 3 - -[Perl::Critic::Policy::BuiltinFunctions::ProhibitReverseSortBlock] -severity = 1 - -[Perl::Critic::Policy::BuiltinFunctions::ProhibitSleepViaSelect] -severity = 5 - -[Perl::Critic::Policy::BuiltinFunctions::ProhibitStringyEval] -severity = 5 - -[Perl::Critic::Policy::BuiltinFunctions::ProhibitStringySplit] -severity = 2 - -[Perl::Critic::Policy::BuiltinFunctions::ProhibitUniversalCan] -severity = 4 - -[Perl::Critic::Policy::BuiltinFunctions::ProhibitUniversalIsa] -severity = 4 - -[Perl::Critic::Policy::BuiltinFunctions::ProhibitVoidGrep] -severity = 3 - -[Perl::Critic::Policy::BuiltinFunctions::ProhibitVoidMap] -severity = 3 - -[Perl::Critic::Policy::BuiltinFunctions::RequireBlockGrep] -severity = 4 - -[Perl::Critic::Policy::BuiltinFunctions::RequireBlockMap] -severity = 4 - -[Perl::Critic::Policy::BuiltinFunctions::RequireGlobFunction] -severity = 5 - -[Perl::Critic::Policy::BuiltinFunctions::RequireSimpleSortBlock] -severity = 3 - -[Perl::Critic::Policy::ClassHierarchies::ProhibitAutoloading] -severity = 3 - -[Perl::Critic::Policy::ClassHierarchies::ProhibitExplicitISA] -severity = 4 - -[Perl::Critic::Policy::ClassHierarchies::ProhibitOneArgBless] -severity = 5 - -[Perl::Critic::Policy::CodeLayout::ProhibitHardTabs] -severity = 3 - -[Perl::Critic::Policy::CodeLayout::ProhibitParensWithBuiltins] -severity = 1 - -[Perl::Critic::Policy::CodeLayout::ProhibitQuotedWordLists] -severity = 2 - -[Perl::Critic::Policy::CodeLayout::RequireConsistentNewlines] -severity = 4 - -[Perl::Critic::Policy::CodeLayout::RequireTidyCode] -severity = 1 - -[Perl::Critic::Policy::CodeLayout::RequireTrailingCommas] -severity = 3 - -[Perl::Critic::Policy::ControlStructures::ProhibitCStyleForLoops] -severity = 3 - -[Perl::Critic::Policy::ControlStructures::ProhibitCascadingIfElse] -severity = 3 - -[Perl::Critic::Policy::ControlStructures::ProhibitDeepNests] -severity = 3 - -[Perl::Critic::Policy::ControlStructures::ProhibitMutatingListFunctions] -severity = 5 - -[Perl::Critic::Policy::ControlStructures::ProhibitPostfixControls] -severity = 4 - -[Perl::Critic::Policy::ControlStructures::ProhibitUnlessBlocks] -severity = 4 - -[Perl::Critic::Policy::ControlStructures::ProhibitUnreachableCode] -severity = 4 - -[Perl::Critic::Policy::ControlStructures::ProhibitUntilBlocks] -severity = 4 - -[Perl::Critic::Policy::Documentation::RequirePodAtEnd] -severity = 2 - -[Perl::Critic::Policy::Documentation::RequirePodSections] -severity = 2 - -[Perl::Critic::Policy::ErrorHandling::RequireCarping] -severity = 4 - -[Perl::Critic::Policy::InputOutput::ProhibitBacktickOperators] -severity = 3 - -[Perl::Critic::Policy::InputOutput::ProhibitBarewordFileHandles] -severity = 5 - -[Perl::Critic::Policy::InputOutput::ProhibitInteractiveTest] -severity = 4 - -[Perl::Critic::Policy::InputOutput::ProhibitOneArgSelect] -severity = 4 - -[Perl::Critic::Policy::InputOutput::ProhibitReadlineInForLoop] -severity = 5 - -[Perl::Critic::Policy::InputOutput::ProhibitTwoArgOpen] -severity = 4 - -[Perl::Critic::Policy::InputOutput::RequireBracedFileHandleWithPrint] -severity = 3 - -[Perl::Critic::Policy::Miscellanea::ProhibitFormats] -severity = 3 - -[Perl::Critic::Policy::Miscellanea::ProhibitTies] -severity = 4 - -[-Perl::Critic::Policy::Miscellanea::RequireRcsKeywords] - 
-[Perl::Critic::Policy::Modules::ProhibitAutomaticExportation] -severity = 4 - -[Perl::Critic::Policy::Modules::ProhibitEvilModules] -severity = 5 - -[Perl::Critic::Policy::Modules::ProhibitMultiplePackages] -severity = 4 - -[Perl::Critic::Policy::Modules::RequireBarewordIncludes] -severity = 5 - -[Perl::Critic::Policy::Modules::RequireEndWithOne] -severity = 4 - -[Perl::Critic::Policy::Modules::RequireExplicitPackage] -severity = 4 - -[Perl::Critic::Policy::Modules::RequireFilenameMatchesPackage] -severity = 5 - -[Perl::Critic::Policy::Modules::RequireVersionVar] -severity = 4 - -[Perl::Critic::Policy::NamingConventions::ProhibitAmbiguousNames] -severity = 3 - -[Perl::Critic::Policy::NamingConventions::ProhibitMixedCaseSubs] -severity = 1 - -[Perl::Critic::Policy::NamingConventions::ProhibitMixedCaseVars] -severity = 1 - -[Perl::Critic::Policy::References::ProhibitDoubleSigils] -severity = 4 - -[Perl::Critic::Policy::RegularExpressions::ProhibitCaptureWithoutTest] -severity = 4 - -[Perl::Critic::Policy::RegularExpressions::RequireExtendedFormatting] -severity = 5 - -[Perl::Critic::Policy::RegularExpressions::RequireLineBoundaryMatching] -severity = 5 - -[Perl::Critic::Policy::Subroutines::ProhibitAmpersandSigils] -severity = 2 - -[Perl::Critic::Policy::Subroutines::ProhibitBuiltinHomonyms] -severity = 4 - -[Perl::Critic::Policy::Subroutines::ProhibitExcessComplexity] -severity = 3 - -[Perl::Critic::Policy::Subroutines::ProhibitExplicitReturnUndef] -severity = 5 - -[Perl::Critic::Policy::Subroutines::ProhibitSubroutinePrototypes] -severity = 4 - -[Perl::Critic::Policy::Subroutines::ProtectPrivateSubs] -severity = 3 - -[Perl::Critic::Policy::Subroutines::RequireFinalReturn] -severity = 5 - -[Perl::Critic::Policy::TestingAndDebugging::ProhibitNoStrict] -severity = 5 - -[Perl::Critic::Policy::TestingAndDebugging::ProhibitNoWarnings] -severity = 4 - -[Perl::Critic::Policy::TestingAndDebugging::ProhibitProlongedStrictureOverride] -severity = 4 - -[Perl::Critic::Policy::TestingAndDebugging::RequireTestLabels] -severity = 3 - -[Perl::Critic::Policy::TestingAndDebugging::RequireUseStrict] -severity = 5 - -[Perl::Critic::Policy::TestingAndDebugging::RequireUseWarnings] -severity = 4 - -[Perl::Critic::Policy::ValuesAndExpressions::ProhibitConstantPragma] -severity = 4 - -[Perl::Critic::Policy::ValuesAndExpressions::ProhibitEmptyQuotes] -severity = 2 - -[Perl::Critic::Policy::ValuesAndExpressions::ProhibitEscapedCharacters] -severity = 2 - -[Perl::Critic::Policy::ValuesAndExpressions::ProhibitInterpolationOfLiterals] -severity = 1 - -[Perl::Critic::Policy::ValuesAndExpressions::ProhibitLeadingZeros] -severity = 5 - -[Perl::Critic::Policy::ValuesAndExpressions::ProhibitMismatchedOperators] -severity = 2 - -[Perl::Critic::Policy::ValuesAndExpressions::ProhibitMixedBooleanOperators] -severity = 4 - -[Perl::Critic::Policy::ValuesAndExpressions::ProhibitNoisyQuotes] -severity = 2 - -[Perl::Critic::Policy::ValuesAndExpressions::ProhibitVersionStrings] -severity = 3 - -[Perl::Critic::Policy::ValuesAndExpressions::RequireInterpolationOfMetachars] -severity = 1 - -[Perl::Critic::Policy::ValuesAndExpressions::RequireNumberSeparators] -severity = 2 - -[Perl::Critic::Policy::ValuesAndExpressions::RequireQuotedHeredocTerminator] -severity = 4 - -[Perl::Critic::Policy::ValuesAndExpressions::RequireUpperCaseHeredocTerminator] -severity = 4 - -[Perl::Critic::Policy::Variables::ProhibitConditionalDeclarations] -severity = 5 - -[Perl::Critic::Policy::Variables::ProhibitLocalVars] -severity = 2 - 
-[Perl::Critic::Policy::Variables::ProhibitMatchVars] -severity = 4 - -[Perl::Critic::Policy::Variables::ProhibitPackageVars] -severity = 3 - -[Perl::Critic::Policy::Variables::ProhibitPunctuationVars] -severity = 2 - -[Perl::Critic::Policy::Variables::ProtectPrivateVars] -severity = 3 - -[Perl::Critic::Policy::Variables::RequireInitializationForLocalVars] -severity = 5 - -[Perl::Critic::Policy::Variables::RequireLexicalLoopIterators] -severity = 5 - -[Perl::Critic::Policy::Variables::RequireNegativeIndices] -severity = 4 \ No newline at end of file diff -Nru check-mk-1.2.2p3/=unpacked-tar10=/api/python/example_multisite.py check-mk-1.2.6p12/=unpacked-tar10=/api/python/example_multisite.py --- check-mk-1.2.2p3/=unpacked-tar10=/api/python/example_multisite.py 2013-03-04 11:48:41.000000000 +0000 +++ check-mk-1.2.6p12/=unpacked-tar10=/api/python/example_multisite.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,93 +0,0 @@ -#!/usr/bin/python -# -*- encoding: utf-8; py-indent-offset: 4 -*- -# +------------------------------------------------------------------+ -# | ____ _ _ __ __ _ __ | -# | / ___| |__ ___ ___| | __ | \/ | |/ / | -# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | -# | | |___| | | | __/ (__| < | | | | . \ | -# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | -# | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | -# +------------------------------------------------------------------+ -# -# This file is part of Check_MK. -# The official homepage is at http://mathias-kettner.de/check_mk. -# -# check_mk is free software; you can redistribute it and/or modify it -# under the terms of the GNU General Public License as published by -# the Free Software Foundation in version 2. check_mk is distributed -# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- -# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A -# PARTICULAR PURPOSE. See the GNU General Public License for more de- -# ails. You should have received a copy of the GNU General Public -# License along with GNU Make; see the file COPYING. If not, write -# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, -# Boston, MA 02110-1301 USA. 
- -import os, sys
-import livestatus
- -try:
- omd_root = os.getenv("OMD_ROOT")
- socket_path = "unix:" + omd_root + "/tmp/run/live"
-except:
- sys.stderr.write("This example is intended to run in an OMD site\n")
- sys.stderr.write("Please change socket_path in this example, if you are\n")
- sys.stderr.write("not using OMD.\n")
- sys.exit(1)
- - -sites = {
- "muc" : {
- "socket" : socket_path,
- "alias" : "Munich",
- },
- "sitea" : {
- "alias" : "Augsburg",
- "socket" : "tcp:sitea:6557",
- "nagios_url" : "/nagios/",
- "timeout" : 2,
- },
- "siteb" : {
- "alias" : "Berlin",
- "socket" : "tcp:siteb:6557",
- "nagios_url" : "/nagios/",
- "timeout" : 10,
- },
-}
- -c = livestatus.MultiSiteConnection(sites)
-c.set_prepend_site(True)
-print c.query("GET hosts\nColumns: name state\n")
-c.set_prepend_site(False)
-print c.query("GET hosts\nColumns: name state\n")
- -# Beware: When doing stats, you need to aggregate yourself:
-print sum(c.query_column("GET hosts\nStats: state >= 0\n"))
- -# Detect errors:
-sites = {
- "muc" : {
- "socket" : "unix:/var/run/nagios/rw/live",
- "alias" : "Munich",
- },
- "sitea" : {
- "alias" : "Augsburg",
- "socket" : "tcp:sitea:6558", # BROKEN
- "nagios_url" : "/nagios/",
- "timeout" : 2,
- },
- "siteb" : {
- "alias" : "Berlin",
- "socket" : "tcp:siteb:6557",
- "nagios_url" : "/nagios/",
- "timeout" : 10,
- },
-}
- -c = livestatus.MultiSiteConnection(sites)
-for name, state in c.query("GET hosts\nColumns: name state\n"):
- print "%-15s: %d" % (name, state)
-print "Dead sites:"
-for sitename, info in c.dead_sites().items():
- print "%s: %s" % (sitename, info["exception"]) diff -Nru check-mk-1.2.2p3/=unpacked-tar10=/api/python/example.py check-mk-1.2.6p12/=unpacked-tar10=/api/python/example.py --- check-mk-1.2.2p3/=unpacked-tar10=/api/python/example.py 2013-03-04 11:48:41.000000000 +0000 +++ check-mk-1.2.6p12/=unpacked-tar10=/api/python/example.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,72 +0,0 @@ -#!/usr/bin/python
-# -*- encoding: utf-8; py-indent-offset: 4 -*-
-# +------------------------------------------------------------------+
-# | ____ _ _ __ __ _ __ |
-# | / ___| |__ ___ ___| | __ | \/ | |/ / |
-# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / |
-# | | |___| | | | __/ (__| < | | | | . \ |
-# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ |
-# | |
-# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de |
-# +------------------------------------------------------------------+
-#
-# This file is part of Check_MK.
-# The official homepage is at http://mathias-kettner.de/check_mk.
-#
-# check_mk is free software; you can redistribute it and/or modify it
-# under the terms of the GNU General Public License as published by
-# the Free Software Foundation in version 2. check_mk is distributed
-# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with-
-# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A
-# PARTICULAR PURPOSE. See the GNU General Public License for more de-
-# ails. You should have received a copy of the GNU General Public
-# License along with GNU Make; see the file COPYING. If not, write
-# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
-# Boston, MA 02110-1301 USA. 
- -import os, sys
-import livestatus
- -try:
- omd_root = os.getenv("OMD_ROOT")
- socket_path = "unix:" + omd_root + "/tmp/run/live"
-except:
- sys.stderr.write("This example is intended to run in an OMD site\n")
- sys.stderr.write("Please change socket_path in this example, if you are\n")
- sys.stderr.write("not using OMD.\n")
- sys.exit(1)
- -try:
- # Make a single connection for each query
- print "\nPerformance:"
- for key, value in livestatus.SingleSiteConnection(socket_path).query_row_assoc("GET status").items():
- print "%-30s: %s" % (key, value)
- print "\nHosts:"
- hosts = livestatus.SingleSiteConnection(socket_path).query_table("GET hosts\nColumns: name alias address")
- for name, alias, address in hosts:
- print "%-16s %-16s %s" % (name, address, alias)
- - # Do several queries in one connection
- conn = livestatus.SingleSiteConnection(socket_path)
- num_up = conn.query_value("GET hosts\nStats: hard_state = 0")
- print "\nHosts up: %d" % num_up
- - stats = conn.query_row(
- "GET services\n"
- "Stats: state = 0\n"
- "Stats: state = 1\n"
- "Stats: state = 2\n"
- "Stats: state = 3\n")
- print "Service stats: %d/%d/%d/%d" % tuple(stats)
- - print "List of commands: %s" % \
- ", ".join(conn.query_column("GET commands\nColumns: name"))
- - print "Query error:"
- conn.query_value("GET hosts\nColumns: hirni")
- - -except Exception, e: # livestatus.MKLivestatusException, e:
- print "Livestatus error: %s" % str(e)
- - diff -Nru check-mk-1.2.2p3/=unpacked-tar10=/api/python/livestatus.py check-mk-1.2.6p12/=unpacked-tar10=/api/python/livestatus.py --- check-mk-1.2.2p3/=unpacked-tar10=/api/python/livestatus.py 2013-03-04 11:48:41.000000000 +0000 +++ check-mk-1.2.6p12/=unpacked-tar10=/api/python/livestatus.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,636 +0,0 @@ -#!/usr/bin/python
-# -*- encoding: utf-8; py-indent-offset: 4 -*-
-# +------------------------------------------------------------------+
-# | ____ _ _ __ __ _ __ |
-# | / ___| |__ ___ ___| | __ | \/ | |/ / |
-# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / |
-# | | |___| | | | __/ (__| < | | | | . \ |
-# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ |
-# | |
-# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de |
-# +------------------------------------------------------------------+
-#
-# This file is part of Check_MK.
-# The official homepage is at http://mathias-kettner.de/check_mk.
-#
-# check_mk is free software; you can redistribute it and/or modify it
-# under the terms of the GNU General Public License as published by
-# the Free Software Foundation in version 2. check_mk is distributed
-# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with-
-# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A
-# PARTICULAR PURPOSE. See the GNU General Public License for more de-
-# ails. You should have received a copy of the GNU General Public
-# License along with GNU Make; see the file COPYING. If not, write
-# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
-# Boston, MA 02110-1301 USA.
- -import socket, time
- -# Python 2.3 does not have 'set' in normal namespace.
-# But it can be imported from 'sets'
-try:
- set()
-except NameError:
- from sets import Set as set
- -"""MK Livestatus Python API
- -This module allows easy access to Nagios via MK Livestatus.
-It supports persistent connections via the connection class. 
-If you want single-shot connections, just initialize a
-connection object on-the-fly, e.g.:
- -r = connection("/var/lib/nagios/rw/live").query_table_assoc("GET hosts")
- -For persistent connections create and keep an object:
- -conn = connection("/var/lib/nagios/rw/live")
-r1 = conn.query_table_assoc("GET hosts")
-r2 = conn.query_row("GET status")
-"""
- -# Keep a global array of persistent connections
-persistent_connections = {}
- -# DEBUGGING PERSISTENT CONNECTIONS
-# import os
-# hirn_debug = file("/tmp/live.log", "a")
-# def hirn(x):
-# pid = os.getpid()
-# hirn_debug.write("[\033[1;3%d;4%dm%d\033[0m] %s\n" % (pid%7+1, (pid/7)%7+1, pid, x))
-# hirn_debug.flush()
- -class MKLivestatusException(Exception):
- def __init__(self, value):
- self.parameter = value
- def __str__(self):
- return str(self.parameter)
- -class MKLivestatusSocketError(MKLivestatusException):
- def __init__(self, reason):
- MKLivestatusException.__init__(self, reason)
- -class MKLivestatusSocketClosed(MKLivestatusSocketError):
- def __init__(self, reason):
- MKLivestatusSocketError.__init__(self, reason)
- -class MKLivestatusConfigError(MKLivestatusException):
- def __init__(self, reason):
- MKLivestatusException.__init__(self, reason)
- -class MKLivestatusQueryError(MKLivestatusException):
- def __init__(self, code, reason):
- MKLivestatusException.__init__(self, "%s: %s" % (code, reason))
- self.code = code
- -class MKLivestatusNotFoundError(MKLivestatusException):
- def __init__(self, query):
- MKLivestatusException.__init__(self, query)
- self.query = query
- -# We need some unique value here
-NO_DEFAULT = lambda: None
-class Helpers:
- def query_value(self, query, deflt = NO_DEFAULT):
- """Issues a query that returns exactly one line and one column and returns
- the response as a single value"""
- result = self.query(query, "ColumnHeaders: off\n")
- try:
- return result[0][0]
- except:
- if deflt == NO_DEFAULT:
- raise MKLivestatusNotFoundError(query)
- else:
- return deflt
- - def query_row(self, query):
- """Issues a query that returns one line of data and returns the elements
- of that line as list"""
- return self.query(query, "ColumnHeaders: off\n")[0]
- - def query_row_assoc(self, query):
- """Issues a query that returns one line of data and returns the elements
- of that line as a dictionary from column names to values"""
- r = self.query(query, "ColumnHeaders: on\n")[0:2]
- return dict(zip(r[0], r[1]))
- - def query_column(self, query):
- """Issues a query that returns exactly one column and returns the values
- of all lines in that column as a single list"""
- return [ l[0] for l in self.query(query, "ColumnHeaders: off\n") ]
- - def query_column_unique(self, query):
- """Issues a query that returns exactly one column and returns the values
- of all lines with duplicates removed"""
- result = []
- for line in self.query(query, "ColumnHeaders: off\n"):
- if line[0] not in result:
- result.append(line[0])
- return result
- - def query_table(self, query):
- """Issues a query that may return multiple lines and columns and returns
- a list of lists"""
- return self.query(query, "ColumnHeaders: off\n")
- - def query_table_assoc(self, query):
- """Issues a query that may return multiple lines and columns and returns
- a dictionary from column names to values for each line. 
This can be
- very inefficient for large response sets."""
- response = self.query(query, "ColumnHeaders: on\n")
- headers = response[0]
- result = []
- for line in response[1:]:
- result.append(dict(zip(headers, line)))
- return result
- - def query_summed_stats(self, query, add_headers = ""):
- """Convenience function for adding up numbers from Stats queries
- Adds up results column-wise. This is useful for multisite queries."""
- data = self.query(query, add_headers)
- if len(data) == 1:
- return data[0]
- elif len(data) == 0:
- raise MKLivestatusNotFoundError("Empty result to Stats-Query")
- - result = []
- for x in range(0, len(data[0])):
- result.append(sum([row[x] for row in data]))
- return result
- - -class BaseConnection:
- def __init__(self, socketurl, persist = False):
- """Create a new connection to a MK Livestatus socket"""
- self.add_headers = ""
- self.persist = persist
- self.socketurl = socketurl
- self.socket = None
- self.timeout = None
- self.successful_persistence = False
- - def successfully_persisted(self):
- return self.successful_persistence
- - def add_header(self, header):
- self.add_headers += header + "\n"
- - def set_timeout(self, timeout):
- self.timeout = timeout
- if self.socket:
- self.socket.settimeout(float(timeout))
- - def connect(self):
- if self.persist and self.socketurl in persistent_connections:
- self.socket = persistent_connections[self.socketurl]
- self.successful_persistence = True
- return
- - self.successful_persistence = False
- - # Create new socket
- self.socket = None
- url = self.socketurl
- parts = url.split(":")
- if parts[0] == "unix":
- if len(parts) != 2:
- raise MKLivestatusConfigError("Invalid livestatus unix url: %s. "
- "Correct example is 'unix:/var/run/nagios/rw/live'" % url)
- self.socket = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
- target = parts[1]
- - elif parts[0] == "tcp":
- try:
- host = parts[1]
- port = int(parts[2])
- except:
- raise MKLivestatusConfigError("Invalid livestatus tcp url '%s'. "
- "Correct example is 'tcp:somehost:6557'" % url)
- self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
- target = (host, port)
- else:
- raise MKLivestatusConfigError("Invalid livestatus url '%s'. 
" - "Must begin with 'tcp:' or 'unix:'" % url) - - try: - if self.timeout: - self.socket.settimeout(float(self.timeout)) - self.socket.connect(target) - except Exception, e: - self.socket = None - raise MKLivestatusSocketError("Cannot connect to '%s': %s" % (self.socketurl, e)) - - if self.persist: - persistent_connections[self.socketurl] = self.socket - - def disconnect(self): - self.socket = None - if self.persist: - del persistent_connections[self.socketurl] - - def receive_data(self, size): - result = "" - while size > 0: - packet = self.socket.recv(size) - if len(packet) == 0: - raise MKLivestatusSocketClosed("Read zero data from socket, nagios server closed connection") - size -= len(packet) - result += packet - return result - - def do_query(self, query, add_headers = ""): - self.send_query(query, add_headers) - return self.recv_response(query, add_headers) - - def send_query(self, query, add_headers = ""): - if self.socket == None: - self.connect() - if not query.endswith("\n"): - query += "\n" - query += self.auth_header + self.add_headers - query += "Localtime: %d\nOutputFormat: python\nKeepAlive: on\nResponseHeader: fixed16\n" % int(time.time()) - query += add_headers - if not query.endswith("\n"): - query += "\n" - query += "\n" - - try: - self.socket.send(query) - except IOError, e: - if self.persist: - del persistent_connections[self.socketurl] - self.successful_persistence = False - self.socket = None - raise MKLivestatusSocketError(str(e)) - - # Reads a response from the livestatus socket. If the socket is closed - # by the livestatus server, we automatically make a reconnect and send - # the query again (once). This is due to timeouts during keepalive. - def recv_response(self, query = None, add_headers = ""): - try: - resp = self.receive_data(16) - code = resp[0:3] - try: - length = int(resp[4:15].lstrip()) - except: - raise MKLivestatusSocketError("Malformed output. 
Livestatus TCP socket might be unreachable.") - data = self.receive_data(length) - if code == "200": - try: - return eval(data) - except: - raise MKLivestatusSocketError("Malformed output") - else: - raise MKLivestatusQueryError(code, data.strip()) - except MKLivestatusSocketClosed: - self.disconnect() - if query: - self.connect() - self.send_query(query, add_headers) - return self.recv_response() # do not send query again -> danger of infinite loop - else: - raise - - except IOError, e: - self.socket = None - if self.persist: - del persistent_connections[self.socketurl] - raise MKLivestatusSocketError(str(e)) - - def do_command(self, command): - if self.socket == None: - self.connect() - if not command.endswith("\n"): - command += "\n" - try: - self.socket.send("COMMAND " + command + "\n") - except IOError, e: - self.socket = None - if self.persist: - del persistent_connections[self.socketurl] - raise MKLivestatusSocketError(str(e)) - - -class SingleSiteConnection(BaseConnection, Helpers): - def __init__(self, socketurl, persist = False): - BaseConnection.__init__(self, socketurl, persist) - self.prepend_site = False - self.auth_users = {} - self.deadsites = {} # never filled, just for compatibility - self.auth_header = "" - self.limit = None - - def set_prepend_site(self, p): - self.prepend_site = p - - def set_only_sites(self, os = None): - pass - - def set_limit(self, limit = None): - self.limit = limit - - def query(self, query, add_headers = ""): - if self.limit != None: - query += "Limit: %d\n" % self.limit - data = self.do_query(query, add_headers) - if self.prepend_site: - return [ [''] + line for line in data ] - else: - return data - - def command(self, command, site = None): - self.do_command(command) - - # Set user to be used in certain authorization domain - def set_auth_user(self, domain, user): - if user: - self.auth_users[domain] = user - else: - del self.auth_users[domain] - - # Switch future request to new authorization domain - def set_auth_domain(self, domain): - auth_user = self.auth_users.get(domain) - if auth_user: - self.auth_header = "AuthUser: %s\n" % auth_user - else: - self.auth_header = "" - - -# sites is a dictionary from site name to a dict. -# Keys in the dictionary: -# socket: socketurl (obligatory) -# timeout: timeout for tcp/unix in seconds - -class MultiSiteConnection(Helpers): - def __init__(self, sites, disabled_sites = []): - self.sites = sites - self.connections = [] - self.deadsites = {} - self.prepend_site = False - self.only_sites = None - self.limit = None - self.parallelize = True - - # Helper function for connecting to a site - def connect_to_site(sitename, site, temporary=False): - try: - url = site["socket"] - persist = not temporary and site.get("persist", False) - connection = SingleSiteConnection(url, persist) - if "timeout" in site: - connection.set_timeout(int(site["timeout"])) - connection.connect() - self.connections.append((sitename, site, connection)) - - except Exception, e: - self.deadsites[sitename] = { - "exception" : e, - "site" : site, - } - - # Needed for temporary connection for status_hosts in disabled sites - def disconnect_site(sitename): - i = 0 - for name, site, connection in self.connections: - if name == sitename: - del self.connections[i] - return - i += 1 - - - # Status host: A status host helps to prevent trying to connect - # to a remote site which is unreachable. This is done by looking - # at the current state of a certain host on a local site that is - # representing the connection to the remote site. 
The status host
- # is specified as an optional pair of (site, host) in the entry
- # "status_host". We first connect to all sites without a status_host
- # entry, then retrieve the host states of the status hosts and then
- # connect to the remote sites which are reachable
- - # Tackle very special problem: If the user disables a site which
- # provides status_host information for other sites, the dead-detection
- # would not work. For such cases we make a temporary connection just
- # to fetch the status information
- extra_status_sites = {}
- if len(disabled_sites) > 0:
- status_sitenames = set([])
- for sitename, site in sites.items():
- try:
- s, h = site.get("status_host")
- status_sitenames.add(s)
- except:
- continue
- for sitename in status_sitenames:
- site = disabled_sites.get(sitename)
- if site:
- extra_status_sites[sitename] = site
- - - # First connect to sites without status host. Collect status
- # hosts at the same time.
- - status_hosts = {} # dict from site to list of status_hosts
- for sitename, site in sites.items() + extra_status_sites.items():
- status_host = site.get("status_host")
- if status_host:
- if type(status_host) != tuple or len(status_host) != 2:
- raise MKLivestatusConfigError("Status host of site %s is %r, but must be pair of site and host" %
- (sitename, status_host))
- s, h = status_host
- status_hosts[s] = status_hosts.get(s, []) + [h]
- else:
- connect_to_site(sitename, site)
- - # Now learn current states of status hosts and store it in a dictionary
- # from (local_site, host) => state
- status_host_states = {}
- for sitename, hosts in status_hosts.items():
- # Fetch all the states of status hosts of this local site in one query
- query = "GET hosts\nColumns: name state has_been_checked last_time_up\n"
- for host in hosts:
- query += "Filter: name = %s\n" % host
- query += "Or: %d\n" % len(hosts)
- self.set_only_sites([sitename]) # only connect one site
- try:
- result = self.query_table(query)
- # raise MKLivestatusConfigError("TRESulT: %s" % (result,))
- for host, state, has_been_checked, lastup in result:
- if has_been_checked == 0:
- state = 3
- status_host_states[(sitename, host)] = (state, lastup)
- except Exception, e:
- raise MKLivestatusConfigError(e)
- status_host_states[(sitename, host)] = (str(e), None)
- self.set_only_sites() # clear site filter
- - # Disconnect from disabled sites that we connected to only to
- # get status information from
- for sitename, site in extra_status_sites.items():
- disconnect_site(sitename)
- - # Now loop over all sites having a status_host and take the state
- # of that host into consideration
- - for sitename, site in sites.items():
- status_host = site.get("status_host")
- if status_host:
- now = time.time()
- shs, lastup = status_host_states.get(status_host, (4, now)) # None => Status host not existing
- deltatime = now - lastup
- if shs == 0 or shs == None:
- connect_to_site(sitename, site)
- else:
- if shs == 1:
- ex = "The remote monitoring host is down"
- elif shs == 2:
- ex = "The remote monitoring host is unreachable"
- elif shs == 3:
- ex = "The remote monitoring host's state is not yet determined"
- elif shs == 4:
- ex = "Invalid status host: site %s has no host %s" % (status_host[0], status_host[1])
- else:
- ex = "Error determining state of remote monitoring host: %s" % shs
- self.deadsites[sitename] = {
- "site" : site,
- "status_host_state" : shs,
- "exception" : ex,
- }
- - def add_header(self, header):
- for sitename, site, connection in self.connections:
- connection.add_header(header)
- - 
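The status_host mechanism above is driven entirely by the sites dictionary handed to MultiSiteConnection. A minimal sketch of such a configuration, assuming hypothetical site names "local" and "slave1" and a hypothetical host "slave1-status" that represents the remote site on the local site:

sites = {
    "local"  : { "socket" : "unix:/var/run/nagios/rw/live" },
    "slave1" : {
        "socket"      : "tcp:slave1:6557",
        # pair of (local site, host object): if "slave1-status" is not UP
        # on site "local", no connection to slave1 is attempted and the
        # site shows up in dead_sites() with an explanatory message
        "status_host" : ("local", "slave1-status"),
    },
}
c = livestatus.MultiSiteConnection(sites)
print c.dead_sites()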
def set_prepend_site(self, p):
- self.prepend_site = p
- - def set_only_sites(self, os = None):
- self.only_sites = os
- - # Impose Limit on number of returned datasets (distributed among sites)
- def set_limit(self, limit = None):
- self.limit = limit
- - def dead_sites(self):
- return self.deadsites
- - def alive_sites(self):
- return [ sitename for sitename, site, connection in self.connections ]
- - def successfully_persisted(self):
- for sitename, site, connection in self.connections:
- if connection.successfully_persisted():
- return True
- return False
- - def set_auth_user(self, domain, user):
- for sitename, site, connection in self.connections:
- connection.set_auth_user(domain, user)
- - def set_auth_domain(self, domain):
- for sitename, site, connection in self.connections:
- connection.set_auth_domain(domain)
- - def query(self, query, add_headers = ""):
- if self.parallelize:
- return self.query_parallel(query, add_headers)
- else:
- return self.query_non_parallel(query, add_headers)
- - def query_non_parallel(self, query, add_headers = ""):
- result = []
- stillalive = []
- limit = self.limit
- for sitename, site, connection in self.connections:
- if self.only_sites != None and sitename not in self.only_sites:
- stillalive.append( (sitename, site, connection) ) # state unknown, assume still alive
- continue
- try:
- if limit != None:
- limit_header = "Limit: %d\n" % limit
- else:
- limit_header = ""
- r = connection.query(query, add_headers + limit_header)
- if self.prepend_site:
- r = [ [sitename] + l for l in r ]
- if limit != None:
- limit -= len(r) # Account for portion of limit used by this site
- result += r
- stillalive.append( (sitename, site, connection) )
- except Exception, e:
- self.deadsites[sitename] = {
- "exception" : e,
- "site" : site,
- }
- self.connections = stillalive
- return result
- - # New parallelized version of query(). The semantics differ in the handling
- # of Limit: since all sites are queried in parallel, the Limit: is simply
- # applied to all sites - resulting in possibly more results than Limit requests.
- def query_parallel(self, query, add_headers = ""):
- if self.only_sites != None:
- active_sites = [ c for c in self.connections if c[0] in self.only_sites ]
- else:
- active_sites = self.connections
- - start_time = time.time()
- stillalive = []
- limit = self.limit
- if limit != None:
- limit_header = "Limit: %d\n" % limit
- else:
- limit_header = ""
- - # First send all queries
- for sitename, site, connection in active_sites:
- try:
- connection.send_query(query, add_headers + limit_header)
- except Exception, e:
- self.deadsites[sitename] = {
- "exception" : e,
- "site" : site,
- }
- - # Then retrieve all answers. We will be as slow as the slowest of all
- connections. 
- result = [] - for sitename, site, connection in self.connections: - if self.only_sites != None and sitename not in self.only_sites: - stillalive.append( (sitename, site, connection) ) # state unknown, assume still alive - continue - - try: - r = connection.recv_response(query, add_headers + limit_header) - stillalive.append( (sitename, site, connection) ) - if self.prepend_site: - r = [ [sitename] + l for l in r ] - result += r - except Exception, e: - self.deadsites[sitename] = { - "exception" : e, - "site" : site, - } - - - self.connections = stillalive - return result - - def command(self, command, sitename = "local"): - if sitename in self.deadsites: - raise MKLivestatusSocketError("Connection to site %s is dead: %s" % \ - (sitename, self.deadsites[sitename]["exception"])) - conn = [t[2] for t in self.connections if t[0] == sitename] - if len(conn) == 0: - raise MKLivestatusConfigError("Cannot send command to unconfigured site '%s'" % sitename) - conn[0].do_command(command) - - # Return connection to localhost (UNIX), if available - def local_connection(self): - for sitename, site, connection in self.connections: - if site["socket"].startswith("unix:"): - return connection - raise MKLivestatusConfigError("No livestatus connection to local host") - -# Example for forcing local connection: -# live.local_connection().query_single_value(...) diff -Nru check-mk-1.2.2p3/=unpacked-tar10=/api/python/make_nagvis_map.py check-mk-1.2.6p12/=unpacked-tar10=/api/python/make_nagvis_map.py --- check-mk-1.2.2p3/=unpacked-tar10=/api/python/make_nagvis_map.py 2013-03-04 11:48:41.000000000 +0000 +++ check-mk-1.2.6p12/=unpacked-tar10=/api/python/make_nagvis_map.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,119 +0,0 @@ -#!/usr/bin/python -# -*- encoding: utf-8; py-indent-offset: 4 -*- -# +------------------------------------------------------------------+ -# | ____ _ _ __ __ _ __ | -# | / ___| |__ ___ ___| | __ | \/ | |/ / | -# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | -# | | |___| | | | __/ (__| < | | | | . \ | -# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | -# | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | -# +------------------------------------------------------------------+ -# -# This file is part of Check_MK. -# The official homepage is at http://mathias-kettner.de/check_mk. -# -# check_mk is free software; you can redistribute it and/or modify it -# under the terms of the GNU General Public License as published by -# the Free Software Foundation in version 2. check_mk is distributed -# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- -# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A -# PARTICULAR PURPOSE. See the GNU General Public License for more de- -# ails. You should have received a copy of the GNU General Public -# License along with GNU Make; see the file COPYING. If not, write -# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, -# Boston, MA 02110-1301 USA. - -# This is an example for a usage of Livestatus: it creates -# a NagVis map using actual live data from a running Nagios -# system. Most things are hardcoded here but this might be -# a useful example for coding your own stuff... 
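Rounding off the deleted MultiSiteConnection code above, a hedged usage sketch before the deleted NagVis example continues below (construction arguments and the site name "munich" are invented; query_single_value follows the comment above):

    import time, livestatus

    live = livestatus.MultiSiteConnection(sites)   # sites: config dict as sketched earlier

    # send a Nagios external command over the connection of one specific site
    live.command("[%d] ACKNOWLEDGE_HOST_PROBLEM;web01;1;1;0;admin;working on it"
                 % time.time(), sitename="munich")

    # force a query onto the local UNIX-socket connection
    version = live.local_connection().query_single_value(
        "GET status\nColumns: livestatus_version\n")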
- -import livestatus - -g_y = 50 -y_title = 40 -lineheight = 30 -x_hostgroup = 30 -x_therm = 200 -x_usv = 560 - -def make_label(text, x, y, width): - print """ -define textbox { - text=%s - x=%d - y=%d - background_color=#C0C0C1 - border_color=#000055 - w=%d -}""" % (text, x, y, width) - - -def render_hostgroup(name, alias): - global g_y - g_y += lineheight - - # Name of the server room - make_label(alias, x_hostgroup, g_y, x_therm - x_hostgroup - 20) - def display_servicegroup(name, x): - if live.query_value("GET servicegroups\nStats: name = %s\n" % name) == 1: - print """ -define servicegroup { - servicegroup_name = %s - x=%d - y=%d -}""" % (name, x, g_y) - - # Individual listing of the thermometers - num = 0 - shift = 16 - for host, service in live.query("GET services\nFilter: groups >= %s\nColumns: host_name description" % name): - num += 1 - print """ -define service { - host_name=%s - service_description=%s - x=%d - y=%d - url=/pnp4nagios/graph?host=%s&srv=%s -} - """ % (host, service, x + 30 + shift * num, g_y, host, service) - - # Overall thermometer state - display_servicegroup(name + "_therm", x_therm) - - # Listing of the UPS parameters - display_servicegroup(name + "_usv", x_usv) - - - - -socket_path = "unix:/var/run/nagios/rw/live" -live = livestatus.SingleSiteConnection(socket_path) - -print """ -define global { - allowed_for_config=nagiosadmin - allowed_user=nagiosadmin - map_image=demo_background.png - iconset=std_medium -} -""" - - -# hostgroups = live.query("GET hostgroups\nColumns: name alias") -hostgroups = [ - ( "s02", "S-02" ), - ( "s06", "S-06" ), - ( "s48", "S-48" ), - ( "ad214", "AD-214" ), - ( "ik026", "IK-026" ), - ( "etage", "Etagenverteiler" ), - ] -for name, alias in hostgroups: - render_hostgroup(name, alias) - -make_label("Temperaturen", x_therm, y_title, 250) -make_label("USV-Status", x_usv, y_title, 160) - diff -Nru check-mk-1.2.2p3/=unpacked-tar10=/api/python/README check-mk-1.2.6p12/=unpacked-tar10=/api/python/README --- check-mk-1.2.2p3/=unpacked-tar10=/api/python/README 2012-10-04 13:12:37.000000000 +0000 +++ check-mk-1.2.6p12/=unpacked-tar10=/api/python/README 1970-01-01 00:00:00.000000000 +0000 @@ -1,23 +0,0 @@ -This directory contains a very efficient API to MK Livestatus -for Python. It is directly taken from the Multisite GUI and -has the following features: - -* It supports keep alive -* It returns typed values -* It supports transparent multi-site access -* It supports persistent connection caching -* It supports parallelized queries (though still single-threaded) -* It supports detection of dead sites (via "status_host") - -Please look at the two examples: - -example.py: Example for a single site -example_multisite.py: Example querying several sites - -Both examples are written to be run within an OMD instance -and need no further configuration. - -If you are not using OMD, you need to modify the examples -and enter the correct path to your livestatus socket. -Or even better: give OMD a try --> omdistro.org. This will -make your life *really* easier! 
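The example.py referred to by this deleted README is not part of the hunk; a minimal sketch in its spirit (Python 2, like the rest of this tree; the socket path matches the one used elsewhere here and may differ on your installation):

    import livestatus

    live = livestatus.SingleSiteConnection("unix:/var/run/nagios/rw/live")

    # typed values: "state" arrives as an int, "name" as a str
    for name, state in live.query("GET hosts\nColumns: name state"):
        print name, state

Keep-alive, persistent connection caching and parallel multi-site queries, as listed in the README, live inside the API itself and need no extra code on the caller's side.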
diff -Nru check-mk-1.2.2p3/=unpacked-tar10=/LQL-examples/10.lql check-mk-1.2.6p12/=unpacked-tar10=/LQL-examples/10.lql --- check-mk-1.2.2p3/=unpacked-tar10=/LQL-examples/10.lql 2012-10-04 13:12:37.000000000 +0000 +++ check-mk-1.2.6p12/=unpacked-tar10=/LQL-examples/10.lql 1970-01-01 00:00:00.000000000 +0000 @@ -1,7 +0,0 @@ -GET services -Stats: state = 0 -Stats: state = 1 -Stats: state = 2 -Stats: state = 3 -StatsGroupBy: host_name -OutputFormat: json diff -Nru check-mk-1.2.2p3/=unpacked-tar10=/LQL-examples/11.lql check-mk-1.2.6p12/=unpacked-tar10=/LQL-examples/11.lql --- check-mk-1.2.2p3/=unpacked-tar10=/LQL-examples/11.lql 2012-10-04 13:12:37.000000000 +0000 +++ check-mk-1.2.6p12/=unpacked-tar10=/LQL-examples/11.lql 1970-01-01 00:00:00.000000000 +0000 @@ -1,3 +0,0 @@ -GET log -Columns: message -Limit: 10 diff -Nru check-mk-1.2.2p3/=unpacked-tar10=/LQL-examples/12.lql check-mk-1.2.6p12/=unpacked-tar10=/LQL-examples/12.lql --- check-mk-1.2.2p3/=unpacked-tar10=/LQL-examples/12.lql 2012-10-04 13:12:37.000000000 +0000 +++ check-mk-1.2.6p12/=unpacked-tar10=/LQL-examples/12.lql 1970-01-01 00:00:00.000000000 +0000 @@ -1,4 +0,0 @@ -GET log -Columns: message -Filter: host_name = windows -Limit: 10 diff -Nru check-mk-1.2.2p3/=unpacked-tar10=/LQL-examples/13.lql check-mk-1.2.6p12/=unpacked-tar10=/LQL-examples/13.lql --- check-mk-1.2.2p3/=unpacked-tar10=/LQL-examples/13.lql 2012-10-04 13:12:37.000000000 +0000 +++ check-mk-1.2.6p12/=unpacked-tar10=/LQL-examples/13.lql 1970-01-01 00:00:00.000000000 +0000 @@ -1,8 +0,0 @@ -GET services -Stats: state = 0 -Stats: state = 1 -Stats: state = 2 -Stats: state = 3 -StatsGroupBy: host_name -AuthUser: mk - diff -Nru check-mk-1.2.2p3/=unpacked-tar10=/LQL-examples/1.lql check-mk-1.2.6p12/=unpacked-tar10=/LQL-examples/1.lql --- check-mk-1.2.2p3/=unpacked-tar10=/LQL-examples/1.lql 2012-10-04 13:12:37.000000000 +0000 +++ check-mk-1.2.6p12/=unpacked-tar10=/LQL-examples/1.lql 1970-01-01 00:00:00.000000000 +0000 @@ -1,2 +0,0 @@ -GET hosts -Columns: name state diff -Nru check-mk-1.2.2p3/=unpacked-tar10=/LQL-examples/2.lql check-mk-1.2.6p12/=unpacked-tar10=/LQL-examples/2.lql --- check-mk-1.2.2p3/=unpacked-tar10=/LQL-examples/2.lql 2012-10-04 13:12:37.000000000 +0000 +++ check-mk-1.2.6p12/=unpacked-tar10=/LQL-examples/2.lql 1970-01-01 00:00:00.000000000 +0000 @@ -1,3 +0,0 @@ -GET hosts -Columns: name state -Filter: state = 0 diff -Nru check-mk-1.2.2p3/=unpacked-tar10=/LQL-examples/3.lql check-mk-1.2.6p12/=unpacked-tar10=/LQL-examples/3.lql --- check-mk-1.2.2p3/=unpacked-tar10=/LQL-examples/3.lql 2012-10-04 13:12:37.000000000 +0000 +++ check-mk-1.2.6p12/=unpacked-tar10=/LQL-examples/3.lql 1970-01-01 00:00:00.000000000 +0000 @@ -1,2 +0,0 @@ -GET columns -Columns: table name diff -Nru check-mk-1.2.2p3/=unpacked-tar10=/LQL-examples/4.lql check-mk-1.2.6p12/=unpacked-tar10=/LQL-examples/4.lql --- check-mk-1.2.2p3/=unpacked-tar10=/LQL-examples/4.lql 2012-10-04 13:12:37.000000000 +0000 +++ check-mk-1.2.6p12/=unpacked-tar10=/LQL-examples/4.lql 1970-01-01 00:00:00.000000000 +0000 @@ -1,3 +0,0 @@ -GET columns -Columns: name -Filter: table = services diff -Nru check-mk-1.2.2p3/=unpacked-tar10=/LQL-examples/5.lql check-mk-1.2.6p12/=unpacked-tar10=/LQL-examples/5.lql --- check-mk-1.2.2p3/=unpacked-tar10=/LQL-examples/5.lql 2012-10-04 13:12:37.000000000 +0000 +++ check-mk-1.2.6p12/=unpacked-tar10=/LQL-examples/5.lql 1970-01-01 00:00:00.000000000 +0000 @@ -1,3 +0,0 @@ -GET services -Columns: host_name description last_check last_hard_state_change -Filter: host_name = windows diff -Nru 
check-mk-1.2.2p3/=unpacked-tar10=/LQL-examples/6.lql check-mk-1.2.6p12/=unpacked-tar10=/LQL-examples/6.lql --- check-mk-1.2.2p3/=unpacked-tar10=/LQL-examples/6.lql 2012-10-04 13:12:37.000000000 +0000 +++ check-mk-1.2.6p12/=unpacked-tar10=/LQL-examples/6.lql 1970-01-01 00:00:00.000000000 +0000 @@ -1,5 +0,0 @@ -GET services -Columns: host_name description state -Filter: state = 1 -Filter: state = 2 -Or: 2 diff -Nru check-mk-1.2.2p3/=unpacked-tar10=/LQL-examples/7.lql check-mk-1.2.6p12/=unpacked-tar10=/LQL-examples/7.lql --- check-mk-1.2.2p3/=unpacked-tar10=/LQL-examples/7.lql 2012-10-04 13:12:37.000000000 +0000 +++ check-mk-1.2.6p12/=unpacked-tar10=/LQL-examples/7.lql 1970-01-01 00:00:00.000000000 +0000 @@ -1,5 +0,0 @@ -GET services -Stats: state = 0 -Stats: state = 1 -Stats: state = 2 -Stats: state = 3 diff -Nru check-mk-1.2.2p3/=unpacked-tar10=/LQL-examples/8.lql check-mk-1.2.6p12/=unpacked-tar10=/LQL-examples/8.lql --- check-mk-1.2.2p3/=unpacked-tar10=/LQL-examples/8.lql 2012-10-04 13:12:37.000000000 +0000 +++ check-mk-1.2.6p12/=unpacked-tar10=/LQL-examples/8.lql 1970-01-01 00:00:00.000000000 +0000 @@ -1,6 +0,0 @@ -GET services -Stats: state = 0 -Stats: state = 1 -Stats: state = 2 -Stats: state = 3 -Filter: host_state = 0 diff -Nru check-mk-1.2.2p3/=unpacked-tar10=/LQL-examples/9.lql check-mk-1.2.6p12/=unpacked-tar10=/LQL-examples/9.lql --- check-mk-1.2.2p3/=unpacked-tar10=/LQL-examples/9.lql 2012-10-04 13:12:37.000000000 +0000 +++ check-mk-1.2.6p12/=unpacked-tar10=/LQL-examples/9.lql 1970-01-01 00:00:00.000000000 +0000 @@ -1,7 +0,0 @@ -GET services -Stats: state = 0 -Stats: state = 1 -Stats: state = 2 -Stats: state = 3 -StatsGroupBy: host_name - diff -Nru check-mk-1.2.2p3/=unpacked-tar11=/api/c++/demo.cc check-mk-1.2.6p12/=unpacked-tar11=/api/c++/demo.cc --- check-mk-1.2.2p3/=unpacked-tar11=/api/c++/demo.cc 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/=unpacked-tar11=/api/c++/demo.cc 2014-10-30 13:30:24.000000000 +0000 @@ -0,0 +1,59 @@ +// +------------------------------------------------------------------+ +// | ____ _ _ __ __ _ __ | +// | / ___| |__ ___ ___| | __ | \/ | |/ / | +// | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +// | | |___| | | | __/ (__| < | | | | . \ | +// | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +// | | +// | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +// +------------------------------------------------------------------+ +// +// This file is part of Check_MK. +// The official homepage is at http://mathias-kettner.de/check_mk. +// +// check_mk is free software; you can redistribute it and/or modify it +// under the terms of the GNU General Public License as published by +// the Free Software Foundation in version 2. check_mk is distributed +// in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +// out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +// PARTICULAR PURPOSE. See the GNU General Public License for more de- +// ails. You should have received a copy of the GNU General Public +// License along with GNU Make; see the file COPYING. If not, write +// to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +// Boston, MA 02110-1301 USA. 
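The deleted .lql files above are raw livestatus queries intended to be piped into the socket. For instance 10.lql, the per-host service-state statistics with JSON output, can be replayed by hand; a sketch assuming a local livestatus socket at the usual OMD/Nagios path:

    import socket, json

    query = ("GET services\n"
             "Stats: state = 0\nStats: state = 1\n"
             "Stats: state = 2\nStats: state = 3\n"
             "StatsGroupBy: host_name\n"
             "OutputFormat: json\n")

    s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
    s.connect("/var/run/nagios/rw/live")
    s.send(query)
    s.shutdown(socket.SHUT_WR)      # end of query; livestatus now answers
    answer = ""
    while True:
        chunk = s.recv(4096)
        if not chunk:
            break
        answer += chunk
    # one row per host: [host_name, ok, warn, crit, unknown]
    for host, ok, warn, crit, unknown in json.loads(answer):
        print host, ok, warn, crit, unknown

The same pattern fits the other deleted examples; 13.lql additionally shows the AuthUser: header, which limits the answer to objects visible to the given contact.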
+ +#include <stdio.h> +#include "Livestatus.h" + +const char *query = "GET status\nColumns: livestatus_version program_version\nColumnHeaders: on\n"; +#define MAX_LINE_SIZE 8192 + +int main(int argc, char **argv) +{ + if (argc != 2) { + fprintf(stderr, "Usage: %s SOCKETPATH\n", argv[0]); + return 1; + } + + const char *socket_path = argv[1]; + Livestatus live; + live.connectUNIX(socket_path); + if (live.isConnected()) { + live.sendQuery(query); + std::vector<std::string> *row; + while (0 != (row = live.nextRow())) + { + printf("Line:\n"); + for (int i=0; i<row->size(); i++) + printf("%s\n", (*row)[i].c_str()); + delete row; + } + live.disconnect(); + } + else { + fprintf(stderr, "Couldn't connect to socket '%s'\n", socket_path); + return 1; + } + return 0; +} + diff -Nru check-mk-1.2.2p3/=unpacked-tar11=/api/c++/Livestatus.cc check-mk-1.2.6p12/=unpacked-tar11=/api/c++/Livestatus.cc --- check-mk-1.2.2p3/=unpacked-tar11=/api/c++/Livestatus.cc 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/=unpacked-tar11=/api/c++/Livestatus.cc 2014-10-30 13:30:24.000000000 +0000 @@ -0,0 +1,98 @@ +// +------------------------------------------------------------------+ +// | ____ _ _ __ __ _ __ | +// | / ___| |__ ___ ___| | __ | \/ | |/ / | +// | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +// | | |___| | | | __/ (__| < | | | | . \ | +// | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +// | | +// | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +// +------------------------------------------------------------------+ +// +// This file is part of Check_MK. +// The official homepage is at http://mathias-kettner.de/check_mk. +// +// check_mk is free software; you can redistribute it and/or modify it +// under the terms of the GNU General Public License as published by +// the Free Software Foundation in version 2. check_mk is distributed +// in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +// out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +// PARTICULAR PURPOSE. See the GNU General Public License for more de- +// ails. You should have received a copy of the GNU General Public +// License along with GNU Make; see the file COPYING. If not, write +// to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +// Boston, MA 02110-1301 USA. 
+ +#include <string.h> +#include <unistd.h> +#include <stdio.h> +#include <sys/socket.h> +#include <sys/un.h> +#include "Livestatus.h" + +#define SEPARATORS "Separators: 10 1 2 3\n" + +void Livestatus::connectUNIX(const char *socket_path) +{ + _connection = socket(PF_LOCAL, SOCK_STREAM, 0); + struct sockaddr_un sa; + sa.sun_family = AF_LOCAL; + strncpy(sa.sun_path, socket_path, sizeof(sa.sun_path)); + if (0 > connect(_connection, (const struct sockaddr *)&sa, sizeof(sockaddr_un))) { + close(_connection); + _connection = -1; + } + else + _file = fdopen(_connection, "r"); +} + + +Livestatus::~Livestatus() +{ + disconnect(); +} + +void Livestatus::disconnect() +{ + if (isConnected()) { + if (_file) + fclose(_file); + else + close(_connection); + } + _connection = -1; + _file = 0; +} + +void Livestatus::sendQuery(const char *query) +{ + write(_connection, query, strlen(query)); + write(_connection, SEPARATORS, strlen(SEPARATORS)); + shutdown(_connection, SHUT_WR); +} + + +std::vector<std::string> *Livestatus::nextRow() +{ + char line[65536]; + if (0 != fgets(line, sizeof(line), _file)) { + // strip trailing linefeed + char *end = strlen(line) + line; + if (end > line && *(end-1) == '\n') { + *(end-1) = 0; + --end; + } + std::vector<std::string> *row = new std::vector<std::string>; + char *scan = line; + while (scan < end) { + char *zero = scan; + while (zero < end && *zero != '\001') zero++; + *zero = 0; + row->push_back(std::string(scan)); + scan = zero + 1; + } + return row; + } + else + return 0; +} + diff -Nru check-mk-1.2.2p3/=unpacked-tar11=/api/c++/Livestatus.h check-mk-1.2.6p12/=unpacked-tar11=/api/c++/Livestatus.h --- check-mk-1.2.2p3/=unpacked-tar11=/api/c++/Livestatus.h 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/=unpacked-tar11=/api/c++/Livestatus.h 2014-10-30 13:30:24.000000000 +0000 @@ -0,0 +1,54 @@ +// +------------------------------------------------------------------+ +// | ____ _ _ __ __ _ __ | +// | / ___| |__ ___ ___| | __ | \/ | |/ / | +// | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +// | | |___| | | | __/ (__| < | | | | . \ | +// | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +// | | +// | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +// +------------------------------------------------------------------+ +// +// This file is part of Check_MK. +// The official homepage is at http://mathias-kettner.de/check_mk. +// +// check_mk is free software; you can redistribute it and/or modify it +// under the terms of the GNU General Public License as published by +// the Free Software Foundation in version 2. check_mk is distributed +// in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +// out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +// PARTICULAR PURPOSE. See the GNU General Public License for more de- +// ails. You should have received a copy of the GNU General Public +// License along with GNU Make; see the file COPYING. If not, write +// to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +// Boston, MA 02110-1301 USA. + +#ifndef Livestatus_h +#define Livestatus_h + +#include <stdio.h> +#include <string> +#include <vector> + +// simple C++ API for accessing Livestatus from C++, +// currently supports only UNIX sockets, no TCP. But +// this is only a simple enhancement. 
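The wire protocol implemented by sendQuery() and nextRow() above is small: write the query plus a Separators: header, half-close the socket, read everything, then split rows on newline and fields on \001. The same exchange restated as a Python sketch, before the class declaration that follows (socket path invented):

    import socket

    def query_raw(path, query):
        s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
        s.connect(path)
        # same separators as the SEPARATORS define above:
        # 10 = "\n" between rows, 1 = "\001" between fields
        s.send(query + "Separators: 10 1 2 3\n")
        s.shutdown(socket.SHUT_WR)   # mirrors shutdown(_connection, SHUT_WR)
        data = ""
        while True:
            chunk = s.recv(8192)
            if not chunk:
                break
            data += chunk
        return [line.split("\001") for line in data.split("\n") if line]

    for row in query_raw("/var/run/nagios/rw/live",
                         "GET status\nColumns: livestatus_version program_version\n"):
        print row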
+ +class Livestatus +{ + int _connection; + FILE *_file; + +public: + Livestatus() : _connection(-1), _file(0) {}; + ~Livestatus(); + void connectUNIX(const char *socketpath); + bool isConnected() const { return _connection >= 0; }; + void disconnect(); + void sendQuery(const char *query); + std::vector<std::string> *nextRow(); +}; + + + +#endif // Livestatus_h + diff -Nru check-mk-1.2.2p3/=unpacked-tar11=/api/c++/Makefile check-mk-1.2.6p12/=unpacked-tar11=/api/c++/Makefile --- check-mk-1.2.2p3/=unpacked-tar11=/api/c++/Makefile 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/=unpacked-tar11=/api/c++/Makefile 2015-06-24 09:48:37.000000000 +0000 @@ -0,0 +1,42 @@ +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. + +ifneq ($(DEBUG),) + CXXFLAGS += -g -DDEBUG + LDFLAGS += -g +endif + +all: demo + +demo.o: demo.cc Livestatus.h + g++ $(CXXFLAGS) -c -o $@ $< + +Livestatus.o: Livestatus.cc Livestatus.h + g++ $(CXXFLAGS) -c -o $@ $< + +demo: demo.o Livestatus.o + g++ $(CXXFLAGS) $(LDFLAGS) -o $@ $^ + +clean: + rm -f demo.o demo Livestatus.o diff -Nru check-mk-1.2.2p3/=unpacked-tar11=/api/perl/Changes check-mk-1.2.6p12/=unpacked-tar11=/api/perl/Changes --- check-mk-1.2.2p3/=unpacked-tar11=/api/perl/Changes 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/=unpacked-tar11=/api/perl/Changes 2012-10-04 13:12:37.000000000 +0000 @@ -0,0 +1,164 @@ +Revision history for Perl extension Monitoring::Livestatus. 
+ + +0.74 Fri Apr 22 00:16:37 CEST 2011 + - fixed problem with bulk commands + +0.72 Tue Apr 19 15:38:34 CEST 2011 + - fixed problem with inet timeout + +0.70 Sat Apr 16 16:43:57 CEST 2011 + - fixed tests using english + +0.68 Wed Mar 23 23:16:22 CET 2011 + - fixed typo + +0.66 Tue Mar 22 23:19:23 CET 2011 + - added support for additonal headers + +0.64 Fri Nov 5 11:02:51 CET 2010 + - removed useless test dependecies + +0.62 Wed Nov 3 15:20:02 CET 2010 + - fixed tests with threads > 1.79 + +0.60 Wed Aug 25 15:04:22 CEST 2010 + - fixed package and made author tests optional + +0.58 Wed Aug 11 09:30:30 CEST 2010 + - added callback support + +0.56 Tue Aug 10 09:45:28 CEST 2010 + - changed parser from csv to JSON::XS + +0.54 Wed Jun 23 16:43:11 CEST 2010 + - fixed utf8 support + +0.52 Mon May 17 15:54:42 CEST 2010 + - fixed connection timeout + +0.50 Mon May 17 12:29:20 CEST 2010 + - fixed test requirements + +0.48 Sun May 16 15:16:12 CEST 2010 + - added retry option for better core restart handling + - added new columns from livestatus 1.1.4 + +0.46 Tue Mar 16 15:19:08 CET 2010 + - error code have been changed in livestatus (1.1.3) + - fixed threads support + +0.44 Sun Feb 28 12:19:56 CET 2010 + - fixed bug when disabling backends and using threads + +0.42 Thu Feb 25 21:32:37 CET 2010 + - added possibility to disable specific backends + +0.41 Sat Feb 20 20:37:36 CET 2010 + - fixed tests on windows + +0.40 Thu Feb 11 01:00:20 CET 2010 + - fixed timeout for inet sockets + +0.38 Fri Jan 29 20:54:50 CET 2010 + - added limit option + +0.37 Thu Jan 28 21:23:19 CET 2010 + - removed inc from repository + +0.36 Sun Jan 24 00:14:13 CET 2010 + - added more backend tests + - fixed problem with summing up non numbers + +0.35 Mon Jan 11 15:37:51 CET 2010 + - added TCP_NODELAY option for inet sockets + - fixed undefined values + +0.34 Sun Jan 10 12:29:57 CET 2010 + - fixed return code with multi backend and different errors + +0.32 Sat Jan 9 16:12:48 CET 2010 + - added deepcopy option + +0.31 Thu Jan 7 08:56:48 CET 2010 + - added generic tests for livestatus backend + - fixed problem when selecting specific backend + +0.30 Wed Jan 6 16:05:33 CET 2010 + - renamed project to Monitoring::Livestatus + +0.29 Mon Dec 28 00:11:53 CET 2009 + - retain order of backends when merge outut + - renamed select_scalar_value to selectscalar_value + - fixed sums for selectscalar_value + - fixed missing META.yml + +0.28 Sat Dec 19 19:19:13 CET 2009 + - fixed bug in column alias + - added support for multiple peers + - changed to Module::Install + +0.26 Fri Dec 4 08:25:07 CET 2009 + - added peer name + - added peer arg (can be socket or server) + +0.24 Wed Dec 2 23:41:34 CET 2009 + - added support for StatsAnd: and StatsOr: queries + - table alias support for selectall_hashref and selectrow_hashref + - added support for Stats: ... as alias + - added support for StatsAnd:... as alias + - added support for StatsOr: ... 
as alias + - added support for StatsGroupBy: (with alias) + - added support column aliases for Column: header + +0.22 Fri Nov 27 01:04:16 CET 2009 + - fixed errors on socket problems + - fixed sending commands + +0.20 Sun Nov 22 12:41:39 CET 2009 + - added keepalive support + - added support for ResponseHeader: fixed16 + - added error handling + - added pod test + - added tests with real socket / server + - added column aliases + - added timeout option + - implemented select_scalar_value() + - fixed perl::critic tests + +0.18 Sat Nov 14 2009 08:58:02 GMT + - fixed requirements + - fixed typos + +0.17 Fri Nov 13 17:15:44 CET 2009 + - added support for tcp connections + +0.16 Sun Nov 8 23:17:35 CET 2009 + - added support for stats querys + +0.15 Sat Nov 7 21:28:33 CET 2009 + - fixed typos in doc + - minor bugfixes + +0.14 Fri Nov 6 09:39:56 CET 2009 + - implemented selectcol_arrayref + - implemented selectrow_array + - implemented selectrow_hashref + +0.13 Fri Nov 6 00:03:38 CET 2009 + - fixed tests on solaris + - implemented selectall_hashref() + +0.12 Thu Nov 5 09:34:59 CET 2009 + - fixed tests with thread support + - added more tests + +0.11 Wed Nov 4 23:12:16 2009 + - inital working version + +0.10 Tue Nov 3 17:13:16 2009 + - renamed to Nagios::MKLivestatus + +0.01 Tue Nov 3 00:07:46 2009 + - original version; created by h2xs 1.23 with options + -A -X -n Nagios::Livestatus diff -Nru check-mk-1.2.2p3/=unpacked-tar11=/api/perl/examples/dump.pl check-mk-1.2.6p12/=unpacked-tar11=/api/perl/examples/dump.pl --- check-mk-1.2.2p3/=unpacked-tar11=/api/perl/examples/dump.pl 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/=unpacked-tar11=/api/perl/examples/dump.pl 2012-10-04 13:12:37.000000000 +0000 @@ -0,0 +1,104 @@ +#!/usr/bin/env perl + +=head1 NAME + +dump.pl - print some information from a socket + +=head1 SYNOPSIS + +./dump.pl [ -h ] [ -v ] + +=head1 DESCRIPTION + +this script print some information from a given livestatus socket or server + +=head1 ARGUMENTS + +script has the following arguments + +=over 4 + +=item help + + -h + +print help and exit + +=item verbose + + -v + +verbose output + +=item socket/server + + server local socket file or + + server remote address of livestatus + +=back + +=head1 EXAMPLE + +./dump.pl /tmp/live.sock + +=head1 AUTHOR + +2009, Sven Nierlein, + +=cut + +use warnings; +use strict; +use Data::Dumper; +use Getopt::Long; +use Pod::Usage; +use lib 'lib'; +use lib '../lib'; +use Monitoring::Livestatus; + +$Data::Dumper::Sortkeys = 1; + +######################################################################### +# parse and check cmd line arguments +my ($opt_h, $opt_v, $opt_f); +Getopt::Long::Configure('no_ignore_case'); +if(!GetOptions ( + "h" => \$opt_h, + "v" => \$opt_v, + "<>" => \&add_file, +)) { + pod2usage( { -verbose => 1, -message => 'error in options' } ); + exit 3; +} + +if(defined $opt_h) { + pod2usage( { -verbose => 1 } ); + exit 3; +} +my $verbose = 0; +if(defined $opt_v) { + $verbose = 1; +} + +if(!defined $opt_f) { + pod2usage( { -verbose => 1, -message => 'socket/server is a required option' } ); + exit 3; +} + +######################################################################### +my $nl = Monitoring::Livestatus->new( peer => $opt_f, verbose => $opt_v ); + +######################################################################### +#my $hosts = $nl->selectall_hashref('GET hosts', 'name'); +#print Dumper($hosts); + +######################################################################### +my $services = $nl->selectall_arrayref("GET 
services\nColumns: description host_name state\nLimit: 2", { Slice => {}}); +print Dumper($services); + +######################################################################### +sub add_file { + my $file = shift; + $opt_f = $file; +} diff -Nru check-mk-1.2.2p3/=unpacked-tar11=/api/perl/examples/test.pl check-mk-1.2.6p12/=unpacked-tar11=/api/perl/examples/test.pl --- check-mk-1.2.2p3/=unpacked-tar11=/api/perl/examples/test.pl 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/=unpacked-tar11=/api/perl/examples/test.pl 2012-10-04 13:12:37.000000000 +0000 @@ -0,0 +1,143 @@ +#!/usr/bin/env perl + +=head1 NAME + +test.pl - print some information from a socket + +=head1 SYNOPSIS + +./test.pl [ -h ] [ -v ] + +=head1 DESCRIPTION + +this script print some information from a given livestatus socket or server + +=head1 ARGUMENTS + +script has the following arguments + +=over 4 + +=item help + + -h + +print help and exit + +=item verbose + + -v + +verbose output + +=item socket/server + + server local socket file or + + server remote address of livestatus + +=back + +=head1 EXAMPLE + +./test.pl /tmp/live.sock + +=head1 AUTHOR + +2009, Sven Nierlein, + +=cut + +use warnings; +use strict; +use Data::Dumper; +use Getopt::Long; +use Pod::Usage; +use Time::HiRes qw( gettimeofday tv_interval ); +use Log::Log4perl qw(:easy); +use lib 'lib'; +use lib '../lib'; +use Monitoring::Livestatus; + +$Data::Dumper::Sortkeys = 1; + +######################################################################### +# parse and check cmd line arguments +my ($opt_h, $opt_v, @opt_f); +Getopt::Long::Configure('no_ignore_case'); +if(!GetOptions ( + "h" => \$opt_h, + "v" => \$opt_v, + "<>" => \&add_file, +)) { + pod2usage( { -verbose => 1, -message => 'error in options' } ); + exit 3; +} + +if(defined $opt_h) { + pod2usage( { -verbose => 1 } ); + exit 3; +} +my $verbose = 0; +if(defined $opt_v) { + $verbose = 1; +} + +if(scalar @opt_f == 0) { + pod2usage( { -verbose => 1, -message => 'socket/server is a required option' } ); + exit 3; +} + +######################################################################### +Log::Log4perl->easy_init($DEBUG); +my $nl = Monitoring::Livestatus->new( + peer => \@opt_f, + verbose => $opt_v, + timeout => 5, + keepalive => 1, + logger => get_logger(), + ); +my $log = get_logger(); + +######################################################################### +my $querys = [ + { 'query' => "GET hostgroups\nColumns: members\nFilter: name = flap\nFilter: name = down\nOr: 2", + 'sub' => "selectall_arrayref", + 'opt' => {Slice => 1 } + }, +# { 'query' => "GET comments", +# 'sub' => "selectall_arrayref", +# 'opt' => {Slice => 1 } +# }, +# { 'query' => "GET downtimes", +# 'sub' => "selectall_arrayref", +# 'opt' => {Slice => 1, Sum => 1} +# }, +# { 'query' => "GET log\nFilter: time > ".(time() - 600)."\nLimit: 1", +# 'sub' => "selectall_arrayref", +# 'opt' => {Slice => 1, AddPeer => 1} +# }, +# { 'query' => "GET services\nFilter: contacts >= test\nFilter: host_contacts >= test\nOr: 2\nColumns: host_name description contacts host_contacts", +# 'sub' => "selectall_arrayref", +# 'opt' => {Slice => 1, AddPeer => 0} +# }, +# { 'query' => "GET services\nFilter: host_name = test_host_00\nFilter: description = test_flap_02\nOr: 2\nColumns: host_name description contacts host_contacts", +# 'sub' => "selectall_arrayref", +# 'opt' => {Slice => 1, AddPeer => 0} +# }, +]; +for my $query (@{$querys}) { + my $sub = $query->{'sub'}; + my $t0 = [gettimeofday]; + my $stats = $nl->$sub($query->{'query'}, 
$query->{'opt'}); + my $elapsed = tv_interval($t0); + print Dumper($stats); + print "Query took ".($elapsed)." seconds\n"; +} + + +######################################################################### +sub add_file { + my $file = shift; + push @opt_f, $file; +} diff -Nru check-mk-1.2.2p3/=unpacked-tar11=/api/perl/inc/Module/AutoInstall.pm check-mk-1.2.6p12/=unpacked-tar11=/api/perl/inc/Module/AutoInstall.pm --- check-mk-1.2.2p3/=unpacked-tar11=/api/perl/inc/Module/AutoInstall.pm 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/=unpacked-tar11=/api/perl/inc/Module/AutoInstall.pm 2012-10-04 13:12:37.000000000 +0000 @@ -0,0 +1,820 @@ +#line 1 +package Module::AutoInstall; + +use strict; +use Cwd (); +use ExtUtils::MakeMaker (); + +use vars qw{$VERSION}; +BEGIN { + $VERSION = '1.03'; +} + +# special map on pre-defined feature sets +my %FeatureMap = ( + '' => 'Core Features', # XXX: deprecated + '-core' => 'Core Features', +); + +# various lexical flags +my ( @Missing, @Existing, %DisabledTests, $UnderCPAN, $HasCPANPLUS ); +my ( + $Config, $CheckOnly, $SkipInstall, $AcceptDefault, $TestOnly, $AllDeps +); +my ( $PostambleActions, $PostambleUsed ); + +# See if it's a testing or non-interactive session +_accept_default( $ENV{AUTOMATED_TESTING} or ! -t STDIN ); +_init(); + +sub _accept_default { + $AcceptDefault = shift; +} + +sub missing_modules { + return @Missing; +} + +sub do_install { + __PACKAGE__->install( + [ + $Config + ? ( UNIVERSAL::isa( $Config, 'HASH' ) ? %{$Config} : @{$Config} ) + : () + ], + @Missing, + ); +} + +# initialize various flags, and/or perform install +sub _init { + foreach my $arg ( + @ARGV, + split( + /[\s\t]+/, + $ENV{PERL_AUTOINSTALL} || $ENV{PERL_EXTUTILS_AUTOINSTALL} || '' + ) + ) + { + if ( $arg =~ /^--config=(.*)$/ ) { + $Config = [ split( ',', $1 ) ]; + } + elsif ( $arg =~ /^--installdeps=(.*)$/ ) { + __PACKAGE__->install( $Config, @Missing = split( /,/, $1 ) ); + exit 0; + } + elsif ( $arg =~ /^--default(?:deps)?$/ ) { + $AcceptDefault = 1; + } + elsif ( $arg =~ /^--check(?:deps)?$/ ) { + $CheckOnly = 1; + } + elsif ( $arg =~ /^--skip(?:deps)?$/ ) { + $SkipInstall = 1; + } + elsif ( $arg =~ /^--test(?:only)?$/ ) { + $TestOnly = 1; + } + elsif ( $arg =~ /^--all(?:deps)?$/ ) { + $AllDeps = 1; + } + } +} + +# overrides MakeMaker's prompt() to automatically accept the default choice +sub _prompt { + goto &ExtUtils::MakeMaker::prompt unless $AcceptDefault; + + my ( $prompt, $default ) = @_; + my $y = ( $default =~ /^[Yy]/ ); + + print $prompt, ' [', ( $y ? 'Y' : 'y' ), '/', ( $y ? 'n' : 'N' ), '] '; + print "$default\n"; + return $default; +} + +# the workhorse +sub import { + my $class = shift; + my @args = @_ or return; + my $core_all; + + print "*** $class version " . $class->VERSION . "\n"; + print "*** Checking for Perl dependencies...\n"; + + my $cwd = Cwd::cwd(); + + $Config = []; + + my $maxlen = length( + ( + sort { length($b) <=> length($a) } + grep { /^[^\-]/ } + map { + ref($_) + ? ( ( ref($_) eq 'HASH' ) ? 
keys(%$_) : @{$_} ) + : '' + } + map { +{@args}->{$_} } + grep { /^[^\-]/ or /^-core$/i } keys %{ +{@args} } + )[0] + ); + + # We want to know if we're under CPAN early to avoid prompting, but + # if we aren't going to try and install anything anyway then skip the + # check entirely since we don't want to have to load (and configure) + # an old CPAN just for a cosmetic message + + $UnderCPAN = _check_lock(1) unless $SkipInstall; + + while ( my ( $feature, $modules ) = splice( @args, 0, 2 ) ) { + my ( @required, @tests, @skiptests ); + my $default = 1; + my $conflict = 0; + + if ( $feature =~ m/^-(\w+)$/ ) { + my $option = lc($1); + + # check for a newer version of myself + _update_to( $modules, @_ ) and return if $option eq 'version'; + + # sets CPAN configuration options + $Config = $modules if $option eq 'config'; + + # promote every features to core status + $core_all = ( $modules =~ /^all$/i ) and next + if $option eq 'core'; + + next unless $option eq 'core'; + } + + print "[" . ( $FeatureMap{ lc($feature) } || $feature ) . "]\n"; + + $modules = [ %{$modules} ] if UNIVERSAL::isa( $modules, 'HASH' ); + + unshift @$modules, -default => &{ shift(@$modules) } + if ( ref( $modules->[0] ) eq 'CODE' ); # XXX: bugward combatability + + while ( my ( $mod, $arg ) = splice( @$modules, 0, 2 ) ) { + if ( $mod =~ m/^-(\w+)$/ ) { + my $option = lc($1); + + $default = $arg if ( $option eq 'default' ); + $conflict = $arg if ( $option eq 'conflict' ); + @tests = @{$arg} if ( $option eq 'tests' ); + @skiptests = @{$arg} if ( $option eq 'skiptests' ); + + next; + } + + printf( "- %-${maxlen}s ...", $mod ); + + if ( $arg and $arg =~ /^\D/ ) { + unshift @$modules, $arg; + $arg = 0; + } + + # XXX: check for conflicts and uninstalls(!) them. + my $cur = _load($mod); + if (_version_cmp ($cur, $arg) >= 0) + { + print "loaded. ($cur" . ( $arg ? " >= $arg" : '' ) . ")\n"; + push @Existing, $mod => $arg; + $DisabledTests{$_} = 1 for map { glob($_) } @skiptests; + } + else { + if (not defined $cur) # indeed missing + { + print "missing." . ( $arg ? " (would need $arg)" : '' ) . "\n"; + } + else + { + # no need to check $arg as _version_cmp ($cur, undef) would satisfy >= above + print "too old. ($cur < $arg)\n"; + } + + push @required, $mod => $arg; + } + } + + next unless @required; + + my $mandatory = ( $feature eq '-core' or $core_all ); + + if ( + !$SkipInstall + and ( + $CheckOnly + or ($mandatory and $UnderCPAN) + or $AllDeps + or _prompt( + qq{==> Auto-install the } + . ( @required / 2 ) + . ( $mandatory ? ' mandatory' : ' optional' ) + . qq{ module(s) from CPAN?}, + $default ? 'y' : 'n', + ) =~ /^[Yy]/ + ) + ) + { + push( @Missing, @required ); + $DisabledTests{$_} = 1 for map { glob($_) } @skiptests; + } + + elsif ( !$SkipInstall + and $default + and $mandatory + and + _prompt( qq{==> The module(s) are mandatory! Really skip?}, 'n', ) + =~ /^[Nn]/ ) + { + push( @Missing, @required ); + $DisabledTests{$_} = 1 for map { glob($_) } @skiptests; + } + + else { + $DisabledTests{$_} = 1 for map { glob($_) } @tests; + } + } + + if ( @Missing and not( $CheckOnly or $UnderCPAN ) ) { + require Config; + print +"*** Dependencies will be installed the next time you type '$Config::Config{make}'.\n"; + + # make an educated guess of whether we'll need root permission. 
+ print " (You may need to do that as the 'root' user.)\n" + if eval '$>'; + } + print "*** $class configuration finished.\n"; + + chdir $cwd; + + # import to main:: + no strict 'refs'; + *{'main::WriteMakefile'} = \&Write if caller(0) eq 'main'; + + return (@Existing, @Missing); +} + +sub _running_under { + my $thing = shift; + print <<"END_MESSAGE"; +*** Since we're running under ${thing}, I'll just let it take care + of the dependency's installation later. +END_MESSAGE + return 1; +} + +# Check to see if we are currently running under CPAN.pm and/or CPANPLUS; +# if we are, then we simply let it take care of our dependencies +sub _check_lock { + return unless @Missing or @_; + + my $cpan_env = $ENV{PERL5_CPAN_IS_RUNNING}; + + if ($ENV{PERL5_CPANPLUS_IS_RUNNING}) { + return _running_under($cpan_env ? 'CPAN' : 'CPANPLUS'); + } + + require CPAN; + + if ($CPAN::VERSION > '1.89') { + if ($cpan_env) { + return _running_under('CPAN'); + } + return; # CPAN.pm new enough, don't need to check further + } + + # last ditch attempt, this -will- configure CPAN, very sorry + + _load_cpan(1); # force initialize even though it's already loaded + + # Find the CPAN lock-file + my $lock = MM->catfile( $CPAN::Config->{cpan_home}, ".lock" ); + return unless -f $lock; + + # Check the lock + local *LOCK; + return unless open(LOCK, $lock); + + if ( + ( $^O eq 'MSWin32' ? _under_cpan() : <LOCK> == getppid() ) + and ( $CPAN::Config->{prerequisites_policy} || '' ) ne 'ignore' + ) { + print <<'END_MESSAGE'; + +*** Since we're running under CPAN, I'll just let it take care + of the dependency's installation later. +END_MESSAGE + return 1; + } + + close LOCK; + return; +} + +sub install { + my $class = shift; + + my $i; # used below to strip leading '-' from config keys + my @config = ( map { s/^-// if ++$i; $_ } @{ +shift } ); + + my ( @modules, @installed ); + while ( my ( $pkg, $ver ) = splice( @_, 0, 2 ) ) { + + # grep out those already installed + if ( _version_cmp( _load($pkg), $ver ) >= 0 ) { + push @installed, $pkg; + } + else { + push @modules, $pkg, $ver; + } + } + + return @installed unless @modules; # nothing to do + return @installed if _check_lock(); # defer to the CPAN shell + + print "*** Installing dependencies...\n"; + + return unless _connected_to('cpan.org'); + + my %args = @config; + my %failed; + local *FAILED; + if ( $args{do_once} and open( FAILED, '.#autoinstall.failed' ) ) { + while (<FAILED>) { chomp; $failed{$_}++ } + close FAILED; + + my @newmod; + while ( my ( $k, $v ) = splice( @modules, 0, 2 ) ) { + push @newmod, ( $k => $v ) unless $failed{$k}; + } + @modules = @newmod; + } + + if ( _has_cpanplus() and not $ENV{PERL_AUTOINSTALL_PREFER_CPAN} ) { + _install_cpanplus( \@modules, \@config ); + } else { + _install_cpan( \@modules, \@config ); + } + + print "*** $class installation finished.\n"; + + # see if we have successfully installed them + while ( my ( $pkg, $ver ) = splice( @modules, 0, 2 ) ) { + if ( _version_cmp( _load($pkg), $ver ) >= 0 ) { + push @installed, $pkg; + } + elsif ( $args{do_once} and open( FAILED, '>> .#autoinstall.failed' ) ) { + print FAILED "$pkg\n"; + } + } + + close FAILED if $args{do_once}; + + return @installed; +} + +sub _install_cpanplus { + my @modules = @{ +shift }; + my @config = _cpanplus_config( @{ +shift } ); + my $installed = 0; + + require CPANPLUS::Backend; + my $cp = CPANPLUS::Backend->new; + my $conf = $cp->configure_object; + + return unless $conf->can('conf') # 0.05x+ with "sudo" support + or _can_write($conf->_get_build('base')); # 0.04x + + # if we're root, 
set UNINST=1 to avoid trouble unless user asked for it. + my $makeflags = $conf->get_conf('makeflags') || ''; + if ( UNIVERSAL::isa( $makeflags, 'HASH' ) ) { + # 0.03+ uses a hashref here + $makeflags->{UNINST} = 1 unless exists $makeflags->{UNINST}; + + } else { + # 0.02 and below uses a scalar + $makeflags = join( ' ', split( ' ', $makeflags ), 'UNINST=1' ) + if ( $makeflags !~ /\bUNINST\b/ and eval qq{ $> eq '0' } ); + + } + $conf->set_conf( makeflags => $makeflags ); + $conf->set_conf( prereqs => 1 ); + + + + while ( my ( $key, $val ) = splice( @config, 0, 2 ) ) { + $conf->set_conf( $key, $val ); + } + + my $modtree = $cp->module_tree; + while ( my ( $pkg, $ver ) = splice( @modules, 0, 2 ) ) { + print "*** Installing $pkg...\n"; + + MY::preinstall( $pkg, $ver ) or next if defined &MY::preinstall; + + my $success; + my $obj = $modtree->{$pkg}; + + if ( $obj and _version_cmp( $obj->{version}, $ver ) >= 0 ) { + my $pathname = $pkg; + $pathname =~ s/::/\\W/; + + foreach my $inc ( grep { m/$pathname.pm/i } keys(%INC) ) { + delete $INC{$inc}; + } + + my $rv = $cp->install( modules => [ $obj->{module} ] ); + + if ( $rv and ( $rv->{ $obj->{module} } or $rv->{ok} ) ) { + print "*** $pkg successfully installed.\n"; + $success = 1; + } else { + print "*** $pkg installation cancelled.\n"; + $success = 0; + } + + $installed += $success; + } else { + print << "."; +*** Could not find a version $ver or above for $pkg; skipping. +. + } + + MY::postinstall( $pkg, $ver, $success ) if defined &MY::postinstall; + } + + return $installed; +} + +sub _cpanplus_config { + my @config = (); + while ( @_ ) { + my ($key, $value) = (shift(), shift()); + if ( $key eq 'prerequisites_policy' ) { + if ( $value eq 'follow' ) { + $value = CPANPLUS::Internals::Constants::PREREQ_INSTALL(); + } elsif ( $value eq 'ask' ) { + $value = CPANPLUS::Internals::Constants::PREREQ_ASK(); + } elsif ( $value eq 'ignore' ) { + $value = CPANPLUS::Internals::Constants::PREREQ_IGNORE(); + } else { + die "*** Cannot convert option $key = '$value' to CPANPLUS version.\n"; + } + } else { + die "*** Cannot convert option $key to CPANPLUS version.\n"; + } + } + return @config; +} + +sub _install_cpan { + my @modules = @{ +shift }; + my @config = @{ +shift }; + my $installed = 0; + my %args; + + _load_cpan(); + require Config; + + if (CPAN->VERSION < 1.80) { + # no "sudo" support, probe for writableness + return unless _can_write( MM->catfile( $CPAN::Config->{cpan_home}, 'sources' ) ) + and _can_write( $Config::Config{sitelib} ); + } + + # if we're root, set UNINST=1 to avoid trouble unless user asked for it. 
+ my $makeflags = $CPAN::Config->{make_install_arg} || ''; + $CPAN::Config->{make_install_arg} = + join( ' ', split( ' ', $makeflags ), 'UNINST=1' ) + if ( $makeflags !~ /\bUNINST\b/ and eval qq{ $> eq '0' } ); + + # don't show start-up info + $CPAN::Config->{inhibit_startup_message} = 1; + + # set additional options + while ( my ( $opt, $arg ) = splice( @config, 0, 2 ) ) { + ( $args{$opt} = $arg, next ) + if $opt =~ /^force$/; # pseudo-option + $CPAN::Config->{$opt} = $arg; + } + + local $CPAN::Config->{prerequisites_policy} = 'follow'; + + while ( my ( $pkg, $ver ) = splice( @modules, 0, 2 ) ) { + MY::preinstall( $pkg, $ver ) or next if defined &MY::preinstall; + + print "*** Installing $pkg...\n"; + + my $obj = CPAN::Shell->expand( Module => $pkg ); + my $success = 0; + + if ( $obj and _version_cmp( $obj->cpan_version, $ver ) >= 0 ) { + my $pathname = $pkg; + $pathname =~ s/::/\\W/; + + foreach my $inc ( grep { m/$pathname.pm/i } keys(%INC) ) { + delete $INC{$inc}; + } + + my $rv = $args{force} ? CPAN::Shell->force( install => $pkg ) + : CPAN::Shell->install($pkg); + $rv ||= eval { + $CPAN::META->instance( 'CPAN::Distribution', $obj->cpan_file, ) + ->{install} + if $CPAN::META; + }; + + if ( $rv eq 'YES' ) { + print "*** $pkg successfully installed.\n"; + $success = 1; + } + else { + print "*** $pkg installation failed.\n"; + $success = 0; + } + + $installed += $success; + } + else { + print << "."; +*** Could not find a version $ver or above for $pkg; skipping. +. + } + + MY::postinstall( $pkg, $ver, $success ) if defined &MY::postinstall; + } + + return $installed; +} + +sub _has_cpanplus { + return ( + $HasCPANPLUS = ( + $INC{'CPANPLUS/Config.pm'} + or _load('CPANPLUS::Shell::Default') + ) + ); +} + +# make guesses on whether we're under the CPAN installation directory +sub _under_cpan { + require Cwd; + require File::Spec; + + my $cwd = File::Spec->canonpath( Cwd::cwd() ); + my $cpan = File::Spec->canonpath( $CPAN::Config->{cpan_home} ); + + return ( index( $cwd, $cpan ) > -1 ); +} + +sub _update_to { + my $class = __PACKAGE__; + my $ver = shift; + + return + if _version_cmp( _load($class), $ver ) >= 0; # no need to upgrade + + if ( + _prompt( "==> A newer version of $class ($ver) is required. Install?", + 'y' ) =~ /^[Nn]/ + ) + { + die "*** Please install $class $ver manually.\n"; + } + + print << "."; +*** Trying to fetch it from CPAN... +. + + # install ourselves + _load($class) and return $class->import(@_) + if $class->install( [], $class, $ver ); + + print << '.'; exit 1; + +*** Cannot bootstrap myself. :-( Installation terminated. +. +} + +# check if we're connected to some host, using inet_aton +sub _connected_to { + my $site = shift; + + return ( + ( _load('Socket') and Socket::inet_aton($site) ) or _prompt( + qq( +*** Your host cannot resolve the domain name '$site', which + probably means the Internet connections are unavailable. +==> Should we try to install the required module(s) anyway?), 'n' + ) =~ /^[Yy]/ + ); +} + +# check if a directory is writable; may create it on demand +sub _can_write { + my $path = shift; + mkdir( $path, 0755 ) unless -e $path; + + return 1 if -w $path; + + print << "."; +*** You are not allowed to write to the directory '$path'; + the installation may fail due to insufficient permissions. +. + + if ( + eval '$>' and lc(`sudo -V`) =~ /version/ and _prompt( + qq( +==> Should we try to re-execute the autoinstall process with 'sudo'?), + ((-t STDIN) ? 
'y' : 'n') + ) =~ /^[Yy]/ + ) + { + + # try to bootstrap ourselves from sudo + print << "."; +*** Trying to re-execute the autoinstall process with 'sudo'... +. + my $missing = join( ',', @Missing ); + my $config = join( ',', + UNIVERSAL::isa( $Config, 'HASH' ) ? %{$Config} : @{$Config} ) + if $Config; + + return + unless system( 'sudo', $^X, $0, "--config=$config", + "--installdeps=$missing" ); + + print << "."; +*** The 'sudo' command exited with error! Resuming... +. + } + + return _prompt( + qq( +==> Should we try to install the required module(s) anyway?), 'n' + ) =~ /^[Yy]/; +} + +# load a module and return the version it reports +sub _load { + my $mod = pop; # class/instance doesn't matter + my $file = $mod; + + $file =~ s|::|/|g; + $file .= '.pm'; + + local $@; + return eval { require $file; $mod->VERSION } || ( $@ ? undef: 0 ); +} + +# Load CPAN.pm and it's configuration +sub _load_cpan { + return if $CPAN::VERSION and $CPAN::Config and not @_; + require CPAN; + + # CPAN-1.82+ adds CPAN::Config::AUTOLOAD to redirect to + # CPAN::HandleConfig->load. CPAN reports that the redirection + # is deprecated in a warning printed at the user. + + # CPAN-1.81 expects CPAN::HandleConfig->load, does not have + # $CPAN::HandleConfig::VERSION but cannot handle + # CPAN::Config->load + + # Which "versions expect CPAN::Config->load? + + if ( $CPAN::HandleConfig::VERSION + || CPAN::HandleConfig->can('load') + ) { + # Newer versions of CPAN have a HandleConfig module + CPAN::HandleConfig->load; + } else { + # Older versions had the load method in Config directly + CPAN::Config->load; + } +} + +# compare two versions, either use Sort::Versions or plain comparison +# return values same as <=> +sub _version_cmp { + my ( $cur, $min ) = @_; + return -1 unless defined $cur; # if 0 keep comparing + return 1 unless $min; + + $cur =~ s/\s+$//; + + # check for version numbers that are not in decimal format + if ( ref($cur) or ref($min) or $cur =~ /v|\..*\./ or $min =~ /v|\..*\./ ) { + if ( ( $version::VERSION or defined( _load('version') )) and + version->can('new') + ) { + + # use version.pm if it is installed. + return version->new($cur) <=> version->new($min); + } + elsif ( $Sort::Versions::VERSION or defined( _load('Sort::Versions') ) ) + { + + # use Sort::Versions as the sorting algorithm for a.b.c versions + return Sort::Versions::versioncmp( $cur, $min ); + } + + warn "Cannot reliably compare non-decimal formatted versions.\n" + . "Please install version.pm or Sort::Versions.\n"; + } + + # plain comparison + local $^W = 0; # shuts off 'not numeric' bugs + return $cur <=> $min; +} + +# nothing; this usage is deprecated. +sub main::PREREQ_PM { return {}; } + +sub _make_args { + my %args = @_; + + $args{PREREQ_PM} = { %{ $args{PREREQ_PM} || {} }, @Existing, @Missing } + if $UnderCPAN or $TestOnly; + + if ( $args{EXE_FILES} and -e 'MANIFEST' ) { + require ExtUtils::Manifest; + my $manifest = ExtUtils::Manifest::maniread('MANIFEST'); + + $args{EXE_FILES} = + [ grep { exists $manifest->{$_} } @{ $args{EXE_FILES} } ]; + } + + $args{test}{TESTS} ||= 't/*.t'; + $args{test}{TESTS} = join( ' ', + grep { !exists( $DisabledTests{$_} ) } + map { glob($_) } split( /\s+/, $args{test}{TESTS} ) ); + + my $missing = join( ',', @Missing ); + my $config = + join( ',', UNIVERSAL::isa( $Config, 'HASH' ) ? %{$Config} : @{$Config} ) + if $Config; + + $PostambleActions = ( + ($missing and not $UnderCPAN) + ? 
"\$(PERL) $0 --config=$config --installdeps=$missing" + : "\$(NOECHO) \$(NOOP)" + ); + + return %args; +} + +# a wrapper to ExtUtils::MakeMaker::WriteMakefile +sub Write { + require Carp; + Carp::croak "WriteMakefile: Need even number of args" if @_ % 2; + + if ($CheckOnly) { + print << "."; +*** Makefile not written in check-only mode. +. + return; + } + + my %args = _make_args(@_); + + no strict 'refs'; + + $PostambleUsed = 0; + local *MY::postamble = \&postamble unless defined &MY::postamble; + ExtUtils::MakeMaker::WriteMakefile(%args); + + print << "." unless $PostambleUsed; +*** WARNING: Makefile written with customized MY::postamble() without + including contents from Module::AutoInstall::postamble() -- + auto installation features disabled. Please contact the author. +. + + return 1; +} + +sub postamble { + $PostambleUsed = 1; + + return <<"END_MAKE"; + +config :: installdeps +\t\$(NOECHO) \$(NOOP) + +checkdeps :: +\t\$(PERL) $0 --checkdeps + +installdeps :: +\t$PostambleActions + +END_MAKE + +} + +1; + +__END__ + +#line 1071 diff -Nru check-mk-1.2.2p3/=unpacked-tar11=/api/perl/inc/Module/Install/AutoInstall.pm check-mk-1.2.6p12/=unpacked-tar11=/api/perl/inc/Module/Install/AutoInstall.pm --- check-mk-1.2.2p3/=unpacked-tar11=/api/perl/inc/Module/Install/AutoInstall.pm 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/=unpacked-tar11=/api/perl/inc/Module/Install/AutoInstall.pm 2012-10-04 13:12:37.000000000 +0000 @@ -0,0 +1,82 @@ +#line 1 +package Module::Install::AutoInstall; + +use strict; +use Module::Install::Base (); + +use vars qw{$VERSION @ISA $ISCORE}; +BEGIN { + $VERSION = '1.00'; + @ISA = 'Module::Install::Base'; + $ISCORE = 1; +} + +sub AutoInstall { $_[0] } + +sub run { + my $self = shift; + $self->auto_install_now(@_); +} + +sub write { + my $self = shift; + $self->auto_install(@_); +} + +sub auto_install { + my $self = shift; + return if $self->{done}++; + + # Flatten array of arrays into a single array + my @core = map @$_, map @$_, grep ref, + $self->build_requires, $self->requires; + + my @config = @_; + + # We'll need Module::AutoInstall + $self->include('Module::AutoInstall'); + require Module::AutoInstall; + + my @features_require = Module::AutoInstall->import( + (@config ? (-config => \@config) : ()), + (@core ? (-core => \@core) : ()), + $self->features, + ); + + my %seen; + my @requires = map @$_, map @$_, grep ref, $self->requires; + while (my ($mod, $ver) = splice(@requires, 0, 2)) { + $seen{$mod}{$ver}++; + } + my @build_requires = map @$_, map @$_, grep ref, $self->build_requires; + while (my ($mod, $ver) = splice(@build_requires, 0, 2)) { + $seen{$mod}{$ver}++; + } + my @configure_requires = map @$_, map @$_, grep ref, $self->configure_requires; + while (my ($mod, $ver) = splice(@configure_requires, 0, 2)) { + $seen{$mod}{$ver}++; + } + + my @deduped; + while (my ($mod, $ver) = splice(@features_require, 0, 2)) { + push @deduped, $mod => $ver unless $seen{$mod}{$ver}++; + } + + $self->requires(@deduped); + + $self->makemaker_args( Module::AutoInstall::_make_args() ); + + my $class = ref($self); + $self->postamble( + "# --- $class section:\n" . 
+ Module::AutoInstall::postamble() + ); +} + +sub auto_install_now { + my $self = shift; + $self->auto_install(@_); + Module::AutoInstall::do_install(); +} + +1; diff -Nru check-mk-1.2.2p3/=unpacked-tar11=/api/perl/inc/Module/Install/Base.pm check-mk-1.2.6p12/=unpacked-tar11=/api/perl/inc/Module/Install/Base.pm --- check-mk-1.2.2p3/=unpacked-tar11=/api/perl/inc/Module/Install/Base.pm 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/=unpacked-tar11=/api/perl/inc/Module/Install/Base.pm 2012-10-04 13:12:37.000000000 +0000 @@ -0,0 +1,83 @@ +#line 1 +package Module::Install::Base; + +use strict 'vars'; +use vars qw{$VERSION}; +BEGIN { + $VERSION = '1.00'; +} + +# Suspend handler for "redefined" warnings +BEGIN { + my $w = $SIG{__WARN__}; + $SIG{__WARN__} = sub { $w }; +} + +#line 42 + +sub new { + my $class = shift; + unless ( defined &{"${class}::call"} ) { + *{"${class}::call"} = sub { shift->_top->call(@_) }; + } + unless ( defined &{"${class}::load"} ) { + *{"${class}::load"} = sub { shift->_top->load(@_) }; + } + bless { @_ }, $class; +} + +#line 61 + +sub AUTOLOAD { + local $@; + my $func = eval { shift->_top->autoload } or return; + goto &$func; +} + +#line 75 + +sub _top { + $_[0]->{_top}; +} + +#line 90 + +sub admin { + $_[0]->_top->{admin} + or + Module::Install::Base::FakeAdmin->new; +} + +#line 106 + +sub is_admin { + ! $_[0]->admin->isa('Module::Install::Base::FakeAdmin'); +} + +sub DESTROY {} + +package Module::Install::Base::FakeAdmin; + +use vars qw{$VERSION}; +BEGIN { + $VERSION = $Module::Install::Base::VERSION; +} + +my $fake; + +sub new { + $fake ||= bless(\@_, $_[0]); +} + +sub AUTOLOAD {} + +sub DESTROY {} + +# Restore warning handler +BEGIN { + $SIG{__WARN__} = $SIG{__WARN__}->(); +} + +1; + +#line 159 diff -Nru check-mk-1.2.2p3/=unpacked-tar11=/api/perl/inc/Module/Install/Can.pm check-mk-1.2.6p12/=unpacked-tar11=/api/perl/inc/Module/Install/Can.pm --- check-mk-1.2.2p3/=unpacked-tar11=/api/perl/inc/Module/Install/Can.pm 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/=unpacked-tar11=/api/perl/inc/Module/Install/Can.pm 2012-10-04 13:12:37.000000000 +0000 @@ -0,0 +1,81 @@ +#line 1 +package Module::Install::Can; + +use strict; +use Config (); +use File::Spec (); +use ExtUtils::MakeMaker (); +use Module::Install::Base (); + +use vars qw{$VERSION @ISA $ISCORE}; +BEGIN { + $VERSION = '1.00'; + @ISA = 'Module::Install::Base'; + $ISCORE = 1; +} + +# check if we can load some module +### Upgrade this to not have to load the module if possible +sub can_use { + my ($self, $mod, $ver) = @_; + $mod =~ s{::|\\}{/}g; + $mod .= '.pm' unless $mod =~ /\.pm$/i; + + my $pkg = $mod; + $pkg =~ s{/}{::}g; + $pkg =~ s{\.pm$}{}i; + + local $@; + eval { require $mod; $pkg->VERSION($ver || 0); 1 }; +} + +# check if we can run some command +sub can_run { + my ($self, $cmd) = @_; + + my $_cmd = $cmd; + return $_cmd if (-x $_cmd or $_cmd = MM->maybe_command($_cmd)); + + for my $dir ((split /$Config::Config{path_sep}/, $ENV{PATH}), '.') { + next if $dir eq ''; + my $abs = File::Spec->catfile($dir, $_[1]); + return $abs if (-x $abs or $abs = MM->maybe_command($abs)); + } + + return; +} + +# can we locate a (the) C compiler +sub can_cc { + my $self = shift; + my @chunks = split(/ /, $Config::Config{cc}) or return; + + # $Config{cc} may contain args; try to find out the program part + while (@chunks) { + return $self->can_run("@chunks") || (pop(@chunks), next); + } + + return; +} + +# Fix Cygwin bug on maybe_command(); +if ( $^O eq 'cygwin' ) { + require ExtUtils::MM_Cygwin; + require 
ExtUtils::MM_Win32; + if ( ! defined(&ExtUtils::MM_Cygwin::maybe_command) ) { + *ExtUtils::MM_Cygwin::maybe_command = sub { + my ($self, $file) = @_; + if ($file =~ m{^/cygdrive/}i and ExtUtils::MM_Win32->can('maybe_command')) { + ExtUtils::MM_Win32->maybe_command($file); + } else { + ExtUtils::MM_Unix->maybe_command($file); + } + } + } +} + +1; + +__END__ + +#line 156 diff -Nru check-mk-1.2.2p3/=unpacked-tar11=/api/perl/inc/Module/Install/Fetch.pm check-mk-1.2.6p12/=unpacked-tar11=/api/perl/inc/Module/Install/Fetch.pm --- check-mk-1.2.2p3/=unpacked-tar11=/api/perl/inc/Module/Install/Fetch.pm 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/=unpacked-tar11=/api/perl/inc/Module/Install/Fetch.pm 2012-10-04 13:12:37.000000000 +0000 @@ -0,0 +1,93 @@ +#line 1 +package Module::Install::Fetch; + +use strict; +use Module::Install::Base (); + +use vars qw{$VERSION @ISA $ISCORE}; +BEGIN { + $VERSION = '1.00'; + @ISA = 'Module::Install::Base'; + $ISCORE = 1; +} + +sub get_file { + my ($self, %args) = @_; + my ($scheme, $host, $path, $file) = + $args{url} =~ m|^(\w+)://([^/]+)(.+)/(.+)| or return; + + if ( $scheme eq 'http' and ! eval { require LWP::Simple; 1 } ) { + $args{url} = $args{ftp_url} + or (warn("LWP support unavailable!\n"), return); + ($scheme, $host, $path, $file) = + $args{url} =~ m|^(\w+)://([^/]+)(.+)/(.+)| or return; + } + + $|++; + print "Fetching '$file' from $host... "; + + unless (eval { require Socket; Socket::inet_aton($host) }) { + warn "'$host' resolve failed!\n"; + return; + } + + return unless $scheme eq 'ftp' or $scheme eq 'http'; + + require Cwd; + my $dir = Cwd::getcwd(); + chdir $args{local_dir} or return if exists $args{local_dir}; + + if (eval { require LWP::Simple; 1 }) { + LWP::Simple::mirror($args{url}, $file); + } + elsif (eval { require Net::FTP; 1 }) { eval { + # use Net::FTP to get past firewall + my $ftp = Net::FTP->new($host, Passive => 1, Timeout => 600); + $ftp->login("anonymous", 'anonymous@example.com'); + $ftp->cwd($path); + $ftp->binary; + $ftp->get($file) or (warn("$!\n"), return); + $ftp->quit; + } } + elsif (my $ftp = $self->can_run('ftp')) { eval { + # no Net::FTP, fallback to ftp.exe + require FileHandle; + my $fh = FileHandle->new; + + local $SIG{CHLD} = 'IGNORE'; + unless ($fh->open("|$ftp -n")) { + warn "Couldn't open ftp: $!\n"; + chdir $dir; return; + } + + my @dialog = split(/\n/, <<"END_FTP"); +open $host +user anonymous anonymous\@example.com +cd $path +binary +get $file $file +quit +END_FTP + foreach (@dialog) { $fh->print("$_\n") } + $fh->close; + } } + else { + warn "No working 'ftp' program available!\n"; + chdir $dir; return; + } + + unless (-f $file) { + warn "Fetching failed: $@\n"; + chdir $dir; return; + } + + return if exists $args{size} and -s $file != $args{size}; + system($args{run}) if exists $args{run}; + unlink($file) if $args{remove}; + + print(((!exists $args{check_for} or -e $args{check_for}) + ? "done!" : "failed! 
($!)"), "\n"); + chdir $dir; return !$?; +} + +1; diff -Nru check-mk-1.2.2p3/=unpacked-tar11=/api/perl/inc/Module/Install/Include.pm check-mk-1.2.6p12/=unpacked-tar11=/api/perl/inc/Module/Install/Include.pm --- check-mk-1.2.2p3/=unpacked-tar11=/api/perl/inc/Module/Install/Include.pm 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/=unpacked-tar11=/api/perl/inc/Module/Install/Include.pm 2012-10-04 13:12:37.000000000 +0000 @@ -0,0 +1,34 @@ +#line 1 +package Module::Install::Include; + +use strict; +use Module::Install::Base (); + +use vars qw{$VERSION @ISA $ISCORE}; +BEGIN { + $VERSION = '1.00'; + @ISA = 'Module::Install::Base'; + $ISCORE = 1; +} + +sub include { + shift()->admin->include(@_); +} + +sub include_deps { + shift()->admin->include_deps(@_); +} + +sub auto_include { + shift()->admin->auto_include(@_); +} + +sub auto_include_deps { + shift()->admin->auto_include_deps(@_); +} + +sub auto_include_dependent_dists { + shift()->admin->auto_include_dependent_dists(@_); +} + +1; diff -Nru check-mk-1.2.2p3/=unpacked-tar11=/api/perl/inc/Module/Install/Makefile.pm check-mk-1.2.6p12/=unpacked-tar11=/api/perl/inc/Module/Install/Makefile.pm --- check-mk-1.2.2p3/=unpacked-tar11=/api/perl/inc/Module/Install/Makefile.pm 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/=unpacked-tar11=/api/perl/inc/Module/Install/Makefile.pm 2012-10-04 13:12:37.000000000 +0000 @@ -0,0 +1,415 @@ +#line 1 +package Module::Install::Makefile; + +use strict 'vars'; +use ExtUtils::MakeMaker (); +use Module::Install::Base (); +use Fcntl qw/:flock :seek/; + +use vars qw{$VERSION @ISA $ISCORE}; +BEGIN { + $VERSION = '1.00'; + @ISA = 'Module::Install::Base'; + $ISCORE = 1; +} + +sub Makefile { $_[0] } + +my %seen = (); + +sub prompt { + shift; + + # Infinite loop protection + my @c = caller(); + if ( ++$seen{"$c[1]|$c[2]|$_[0]"} > 3 ) { + die "Caught an potential prompt infinite loop ($c[1]|$c[2]|$_[0])"; + } + + # In automated testing or non-interactive session, always use defaults + if ( ($ENV{AUTOMATED_TESTING} or -! -t STDIN) and ! $ENV{PERL_MM_USE_DEFAULT} ) { + local $ENV{PERL_MM_USE_DEFAULT} = 1; + goto &ExtUtils::MakeMaker::prompt; + } else { + goto &ExtUtils::MakeMaker::prompt; + } +} + +# Store a cleaned up version of the MakeMaker version, +# since we need to behave differently in a variety of +# ways based on the MM version. +my $makemaker = eval $ExtUtils::MakeMaker::VERSION; + +# If we are passed a param, do a "newer than" comparison. +# Otherwise, just return the MakeMaker version. +sub makemaker { + ( @_ < 2 or $makemaker >= eval($_[1]) ) ? $makemaker : 0 +} + +# Ripped from ExtUtils::MakeMaker 6.56, and slightly modified +# as we only need to know here whether the attribute is an array +# or a hash or something else (which may or may not be appendable). 
+my %makemaker_argtype = ( + C => 'ARRAY', + CONFIG => 'ARRAY', +# CONFIGURE => 'CODE', # ignore + DIR => 'ARRAY', + DL_FUNCS => 'HASH', + DL_VARS => 'ARRAY', + EXCLUDE_EXT => 'ARRAY', + EXE_FILES => 'ARRAY', + FUNCLIST => 'ARRAY', + H => 'ARRAY', + IMPORTS => 'HASH', + INCLUDE_EXT => 'ARRAY', + LIBS => 'ARRAY', # ignore '' + MAN1PODS => 'HASH', + MAN3PODS => 'HASH', + META_ADD => 'HASH', + META_MERGE => 'HASH', + PL_FILES => 'HASH', + PM => 'HASH', + PMLIBDIRS => 'ARRAY', + PMLIBPARENTDIRS => 'ARRAY', + PREREQ_PM => 'HASH', + CONFIGURE_REQUIRES => 'HASH', + SKIP => 'ARRAY', + TYPEMAPS => 'ARRAY', + XS => 'HASH', +# VERSION => ['version',''], # ignore +# _KEEP_AFTER_FLUSH => '', + + clean => 'HASH', + depend => 'HASH', + dist => 'HASH', + dynamic_lib=> 'HASH', + linkext => 'HASH', + macro => 'HASH', + postamble => 'HASH', + realclean => 'HASH', + test => 'HASH', + tool_autosplit => 'HASH', + + # special cases where you can use makemaker_append + CCFLAGS => 'APPENDABLE', + DEFINE => 'APPENDABLE', + INC => 'APPENDABLE', + LDDLFLAGS => 'APPENDABLE', + LDFROM => 'APPENDABLE', +); + +sub makemaker_args { + my ($self, %new_args) = @_; + my $args = ( $self->{makemaker_args} ||= {} ); + foreach my $key (keys %new_args) { + if ($makemaker_argtype{$key}) { + if ($makemaker_argtype{$key} eq 'ARRAY') { + $args->{$key} = [] unless defined $args->{$key}; + unless (ref $args->{$key} eq 'ARRAY') { + $args->{$key} = [$args->{$key}] + } + push @{$args->{$key}}, + ref $new_args{$key} eq 'ARRAY' + ? @{$new_args{$key}} + : $new_args{$key}; + } + elsif ($makemaker_argtype{$key} eq 'HASH') { + $args->{$key} = {} unless defined $args->{$key}; + foreach my $skey (keys %{ $new_args{$key} }) { + $args->{$key}{$skey} = $new_args{$key}{$skey}; + } + } + elsif ($makemaker_argtype{$key} eq 'APPENDABLE') { + $self->makemaker_append($key => $new_args{$key}); + } + } + else { + if (defined $args->{$key}) { + warn qq{MakeMaker attribute "$key" is overriden; use "makemaker_append" to append values\n}; + } + $args->{$key} = $new_args{$key}; + } + } + return $args; +} + +# For mm args that take multiple space-seperated args, +# append an argument to the current list. +sub makemaker_append { + my $self = shift; + my $name = shift; + my $args = $self->makemaker_args; + $args->{$name} = defined $args->{$name} + ? join( ' ', $args->{$name}, @_ ) + : join( ' ', @_ ); +} + +sub build_subdirs { + my $self = shift; + my $subdirs = $self->makemaker_args->{DIR} ||= []; + for my $subdir (@_) { + push @$subdirs, $subdir; + } +} + +sub clean_files { + my $self = shift; + my $clean = $self->makemaker_args->{clean} ||= {}; + %$clean = ( + %$clean, + FILES => join ' ', grep { length $_ } ($clean->{FILES} || (), @_), + ); +} + +sub realclean_files { + my $self = shift; + my $realclean = $self->makemaker_args->{realclean} ||= {}; + %$realclean = ( + %$realclean, + FILES => join ' ', grep { length $_ } ($realclean->{FILES} || (), @_), + ); +} + +sub libs { + my $self = shift; + my $libs = ref $_[0] ? 
shift : [ shift ]; + $self->makemaker_args( LIBS => $libs ); +} + +sub inc { + my $self = shift; + $self->makemaker_args( INC => shift ); +} + +sub _wanted_t { +} + +sub tests_recursive { + my $self = shift; + my $dir = shift || 't'; + unless ( -d $dir ) { + die "tests_recursive dir '$dir' does not exist"; + } + my %tests = map { $_ => 1 } split / /, ($self->tests || ''); + require File::Find; + File::Find::find( + sub { /\.t$/ and -f $_ and $tests{"$File::Find::dir/*.t"} = 1 }, + $dir + ); + $self->tests( join ' ', sort keys %tests ); +} + +sub write { + my $self = shift; + die "&Makefile->write() takes no arguments\n" if @_; + + # Check the current Perl version + my $perl_version = $self->perl_version; + if ( $perl_version ) { + eval "use $perl_version; 1" + or die "ERROR: perl: Version $] is installed, " + . "but we need version >= $perl_version"; + } + + # Make sure we have a new enough MakeMaker + require ExtUtils::MakeMaker; + + if ( $perl_version and $self->_cmp($perl_version, '5.006') >= 0 ) { + # MakeMaker can complain about module versions that include + # an underscore, even though its own version may contain one! + # Hence the funny regexp to get rid of it. See RT #35800 + # for details. + my $v = $ExtUtils::MakeMaker::VERSION =~ /^(\d+\.\d+)/; + $self->build_requires( 'ExtUtils::MakeMaker' => $v ); + $self->configure_requires( 'ExtUtils::MakeMaker' => $v ); + } else { + # Allow legacy-compatibility with 5.005 by depending on the + # most recent EU:MM that supported 5.005. + $self->build_requires( 'ExtUtils::MakeMaker' => 6.42 ); + $self->configure_requires( 'ExtUtils::MakeMaker' => 6.42 ); + } + + # Generate the MakeMaker params + my $args = $self->makemaker_args; + $args->{DISTNAME} = $self->name; + $args->{NAME} = $self->module_name || $self->name; + $args->{NAME} =~ s/-/::/g; + $args->{VERSION} = $self->version or die <<'EOT'; +ERROR: Can't determine distribution version. Please specify it +explicitly via 'version' in Makefile.PL, or set a valid $VERSION +in a module, and provide its file path via 'version_from' (or +'all_from' if you prefer) in Makefile.PL. +EOT + + $DB::single = 1; + if ( $self->tests ) { + my @tests = split ' ', $self->tests; + my %seen; + $args->{test} = { + TESTS => (join ' ', grep {!$seen{$_}++} @tests), + }; + } elsif ( $Module::Install::ExtraTests::use_extratests ) { + # Module::Install::ExtraTests doesn't set $self->tests and does its own tests via harness. + # So, just ignore our xt tests here. 
+ } elsif ( -d 'xt' and ($Module::Install::AUTHOR or $ENV{RELEASE_TESTING}) ) { + $args->{test} = { + TESTS => join( ' ', map { "$_/*.t" } grep { -d $_ } qw{ t xt } ), + }; + } + if ( $] >= 5.005 ) { + $args->{ABSTRACT} = $self->abstract; + $args->{AUTHOR} = join ', ', @{$self->author || []}; + } + if ( $self->makemaker(6.10) ) { + $args->{NO_META} = 1; + #$args->{NO_MYMETA} = 1; + } + if ( $self->makemaker(6.17) and $self->sign ) { + $args->{SIGN} = 1; + } + unless ( $self->is_admin ) { + delete $args->{SIGN}; + } + if ( $self->makemaker(6.31) and $self->license ) { + $args->{LICENSE} = $self->license; + } + + my $prereq = ($args->{PREREQ_PM} ||= {}); + %$prereq = ( %$prereq, + map { @$_ } # flatten [module => version] + map { @$_ } + grep $_, + ($self->requires) + ); + + # Remove any reference to perl, PREREQ_PM doesn't support it + delete $args->{PREREQ_PM}->{perl}; + + # Merge both kinds of requires into BUILD_REQUIRES + my $build_prereq = ($args->{BUILD_REQUIRES} ||= {}); + %$build_prereq = ( %$build_prereq, + map { @$_ } # flatten [module => version] + map { @$_ } + grep $_, + ($self->configure_requires, $self->build_requires) + ); + + # Remove any reference to perl, BUILD_REQUIRES doesn't support it + delete $args->{BUILD_REQUIRES}->{perl}; + + # Delete bundled dists from prereq_pm, add it to Makefile DIR + my $subdirs = ($args->{DIR} || []); + if ($self->bundles) { + my %processed; + foreach my $bundle (@{ $self->bundles }) { + my ($mod_name, $dist_dir) = @$bundle; + delete $prereq->{$mod_name}; + $dist_dir = File::Basename::basename($dist_dir); # dir for building this module + if (not exists $processed{$dist_dir}) { + if (-d $dist_dir) { + # List as sub-directory to be processed by make + push @$subdirs, $dist_dir; + } + # Else do nothing: the module is already present on the system + $processed{$dist_dir} = undef; + } + } + } + + unless ( $self->makemaker('6.55_03') ) { + %$prereq = (%$prereq,%$build_prereq); + delete $args->{BUILD_REQUIRES}; + } + + if ( my $perl_version = $self->perl_version ) { + eval "use $perl_version; 1" + or die "ERROR: perl: Version $] is installed, " + . "but we need version >= $perl_version"; + + if ( $self->makemaker(6.48) ) { + $args->{MIN_PERL_VERSION} = $perl_version; + } + } + + if ($self->installdirs) { + warn qq{old INSTALLDIRS (probably set by makemaker_args) is overriden by installdirs\n} if $args->{INSTALLDIRS}; + $args->{INSTALLDIRS} = $self->installdirs; + } + + my %args = map { + ( $_ => $args->{$_} ) } grep {defined($args->{$_} ) + } keys %$args; + + my $user_preop = delete $args{dist}->{PREOP}; + if ( my $preop = $self->admin->preop($user_preop) ) { + foreach my $key ( keys %$preop ) { + $args{dist}->{$key} = $preop->{$key}; + } + } + + my $mm = ExtUtils::MakeMaker::WriteMakefile(%args); + $self->fix_up_makefile($mm->{FIRST_MAKEFILE} || 'Makefile'); +} + +sub fix_up_makefile { + my $self = shift; + my $makefile_name = shift; + my $top_class = ref($self->_top) || ''; + my $top_version = $self->_top->VERSION || ''; + + my $preamble = $self->preamble + ? "# Preamble by $top_class $top_version\n" + . $self->preamble + : ''; + my $postamble = "# Postamble by $top_class $top_version\n" + . 
($self->postamble || ''); + + local *MAKEFILE; + open MAKEFILE, "+< $makefile_name" or die "fix_up_makefile: Couldn't open $makefile_name: $!"; + eval { flock MAKEFILE, LOCK_EX }; + my $makefile = do { local $/; <MAKEFILE> }; + + $makefile =~ s/\b(test_harness\(\$\(TEST_VERBOSE\), )/$1'inc', /; + $makefile =~ s/( -I\$\(INST_ARCHLIB\))/ -Iinc$1/g; + $makefile =~ s/( "-I\$\(INST_LIB\)")/ "-Iinc"$1/g; + $makefile =~ s/^(FULLPERL = .*)/$1 "-Iinc"/m; + $makefile =~ s/^(PERL = .*)/$1 "-Iinc"/m; + + # Module::Install will never be used to build the Core Perl + # Sometimes PERL_LIB and PERL_ARCHLIB get written anyway, which breaks + # PREFIX/PERL5LIB, and thus, install_share. Blank them if they exist + $makefile =~ s/^PERL_LIB = .+/PERL_LIB =/m; + #$makefile =~ s/^PERL_ARCHLIB = .+/PERL_ARCHLIB =/m; + + # Perl 5.005 mentions PERL_LIB explicitly, so we have to remove that as well. + $makefile =~ s/(\"?)-I\$\(PERL_LIB\)\1//g; + + # XXX - This is currently unused; not sure if it breaks other MM-users + # $makefile =~ s/^pm_to_blib\s+:\s+/pm_to_blib :: /mg; + + seek MAKEFILE, 0, SEEK_SET; + truncate MAKEFILE, 0; + print MAKEFILE "$preamble$makefile$postamble" or die $!; + close MAKEFILE or die $!; + + 1; +} + +sub preamble { + my ($self, $text) = @_; + $self->{preamble} = $text . $self->{preamble} if defined $text; + $self->{preamble}; +} + +sub postamble { + my ($self, $text) = @_; + $self->{postamble} ||= $self->admin->postamble; + $self->{postamble} .= $text if defined $text; + $self->{postamble} +} + +1; + +__END__ + +#line 541 diff -Nru check-mk-1.2.2p3/=unpacked-tar11=/api/perl/inc/Module/Install/Metadata.pm check-mk-1.2.6p12/=unpacked-tar11=/api/perl/inc/Module/Install/Metadata.pm --- check-mk-1.2.2p3/=unpacked-tar11=/api/perl/inc/Module/Install/Metadata.pm 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/=unpacked-tar11=/api/perl/inc/Module/Install/Metadata.pm 2012-10-04 13:12:37.000000000 +0000 @@ -0,0 +1,715 @@ +#line 1 +package Module::Install::Metadata; + +use strict 'vars'; +use Module::Install::Base (); + +use vars qw{$VERSION @ISA $ISCORE}; +BEGIN { + $VERSION = '1.00'; + @ISA = 'Module::Install::Base'; + $ISCORE = 1; +} + +my @boolean_keys = qw{ + sign +}; + +my @scalar_keys = qw{ + name + module_name + abstract + version + distribution_type + tests + installdirs +}; + +my @tuple_keys = qw{ + configure_requires + build_requires + requires + recommends + bundles + resources +}; + +my @resource_keys = qw{ + homepage + bugtracker + repository +}; + +my @array_keys = qw{ + keywords + author +}; + +*authors = \&author; + +sub Meta { shift } +sub Meta_BooleanKeys { @boolean_keys } +sub Meta_ScalarKeys { @scalar_keys } +sub Meta_TupleKeys { @tuple_keys } +sub Meta_ResourceKeys { @resource_keys } +sub Meta_ArrayKeys { @array_keys } + +foreach my $key ( @boolean_keys ) { + *$key = sub { + my $self = shift; + if ( defined wantarray and not @_ ) { + return $self->{values}->{$key}; + } + $self->{values}->{$key} = ( @_ ?
$_[0] : 1 ); + return $self; + }; +} + +foreach my $key ( @scalar_keys ) { + *$key = sub { + my $self = shift; + return $self->{values}->{$key} if defined wantarray and !@_; + $self->{values}->{$key} = shift; + return $self; + }; +} + +foreach my $key ( @array_keys ) { + *$key = sub { + my $self = shift; + return $self->{values}->{$key} if defined wantarray and !@_; + $self->{values}->{$key} ||= []; + push @{$self->{values}->{$key}}, @_; + return $self; + }; +} + +foreach my $key ( @resource_keys ) { + *$key = sub { + my $self = shift; + unless ( @_ ) { + return () unless $self->{values}->{resources}; + return map { $_->[1] } + grep { $_->[0] eq $key } + @{ $self->{values}->{resources} }; + } + return $self->{values}->{resources}->{$key} unless @_; + my $uri = shift or die( + "Did not provide a value to $key()" + ); + $self->resources( $key => $uri ); + return 1; + }; +} + +foreach my $key ( grep { $_ ne "resources" } @tuple_keys) { + *$key = sub { + my $self = shift; + return $self->{values}->{$key} unless @_; + my @added; + while ( @_ ) { + my $module = shift or last; + my $version = shift || 0; + push @added, [ $module, $version ]; + } + push @{ $self->{values}->{$key} }, @added; + return map {@$_} @added; + }; +} + +# Resource handling +my %lc_resource = map { $_ => 1 } qw{ + homepage + license + bugtracker + repository +}; + +sub resources { + my $self = shift; + while ( @_ ) { + my $name = shift or last; + my $value = shift or next; + if ( $name eq lc $name and ! $lc_resource{$name} ) { + die("Unsupported reserved lowercase resource '$name'"); + } + $self->{values}->{resources} ||= []; + push @{ $self->{values}->{resources} }, [ $name, $value ]; + } + $self->{values}->{resources}; +} + +# Aliases for build_requires that will have alternative +# meanings in some future version of META.yml. +sub test_requires { shift->build_requires(@_) } +sub install_requires { shift->build_requires(@_) } + +# Aliases for installdirs options +sub install_as_core { $_[0]->installdirs('perl') } +sub install_as_cpan { $_[0]->installdirs('site') } +sub install_as_site { $_[0]->installdirs('site') } +sub install_as_vendor { $_[0]->installdirs('vendor') } + +sub dynamic_config { + my $self = shift; + unless ( @_ ) { + warn "You MUST provide an explicit true/false value to dynamic_config\n"; + return $self; + } + $self->{values}->{dynamic_config} = $_[0] ? 1 : 0; + return 1; +} + +sub perl_version { + my $self = shift; + return $self->{values}->{perl_version} unless @_; + my $version = shift or die( + "Did not provide a value to perl_version()" + ); + + # Normalize the version + $version = $self->_perl_version($version); + + # We don't support the reall old versions + unless ( $version >= 5.005 ) { + die "Module::Install only supports 5.005 or newer (use ExtUtils::MakeMaker)\n"; + } + + $self->{values}->{perl_version} = $version; +} + +sub all_from { + my ( $self, $file ) = @_; + + unless ( defined($file) ) { + my $name = $self->name or die( + "all_from called with no args without setting name() first" + ); + $file = join('/', 'lib', split(/-/, $name)) . '.pm'; + $file =~ s{.*/}{} unless -e $file; + unless ( -e $file ) { + die("all_from cannot find $file from $name"); + } + } + unless ( -f $file ) { + die("The path '$file' does not exist, or is not a file"); + } + + $self->{values}{all_from} = $file; + + # Some methods pull from POD instead of code. 
+ # If there is a matching .pod, use that instead + my $pod = $file; + $pod =~ s/\.pm$/.pod/i; + $pod = $file unless -e $pod; + + # Pull the different values + $self->name_from($file) unless $self->name; + $self->version_from($file) unless $self->version; + $self->perl_version_from($file) unless $self->perl_version; + $self->author_from($pod) unless @{$self->author || []}; + $self->license_from($pod) unless $self->license; + $self->abstract_from($pod) unless $self->abstract; + + return 1; +} + +sub provides { + my $self = shift; + my $provides = ( $self->{values}->{provides} ||= {} ); + %$provides = (%$provides, @_) if @_; + return $provides; +} + +sub auto_provides { + my $self = shift; + return $self unless $self->is_admin; + unless (-e 'MANIFEST') { + warn "Cannot deduce auto_provides without a MANIFEST, skipping\n"; + return $self; + } + # Avoid spurious warnings as we are not checking manifest here. + local $SIG{__WARN__} = sub {1}; + require ExtUtils::Manifest; + local *ExtUtils::Manifest::manicheck = sub { return }; + + require Module::Build; + my $build = Module::Build->new( + dist_name => $self->name, + dist_version => $self->version, + license => $self->license, + ); + $self->provides( %{ $build->find_dist_packages || {} } ); +} + +sub feature { + my $self = shift; + my $name = shift; + my $features = ( $self->{values}->{features} ||= [] ); + my $mods; + + if ( @_ == 1 and ref( $_[0] ) ) { + # The user used ->feature like ->features by passing in the second + # argument as a reference. Accomodate for that. + $mods = $_[0]; + } else { + $mods = \@_; + } + + my $count = 0; + push @$features, ( + $name => [ + map { + ref($_) ? ( ref($_) eq 'HASH' ) ? %$_ : @$_ : $_ + } @$mods + ] + ); + + return @$features; +} + +sub features { + my $self = shift; + while ( my ( $name, $mods ) = splice( @_, 0, 2 ) ) { + $self->feature( $name, @$mods ); + } + return $self->{values}->{features} + ? @{ $self->{values}->{features} } + : (); +} + +sub no_index { + my $self = shift; + my $type = shift; + push @{ $self->{values}->{no_index}->{$type} }, @_ if $type; + return $self->{values}->{no_index}; +} + +sub read { + my $self = shift; + $self->include_deps( 'YAML::Tiny', 0 ); + + require YAML::Tiny; + my $data = YAML::Tiny::LoadFile('META.yml'); + + # Call methods explicitly in case user has already set some values. 
+ while ( my ( $key, $value ) = each %$data ) { + next unless $self->can($key); + if ( ref $value eq 'HASH' ) { + while ( my ( $module, $version ) = each %$value ) { + $self->can($key)->($self, $module => $version ); + } + } else { + $self->can($key)->($self, $value); + } + } + return $self; +} + +sub write { + my $self = shift; + return $self unless $self->is_admin; + $self->admin->write_meta; + return $self; +} + +sub version_from { + require ExtUtils::MM_Unix; + my ( $self, $file ) = @_; + $self->version( ExtUtils::MM_Unix->parse_version($file) ); + + # for version integrity check + $self->makemaker_args( VERSION_FROM => $file ); +} + +sub abstract_from { + require ExtUtils::MM_Unix; + my ( $self, $file ) = @_; + $self->abstract( + bless( + { DISTNAME => $self->name }, + 'ExtUtils::MM_Unix' + )->parse_abstract($file) + ); +} + +# Add both distribution and module name +sub name_from { + my ($self, $file) = @_; + if ( + Module::Install::_read($file) =~ m/ + ^ \s* + package \s* + ([\w:]+) + \s* ; + /ixms + ) { + my ($name, $module_name) = ($1, $1); + $name =~ s{::}{-}g; + $self->name($name); + unless ( $self->module_name ) { + $self->module_name($module_name); + } + } else { + die("Cannot determine name from $file\n"); + } +} + +sub _extract_perl_version { + if ( + $_[0] =~ m/ + ^\s* + (?:use|require) \s* + v? + ([\d_\.]+) + \s* ; + /ixms + ) { + my $perl_version = $1; + $perl_version =~ s{_}{}g; + return $perl_version; + } else { + return; + } +} + +sub perl_version_from { + my $self = shift; + my $perl_version=_extract_perl_version(Module::Install::_read($_[0])); + if ($perl_version) { + $self->perl_version($perl_version); + } else { + warn "Cannot determine perl version info from $_[0]\n"; + return; + } +} + +sub author_from { + my $self = shift; + my $content = Module::Install::_read($_[0]); + if ($content =~ m/ + =head \d \s+ (?:authors?)\b \s* + ([^\n]*) + | + =head \d \s+ (?:licen[cs]e|licensing|copyright|legal)\b \s* + .*? copyright .*? \d\d\d[\d.]+ \s* (?:\bby\b)? \s* + ([^\n]*) + /ixms) { + my $author = $1 || $2; + + # XXX: ugly but should work anyway... + if (eval "require Pod::Escapes; 1") { + # Pod::Escapes has a mapping table. + # It's in core of perl >= 5.9.3, and should be installed + # as one of the Pod::Simple's prereqs, which is a prereq + # of Pod::Text 3.x (see also below). + $author =~ s{ E<( (\d+) | ([A-Za-z]+) )> } + { + defined $2 + ? chr($2) + : defined $Pod::Escapes::Name2character_number{$1} + ? chr($Pod::Escapes::Name2character_number{$1}) + : do { + warn "Unknown escape: E<$1>"; + "E<$1>"; + }; + }gex; + } + elsif (eval "require Pod::Text; 1" && $Pod::Text::VERSION < 3) { + # Pod::Text < 3.0 has yet another mapping table, + # though the table name of 2.x and 1.x are different. + # (1.x is in core of Perl < 5.6, 2.x is in core of + # Perl < 5.9.3) + my $mapping = ($Pod::Text::VERSION < 2) + ? \%Pod::Text::HTML_Escapes + : \%Pod::Text::ESCAPES; + $author =~ s{ E<( (\d+) | ([A-Za-z]+) )> } + { + defined $2 + ? chr($2) + : defined $mapping->{$1} + ? 
$mapping->{$1} + : do { + warn "Unknown escape: E<$1>"; + "E<$1>"; + }; + }gex; + } + else { + $author =~ s{E<lt>}{<}g; + $author =~ s{E<gt>}{>}g; + } + $self->author($author); + } else { + warn "Cannot determine author info from $_[0]\n"; + } +} + +#Stolen from M::B +my %license_urls = ( + perl => 'http://dev.perl.org/licenses/', + apache => 'http://apache.org/licenses/LICENSE-2.0', + apache_1_1 => 'http://apache.org/licenses/LICENSE-1.1', + artistic => 'http://opensource.org/licenses/artistic-license.php', + artistic_2 => 'http://opensource.org/licenses/artistic-license-2.0.php', + lgpl => 'http://opensource.org/licenses/lgpl-license.php', + lgpl2 => 'http://opensource.org/licenses/lgpl-2.1.php', + lgpl3 => 'http://opensource.org/licenses/lgpl-3.0.html', + bsd => 'http://opensource.org/licenses/bsd-license.php', + gpl => 'http://opensource.org/licenses/gpl-license.php', + gpl2 => 'http://opensource.org/licenses/gpl-2.0.php', + gpl3 => 'http://opensource.org/licenses/gpl-3.0.html', + mit => 'http://opensource.org/licenses/mit-license.php', + mozilla => 'http://opensource.org/licenses/mozilla1.1.php', + open_source => undef, + unrestricted => undef, + restrictive => undef, + unknown => undef, +); + +sub license { + my $self = shift; + return $self->{values}->{license} unless @_; + my $license = shift or die( + 'Did not provide a value to license()' + ); + $license = __extract_license($license) || lc $license; + $self->{values}->{license} = $license; + + # Automatically fill in license URLs + if ( $license_urls{$license} ) { + $self->resources( license => $license_urls{$license} ); + } + + return 1; +} + +sub _extract_license { + my $pod = shift; + my $matched; + return __extract_license( + ($matched) = $pod =~ m/ + (=head \d \s+ L(?i:ICEN[CS]E|ICENSING)\b.*?) + (=head \d.*|=cut.*|)\z + /xms + ) || __extract_license( + ($matched) = $pod =~ m/ + (=head \d \s+ (?:C(?i:OPYRIGHTS?)|L(?i:EGAL))\b.*?)
+ (=head \d.*|=cut.*|)\z + /xms + ); +} + +sub __extract_license { + my $license_text = shift or return; + my @phrases = ( + '(?:under )?the same (?:terms|license) as (?:perl|the perl (?:\d )?programming language)' => 'perl', 1, + '(?:under )?the terms of (?:perl|the perl programming language) itself' => 'perl', 1, + 'Artistic and GPL' => 'perl', 1, + 'GNU general public license' => 'gpl', 1, + 'GNU public license' => 'gpl', 1, + 'GNU lesser general public license' => 'lgpl', 1, + 'GNU lesser public license' => 'lgpl', 1, + 'GNU library general public license' => 'lgpl', 1, + 'GNU library public license' => 'lgpl', 1, + 'GNU Free Documentation license' => 'unrestricted', 1, + 'GNU Affero General Public License' => 'open_source', 1, + '(?:Free)?BSD license' => 'bsd', 1, + 'Artistic license' => 'artistic', 1, + 'Apache (?:Software )?license' => 'apache', 1, + 'GPL' => 'gpl', 1, + 'LGPL' => 'lgpl', 1, + 'BSD' => 'bsd', 1, + 'Artistic' => 'artistic', 1, + 'MIT' => 'mit', 1, + 'Mozilla Public License' => 'mozilla', 1, + 'Q Public License' => 'open_source', 1, + 'OpenSSL License' => 'unrestricted', 1, + 'SSLeay License' => 'unrestricted', 1, + 'zlib License' => 'open_source', 1, + 'proprietary' => 'proprietary', 0, + ); + while ( my ($pattern, $license, $osi) = splice(@phrases, 0, 3) ) { + $pattern =~ s#\s+#\\s+#gs; + if ( $license_text =~ /\b$pattern\b/i ) { + return $license; + } + } + return ''; +} + +sub license_from { + my $self = shift; + if (my $license=_extract_license(Module::Install::_read($_[0]))) { + $self->license($license); + } else { + warn "Cannot determine license info from $_[0]\n"; + return 'unknown'; + } +} + +sub _extract_bugtracker { + my @links = $_[0] =~ m#L<( + \Qhttp://rt.cpan.org/\E[^>]+| + \Qhttp://github.com/\E[\w_]+/[\w_]+/issues| + \Qhttp://code.google.com/p/\E[\w_\-]+/issues/list + )>#gx; + my %links; + @links{@links}=(); + @links=keys %links; + return @links; +} + +sub bugtracker_from { + my $self = shift; + my $content = Module::Install::_read($_[0]); + my @links = _extract_bugtracker($content); + unless ( @links ) { + warn "Cannot determine bugtracker info from $_[0]\n"; + return 0; + } + if ( @links > 1 ) { + warn "Found more than one bugtracker link in $_[0]\n"; + return 0; + } + + # Set the bugtracker + bugtracker( $links[0] ); + return 1; +} + +sub requires_from { + my $self = shift; + my $content = Module::Install::_readperl($_[0]); + my @requires = $content =~ m/^use\s+([^\W\d]\w*(?:::\w+)*)\s+([\d\.]+)/mg; + while ( @requires ) { + my $module = shift @requires; + my $version = shift @requires; + $self->requires( $module => $version ); + } +} + +sub test_requires_from { + my $self = shift; + my $content = Module::Install::_readperl($_[0]); + my @requires = $content =~ m/^use\s+([^\W\d]\w*(?:::\w+)*)\s+([\d\.]+)/mg; + while ( @requires ) { + my $module = shift @requires; + my $version = shift @requires; + $self->test_requires( $module => $version ); + } +} + +# Convert triple-part versions (eg, 5.6.1 or 5.8.9) to +# numbers (eg, 5.006001 or 5.008009). +# Also, convert double-part versions (eg, 5.8) +sub _perl_version { + my $v = $_[-1]; + $v =~ s/^([1-9])\.([1-9]\d?\d?)$/sprintf("%d.%03d",$1,$2)/e; + $v =~ s/^([1-9])\.([1-9]\d?\d?)\.(0|[1-9]\d?\d?)$/sprintf("%d.%03d%03d",$1,$2,$3 || 0)/e; + $v =~ s/(\.\d\d\d)000$/$1/; + $v =~ s/_.+$//; + if ( ref($v) ) { + # Numify + $v = $v + 0; + } + return $v; +} + +sub add_metadata { + my $self = shift; + my %hash = @_; + for my $key (keys %hash) { + warn "add_metadata: $key is not prefixed with 'x_'.\n" . 
+ "Use appopriate function to add non-private metadata.\n" unless $key =~ /^x_/; + $self->{values}->{$key} = $hash{$key}; + } +} + + +###################################################################### +# MYMETA Support + +sub WriteMyMeta { + die "WriteMyMeta has been deprecated"; +} + +sub write_mymeta_yaml { + my $self = shift; + + # We need YAML::Tiny to write the MYMETA.yml file + unless ( eval { require YAML::Tiny; 1; } ) { + return 1; + } + + # Generate the data + my $meta = $self->_write_mymeta_data or return 1; + + # Save as the MYMETA.yml file + print "Writing MYMETA.yml\n"; + YAML::Tiny::DumpFile('MYMETA.yml', $meta); +} + +sub write_mymeta_json { + my $self = shift; + + # We need JSON to write the MYMETA.json file + unless ( eval { require JSON; 1; } ) { + return 1; + } + + # Generate the data + my $meta = $self->_write_mymeta_data or return 1; + + # Save as the MYMETA.yml file + print "Writing MYMETA.json\n"; + Module::Install::_write( + 'MYMETA.json', + JSON->new->pretty(1)->canonical->encode($meta), + ); +} + +sub _write_mymeta_data { + my $self = shift; + + # If there's no existing META.yml there is nothing we can do + return undef unless -f 'META.yml'; + + # We need Parse::CPAN::Meta to load the file + unless ( eval { require Parse::CPAN::Meta; 1; } ) { + return undef; + } + + # Merge the perl version into the dependencies + my $val = $self->Meta->{values}; + my $perl = delete $val->{perl_version}; + if ( $perl ) { + $val->{requires} ||= []; + my $requires = $val->{requires}; + + # Canonize to three-dot version after Perl 5.6 + if ( $perl >= 5.006 ) { + $perl =~ s{^(\d+)\.(\d\d\d)(\d*)}{join('.', $1, int($2||0), int($3||0))}e + } + unshift @$requires, [ perl => $perl ]; + } + + # Load the advisory META.yml file + my @yaml = Parse::CPAN::Meta::LoadFile('META.yml'); + my $meta = $yaml[0]; + + # Overwrite the non-configure dependency hashs + delete $meta->{requires}; + delete $meta->{build_requires}; + delete $meta->{recommends}; + if ( exists $val->{requires} ) { + $meta->{requires} = { map { @$_ } @{ $val->{requires} } }; + } + if ( exists $val->{build_requires} ) { + $meta->{build_requires} = { map { @$_ } @{ $val->{build_requires} } }; + } + + return $meta; +} + +1; diff -Nru check-mk-1.2.2p3/=unpacked-tar11=/api/perl/inc/Module/Install/Win32.pm check-mk-1.2.6p12/=unpacked-tar11=/api/perl/inc/Module/Install/Win32.pm --- check-mk-1.2.2p3/=unpacked-tar11=/api/perl/inc/Module/Install/Win32.pm 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/=unpacked-tar11=/api/perl/inc/Module/Install/Win32.pm 2012-10-04 13:12:37.000000000 +0000 @@ -0,0 +1,64 @@ +#line 1 +package Module::Install::Win32; + +use strict; +use Module::Install::Base (); + +use vars qw{$VERSION @ISA $ISCORE}; +BEGIN { + $VERSION = '1.00'; + @ISA = 'Module::Install::Base'; + $ISCORE = 1; +} + +# determine if the user needs nmake, and download it if needed +sub check_nmake { + my $self = shift; + $self->load('can_run'); + $self->load('get_file'); + + require Config; + return unless ( + $^O eq 'MSWin32' and + $Config::Config{make} and + $Config::Config{make} =~ /^nmake\b/i and + ! 
$self->can_run('nmake') + ); + + print "The required 'nmake' executable not found, fetching it...\n"; + + require File::Basename; + my $rv = $self->get_file( + url => 'http://download.microsoft.com/download/vc15/Patch/1.52/W95/EN-US/Nmake15.exe', + ftp_url => 'ftp://ftp.microsoft.com/Softlib/MSLFILES/Nmake15.exe', + local_dir => File::Basename::dirname($^X), + size => 51928, + run => 'Nmake15.exe /o > nul', + check_for => 'Nmake.exe', + remove => 1, + ); + + die <<'END_MESSAGE' unless $rv; + +------------------------------------------------------------------------------- + +Since you are using Microsoft Windows, you will need the 'nmake' utility +before installation. It's available at: + + http://download.microsoft.com/download/vc15/Patch/1.52/W95/EN-US/Nmake15.exe + or + ftp://ftp.microsoft.com/Softlib/MSLFILES/Nmake15.exe + +Please download the file manually, save it to a directory in %PATH% (e.g. +C:\WINDOWS\COMMAND\), then launch the MS-DOS command line shell, "cd" to +that directory, and run "Nmake15.exe" from there; that will create the +'nmake.exe' file needed by this module. + +You may then resume the installation process described in README. + +------------------------------------------------------------------------------- +END_MESSAGE + +} + +1; diff -Nru check-mk-1.2.2p3/=unpacked-tar11=/api/perl/inc/Module/Install/WriteAll.pm check-mk-1.2.6p12/=unpacked-tar11=/api/perl/inc/Module/Install/WriteAll.pm --- check-mk-1.2.2p3/=unpacked-tar11=/api/perl/inc/Module/Install/WriteAll.pm 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/=unpacked-tar11=/api/perl/inc/Module/Install/WriteAll.pm 2012-10-04 13:12:37.000000000 +0000 @@ -0,0 +1,63 @@ +#line 1 +package Module::Install::WriteAll; + +use strict; +use Module::Install::Base (); + +use vars qw{$VERSION @ISA $ISCORE}; +BEGIN { + $VERSION = '1.00'; + @ISA = qw{Module::Install::Base}; + $ISCORE = 1; +} + +sub WriteAll { + my $self = shift; + my %args = ( + meta => 1, + sign => 0, + inline => 0, + check_nmake => 1, + @_, + ); + + $self->sign(1) if $args{sign}; + $self->admin->WriteAll(%args) if $self->is_admin; + + $self->check_nmake if $args{check_nmake}; + unless ( $self->makemaker_args->{PL_FILES} ) { + # XXX: This still may be a bit over-defensive... + unless ($self->makemaker(6.25)) { + $self->makemaker_args( PL_FILES => {} ) if -f 'Build.PL'; + } + } + + # Until ExtUtils::MakeMaker support MYMETA.yml, make sure + # we clean it up properly ourself. + $self->realclean_files('MYMETA.yml'); + + if ( $args{inline} ) { + $self->Inline->write; + } else { + $self->Makefile->write; + } + + # The Makefile write process adds a couple of dependencies, + # so write the META.yml files after the Makefile. + if ( $args{meta} ) { + $self->Meta->write; + } + + # Experimental support for MYMETA + if ( $ENV{X_MYMETA} ) { + if ( $ENV{X_MYMETA} eq 'JSON' ) { + $self->Meta->write_mymeta_json; + } else { + $self->Meta->write_mymeta_yaml; + } + } + + return 1; +} + +1; diff -Nru check-mk-1.2.2p3/=unpacked-tar11=/api/perl/inc/Module/Install.pm check-mk-1.2.6p12/=unpacked-tar11=/api/perl/inc/Module/Install.pm --- check-mk-1.2.2p3/=unpacked-tar11=/api/perl/inc/Module/Install.pm 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/=unpacked-tar11=/api/perl/inc/Module/Install.pm 2012-10-04 13:12:37.000000000 +0000 @@ -0,0 +1,470 @@ +#line 1 +package Module::Install; + +# For any maintainers: +# The load order for Module::Install is a bit magic. +# It goes something like this... 
+# +# IF ( host has Module::Install installed, creating author mode ) { +# 1. Makefile.PL calls "use inc::Module::Install" +# 2. $INC{inc/Module/Install.pm} set to installed version of inc::Module::Install +# 3. The installed version of inc::Module::Install loads +# 4. inc::Module::Install calls "require Module::Install" +# 5. The ./inc/ version of Module::Install loads +# } ELSE { +# 1. Makefile.PL calls "use inc::Module::Install" +# 2. $INC{inc/Module/Install.pm} set to ./inc/ version of Module::Install +# 3. The ./inc/ version of Module::Install loads +# } + +use 5.005; +use strict 'vars'; +use Cwd (); +use File::Find (); +use File::Path (); + +use vars qw{$VERSION $MAIN}; +BEGIN { + # All Module::Install core packages now require synchronised versions. + # This will be used to ensure we don't accidentally load old or + # different versions of modules. + # This is not enforced yet, but will be some time in the next few + # releases once we can make sure it won't clash with custom + # Module::Install extensions. + $VERSION = '1.00'; + + # Storage for the pseudo-singleton + $MAIN = undef; + + *inc::Module::Install::VERSION = *VERSION; + @inc::Module::Install::ISA = __PACKAGE__; + +} + +sub import { + my $class = shift; + my $self = $class->new(@_); + my $who = $self->_caller; + + #------------------------------------------------------------- + # all of the following checks should be included in import(), + # to allow "eval 'require Module::Install; 1' to test + # installation of Module::Install. (RT #51267) + #------------------------------------------------------------- + + # Whether or not inc::Module::Install is actually loaded, the + # $INC{inc/Module/Install.pm} is what will still get set as long as + # the caller loaded module this in the documented manner. + # If not set, the caller may NOT have loaded the bundled version, and thus + # they may not have a MI version that works with the Makefile.PL. This would + # result in false errors or unexpected behaviour. And we don't want that. + my $file = join( '/', 'inc', split /::/, __PACKAGE__ ) . '.pm'; + unless ( $INC{$file} ) { die <<"END_DIE" } + +Please invoke ${\__PACKAGE__} with: + + use inc::${\__PACKAGE__}; + +not: + + use ${\__PACKAGE__}; + +END_DIE + + # This reportedly fixes a rare Win32 UTC file time issue, but + # as this is a non-cross-platform XS module not in the core, + # we shouldn't really depend on it. See RT #24194 for detail. + # (Also, this module only supports Perl 5.6 and above). + eval "use Win32::UTCFileTime" if $^O eq 'MSWin32' && $] >= 5.006; + + # If the script that is loading Module::Install is from the future, + # then make will detect this and cause it to re-run over and over + # again. This is bad. Rather than taking action to touch it (which + # is unreliable on some platforms and requires write permissions) + # for now we should catch this and refuse to run. + if ( -f $0 ) { + my $s = (stat($0))[9]; + + # If the modification time is only slightly in the future, + # sleep briefly to remove the problem. + my $a = $s - time; + if ( $a > 0 and $a < 5 ) { sleep 5 } + + # Too far in the future, throw an error. + my $t = time; + if ( $s > $t ) { die <<"END_DIE" } + +Your installer $0 has a modification time in the future ($s > $t). + +This is known to create infinite loops in make. + +Please correct this, then run $0 again. + +END_DIE + } + + + # Build.PL was formerly supported, but no longer is due to excessive + # difficulty in implementing every single feature twice. 
+ if ( $0 =~ /Build.PL$/i ) { die <<"END_DIE" } + +Module::Install no longer supports Build.PL. + +It was impossible to maintain duel backends, and has been deprecated. + +Please remove all Build.PL files and only use the Makefile.PL installer. + +END_DIE + + #------------------------------------------------------------- + + # To save some more typing in Module::Install installers, every... + # use inc::Module::Install + # ...also acts as an implicit use strict. + $^H |= strict::bits(qw(refs subs vars)); + + #------------------------------------------------------------- + + unless ( -f $self->{file} ) { + foreach my $key (keys %INC) { + delete $INC{$key} if $key =~ /Module\/Install/; + } + + local $^W; + require "$self->{path}/$self->{dispatch}.pm"; + File::Path::mkpath("$self->{prefix}/$self->{author}"); + $self->{admin} = "$self->{name}::$self->{dispatch}"->new( _top => $self ); + $self->{admin}->init; + @_ = ($class, _self => $self); + goto &{"$self->{name}::import"}; + } + + local $^W; + *{"${who}::AUTOLOAD"} = $self->autoload; + $self->preload; + + # Unregister loader and worker packages so subdirs can use them again + delete $INC{'inc/Module/Install.pm'}; + delete $INC{'Module/Install.pm'}; + + # Save to the singleton + $MAIN = $self; + + return 1; +} + +sub autoload { + my $self = shift; + my $who = $self->_caller; + my $cwd = Cwd::cwd(); + my $sym = "${who}::AUTOLOAD"; + $sym->{$cwd} = sub { + my $pwd = Cwd::cwd(); + if ( my $code = $sym->{$pwd} ) { + # Delegate back to parent dirs + goto &$code unless $cwd eq $pwd; + } + unless ($$sym =~ s/([^:]+)$//) { + # XXX: it looks like we can't retrieve the missing function + # via $$sym (usually $main::AUTOLOAD) in this case. + # I'm still wondering if we should slurp Makefile.PL to + # get some context or not ... + my ($package, $file, $line) = caller; + die <<"EOT"; +Unknown function is found at $file line $line. +Execution of $file aborted due to runtime errors. + +If you're a contributor to a project, you may need to install +some Module::Install extensions from CPAN (or other repository). +If you're a user of a module, please contact the author. +EOT + } + my $method = $1; + if ( uc($method) eq $method ) { + # Do nothing + return; + } elsif ( $method =~ /^_/ and $self->can($method) ) { + # Dispatch to the root M:I class + return $self->$method(@_); + } + + # Dispatch to the appropriate plugin + unshift @_, ( $self, $1 ); + goto &{$self->can('call')}; + }; +} + +sub preload { + my $self = shift; + unless ( $self->{extensions} ) { + $self->load_extensions( + "$self->{prefix}/$self->{path}", $self + ); + } + + my @exts = @{$self->{extensions}}; + unless ( @exts ) { + @exts = $self->{admin}->load_all_extensions; + } + + my %seen; + foreach my $obj ( @exts ) { + while (my ($method, $glob) = each %{ref($obj) . '::'}) { + next unless $obj->can($method); + next if $method =~ /^_/; + next if $method eq uc($method); + $seen{$method}++; + } + } + + my $who = $self->_caller; + foreach my $name ( sort keys %seen ) { + local $^W; + *{"${who}::$name"} = sub { + ${"${who}::AUTOLOAD"} = "${who}::$name"; + goto &{"${who}::AUTOLOAD"}; + }; + } +} + +sub new { + my ($class, %args) = @_; + + delete $INC{'FindBin.pm'}; + { + # to suppress the redefine warning + local $SIG{__WARN__} = sub {}; + require FindBin; + } + + # ignore the prefix on extension modules built from top level. 
+ my $base_path = Cwd::abs_path($FindBin::Bin); + unless ( Cwd::abs_path(Cwd::cwd()) eq $base_path ) { + delete $args{prefix}; + } + return $args{_self} if $args{_self}; + + $args{dispatch} ||= 'Admin'; + $args{prefix} ||= 'inc'; + $args{author} ||= ($^O eq 'VMS' ? '_author' : '.author'); + $args{bundle} ||= 'inc/BUNDLES'; + $args{base} ||= $base_path; + $class =~ s/^\Q$args{prefix}\E:://; + $args{name} ||= $class; + $args{version} ||= $class->VERSION; + unless ( $args{path} ) { + $args{path} = $args{name}; + $args{path} =~ s!::!/!g; + } + $args{file} ||= "$args{base}/$args{prefix}/$args{path}.pm"; + $args{wrote} = 0; + + bless( \%args, $class ); +} + +sub call { + my ($self, $method) = @_; + my $obj = $self->load($method) or return; + splice(@_, 0, 2, $obj); + goto &{$obj->can($method)}; +} + +sub load { + my ($self, $method) = @_; + + $self->load_extensions( + "$self->{prefix}/$self->{path}", $self + ) unless $self->{extensions}; + + foreach my $obj (@{$self->{extensions}}) { + return $obj if $obj->can($method); + } + + my $admin = $self->{admin} or die <<"END_DIE"; +The '$method' method does not exist in the '$self->{prefix}' path! +Please remove the '$self->{prefix}' directory and run $0 again to load it. +END_DIE + + my $obj = $admin->load($method, 1); + push @{$self->{extensions}}, $obj; + + $obj; +} + +sub load_extensions { + my ($self, $path, $top) = @_; + + my $should_reload = 0; + unless ( grep { ! ref $_ and lc $_ eq lc $self->{prefix} } @INC ) { + unshift @INC, $self->{prefix}; + $should_reload = 1; + } + + foreach my $rv ( $self->find_extensions($path) ) { + my ($file, $pkg) = @{$rv}; + next if $self->{pathnames}{$pkg}; + + local $@; + my $new = eval { local $^W; require $file; $pkg->can('new') }; + unless ( $new ) { + warn $@ if $@; + next; + } + $self->{pathnames}{$pkg} = + $should_reload ? delete $INC{$file} : $INC{$file}; + push @{$self->{extensions}}, &{$new}($pkg, _top => $top ); + } + + $self->{extensions} ||= []; +} + +sub find_extensions { + my ($self, $path) = @_; + + my @found; + File::Find::find( sub { + my $file = $File::Find::name; + return unless $file =~ m!^\Q$path\E/(.+)\.pm\Z!is; + my $subpath = $1; + return if lc($subpath) eq lc($self->{dispatch}); + + $file = "$self->{path}/$subpath.pm"; + my $pkg = "$self->{name}::$subpath"; + $pkg =~ s!/!::!g; + + # If we have a mixed-case package name, assume case has been preserved + # correctly. Otherwise, root through the file to locate the case-preserved + # version of the package name. + if ( $subpath eq lc($subpath) || $subpath eq uc($subpath) ) { + my $content = Module::Install::_read($subpath . '.pm'); + my $in_pod = 0; + foreach ( split //, $content ) { + $in_pod = 1 if /^=\w/; + $in_pod = 0 if /^=cut/; + next if ($in_pod || /^=cut/); # skip pod text + next if /^\s*#/; # and comments + if ( m/^\s*package\s+($pkg)\s*;/i ) { + $pkg = $1; + last; + } + } + } + + push @found, [ $file, $pkg ]; + }, $path ) if -d $path; + + @found; +} + + + + + +##################################################################### +# Common Utility Functions + +sub _caller { + my $depth = 0; + my $call = caller($depth); + while ( $call eq __PACKAGE__ ) { + $depth++; + $call = caller($depth); + } + return $call; +} + +# Done in evals to avoid confusing Perl::MinimumVersion +eval( $] >= 5.006 ? 
<<'END_NEW' : <<'END_OLD' ); die $@ if $@; +sub _read { + local *FH; + open( FH, '<', $_[0] ) or die "open($_[0]): $!"; + my $string = do { local $/; <FH> }; + close FH or die "close($_[0]): $!"; + return $string; +} +END_NEW +sub _read { + local *FH; + open( FH, "< $_[0]" ) or die "open($_[0]): $!"; + my $string = do { local $/; <FH> }; + close FH or die "close($_[0]): $!"; + return $string; +} +END_OLD + +sub _readperl { + my $string = Module::Install::_read($_[0]); + $string =~ s/(?:\015{1,2}\012|\015|\012)/\n/sg; + $string =~ s/(\n)\n*__(?:DATA|END)__\b.*\z/$1/s; + $string =~ s/\n\n=\w+.+?\n\n=cut\b.+?\n+/\n\n/sg; + return $string; +} + +sub _readpod { + my $string = Module::Install::_read($_[0]); + $string =~ s/(?:\015{1,2}\012|\015|\012)/\n/sg; + return $string if $_[0] =~ /\.pod\z/; + $string =~ s/(^|\n=cut\b.+?\n+)[^=\s].+?\n(\n=\w+|\z)/$1$2/sg; + $string =~ s/\n*=pod\b[^\n]*\n+/\n\n/sg; + $string =~ s/\n*=cut\b[^\n]*\n+/\n\n/sg; + $string =~ s/^\n+//s; + return $string; +} + +# Done in evals to avoid confusing Perl::MinimumVersion +eval( $] >= 5.006 ? <<'END_NEW' : <<'END_OLD' ); die $@ if $@; +sub _write { + local *FH; + open( FH, '>', $_[0] ) or die "open($_[0]): $!"; + foreach ( 1 .. $#_ ) { + print FH $_[$_] or die "print($_[0]): $!"; + } + close FH or die "close($_[0]): $!"; +} +END_NEW +sub _write { + local *FH; + open( FH, "> $_[0]" ) or die "open($_[0]): $!"; + foreach ( 1 .. $#_ ) { + print FH $_[$_] or die "print($_[0]): $!"; + } + close FH or die "close($_[0]): $!"; +} +END_OLD + +# _version is for processing module versions (eg, 1.03_05) not +# Perl versions (eg, 5.8.1). +sub _version ($) { + my $s = shift || 0; + my $d =()= $s =~ /(\.)/g; + if ( $d >= 2 ) { + # Normalise multipart versions + $s =~ s/(\.)(\d{1,3})/sprintf("$1%03d",$2)/eg; + } + $s =~ s/^(\d+)\.?//; + my $l = $1 || 0; + my @v = map { + $_ . '0' x (3 - length $_) + } $s =~ /(\d{1,3})\D?/g; + $l = $l . '.' . join '', @v if @v; + return $l + 0; +} + +sub _cmp ($$) { + _version($_[0]) <=> _version($_[1]); +} + +# Cloned from Params::Util::_CLASS +sub _CLASS ($) { + ( + defined $_[0] + and + ! ref $_[0] + and + $_[0] =~ m/^[^\W\d]\w*(?:::\w+)*\z/s + ) ? $_[0] : undef; +} + +1; + +# Copyright 2008 - 2010 Adam Kennedy. diff -Nru check-mk-1.2.2p3/=unpacked-tar11=/api/perl/lib/Monitoring/Livestatus/INET.pm check-mk-1.2.6p12/=unpacked-tar11=/api/perl/lib/Monitoring/Livestatus/INET.pm --- check-mk-1.2.2p3/=unpacked-tar11=/api/perl/lib/Monitoring/Livestatus/INET.pm 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/=unpacked-tar11=/api/perl/lib/Monitoring/Livestatus/INET.pm 2012-10-04 13:12:37.000000000 +0000 @@ -0,0 +1,121 @@ +package Monitoring::Livestatus::INET; + +use 5.000000; +use strict; +use warnings; +use IO::Socket::INET; +use Socket qw(IPPROTO_TCP TCP_NODELAY); +use Carp; +use base "Monitoring::Livestatus"; + +=head1 NAME + +Monitoring::Livestatus::INET - connector with tcp sockets + +=head1 SYNOPSIS + + use Monitoring::Livestatus; + my $nl = Monitoring::Livestatus::INET->new( 'localhost:9999' ); + my $hosts = $nl->selectall_arrayref("GET hosts"); + +=head1 CONSTRUCTOR + +=head2 new ( [ARGS] ) + +Creates an C<Monitoring::Livestatus::INET> object. C<new> takes at least the server. +Arguments are the same as in C<Monitoring::Livestatus>. +If the constructor is only passed a single argument, it is assumed to +be the C<peer> specification. Use either socket OR server.
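A minimal sketch of the alarm()-guarded connect pattern that the _open() method below relies on, so that a hanging TCP connect cannot block the caller past the configured connect_timeout; connect_with_timeout is a hypothetical helper name, not part of this module:

    use strict;
    use warnings;
    use IO::Socket::INET;
    use Socket qw(SOCK_STREAM);

    sub connect_with_timeout {
        my ($peer, $timeout) = @_;
        my $sock;
        eval {
            local $SIG{ALRM} = sub { die "connection timeout\n" };
            alarm($timeout);                 # arm the watchdog
            $sock = IO::Socket::INET->new(
                PeerAddr => $peer,
                Type     => SOCK_STREAM,
                Timeout  => $timeout,
            );
            alarm(0);                        # disarm on success
        };
        alarm(0);                            # always disarm, even after a die
        die $@ if $@;                        # propagate the timeout error
        return $sock;                        # undef if the connect failed
    }

    # my $sock = connect_with_timeout('localhost:6557', 5);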
+ +=cut + +sub new { + my $class = shift; + unshift(@_, "peer") if scalar @_ == 1; + my(%options) = @_; + $options{'name'} = $options{'peer'} unless defined $options{'name'}; + + $options{'backend'} = $class; + my $self = Monitoring::Livestatus->new(%options); + bless $self, $class; + confess('not a scalar') if ref $self->{'peer'} ne ''; + + return $self; +} + + +######################################## + +=head1 METHODS + +=cut + +sub _open { + my $self = shift; + my $sock; + + eval { + local $SIG{'ALRM'} = sub { die("connection timeout"); }; + alarm($self->{'connect_timeout'}); + $sock = IO::Socket::INET->new( + PeerAddr => $self->{'peer'}, + Type => SOCK_STREAM, + Timeout => $self->{'connect_timeout'}, + ); + if(!defined $sock or !$sock->connected()) { + my $msg = "failed to connect to $self->{'peer'} :$!"; + if($self->{'errors_are_fatal'}) { + croak($msg); + } + $Monitoring::Livestatus::ErrorCode = 500; + $Monitoring::Livestatus::ErrorMessage = $msg; + alarm(0); + return; + } + + if(defined $self->{'query_timeout'}) { + # set timeout + $sock->timeout($self->{'query_timeout'}); + } + + setsockopt($sock, IPPROTO_TCP, TCP_NODELAY, 1); + + }; + alarm(0); + + if($@) { + $Monitoring::Livestatus::ErrorCode = 500; + $Monitoring::Livestatus::ErrorMessage = $@; + return; + } + + return($sock); +} + + +######################################## + +sub _close { + my $self = shift; + my $sock = shift; + return unless defined $sock; + return close($sock); +} + + +1; + +=head1 AUTHOR + +Sven Nierlein, E<lt>nierlein@cpan.orgE<gt> + +=head1 COPYRIGHT AND LICENSE + +Copyright (C) 2009 by Sven Nierlein + +This library is free software; you can redistribute it and/or modify +it under the same terms as Perl itself. + +=cut + +__END__ diff -Nru check-mk-1.2.2p3/=unpacked-tar11=/api/perl/lib/Monitoring/Livestatus/MULTI.pm check-mk-1.2.6p12/=unpacked-tar11=/api/perl/lib/Monitoring/Livestatus/MULTI.pm --- check-mk-1.2.2p3/=unpacked-tar11=/api/perl/lib/Monitoring/Livestatus/MULTI.pm 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/=unpacked-tar11=/api/perl/lib/Monitoring/Livestatus/MULTI.pm 2012-10-04 13:12:37.000000000 +0000 @@ -0,0 +1,922 @@ +package Monitoring::Livestatus::MULTI; + +use 5.000000; +use strict; +use warnings; +use Carp; +use Data::Dumper; +use Config; +use Time::HiRes qw/gettimeofday tv_interval/; +use Scalar::Util qw/looks_like_number/; +use Monitoring::Livestatus; +use base "Monitoring::Livestatus"; + +=head1 NAME + +Monitoring::Livestatus::MULTI - connector with multiple peers + +=head1 SYNOPSIS + + use Monitoring::Livestatus; + my $nl = Monitoring::Livestatus::MULTI->new( qw{nagioshost1:9999 nagioshost2:9999 /var/spool/nagios/live.socket} ); + my $hosts = $nl->selectall_arrayref("GET hosts"); + +=head1 CONSTRUCTOR + +=head2 new ( [ARGS] ) + +Creates an C<Monitoring::Livestatus::MULTI> object. C<new> takes at least the server. +Arguments are the same as in L<Monitoring::Livestatus>.
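A simplified sketch of the idea behind this MULTI connector: one object fans each query out to several single-peer connections and merges the rows in peer order. The real new() and _do_on_peers() below add thread support, timing and per-peer bookkeeping; fan_out_query is a hypothetical name for illustration only:

    use strict;
    use warnings;

    sub fan_out_query {
        my ($peers, $query) = @_;   # $peers: arrayref of INET/UNIX connection objects
        my @merged;
        for my $peer (@$peers) {
            my $rows = $peer->selectall_arrayref($query);
            push @merged, @$rows if $rows;   # plain concatenation, as in _merge_answer()
        }
        return \@merged;
    }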
+ +=cut + +sub new { + my $class = shift; + unshift(@_, "peer") if scalar @_ == 1; + my(%options) = @_; + + $options{'backend'} = $class; + my $self = Monitoring::Livestatus->new(%options); + bless $self, $class; + + if(!defined $self->{'peers'}) { + $self->{'peer'} = $self->_get_peers(); + + # set our peer(s) from the options + my %peer_options; + my $peers; + for my $opt_key (keys %options) { + $peer_options{$opt_key} = $options{$opt_key}; + } + $peer_options{'errors_are_fatal'} = 0; + for my $peer (@{$self->{'peer'}}) { + $peer_options{'name'} = $peer->{'name'}; + $peer_options{'peer'} = $peer->{'peer'}; + delete $peer_options{'socket'}; + delete $peer_options{'server'}; + + if($peer->{'type'} eq 'UNIX') { + push @{$peers}, new Monitoring::Livestatus::UNIX(%peer_options); + } + elsif($peer->{'type'} eq 'INET') { + push @{$peers}, new Monitoring::Livestatus::INET(%peer_options); + } + } + $self->{'peers'} = $peers; + delete $self->{'socket'}; + delete $self->{'server'}; + } + + if(!defined $self->{'peers'}) { + croak('please specify at least one peer, socket or server'); + } + + # dont use threads with only one peer + if(scalar @{$self->{'peers'}} == 1) { $self->{'use_threads'} = 0; } + + # check for threads support + if(!defined $self->{'use_threads'}) { + $self->{'use_threads'} = 0; + if($Config{useithreads}) { + $self->{'use_threads'} = 1; + } + } + if($self->{'use_threads'}) { + eval { + require threads; + require Thread::Queue; + }; + if($@) { + $self->{'use_threads'} = 0; + $self->{'logger'}->debug('error initializing threads: '.$@) if defined $self->{'logger'}; + } else { + $self->_start_worker; + } + } + + # initialize peer keys + $self->{'peer_by_key'} = {}; + $self->{'peer_by_addr'} = {}; + for my $peer (@{$self->{'peers'}}) { + $self->{'peer_by_key'}->{$peer->peer_key} = $peer; + $self->{'peer_by_addr'}->{$peer->peer_addr} = $peer; + } + + $self->{'name'} = 'multiple connector' unless defined $self->{'name'}; + $self->{'logger'}->debug('initialized Monitoring::Livestatus::MULTI '.($self->{'use_threads'} ? 'with' : 'without' ).' threads') if $self->{'verbose'}; + + return $self; +} + + +######################################## + +=head1 METHODS + +=head2 do + +See L<Monitoring::Livestatus> for more information. + +=cut + +sub do { + my $self = shift; + my $opts = $self->_lowercase_and_verify_options($_[1]); + my $t0 = [gettimeofday]; + + $self->_do_on_peers("do", $opts->{'backends'}, @_); + my $elapsed = tv_interval ( $t0 ); + $self->{'logger'}->debug(sprintf('%.4f', $elapsed).' sec for do('.$_[0].') in total') if $self->{'verbose'}; + return 1; +} + + +######################################## + +=head2 selectall_arrayref + +See L<Monitoring::Livestatus> for more information. + +=cut + +sub selectall_arrayref { + my $self = shift; + my $opts = $self->_lowercase_and_verify_options($_[1]); + my $t0 = [gettimeofday]; + + $self->_log_statement($_[0], $opts, 0) if $self->{'verbose'}; + + my $return = $self->_merge_answer($self->_do_on_peers("selectall_arrayref", $opts->{'backends'}, @_)); + my $elapsed = tv_interval ( $t0 ); + if($self->{'verbose'}) { + my $total_results = 0; + $total_results = scalar @{$return} if defined $return; + $self->{'logger'}->debug(sprintf('%.4f', $elapsed).' sec for selectall_arrayref() in total, results: '.$total_results); + } + + return $return; +} + + +######################################## + +=head2 selectall_hashref + +See L<Monitoring::Livestatus> for more information.
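One detail worth calling out before the remaining select* methods: answers to "Stats:" queries are counters, so the per-peer results cannot simply be concatenated; the selectrow_* methods further below sum them column-wise via _sum_answer(). A minimal stand-in for that summing (sum_rows is a hypothetical name, not the module's API):

    use strict;
    use warnings;

    sub sum_rows {
        my @peer_rows = @_;          # one arrayref of numbers per peer
        my @sum;
        for my $row (@peer_rows) {
            for my $i (0 .. $#{$row}) {
                $sum[$i] = ($sum[$i] || 0) + $row->[$i];
            }
        }
        return \@sum;
    }

    # sum_rows([2, 5], [3, 1]) yields [5, 6]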
+ +=cut + +sub selectall_hashref { + my $self = shift; + my $opts = $self->_lowercase_and_verify_options($_[2]); + my $t0 = [gettimeofday]; + + my $return = $self->_merge_answer($self->_do_on_peers("selectall_hashref", $opts->{'backends'}, @_)); + my $elapsed = tv_interval ( $t0 ); + $self->{'logger'}->debug(sprintf('%.4f', $elapsed).' sec for selectall_hashref() in total') if $self->{'verbose'}; + + return $return; +} + + +######################################## + +=head2 selectcol_arrayref + +See L for more information. + +=cut + +sub selectcol_arrayref { + my $self = shift; + my $opts = $self->_lowercase_and_verify_options($_[1]); + my $t0 = [gettimeofday]; + + my $return = $self->_merge_answer($self->_do_on_peers("selectcol_arrayref", $opts->{'backends'}, @_)); + my $elapsed = tv_interval ( $t0 ); + $self->{'logger'}->debug(sprintf('%.4f', $elapsed).' sec for selectcol_arrayref() in total') if $self->{'verbose'}; + + return $return; +} + + +######################################## + +=head2 selectrow_array + +See L for more information. + +=cut + +sub selectrow_array { + my $self = shift; + my $statement = $_[0]; + my $opts = $self->_lowercase_and_verify_options($_[1]); + my $t0 = [gettimeofday]; + my @return; + + if((defined $opts->{'sum'} and $opts->{'sum'} == 1) or (!defined $opts->{'sum'} and $statement =~ m/^Stats:/mx)) { + @return = @{$self->_sum_answer($self->_do_on_peers("selectrow_arrayref", $opts->{'backends'}, @_))}; + } else { + if($self->{'warnings'}) { + carp("selectrow_arrayref without Stats on multi backend will not work as expected!"); + } + my $rows = $self->_merge_answer($self->_do_on_peers("selectrow_arrayref", $opts->{'backends'}, @_)); + @return = @{$rows} if defined $rows; + } + + my $elapsed = tv_interval ( $t0 ); + $self->{'logger'}->debug(sprintf('%.4f', $elapsed).' sec for selectrow_array() in total') if $self->{'verbose'}; + + return @return; +} + + +######################################## + +=head2 selectrow_arrayref + +See L for more information. + +=cut + +sub selectrow_arrayref { + my $self = shift; + my $statement = $_[0]; + my $opts = $self->_lowercase_and_verify_options($_[1]); + my $t0 = [gettimeofday]; + my $return; + + if((defined $opts->{'sum'} and $opts->{'sum'} == 1) or (!defined $opts->{'sum'} and $statement =~ m/^Stats:/mx)) { + $return = $self->_sum_answer($self->_do_on_peers("selectrow_arrayref", $opts->{'backends'}, @_)); + } else { + if($self->{'warnings'}) { + carp("selectrow_arrayref without Stats on multi backend will not work as expected!"); + } + my $rows = $self->_merge_answer($self->_do_on_peers("selectrow_arrayref", $opts->{'backends'}, @_)); + $return = $rows->[0] if defined $rows->[0]; + } + + my $elapsed = tv_interval ( $t0 ); + $self->{'logger'}->debug(sprintf('%.4f', $elapsed).' sec for selectrow_arrayref() in total') if $self->{'verbose'}; + + return $return; +} + + +######################################## + +=head2 selectrow_hashref + +See L for more information. 
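+
+With Stats queries the per-peer rows are summed into a single row
+(a sketch; the Stats columns are illustrative):
+
+    my $stats = $ml->selectrow_hashref(
+        "GET hosts\nStats: state = 0\nStats: state = 1",
+        { Sum => 1 }
+    );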
+ +=cut + +sub selectrow_hashref { + my $self = shift; + my $statement = $_[0]; + my $opts = $self->_lowercase_and_verify_options($_[1]); + + my $t0 = [gettimeofday]; + + my $return; + + if((defined $opts->{'sum'} and $opts->{'sum'} == 1) or (!defined $opts->{'sum'} and $statement =~ m/^Stats:/mx)) { + $return = $self->_sum_answer($self->_do_on_peers("selectrow_hashref", $opts->{'backends'}, @_)); + } else { + if($self->{'warnings'}) { + carp("selectrow_hashref without Stats on multi backend will not work as expected!"); + } + $return = $self->_merge_answer($self->_do_on_peers("selectrow_hashref", $opts->{'backends'}, @_)); + } + + my $elapsed = tv_interval ( $t0 ); + $self->{'logger'}->debug(sprintf('%.4f', $elapsed).' sec for selectrow_hashref() in total') if $self->{'verbose'}; + + return $return; +} + + +######################################## + +=head2 selectscalar_value + +See L for more information. + +=cut + +sub selectscalar_value { + my $self = shift; + my $statement = $_[0]; + my $opts = $self->_lowercase_and_verify_options($_[1]); + + my $t0 = [gettimeofday]; + + my $return; + + if((defined $opts->{'sum'} and $opts->{'sum'} == 1) or (!defined $opts->{'sum'} and $statement =~ m/^Stats:/mx)) { + return $self->_sum_answer($self->_do_on_peers("selectscalar_value", $opts->{'backends'}, @_)); + } else { + if($self->{'warnings'}) { + carp("selectscalar_value without Stats on multi backend will not work as expected!"); + } + my $rows = $self->_merge_answer($self->_do_on_peers("selectscalar_value", $opts->{'backends'}, @_)); + + $return = $rows->[0] if defined $rows->[0]; + } + + my $elapsed = tv_interval ( $t0 ); + $self->{'logger'}->debug(sprintf('%.4f', $elapsed).' sec for selectscalar_value() in total') if $self->{'verbose'}; + + return $return; +} + + +######################################## + +=head2 errors_are_fatal + +See L for more information. + +=cut + +sub errors_are_fatal { + my $self = shift; + my $value = shift; + return $self->_change_setting('errors_are_fatal', $value); +} + + +######################################## + +=head2 warnings + +See L for more information. + +=cut + +sub warnings { + my $self = shift; + my $value = shift; + return $self->_change_setting('warnings', $value); +} + + +######################################## + +=head2 verbose + +See L for more information. + +=cut + +sub verbose { + my $self = shift; + my $value = shift; + return $self->_change_setting('verbose', $value); +} + + +######################################## + +=head2 peer_addr + +See L for more information. + +=cut + +sub peer_addr { + my $self = shift; + + my @addrs; + for my $peer (@{$self->{'peers'}}) { + push @addrs, $peer->peer_addr; + } + + return wantarray ? @addrs : undef; +} + + +######################################## + +=head2 peer_name + +See L for more information. + +=cut + +sub peer_name { + my $self = shift; + + my @names; + for my $peer (@{$self->{'peers'}}) { + push @names, $peer->peer_name; + } + + return wantarray ? @names : $self->{'name'}; +} + + +######################################## + +=head2 peer_key + +See L for more information. + +=cut + +sub peer_key { + my $self = shift; + + my @keys; + for my $peer (@{$self->{'peers'}}) { + push @keys, $peer->peer_key; + } + + return wantarray ? @keys : $self->{'key'}; +} + + +######################################## + +=head2 disable + + $ml->disable() + +disables this connection, returns the last state. 
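+
+Called without arguments all backends are disabled; with a peer key only
+that backend is disabled (a sketch, the key would come from C<peer_key>):
+
+    $ml->disable();          # disable all backends
+    $ml->disable($peer_key); # disable a single backend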
+ +=cut +sub disable { + my $self = shift; + my $peer_key = shift; + if(!defined $peer_key) { + for my $peer (@{$self->{'peers'}}) { + $peer->disable(); + } + return 1; + } else { + my $peer = $self->_get_peer_by_key($peer_key); + my $prev = $peer->{'disabled'}; + $peer->{'disabled'} = 1; + return $prev; + } +} + + +######################################## + +=head2 enable + + $ml->enable() + +enables this connection, returns the last state. + +=cut +sub enable { + my $self = shift; + my $peer_key = shift; + if(!defined $peer_key) { + for my $peer (@{$self->{'peers'}}) { + $peer->enable(); + } + return 1; + } else { + my $peer = $self->_get_peer_by_key($peer_key); + my $prev = $peer->{'disabled'}; + $peer->{'disabled'} = 0; + return $prev; + } +} + +######################################## +# INTERNAL SUBS +######################################## + +sub _change_setting { + my $self = shift; + my $key = shift; + my $value = shift; + my $old = $self->{$key}; + + # set new value + if(defined $value) { + $self->{$key} = $value; + for my $peer (@{$self->{'peers'}}) { + $peer->{$key} = $value; + } + + # restart workers + if($self->{'use_threads'}) { + _stop_worker(); + $self->_start_worker(); + } + } + + return $old; +} + + +######################################## +sub _start_worker { + my $self = shift; + + # create job transports + $self->{'WorkQueue'} = Thread::Queue->new; + $self->{'WorkResults'} = Thread::Queue->new; + + # set signal handler before thread is started + # otherwise they would be killed when started + # and stopped immediately after start + $SIG{'USR1'} = sub { threads->exit(); }; + + # start worker threads + our %threads; + my $threadcount = scalar @{$self->{'peers'}}; + for(my $x = 0; $x < $threadcount; $x++) { + $self->{'threads'}->[$x] = threads->new(\&_worker_thread, $self->{'peers'}, $self->{'WorkQueue'}, $self->{'WorkResults'}, $self->{'logger'}); + } + + # restore sig handler as it was only for the threads + $SIG{'USR1'} = 'DEFAULT'; + return; +} + + +######################################## +sub _stop_worker { + # try to kill our threads safely + eval { + for my $thr (threads->list()) { + $thr->kill('USR1')->detach(); + } + }; + return; +} + + +######################################## +sub _worker_thread { + local $SIG{'USR1'} = sub { threads->exit(); }; + + my $peers = shift; + my $workQueue = shift; + my $workResults = shift; + my $logger = shift; + + while (my $job = $workQueue->dequeue) { + my $erg; + eval { + $erg = _do_wrapper($peers->[$job->{'peer'}], $job->{'sub'}, $logger, @{$job->{'opts'}}); + }; + if($@) { + warn("Error in Thread ".$job->{'peer'}." :".$@); + $job->{'logger'}->error("Error in Thread ".$job->{'peer'}." :".$@) if defined $job->{'logger'}; + }; + $workResults->enqueue({ peer => $job->{'peer'}, result => $erg }); + } + return; +} + + +######################################## +sub _do_wrapper { + my $peer = shift; + my $sub = shift; + my $logger = shift; + my @opts = @_; + + my $t0 = [gettimeofday]; + + my $data = $peer->$sub(@opts); + + my $elapsed = tv_interval ( $t0 ); + $logger->debug(sprintf('%.4f', $elapsed).' sec for fetching data on '.$peer->peer_name.' 
('.$peer->peer_addr.')') if defined $logger; + + $Monitoring::Livestatus::ErrorCode = 0 unless defined $Monitoring::Livestatus::ErrorCode; + $Monitoring::Livestatus::ErrorMessage = '' unless defined $Monitoring::Livestatus::ErrorMessage; + my $return = { + 'msg' => $Monitoring::Livestatus::ErrorMessage, + 'code' => $Monitoring::Livestatus::ErrorCode, + 'data' => $data, + }; + return $return; +} + + +######################################## +sub _do_on_peers { + my $self = shift; + my $sub = shift; + my $backends = shift; + my @opts = @_; + my $statement = $opts[0]; + my $use_threads = $self->{'use_threads'}; + my $t0 = [gettimeofday]; + + my $return; + my %codes; + my %messages; + my $query_options; + if($sub eq 'selectall_hashref') { + $query_options = $self->_lowercase_and_verify_options($opts[2]); + } else { + $query_options = $self->_lowercase_and_verify_options($opts[1]); + } + + # which peers affected? + my @peers; + if(defined $backends) { + my @backends; + if(ref $backends eq '') { + push @backends, $backends; + } + elsif(ref $backends eq 'ARRAY') { + @backends = @{$backends}; + } else { + croak("unsupported type for backend: ".ref($backends)); + } + + for my $key (@backends) { + my $backend = $self->_get_peer_by_key($key); + push @peers, $backend unless $backend->{'disabled'}; + } + } else { + # use all backends + @peers = @{$self->{'peers'}}; + } + + # its faster without threads for only one peer + if(scalar @peers <= 1) { $use_threads = 0; } + + # if we have limits set, we cannot use threads + if(defined $query_options->{'limit_start'}) { $use_threads = 0; } + + if($use_threads) { + # use the threaded variant + $self->{'logger'}->debug('using threads') if $self->{'verbose'}; + + my $peers_to_use; + for my $peer (@peers) { + if($peer->{'disabled'}) { + # dont send any query + } + elsif($peer->marked_bad) { + warn($peer->peer_name.' ('.$peer->peer_key.') is marked bad') if $self->{'verbose'}; + } + else { + $peers_to_use->{$peer->peer_key} = 1; + } + } + my $x = 0; + for my $peer (@{$self->{'peers'}}) { + if(defined $peers_to_use->{$peer->peer_key}) { + my $job = { + 'peer' => $x, + 'sub' => $sub, + 'opts' => \@opts, + }; + $self->{'WorkQueue'}->enqueue($job); + } + $x++; + } + + for(my $x = 0; $x < scalar keys %{$peers_to_use}; $x++) { + my $result = $self->{'WorkResults'}->dequeue; + my $peer = $self->{'peers'}->[$result->{'peer'}]; + if(defined $result->{'result'}) { + push @{$codes{$result->{'result'}->{'code'}}}, { 'peer' => $peer->peer_key, 'msg' => $result->{'result'}->{'msg'} }; + $return->{$peer->peer_key} = $result->{'result'}->{'data'}; + } else { + warn("undefined result for: $statement"); + } + } + } else { + $self->{'logger'}->debug('not using threads') if $self->{'verbose'}; + for my $peer (@peers) { + if($peer->{'disabled'}) { + # dont send any query + } + elsif($peer->marked_bad) { + warn($peer->peer_name.' 
('.$peer->peer_key.') is marked bad') if $self->{'verbose'}; + } else { + my $erg = _do_wrapper($peer, $sub, $self->{'logger'}, @opts); + $return->{$peer->peer_key} = $erg->{'data'}; + push @{$codes{$erg->{'code'}}}, { 'peer' => $peer, 'msg' => $erg->{'msg'} }; + + # compute limits + if(defined $query_options->{'limit_length'} and $peer->{'meta_data'}->{'result_count'}) { + last; + } + # set a new start if we had rows already + if(defined $query_options->{'limit_start'}) { + $query_options->{'limit_start'} = $query_options->{'limit_start'} - $peer->{'meta_data'}->{'row_count'}; + } + } + } + } + + + # check if we different result stati + undef $Monitoring::Livestatus::ErrorMessage; + $Monitoring::Livestatus::ErrorCode = 0; + my @codes = sort keys %codes; + if(scalar @codes > 1) { + # got different results for our backends + if($self->{'verbose'}) { + $self->{'logger'}->warn("got different result stati: ".Dumper(\%codes)); + } + } else { + # got same result codes for all backend + } + + my $failed = 0; + my $code = $codes[0]; + if(defined $code and $code >= 300) { + $failed = 1; + } + + if($failed) { + my $msg = $codes{$code}->[0]->{'msg'}; + $self->{'logger'}->debug("same: $code -> $msg") if $self->{'verbose'}; + $Monitoring::Livestatus::ErrorMessage = $msg; + $Monitoring::Livestatus::ErrorCode = $code; + if($self->{'errors_are_fatal'}) { + croak("ERROR ".$code." - ".$Monitoring::Livestatus::ErrorMessage." in query:\n'".$statement."'\n"); + } + return; + } + + my $elapsed = tv_interval ( $t0 ); + $self->{'logger'}->debug(sprintf('%.4f', $elapsed).' sec for fetching all data') if $self->{'verbose'}; + + # deep copy result? + if($use_threads + and ( + (defined $query_options->{'deepcopy'} and $query_options->{'deepcopy'} == 1) + or + (defined $self->{'deepcopy'} and $self->{'deepcopy'} == 1) + ) + ) { + # result has to be cloned to avoid "Invalid value for shared scalar" error + + $return = $self->_clone($return, $self->{'logger'}); + } + + return($return); +} + + +######################################## +sub _merge_answer { + my $self = shift; + my $data = shift; + my $return; + + my $t0 = [gettimeofday]; + + # iterate over original peers to retain order + for my $peer (@{$self->{'peers'}}) { + my $key = $peer->peer_key; + next if !defined $data->{$key}; + + if(ref $data->{$key} eq 'ARRAY') { + $return = [] unless defined $return; + $return = [ @{$return}, @{$data->{$key}} ]; + } elsif(ref $data->{$key} eq 'HASH') { + $return = {} unless defined $return; + $return = { %{$return}, %{$data->{$key}} }; + } else { + push @{$return}, $data->{$key}; + } + } + + my $elapsed = tv_interval ( $t0 ); + $self->{'logger'}->debug(sprintf('%.4f', $elapsed).' 
sec for merging data') if $self->{'verbose'}; + + return($return); +} + + +######################################## +sub _sum_answer { + my $self = shift; + my $data = shift; + my $return; + my $t0 = [gettimeofday]; + for my $peername (keys %{$data}) { + if(ref $data->{$peername} eq 'HASH') { + for my $key (keys %{$data->{$peername}}) { + if(!defined $return->{$key}) { + $return->{$key} = $data->{$peername}->{$key}; + } elsif(looks_like_number($data->{$peername}->{$key})) { + $return->{$key} += $data->{$peername}->{$key}; + } + } + } + elsif(ref $data->{$peername} eq 'ARRAY') { + my $x = 0; + for my $val (@{$data->{$peername}}) { + if(!defined $return->[$x]) { + $return->[$x] = $data->{$peername}->[$x]; + } else { + $return->[$x] += $data->{$peername}->[$x]; + } + $x++; + } + } elsif(defined $data->{$peername}) { + $return = 0 unless defined $return; + next unless defined $data->{$peername}; + $return += $data->{$peername}; + } + } + + my $elapsed = tv_interval ( $t0 ); + $self->{'logger'}->debug(sprintf('%.4f', $elapsed).' sec for summarizing data') if $self->{'verbose'}; + + return $return; +} + + +######################################## +sub _clone { + my $self = shift; + my $data = shift; + my $logger = shift; + my $t0 = [gettimeofday]; + + my $return; + if(ref $data eq '') { + $return = $data; + } + elsif(ref $data eq 'ARRAY') { + $return = []; + for my $dat (@{$data}) { + push @{$return}, $self->_clone($dat); + } + } + elsif(ref $data eq 'HASH') { + $return = {}; + for my $key (keys %{$data}) { + $return->{$key} = $self->_clone($data->{$key}); + } + } + else { + croak("cant clone: ".(ref $data)); + } + + my $elapsed = tv_interval ( $t0 ); + $logger->debug(sprintf('%.4f', $elapsed).' sec for cloning data') if defined $logger; + + return $return; +} + + +######################################## +sub _get_peer_by_key { + my $self = shift; + my $key = shift; + + return unless defined $key; + return unless defined $self->{'peer_by_key'}->{$key}; + + return $self->{'peer_by_key'}->{$key}; +} + + +######################################## +sub _get_peer_by_addr { + my $self = shift; + my $addr = shift; + + return unless defined $addr; + return unless defined $self->{'peer_by_addr'}->{$addr}; + + return $self->{'peer_by_addr'}->{$addr}; +} + + +######################################## + +END { + # try to kill our threads safely + _stop_worker(); +} + +######################################## + +1; + +=head1 AUTHOR + +Sven Nierlein, Enierlein@cpan.orgE + +=head1 COPYRIGHT AND LICENSE + +Copyright (C) 2009 by Sven Nierlein + +This library is free software; you can redistribute it and/or modify +it under the same terms as Perl itself. 
+
+=cut
+
+__END__
diff -Nru check-mk-1.2.2p3/=unpacked-tar11=/api/perl/lib/Monitoring/Livestatus/UNIX.pm check-mk-1.2.6p12/=unpacked-tar11=/api/perl/lib/Monitoring/Livestatus/UNIX.pm
--- check-mk-1.2.2p3/=unpacked-tar11=/api/perl/lib/Monitoring/Livestatus/UNIX.pm 1970-01-01 00:00:00.000000000 +0000
+++ check-mk-1.2.6p12/=unpacked-tar11=/api/perl/lib/Monitoring/Livestatus/UNIX.pm 2012-10-04 13:12:37.000000000 +0000
@@ -0,0 +1,112 @@
+package Monitoring::Livestatus::UNIX;
+
+use 5.000000;
+use strict;
+use warnings;
+use IO::Socket::UNIX;
+use Carp;
+use base "Monitoring::Livestatus";
+
+=head1 NAME
+
+Monitoring::Livestatus::UNIX - connector with unix sockets
+
+=head1 SYNOPSIS
+
+    use Monitoring::Livestatus;
+    my $nl = Monitoring::Livestatus::UNIX->new( '/var/lib/livestatus/livestatus.sock' );
+    my $hosts = $nl->selectall_arrayref("GET hosts");
+
+=head1 CONSTRUCTOR
+
+=head2 new ( [ARGS] )
+
+Creates an C<Monitoring::Livestatus::UNIX> object. C<new> takes at least the socketpath.
+Arguments are the same as in C<Monitoring::Livestatus>.
+If the constructor is only passed a single argument, it is assumed to
+be the C<peer> specification. Use either socket OR server.
+
+=cut
+
+sub new {
+    my $class = shift;
+    unshift(@_, "peer") if scalar @_ == 1;
+    my(%options) = @_;
+    $options{'name'} = $options{'peer'} unless defined $options{'name'};
+
+    $options{'backend'} = $class;
+    my $self = Monitoring::Livestatus->new(%options);
+    bless $self, $class;
+    confess('not a scalar') if ref $self->{'peer'} ne '';
+
+    return $self;
+}
+
+
+########################################
+
+=head1 METHODS
+
+=cut
+
+sub _open {
+    my $self = shift;
+
+    if(!-S $self->{'peer'}) {
+        my $msg = "failed to open socket $self->{'peer'}: $!";
+        if($self->{'errors_are_fatal'}) {
+            croak($msg);
+        }
+        $Monitoring::Livestatus::ErrorCode    = 500;
+        $Monitoring::Livestatus::ErrorMessage = $msg;
+        return;
+    }
+    my $sock = IO::Socket::UNIX->new(
+        Peer => $self->{'peer'},
+        Type => SOCK_STREAM,
+    );
+    if(!defined $sock or !$sock->connected()) {
+        my $msg = "failed to connect to $self->{'peer'}: $!";
+        if($self->{'errors_are_fatal'}) {
+            croak($msg);
+        }
+        $Monitoring::Livestatus::ErrorCode    = 500;
+        $Monitoring::Livestatus::ErrorMessage = $msg;
+        return;
+    }
+
+    if(defined $self->{'query_timeout'}) {
+        # set query timeout on the connected socket
+        $sock->timeout($self->{'query_timeout'});
+    }
+
+    return($sock);
+}
+
+
+########################################
+
+sub _close {
+    my $self = shift;
+    my $sock = shift;
+    return unless defined $sock;
+    return close($sock);
+}
+
+
+1;
+
+=head1 AUTHOR
+
+Sven Nierlein, E<lt>nierlein@cpan.orgE<gt>
+
+=head1 COPYRIGHT AND LICENSE
+
+Copyright (C) 2009 by Sven Nierlein
+
+This library is free software; you can redistribute it and/or modify
+it under the same terms as Perl itself.
+
+=cut
+
+__END__
diff -Nru check-mk-1.2.2p3/=unpacked-tar11=/api/perl/lib/Monitoring/Livestatus.pm check-mk-1.2.6p12/=unpacked-tar11=/api/perl/lib/Monitoring/Livestatus.pm
--- check-mk-1.2.2p3/=unpacked-tar11=/api/perl/lib/Monitoring/Livestatus.pm 1970-01-01 00:00:00.000000000 +0000
+++ check-mk-1.2.6p12/=unpacked-tar11=/api/perl/lib/Monitoring/Livestatus.pm 2012-10-04 13:12:37.000000000 +0000
@@ -0,0 +1,1564 @@
+package Monitoring::Livestatus;
+
+use 5.006;
+use strict;
+use warnings;
+use Data::Dumper;
+use Carp;
+use Digest::MD5 qw(md5_hex);
+use Monitoring::Livestatus::INET;
+use Monitoring::Livestatus::UNIX;
+use Monitoring::Livestatus::MULTI;
+use Encode;
+use JSON::XS;
+
+our $VERSION = '0.74';
+
+
+=head1 NAME
+
+Monitoring::Livestatus - Perl API for check_mk livestatus to access runtime
+data from Nagios and Icinga
+
+=head1 SYNOPSIS
+
+    use Monitoring::Livestatus;
+    my $ml = Monitoring::Livestatus->new(
+        socket => '/var/lib/livestatus/livestatus.sock'
+    );
+    my $hosts = $ml->selectall_arrayref("GET hosts");
+
+=head1 DESCRIPTION
+
+This module connects via socket/tcp to the check_mk livestatus addon for Nagios
+and Icinga. You first have to install and activate the mklivestatus addon in your
+monitoring installation.
+
+=head1 CONSTRUCTOR
+
+=head2 new ( [ARGS] )
+
+Creates an C<Monitoring::Livestatus> object. C<new> takes at least the
+socketpath. Arguments are in key-value pairs.
+See L<Monitoring::Livestatus::MULTI> for more complex variants.
+
+=over 4
+
+=item socket
+
+path to the UNIX socket of check_mk livestatus
+
+=item server
+
+use this server for a TCP connection
+
+=item peer
+
+alternative way to set socket or server; if the value contains ':' server is
+used, else socket
+
+=item name
+
+human readable name for this connection, defaults to the socket/server
+address
+
+=item verbose
+
+verbose mode
+
+=item line_seperator
+
+ASCII code of the line separator, defaults to 10 (newline)
+
+=item column_seperator
+
+ASCII code of the column separator, defaults to 0 (null byte)
+
+=item list_seperator
+
+ASCII code of the list separator, defaults to 44 (comma)
+
+=item host_service_seperator
+
+ASCII code of the host/service separator, defaults to 124 (pipe)
+
+=item keepalive
+
+enable keepalive. Default is off
+
+=item errors_are_fatal
+
+errors will die with an error message. Default: on
+
+=item warnings
+
+show warnings;
+currently only queries without a Columns: header will result in a warning
+
+=item timeout
+
+set a general timeout, used for connects and queries; no default
+
+=item query_timeout
+
+set a query timeout, used for retrieving queries; default 60sec
+
+=item connect_timeout
+
+set a connect timeout, used for initial connections; default 5sec
+
+=item use_threads
+
+only used with multiple backend connections.
+Default is to not use threads where available, as threads in Perl
+cause problems with tied result sets and use more memory.
+Queries are usually faster without threads, except for very slow backend
+connections.
+
+=back
+
+If the constructor is only passed a single argument, it is assumed to
+be the C<peer> specification. Use either socket OR server.
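+
+A minimal single-socket construction for illustration (the socket path
+is an example, not a required location):
+
+    my $ml = Monitoring::Livestatus->new(
+        socket           => '/var/lib/livestatus/livestatus.sock',
+        errors_are_fatal => 0,
+        query_timeout    => 30,
+    );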
+ +=cut + +sub new { + my $class = shift; + unshift(@_, "peer") if scalar @_ == 1; + my(%options) = @_; + + my $self = { + "verbose" => 0, # enable verbose output + "socket" => undef, # use unix sockets + "server" => undef, # use tcp connections + "peer" => undef, # use for socket / server connections + "name" => undef, # human readable name + "line_seperator" => 10, # defaults to newline + "column_seperator" => 0, # defaults to null byte + "list_seperator" => 44, # defaults to comma + "host_service_seperator" => 124, # defaults to pipe + "keepalive" => 0, # enable keepalive? + "errors_are_fatal" => 1, # die on errors + "backend" => undef, # should be keept undef, used internally + "timeout" => undef, # timeout for tcp connections + "query_timeout" => 60, # query timeout for tcp connections + "connect_timeout" => 5, # connect timeout for tcp connections + "timeout" => undef, # timeout for tcp connections + "use_threads" => undef, # use threads, default is to use threads where available + "warnings" => 1, # show warnings, for example on querys without Column: Header + "logger" => undef, # logger object used for statistical informations and errors / warnings + "deepcopy" => undef, # copy result set to avoid errors with tied structures + "disabled" => 0, # if disabled, this peer will not receive any query + "retries_on_connection_error" => 3, # retry x times to connect + "retry_interval" => 1, # retry after x seconds + }; + + for my $opt_key (keys %options) { + if(exists $self->{$opt_key}) { + $self->{$opt_key} = $options{$opt_key}; + } + else { + croak("unknown option: $opt_key"); + } + } + + if($self->{'verbose'} and !defined $self->{'logger'}) { + croak('please specify a logger object when using verbose mode'); + $self->{'verbose'} = 0; + } + + # setting a general timeout? + if(defined $self->{'timeout'}) { + $self->{'query_timeout'} = $self->{'timeout'}; + $self->{'connect_timeout'} = $self->{'timeout'}; + } + + bless $self, $class; + + # set our peer(s) from the options + my $peers = $self->_get_peers(); + + if(!defined $peers) { + croak('please specify at least one peer, socket or server'); + } + + if(!defined $self->{'backend'}) { + if(scalar @{$peers} == 1) { + my $peer = $peers->[0]; + $options{'name'} = $peer->{'name'}; + $options{'peer'} = $peer->{'peer'}; + if($peer->{'type'} eq 'UNIX') { + $self->{'CONNECTOR'} = new Monitoring::Livestatus::UNIX(%options); + } + elsif($peer->{'type'} eq 'INET') { + $self->{'CONNECTOR'} = new Monitoring::Livestatus::INET(%options); + } + $self->{'peer'} = $peer->{'peer'}; + } + else { + $options{'peer'} = $peers; + return new Monitoring::Livestatus::MULTI(%options); + } + } + + # set names and peer for non multi backends + if(defined $self->{'CONNECTOR'}->{'name'} and !defined $self->{'name'}) { + $self->{'name'} = $self->{'CONNECTOR'}->{'name'}; + } + if(defined $self->{'CONNECTOR'}->{'peer'} and !defined $self->{'peer'}) { + $self->{'peer'} = $self->{'CONNECTOR'}->{'peer'}; + } + + if($self->{'verbose'} and (!defined $self->{'backend'} or $self->{'backend'} ne 'Monitoring::Livestatus::MULTI')) { + $self->{'logger'}->debug('initialized Monitoring::Livestatus ('.$self->peer_name.')'); + } + + return $self; +} + + +######################################## + +=head1 METHODS + +=head2 do + + do($statement) + do($statement, %opts) + +Send a single statement without fetching the result. +Always returns true. 
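+
+Typically used for external commands (a sketch; the command itself is an
+example of the livestatus COMMAND syntax):
+
+    $ml->do("COMMAND [".time()."] ENABLE_NOTIFICATIONS");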
+
+=cut
+
+sub do {
+    my $self      = shift;
+    my $statement = shift;
+    return if $self->{'disabled'};
+    $self->_send($statement);
+    return(1);
+}
+
+
+########################################
+
+=head2 selectall_arrayref
+
+    selectall_arrayref($statement)
+    selectall_arrayref($statement, %opts)
+    selectall_arrayref($statement, %opts, $limit )
+
+Sends a query and returns an array reference of arrays
+
+    my $arr_refs = $ml->selectall_arrayref("GET hosts");
+
+to get an array of hash references do something like
+
+    my $hash_refs = $ml->selectall_arrayref(
+        "GET hosts", { Slice => {} }
+    );
+
+to get an array of hash references from the first 2 returned rows only
+
+    my $hash_refs = $ml->selectall_arrayref(
+        "GET hosts", { Slice => {} }, 2
+    );
+
+use limit to limit the result to this number of rows
+
+column aliases can be defined with a rename hash
+
+    my $hash_refs = $ml->selectall_arrayref(
+        "GET hosts", {
+            Slice  => {},
+            rename => {
+                'name' => 'host_name'
+            }
+        }
+    );
+
+=cut
+
+sub selectall_arrayref {
+    my $self      = shift;
+    my $statement = shift;
+    my $opt       = shift;
+    my $limit     = shift || 0;
+    return if $self->{'disabled'};
+    my $result;
+
+    # make opt hash keys lowercase
+    $opt = $self->_lowercase_and_verify_options($opt);
+
+    $self->_log_statement($statement, $opt, $limit) if $self->{'verbose'};
+
+    $result = $self->_send($statement, $opt);
+
+    if(!defined $result) {
+        return unless $self->{'errors_are_fatal'};
+        croak("got undef result for: $statement");
+    }
+
+    # trim result set down to expected row count
+    if(defined $limit and $limit >= 1) {
+        if(scalar @{$result->{'result'}} > $limit) {
+            @{$result->{'result'}} = @{$result->{'result'}}[0..$limit-1];
+        }
+    }
+
+    if($opt->{'slice'}) {
+        # make an array of hashes
+        my @hash_refs;
+        for my $res (@{$result->{'result'}}) {
+            my $hash_ref;
+            for(my $x = 0; $x < scalar @{$result->{'keys'}}; $x++) {
+                my $key = $result->{'keys'}->[$x];
+                if(exists $opt->{'rename'} and defined $opt->{'rename'}->{$key}) {
+                    $key = $opt->{'rename'}->{$key};
+                }
+                $hash_ref->{$key} = $res->[$x];
+            }
+            # add callbacks
+            if(exists $opt->{'callbacks'}) {
+                for my $key (keys %{$opt->{'callbacks'}}) {
+                    $hash_ref->{$key} = $opt->{'callbacks'}->{$key}->($hash_ref);
+                }
+            }
+            push @hash_refs, $hash_ref;
+        }
+        return(\@hash_refs);
+    }
+    elsif(exists $opt->{'callbacks'}) {
+        for my $res (@{$result->{'result'}}) {
+            # add callbacks
+            if(exists $opt->{'callbacks'}) {
+                for my $key (keys %{$opt->{'callbacks'}}) {
+                    push @{$res}, $opt->{'callbacks'}->{$key}->($res);
+                }
+            }
+        }
+    }
+
+    if(exists $opt->{'callbacks'}) {
+        for my $key (keys %{$opt->{'callbacks'}}) {
+            push @{$result->{'keys'}}, $key;
+        }
+    }
+
+    return($result->{'result'});
+}
+
+
+########################################
+
+=head2 selectall_hashref
+
+    selectall_hashref($statement, $key_field)
+    selectall_hashref($statement, $key_field, %opts)
+
+Sends a query and returns a hashref with the given key
+
+    my $hashrefs = $ml->selectall_hashref("GET hosts", "name");
+
+=cut
+
+sub selectall_hashref {
+    my $self      = shift;
+    my $statement = shift;
+    my $key_field = shift;
+    my $opt       = shift;
+
+    $opt = $self->_lowercase_and_verify_options($opt);
+
+    $opt->{'slice'} = 1;
+
+    croak("key is required for selectall_hashref") if !defined $key_field;
+
+    my $result = $self->selectall_arrayref($statement, $opt);
+
+    my %indexed;
+    for my $row (@{$result}) {
+        if($key_field eq '$peername') {
+            $indexed{$self->peer_name} = $row;
+        }
+        elsif(!defined $row->{$key_field}) {
+            my @possible_keys = keys %{$row};
+            croak("key $key_field not found in result set, possible keys are: ".join(', ', sort @possible_keys));
+        } else {
+            $indexed{$row->{$key_field}} = $row;
+        }
+    }
+    return(\%indexed);
+}
+
+
+########################################
+
+=head2 selectcol_arrayref
+
+    selectcol_arrayref($statement)
+    selectcol_arrayref($statement, %opt )
+
+Sends a query and returns an arrayref of the first column
+
+    my $array_ref = $ml->selectcol_arrayref("GET hosts\nColumns: name");
+
+    $VAR1 = [
+        'localhost',
+        'gateway',
+    ];
+
+returns an empty array if nothing was found
+
+to get a different column use this
+
+    my $array_ref = $ml->selectcol_arrayref(
+        "GET hosts\nColumns: name contacts",
+        { Columns => [2] }
+    );
+
+you can link 2 columns in a hash result set
+
+    my %hash = @{
+        $ml->selectcol_arrayref(
+            "GET hosts\nColumns: name contacts",
+            { Columns => [1,2] }
+        )
+    };
+
+produces a hash with the host/contact association
+
+    $VAR1 = {
+        'localhost' => 'user1',
+        'gateway'   => 'user2'
+    };
+
+=cut
+
+sub selectcol_arrayref {
+    my $self      = shift;
+    my $statement = shift;
+    my $opt       = shift;
+
+    # make opt hash keys lowercase
+    $opt = $self->_lowercase_and_verify_options($opt);
+
+    # if no columns are set, use just the first one
+    if(!defined $opt->{'columns'} or ref $opt->{'columns'} ne 'ARRAY') {
+        @{$opt->{'columns'}} = qw{1};
+    }
+
+    my $result = $self->selectall_arrayref($statement);
+
+    my @column;
+    for my $row (@{$result}) {
+        for my $nr (@{$opt->{'columns'}}) {
+            push @column, $row->[$nr-1];
+        }
+    }
+    return(\@column);
+}
+
+
+########################################
+
+=head2 selectrow_array
+
+    selectrow_array($statement)
+    selectrow_array($statement, %opts)
+
+Sends a query and returns an array for the first row
+
+    my @array = $ml->selectrow_array("GET hosts");
+
+returns undef if nothing was found
+
+=cut
+sub selectrow_array {
+    my $self      = shift;
+    my $statement = shift;
+    my $opt       = shift;
+
+    # make opt hash keys lowercase
+    $opt = $self->_lowercase_and_verify_options($opt);
+
+    my @result = @{$self->selectall_arrayref($statement, $opt, 1)};
+    return @{$result[0]} if scalar @result > 0;
+    return;
+}
+
+
+########################################
+
+=head2 selectrow_arrayref
+
+    selectrow_arrayref($statement)
+    selectrow_arrayref($statement, %opts)
+
+Sends a query and returns an array reference for the first row
+
+    my $arrayref = $ml->selectrow_arrayref("GET hosts");
+
+returns undef if nothing was found
+
+=cut
+sub selectrow_arrayref {
+    my $self      = shift;
+    my $statement = shift;
+    my $opt       = shift;
+
+    # make opt hash keys lowercase
+    $opt = $self->_lowercase_and_verify_options($opt);
+
+    my $result = $self->selectall_arrayref($statement, $opt, 1);
+    return if !defined $result;
+    return $result->[0] if scalar @{$result} > 0;
+    return;
+}
+
+
+########################################
+
+=head2 selectrow_hashref
+
+    selectrow_hashref($statement)
+    selectrow_hashref($statement, %opt)
+
+Sends a query and returns a hash reference for the first row
+
+    my $hashref = $ml->selectrow_hashref("GET hosts");
+
+returns undef if nothing was found
+
+=cut
+sub selectrow_hashref {
+    my $self      = shift;
+    my $statement = shift;
+    my $opt       = shift;
+
+    # make opt hash keys lowercase
+    $opt = $self->_lowercase_and_verify_options($opt);
+    $opt->{slice} = 1;
+
+    my $result = $self->selectall_arrayref($statement, $opt, 1);
+    return if !defined $result;
+    return $result->[0] if scalar @{$result} > 0;
+    return;
+}
+
+
+########################################
+
+=head2 selectscalar_value
+
+    selectscalar_value($statement)
+    selectscalar_value($statement, %opt)
+
+Sends a query and
returns a single scalar + + my $count = $ml->selectscalar_value("GET hosts\nStats: state = 0"); + +returns undef if nothing was found + +=cut +sub selectscalar_value { + my $self = shift; + my $statement = shift; + my $opt = shift; + + # make opt hash keys lowercase + $opt = $self->_lowercase_and_verify_options($opt); + + my $row = $self->selectrow_arrayref($statement); + return if !defined $row; + return $row->[0] if scalar @{$row} > 0; + return; +} + +######################################## + +=head2 errors_are_fatal + + errors_are_fatal() + errors_are_fatal($value) + +Enable or disable fatal errors. When enabled the module will croak on any error. + +returns the current setting if called without new value + +=cut +sub errors_are_fatal { + my $self = shift; + my $value = shift; + my $old = $self->{'errors_are_fatal'}; + + $self->{'errors_are_fatal'} = $value; + $self->{'CONNECTOR'}->{'errors_are_fatal'} = $value if defined $self->{'CONNECTOR'}; + + return $old; +} + +######################################## + +=head2 warnings + + warnings() + warnings($value) + +Enable or disable warnings. When enabled the module will carp on warnings. + +returns the current setting if called without new value + +=cut +sub warnings { + my $self = shift; + my $value = shift; + my $old = $self->{'warnings'}; + + $self->{'warnings'} = $value; + $self->{'CONNECTOR'}->{'warnings'} = $value if defined $self->{'CONNECTOR'}; + + return $old; +} + + + +######################################## + +=head2 verbose + + verbose() + verbose($values) + +Enable or disable verbose output. When enabled the module will dump out debug output + +returns the current setting if called without new value + +=cut +sub verbose { + my $self = shift; + my $value = shift; + my $old = $self->{'verbose'}; + + $self->{'verbose'} = $value; + $self->{'CONNECTOR'}->{'verbose'} = $value if defined $self->{'CONNECTOR'}; + + return $old; +} + + +######################################## + +=head2 peer_addr + + $ml->peer_addr() + +returns the current peer address + +when using multiple backends, a list of all addresses is returned in list context + +=cut +sub peer_addr { + my $self = shift; + + return "".$self->{'peer'}; +} + + +######################################## + +=head2 peer_name + + $ml->peer_name() + $ml->peer_name($string) + +if new value is set, name is set to this value + +always returns the current peer name + +when using multiple backends, a list of all names is returned in list context + +=cut +sub peer_name { + my $self = shift; + my $value = shift; + + if(defined $value and $value ne '') { + $self->{'name'} = $value; + } + + return "".$self->{'name'}; +} + + +######################################## + +=head2 peer_key + + $ml->peer_key() + +returns a uniq key for this peer + +when using multiple backends, a list of all keys is returned in list context + +=cut +sub peer_key { + my $self = shift; + + if(!defined $self->{'key'}) { $self->{'key'} = md5_hex($self->peer_addr." ".$self->peer_name); } + + return $self->{'key'}; +} + + +######################################## + +=head2 marked_bad + + $ml->marked_bad() + +returns true if the current connection is marked down + +=cut +sub marked_bad { + my $self = shift; + + return 0; +} + + +######################################## + +=head2 disable + + $ml->disable() + +disables this connection, returns the last state. 
+ +=cut +sub disable { + my $self = shift; + my $prev = $self->{'disabled'}; + $self->{'disabled'} = 1; + return $prev; +} + + +######################################## + +=head2 enable + + $ml->enable() + +enables this connection, returns the last state. + +=cut +sub enable { + my $self = shift; + my $prev = $self->{'disabled'}; + $self->{'disabled'} = 0; + return $prev; +} + +######################################## +# INTERNAL SUBS +######################################## +sub _send { + my $self = shift; + my $statement = shift; + my $opt = shift; + + delete $self->{'meta_data'}; + + my $header = ""; + my $keys; + + my $with_peers = 0; + if(defined $opt->{'addpeer'} and $opt->{'addpeer'}) { + $with_peers = 1; + } + + $Monitoring::Livestatus::ErrorCode = 0; + undef $Monitoring::Livestatus::ErrorMessage; + + return(490, $self->_get_error(490), undef) if !defined $statement; + chomp($statement); + + my($status,$msg,$body); + if($statement =~ m/^Separators:/mx) { + $status = 492; + $msg = $self->_get_error($status); + } + + elsif($statement =~ m/^KeepAlive:/mx) { + $status = 496; + $msg = $self->_get_error($status); + } + + elsif($statement =~ m/^ResponseHeader:/mx) { + $status = 495; + $msg = $self->_get_error($status); + } + + elsif($statement =~ m/^ColumnHeaders:/mx) { + $status = 494; + $msg = $self->_get_error($status); + } + + elsif($statement =~ m/^OuputFormat:/mx) { + $status = 493; + $msg = $self->_get_error($status); + } + + # should be cought in mlivestatus directly + elsif($statement =~ m/^Limit:\ (.*)$/mx and $1 !~ m/^\d+$/mx) { + $status = 403; + $msg = $self->_get_error($status); + } + elsif($statement =~ m/^GET\ (.*)$/mx and $1 =~ m/^\s*$/mx) { + $status = 403; + $msg = $self->_get_error($status); + } + + elsif($statement =~ m/^Columns:\ (.*)$/mx and ($1 =~ m/,/mx or $1 =~ /^\s*$/mx)) { + $status = 405; + $msg = $self->_get_error($status); + } + elsif($statement !~ m/^GET\ /mx and $statement !~ m/^COMMAND\ /mx) { + $status = 401; + $msg = $self->_get_error($status); + } + + else { + + # Add Limits header + if(defined $opt->{'limit_start'}) { + $statement .= "\nLimit: ".($opt->{'limit_start'} + $opt->{'limit_length'}); + } + + # for querys with column header, no seperate columns will be returned + if($statement =~ m/^Columns:\ (.*)$/mx) { + ($statement,$keys) = $self->_extract_keys_from_columns_header($statement); + } elsif($statement =~ m/^Stats:\ (.*)$/mx or $statement =~ m/^StatsGroupBy:\ (.*)$/mx) { + ($statement,$keys) = $self->_extract_keys_from_stats_statement($statement); + } + + # Commands need no additional header + if($statement !~ m/^COMMAND/mx) { + $header .= "OutputFormat: json\n"; + $header .= "ResponseHeader: fixed16\n"; + if($self->{'keepalive'}) { + $header .= "KeepAlive: on\n"; + } + # remove empty lines from statement + $statement =~ s/\n+/\n/gmx; + } + + # add additional headers + if(defined $opt->{'header'} and ref $opt->{'header'} eq 'HASH') { + for my $key ( keys %{$opt->{'header'}}) { + $header .= $key.": ".$opt->{'header'}->{$key}."\n"; + } + } + + chomp($statement); + my $send = "$statement\n$header"; + $self->{'logger'}->debug("> ".Dumper($send)) if $self->{'verbose'}; + ($status,$msg,$body) = $self->_send_socket($send); + if($self->{'verbose'}) { + #$self->{'logger'}->debug("got:"); + #$self->{'logger'}->debug(Dumper(\@erg)); + $self->{'logger'}->debug("status: ".Dumper($status)); + $self->{'logger'}->debug("msg: ".Dumper($msg)); + $self->{'logger'}->debug("< ".Dumper($body)); + } + } + + if($status >= 300) { + $body = '' if !defined $body; + 
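+        # error path: normalize the body and expose the failure through
+        # the package-wide error variables before optionally croaking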
chomp($body); + $Monitoring::Livestatus::ErrorCode = $status; + if(defined $body and $body ne '') { + $Monitoring::Livestatus::ErrorMessage = $body; + } else { + $Monitoring::Livestatus::ErrorMessage = $msg; + } + $self->{'logger'}->error($status." - ".$Monitoring::Livestatus::ErrorMessage." in query:\n'".$statement) if $self->{'verbose'}; + if($self->{'errors_are_fatal'}) { + croak("ERROR ".$status." - ".$Monitoring::Livestatus::ErrorMessage." in query:\n'".$statement."'\n"); + } + return; + } + + # return a empty result set if nothing found + return({ keys => [], result => []}) if !defined $body; + + my $line_seperator = chr($self->{'line_seperator'}); + my $col_seperator = chr($self->{'column_seperator'}); + + my $peer_name = $self->peer_name; + my $peer_addr = $self->peer_addr; + my $peer_key = $self->peer_key; + + my $limit_start = 0; + if(defined $opt->{'limit_start'}) { $limit_start = $opt->{'limit_start'}; } + my $result; + # fix json output + $body =~ s/\],\n\]\n$/]]/mx; + eval { + $result = decode_json($body); + }; + if($@) { + my $message = "ERROR ".$@." in text: '".$body."'\" for statement: '$statement'\n"; + $self->{'logger'}->error($message) if $self->{'verbose'}; + if($self->{'errors_are_fatal'}) { + croak($message); + } + } + + # for querys with column header, no separate columns will be returned + if(!defined $keys) { + $self->{'logger'}->warn("got statement without Columns: header!") if $self->{'verbose'}; + if($self->{'warnings'}) { + carp("got statement without Columns: header! -> ".$statement); + } + $keys = shift @{$result}; + } + + # add peer information? + if(defined $with_peers and $with_peers == 1) { + unshift @{$keys}, 'peer_name'; + unshift @{$keys}, 'peer_addr'; + unshift @{$keys}, 'peer_key'; + + for my $row (@{$result}) { + unshift @{$row}, $peer_name; + unshift @{$row}, $peer_addr; + unshift @{$row}, $peer_key; + } + } + + # set some metadata + $self->{'meta_data'} = { + 'result_count' => scalar @${result}, + }; + + return({ keys => $keys, result => $result }); +} + +######################################## +sub _open { + my $self = shift; + my $statement = shift; + + # return the current socket in keep alive mode + if($self->{'keepalive'} and defined $self->{'sock'} and $self->{'sock'}->connected) { + $self->{'logger'}->debug("reusing old connection") if $self->{'verbose'}; + return($self->{'sock'}); + } + + my $sock = $self->{'CONNECTOR'}->_open(); + + # store socket for later retrieval + if($self->{'keepalive'}) { + $self->{'sock'} = $sock; + } + + $self->{'logger'}->debug("using new connection") if $self->{'verbose'}; + return($sock); +} + +######################################## +sub _close { + my $self = shift; + my $sock = shift; + undef $self->{'sock'}; + return($self->{'CONNECTOR'}->_close($sock)); +} + + +######################################## + +=head1 QUERY OPTIONS + +In addition to the normal query syntax from the livestatus addon, it is +possible to set column aliases in various ways. + +=head2 AddPeer + +adds the peers name, addr and key to the result set: + + my $hosts = $ml->selectall_hashref( + "GET hosts\nColumns: name alias state", + "name", + { AddPeer => 1 } + ); + +=head2 Backend + +send the query only to some specific backends. Only +useful when using multiple backends. 
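+
+The keys here are the values returned by C<peer_key> (a sketch):
+
+    my @keys = $ml->peer_key(); # in list context: one key per backend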
+ + my $hosts = $ml->selectall_arrayref( + "GET hosts\nColumns: name alias state", + { Backends => [ 'key1', 'key4' ] } + ); + +=head2 Columns + + only return the given column indexes + + my $array_ref = $ml->selectcol_arrayref( + "GET hosts\nColumns: name contacts", + { Columns => [2] } + ); + + see L for more examples + +=head2 Deepcopy + + deep copy/clone the result set. + + Only effective when using multiple backends and threads. + This can be safely turned off if you dont change the + result set. + If you get an error like "Invalid value for shared scalar" error" this + should be turned on. + + my $array_ref = $ml->selectcol_arrayref( + "GET hosts\nColumns: name contacts", + { Deepcopy => 1 } + ); + +=head2 Limit + + Just like the Limit: option from livestatus itself. + In addition you can add a start,length limit. + + my $array_ref = $ml->selectcol_arrayref( + "GET hosts\nColumns: name contacts", + { Limit => "10,20" } + ); + + This example will return 20 rows starting at row 10. You will + get row 10-30. + + Cannot be combined with a Limit inside the query + because a Limit will be added automatically. + + Adding a limit this way will greatly increase performance and + reduce memory usage. + + This option is multibackend safe contrary to the "Limit: " part of a statement. + Sending a statement like "GET...Limit: 10" with 3 backends will result in 30 rows. + Using this options, you will receive only the first 10 rows. + +=head2 Rename + + see L for detailed explainaton + +=head2 Slice + + see L for detailed explainaton + +=head2 Sum + +The Sum option only applies when using multiple backends. +The values from all backends with be summed up to a total. + + my $stats = $ml->selectrow_hashref( + "GET hosts\nStats: state = 0\nStats: state = 1", + { Sum => 1 } + ); + +=cut + + +######################################## +# wrapper around _send_socket_do +sub _send_socket { + my $self = shift; + my $statement = shift; + + my $retries = 0; + my($status, $msg, $recv); + + + # try to avoid connection errors + eval { + local $SIG{PIPE} = sub { + die("broken pipe"); + $self->{'logger'}->debug("broken pipe, closing socket") if $self->{'verbose'}; + $self->_close($self->{'sock'}); + }; + + if($self->{'retries_on_connection_error'} <= 0) { + ($status, $msg, $recv) = $self->_send_socket_do($statement); + return; + } + + while((!defined $status or ($status == 491 or $status == 497 or $status == 500)) and $retries < $self->{'retries_on_connection_error'}) { + $retries++; + ($status, $msg, $recv) = $self->_send_socket_do($statement); + $self->{'logger'}->debug('query status '.$status) if $self->{'verbose'}; + if($status == 491 or $status == 497 or $status == 500) { + $self->{'logger'}->debug('got status '.$status.' retrying in '.$self->{'retry_interval'}.' 
seconds') if $self->{'verbose'}; + $self->_close(); + sleep($self->{'retry_interval'}) if $retries < $self->{'retries_on_connection_error'}; + } + } + }; + if($@) { + $self->{'logger'}->debug("try 1 failed: $@") if $self->{'verbose'}; + if(defined $@ and $@ =~ /broken\ pipe/mx) { + return $self->_send_socket_do($statement); + } + croak($@) if $self->{'errors_are_fatal'}; + } + + croak($msg) if($status >= 400 and $self->{'errors_are_fatal'}); + + return($status, $msg, $recv); +} + +######################################## +sub _send_socket_do { + my $self = shift; + my $statement = shift; + my($recv,$header); + + my $sock = $self->_open() or return(491, $self->_get_error(491), $!); + utf8::decode($statement); + print $sock encode('utf-8' => $statement) or return($self->_socket_error($statement, $sock, 'write to socket failed: '.$!)); + + print $sock "\n"; + + # COMMAND statements never return something + if($statement =~ m/^COMMAND/mx) { + return('201', $self->_get_error(201), undef); + } + + $sock->read($header, 16) or return($self->_socket_error($statement, $sock, 'reading header from socket failed, check your livestatus logfile: '.$!)); + $self->{'logger'}->debug("header: $header") if $self->{'verbose'}; + my($status, $msg, $content_length) = $self->_parse_header($header, $sock); + return($status, $msg, undef) if !defined $content_length; + if($content_length > 0) { + $sock->read($recv, $content_length) or return($self->_socket_error($statement, $sock, 'reading body from socket failed')); + } + + $self->_close($sock) unless $self->{'keepalive'}; + return($status, $msg, $recv); +} + +######################################## +sub _socket_error { + my $self = shift; + my $statement = shift; + my $sock = shift; + my $body = shift; + + my $message = "\n"; + $message .= "peer ".Dumper($self->peer_name); + $message .= "statement ".Dumper($statement); + $message .= "message ".Dumper($body); + + $self->{'logger'}->error($message) if $self->{'verbose'}; + + if($self->{'retries_on_connection_error'} <= 0) { + if($self->{'errors_are_fatal'}) { + croak($message); + } + else { + carp($message); + } + } + $self->_close(); + return(500, $self->_get_error(500), $message); +} + +######################################## +sub _parse_header { + my $self = shift; + my $header = shift; + my $sock = shift; + + if(!defined $header) { + return(497, $self->_get_error(497), undef); + } + + my $headerlength = length($header); + if($headerlength != 16) { + return(498, $self->_get_error(498)."\ngot: ".$header.<$sock>, undef); + } + chomp($header); + + my $status = substr($header,0,3); + my $content_length = substr($header,5); + if($content_length !~ m/^\s*(\d+)$/mx) { + return(499, $self->_get_error(499)."\ngot: ".$header.<$sock>, undef); + } else { + $content_length = $1; + } + + return($status, $self->_get_error($status), $content_length); +} + +######################################## + +=head1 COLUMN ALIAS + +In addition to the normal query syntax from the livestatus addon, it is +possible to set column aliases in various ways. 
+ +A valid Columns: Header could look like this: + + my $hosts = $ml->selectall_arrayref( + "GET hosts\nColumns: state as status" + ); + +Stats queries could be aliased too: + + my $stats = $ml->selectall_arrayref( + "GET hosts\nStats: state = 0 as up" + ); + +This syntax is available for: Stats, StatsAnd, StatsOr and StatsGroupBy + + +An alternative way to set column aliases is to define rename option key/value +pairs: + + my $hosts = $ml->selectall_arrayref( + "GET hosts\nColumns: name", { + rename => { 'name' => 'hostname' } + } + ); + +=cut + +######################################## +sub _extract_keys_from_stats_statement { + my $self = shift; + my $statement = shift; + + my(@header, $new_statement); + + for my $line (split/\n/mx, $statement) { + if($line =~ m/^Stats:\ (.*)\s+as\s+(.*)$/mxi) { + push @header, $2; + $line = 'Stats: '.$1; + } + elsif($line =~ m/^Stats:\ (.*)$/mx) { + push @header, $1; + } + + if($line =~ m/^StatsAnd:\ (\d+)\s+as\s+(.*)$/mx) { + for(my $x = 0; $x < $1; $x++) { + pop @header; + } + $line = 'StatsAnd: '.$1; + push @header, $2; + } + elsif($line =~ m/^StatsAnd:\ (\d+)$/mx) { + my @to_join; + for(my $x = 0; $x < $1; $x++) { + unshift @to_join, pop @header; + } + push @header, join(' && ', @to_join); + } + + if($line =~ m/^StatsOr:\ (\d+)\s+as\s+(.*)$/mx) { + for(my $x = 0; $x < $1; $x++) { + pop @header; + } + $line = 'StatsOr: '.$1; + push @header, $2; + } + elsif($line =~ m/^StatsOr:\ (\d+)$/mx) { + my @to_join; + for(my $x = 0; $x < $1; $x++) { + unshift @to_join, pop @header; + } + push @header, join(' || ', @to_join); + } + + # StatsGroupBy header are always sent first + if($line =~ m/^StatsGroupBy:\ (.*)\s+as\s+(.*)$/mxi) { + unshift @header, $2; + $line = 'StatsGroupBy: '.$1; + } + elsif($line =~ m/^StatsGroupBy:\ (.*)$/mx) { + unshift @header, $1; + } + $new_statement .= $line."\n"; + } + + return($new_statement, \@header); +} + +######################################## +sub _extract_keys_from_columns_header { + my $self = shift; + my $statement = shift; + + my(@header, $new_statement); + for my $line (split/\n/mx, $statement) { + if($line =~ m/^Columns:\s+(.*)$/mx) { + for my $column (split/\s+/mx, $1) { + if($column eq 'as') { + pop @header; + } else { + push @header, $column; + } + } + $line =~ s/\s+as\s+([^\s]+)/\ /gmx; + } + $new_statement .= $line."\n"; + } + + return($new_statement, \@header); +} + +######################################## + +=head1 ERROR HANDLING + +Errorhandling can be done like this: + + use Monitoring::Livestatus; + my $ml = Monitoring::Livestatus->new( + socket => '/var/lib/livestatus/livestatus.sock' + ); + $ml->errors_are_fatal(0); + my $hosts = $ml->selectall_arrayref("GET hosts"); + if($Monitoring::Livestatus::ErrorCode) { + croak($Monitoring::Livestatus::ErrorMessage); + } + +=cut +sub _get_error { + my $self = shift; + my $code = shift; + + my $codes = { + '200' => 'OK. Reponse contains the queried data.', + '201' => 'COMMANDs never return something', + '400' => 'The request contains an invalid header.', + '401' => 'The request contains an invalid header.', + '402' => 'The request is completely invalid.', + '403' => 'The request is incomplete.', + '404' => 'The target of the GET has not been found (e.g. the table).', + '405' => 'A non-existing column was being referred to', + '490' => 'no query', + '491' => 'failed to connect', + '492' => 'Separators not allowed in statement. Please use the seperator options in new()', + '493' => 'OuputFormat not allowed in statement. 
Header will be set automatically', + '494' => 'ColumnHeaders not allowed in statement. Header will be set automatically', + '495' => 'ResponseHeader not allowed in statement. Header will be set automatically', + '496' => 'Keepalive not allowed in statement. Please use the keepalive option in new()', + '497' => 'got no header', + '498' => 'header is not exactly 16byte long', + '499' => 'not a valid header (no content-length)', + '500' => 'socket error', + }; + + confess('non existant error code: '.$code) if !defined $codes->{$code}; + + return($codes->{$code}); +} + +######################################## +sub _get_peers { + my $self = shift; + + # set options for our peer(s) + my %options; + for my $opt_key (keys %{$self}) { + $options{$opt_key} = $self->{$opt_key}; + } + + my $peers = []; + + # check if the supplied peer is a socket or a server address + if(defined $self->{'peer'}) { + if(ref $self->{'peer'} eq '') { + my $name = $self->{'name'} || "".$self->{'peer'}; + if(index($self->{'peer'}, ':') > 0) { + push @{$peers}, { 'peer' => "".$self->{'peer'}, type => 'INET', name => $name }; + } else { + push @{$peers}, { 'peer' => "".$self->{'peer'}, type => 'UNIX', name => $name }; + } + } + elsif(ref $self->{'peer'} eq 'ARRAY') { + for my $peer (@{$self->{'peer'}}) { + if(ref $peer eq 'HASH') { + next if !defined $peer->{'peer'}; + $peer->{'name'} = "".$peer->{'peer'} unless defined $peer->{'name'}; + if(!defined $peer->{'type'}) { + $peer->{'type'} = 'UNIX'; + if(index($peer->{'peer'}, ':') >= 0) { + $peer->{'type'} = 'INET'; + } + } + push @{$peers}, $peer; + } else { + my $type = 'UNIX'; + if(index($peer, ':') >= 0) { + $type = 'INET'; + } + push @{$peers}, { 'peer' => "".$peer, type => $type, name => "".$peer }; + } + } + } + elsif(ref $self->{'peer'} eq 'HASH') { + for my $peer (keys %{$self->{'peer'}}) { + my $name = $self->{'peer'}->{$peer}; + my $type = 'UNIX'; + if(index($peer, ':') >= 0) { + $type = 'INET'; + } + push @{$peers}, { 'peer' => "".$peer, type => $type, name => "".$name }; + } + } else { + confess("type ".(ref $self->{'peer'})." 
is not supported for peer option"); + } + } + if(defined $self->{'socket'}) { + my $name = $self->{'name'} || "".$self->{'socket'}; + push @{$peers}, { 'peer' => "".$self->{'socket'}, type => 'UNIX', name => $name }; + } + if(defined $self->{'server'}) { + my $name = $self->{'name'} || "".$self->{'server'}; + push @{$peers}, { 'peer' => "".$self->{'server'}, type => 'INET', name => $name }; + } + + # check if we got a peer + if(scalar @{$peers} == 0) { + croak('please specify at least one peer, socket or server'); + } + + # clean up + delete $options{'peer'}; + delete $options{'socket'}; + delete $options{'server'}; + + return $peers; +} + + +######################################## +sub _lowercase_and_verify_options { + my $self = shift; + my $opts = shift; + my $return = {}; + + # list of allowed options + my $allowed_options = { + 'addpeer' => 1, + 'backend' => 1, + 'columns' => 1, + 'deepcopy' => 1, + 'header' => 1, + 'limit' => 1, + 'limit_start' => 1, + 'limit_length' => 1, + 'rename' => 1, + 'slice' => 1, + 'sum' => 1, + 'callbacks' => 1, + }; + + for my $key (keys %{$opts}) { + if($self->{'warnings'} and !defined $allowed_options->{lc $key}) { + carp("unknown option used: $key - please use only: ".join(", ", keys %{$allowed_options})); + } + $return->{lc $key} = $opts->{$key}; + } + + # set limits + if(defined $return->{'limit'}) { + if(index($return->{'limit'}, ',') != -1) { + my($limit_start,$limit_length) = split /,/mx, $return->{'limit'}; + $return->{'limit_start'} = $limit_start; + $return->{'limit_length'} = $limit_length; + } + else { + $return->{'limit_start'} = 0; + $return->{'limit_length'} = $return->{'limit'}; + } + delete $return->{'limit'}; + } + + return($return); +} + +######################################## +sub _log_statement { + my $self = shift; + my $statement = shift; + my $opt = shift; + my $limit = shift; + my $d = Data::Dumper->new([$opt]); + $d->Indent(0); + my $optstring = $d->Dump; + $optstring =~ s/^\$VAR1\s+=\s+//mx; + $optstring =~ s/;$//mx; + + # remove empty lines from statement + $statement =~ s/\n+/\n/gmx; + + my $cleanstatement = $statement; + $cleanstatement =~ s/\n/\\n/gmx; + $self->{'logger'}->debug('selectall_arrayref("'.$cleanstatement.'", '.$optstring.', '.$limit.')'); + return 1; +} + +######################################## + +1; + +=head1 EXAMPLES + +=head2 Multibackend Configuration + + use Monitoring::Livestatus; + my $ml = Monitoring::Livestatus->new( + name => 'multiple connector', + verbose => 0, + keepalive => 1, + peer => [ + { + name => 'DMZ Monitoring', + peer => '50.50.50.50:9999', + }, + { + name => 'Local Monitoring', + peer => '/tmp/livestatus.socket', + }, + { + name => 'Special Monitoring', + peer => '100.100.100.100:9999', + } + ], + ); + my $hosts = $ml->selectall_arrayref("GET hosts"); + +=head1 SEE ALSO + +For more information about the query syntax and the livestatus plugin installation +see the Livestatus page: http://mathias-kettner.de/checkmk_livestatus.html + +=head1 AUTHOR + +Sven Nierlein, Enierlein@cpan.orgE + +=head1 COPYRIGHT AND LICENSE + +Copyright (C) 2009 by Sven Nierlein + +This library is free software; you can redistribute it and/or modify +it under the same terms as Perl itself. 
+
+=cut
+
+__END__
diff -Nru check-mk-1.2.2p3/=unpacked-tar11=/api/perl/Makefile.PL check-mk-1.2.6p12/=unpacked-tar11=/api/perl/Makefile.PL
--- check-mk-1.2.2p3/=unpacked-tar11=/api/perl/Makefile.PL 1970-01-01 00:00:00.000000000 +0000
+++ check-mk-1.2.6p12/=unpacked-tar11=/api/perl/Makefile.PL 2012-10-04 13:12:37.000000000 +0000
@@ -0,0 +1,41 @@
+# IMPORTANT: if you delete this file your app will not work as
+# expected. you have been warned
+use inc::Module::Install;
+
+name 'Monitoring-Livestatus';
+all_from 'lib/Monitoring/Livestatus.pm';
+perl_version '5.006';
+license 'perl';
+
+resources(
+    'homepage',  => 'http://search.cpan.org/dist/Monitoring-Livestatus/',
+    'bugtracker' => 'http://github.com/sni/Monitoring-Livestatus/issues',
+    'repository', => 'http://github.com/sni/Monitoring-Livestatus',
+);
+
+
+requires 'IO::Socket::UNIX';
+requires 'IO::Socket::INET';
+requires 'Digest::MD5';
+requires 'Scalar::Util';
+requires 'Test::More' => '0.87';
+requires 'Thread::Queue' => '2.11';
+requires 'utf8';
+requires 'Encode';
+requires 'JSON::XS';
+
+# test requirements
+# these requirements still make it into the META.yml, so they are commented so far
+#feature ('authortests',
+#    -default => 0,
+#    'File::Copy::Recursive' => 0,
+#    'Test::Pod' => 1.14,
+#    'Test::Perl::Critic' => 0,
+#    'Test::Pod::Coverage' => 0,
+#    'Perl::Critic::Policy::Dynamic::NoIndirect' => 0,
+#    'Perl::Critic::Policy::NamingConventions::ProhibitMixedCaseSubs' => 0,
+#    'Perl::Critic::Policy::ValuesAndExpressions::ProhibitAccessOfPrivateData' => 0,
+#);
+
+auto_install;
+WriteAll;
diff -Nru check-mk-1.2.2p3/=unpacked-tar11=/api/perl/MANIFEST check-mk-1.2.6p12/=unpacked-tar11=/api/perl/MANIFEST
--- check-mk-1.2.2p3/=unpacked-tar11=/api/perl/MANIFEST 1970-01-01 00:00:00.000000000 +0000
+++ check-mk-1.2.6p12/=unpacked-tar11=/api/perl/MANIFEST 2012-10-04 13:12:37.000000000 +0000
@@ -0,0 +1,38 @@
+Changes
+examples/dump.pl
+examples/test.pl
+inc/Module/AutoInstall.pm
+inc/Module/Install.pm
+inc/Module/Install/AutoInstall.pm
+inc/Module/Install/Base.pm
+inc/Module/Install/Can.pm
+inc/Module/Install/Fetch.pm
+inc/Module/Install/Include.pm
+inc/Module/Install/Makefile.pm
+inc/Module/Install/Metadata.pm
+inc/Module/Install/Win32.pm
+inc/Module/Install/WriteAll.pm
+lib/Monitoring/Livestatus.pm
+lib/Monitoring/Livestatus/INET.pm
+lib/Monitoring/Livestatus/MULTI.pm
+lib/Monitoring/Livestatus/UNIX.pm
+Makefile.PL
+MANIFEST This list of files
+META.yml
+README
+t/01-Monitoring-Livestatus-basic_tests.t
+t/02-Monitoring-Livestatus-internals.t
+t/03-Monitoring-Livestatus-MULTI-internals.t
+t/20-Monitoring-Livestatus-test_socket.t
+t/21-Monitoring-Livestatus-INET.t
+t/22-Monitoring-Livestatus-UNIX.t
+t/30-Monitoring-Livestatus-live-test.t
+t/31-Monitoring-Livestatus-MULTI-live-test.t
+t/32-Monitoring-Livestatus-backend-test.t
+t/33-Monitoring-Livestatus-test_socket_timeout.t
+t/34-Monitoring-Livestatus-utf8_support.t
+t/35-Monitoring-Livestatus-callbacks_support.t
+t/97-Pod.t
+t/98-Pod-Coverage.t
+t/99-Perl-Critic.t
+t/perlcriticrc
diff -Nru check-mk-1.2.2p3/=unpacked-tar11=/api/perl/META.yml check-mk-1.2.6p12/=unpacked-tar11=/api/perl/META.yml
--- check-mk-1.2.2p3/=unpacked-tar11=/api/perl/META.yml 1970-01-01 00:00:00.000000000 +0000
+++ check-mk-1.2.6p12/=unpacked-tar11=/api/perl/META.yml 2012-10-04 13:12:37.000000000 +0000
@@ -0,0 +1,37 @@
+---
+abstract: 'Perl API for check_mk livestatus to access runtime'
+author:
+  - 'Sven Nierlein, <nierlein@cpan.org>'
+build_requires:
+  ExtUtils::MakeMaker: 6.42
+configure_requires:
+  ExtUtils::MakeMaker: 6.42
+distribution_type: module
+generated_by: 'Module::Install version 1.00'
+license: perl
+meta-spec:
+  url: http://module-build.sourceforge.net/META-spec-v1.4.html
+  version: 1.4
+name: Monitoring-Livestatus
+no_index:
+  directory:
+    - examples
+    - inc
+    - t
+requires:
+  Digest::MD5: 0
+  Encode: 0
+  IO::Socket::INET: 0
+  IO::Socket::UNIX: 0
+  JSON::XS: 0
+  Scalar::Util: 0
+  Test::More: 0.87
+  Thread::Queue: 2.11
+  perl: 5.6.0
+  utf8: 0
+resources:
+  bugtracker: http://github.com/sni/Monitoring-Livestatus/issues
+  homepage: http://search.cpan.org/dist/Monitoring-Livestatus/
+  license: http://dev.perl.org/licenses/
+  repository: http://github.com/sni/Monitoring-Livestatus
+version: 0.74
diff -Nru check-mk-1.2.2p3/=unpacked-tar11=/api/perl/README check-mk-1.2.6p12/=unpacked-tar11=/api/perl/README
--- check-mk-1.2.2p3/=unpacked-tar11=/api/perl/README 1970-01-01 00:00:00.000000000 +0000
+++ check-mk-1.2.6p12/=unpacked-tar11=/api/perl/README 2012-10-04 13:12:37.000000000 +0000
@@ -0,0 +1,32 @@
+Monitoring-Livestatus
+=====================
+
+    Monitoring::Livestatus can be used to access the data of the check_mk
+    Livestatus Addon for Nagios and Icinga.
+
+INSTALLATION
+
+    To install this module type the following:
+
+        perl Makefile.PL
+        make
+        make test
+        make install
+
+DEPENDENCIES
+
+    This module requires no other modules.
+
+SYNOPSIS
+    my $ml = Monitoring::Livestatus->new( socket => '/var/lib/livestatus/livestatus.sock' );
+    my $hosts = $ml->selectall_arrayref("GET hosts");
+
+AUTHOR
+    Sven Nierlein
+
+COPYRIGHT AND LICENCE
+
+    Copyright (C) 2009 by Sven Nierlein
+
+    This library is free software; you can redistribute it and/or modify
+    it under the same terms as Perl itself.
diff -Nru check-mk-1.2.2p3/=unpacked-tar11=/api/perl/t/01-Monitoring-Livestatus-basic_tests.t check-mk-1.2.6p12/=unpacked-tar11=/api/perl/t/01-Monitoring-Livestatus-basic_tests.t
--- check-mk-1.2.2p3/=unpacked-tar11=/api/perl/t/01-Monitoring-Livestatus-basic_tests.t 1970-01-01 00:00:00.000000000 +0000
+++ check-mk-1.2.6p12/=unpacked-tar11=/api/perl/t/01-Monitoring-Livestatus-basic_tests.t 2012-10-04 13:12:37.000000000 +0000
@@ -0,0 +1,149 @@
+#!/usr/bin/env perl
+
+#########################
+
+use strict;
+use Test::More;
+use File::Temp;
+use Data::Dumper;
+use IO::Socket::UNIX qw( SOCK_STREAM SOMAXCONN );
+use_ok('Monitoring::Livestatus');
+
+BEGIN {
+    if( $^O eq 'MSWin32' ) {
+        plan skip_all => 'no sockets on windows';
+    }
+    else {
+        plan tests => 35;
+    }
+}
+
+#########################
+# get a temp file from File::Temp and replace it with our socket
+my $fh = File::Temp->new(UNLINK => 0);
+my $socket_path = $fh->filename;
+unlink($socket_path);
+my $listener = IO::Socket::UNIX->new(
+                                    Type   => SOCK_STREAM,
+                                    Listen => SOMAXCONN,
+                                    Local  => $socket_path,
+                                ) or die("failed to open $socket_path as test socket: $!");
+#########################
+# create object with single arg
+my $ml = Monitoring::Livestatus->new( $socket_path );
+isa_ok($ml, 'Monitoring::Livestatus', 'single args');
+is($ml->peer_name(), $socket_path, 'get peer_name()');
+is($ml->peer_addr(), $socket_path, 'get peer_addr()');
+
+#########################
+# create object with hash args
+my $line_seperator = 10;
+my $column_seperator = 0;
+$ml = Monitoring::Livestatus->new(
+                                    verbose => 0,
+                                    socket => $socket_path,
+                                    line_seperator => $line_seperator,
+                                    column_seperator => $column_seperator,
+                                );
+isa_ok($ml, 'Monitoring::Livestatus', 'new hash args');
+is($ml->peer_name(), $socket_path, 'get peer_name()');
+is($ml->peer_addr(), $socket_path, 'get peer_addr()');
+
+#########################
+# create object with peer arg
+$ml = Monitoring::Livestatus->new(
+                                    peer => $socket_path,
+                                );
+isa_ok($ml, 'Monitoring::Livestatus', 'peer hash arg socket');
+is($ml->peer_name(), $socket_path, 'get peer_name()');
+is($ml->peer_addr(), $socket_path, 'get peer_addr()');
+isa_ok($ml->{'CONNECTOR'}, 'Monitoring::Livestatus::UNIX', 'peer backend UNIX');
+
+#########################
+# create object with peer arg
+my $server = 'localhost:12345';
+$ml = Monitoring::Livestatus->new(
+                                    peer => $server,
+                                );
+isa_ok($ml, 'Monitoring::Livestatus', 'peer hash arg server');
+is($ml->peer_name(), $server, 'get peer_name()');
+is($ml->peer_addr(), $server, 'get peer_addr()');
+isa_ok($ml->{'CONNECTOR'}, 'Monitoring::Livestatus::INET', 'peer backend INET');
+
+#########################
+# create multi object with peers
+$ml = Monitoring::Livestatus->new(
+                                    peer => [ $server, $socket_path ],
+                                );
+isa_ok($ml, 'Monitoring::Livestatus', 'peer hash arg multi');
+my @names  = $ml->peer_name();
+my @addrs  = $ml->peer_addr();
+my $name   = $ml->peer_name();
+my $expect = [ $server, $socket_path ];
+is_deeply(\@names, $expect, 'list context get peer_name()') or diag("got peer names: ".Dumper(\@names)."but expected: ".Dumper($expect));
+is($name, 'multiple connector', 'scalar context get peer_name()') or diag("got peer name: ".Dumper($name)."but expected: ".Dumper('multiple connector'));
+is_deeply(\@addrs, $expect, 'list context get peer_addr()') or diag("got peer addrs: ".Dumper(\@addrs)."but expected: ".Dumper($expect));
+
+#########################
+# create multi object with peers and name
+$ml = Monitoring::Livestatus->new(
+                                    peer => [ $server, $socket_path ],
+                                    name => 'test multi',
+                                );
+isa_ok($ml, 'Monitoring::Livestatus', 'peer hash arg multi with name');
+$name = $ml->peer_name();
+is($name, 'test multi', 'peer_name()');
+
+#########################
+$ml = Monitoring::Livestatus->new(
+                                    peer      => [ $socket_path ],
+                                    verbose   => 0,
+                                    keepalive => 1,
+                                    logger    => undef,
+                                );
+isa_ok($ml, 'Monitoring::Livestatus', 'peer hash arg multi with keepalive');
+is($ml->peer_name(), $socket_path, 'get peer_name()');
+is($ml->peer_addr(), $socket_path, 'get peer_addr()');
+
+#########################
+# timeout checks
+$ml = Monitoring::Livestatus->new(
+                                    peer    => [ $socket_path ],
+                                    verbose => 0,
+                                    timeout => 13,
+                                    logger  => undef,
+                                );
+isa_ok($ml, 'Monitoring::Livestatus', 'peer hash arg multi with general timeout');
+is($ml->peer_name(), $socket_path, 'get peer_name()');
+is($ml->peer_addr(), $socket_path, 'get peer_addr()');
+is($ml->{'connect_timeout'}, 13, 'connect_timeout');
+is($ml->{'query_timeout'}, 13, 'query_timeout');
+
+$ml = Monitoring::Livestatus->new(
+                                    peer            => [ $socket_path ],
+                                    verbose         => 0,
+                                    query_timeout   => 14,
+                                    connect_timeout => 17,
+                                    logger          => undef,
+                                );
+isa_ok($ml, 'Monitoring::Livestatus', 'peer hash arg multi with general timeout');
+is($ml->peer_name(), $socket_path, 'get peer_name()');
+is($ml->peer_addr(), $socket_path, 'get peer_addr()');
+is($ml->{'connect_timeout'}, 17, 'connect_timeout');
+is($ml->{'query_timeout'}, 14, 'query_timeout');
+
+
+#########################
+# error retry
+$ml = Monitoring::Livestatus->new(
+                                    peer                        => [ $socket_path ],
+                                    verbose                     => 0,
+                                    retries_on_connection_error => 3,
+                                    retry_interval              => 1,
+                                    logger                      => undef,
+                                );
+isa_ok($ml, 'Monitoring::Livestatus', 'peer hash arg multi with error retry');
+
+#########################
+# cleanup
+unlink($socket_path);
diff -Nru check-mk-1.2.2p3/=unpacked-tar11=/api/perl/t/02-Monitoring-Livestatus-internals.t check-mk-1.2.6p12/=unpacked-tar11=/api/perl/t/02-Monitoring-Livestatus-internals.t
--- check-mk-1.2.2p3/=unpacked-tar11=/api/perl/t/02-Monitoring-Livestatus-internals.t 1970-01-01 00:00:00.000000000 +0000
+++ check-mk-1.2.6p12/=unpacked-tar11=/api/perl/t/02-Monitoring-Livestatus-internals.t 2012-10-04 13:12:37.000000000 +0000
@@ -0,0 +1,148 @@
+#!/usr/bin/env perl
+
+#########################
+
+use strict;
+use Test::More;
+use File::Temp;
+use Data::Dumper;
+use IO::Socket::UNIX qw( SOCK_STREAM SOMAXCONN );
+use_ok('Monitoring::Livestatus');
+
+BEGIN {
+    if( $^O eq 'MSWin32' ) {
+        plan skip_all => 'no sockets on windows';
+    }
+    else {
+        plan tests => 14;
+    }
+}
+
+#########################
+# get a temp file from File::Temp and replace it with our socket
+my $fh = File::Temp->new(UNLINK => 0);
+my $socket_path = $fh->filename;
+unlink($socket_path);
+my $listener = IO::Socket::UNIX->new(
+                                    Type   => SOCK_STREAM,
+                                    Listen => SOMAXCONN,
+                                    Local  => $socket_path,
+                                ) or die("failed to open $socket_path as test socket: $!");
+
+#########################
+# create object with single arg
+my $ml = Monitoring::Livestatus->new( 'localhost:12345' );
+isa_ok($ml, 'Monitoring::Livestatus', 'single args server');
+isa_ok($ml->{'CONNECTOR'}, 'Monitoring::Livestatus::INET', 'single args server peer');
+is($ml->{'CONNECTOR'}->peer_name, 'localhost:12345', 'single args server peer name');
+is($ml->{'CONNECTOR'}->peer_addr, 'localhost:12345', 'single args server peer addr');
+
+#########################
+# create object with single arg
+$ml = Monitoring::Livestatus->new( $socket_path );
+isa_ok($ml, 'Monitoring::Livestatus', 'single args socket');
+isa_ok($ml->{'CONNECTOR'}, 'Monitoring::Livestatus::UNIX', 'single args socket peer');
+is($ml->{'CONNECTOR'}->peer_name, $socket_path, 'single args socket peer name');
+is($ml->{'CONNECTOR'}->peer_addr, $socket_path, 'single args socket peer addr');
+
+my $header = "404          43\n";
+my($error,$error_msg) = $ml->_parse_header($header);
+is($error, '404', 'error code 404');
+isnt($error_msg, undef, 'error code 404 message');
+
+#########################
+my $stats_query1 = "GET services
+Stats: state = 0
+Stats: state = 1
+Stats: state = 2
+Stats: state = 3
+Stats: state = 4
+Stats: host_state != 0
+Stats: state = 1
+StatsAnd: 2
+Stats: host_state != 0
+Stats: state = 2
+StatsAnd: 2
+Stats: host_state != 0
+Stats: state = 3
+StatsAnd: 2
+Stats: host_state != 0
+Stats: state = 3
+Stats: active_checks = 1
+StatsAnd: 3
+Stats: state = 3
+Stats: active_checks = 1
+StatsOr: 2";
+my @expected_keys1 = (
+      'state = 0',
+      'state = 1',
+      'state = 2',
+      'state = 3',
+      'state = 4',
+      'host_state != 0 && state = 1',
+      'host_state != 0 && state = 2',
+      'host_state != 0 && state = 3',
+      'host_state != 0 && state = 3 && active_checks = 1',
+      'state = 3 || active_checks = 1',
+    );
+my @got_keys1 = @{$ml->_extract_keys_from_stats_statement($stats_query1)};
+is_deeply(\@got_keys1, \@expected_keys1, 'statsAnd, statsOr query keys')
+    or ( diag('got keys: '.Dumper(\@got_keys1)) );
+
+
+#########################
+my $stats_query2 = "GET services
+Stats: state = 0 as all_ok
+Stats: state = 1 as all_warning
+Stats: state = 2 as all_critical
+Stats: state = 3 as all_unknown
+Stats: state = 4 as all_pending
+Stats: host_state != 0
+Stats: state = 1
+StatsAnd: 2 as all_warning_on_down_hosts
+Stats: host_state != 0
+Stats: state = 2
+StatsAnd: 2 as all_critical_on_down_hosts
+Stats: host_state != 0
+Stats: state = 3
+StatsAnd: 2 as all_unknown_on_down_hosts
+Stats: host_state != 0
+Stats: state = 3
+Stats: active_checks_enabled = 1
+StatsAnd: 3 as all_unknown_active_on_down_hosts
+Stats: state = 3
+Stats: active_checks_enabled = 1
+StatsOr: 2 as all_active_or_unknown";
+my @expected_keys2 = (
+      'all_ok',
+      'all_warning',
+      'all_critical',
+      'all_unknown',
+      'all_pending',
+      'all_warning_on_down_hosts',
+      'all_critical_on_down_hosts',
+      'all_unknown_on_down_hosts',
+      'all_unknown_active_on_down_hosts',
+      'all_active_or_unknown',
+    );
+my @got_keys2 = @{$ml->_extract_keys_from_stats_statement($stats_query2)};
+is_deeply(\@got_keys2, \@expected_keys2, 'stats query keys2')
+    or ( diag('got keys: '.Dumper(\@got_keys2)) );
+
+
+#########################
+my $normal_query1 = "GET services
+Columns: host_name as host is_flapping description as name state
+";
+my @expected_keys3 = (
+      'host',
+      'is_flapping',
+      'name',
+      'state',
+    );
+my @got_keys3 = @{$ml->_extract_keys_from_columns_header($normal_query1)};
+is_deeply(\@got_keys3, \@expected_keys3, 'normal query keys')
+    or ( diag('got keys: '.Dumper(\@got_keys3)) );
+
+#########################
+unlink($socket_path);
diff -Nru check-mk-1.2.2p3/=unpacked-tar11=/api/perl/t/03-Monitoring-Livestatus-MULTI-internals.t check-mk-1.2.6p12/=unpacked-tar11=/api/perl/t/03-Monitoring-Livestatus-MULTI-internals.t
--- check-mk-1.2.2p3/=unpacked-tar11=/api/perl/t/03-Monitoring-Livestatus-MULTI-internals.t 1970-01-01 00:00:00.000000000 +0000
+++ check-mk-1.2.6p12/=unpacked-tar11=/api/perl/t/03-Monitoring-Livestatus-MULTI-internals.t 2012-10-04 13:12:37.000000000 +0000
@@ -0,0 +1,215 @@
+#!/usr/bin/env perl
+
+#########################
+
+use strict;
+use Test::More;
+use Data::Dumper;
+use File::Temp;
+use IO::Socket::UNIX qw( SOCK_STREAM SOMAXCONN );
+use_ok('Monitoring::Livestatus::MULTI');
+
+BEGIN {
+    if( $^O eq 'MSWin32' ) {
+        plan skip_all => 'no sockets on windows';
+    }
+    else {
+        plan tests => 57;
+    }
+}
+
+#########################
+# create 2 test sockets
+# get a temp file from File::Temp and replace it with our socket
+my $fh = File::Temp->new(UNLINK => 0);
+my $socket_path1 = $fh->filename;
+unlink($socket_path1);
+my $listener1 = IO::Socket::UNIX->new(
+                                    Type   => SOCK_STREAM,
+                                    Listen => SOMAXCONN,
+                                    Local  => $socket_path1,
+                                ) or die("failed to open $socket_path1 as test socket: $!");
+
+$fh = File::Temp->new(UNLINK => 0);
+my $socket_path2 = $fh->filename;
+unlink($socket_path2);
+my $listener2 = IO::Socket::UNIX->new(
+                                    Type   => SOCK_STREAM,
+                                    Listen => SOMAXCONN,
+                                    Local  => $socket_path2,
+                                ) or die("failed to open $socket_path2 as test socket: $!");
+
+#########################
+# test the _merge_answer
+my $mergetests = [
+    { # simple test for sliced selectall_arrayref
+      in  => { '820e03551b95b42ec037c87aed9b8f4a' => [ { 'description' => 'test_flap_07', 'host_name' => 'test_host_000', 'state' => '0' }, { 'description' => 'test_flap_11', 'host_name' => 'test_host_000', 'state' => '0' } ],
+               '35bbb11a888f66131d429efd058fb141' => [ { 'description' => 'test_ok_00', 'host_name' => 'test_host_000', 'state' => '0' }, { 'description' => 'test_ok_01', 'host_name' => 'test_host_000', 'state' => '0' } ],
+               '70ea8fa14abb984761bdd45ef27685b0' => [ { 'description' => 'test_critical_00', 'host_name' => 'test_host_000', 'state' => '2' }, { 'description' => 'test_critical_19', 'host_name' => 'test_host_000', 'state' => '2' } ]
+             },
+      exp => [
+               { 'description' => 'test_flap_07', 'host_name' => 'test_host_000', 'state' => '0' },
+               { 'description' => 'test_flap_11', 'host_name' => 'test_host_000', 'state' => '0' },
+               { 'description' => 'test_ok_00', 'host_name' => 'test_host_000', 'state' => '0' },
+               { 'description' => 'test_ok_01', 'host_name' => 'test_host_000', 'state' => '0' },
+               { 'description' => 'test_critical_00', 'host_name' => 'test_host_000', 'state' => '2' },
+               { 'description' => 'test_critical_19', 'host_name' => 'test_host_000', 'state' => '2' },
+             ]
+    },
+];
+
+#########################
+# test object creation
+my $ml = Monitoring::Livestatus::MULTI->new( [ $socket_path1, $socket_path2 ] );
+isa_ok($ml, 'Monitoring::Livestatus', 'single args sockets');
+for my $peer (@{$ml->{'peers'}}) {
+    isa_ok($peer, 'Monitoring::Livestatus::UNIX', 'single args sockets peer');
+}
+
+$ml = Monitoring::Livestatus::MULTI->new( [$socket_path1] );
+isa_ok($ml, 'Monitoring::Livestatus', 'single array args socket');
+for my $peer (@{$ml->{'peers'}}) {
+    isa_ok($peer, 'Monitoring::Livestatus::UNIX', 'single array args socket peer');
+    is($peer->peer_addr, $socket_path1, 'single arrays args socket peer addr');
+    is($peer->peer_name, $socket_path1, 'single arrays args socket peer name');
+}
+
+$ml = Monitoring::Livestatus::MULTI->new( 'localhost:5001' );
+isa_ok($ml, 'Monitoring::Livestatus', 'single args server');
+for my $peer (@{$ml->{'peers'}}) {
+    isa_ok($peer, 'Monitoring::Livestatus::INET', 'single args server peer');
+    like($peer->peer_addr, qr/^localhost/, 'single args servers peer addr');
+    like($peer->peer_name, qr/^localhost/, 'single args servers peer name');
+}
+
+$ml = Monitoring::Livestatus::MULTI->new( ['localhost:5001'] );
+isa_ok($ml, 'Monitoring::Livestatus', 'single array args server');
+for my $peer (@{$ml->{'peers'}}) {
+    isa_ok($peer, 'Monitoring::Livestatus::INET', 'single arrays args server peer');
+    like($peer->peer_addr, qr/^localhost/, 'single arrays args servers peer addr');
+    like($peer->peer_name, qr/^localhost/, 'single arrays args servers peer name');
+}
+
+$ml = Monitoring::Livestatus::MULTI->new( [ 'localhost:5001', 'localhost:5002' ] );
+isa_ok($ml, 'Monitoring::Livestatus', 'single args servers');
+for my $peer (@{$ml->{'peers'}}) {
+    isa_ok($peer, 'Monitoring::Livestatus::INET', 'single args servers peer');
+    like($peer->peer_addr, qr/^localhost/, 'single args servers peer addr');
+    like($peer->peer_name, qr/^localhost/, 'single args servers peer name');
+}
+
+$ml = Monitoring::Livestatus::MULTI->new( peer => [ 'localhost:5001', 'localhost:5002' ] );
+isa_ok($ml, 'Monitoring::Livestatus', 'hash args servers');
+for my $peer (@{$ml->{'peers'}}) {
+    isa_ok($peer, 'Monitoring::Livestatus::INET', 'hash args servers peer');
+    like($peer->peer_addr, qr/^localhost/, 'hash args servers peer addr');
+    like($peer->peer_name, qr/^localhost/, 'hash args servers peer name');
+}
+
+$ml = Monitoring::Livestatus::MULTI->new( peer => [ $socket_path1, $socket_path2 ] );
+isa_ok($ml, 'Monitoring::Livestatus', 'hash args sockets');
+for my $peer (@{$ml->{'peers'}}) {
+    isa_ok($peer, 'Monitoring::Livestatus::UNIX', 'hash args sockets peer');
+}
+
+$ml = Monitoring::Livestatus::MULTI->new( peer => { $socket_path1 => 'Location 1', $socket_path2 => 'Location2' } );
+isa_ok($ml, 'Monitoring::Livestatus', 'hash args hashed sockets');
+for my $peer (@{$ml->{'peers'}}) {
+    isa_ok($peer, 'Monitoring::Livestatus::UNIX', 'hash args hashed sockets peer');
+    like($peer->peer_name, qr/^Location/, 'hash args hashed sockets peer name');
+}
+
+$ml = Monitoring::Livestatus::MULTI->new( peer => { 'localhost:5001' => 'Location 1', 'localhost:5002' => 'Location2' } );
+isa_ok($ml, 'Monitoring::Livestatus', 'hash args hashed servers');
+for my $peer (@{$ml->{'peers'}}) {
+    isa_ok($peer, 'Monitoring::Livestatus::INET', 'hash args hashed servers peer');
+    like($peer->peer_addr, qr/^localhost/, 'hash args hashed servers peer addr');
+    like($peer->peer_name, qr/^Location/, 'hash args hashed servers peer name');
+}
+
+$ml = Monitoring::Livestatus::MULTI->new( $socket_path1 );
+isa_ok($ml, 'Monitoring::Livestatus', 'single args socket');
+for my $peer (@{$ml->{'peers'}}) {
+    isa_ok($peer, 'Monitoring::Livestatus::UNIX', 'single args socket peer');
+}
+
+#########################
+# test internal subs
+$ml = Monitoring::Livestatus::MULTI->new('peer' => ['192.168.123.2:9996', '192.168.123.2:9997', '192.168.123.2:9998' ] );
+
+my $x = 0;
+for my $test (@{$mergetests}) {
+    my $got = $ml->_merge_answer($test->{'in'});
+    is_deeply($got, $test->{'exp'}, '_merge_answer test '.$x)
+        or diag("got: ".Dumper($got)."\nbut expected ".Dumper($test->{'exp'}));
+    $x++;
+}
+
+#########################
+# test the _sum_answer
+my $sumtests = [
+    { # hashes
+      in  => { '192.168.123.2:9996' => { 'ok' => '12', 'warning' => '8' },
+               '192.168.123.2:9997' => { 'ok' => '17', 'warning' => '7' },
+               '192.168.123.2:9998' => { 'ok' => '13', 'warning' => '2' }
+             },
+      exp => { 'ok' => '42', 'warning' => '17' }
+    },
+    { # hashes, undefs
+      in  => { '192.168.123.2:9996' => { 'ok' => '12', 'warning' => '8' },
+               '192.168.123.2:9997' => undef,
+               '192.168.123.2:9998' => { 'ok' => '13', 'warning' => '2' }
+             },
+      exp => { 'ok' => '25', 'warning' => '10' }
+    },
+    { # hashes, undefs
+      in  => { '192.168.123.2:9996' => { 'ok' => '12', 'warning' => '8' },
+               '192.168.123.2:9997' => {},
+               '192.168.123.2:9998' => { 'ok' => '13', 'warning' => '2' }
+             },
+      exp => { 'ok' => '25', 'warning' => '10' }
+    },
+    { # arrays
+      in  => { '192.168.123.2:9996' => [ '3302', '235' ],
+               '192.168.123.2:9997' => [ '3324', '236' ],
+               '192.168.123.2:9998' => [ '3274', '236' ]
+             },
+      exp => [ 9900, 707 ]
+    },
+    { # undefs / scalars
+      in  => { 'e69322abf0352888e598da3e2514df4a' => undef,
+               'f42530d7e8c2b52732ba427b1e5e0a8e' => '1'
+             },
+      exp => 1,
+    },
+    { # arrays, undefs
+      in  => { '192.168.123.2:9996' => [ '2', '5' ],
+               '192.168.123.2:9997' => [ ],
+               '192.168.123.2:9998' => [ '4', '6' ]
+             },
+      exp => [ 6, 11 ]
+    },
+    { # arrays, undefs
+      in  => { '192.168.123.2:9996' => [ '2', '5' ],
+               '192.168.123.2:9997' => undef,
+               '192.168.123.2:9998' => [ '4', '6' ]
+             },
+      exp => [ 6, 11 ]
+    },
+];
+
+$x = 1;
+for my $test (@{$sumtests}) {
+    my $got = $ml->_sum_answer($test->{'in'});
+    is_deeply($got, $test->{'exp'}, '_sum_answer test '.$x)
+        or diag("got: ".Dumper($got)."\nbut expected ".Dumper($test->{'exp'}));
+    $x++;
+}
+
+#########################
+# clone test
+my $clone = $ml->_clone($mergetests);
+is_deeply($clone, $mergetests, 'merge test clone');
+
+$clone = $ml->_clone($sumtests);
+is_deeply($clone, $sumtests, 'sum test clone');
diff -Nru check-mk-1.2.2p3/=unpacked-tar11=/api/perl/t/20-Monitoring-Livestatus-test_socket.t check-mk-1.2.6p12/=unpacked-tar11=/api/perl/t/20-Monitoring-Livestatus-test_socket.t
--- check-mk-1.2.2p3/=unpacked-tar11=/api/perl/t/20-Monitoring-Livestatus-test_socket.t 1970-01-01 00:00:00.000000000 +0000
+++ check-mk-1.2.6p12/=unpacked-tar11=/api/perl/t/20-Monitoring-Livestatus-test_socket.t 2012-10-04 13:12:37.000000000 +0000
@@ -0,0 +1,329 @@
+#!/usr/bin/env perl
+
+#########################
+
+use strict;
+use Test::More;
+use IO::Socket::UNIX qw( SOCK_STREAM SOMAXCONN );
+use Data::Dumper;
+use JSON::XS;
+
+BEGIN {
+    eval {require threads;};
+    if ( $@ ) {
+        plan skip_all => 'need threads support for testing a real socket'
+    }
+    elsif( $^O eq 'MSWin32' ) {
+        plan skip_all => 'no sockets on windows';
+    }
+    else{
+        plan tests => 109
+    }
+}
+
+use File::Temp;
+BEGIN { use_ok('Monitoring::Livestatus') };
+
+#########################
+# Normal Queries
+#########################
+my $line_seperator   = 10;
+my $column_seperator = 0;
+my $test_data = [ ["alias","name","contacts"],     # table header
+                  ["alias1","host1","contact1"],   # row 1
+                  ["alias2","host2","contact2"],   # row 2
+                  ["alias3","host3","contact3"],   # row 3
+                ];
+my $test_hostgroups = [['']]; # test one row with no data
+
+# expected results
+my $selectall_arrayref1 = [ [ 'alias1', 'host1', 'contact1' ],
+                            [ 'alias2', 'host2', 'contact2' ],
+                            [ 'alias3', 'host3', 'contact3' ]
+                          ];
+my $selectall_arrayref2 = [
+        { 'contacts' => 'contact1', 'name' => 'host1', 'alias' => 'alias1' },
+        { 'contacts' => 'contact2', 'name' => 'host2', 'alias' => 'alias2' },
+        { 'contacts' => 'contact3', 'name' => 'host3', 'alias' => 'alias3' }
+      ];
+my $selectall_hashref = {
+        'host1' => { 'contacts' => 'contact1', 'name' => 'host1', 'alias' => 'alias1' },
+        'host2' => { 'contacts' => 'contact2', 'name' => 'host2', 'alias' => 'alias2' },
+        'host3' => { 'contacts' => 'contact3', 'name' => 'host3', 'alias' => 'alias3' }
+      };
+my $selectcol_arrayref1 = [ 'alias1', 'alias2', 'alias3' ];
+my $selectcol_arrayref2 = [ 'alias1', 'host1', 'alias2', 'host2', 'alias3', 'host3' ];
+my $selectcol_arrayref3 = [ 'alias1', 'host1', 'contact1', 'alias2', 'host2', 'contact2', 'alias3', 'host3', 'contact3' ];
+my @selectrow_array     = ( 'alias1', 'host1', 'contact1' );
+my $selectrow_arrayref  = [ 'alias1', 'host1', 'contact1' ];
+my $selectrow_hashref   = { 'contacts' => 'contact1', 'name' => 'host1', 'alias' => 'alias1' };
+
+#########################
+# Single Queries
+#########################
+my $single_statement   = "GET hosts\nColumns: alias\nFilter: name = host1";
+my $selectscalar_value = 'alias1';
+
+#########################
+# Stats Queries
+#########################
+my $stats_statement = "GET services\nStats: state = 0\nStats: state = 1\nStats: state = 2\nStats: state = 3";
+my $stats_data      = [[4297,13,9,0]];
+
+# expected results
+my $stats_selectall_arrayref1 = [ [4297,13,9,0] ];
+my $stats_selectall_arrayref2 = [ { 'state = 0' => '4297', 'state = 1' => '13', 'state = 2' => '9', 'state = 3' => 0 } ];
+my $stats_selectcol_arrayref  = [ '4297' ];
+my @stats_selectrow_array     = ( '4297', '13', '9', '0' );
+my $stats_selectrow_arrayref  = [ '4297', '13', '9', '0' ];
+my $stats_selectrow_hashref   = { 'state = 0' => '4297', 'state = 1' => '13', 'state = 2' => '9', 'state = 3' => 0 };
+
+#########################
+# Empty Queries
+#########################
+my $empty_statement = "GET services\nFilter: description = empty";
+
+# expected results
+my $empty_selectall_arrayref = [];
+my $empty_selectcol_arrayref = [];
+my @empty_selectrow_array;
+my $empty_selectrow_arrayref;
+my $empty_selectrow_hashref;
+
+
+#########################
+# get a temp file from File::Temp and replace it with our socket
+my $fh = File::Temp->new(UNLINK => 0);
+my $socket_path = $fh->filename;
+unlink($socket_path);
+my $thr1 = threads->create('create_socket', 'unix');
+#########################
+# use a fixed tcp address for the inet test socket
+my $server = 'localhost:32987';
+my $thr2 = threads->create('create_socket', 'inet');
+sleep(1);
+
+#########################
+my $objects_to_test = {
+    # create unix object with hash args
+    'unix_hash_args' => Monitoring::Livestatus->new(
+                                          verbose => 0,
+                                          socket => $socket_path,
+                                          line_seperator => $line_seperator,
+                                          column_seperator => $column_seperator,
+                                      ),
+
+    # create unix object with a single arg
+    'unix_single_arg' => Monitoring::Livestatus::UNIX->new( $socket_path ),
+
+    # create inet object with hash args
+    'inet_hash_args' => Monitoring::Livestatus->new(
+                                          verbose => 0,
+                                          server => $server,
+                                          line_seperator => $line_seperator,
+                                          column_seperator => $column_seperator,
+                                      ),
+
+    # create inet object with a single arg
+    'inet_single_arg' => Monitoring::Livestatus::INET->new( $server ),
+
+};
+
+for my $key (keys %{$objects_to_test}) {
+    my $ml = $objects_to_test->{$key};
+    isa_ok($ml, 'Monitoring::Livestatus');
+
+    # we don't need warnings for testing
+    $ml->warnings(0);
+
+    ##################################################
+    # test settings
+    my $rt = $ml->verbose(1);
+    is($rt, '0', 'enable verbose');
+    $rt = $ml->verbose(0);
+    is($rt, '1', 'disable verbose');
+
+    $rt = $ml->errors_are_fatal(0);
+    is($rt, '1', 'disable errors_are_fatal');
+    $rt = $ml->errors_are_fatal(1);
+    is($rt, '0', 'enable errors_are_fatal');
+
+    ##################################################
+    # do some sample queries
+    my $statement = "GET hosts";
+
+    #########################
+    my $ary_ref = $ml->selectall_arrayref($statement);
+    is_deeply($ary_ref, $selectall_arrayref1, 'selectall_arrayref($statement)')
+        or diag("got: ".Dumper($ary_ref)."\nbut expected ".Dumper($selectall_arrayref1));
+
+    #########################
+    $ary_ref = $ml->selectall_arrayref($statement, { Slice => {} });
+    is_deeply($ary_ref, $selectall_arrayref2, 'selectall_arrayref($statement, { Slice => {} })')
+        or diag("got: ".Dumper($ary_ref)."\nbut expected ".Dumper($selectall_arrayref2));
+
+    #########################
+    my $hash_ref = $ml->selectall_hashref($statement, 'name');
+    is_deeply($hash_ref, $selectall_hashref, 'selectall_hashref($statement, "name")')
+        or diag("got: ".Dumper($hash_ref)."\nbut expected ".Dumper($selectall_hashref));
+
+    #########################
+    $ary_ref = $ml->selectcol_arrayref($statement);
+    is_deeply($ary_ref, $selectcol_arrayref1, 'selectcol_arrayref($statement)')
+        or diag("got: ".Dumper($ary_ref)."\nbut expected ".Dumper($selectcol_arrayref1));
+
+    #########################
+    $ary_ref = $ml->selectcol_arrayref($statement, { Columns=>[1,2] });
+    is_deeply($ary_ref, $selectcol_arrayref2, 'selectcol_arrayref($statement, { Columns=>[1,2] })')
+        or diag("got: ".Dumper($ary_ref)."\nbut expected ".Dumper($selectcol_arrayref2));
+
+    $ary_ref = $ml->selectcol_arrayref($statement, { Columns=>[1,2,3] });
+    is_deeply($ary_ref, $selectcol_arrayref3, 'selectcol_arrayref($statement, { Columns=>[1,2,3] })')
+        or diag("got: ".Dumper($ary_ref)."\nbut expected ".Dumper($selectcol_arrayref3));
+
+    #########################
+    my @row_ary = $ml->selectrow_array($statement);
+    is_deeply(\@row_ary, \@selectrow_array, 'selectrow_array($statement)')
+        or diag("got: ".Dumper(\@row_ary)."\nbut expected ".Dumper(\@selectrow_array));
+
+    #########################
+    $ary_ref = $ml->selectrow_arrayref($statement);
+    is_deeply($ary_ref, $selectrow_arrayref, 'selectrow_arrayref($statement)')
+        or diag("got: ".Dumper($ary_ref)."\nbut expected ".Dumper($selectrow_arrayref));
+
+    #########################
+    $hash_ref = $ml->selectrow_hashref($statement);
+    is_deeply($hash_ref, $selectrow_hashref, 'selectrow_hashref($statement)')
+        or diag("got: ".Dumper($hash_ref)."\nbut expected ".Dumper($selectrow_hashref));
+
+    ##################################################
+    # stats queries
+    ##################################################
+    $ary_ref = $ml->selectall_arrayref($stats_statement);
+    is_deeply($ary_ref, $stats_selectall_arrayref1, 'selectall_arrayref($stats_statement)')
+        or diag("got: ".Dumper($ary_ref)."\nbut expected ".Dumper($stats_selectall_arrayref1));
+
+    $ary_ref = $ml->selectall_arrayref($stats_statement, { Slice => {} });
+    is_deeply($ary_ref, $stats_selectall_arrayref2, 'selectall_arrayref($stats_statement, { Slice => {} })')
+        or diag("got: ".Dumper($ary_ref)."\nbut expected ".Dumper($stats_selectall_arrayref2));
+
+    $ary_ref = $ml->selectcol_arrayref($stats_statement);
+    is_deeply($ary_ref, $stats_selectcol_arrayref, 'selectcol_arrayref($stats_statement)')
+        or diag("got: ".Dumper($ary_ref)."\nbut expected ".Dumper($stats_selectcol_arrayref));
+
+    @row_ary = $ml->selectrow_array($stats_statement);
+    is_deeply(\@row_ary, \@stats_selectrow_array, 'selectrow_arrayref($stats_statement)')
+        or diag("got: ".Dumper(\@row_ary)."\nbut expected ".Dumper(\@stats_selectrow_array));
+
+    $ary_ref = $ml->selectrow_arrayref($stats_statement);
+    is_deeply($ary_ref, $stats_selectrow_arrayref, 'selectrow_arrayref($stats_statement)')
+        or diag("got: ".Dumper($ary_ref)."\nbut expected ".Dumper($stats_selectrow_arrayref));
+
+    $hash_ref = $ml->selectrow_hashref($stats_statement);
+    is_deeply($hash_ref, $stats_selectrow_hashref, 'selectrow_hashref($stats_statement)')
+        or diag("got: ".Dumper($hash_ref)."\nbut expected ".Dumper($stats_selectrow_hashref));
+
+    my $scal = $ml->selectscalar_value($single_statement);
+    is($scal, $selectscalar_value, 'selectscalar_value($single_statement)')
+        or diag("got: ".Dumper($scal)."\nbut expected ".Dumper($selectscalar_value));
+
+    ##################################################
+    # empty queries
+    ##################################################
+    $ary_ref = $ml->selectall_arrayref($empty_statement);
+    is_deeply($ary_ref, $empty_selectall_arrayref, 'selectall_arrayref($empty_statement)')
+        or diag("got: ".Dumper($ary_ref)."\nbut expected ".Dumper($empty_selectall_arrayref));
+
+    $ary_ref = $ml->selectcol_arrayref($empty_statement);
+    is_deeply($ary_ref, $empty_selectcol_arrayref, 'selectcol_arrayref($empty_statement)')
+        or diag("got: ".Dumper($ary_ref)."\nbut expected ".Dumper($empty_selectcol_arrayref));
+
+    @row_ary = $ml->selectrow_array($empty_statement);
+    is_deeply(\@row_ary, \@empty_selectrow_array, 'selectrow_arrayref($empty_statement)')
+        or diag("got: ".Dumper(\@row_ary)."\nbut expected ".Dumper(\@empty_selectrow_array));
+
+    $ary_ref = $ml->selectrow_arrayref($empty_statement);
+    is_deeply($ary_ref, $empty_selectrow_arrayref, 'selectrow_arrayref($empty_statement)')
+        or diag("got: ".Dumper($ary_ref)."\nbut expected ".Dumper($empty_selectrow_arrayref));
+
+    $hash_ref = $ml->selectrow_hashref($empty_statement);
+    is_deeply($hash_ref, $empty_selectrow_hashref, 'selectrow_hashref($empty_statement)')
+        or diag("got: ".Dumper($hash_ref)."\nbut expected ".Dumper($empty_selectrow_hashref));
+
+    ##################################################
+    # empty rows and columns
+    ##################################################
+    my $empty_hostgroups_stm = "GET hostgroups\nColumns: members";
+    $ary_ref = $ml->selectall_arrayref($empty_hostgroups_stm);
+    is_deeply($ary_ref, $test_hostgroups, 'selectall_arrayref($empty_hostgroups_stm)')
+        or diag("got: ".Dumper($ary_ref)."\nbut expected ".Dumper($test_hostgroups));
+
+}
+
+##################################################
+# exit threads
+$thr1->kill('KILL')->detach();
+$thr2->kill('KILL')->detach();
+exit;
+
+
+#########################
+# SUBS
+#########################
+# test socket server
+sub create_socket {
+    my $type = shift;
+    my $listener;
+
+    $SIG{'KILL'} = sub { threads->exit(); };
+
+    if($type eq 'unix') {
+        print "creating unix socket\n";
+        $listener = IO::Socket::UNIX->new(
+                                        Type   => SOCK_STREAM,
+                                        Listen => SOMAXCONN,
+                                        Local  => $socket_path,
+                                    ) or die("failed to open $socket_path as test socket: $!");
+    }
+    elsif($type eq 'inet') {
+        print "creating tcp socket\n";
+        $listener = IO::Socket::INET->new(
+                                        LocalAddr => $server,
+                                        Proto     => 'tcp',
+                                        Listen    => 1,
+                                        Reuse     => 1,
+                                    ) or die("failed to listen on $server: $!");
+    } else {
+        die("unknown type");
+    }
+    while( my $socket = $listener->accept() or die("cannot accept: $!") ) {
+        my $recv = "";
+        while(<$socket>) { $recv .= $_; last if $_ eq "\n" }
+        my $data;
+        my $status = 200;
+        if($recv =~ m/^GET .*?\s+Filter:.*?empty/m) {
+            $data = '';
+        }
+        elsif($recv =~ m/^GET hosts\s+Columns: alias/m) {
+            my @data = @{$test_data}[1..3];
+            $data = encode_json(\@data)."\n";
+        }
+        elsif($recv =~ m/^GET hosts\s+Columns: name/m) {
+            $data = encode_json(\@{$test_data}[1..3])."\n";
+        }
+        elsif($recv =~ m/^GET hosts/) {
+            $data = encode_json($test_data)."\n";
+        }
+        elsif($recv =~ m/^GET hostgroups/) {
+            $data = encode_json(\@{$test_hostgroups})."\n";
+        }
+        elsif($recv =~ m/^GET services/ and $recv =~ m/Stats:/m) {
+            $data = encode_json(\@{$stats_data})."\n";
+        }
+        my $content_length = sprintf("%11s", length($data));
+        print $socket $status." ".$content_length."\n";
+        print $socket $data;
+        close($socket);
+    }
+    unlink($socket_path);
+}
diff -Nru check-mk-1.2.2p3/=unpacked-tar11=/api/perl/t/21-Monitoring-Livestatus-INET.t check-mk-1.2.6p12/=unpacked-tar11=/api/perl/t/21-Monitoring-Livestatus-INET.t
--- check-mk-1.2.2p3/=unpacked-tar11=/api/perl/t/21-Monitoring-Livestatus-INET.t 1970-01-01 00:00:00.000000000 +0000
+++ check-mk-1.2.6p12/=unpacked-tar11=/api/perl/t/21-Monitoring-Livestatus-INET.t 2012-10-04 13:12:37.000000000 +0000
@@ -0,0 +1,30 @@
+#!/usr/bin/env perl
+
+#########################
+
+use strict;
+use Test::More tests => 3;
+use IO::Socket::INET;
+BEGIN { use_ok('Monitoring::Livestatus::INET') };
+
+#########################
+# create a tmp listener
+my $server = 'localhost:9999';
+my $listener = IO::Socket::INET->new(
+                ) or die("failed to open port as test listener: $!");
+#########################
+# create object with single arg
+my $ml = Monitoring::Livestatus::INET->new( $server );
+isa_ok($ml, 'Monitoring::Livestatus', 'Monitoring::Livestatus::INET->new()');
+
+#########################
+# create object with hash args
+my $line_seperator = 10;
+my $column_seperator = 0;
+$ml = Monitoring::Livestatus::INET->new(
+                                    verbose => 0,
+                                    server => $server,
+                                    line_seperator => $line_seperator,
+                                    column_seperator => $column_seperator,
+                                );
+isa_ok($ml, 'Monitoring::Livestatus', 'Monitoring::Livestatus::INET->new(%args)');
diff -Nru check-mk-1.2.2p3/=unpacked-tar11=/api/perl/t/22-Monitoring-Livestatus-UNIX.t check-mk-1.2.6p12/=unpacked-tar11=/api/perl/t/22-Monitoring-Livestatus-UNIX.t
--- check-mk-1.2.2p3/=unpacked-tar11=/api/perl/t/22-Monitoring-Livestatus-UNIX.t 1970-01-01 00:00:00.000000000 +0000
+++ check-mk-1.2.6p12/=unpacked-tar11=/api/perl/t/22-Monitoring-Livestatus-UNIX.t 2012-10-04 13:12:37.000000000 +0000
@@ -0,0 +1,26 @@
+#!/usr/bin/env perl
+
+#########################
+
+use strict;
+use Test::More tests => 3;
+use IO::Socket::INET;
+BEGIN { use_ok('Monitoring::Livestatus::UNIX') };
+
+#########################
+# create object with single arg
+my $socket = "/tmp/blah.socket";
+my $ml = Monitoring::Livestatus::UNIX->new( $socket );
+isa_ok($ml, 'Monitoring::Livestatus', 'Monitoring::Livestatus::UNIX->new()');
+
+#########################
+# create object with hash args
+my $line_seperator = 10;
+my $column_seperator = 0;
+$ml = Monitoring::Livestatus::UNIX->new(
+                                    verbose => 0,
+                                    socket => $socket,
+                                    line_seperator => $line_seperator,
+                                    column_seperator => $column_seperator,
+                                );
+isa_ok($ml, 'Monitoring::Livestatus', 'Monitoring::Livestatus::UNIX->new(%args)');
diff -Nru check-mk-1.2.2p3/=unpacked-tar11=/api/perl/t/30-Monitoring-Livestatus-live-test.t check-mk-1.2.6p12/=unpacked-tar11=/api/perl/t/30-Monitoring-Livestatus-live-test.t
--- check-mk-1.2.2p3/=unpacked-tar11=/api/perl/t/30-Monitoring-Livestatus-live-test.t 1970-01-01 00:00:00.000000000 +0000
+++ check-mk-1.2.6p12/=unpacked-tar11=/api/perl/t/30-Monitoring-Livestatus-live-test.t 2012-10-04 13:12:37.000000000 +0000
@@ -0,0 +1,472 @@
+#!/usr/bin/env perl
+
+#########################
+
+use strict;
+use Test::More;
+use Data::Dumper;
+
+if ( ! defined $ENV{TEST_SOCKET} or !defined $ENV{TEST_SERVER} ) {
+    my $msg = 'Author test. Set $ENV{TEST_SOCKET} and $ENV{TEST_SERVER} to run';
+    plan( skip_all => $msg );
+} else {
+    plan( tests => 727 );
+}
+
+# set an alarm
+my $lastquery;
+$SIG{ALRM} = sub {
+    my @caller = caller;
+    print STDERR 'last query: '.$lastquery if defined $lastquery;
+    die "timeout reached:".Dumper(\@caller)."\n"
+};
+alarm(120);
+
+use_ok('Monitoring::Livestatus');
+
+#########################
+my $line_seperator   = 10;
+my $column_seperator = 0;
+my $objects_to_test = {
+  # UNIX
+  # create unix object with a single arg
+#  '01 unix_single_arg' => Monitoring::Livestatus::UNIX->new( $ENV{TEST_SOCKET} ),
+
+  # create unix object with hash args
+  '02 unix_few_args' => Monitoring::Livestatus->new(
+                                      #verbose => 1,
+                                      socket => $ENV{TEST_SOCKET},
+                                      line_seperator => $line_seperator,
+                                      column_seperator => $column_seperator,
+                                    ),
+
+  # create unix object with hash args
+  '03 unix_keepalive' => Monitoring::Livestatus->new(
+                                      verbose   => 0,
+                                      socket    => $ENV{TEST_SOCKET},
+                                      keepalive => 1,
+                                    ),
+
+  # TCP
+  # create inet object with a single arg
+  '04 inet_single_arg' => Monitoring::Livestatus::INET->new( $ENV{TEST_SERVER} ),
+
+  # create inet object with hash args
+  '05 inet_few_args' => Monitoring::Livestatus->new(
+                                      verbose => 0,
+                                      server => $ENV{TEST_SERVER},
+                                      line_seperator => $line_seperator,
+                                      column_seperator => $column_seperator,
+                                    ),
+
+
+  # create inet object with keepalive
+  '06 inet_keepalive' => Monitoring::Livestatus->new(
+                                      verbose   => 0,
+                                      server    => $ENV{TEST_SERVER},
+                                      keepalive => 1,
+                                    ),
+
+  # create multi single args
+  '07 multi_keepalive' => Monitoring::Livestatus->new( [ $ENV{TEST_SERVER}, $ENV{TEST_SOCKET} ] ),
+
+  # create multi object with keepalive
+  '08 multi_keepalive_hash_args' => Monitoring::Livestatus->new(
+                                      verbose   => 0,
+                                      peer      => [ $ENV{TEST_SERVER}, $ENV{TEST_SOCKET} ],
+                                      keepalive => 1,
+                                    ),
+
+  # create multi object without keepalive
+  '09 multi_no_keepalive' => Monitoring::Livestatus->new(
+                                      peer      => [ $ENV{TEST_SERVER}, $ENV{TEST_SOCKET} ],
+                                      keepalive => 0,
+                                    ),
+
+  # create multi object without threads
+  '10 multi_no_threads' => Monitoring::Livestatus->new(
+                                      peer        => [ $ENV{TEST_SERVER}, $ENV{TEST_SOCKET} ],
+                                      use_threads => 0,
+                                    ),
+
+  # create multi object with only one peer
+  '11 multi_one_peer' => Monitoring::Livestatus::MULTI->new(
+                                      peer => $ENV{TEST_SERVER},
+                                    ),
+
+  # create multi object with two peers
+  '12 multi_two_peers' => Monitoring::Livestatus::MULTI->new(
+                                      peer => [ $ENV{TEST_SERVER}, $ENV{TEST_SOCKET} ],
+                                    ),
+};
+
+my $expected_keys = {
+  'columns' => [
+        'description','name','table','type'
+  ],
+  'commands' => [
+        'line','name'
+  ],
+  'comments' => [
+        '__all_from_hosts__', '__all_from_services__',
+        'author','comment','entry_time','entry_type','expire_time','expires', 'id','persistent',
+        'source','type'
+  ],
+  'contacts' => [
+        'address1','address2','address3','address4','address5','address6','alias',
+        'can_submit_commands','custom_variable_names','custom_variable_values','email',
+        'host_notification_period','host_notifications_enabled','in_host_notification_period',
+        'in_service_notification_period','name','modified_attributes','modified_attributes_list',
+        'pager','service_notification_period','service_notifications_enabled'
+  ],
+  'contactgroups' => [ 'name', 'alias', 'members' ],
+  'downtimes' => [
+        '__all_from_hosts__', '__all_from_services__',
+        'author','comment','duration','end_time','entry_time','fixed','id','start_time',
+        'triggered_by','type'
+  ],
+  'hostgroups' => [
+        'action_url','alias','members','name','members_with_state','notes','notes_url','num_hosts','num_hosts_down',
+        'num_hosts_pending','num_hosts_unreach','num_hosts_up','num_services','num_services_crit',
+        'num_services_hard_crit','num_services_hard_ok','num_services_hard_unknown',
+        'num_services_hard_warn','num_services_ok','num_services_pending','num_services_unknown',
+        'num_services_warn','worst_host_state','worst_service_hard_state','worst_service_state'
+  ],
+  'hosts' => [
+        'accept_passive_checks','acknowledged','acknowledgement_type','action_url','action_url_expanded',
+        'active_checks_enabled','address','alias','check_command','check_freshness','check_interval',
+        'check_options','check_period','check_type','checks_enabled','childs','comments','comments_with_info',
+        'contacts','current_attempt','current_notification_number','custom_variable_names',
+        'custom_variable_values','display_name','downtimes','downtimes_with_info','event_handler_enabled',
+        'execution_time','first_notification_delay','flap_detection_enabled','groups','hard_state','has_been_checked',
+        'high_flap_threshold','icon_image','icon_image_alt','icon_image_expanded','in_check_period',
+        'in_notification_period','initial_state','is_executing','is_flapping','last_check','last_hard_state',
+        'last_hard_state_change','last_notification','last_state','last_state_change','latency','last_time_down',
+        'last_time_unreachable','last_time_up','long_plugin_output','low_flap_threshold','max_check_attempts','name',
+        'modified_attributes','modified_attributes_list','next_check',
+        'next_notification','notes','notes_expanded','notes_url','notes_url_expanded','notification_interval',
+        'notification_period','notifications_enabled','num_services','num_services_crit','num_services_hard_crit',
+        'num_services_hard_ok','num_services_hard_unknown','num_services_hard_warn','num_services_ok',
+        'num_services_pending','num_services_unknown','num_services_warn','obsess_over_host','parents',
+        'pending_flex_downtime','percent_state_change','perf_data','plugin_output',
+        'process_performance_data','retry_interval','scheduled_downtime_depth','services','services_with_state',
+        'state','state_type','statusmap_image','total_services','worst_service_hard_state','worst_service_state',
+        'x_3d','y_3d','z_3d'
+  ],
+  'hostsbygroup' => [
+        '__all_from_hosts__', '__all_from_hostgroups__'
+  ],
+  'log' => [
+        '__all_from_hosts__','__all_from_services__','__all_from_contacts__','__all_from_commands__',
+        'attempt','class','command_name','comment','contact_name','host_name','lineno','message','options',
+        'plugin_output','service_description','state','state_type','time','type'
+  ],
+  'servicegroups' => [
+        'action_url','alias','members','name','members_with_state','notes','notes_url','num_services','num_services_crit',
+        'num_services_hard_crit','num_services_hard_ok','num_services_hard_unknown',
+        'num_services_hard_warn','num_services_ok','num_services_pending','num_services_unknown',
+        'num_services_warn','worst_service_state'
+  ],
+  'servicesbygroup' => [
+        '__all_from_services__', '__all_from_hosts__', '__all_from_servicegroups__'
+  ],
+  'services' => [
+        '__all_from_hosts__',
+        'accept_passive_checks','acknowledged','acknowledgement_type','action_url','action_url_expanded',
+        'active_checks_enabled','check_command','check_interval','check_options','check_period',
+        'check_type','checks_enabled','comments','comments_with_info','contacts','current_attempt',
+        'current_notification_number','custom_variable_names','custom_variable_values',
+        'description','display_name','downtimes','downtimes_with_info','event_handler','event_handler_enabled',
+        'execution_time','first_notification_delay','flap_detection_enabled','groups',
+        'has_been_checked','high_flap_threshold','icon_image','icon_image_alt','icon_image_expanded','in_check_period',
+        'in_notification_period','initial_state','is_executing','is_flapping','last_check',
+        'last_hard_state','last_hard_state_change','last_notification','last_state',
+        'last_state_change','latency','last_time_critical','last_time_ok','last_time_unknown','last_time_warning',
+        'long_plugin_output','low_flap_threshold','max_check_attempts','modified_attributes','modified_attributes_list',
+        'next_check','next_notification','notes','notes_expanded','notes_url','notes_url_expanded',
+        'notification_interval','notification_period','notifications_enabled','obsess_over_service',
+        'percent_state_change','perf_data','plugin_output','process_performance_data','retry_interval',
+        'scheduled_downtime_depth','state','state_type'
+  ],
+  'servicesbyhostgroup' => [
+        '__all_from_services__', '__all_from_hosts__', '__all_from_hostgroups__'
+  ],
+  'status' => [
+        'accept_passive_host_checks','accept_passive_service_checks','cached_log_messages',
+        'check_external_commands','check_host_freshness','check_service_freshness','connections',
+        'connections_rate','enable_event_handlers','enable_flap_detection','enable_notifications',
+        'execute_host_checks','execute_service_checks','forks','forks_rate','host_checks','host_checks_rate','interval_length',
+        'last_command_check','last_log_rotation','livestatus_version','log_messages','log_messages_rate','nagios_pid','neb_callbacks',
+        'neb_callbacks_rate','obsess_over_hosts','obsess_over_services','process_performance_data',
+        'program_start','program_version','requests','requests_rate','service_checks','service_checks_rate'
+  ],
+  'timeperiods' => [ 'in', 'name', 'alias' ],
+};
+
+my $author = 'Monitoring::Livestatus test';
+for my $key (sort keys %{$objects_to_test}) {
+    my $ml = $objects_to_test->{$key};
+    isa_ok($ml, 'Monitoring::Livestatus') or BAIL_OUT("no need to continue without a proper Monitoring::Livestatus object: ".$key);
+
+    # don't die on errors
+    $ml->errors_are_fatal(0);
+    $ml->warnings(0);
+
+    #########################
+    # set downtime for a host and service
+    my $downtimes = $ml->selectall_arrayref("GET downtimes\nColumns: id");
+    my $num_downtimes = 0;
+    $num_downtimes = scalar @{$downtimes} if defined $downtimes;
+    my $firsthost = $ml->selectscalar_value("GET hosts\nColumns: name\nLimit: 1");
+    isnt($firsthost, undef, 'get test hostname') or BAIL_OUT($key.': got no test hostname');
+    $ml->do('COMMAND ['.time().'] SCHEDULE_HOST_DOWNTIME;'.$firsthost.';'.time().';'.(time()+300).';1;0;300;'.$author.';perl test: '.$0);
+    my $firstservice = $ml->selectscalar_value("GET services\nColumns: description\nFilter: host_name = $firsthost\nLimit: 1");
+    isnt($firstservice, undef, 'get test servicename') or BAIL_OUT('got no test servicename');
+    $ml->do('COMMAND ['.time().'] SCHEDULE_SVC_DOWNTIME;'.$firsthost.';'.$firstservice.';'.time().';'.(time()+300).';1;0;300;'.$author.';perl test: '.$0);
+    # sometimes it takes a while until the downtime is accepted
+    my $waited = 0;
+    while(scalar @{$ml->selectall_arrayref("GET downtimes\nColumns: id")} < $num_downtimes + 2) {
+        print "waiting for the downtime...\n";
+        sleep(1);
+        $waited++;
+        BAIL_OUT('waited 30 seconds for the downtime...') if $waited > 30;
+    }
+    #########################
+
+    #########################
+    # check tables
+    my $data = $ml->selectall_hashref("GET columns\nColumns: table", 'table');
+    my @tables = sort keys %{$data};
+    my @expected_tables = sort keys %{$expected_keys};
+    is_deeply(\@tables, \@expected_tables, $key.' tables') or BAIL_OUT("got tables:\n".join(', ', @tables)."\nbut expected\n".join(', ', @expected_tables));
+
+    #########################
+    # check keys
+    for my $type (keys %{$expected_keys}) {
+        my $filter = "";
+        $filter  = "Filter: time > ".(time() - 86400)."\n" if $type eq 'log';
+        $filter .= "Filter: time < ".(time())."\n" if $type eq 'log';
+        my $expected_keys = get_expected_keys($type);
+        my $statement = "GET $type\n".$filter."Limit: 1";
+        $lastquery = $statement;
+        my $hash_ref = $ml->selectrow_hashref($statement );
+        undef $lastquery;
+        is(ref $hash_ref, 'HASH', $type.' keys are a hash') or BAIL_OUT($type.' keys are not in hash format, got '.Dumper($hash_ref));
+        my @keys = sort keys %{$hash_ref};
+        is_deeply(\@keys, $expected_keys, $key.' '.$type.' table columns') or BAIL_OUT("got $type keys:\n".join(', ', @keys)."\nbut expected\n".join(', ', @{$expected_keys}));
+    }
+
+    my $statement = "GET hosts\nColumns: name as hostname state\nLimit: 1";
+    $lastquery = $statement;
+    my $hash_ref = $ml->selectrow_hashref($statement);
+    undef $lastquery;
+    isnt($hash_ref, undef, $key.' test column alias');
+    is($Monitoring::Livestatus::ErrorCode, 0, $key.' test column alias') or
+        diag('got error: '.$Monitoring::Livestatus::ErrorMessage);
+
+    #########################
+    # send a test command
+    # commands still don't work and break livestatus
+    my $rt = $ml->do('COMMAND ['.time().'] SAVE_STATE_INFORMATION');
+    is($rt, '1', $key.' test command');
+
+    #########################
+    # check for errors
+    #$ml->{'verbose'} = 1;
+    $statement = "GET hosts\nLimit: 1";
+    $lastquery = $statement;
+    $hash_ref = $ml->selectrow_hashref($statement );
+    undef $lastquery;
+    isnt($hash_ref, undef, $key.' test error 200 body');
+    is($Monitoring::Livestatus::ErrorCode, 0, $key.' test error 200 status') or
+        diag('got error: '.$Monitoring::Livestatus::ErrorMessage);
+
+    $statement = "BLAH hosts";
+    $lastquery = $statement;
+    $hash_ref = $ml->selectrow_hashref($statement );
+    undef $lastquery;
+    is($hash_ref, undef, $key.' test error 401 body');
+    is($Monitoring::Livestatus::ErrorCode, '401', $key.' test error 401 status') or
+        diag('got error: '.$Monitoring::Livestatus::ErrorMessage);
+
+    $statement = "GET hosts\nLimit: ";
+    $lastquery = $statement;
+    $hash_ref = $ml->selectrow_hashref($statement );
+    undef $lastquery;
+    is($hash_ref, undef, $key.' test error 403 body');
+    is($Monitoring::Livestatus::ErrorCode, '403', $key.' test error 403 status') or
+        diag('got error: '.$Monitoring::Livestatus::ErrorMessage);
+
+    $statement = "GET unknowntable\nLimit: 1";
+    $lastquery = $statement;
+    $hash_ref = $ml->selectrow_hashref($statement );
+    undef $lastquery;
+    is($hash_ref, undef, $key.' test error 404 body');
+    is($Monitoring::Livestatus::ErrorCode, '404', $key.' test error 404 status') or
+        diag('got error: '.$Monitoring::Livestatus::ErrorMessage);
+
+    $statement = "GET hosts\nColumns: unknown";
+    $lastquery = $statement;
+    $hash_ref = $ml->selectrow_hashref($statement );
+    undef $lastquery;
+    is($hash_ref, undef, $key.' test error 405 body');
+    TODO: {
+        local $TODO = 'livestatus returns wrong status';
+        is($Monitoring::Livestatus::ErrorCode, '405', $key.' test error 405 status') or
+            diag('got error: '.$Monitoring::Livestatus::ErrorMessage);
+    };
+
+    #########################
+    # some more broken statements
+    $statement = "GET ";
+    $lastquery = $statement;
+    $hash_ref = $ml->selectrow_hashref($statement);
+    undef $lastquery;
+    is($hash_ref, undef, $key.' test error 403 body');
+    is($Monitoring::Livestatus::ErrorCode, '403', $key.' test error 403 status: GET ') or
+        diag('got error: '.$Monitoring::Livestatus::ErrorMessage);
+
+    $statement = "GET hosts\nColumns: name, name";
+    $lastquery = $statement;
+    $hash_ref = $ml->selectrow_hashref($statement );
+    undef $lastquery;
+    is($hash_ref, undef, $key.' test error 405 body');
+    is($Monitoring::Livestatus::ErrorCode, '405', $key.' test error 405 status: GET hosts\nColumns: name, name') or
+        diag('got error: '.$Monitoring::Livestatus::ErrorMessage);
+
+    $statement = "GET hosts\nColumns: ";
+    $lastquery = $statement;
+    $hash_ref = $ml->selectrow_hashref($statement );
+    undef $lastquery;
+    is($hash_ref, undef, $key.' test error 405 body');
+    is($Monitoring::Livestatus::ErrorCode, '405', $key.' test error 405 status: GET hosts\nColumns: ') or
+        diag('got error: '.$Monitoring::Livestatus::ErrorMessage);
+
+    #########################
+    # some forbidden headers
+    $statement = "GET hosts\nKeepAlive: on";
+    $lastquery = $statement;
+    $hash_ref = $ml->selectrow_hashref($statement );
+    undef $lastquery;
+    is($hash_ref, undef, $key.' test error 496 body');
+    is($Monitoring::Livestatus::ErrorCode, '496', $key.' test error 496 status: KeepAlive: on') or
+        diag('got error: '.$Monitoring::Livestatus::ErrorMessage);
+
+    $statement = "GET hosts\nResponseHeader: fixed16";
+    $lastquery = $statement;
+    $hash_ref = $ml->selectrow_hashref($statement );
+    undef $lastquery;
+    is($hash_ref, undef, $key.' test error 495 body');
+    is($Monitoring::Livestatus::ErrorCode, '495', $key.' test error 495 status: ResponseHeader: fixed16') or
+        diag('got error: '.$Monitoring::Livestatus::ErrorMessage);
+
+    $statement = "GET hosts\nColumnHeaders: on";
+    $lastquery = $statement;
+    $hash_ref = $ml->selectrow_hashref($statement );
+    undef $lastquery;
+    is($hash_ref, undef, $key.' test error 494 body');
+    is($Monitoring::Livestatus::ErrorCode, '494', $key.' test error 494 status: ColumnHeader: on') or
+        diag('got error: '.$Monitoring::Livestatus::ErrorMessage);
+
+    $statement = "GET hosts\nOuputFormat: json";
+    $lastquery = $statement;
+    $hash_ref = $ml->selectrow_hashref($statement );
+    undef $lastquery;
+    is($hash_ref, undef, $key.' test error 493 body');
+    is($Monitoring::Livestatus::ErrorCode, '493', $key.' test error 493 status: OutputForma: json') or
+        diag('got error: '.$Monitoring::Livestatus::ErrorMessage);
+
+    $statement = "GET hosts\nSeparators: 0 1 2 3";
+    $lastquery = $statement;
+    $hash_ref = $ml->selectrow_hashref($statement );
+    undef $lastquery;
+    is($hash_ref, undef, $key.' test error 492 body');
+    is($Monitoring::Livestatus::ErrorCode, '492', $key.' test error 492 status: Seperators: 0 1 2 3') or
+        diag('got error: '.$Monitoring::Livestatus::ErrorMessage);
+
+
+    #########################
+    # check some fancy stats queries
+    my $stats_query = "GET services
+Stats: state = 0 as all_ok
+Stats: state = 1 as all_warning
+Stats: state = 2 as all_critical
+Stats: state = 3 as all_unknown
+Stats: state = 4 as all_pending
+Stats: host_state != 0
+Stats: state = 1
+StatsAnd: 2 as all_warning_on_down_hosts
+Stats: host_state != 0
+Stats: state = 2
+StatsAnd: 2 as all_critical_on_down_hosts
+Stats: host_state != 0
+Stats: state = 3
+StatsAnd: 2 as all_unknown_on_down_hosts
+Stats: host_state != 0
+Stats: state = 3
+Stats: active_checks_enabled = 1
+StatsAnd: 3 as all_unknown_active_on_down_hosts
+Stats: state = 3
+Stats: active_checks_enabled = 1
+StatsOr: 2 as all_active_or_unknown";
+    $lastquery = $stats_query;
+    $hash_ref = $ml->selectrow_hashref($stats_query );
+    undef $lastquery;
+    isnt($hash_ref, undef, $key.' test fancy stats query') or
+        diag('got error: '.Dumper($hash_ref));
+}
+
+
+
+# generate expected keys
+sub get_expected_keys {
+    my $type = shift;
+    my $skip = shift;
+    my @keys = @{$expected_keys->{$type}};
+
+    my @new_keys;
+    for my $key (@keys) {
+        my $replaced = 0;
+        for my $replace_with (keys %{$expected_keys}) {
+            if($key eq '__all_from_'.$replace_with.'__') {
+                $replaced = 1;
+                next if $skip;
+                my $prefix = $replace_with.'_';
+                if($replace_with eq "hosts")         { $prefix = 'host_'; }
+                if($replace_with eq "services")      { $prefix = 'service_'; }
+                if($replace_with eq "commands")      { $prefix = 'command_'; }
+                if($replace_with eq "contacts")      { $prefix = 'contact_'; }
+                if($replace_with eq "servicegroups") { $prefix = 'servicegroup_'; }
+                if($replace_with eq "hostgroups")    { $prefix = 'hostgroup_'; }
+
+                if($type eq "log") { $prefix = 'current_'.$prefix; }
+
+                if($type eq "servicesbygroup" and $replace_with eq 'services') { $prefix = ''; }
+                if($type eq "servicesbyhostgroup" and $replace_with eq 'services') { $prefix = ''; }
+                if($type eq "hostsbygroup" and $replace_with eq 'hosts') { $prefix = ''; }
+
+                my $replace_keys = get_expected_keys($replace_with, 1);
+                for my $key2 (@{$replace_keys}) {
+                    push @new_keys, $prefix.$key2;
+                }
+            }
+        }
+        if($replaced == 0) {
+            push @new_keys, $key;
+        }
+    }
+
+    # has been fixed in 1.1.1rc
+    #if($type eq 'log') {
+    #    my %keys = map { $_ => 1 } @new_keys;
+    #    delete $keys{'current_contact_can_submit_commands'};
+    #    delete $keys{'current_contact_host_notifications_enabled'};
+    #    delete $keys{'current_contact_in_host_notification_period'};
+    #    delete $keys{'current_contact_in_service_notification_period'};
+    #    delete $keys{'current_contact_service_notifications_enabled'};
+    #    @new_keys = keys %keys;
+    #}
+
+    my @return = sort @new_keys;
+    return(\@return);
+}
diff -Nru check-mk-1.2.2p3/=unpacked-tar11=/api/perl/t/31-Monitoring-Livestatus-MULTI-live-test.t check-mk-1.2.6p12/=unpacked-tar11=/api/perl/t/31-Monitoring-Livestatus-MULTI-live-test.t
--- check-mk-1.2.2p3/=unpacked-tar11=/api/perl/t/31-Monitoring-Livestatus-MULTI-live-test.t 1970-01-01 00:00:00.000000000 +0000
+++ check-mk-1.2.6p12/=unpacked-tar11=/api/perl/t/31-Monitoring-Livestatus-MULTI-live-test.t 2012-10-04 13:12:37.000000000 +0000
@@ -0,0 +1,95 @@
+#!/usr/bin/env perl
+
+#########################
+
+use strict;
+use Test::More;
+use Data::Dumper;
+
+if ( ! defined $ENV{TEST_SOCKET} or !defined $ENV{TEST_SERVER} ) {
+    my $msg = 'Author test. Set $ENV{TEST_SOCKET} and $ENV{TEST_SERVER} to run';
+    plan( skip_all => $msg );
+} else {
+    plan( tests => 22 );
+}
+
+use_ok('Monitoring::Livestatus::MULTI');
+
+#########################
+# create new test object
+my $objects_to_test = {
+  'multi_one'   => Monitoring::Livestatus::MULTI->new( peer => [ $ENV{TEST_SERVER} ], warnings => 0 ),
+  'multi_two'   => Monitoring::Livestatus::MULTI->new( peer => [ $ENV{TEST_SERVER}, $ENV{TEST_SOCKET} ], warnings => 0 ),
+  'multi_three' => Monitoring::Livestatus::MULTI->new(
+                      'verbose'   => '0',
+                      'warnings'  => '0',
+                      'timeout'   => '10',
+                      'peer'      => [
+                          { 'name' => 'Mon 1', 'peer' => $ENV{TEST_SERVER} },
+                          { 'name' => 'Mon 2', 'peer' => $ENV{TEST_SOCKET} },
+                      ],
+                      'keepalive' => '1'
+                  ),
+};
+
+# don't die on errors
+#$ml->errors_are_fatal(0);
+
+for my $key (keys %{$objects_to_test}) {
+    my $ml = $objects_to_test->{$key};
+    isa_ok($ml, 'Monitoring::Livestatus::MULTI') or BAIL_OUT("no need to continue without a proper Monitoring::Livestatus::MULTI object");
+
+    #########################
+    # DATA INTEGRITY
+    #########################
+
+    my $statement = "GET hosts\nColumns: state name alias\nLimit: 1";
+    my $data1 = $ml->selectall_arrayref($statement, {Slice => 1});
+    my $data2 = $ml->selectall_arrayref($statement, {Slice => 1, AddPeer => 1});
+    for my $data (@{$data2}) {
+        delete $data->{'peer_name'};
+        delete $data->{'peer_addr'};
+        delete $data->{'peer_key'};
+    }
+    is_deeply($data1, $data2, "data integrity with peers added and Column");
+
+    $statement = "GET hosts\nLimit: 1";
+    $data1 = $ml->selectall_arrayref($statement, {Slice => 1, Deepcopy => 1});
+    $data2 = $ml->selectall_arrayref($statement, {Slice => 1, AddPeer => 1, Deepcopy => 1});
+    for my $data (@{$data2}) {
+        delete $data->{'peer_name'};
+        delete $data->{'peer_addr'};
+        delete $data->{'peer_key'};
+    }
+    is_deeply($data1, $data2, "data integrity with peers added without Columns");
+
+    #########################
+    # try to change result set to a scalar
+    for my $data (@{$data1}) { $data->{'peer_name'} = 1; }
+    for my $data (@{$data2}) { $data->{'peer_name'} = 1; }
+    is_deeply($data1, $data2, "data integrity with changed result set");
+
+    #########################
+    # try to change result set to a hash
+    for my $data (@{$data1}) { $data->{'peer_name'} = {}; }
+    for my $data (@{$data2}) { $data->{'peer_name'} = {}; }
+    is_deeply($data1, $data2, "data integrity with changed result set");
+
+    #########################
+    # BACKENDS
+    #########################
+    my @backends = $ml->peer_key();
+    $data1 = $ml->selectall_arrayref($statement, {Slice => 1});
+    $data2 = $ml->selectall_arrayref($statement, {Slice => 1, Backend => \@backends });
+    is_deeply($data1, $data2, "data integrity with backends");
+
+    #########################
+    # BUGS
+    #########################
+
+    #########################
+    # Bug: Can't use string ("flap") as an ARRAY ref while "strict refs" in use at Monitoring/Livestatus/MULTI.pm line 206.
+ $statement = "GET servicegroups\nColumns: name alias\nFilter: name = flap\nLimit: 1"; + $data1 = $ml->selectrow_array($statement); + isnt($data1, undef, "bug check: Can't use string (\"group\")..."); +} diff -Nru check-mk-1.2.2p3/=unpacked-tar11=/api/perl/t/32-Monitoring-Livestatus-backend-test.t check-mk-1.2.6p12/=unpacked-tar11=/api/perl/t/32-Monitoring-Livestatus-backend-test.t --- check-mk-1.2.2p3/=unpacked-tar11=/api/perl/t/32-Monitoring-Livestatus-backend-test.t 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/=unpacked-tar11=/api/perl/t/32-Monitoring-Livestatus-backend-test.t 2012-10-04 13:12:37.000000000 +0000 @@ -0,0 +1,106 @@ +#!/usr/bin/env perl + +######################### + +use strict; +use Carp; +use Test::More; +use Data::Dumper; + +if ( ! defined $ENV{TEST_SOCKET} or !defined $ENV{TEST_SERVER} or !defined $ENV{TEST_BACKEND} ) { + my $msg = 'Author test. Set $ENV{TEST_SOCKET} and $ENV{TEST_SERVER} and $ENV{TEST_BACKEND} to run'; + plan( skip_all => $msg ); +} else { + # we dont know yet how many tests we got + plan( tests => 55237 ); +} + +# set an alarm +my $lastquery; +$SIG{ALRM} = sub { + my @caller = caller; + $lastquery =~ s/\n+/\n/g; + print STDERR 'last query: '.$lastquery."\n" if defined $lastquery; + confess "timeout reached:".Dumper(\@caller)."\n" +}; + +use_ok('Monitoring::Livestatus'); + +######################### +my $objects_to_test = { + # UNIX + '01 unix_single_arg' => Monitoring::Livestatus::UNIX->new( $ENV{TEST_SOCKET} ), + + # TCP + '02 inet_single_arg' => Monitoring::Livestatus::INET->new( $ENV{TEST_SERVER} ), + + # MULTI + '03 multi_keepalive' => Monitoring::Livestatus->new( [ $ENV{TEST_SERVER}, $ENV{TEST_SOCKET} ] ), +}; + +for my $key (sort keys %{$objects_to_test}) { + my $ml = $objects_to_test->{$key}; + isa_ok($ml, 'Monitoring::Livestatus') or BAIL_OUT("no need to continue without a proper Monitoring::Livestatus object: ".$key); + + # dont die on errors + $ml->errors_are_fatal(0); + $ml->warnings(0); + + ######################### + # get tables + my $data = $ml->selectall_hashref("GET columns\nColumns: table", 'table'); + my @tables = sort keys %{$data}; + + ######################### + # check keys + for my $type (@tables) { + alarm(120); + my $filter = ""; + $filter = "Filter: time > ".(time() - 86400)."\n" if $type eq 'log'; + $filter .= "Filter: time < ".(time())."\n" if $type eq 'log'; + my $statement = "GET $type\n".$filter."Limit: 1"; + $lastquery = $statement; + my $keys = $ml->selectrow_hashref($statement ); + undef $lastquery; + is(ref $keys, 'HASH', $type.' keys are a hash');# or BAIL_OUT('keys are not in hash format, got '.Dumper($keys)); + + # status has no filter implemented + next if $type eq 'status'; + + for my $key (keys %{$keys}) { + my $value = $keys->{$key}; + if(index($value, ',') > 0) { my @vals = split /,/, $value; $value = $vals[0]; } + my $typefilter = "Filter: $key >= $value\n"; + if($value eq '') { + $typefilter = "Filter: $key =\n"; + } + my $statement = "GET $type\n".$filter.$typefilter."Limit: 1"; + $lastquery = $statement; + my $hash_ref = $ml->selectrow_hashref($statement ); + undef $lastquery; + is($Monitoring::Livestatus::ErrorCode, 0, "GET ".$type." Filter: ".$key." >= ".$value) or BAIL_OUT("query failed: ".$statement); + #isnt($hash_ref, undef, "GET ".$type." Filter: ".$key." >= ".$value);# or BAIL_OUT("got undef for ".$statement); + + # send test stats query + my $stats_query = [ $key.' 
= '.$value, 'std '.$key, 'min '.$key, 'max '.$key, 'avg '.$key, 'sum '.$key ]; + for my $stats_part (@{$stats_query}) { + my $statement = "GET $type\n".$filter.$typefilter."\nStats: $stats_part"; + $lastquery = $statement; + my $hash_ref = $ml->selectrow_hashref($statement ); + undef $lastquery; + is($Monitoring::Livestatus::ErrorCode, 0, "GET ".$type." Filter: ".$key." >= ".$value." Stats: $stats_part") or BAIL_OUT("query failed:\n".$statement); + + $statement = "GET $type\n".$filter.$typefilter."\nStats: $stats_part\nStatsGroupBy: $key"; + $lastquery = $statement; + $hash_ref = $ml->selectrow_hashref($statement ); + undef $lastquery; + is($Monitoring::Livestatus::ErrorCode, 0, "GET ".$type." Filter: ".$key." >= ".$value." Stats: $stats_part StatsGroupBy: $key") or BAIL_OUT("query failed:\n".$statement); + } + + # wait till backend is started up again + if(!defined $hash_ref and $Monitoring::Livestatus::ErrorCode > 200) { + sleep(2); + } + } + } +} diff -Nru check-mk-1.2.2p3/=unpacked-tar11=/api/perl/t/33-Monitoring-Livestatus-test_socket_timeout.t check-mk-1.2.6p12/=unpacked-tar11=/api/perl/t/33-Monitoring-Livestatus-test_socket_timeout.t --- check-mk-1.2.2p3/=unpacked-tar11=/api/perl/t/33-Monitoring-Livestatus-test_socket_timeout.t 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/=unpacked-tar11=/api/perl/t/33-Monitoring-Livestatus-test_socket_timeout.t 2012-10-04 13:12:37.000000000 +0000 @@ -0,0 +1,74 @@ +#!/usr/bin/env perl + +######################### + +use strict; +use Test::More; +use Data::Dumper; + +if ( !defined $ENV{TEST_SERVER} ) { + my $msg = 'Author test. Set $ENV{TEST_SOCKET} and $ENV{TEST_SERVER} to run'; + plan( skip_all => $msg ); +} else { + plan( tests => 7 ); +} + +# set an alarm +my $lastquery; +$SIG{ALRM} = sub { + my @caller = caller; + print STDERR 'last query: '.$lastquery if defined $lastquery; + die "timeout reached:".Dumper(\@caller)."\n" +}; +alarm(30); + +use_ok('Monitoring::Livestatus'); + +#use Log::Log4perl qw(:easy); +#Log::Log4perl->easy_init($DEBUG); + +######################### +# Test Query +######################### +my $statement = "GET hosts\nColumns: alias\nFilter: name = host1"; + +######################### +my $objects_to_test = { + # create inet object with hash args + '01 inet_hash_args' => Monitoring::Livestatus->new( + verbose => 0, + server => $ENV{TEST_SERVER}, + keepalive => 1, + timeout => 3, + retries_on_connection_error => 0, +# logger => get_logger(), + ), + + # create inet object with a single arg + '02 inet_single_arg' => Monitoring::Livestatus::INET->new( $ENV{TEST_SERVER} ), + +}; + +for my $key (sort keys %{$objects_to_test}) { + my $ml = $objects_to_test->{$key}; + isa_ok($ml, 'Monitoring::Livestatus'); + + # we dont need warnings for testing + $ml->warnings(0); + + ######################### + my $ary_ref = $ml->selectall_arrayref($statement); + is($Monitoring::Livestatus::ErrorCode, 0, 'Query Status 0'); + #is_deeply($ary_ref, $selectall_arrayref1, 'selectall_arrayref($statement)') + # or diag("got: ".Dumper($ary_ref)."\nbut expected ".Dumper($selectall_arrayref1)); + + sleep(10); + + $ary_ref = $ml->selectall_arrayref($statement); + is($Monitoring::Livestatus::ErrorCode, 0, 'Query Status 0'); + #is_deeply($ary_ref, $selectall_arrayref1, 'selectall_arrayref($statement)') + # or diag("got: ".Dumper($ary_ref)."\nbut expected ".Dumper($selectall_arrayref1)); + + #print Dumper($Monitoring::Livestatus::ErrorCode); + #print Dumper($Monitoring::Livestatus::ErrorMessage); +} diff -Nru 
check-mk-1.2.2p3/=unpacked-tar11=/api/perl/t/34-Monitoring-Livestatus-utf8_support.t check-mk-1.2.6p12/=unpacked-tar11=/api/perl/t/34-Monitoring-Livestatus-utf8_support.t
--- check-mk-1.2.2p3/=unpacked-tar11=/api/perl/t/34-Monitoring-Livestatus-utf8_support.t	1970-01-01 00:00:00.000000000 +0000
+++ check-mk-1.2.6p12/=unpacked-tar11=/api/perl/t/34-Monitoring-Livestatus-utf8_support.t	2012-10-04 13:12:37.000000000 +0000
@@ -0,0 +1,78 @@
+#!/usr/bin/env perl
+
+#########################
+
+use strict;
+use Encode;
+use Test::More;
+use Data::Dumper;
+
+if ( !defined $ENV{TEST_SERVER} ) {
+    my $msg = 'Author test. Set $ENV{TEST_SOCKET} and $ENV{TEST_SERVER} to run';
+    plan( skip_all => $msg );
+} else {
+    plan( tests => 9 );
+}
+
+use_ok('Monitoring::Livestatus');
+
+#use Log::Log4perl qw(:easy);
+#Log::Log4perl->easy_init($DEBUG);
+
+#########################
+my $objects_to_test = {
+  # create inet object with hash args
+  '01 inet_hash_args' => Monitoring::Livestatus->new(
+                                      verbose   => 0,
+                                      server    => $ENV{TEST_SERVER},
+                                      keepalive => 1,
+                                      timeout   => 3,
+                                      retries_on_connection_error => 0,
+#                                      logger    => get_logger(),
+                                    ),
+
+  # create inet object with a single arg
+  '02 inet_single_arg' => Monitoring::Livestatus::INET->new( $ENV{TEST_SERVER} ),
+};
+
+my $author = 'Monitoring::Livestatus test';
+for my $key (sort keys %{$objects_to_test}) {
+    my $ml = $objects_to_test->{$key};
+    isa_ok($ml, 'Monitoring::Livestatus');
+
+    # we don't need warnings for testing
+    $ml->warnings(0);
+
+    #########################
+    my $downtimes = $ml->selectall_arrayref("GET downtimes\nColumns: id");
+    my $num_downtimes = 0;
+    $num_downtimes = scalar @{$downtimes} if defined $downtimes;
+
+    #########################
+    # get a test host
+    my $firsthost = $ml->selectscalar_value("GET hosts\nColumns: name\nLimit: 1");
+    isnt($firsthost, undef, 'get test hostname') or BAIL_OUT($key.': got no test hostname');
+
+    my $expect = "aa ²&é\"'''(§è!çà)- %s ''%s'' aa ~ € bb";
+    #my $expect = "öäüß";
+    my $teststrings = [
+        $expect,
+        "aa \x{c2}\x{b2}&\x{c3}\x{a9}\"'''(\x{c2}\x{a7}\x{c3}\x{a8}!\x{c3}\x{a7}\x{c3}\x{a0})- %s ''%s'' aa ~ \x{e2}\x{82}\x{ac} bb",
+    ];
+    for my $string (@{$teststrings}) {
+        $ml->do('COMMAND ['.time().'] SCHEDULE_HOST_DOWNTIME;'.$firsthost.';'.time().';'.(time()+300).';1;0;300;'.$author.';'.$string);
+
+        # sometimes it takes a while until the downtime is accepted
+        my $waited = 0;
+        while($downtimes = $ml->selectall_arrayref("GET downtimes\nColumns: id comment", { Slice => 1 }) and scalar @{$downtimes} < $num_downtimes + 1) {
+            print "waiting for the downtime...\n";
+            sleep(1);
+            $waited++;
+            BAIL_OUT('waited 30 seconds for the downtime...') if $waited > 30;
+        }
+
+        my $last_downtime = pop @{$downtimes};
+        #utf8::decode($expect);
+        is($last_downtime->{'comment'}, $expect, 'get same utf8 comment: got '.Dumper($last_downtime));
+    }
+} diff -Nru check-mk-1.2.2p3/=unpacked-tar11=/api/perl/t/35-Monitoring-Livestatus-callbacks_support.t check-mk-1.2.6p12/=unpacked-tar11=/api/perl/t/35-Monitoring-Livestatus-callbacks_support.t
--- check-mk-1.2.2p3/=unpacked-tar11=/api/perl/t/35-Monitoring-Livestatus-callbacks_support.t	1970-01-01 00:00:00.000000000 +0000
+++ check-mk-1.2.6p12/=unpacked-tar11=/api/perl/t/35-Monitoring-Livestatus-callbacks_support.t	2012-10-04 13:12:37.000000000 +0000
@@ -0,0 +1,53 @@
+#!/usr/bin/env perl
+
+#########################
+
+use strict;
+use Encode;
+use Test::More;
+use Data::Dumper;
+
+if ( !defined $ENV{TEST_SERVER} ) {
+    my $msg = 'Author test.
Set $ENV{TEST_SOCKET} and $ENV{TEST_SERVER} to run'; + plan( skip_all => $msg ); +} else { + plan( tests => 15 ); +} + +use_ok('Monitoring::Livestatus'); + +#use Log::Log4perl qw(:easy); +#Log::Log4perl->easy_init($DEBUG); + +######################### +my $objects_to_test = { + # create inet object with hash args + '01 inet_hash_args' => Monitoring::Livestatus->new( + verbose => 0, + server => $ENV{TEST_SERVER}, + keepalive => 1, + timeout => 3, + retries_on_connection_error => 0, +# logger => get_logger(), + ), + + # create inet object with a single arg + '02 inet_single_arg' => Monitoring::Livestatus::INET->new( $ENV{TEST_SERVER} ), +}; + +for my $key (sort keys %{$objects_to_test}) { + my $ml = $objects_to_test->{$key}; + isa_ok($ml, 'Monitoring::Livestatus'); + + my $got = $ml->selectall_arrayref("GET hosts\nColumns: name alias state\nLimit: 1", { Slice => 1, callbacks => { 'c1' => sub { return $_[0]->{'alias'}; } } }); + isnt($got->[0]->{'alias'}, undef, 'got a test host'); + is($got->[0]->{'alias'}, $got->[0]->{'c1'}, 'callback for sliced results'); + + $got = $ml->selectall_arrayref("GET hosts\nColumns: name alias state\nLimit: 1", { Slice => 1, callbacks => { 'name' => sub { return $_[0]->{'alias'}; } } }); + isnt($got->[0]->{'alias'}, undef, 'got a test host'); + is($got->[0]->{'alias'}, $got->[0]->{'name'}, 'callback for sliced results which overwrites key'); + + $got = $ml->selectall_arrayref("GET hosts\nColumns: name alias state\nLimit: 1", { callbacks => { 'c1' => sub { return $_[0]->[1]; } } }); + isnt($got->[0]->[1], undef, 'got a test host'); + is($got->[0]->[1], $got->[0]->[3], 'callback for non sliced results'); +} diff -Nru check-mk-1.2.2p3/=unpacked-tar11=/api/perl/t/97-Pod.t check-mk-1.2.6p12/=unpacked-tar11=/api/perl/t/97-Pod.t --- check-mk-1.2.2p3/=unpacked-tar11=/api/perl/t/97-Pod.t 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/=unpacked-tar11=/api/perl/t/97-Pod.t 2012-10-04 13:12:37.000000000 +0000 @@ -0,0 +1,9 @@ +use strict; +use warnings; +use Test::More; + +eval "use Test::Pod 1.14"; +plan skip_all => 'Test::Pod 1.14 required' if $@; +plan skip_all => 'Author test. Set $ENV{TEST_AUTHOR} to a true value to run.' unless $ENV{TEST_AUTHOR}; + +all_pod_files_ok(); diff -Nru check-mk-1.2.2p3/=unpacked-tar11=/api/perl/t/98-Pod-Coverage.t check-mk-1.2.6p12/=unpacked-tar11=/api/perl/t/98-Pod-Coverage.t --- check-mk-1.2.2p3/=unpacked-tar11=/api/perl/t/98-Pod-Coverage.t 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/=unpacked-tar11=/api/perl/t/98-Pod-Coverage.t 2012-10-04 13:12:37.000000000 +0000 @@ -0,0 +1,23 @@ +#!/usr/bin/env perl +# +# $Id$ +# +use strict; +use warnings; +use File::Spec; +use Test::More; + +if ( not $ENV{TEST_AUTHOR} ) { + my $msg = 'Author test. 
Set $ENV{TEST_AUTHOR} to a true value to run.'; + plan( skip_all => $msg ); +} + +eval { require Test::Pod::Coverage; }; + +if ( $@ ) { + my $msg = 'Test::Pod::Coverage required to criticise pod'; + plan( skip_all => $msg ); +} + +eval "use Test::Pod::Coverage 1.00"; +all_pod_coverage_ok(); diff -Nru check-mk-1.2.2p3/=unpacked-tar11=/api/perl/t/99-Perl-Critic.t check-mk-1.2.6p12/=unpacked-tar11=/api/perl/t/99-Perl-Critic.t --- check-mk-1.2.2p3/=unpacked-tar11=/api/perl/t/99-Perl-Critic.t 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/=unpacked-tar11=/api/perl/t/99-Perl-Critic.t 2012-10-04 13:12:37.000000000 +0000 @@ -0,0 +1,24 @@ +#!/usr/bin/env perl +# +# $Id$ +# +use strict; +use warnings; +use File::Spec; +use Test::More; + +if ( not $ENV{TEST_AUTHOR} ) { + my $msg = 'Author test. Set $ENV{TEST_AUTHOR} to a true value to run.'; + plan( skip_all => $msg ); +} + +eval { require Test::Perl::Critic; }; + +if ( $@ ) { + my $msg = 'Test::Perl::Critic required to criticise code'; + plan( skip_all => $msg ); +} + +my $rcfile = File::Spec->catfile( 't', 'perlcriticrc' ); +Test::Perl::Critic->import( -profile => $rcfile ); +all_critic_ok(); diff -Nru check-mk-1.2.2p3/=unpacked-tar11=/api/perl/t/perlcriticrc check-mk-1.2.6p12/=unpacked-tar11=/api/perl/t/perlcriticrc --- check-mk-1.2.2p3/=unpacked-tar11=/api/perl/t/perlcriticrc 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/=unpacked-tar11=/api/perl/t/perlcriticrc 2012-10-04 13:12:37.000000000 +0000 @@ -0,0 +1,286 @@ +############################################################################## +# This Perl::Critic configuration file sets the Policy severity levels +# according to Damian Conway's own personal recommendations. Feel free to +# use this as your own, or make modifications. 
+############################################################################## + +[Perl::Critic::Policy::ValuesAndExpressions::ProhibitAccessOfPrivateData] +severity = 3 + +[Perl::Critic::Policy::BuiltinFunctions::ProhibitLvalueSubstr] +severity = 3 + +[Perl::Critic::Policy::BuiltinFunctions::ProhibitReverseSortBlock] +severity = 1 + +[Perl::Critic::Policy::BuiltinFunctions::ProhibitSleepViaSelect] +severity = 5 + +[Perl::Critic::Policy::BuiltinFunctions::ProhibitStringyEval] +severity = 5 + +[Perl::Critic::Policy::BuiltinFunctions::ProhibitStringySplit] +severity = 2 + +[Perl::Critic::Policy::BuiltinFunctions::ProhibitUniversalCan] +severity = 4 + +[Perl::Critic::Policy::BuiltinFunctions::ProhibitUniversalIsa] +severity = 4 + +[Perl::Critic::Policy::BuiltinFunctions::ProhibitVoidGrep] +severity = 3 + +[Perl::Critic::Policy::BuiltinFunctions::ProhibitVoidMap] +severity = 3 + +[Perl::Critic::Policy::BuiltinFunctions::RequireBlockGrep] +severity = 4 + +[Perl::Critic::Policy::BuiltinFunctions::RequireBlockMap] +severity = 4 + +[Perl::Critic::Policy::BuiltinFunctions::RequireGlobFunction] +severity = 5 + +[Perl::Critic::Policy::BuiltinFunctions::RequireSimpleSortBlock] +severity = 3 + +[Perl::Critic::Policy::ClassHierarchies::ProhibitAutoloading] +severity = 3 + +[Perl::Critic::Policy::ClassHierarchies::ProhibitExplicitISA] +severity = 4 + +[Perl::Critic::Policy::ClassHierarchies::ProhibitOneArgBless] +severity = 5 + +[Perl::Critic::Policy::CodeLayout::ProhibitHardTabs] +severity = 3 + +[Perl::Critic::Policy::CodeLayout::ProhibitParensWithBuiltins] +severity = 1 + +[Perl::Critic::Policy::CodeLayout::ProhibitQuotedWordLists] +severity = 2 + +[Perl::Critic::Policy::CodeLayout::RequireConsistentNewlines] +severity = 4 + +[Perl::Critic::Policy::CodeLayout::RequireTidyCode] +severity = 1 + +[Perl::Critic::Policy::CodeLayout::RequireTrailingCommas] +severity = 3 + +[Perl::Critic::Policy::ControlStructures::ProhibitCStyleForLoops] +severity = 3 + +[Perl::Critic::Policy::ControlStructures::ProhibitCascadingIfElse] +severity = 3 + +[Perl::Critic::Policy::ControlStructures::ProhibitDeepNests] +severity = 3 + +[Perl::Critic::Policy::ControlStructures::ProhibitMutatingListFunctions] +severity = 5 + +[Perl::Critic::Policy::ControlStructures::ProhibitPostfixControls] +severity = 4 + +[Perl::Critic::Policy::ControlStructures::ProhibitUnlessBlocks] +severity = 4 + +[Perl::Critic::Policy::ControlStructures::ProhibitUnreachableCode] +severity = 4 + +[Perl::Critic::Policy::ControlStructures::ProhibitUntilBlocks] +severity = 4 + +[Perl::Critic::Policy::Documentation::RequirePodAtEnd] +severity = 2 + +[Perl::Critic::Policy::Documentation::RequirePodSections] +severity = 2 + +[Perl::Critic::Policy::ErrorHandling::RequireCarping] +severity = 4 + +[Perl::Critic::Policy::InputOutput::ProhibitBacktickOperators] +severity = 3 + +[Perl::Critic::Policy::InputOutput::ProhibitBarewordFileHandles] +severity = 5 + +[Perl::Critic::Policy::InputOutput::ProhibitInteractiveTest] +severity = 4 + +[Perl::Critic::Policy::InputOutput::ProhibitOneArgSelect] +severity = 4 + +[Perl::Critic::Policy::InputOutput::ProhibitReadlineInForLoop] +severity = 5 + +[Perl::Critic::Policy::InputOutput::ProhibitTwoArgOpen] +severity = 4 + +[Perl::Critic::Policy::InputOutput::RequireBracedFileHandleWithPrint] +severity = 3 + +[Perl::Critic::Policy::Miscellanea::ProhibitFormats] +severity = 3 + +[Perl::Critic::Policy::Miscellanea::ProhibitTies] +severity = 4 + +[-Perl::Critic::Policy::Miscellanea::RequireRcsKeywords] + 
+[Perl::Critic::Policy::Modules::ProhibitAutomaticExportation] +severity = 4 + +[Perl::Critic::Policy::Modules::ProhibitEvilModules] +severity = 5 + +[Perl::Critic::Policy::Modules::ProhibitMultiplePackages] +severity = 4 + +[Perl::Critic::Policy::Modules::RequireBarewordIncludes] +severity = 5 + +[Perl::Critic::Policy::Modules::RequireEndWithOne] +severity = 4 + +[Perl::Critic::Policy::Modules::RequireExplicitPackage] +severity = 4 + +[Perl::Critic::Policy::Modules::RequireFilenameMatchesPackage] +severity = 5 + +[Perl::Critic::Policy::Modules::RequireVersionVar] +severity = 4 + +[Perl::Critic::Policy::NamingConventions::ProhibitAmbiguousNames] +severity = 3 + +[Perl::Critic::Policy::NamingConventions::ProhibitMixedCaseSubs] +severity = 1 + +[Perl::Critic::Policy::NamingConventions::ProhibitMixedCaseVars] +severity = 1 + +[Perl::Critic::Policy::References::ProhibitDoubleSigils] +severity = 4 + +[Perl::Critic::Policy::RegularExpressions::ProhibitCaptureWithoutTest] +severity = 4 + +[Perl::Critic::Policy::RegularExpressions::RequireExtendedFormatting] +severity = 5 + +[Perl::Critic::Policy::RegularExpressions::RequireLineBoundaryMatching] +severity = 5 + +[Perl::Critic::Policy::Subroutines::ProhibitAmpersandSigils] +severity = 2 + +[Perl::Critic::Policy::Subroutines::ProhibitBuiltinHomonyms] +severity = 4 + +[Perl::Critic::Policy::Subroutines::ProhibitExcessComplexity] +severity = 3 + +[Perl::Critic::Policy::Subroutines::ProhibitExplicitReturnUndef] +severity = 5 + +[Perl::Critic::Policy::Subroutines::ProhibitSubroutinePrototypes] +severity = 4 + +[Perl::Critic::Policy::Subroutines::ProtectPrivateSubs] +severity = 3 + +[Perl::Critic::Policy::Subroutines::RequireFinalReturn] +severity = 5 + +[Perl::Critic::Policy::TestingAndDebugging::ProhibitNoStrict] +severity = 5 + +[Perl::Critic::Policy::TestingAndDebugging::ProhibitNoWarnings] +severity = 4 + +[Perl::Critic::Policy::TestingAndDebugging::ProhibitProlongedStrictureOverride] +severity = 4 + +[Perl::Critic::Policy::TestingAndDebugging::RequireTestLabels] +severity = 3 + +[Perl::Critic::Policy::TestingAndDebugging::RequireUseStrict] +severity = 5 + +[Perl::Critic::Policy::TestingAndDebugging::RequireUseWarnings] +severity = 4 + +[Perl::Critic::Policy::ValuesAndExpressions::ProhibitConstantPragma] +severity = 4 + +[Perl::Critic::Policy::ValuesAndExpressions::ProhibitEmptyQuotes] +severity = 2 + +[Perl::Critic::Policy::ValuesAndExpressions::ProhibitEscapedCharacters] +severity = 2 + +[Perl::Critic::Policy::ValuesAndExpressions::ProhibitInterpolationOfLiterals] +severity = 1 + +[Perl::Critic::Policy::ValuesAndExpressions::ProhibitLeadingZeros] +severity = 5 + +[Perl::Critic::Policy::ValuesAndExpressions::ProhibitMismatchedOperators] +severity = 2 + +[Perl::Critic::Policy::ValuesAndExpressions::ProhibitMixedBooleanOperators] +severity = 4 + +[Perl::Critic::Policy::ValuesAndExpressions::ProhibitNoisyQuotes] +severity = 2 + +[Perl::Critic::Policy::ValuesAndExpressions::ProhibitVersionStrings] +severity = 3 + +[Perl::Critic::Policy::ValuesAndExpressions::RequireInterpolationOfMetachars] +severity = 1 + +[Perl::Critic::Policy::ValuesAndExpressions::RequireNumberSeparators] +severity = 2 + +[Perl::Critic::Policy::ValuesAndExpressions::RequireQuotedHeredocTerminator] +severity = 4 + +[Perl::Critic::Policy::ValuesAndExpressions::RequireUpperCaseHeredocTerminator] +severity = 4 + +[Perl::Critic::Policy::Variables::ProhibitConditionalDeclarations] +severity = 5 + +[Perl::Critic::Policy::Variables::ProhibitLocalVars] +severity = 2 + 
+[Perl::Critic::Policy::Variables::ProhibitMatchVars] +severity = 4 + +[Perl::Critic::Policy::Variables::ProhibitPackageVars] +severity = 3 + +[Perl::Critic::Policy::Variables::ProhibitPunctuationVars] +severity = 2 + +[Perl::Critic::Policy::Variables::ProtectPrivateVars] +severity = 3 + +[Perl::Critic::Policy::Variables::RequireInitializationForLocalVars] +severity = 5 + +[Perl::Critic::Policy::Variables::RequireLexicalLoopIterators] +severity = 5 + +[Perl::Critic::Policy::Variables::RequireNegativeIndices] +severity = 4 \ No newline at end of file diff -Nru check-mk-1.2.2p3/=unpacked-tar11=/api/python/example_multisite.py check-mk-1.2.6p12/=unpacked-tar11=/api/python/example_multisite.py --- check-mk-1.2.2p3/=unpacked-tar11=/api/python/example_multisite.py 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/=unpacked-tar11=/api/python/example_multisite.py 2014-10-30 13:30:24.000000000 +0000 @@ -0,0 +1,93 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. 
+
+import os, sys
+import livestatus
+
+try:
+    omd_root = os.getenv("OMD_ROOT")
+    socket_path = "unix:" + omd_root + "/tmp/run/live"
+except:
+    sys.stderr.write("This example is intended to run in an OMD site\n")
+    sys.stderr.write("Please change socket_path in this example, if you are\n")
+    sys.stderr.write("not using OMD.\n")
+    sys.exit(1)
+
+
+sites = {
+    "muc" : {
+        "socket" : socket_path,
+        "alias" : "Munich",
+    },
+    "sitea" : {
+        "alias" : "Augsburg",
+        "socket" : "tcp:sitea:6557",
+        "nagios_url" : "/nagios/",
+        "timeout" : 2,
+    },
+    "siteb" : {
+        "alias" : "Berlin",
+        "socket" : "tcp:siteb:6557",
+        "nagios_url" : "/nagios/",
+        "timeout" : 10,
+    },
+}
+
+c = livestatus.MultiSiteConnection(sites)
+c.set_prepend_site(True)
+print c.query("GET hosts\nColumns: name state\n")
+c.set_prepend_site(False)
+print c.query("GET hosts\nColumns: name state\n")
+
+# Beware: When doing stats, you need to aggregate yourself:
+print sum(c.query_column("GET hosts\nStats: state >= 0\n"))
+
+# Detect errors:
+sites = {
+    "muc" : {
+        "socket" : "unix:/var/run/nagios/rw/live",
+        "alias" : "Munich",
+    },
+    "sitea" : {
+        "alias" : "Augsburg",
+        "socket" : "tcp:sitea:6558", # BROKEN
+        "nagios_url" : "/nagios/",
+        "timeout" : 2,
+    },
+    "siteb" : {
+        "alias" : "Berlin",
+        "socket" : "tcp:siteb:6557",
+        "nagios_url" : "/nagios/",
+        "timeout" : 10,
+    },
+}
+
+c = livestatus.MultiSiteConnection(sites)
+for name, state in c.query("GET hosts\nColumns: name state\n"):
+    print "%-15s: %d" % (name, state)
+print "Dead sites:"
+for sitename, info in c.dead_sites().items():
+    print "%s: %s" % (sitename, info["exception"]) diff -Nru check-mk-1.2.2p3/=unpacked-tar11=/api/python/example.py check-mk-1.2.6p12/=unpacked-tar11=/api/python/example.py
--- check-mk-1.2.2p3/=unpacked-tar11=/api/python/example.py	1970-01-01 00:00:00.000000000 +0000
+++ check-mk-1.2.6p12/=unpacked-tar11=/api/python/example.py	2014-10-30 13:30:24.000000000 +0000
@@ -0,0 +1,72 @@
+#!/usr/bin/python
+# -*- encoding: utf-8; py-indent-offset: 4 -*-
+# +------------------------------------------------------------------+
+# |             ____ _               _        __  __ _  __           |
+# |            / ___| |__   ___  ___| | __   |  \/  | |/ /           |
+# |           | |   | '_ \ / _ \/ __| |/ /   | |\/| | ' /            |
+# |           | |___| | | |  __/ (__|   <    | |  | | . \            |
+# |            \____|_| |_|\___|\___|_|\_\___|_|  |_|_|\_\           |
+# |                                                                  |
+# | Copyright Mathias Kettner 2014             mk@mathias-kettner.de |
+# +------------------------------------------------------------------+
+#
+# This file is part of Check_MK.
+# The official homepage is at http://mathias-kettner.de/check_mk.
+#
+# check_mk is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by
+# the Free Software Foundation in version 2. check_mk is distributed
+# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with-
+# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE. See the GNU General Public License for more de-
+# ails. You should have received a copy of the GNU General Public
+# License along with GNU Make; see the file COPYING. If not, write
+# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
+# Boston, MA 02110-1301 USA.
+
+import os, sys
+import livestatus
+
+try:
+    omd_root = os.getenv("OMD_ROOT")
+    socket_path = "unix:" + omd_root + "/tmp/run/live"
+except:
+    sys.stderr.write("This example is intended to run in an OMD site\n")
+    sys.stderr.write("Please change socket_path in this example, if you are\n")
+    sys.stderr.write("not using OMD.\n")
+    sys.exit(1)
+
+try:
+    # Make a single connection for each query
+    print "\nPerformance:"
+    for key, value in livestatus.SingleSiteConnection(socket_path).query_row_assoc("GET status").items():
+        print "%-30s: %s" % (key, value)
+    print "\nHosts:"
+    hosts = livestatus.SingleSiteConnection(socket_path).query_table("GET hosts\nColumns: name alias address")
+    for name, alias, address in hosts:
+        print "%-16s %-16s %s" % (name, address, alias)
+
+    # Do several queries in one connection
+    conn = livestatus.SingleSiteConnection(socket_path)
+    num_up = conn.query_value("GET hosts\nStats: hard_state = 0")
+    print "\nHosts up: %d" % num_up
+
+    stats = conn.query_row(
+        "GET services\n"
+        "Stats: state = 0\n"
+        "Stats: state = 1\n"
+        "Stats: state = 2\n"
+        "Stats: state = 3\n")
+    print "Service stats: %d/%d/%d/%d" % tuple(stats)
+
+    print "List of commands: %s" % \
+        ", ".join(conn.query_column("GET commands\nColumns: name"))
+
+    print "Query error:"
+    conn.query_value("GET hosts\nColumns: hirni")
+
+
+except Exception, e: # livestatus.MKLivestatusException, e:
+    print "Livestatus error: %s" % str(e)
+
+ diff -Nru check-mk-1.2.2p3/=unpacked-tar11=/api/python/livestatus.py check-mk-1.2.6p12/=unpacked-tar11=/api/python/livestatus.py
--- check-mk-1.2.2p3/=unpacked-tar11=/api/python/livestatus.py	1970-01-01 00:00:00.000000000 +0000
+++ check-mk-1.2.6p12/=unpacked-tar11=/api/python/livestatus.py	2014-10-30 13:30:24.000000000 +0000
@@ -0,0 +1,680 @@
+#!/usr/bin/python
+# -*- encoding: utf-8; py-indent-offset: 4 -*-
+# +------------------------------------------------------------------+
+# |             ____ _               _        __  __ _  __           |
+# |            / ___| |__   ___  ___| | __   |  \/  | |/ /           |
+# |           | |   | '_ \ / _ \/ __| |/ /   | |\/| | ' /            |
+# |           | |___| | | |  __/ (__|   <    | |  | | . \            |
+# |            \____|_| |_|\___|\___|_|\_\___|_|  |_|_|\_\           |
+# |                                                                  |
+# | Copyright Mathias Kettner 2014             mk@mathias-kettner.de |
+# +------------------------------------------------------------------+
+#
+# This file is part of Check_MK.
+# The official homepage is at http://mathias-kettner.de/check_mk.
+#
+# check_mk is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by
+# the Free Software Foundation in version 2. check_mk is distributed
+# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with-
+# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE. See the GNU General Public License for more de-
+# ails. You should have received a copy of the GNU General Public
+# License along with GNU Make; see the file COPYING. If not, write
+# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
+# Boston, MA 02110-1301 USA.
+
+import socket, time, re
+
+# Python 2.3 does not have 'set' in normal namespace.
+# But it can be imported from 'sets'
+try:
+    set()
+except NameError:
+    from sets import Set as set
+
+"""MK Livestatus Python API
+
+This module allows easy access to Nagios via MK Livestatus.
+It supports persistent connections via the SingleSiteConnection class.
If you want single-shot connections, just initialize a
+connection object on-the-fly, e.g.:
+
+r = SingleSiteConnection("/var/lib/nagios/rw/live").query_table_assoc("GET hosts")
+
+For persistent connections create and keep an object:
+
+conn = SingleSiteConnection("/var/lib/nagios/rw/live")
+r1 = conn.query_table_assoc("GET hosts")
+r2 = conn.query_row("GET status")
+"""
+
+# Keep a global dict of persistent connections
+persistent_connections = {}
+
+# Regular expression for removing Cache: headers if caching is not allowed
+remove_cache_regex = re.compile("\nCache:[^\n]*")
+
+# DEBUGGING PERSISTENT CONNECTIONS
+# import os
+# hirn_debug = file("/tmp/live.log", "a")
+# def hirn(x):
+#     pid = os.getpid()
+#     hirn_debug.write("[\033[1;3%d;4%dm%d\033[0m] %s\n" % (pid%7+1, (pid/7)%7+1, pid, x))
+#     hirn_debug.flush()
+
+class MKLivestatusException(Exception):
+    def __init__(self, value):
+        self.parameter = value
+    def __str__(self):
+        return str(self.parameter)
+
+class MKLivestatusSocketError(MKLivestatusException):
+    def __init__(self, reason):
+        MKLivestatusException.__init__(self, reason)
+
+class MKLivestatusSocketClosed(MKLivestatusSocketError):
+    def __init__(self, reason):
+        MKLivestatusSocketError.__init__(self, reason)
+
+class MKLivestatusConfigError(MKLivestatusException):
+    def __init__(self, reason):
+        MKLivestatusException.__init__(self, reason)
+
+class MKLivestatusQueryError(MKLivestatusException):
+    def __init__(self, code, reason):
+        MKLivestatusException.__init__(self, "%s: %s" % (code, reason))
+        self.code = code
+
+class MKLivestatusNotFoundError(MKLivestatusException):
+    def __init__(self, query):
+        MKLivestatusException.__init__(self, query)
+        self.query = query
+
+# We need some unique value here
+NO_DEFAULT = lambda: None
+class Helpers:
+    def query_value(self, query, deflt = NO_DEFAULT):
+        """Issues a query that returns exactly one line and one column and returns
+           the response as a single value"""
+        result = self.query(query, "ColumnHeaders: off\n")
+        try:
+            return result[0][0]
+        except:
+            if deflt == NO_DEFAULT:
+                raise MKLivestatusNotFoundError(query)
+            else:
+                return deflt
+
+    def query_row(self, query):
+        """Issues a query that returns one line of data and returns the elements
+           of that line as list"""
+        return self.query(query, "ColumnHeaders: off\n")[0]
+
+    def query_row_assoc(self, query):
+        """Issues a query that returns one line of data and returns the elements
+           of that line as a dictionary from column names to values"""
+        r = self.query(query, "ColumnHeaders: on\n")[0:2]
+        return dict(zip(r[0], r[1]))
+
+    def query_column(self, query):
+        """Issues a query that returns exactly one column and returns the values
+           of all lines in that column as a single list"""
+        return [ l[0] for l in self.query(query, "ColumnHeaders: off\n") ]
+
+    def query_column_unique(self, query):
+        """Issues a query that returns exactly one column and returns the values
+           of all lines with duplicates removed"""
+        result = []
+        for line in self.query(query, "ColumnHeaders: off\n"):
+            if line[0] not in result:
+                result.append(line[0])
+        return result
+
+    def query_table(self, query):
+        """Issues a query that may return multiple lines and columns and returns
+           a list of lists"""
+        return self.query(query, "ColumnHeaders: off\n")
+
+    def query_table_assoc(self, query):
+        """Issues a query that may return multiple lines and columns and returns
+           a dictionary from column names to values for each line. This can be
This can be + very ineffective for large response sets.""" + response = self.query(query, "ColumnHeaders: on\n") + headers = response[0] + result = [] + for line in response[1:]: + result.append(dict(zip(headers, line))) + return result + + def query_summed_stats(self, query, add_headers = ""): + """Conveniance function for adding up numbers from Stats queries + Adds up results column-wise. This is useful for multisite queries.""" + data = self.query(query, add_headers) + if len(data) == 1: + return data[0] + elif len(data) == 0: + raise MKLivestatusNotFoundError("Empty result to Stats-Query") + + result = [] + for x in range(0, len(data[0])): + result.append(sum([row[x] for row in data])) + return result + + +class BaseConnection: + def __init__(self, socketurl, persist = False, allow_cache = False): + """Create a new connection to a MK Livestatus socket""" + self.add_headers = "" + self.persist = persist + self.allow_cache = allow_cache + self.socketurl = socketurl + self.socket = None + self.timeout = None + self.successful_persistence = False + + def successfully_persisted(self): + return self.successful_persistence + + def add_header(self, header): + self.add_headers += header + "\n" + + def set_timeout(self, timeout): + self.timeout = timeout + if self.socket: + self.socket.settimeout(float(timeout)) + + def connect(self): + if self.persist and self.socketurl in persistent_connections: + self.socket = persistent_connections[self.socketurl] + self.successful_persistence = True + return + + self.successful_persistence = False + + # Create new socket + self.socket = None + url = self.socketurl + parts = url.split(":") + if parts[0] == "unix": + if len(parts) != 2: + raise MKLivestatusConfigError("Invalid livestatus unix URL: %s. " + "Correct example is 'unix:/var/run/nagios/rw/live'" % url) + self.socket = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) + target = parts[1] + + elif parts[0] == "tcp": + try: + host = parts[1] + port = int(parts[2]) + except: + raise MKLivestatusConfigError("Invalid livestatus tcp URL '%s'. " + "Correct example is 'tcp:somehost:6557'" % url) + self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + target = (host, port) + else: + raise MKLivestatusConfigError("Invalid livestatus URL '%s'. " + "Must begin with 'tcp:' or 'unix:'" % url) + + # If a timeout is set, then we retry after a failure with mild + # a binary backoff. 
+        if self.timeout:
+            before = time.time()
+            sleep_interval = 0.1
+
+        while True:
+            try:
+                if self.timeout:
+                    self.socket.settimeout(float(sleep_interval))
+                self.socket.connect(target)
+                break
+            except Exception, e:
+                if self.timeout:
+                    time_left = self.timeout - (time.time() - before)
+                    # only try again, if there is substantial time left
+                    if time_left > sleep_interval:
+                        time.sleep(sleep_interval)
+                        sleep_interval *= 1.5
+                        continue
+
+                self.socket = None
+                raise MKLivestatusSocketError("Cannot connect to '%s': %s" % (self.socketurl, e))
+
+        if self.persist:
+            persistent_connections[self.socketurl] = self.socket
+
+    def disconnect(self):
+        self.socket = None
+        if self.persist:
+            del persistent_connections[self.socketurl]
+
+    def receive_data(self, size):
+        result = ""
+        # Timeout is only honored when connecting
+        self.socket.settimeout(None)
+        while size > 0:
+            packet = self.socket.recv(size)
+            if len(packet) == 0:
+                raise MKLivestatusSocketClosed("Read zero data from socket, nagios server closed connection")
+            size -= len(packet)
+            result += packet
+        return result
+
+    def do_query(self, query, add_headers = ""):
+        self.send_query(query, add_headers)
+        return self.recv_response(query, add_headers)
+
+    def send_query(self, query, add_headers = "", do_reconnect=True):
+        if not self.allow_cache:
+            query = remove_cache_regex.sub("", query)
+        orig_query = query
+        if self.socket == None:
+            self.connect()
+        if not query.endswith("\n"):
+            query += "\n"
+        query += self.auth_header + self.add_headers
+        query += "Localtime: %d\nOutputFormat: python\nKeepAlive: on\nResponseHeader: fixed16\n" % int(time.time())
+        query += add_headers
+
+        if not query.endswith("\n"):
+            query += "\n"
+        query += "\n"
+
+        try:
+            # socket.send() will implicitly cast to str(), we need to
+            # convert to UTF-8 in order to avoid exceptions
+            if type(query) == unicode:
+                query = query.encode("utf-8")
+            self.socket.send(query)
+        except IOError, e:
+            if self.persist:
+                del persistent_connections[self.socketurl]
+                self.successful_persistence = False
+            self.socket = None
+
+            if do_reconnect:
+                # Automatically try to reconnect in case of an error, but
+                # only once.
+                self.connect()
+                self.send_query(orig_query, add_headers, False)
+                return
+
+            raise MKLivestatusSocketError("RC1:" + str(e))
+
+    # Reads a response from the livestatus socket. If the socket is closed
+    # by the livestatus server, we automatically make a reconnect and send
+    # the query again (once). This is due to timeouts during keepalive.
+    def recv_response(self, query = None, add_headers = "", timeout_at = None):
+        try:
+            resp = self.receive_data(16)
+            code = resp[0:3]
+            try:
+                length = int(resp[4:15].lstrip())
+            except:
+                raise MKLivestatusSocketError("Malformed output.
Livestatus TCP socket might be unreachable.") + data = self.receive_data(length) + if code == "200": + try: + return eval(data) + except: + raise MKLivestatusSocketError("Malformed output") + else: + raise MKLivestatusQueryError(code, data.strip()) + + # In case of an IO error or the other side having + # closed the socket do a reconnect and try again, but + # only once + except (MKLivestatusSocketClosed, IOError), e: + self.disconnect() + now = time.time() + if query and (not timeout_at or timeout_at > now): + if timeout_at == None: + timeout_at = now + self.timeout + time.sleep(0.1) + self.connect() + self.send_query(query, add_headers) + return self.recv_response(query, add_headers, timeout_at) # do not send query again -> danger of infinite loop + else: + raise MKLivestatusSocketError(str(e)) + + except Exception, e: + raise MKLivestatusSocketError("Unhandled exception: %s" % e) + + + def do_command(self, command): + if self.socket == None: + self.connect() + if not command.endswith("\n"): + command += "\n" + try: + self.socket.send("COMMAND " + command + "\n") + except IOError, e: + self.socket = None + if self.persist: + del persistent_connections[self.socketurl] + raise MKLivestatusSocketError(str(e)) + + +class SingleSiteConnection(BaseConnection, Helpers): + def __init__(self, socketurl, persist = False, allow_cache = False): + BaseConnection.__init__(self, socketurl, persist, allow_cache) + self.prepend_site = False + self.auth_users = {} + self.deadsites = {} # never filled, just for compatibility + self.auth_header = "" + self.limit = None + + def set_prepend_site(self, p): + self.prepend_site = p + + def set_only_sites(self, os = None): + pass + + def set_limit(self, limit = None): + self.limit = limit + + def query(self, query, add_headers = ""): + if self.limit != None: + query += "Limit: %d\n" % self.limit + data = self.do_query(query, add_headers) + if self.prepend_site: + return [ [''] + line for line in data ] + else: + return data + + def command(self, command, site = None): + self.do_command(command) + + # Set user to be used in certain authorization domain + def set_auth_user(self, domain, user): + if user: + self.auth_users[domain] = user + else: + del self.auth_users[domain] + + # Switch future request to new authorization domain + def set_auth_domain(self, domain): + auth_user = self.auth_users.get(domain) + if auth_user: + self.auth_header = "AuthUser: %s\n" % auth_user + else: + self.auth_header = "" + + +# sites is a dictionary from site name to a dict. 
+# Keys in the dictionary:
+# socket:  socketurl (obligatory)
+# timeout: timeout for tcp/unix in seconds
+
+class MultiSiteConnection(Helpers):
+    def __init__(self, sites, disabled_sites = []):
+        self.sites = sites
+        self.connections = []
+        self.deadsites = {}
+        self.prepend_site = False
+        self.only_sites = None
+        self.limit = None
+        self.parallelize = True
+
+        # Helper function for connecting to a site
+        def connect_to_site(sitename, site, temporary=False):
+            try:
+                url = site["socket"]
+                persist = not temporary and site.get("persist", False)
+                connection = SingleSiteConnection(url, persist, allow_cache=site.get("cache", False))
+                if "timeout" in site:
+                    connection.set_timeout(int(site["timeout"]))
+                connection.connect()
+                self.connections.append((sitename, site, connection))
+
+            except Exception, e:
+                self.deadsites[sitename] = {
+                    "exception" : e,
+                    "site"      : site,
+                }
+
+        # Needed for temporary connection for status_hosts in disabled sites
+        def disconnect_site(sitename):
+            i = 0
+            for name, site, connection in self.connections:
+                if name == sitename:
+                    del self.connections[i]
+                    return
+                i += 1
+
+
+        # Status host: A status host helps to prevent trying to connect
+        # to a remote site which is unreachable. This is done by looking
+        # at the current state of a certain host on a local site that is
+        # representing the connection to the remote site. The status host
+        # is specified as an optional pair of (site, host) in the entry
+        # "status_host". We first connect to all sites without a status_host
+        # entry, then retrieve the host states of the status hosts and then
+        # connect to the remote sites which are reachable.
+
+        # Tackle a very special problem: If the user disables a site which
+        # provides status_host information for other sites, the dead-detection
+        # would not work. For such cases we make a temporary connection just
+        # to fetch the status information
+        extra_status_sites = {}
+        if len(disabled_sites) > 0:
+            status_sitenames = set([])
+            for sitename, site in sites.items():
+                try:
+                    s, h = site.get("status_host")
+                    status_sitenames.add(s)
+                except:
+                    continue
+            for sitename in status_sitenames:
+                site = disabled_sites.get(sitename)
+                if site:
+                    extra_status_sites[sitename] = site
+
+
+        # First connect to sites without status host. Collect status
+        # hosts at the same time.
+
+        status_hosts = {} # dict from site to list of status_hosts
+        for sitename, site in sites.items() + extra_status_sites.items():
+            status_host = site.get("status_host")
+            if status_host:
+                if type(status_host) != tuple or len(status_host) != 2:
+                    raise MKLivestatusConfigError("Status host of site %s is %r, but must be pair of site and host" %
+                            (sitename, status_host))
+                s, h = status_host
+                status_hosts[s] = status_hosts.get(s, []) + [h]
+            else:
+                connect_to_site(sitename, site)
+
+        # Now learn current states of status hosts and store it in a dictionary
+        # from (local_site, host) => state
+        status_host_states = {}
+        for sitename, hosts in status_hosts.items():
+            # Fetch all the states of status hosts of this local site in one query
+            query = "GET hosts\nColumns: name state has_been_checked last_time_up\n"
+            for host in hosts:
+                query += "Filter: name = %s\n" % host
+            query += "Or: %d\n" % len(hosts)
+            self.set_only_sites([sitename]) # only connect one site
+            try:
+                result = self.query_table(query)
+                for host, state, has_been_checked, lastup in result:
+                    if has_been_checked == 0:
+                        state = 3
+                    status_host_states[(sitename, host)] = (state, lastup)
+            except Exception, e:
+                # record the error as the state of this status host instead of
+                # aborting the whole constructor
+                status_host_states[(sitename, host)] = (str(e), None)
+            self.set_only_sites() # clear site filter
+
+        # Disconnect from disabled sites that we connected to only to
+        # get status information from
+        for sitename, site in extra_status_sites.items():
+            disconnect_site(sitename)
+
+        # Now loop over all sites that have a status_host and take the state
+        # of that host into consideration
+
+        for sitename, site in sites.items():
+            status_host = site.get("status_host")
+            if status_host:
+                now = time.time()
+                shs, lastup = status_host_states.get(status_host, (4, now)) # None => Status host not existing
+                deltatime = now - lastup
+                if shs == 0 or shs == None:
+                    connect_to_site(sitename, site)
+                else:
+                    if shs == 1:
+                        ex = "The remote monitoring host is down"
+                    elif shs == 2:
+                        ex = "The remote monitoring host is unreachable"
+                    elif shs == 3:
+                        ex = "The remote monitoring host's state is not yet determined"
+                    elif shs == 4:
+                        ex = "Invalid status host: site %s has no host %s" % (status_host[0], status_host[1])
+                    else:
+                        ex = "Error determining state of remote monitoring host: %s" % shs
+                    self.deadsites[sitename] = {
+                        "site"              : site,
+                        "status_host_state" : shs,
+                        "exception"         : ex,
+                    }
+
+    def add_header(self, header):
+        for sitename, site, connection in self.connections:
+            connection.add_header(header)
+
+    def set_prepend_site(self, p):
+        self.prepend_site = p
+
+    def set_only_sites(self, os = None):
+        self.only_sites = os
+
+    # Impose a limit on the number of returned datasets (distributed among sites)
+    def set_limit(self, limit = None):
+        self.limit = limit
+
+    def dead_sites(self):
+        return self.deadsites
+
+    def alive_sites(self):
+        # self.connections is a list of (sitename, site, connection) tuples
+        return [ sitename for sitename, site, connection in self.connections ]
+
+    def successfully_persisted(self):
+        for sitename, site, connection in self.connections:
+            if connection.successfully_persisted():
+                return True
+        return False
+
+    def set_auth_user(self, domain, user):
+        for sitename, site, connection in self.connections:
+            connection.set_auth_user(domain, user)
+
+    def set_auth_domain(self, domain):
+        for sitename, site, connection in self.connections:
+            connection.set_auth_domain(domain)
+
+    def query(self, query, add_headers = ""):
+        if self.parallelize:
+            return self.query_parallel(query, add_headers)
+        else:
+            return
self.query_non_parallel(query, add_headers)
+
+    def query_non_parallel(self, query, add_headers = ""):
+        result = []
+        stillalive = []
+        limit = self.limit
+        for sitename, site, connection in self.connections:
+            if self.only_sites != None and sitename not in self.only_sites:
+                stillalive.append( (sitename, site, connection) ) # state unknown, assume still alive
+                continue
+            try:
+                if limit != None:
+                    limit_header = "Limit: %d\n" % limit
+                else:
+                    limit_header = ""
+                r = connection.query(query, add_headers + limit_header)
+                if self.prepend_site:
+                    r = [ [sitename] + l for l in r ]
+                if limit != None:
+                    limit -= len(r) # Account for portion of limit used by this site
+                result += r
+                stillalive.append( (sitename, site, connection) )
+            except Exception, e:
+                self.deadsites[sitename] = {
+                    "exception" : e,
+                    "site"      : site,
+                }
+        self.connections = stillalive
+        return result
+
+    # New parallelized version of query(). The semantics differs in the handling
+    # of Limit: since all sites are queried in parallel, the Limit: is simply
+    # applied to all sites - resulting in possibly more results than Limit requests.
+    def query_parallel(self, query, add_headers = ""):
+        if self.only_sites != None:
+            active_sites = [ c for c in self.connections if c[0] in self.only_sites ]
+        else:
+            active_sites = self.connections
+
+        start_time = time.time()
+        stillalive = []
+        limit = self.limit
+        if limit != None:
+            limit_header = "Limit: %d\n" % limit
+        else:
+            limit_header = ""
+
+        # First send all queries
+        for sitename, site, connection in active_sites:
+            try:
+                connection.send_query(query, add_headers + limit_header)
+            except Exception, e:
+                self.deadsites[sitename] = {
+                    "exception" : e,
+                    "site"      : site,
+                }
+
+        # Then retrieve all answers. We will be as slow as the slowest of all
+        # connections.
+        result = []
+        for sitename, site, connection in self.connections:
+            if self.only_sites != None and sitename not in self.only_sites:
+                stillalive.append( (sitename, site, connection) ) # state unknown, assume still alive
+                continue
+
+            try:
+                r = connection.recv_response(query, add_headers + limit_header)
+                stillalive.append( (sitename, site, connection) )
+                if self.prepend_site:
+                    r = [ [sitename] + l for l in r ]
+                result += r
+            except Exception, e:
+                self.deadsites[sitename] = {
+                    "exception" : e,
+                    "site"      : site,
+                }
+
+
+        self.connections = stillalive
+        return result
+
+    def command(self, command, sitename = "local"):
+        if sitename in self.deadsites:
+            raise MKLivestatusSocketError("Connection to site %s is dead: %s" % \
+                    (sitename, self.deadsites[sitename]["exception"]))
+        conn = [t[2] for t in self.connections if t[0] == sitename]
+        if len(conn) == 0:
+            raise MKLivestatusConfigError("Cannot send command to unconfigured site '%s'" % sitename)
+        conn[0].do_command(command)
+
+    # Return connection to localhost (UNIX), if available
+    def local_connection(self):
+        for sitename, site, connection in self.connections:
+            if site["socket"].startswith("unix:") and "liveproxy" not in site["socket"]:
+                return connection
+        raise MKLivestatusConfigError("No livestatus connection to local host")
+
+# Example for forcing the local connection:
+# live.local_connection().query_value(...)
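The MultiSiteConnection class above fans a query out to every configured site and records unreachable peers in dead_sites(). A minimal usage sketch follows; the site names and socket URLs are hypothetical and not taken from this changeset:

# Minimal sketch of using MultiSiteConnection as defined above.
# Site names and socket URLs below are made-up examples.
import livestatus

sites = {
    "site1" : { "alias" : "First site",  "socket" : "unix:/omd/sites/site1/tmp/run/live" },
    "site2" : { "alias" : "Second site", "socket" : "tcp:site2.example.com:6557", "timeout" : 5 },
}

conn = livestatus.MultiSiteConnection(sites)
conn.set_prepend_site(True)   # prefix every result row with its site name
conn.set_limit(10)            # note: applied per site in the parallel code path
for row in conn.query("GET hosts\nColumns: name state\n"):
    print row
for sitename, info in conn.dead_sites().items():
    print "dead site %s: %s" % (sitename, info["exception"])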
diff -Nru check-mk-1.2.2p3/=unpacked-tar11=/api/python/make_nagvis_map.py check-mk-1.2.6p12/=unpacked-tar11=/api/python/make_nagvis_map.py
--- check-mk-1.2.2p3/=unpacked-tar11=/api/python/make_nagvis_map.py	1970-01-01 00:00:00.000000000 +0000
+++ check-mk-1.2.6p12/=unpacked-tar11=/api/python/make_nagvis_map.py	2014-10-30 13:30:24.000000000 +0000
@@ -0,0 +1,119 @@
+#!/usr/bin/python
+# -*- encoding: utf-8; py-indent-offset: 4 -*-
+# +------------------------------------------------------------------+
+# |             ____ _               _        __  __ _  __           |
+# |            / ___| |__   ___  ___| | __   |  \/  | |/ /           |
+# |           | |   | '_ \ / _ \/ __| |/ /   | |\/| | ' /            |
+# |           | |___| | | |  __/ (__|   <    | |  | | . \            |
+# |            \____|_| |_|\___|\___|_|\_\___|_|  |_|_|\_\           |
+# |                                                                  |
+# | Copyright Mathias Kettner 2014             mk@mathias-kettner.de |
+# +------------------------------------------------------------------+
+#
+# This file is part of Check_MK.
+# The official homepage is at http://mathias-kettner.de/check_mk.
+#
+# check_mk is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by
+# the Free Software Foundation in version 2. check_mk is distributed
+# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with-
+# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE. See the GNU General Public License for more de-
+# ails. You should have received a copy of the GNU General Public
+# License along with GNU Make; see the file COPYING. If not, write
+# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
+# Boston, MA 02110-1301 USA.
+
+# This is an example of using Livestatus: it creates
+# a NagVis map using actual live data from a running Nagios
+# system. Most things are hardcoded here but this might be
+# a useful example for coding your own stuff...
+
+import livestatus
+
+g_y = 50
+y_title = 40
+lineheight = 30
+x_hostgroup = 30
+x_therm = 200
+x_usv = 560
+
+def make_label(text, x, y, width):
+    print """
+define textbox {
+    text=%s
+    x=%d
+    y=%d
+    background_color=#C0C0C1
+    border_color=#000055
+    w=%d
+}""" % (text, x, y, width)
+
+
+def render_hostgroup(name, alias):
+    global g_y
+    g_y += lineheight
+
+    # Name of the server room
+    make_label(alias, x_hostgroup, g_y, x_therm - x_hostgroup - 20)
+    def display_servicegroup(name, x):
+        if live.query_value("GET servicegroups\nStats: name = %s\n" % name) == 1:
+            print """
+define servicegroup {
+    servicegroup_name = %s
+    x=%d
+    y=%d
+}""" % (name, x, g_y)
+
+    # Individual listing of the thermometers
+    num = 0
+    shift = 16
+    for host, service in live.query("GET services\nFilter: groups >= %s\nColumns: host_name description" % name):
+        num += 1
+        print """
+define service {
+    host_name=%s
+    service_description=%s
+    x=%d
+    y=%d
+    url=/pnp4nagios/graph?host=%s&srv=%s
+}
+        """ % (host, service, x + 30 + shift * num, g_y, host, service)
+
+    # Overall state of the thermometers
+    display_servicegroup(name + "_therm", x_therm)
+
+    # Listing of the UPS parameters
+    display_servicegroup(name + "_usv", x_usv)
+
+
+
+
+socket_path = "unix:/var/run/nagios/rw/live"
+live = livestatus.SingleSiteConnection(socket_path)
+
+print """
+define global {
+    allowed_for_config=nagiosadmin
+    allowed_user=nagiosadmin
+    map_image=demo_background.png
+    iconset=std_medium
+}
+"""
+
+
+# hostgroups = live.query("GET hostgroups\nColumns: name alias")
+hostgroups = [
+  ( "s02",   "S-02" ),
+  ( "s06",   "S-06" ),
+  ( "s48",   "S-48" ),
+  ( "ad214", "AD-214" ),
+  ( "ik026", "IK-026" ),
+  ( "etage", "Etagenverteiler" ),
+  ]
+for name, alias in hostgroups:
+    render_hostgroup(name, alias)
+
+make_label("Temperaturen", x_therm, y_title, 250)
+make_label("USV-Status", x_usv, y_title, 160)
+
diff -Nru check-mk-1.2.2p3/=unpacked-tar11=/api/python/README check-mk-1.2.6p12/=unpacked-tar11=/api/python/README
--- check-mk-1.2.2p3/=unpacked-tar11=/api/python/README	1970-01-01 00:00:00.000000000 +0000
+++ check-mk-1.2.6p12/=unpacked-tar11=/api/python/README	2012-10-04 13:12:37.000000000 +0000
@@ -0,0 +1,23 @@
+This directory contains a very efficient API to MK Livestatus
+for Python. It is directly taken from the Multisite GUI and
+has the following features:
+
+* It supports keep alive
+* It returns typed values
+* It supports transparent multi-site access
+* It supports persistent connection caching
+* It supports parallelized queries (though still single-threaded)
+* It supports detection of dead sites (via "status_host")
+
+Please look at the two examples:
+
+example.py:           Example for a single site
+example_multisite.py: Example querying several sites
+
+Both examples are written to be run within an OMD instance
+and need no further configuration.
+
+If you are not using OMD, you need to modify the examples
+and enter the correct path to your livestatus socket.
+Or even better: give OMD a try --> omdistro.org. This will
+make your life *really* easier!
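The example.py and example_multisite.py files the README refers to are not part of this diff. As a rough sketch of single-site usage, assuming only the names that appear elsewhere in this patch (livestatus.SingleSiteConnection, query(), query_value()) and the OMD socket path hardcoded in make_nagvis_map.py above (adjust for your installation):

    import livestatus

    socket_path = "unix:/var/run/nagios/rw/live"   # OMD default, see above
    live = livestatus.SingleSiteConnection(socket_path)

    # A plain table query returns one list per row (cf. 1.lql below):
    for name, state in live.query("GET hosts\nColumns: name state"):
        print("%s has state %d" % (name, state))

    # Stats: headers collapse the table into one row of counters
    # (cf. 7.lql below):
    num_ok, num_warn, num_crit, num_unknown = live.query(
        "GET services\n"
        "Stats: state = 0\n"
        "Stats: state = 1\n"
        "Stats: state = 2\n"
        "Stats: state = 3\n")[0]
    print("%d ok, %d warn, %d crit, %d unknown"
          % (num_ok, num_warn, num_crit, num_unknown))

    # query_value() returns a single value; make_nagvis_map.py above
    # uses it to test whether a servicegroup exists ("mygroup" is a
    # made-up name):
    if live.query_value("GET servicegroups\nStats: name = mygroup\n") == 1:
        print("servicegroup mygroup exists")

The LQL files that follow (1.lql through 13.lql) are exactly the kind of query text these methods expect and can serve as query() payloads as well; the OutputFormat: line in 10.lql would be left out, since the Python API chooses its own output format.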
diff -Nru check-mk-1.2.2p3/=unpacked-tar11=/check_mkevents check-mk-1.2.6p12/=unpacked-tar11=/check_mkevents --- check-mk-1.2.2p3/=unpacked-tar11=/check_mkevents 2013-10-31 10:20:12.000000000 +0000 +++ check-mk-1.2.6p12/=unpacked-tar11=/check_mkevents 1970-01-01 00:00:00.000000000 +0000 @@ -1,30 +0,0 @@ -#!/usr/bin/python -# -*- encoding: utf-8; py-indent-offset: 4 -*- - -def check_mkevents_arguments(params): - args = "" - if "remote" in params: - remote = params["remote"] - if type(remote) == tuple: - args += "-H %s:%d " % (quote_shell_string(remote[0]), remote[1]) - else: - args += "-H %s " % quote_shell_string(remote) - if params.get("ignore_acknowledged"): - args += "-a " - args += params.get("hostspec", "$HOSTADDRESS$") - if "application" in params: - args += " " + quote_shell_string(params["application"]) - return args - -def check_mkevents_description(params): - if "application" in params: - return "Events %s" % params["application"] - else: - return "Events" - -active_check_info['mkevents'] = { - "command_line" : '$USER1$/check_mkevents $ARG1$', - "argument_function" : check_mkevents_arguments, - "service_description" : check_mkevents_description, - "has_perfdata" : False, -} diff -Nru check-mk-1.2.2p3/=unpacked-tar11=/LQL-examples/10.lql check-mk-1.2.6p12/=unpacked-tar11=/LQL-examples/10.lql --- check-mk-1.2.2p3/=unpacked-tar11=/LQL-examples/10.lql 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/=unpacked-tar11=/LQL-examples/10.lql 2012-10-04 13:12:37.000000000 +0000 @@ -0,0 +1,7 @@ +GET services +Stats: state = 0 +Stats: state = 1 +Stats: state = 2 +Stats: state = 3 +StatsGroupBy: host_name +OutputFormat: json diff -Nru check-mk-1.2.2p3/=unpacked-tar11=/LQL-examples/11.lql check-mk-1.2.6p12/=unpacked-tar11=/LQL-examples/11.lql --- check-mk-1.2.2p3/=unpacked-tar11=/LQL-examples/11.lql 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/=unpacked-tar11=/LQL-examples/11.lql 2012-10-04 13:12:37.000000000 +0000 @@ -0,0 +1,3 @@ +GET log +Columns: message +Limit: 10 diff -Nru check-mk-1.2.2p3/=unpacked-tar11=/LQL-examples/12.lql check-mk-1.2.6p12/=unpacked-tar11=/LQL-examples/12.lql --- check-mk-1.2.2p3/=unpacked-tar11=/LQL-examples/12.lql 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/=unpacked-tar11=/LQL-examples/12.lql 2012-10-04 13:12:37.000000000 +0000 @@ -0,0 +1,4 @@ +GET log +Columns: message +Filter: host_name = windows +Limit: 10 diff -Nru check-mk-1.2.2p3/=unpacked-tar11=/LQL-examples/13.lql check-mk-1.2.6p12/=unpacked-tar11=/LQL-examples/13.lql --- check-mk-1.2.2p3/=unpacked-tar11=/LQL-examples/13.lql 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/=unpacked-tar11=/LQL-examples/13.lql 2012-10-04 13:12:37.000000000 +0000 @@ -0,0 +1,8 @@ +GET services +Stats: state = 0 +Stats: state = 1 +Stats: state = 2 +Stats: state = 3 +StatsGroupBy: host_name +AuthUser: mk + diff -Nru check-mk-1.2.2p3/=unpacked-tar11=/LQL-examples/1.lql check-mk-1.2.6p12/=unpacked-tar11=/LQL-examples/1.lql --- check-mk-1.2.2p3/=unpacked-tar11=/LQL-examples/1.lql 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/=unpacked-tar11=/LQL-examples/1.lql 2012-10-04 13:12:37.000000000 +0000 @@ -0,0 +1,2 @@ +GET hosts +Columns: name state diff -Nru check-mk-1.2.2p3/=unpacked-tar11=/LQL-examples/2.lql check-mk-1.2.6p12/=unpacked-tar11=/LQL-examples/2.lql --- check-mk-1.2.2p3/=unpacked-tar11=/LQL-examples/2.lql 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/=unpacked-tar11=/LQL-examples/2.lql 2012-10-04 13:12:37.000000000 +0000 @@ -0,0 +1,3 @@ +GET hosts +Columns: 
name state +Filter: state = 0 diff -Nru check-mk-1.2.2p3/=unpacked-tar11=/LQL-examples/3.lql check-mk-1.2.6p12/=unpacked-tar11=/LQL-examples/3.lql --- check-mk-1.2.2p3/=unpacked-tar11=/LQL-examples/3.lql 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/=unpacked-tar11=/LQL-examples/3.lql 2012-10-04 13:12:37.000000000 +0000 @@ -0,0 +1,2 @@ +GET columns +Columns: table name diff -Nru check-mk-1.2.2p3/=unpacked-tar11=/LQL-examples/4.lql check-mk-1.2.6p12/=unpacked-tar11=/LQL-examples/4.lql --- check-mk-1.2.2p3/=unpacked-tar11=/LQL-examples/4.lql 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/=unpacked-tar11=/LQL-examples/4.lql 2012-10-04 13:12:37.000000000 +0000 @@ -0,0 +1,3 @@ +GET columns +Columns: name +Filter: table = services diff -Nru check-mk-1.2.2p3/=unpacked-tar11=/LQL-examples/5.lql check-mk-1.2.6p12/=unpacked-tar11=/LQL-examples/5.lql --- check-mk-1.2.2p3/=unpacked-tar11=/LQL-examples/5.lql 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/=unpacked-tar11=/LQL-examples/5.lql 2012-10-04 13:12:37.000000000 +0000 @@ -0,0 +1,3 @@ +GET services +Columns: host_name description last_check last_hard_state_change +Filter: host_name = windows diff -Nru check-mk-1.2.2p3/=unpacked-tar11=/LQL-examples/6.lql check-mk-1.2.6p12/=unpacked-tar11=/LQL-examples/6.lql --- check-mk-1.2.2p3/=unpacked-tar11=/LQL-examples/6.lql 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/=unpacked-tar11=/LQL-examples/6.lql 2012-10-04 13:12:37.000000000 +0000 @@ -0,0 +1,5 @@ +GET services +Columns: host_name description state +Filter: state = 1 +Filter: state = 2 +Or: 2 diff -Nru check-mk-1.2.2p3/=unpacked-tar11=/LQL-examples/7.lql check-mk-1.2.6p12/=unpacked-tar11=/LQL-examples/7.lql --- check-mk-1.2.2p3/=unpacked-tar11=/LQL-examples/7.lql 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/=unpacked-tar11=/LQL-examples/7.lql 2012-10-04 13:12:37.000000000 +0000 @@ -0,0 +1,5 @@ +GET services +Stats: state = 0 +Stats: state = 1 +Stats: state = 2 +Stats: state = 3 diff -Nru check-mk-1.2.2p3/=unpacked-tar11=/LQL-examples/8.lql check-mk-1.2.6p12/=unpacked-tar11=/LQL-examples/8.lql --- check-mk-1.2.2p3/=unpacked-tar11=/LQL-examples/8.lql 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/=unpacked-tar11=/LQL-examples/8.lql 2012-10-04 13:12:37.000000000 +0000 @@ -0,0 +1,6 @@ +GET services +Stats: state = 0 +Stats: state = 1 +Stats: state = 2 +Stats: state = 3 +Filter: host_state = 0 diff -Nru check-mk-1.2.2p3/=unpacked-tar11=/LQL-examples/9.lql check-mk-1.2.6p12/=unpacked-tar11=/LQL-examples/9.lql --- check-mk-1.2.2p3/=unpacked-tar11=/LQL-examples/9.lql 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/=unpacked-tar11=/LQL-examples/9.lql 2012-10-04 13:12:37.000000000 +0000 @@ -0,0 +1,7 @@ +GET services +Stats: state = 0 +Stats: state = 1 +Stats: state = 2 +Stats: state = 3 +StatsGroupBy: host_name + diff -Nru check-mk-1.2.2p3/=unpacked-tar12=/check_mkevents check-mk-1.2.6p12/=unpacked-tar12=/check_mkevents --- check-mk-1.2.2p3/=unpacked-tar12=/check_mkevents 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/=unpacked-tar12=/check_mkevents 2015-09-21 10:59:54.000000000 +0000 @@ -0,0 +1,36 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- + +def check_mkevents_arguments(params): + args = "" + if "remote" in params: + remote = params["remote"] + if type(remote) == tuple: + args += "-H %s:%d " % (quote_shell_string(remote[0]), remote[1]) + elif remote: + args += "-s %s " % quote_shell_string(remote) + + if params.get("ignore_acknowledged"): + 
args += "-a " + + if params.get("less_verbose"): + args += "-l " + + args += params.get("hostspec", "$HOSTADDRESS$") + if "application" in params: + args += " " + quote_shell_string(params["application"]) + return args + +def check_mkevents_description(params): + item = params.get('item', params.get('application')) + if item: + return "Events %s" % item + else: + return "Events" + +active_check_info['mkevents'] = { + "command_line" : '$USER1$/check_mkevents $ARG1$', + "argument_function" : check_mkevents_arguments, + "service_description" : check_mkevents_description, + "has_perfdata" : False, +} Binary files /tmp/tmpz1J_py/xquijR6Ryw/check-mk-1.2.2p3/=unpacked-tar7=/htdocs/images/button_mkeventd_hi.png and /tmp/tmpz1J_py/GXS8RRKTxu/check-mk-1.2.6p12/=unpacked-tar7=/htdocs/images/button_mkeventd_hi.png differ Binary files /tmp/tmpz1J_py/xquijR6Ryw/check-mk-1.2.2p3/=unpacked-tar7=/htdocs/images/button_mkeventd_lo.png and /tmp/tmpz1J_py/GXS8RRKTxu/check-mk-1.2.6p12/=unpacked-tar7=/htdocs/images/button_mkeventd_lo.png differ Binary files /tmp/tmpz1J_py/xquijR6Ryw/check-mk-1.2.2p3/=unpacked-tar7=/htdocs/images/icon_ack.png and /tmp/tmpz1J_py/GXS8RRKTxu/check-mk-1.2.6p12/=unpacked-tar7=/htdocs/images/icon_ack.png differ Binary files /tmp/tmpz1J_py/xquijR6Ryw/check-mk-1.2.2p3/=unpacked-tar7=/htdocs/images/icon_clear.png and /tmp/tmpz1J_py/GXS8RRKTxu/check-mk-1.2.6p12/=unpacked-tar7=/htdocs/images/icon_clear.png differ Binary files /tmp/tmpz1J_py/xquijR6Ryw/check-mk-1.2.2p3/=unpacked-tar7=/htdocs/images/icon_counting.png and /tmp/tmpz1J_py/GXS8RRKTxu/check-mk-1.2.6p12/=unpacked-tar7=/htdocs/images/icon_counting.png differ Binary files /tmp/tmpz1J_py/xquijR6Ryw/check-mk-1.2.2p3/=unpacked-tar7=/htdocs/images/icon_delayed.png and /tmp/tmpz1J_py/GXS8RRKTxu/check-mk-1.2.6p12/=unpacked-tar7=/htdocs/images/icon_delayed.png differ Binary files /tmp/tmpz1J_py/xquijR6Ryw/check-mk-1.2.2p3/=unpacked-tar7=/htdocs/images/icon_mkeventd.png and /tmp/tmpz1J_py/GXS8RRKTxu/check-mk-1.2.6p12/=unpacked-tar7=/htdocs/images/icon_mkeventd.png differ Binary files /tmp/tmpz1J_py/xquijR6Ryw/check-mk-1.2.2p3/=unpacked-tar7=/htdocs/images/icon_resetcounters.png and /tmp/tmpz1J_py/GXS8RRKTxu/check-mk-1.2.6p12/=unpacked-tar7=/htdocs/images/icon_resetcounters.png differ diff -Nru check-mk-1.2.2p3/=unpacked-tar7=/htdocs/mkeventd.py check-mk-1.2.6p12/=unpacked-tar7=/htdocs/mkeventd.py --- check-mk-1.2.2p3/=unpacked-tar7=/htdocs/mkeventd.py 2013-11-05 09:22:37.000000000 +0000 +++ check-mk-1.2.6p12/=unpacked-tar7=/htdocs/mkeventd.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,248 +0,0 @@ -#!/usr/bin/python -# -*- encoding: utf-8; py-indent-offset: 4 -*- -# +------------------------------------------------------------------+ -# | ____ _ _ __ __ _ __ | -# | / ___| |__ ___ ___| | __ | \/ | |/ / | -# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | -# | | |___| | | | __/ (__| < | | | | . \ | -# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | -# | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | -# +------------------------------------------------------------------+ -# -# This file is part of Check_MK. -# The official homepage is at http://mathias-kettner.de/check_mk. -# -# check_mk is free software; you can redistribute it and/or modify it -# under the terms of the GNU General Public License as published by -# the Free Software Foundation in version 2. 
check_mk is distributed
-# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with-
-# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A
-# PARTICULAR PURPOSE. See the GNU General Public License for more de-
-# tails. You should have received a copy of the GNU General Public
-# License along with GNU Make; see the file COPYING. If not, write
-# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
-# Boston, MA 02110-1301 USA.
-
-import socket, config, defaults, re, time
-from lib import *
-
-# TODO: make this configurable and thus work for non OMD-users as
-# well.
-try:
-    socket_path = defaults.omd_root + "/tmp/run/mkeventd/status"
-    pipe_path   = defaults.omd_root + "/tmp/run/mkeventd/events"
-except:
-    run_dir     = defaults.livestatus_unix_socket.rsplit("/",1)[0]
-    socket_path = run_dir + "/mkeventd/status"
-    pipe_path   = run_dir + "/mkeventd/events"
-
-syslog_priorities = [
-    (0, "emerg" ),
-    (1, "alert" ),
-    (2, "crit" ),
-    (3, "err" ),
-    (4, "warning" ),
-    (5, "notice" ),
-    (6, "info" ),
-    (7, "debug" ),
-]
-
-syslog_facilities = [
-    (0, "kern"),
-    (1, "user"),
-    (2, "mail"),
-    (3, "daemon"),
-    (4, "auth"),
-    (5, "syslog"),
-    (6, "lpr"),
-    (7, "news"),
-    (8, "uucp"),
-    (9, "cron"),
-    (10, "authpriv"),
-    (11, "ftp"),
-    (12, "(12: unused)"),
-    (13, "(13: unused)"),
-    (14, "(14: unused)"),
-    (15, "(15: unused)"),
-    (16, "local0"),
-    (17, "local1"),
-    (18, "local2"),
-    (19, "local3"),
-    (20, "local4"),
-    (21, "local5"),
-    (22, "local6"),
-    (23, "local7"),
-]
-
-phase_names = {
-    'counting' : _("counting"),
-    'delayed'  : _("delayed"),
-    'open'     : _("open"),
-    'ack'      : _("acknowledged"),
-}
-
-action_whats = {
-    "ORPHANED"     : _("Event deleted in counting state because rule was deleted."),
-    "NOCOUNT"      : _("Event deleted in counting state because rule does not count anymore"),
-    "DELAYOVER"    : _("Event opened because the delay time has elapsed before cancelling event arrived."),
-    "EXPIRED"      : _("Event deleted because its livetime expired"),
-    "COUNTREACHED" : _("Event deleted because required count had been reached"),
-    "COUNTFAILED"  : _("Event created because required count was not reached in time"),
-    "UPDATE"       : _("Event information updated by user"),
-    "NEW"          : _("New event created"),
-    "DELETE"       : _("Event deleted manually by user"),
-    "EMAIL"        : _("Email sent"),
-    "SCRIPT"       : _("Script executed"),
-    "CANCELLED"    : _("The event was cancelled because the corresponding OK message was received"),
-}
-
-def service_levels():
-    try:
-        return config.mkeventd_service_levels
-    except:
-        return [(0, "(no service level)")]
-
-def action_choices(omit_hidden = False):
-    # The possible actions are configured in mkeventd.mk,
-    # not in multisite.mk (like the service levels). That
-    # way we have no direct access to them but need
-    # to load them from the configuration.
- return [ (a["id"], a["title"]) - for a in eventd_configuration().get("actions", []) - if not omit_hidden or not a.get("hidden") ] - -cached_config = None -def eventd_configuration(): - global cached_config - if cached_config and cached_config[0] is html: - return cached_config[1] - - config = { - "rules" : [], - "debug_rules" : False, - } - main_file = defaults.default_config_dir + "/mkeventd.mk" - list_of_files = reduce(lambda a,b: a+b, - [ [ "%s/%s" % (d, f) for f in fs if f.endswith(".mk")] - for d, sb, fs in os.walk(defaults.default_config_dir + "/mkeventd.d" ) ], []) - - list_of_files.sort() - for path in [ main_file ] + list_of_files: - execfile(path, config, config) - cached_config = (html, config) - return config - - -def daemon_running(): - return os.path.exists(socket_path) - - -def send_event(event): - # "<%PRI%>%TIMESTAMP% %HOSTNAME% %syslogtag% %msg%\n" - prio = (event["facility"] << 3) + event["priority"] - timestamp = time.strftime("%b %d %T", time.localtime()) - rfc = "<%d>%s %s %s: %s\n" % ( - prio, timestamp, event["host"], event["application"], event["text"]) - pipe = file(pipe_path, "w") - pipe.write(rfc + "\n") - return rfc - -def query(query): - try: - sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) - try: - timeout = config.mkeventd_connect_timeout - except: - timeout = 10 - - sock.settimeout(timeout) - # TODO: Pfad nicht auf OMD hart kodieren - sock.connect(socket_path) - sock.send(query) - - response_text = "" - while True: - chunk = sock.recv(8192) - response_text += chunk - if not chunk: - break - - return eval(response_text) - except SyntaxError, e: - raise MKGeneralException("Invalid response from event daemon:
    %s
    " % response_text) - - except Exception, e: - raise MKGeneralException("Cannot connect to event daemon via %s: %s" % (socket_path, e)) - -def replication_mode(): - try: - response = query("GET status") - status = dict(zip(response[0], response[1])) - return status["status_replication_slavemode"] - except: - return None - - -# Rule matching for simulation. Yes - there is some hateful code duplication -# here. But it does not make sense to query the live eventd here since it -# does not know anything about the currently configured but not yet activated -# rules. And also we do not want to have shared code. -def event_rule_matches(rule, event): - if False == match(rule.get("match_host"), event["host"], complete=True): - return _("The host name does not match.") - - if False == match(rule.get("match_application"), event["application"], complete=False): - return _("The application (syslog tag) does not match") - - if "match_facility" in rule and event["facility"] != rule["match_facility"]: - return _("The syslog facility does not match") - - - # First try cancelling rules - if "match_ok" in rule or "cancel_priority" in rule: - if "cancel_priority" in rule: - up, lo = rule["cancel_priority"] - cp = event["priority"] >= lo and event["priority"] <= up - else: - cp = True - - match_groups = match(rule.get("match_ok", ""), event["text"], complete = False) - if match_groups != False and cp: - if match_groups == True: - match_groups = () - return True, match_groups - - try: - match_groups = match(rule.get("match"), event["text"], complete = False) - except Exception, e: - return _("Invalid regular expression: %s" % e) - if match_groups == False: - return _("The message text does not match the required pattern.") - - if "match_priority" in rule: - prio_from, prio_to = rule["match_priority"] - if prio_from > prio_to: - prio_to, prio_from = prio_from, prio_to - p = event["priority"] - if p < prio_from or p > prio_to: - return _("The syslog priority is not in the required range.") - - if match_groups == True: - match_groups = () # no matching groups - return False, match_groups - -def match(pattern, text, complete = True): - if pattern == None: - return True - else: - if complete: - if not pattern.endswith("$"): - pattern += '$' - m = re.compile(pattern, re.IGNORECASE).match(text) - else: - m = re.compile(pattern, re.IGNORECASE).search(text) - if m: - return m.groups() - else: - return False diff -Nru check-mk-1.2.2p3/=unpacked-tar7=/plugins/icons/mkeventd.py check-mk-1.2.6p12/=unpacked-tar7=/plugins/icons/mkeventd.py --- check-mk-1.2.2p3/=unpacked-tar7=/plugins/icons/mkeventd.py 2013-11-05 09:22:37.000000000 +0000 +++ check-mk-1.2.6p12/=unpacked-tar7=/plugins/icons/mkeventd.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,100 +0,0 @@ -#!/usr/bin/python -# -*- encoding: utf-8; py-indent-offset: 4 -*- -# +------------------------------------------------------------------+ -# | ____ _ _ __ __ _ __ | -# | / ___| |__ ___ ___| | __ | \/ | |/ / | -# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | -# | | |___| | | | __/ (__| < | | | | . \ | -# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | -# | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | -# +------------------------------------------------------------------+ -# -# This file is part of Check_MK. -# The official homepage is at http://mathias-kettner.de/check_mk. -# -# check_mk is free software; you can redistribute it and/or modify it -# under the terms of the GNU General Public License as published by -# the Free Software Foundation in version 2. 
check_mk is distributed -# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- -# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A -# PARTICULAR PURPOSE. See the GNU General Public License for more de- -# ails. You should have received a copy of the GNU General Public -# License along with GNU Make; see the file COPYING. If not, write -# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, -# Boston, MA 02110-1301 USA. - -import mkeventd - -try: - mkeventd_enabled = config.mkeventd_enabled -except: - mkeventd_enabled = False - -def paint_mkeventd(what, row, tags, custom_vars): - # show for services based on the mkevents active check - command = row[what + '_check_command'] - - if what != 'service' or not command.startswith('check_mk_active-mkevents'): - return - - if '!' not in command: - return - - host = None - app = None - - # Extract parameters from check_command: - args = command.split('!')[1].split() - if not args: - return - - # Handle -a and -H options. Sorry for the hack. We currently - # have no better idea - if len(args) >= 2 and args[0] == '-H': - args = args[2:] # skip two arguments - if len(args) >= 1 and args[0] == '-a': - args = args[1:] - - if len(args) >= 1: - if args[0] == '$HOSTNAME$': - host = row['host_name'] - elif args[0] == '$HOSTADDRESS$': - host = row['host_address'] - else: - host = args[0] - - # If we have no host then the command line from the check_command seems - # to be garbled. Better show nothing in this case. - if not host: - return - - # It is possible to have a central event console, this is the default case. - # Another possible architecture is to have an event console in each site in - # a distributed environment. For the later case the base url need to be - # constructed here - site = html.site_status[row['site']]["site"] - url_prefix = '' - if getattr(config, 'mkeventd_distributed', False): - url_prefix = site['url_prefix'] + 'check_mk/' - - title = _('Events of Host %s') % (row["host_name"]) - url = 'view.py?' + htmllib.urlencode_vars([ - ("view_name", "ec_events_of_monhost"), - ("site", row["site"]), - ("host", row["host_name"]), - ]) - - if len(args) >= 2: - app = args[1].strip('\'') - title = _('Events of Application "%s" on Host %s') % (app, host) - url += '&event_application=' + app - - return '' % \ - (url_prefix + url, title) - -if mkeventd_enabled: - multisite_icons.append({ - 'host_columns': [ 'address', 'name' ], - 'paint': paint_mkeventd, - }) diff -Nru check-mk-1.2.2p3/=unpacked-tar7=/plugins/sidebar/mkeventd.py check-mk-1.2.6p12/=unpacked-tar7=/plugins/sidebar/mkeventd.py --- check-mk-1.2.2p3/=unpacked-tar7=/plugins/sidebar/mkeventd.py 2013-11-05 09:22:41.000000000 +0000 +++ check-mk-1.2.6p12/=unpacked-tar7=/plugins/sidebar/mkeventd.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,106 +0,0 @@ -#!/usr/bin/python -# -*- encoding: utf-8; py-indent-offset: 4 -*- -# +------------------------------------------------------------------+ -# | ____ _ _ __ __ _ __ | -# | / ___| |__ ___ ___| | __ | \/ | |/ / | -# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | -# | | |___| | | | __/ (__| < | | | | . \ | -# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | -# | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | -# +------------------------------------------------------------------+ -# -# This file is part of Check_MK. -# The official homepage is at http://mathias-kettner.de/check_mk. 
-# -# check_mk is free software; you can redistribute it and/or modify it -# under the terms of the GNU General Public License as published by -# the Free Software Foundation in version 2. check_mk is distributed -# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- -# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A -# PARTICULAR PURPOSE. See the GNU General Public License for more de- -# ails. You should have received a copy of the GNU General Public -# License along with GNU Make; see the file COPYING. If not, write -# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, -# Boston, MA 02110-1301 USA. - -import mkeventd - -try: - mkeventd_enabled = config.mkeventd_enabled -except: - mkeventd_enabled = False - -def render_mkeventd_performance(): - def write_line(left, right): - html.write("%s:" - "%s" % (left, right)) - - html.write("\n") - - raw_data = mkeventd.query("GET status") - data = dict(zip(raw_data[0], raw_data[1])) - columns = [ - (_("Received messages"), "message", "%.2f/s"), - (_("Rule hits"), "rule_hit", "%.2f/s"), - (_("Rule tries"), "rule_trie", "%.2f/s"), - (_("Created events"), "event", "%.2f/s"), - (_("Client connects"), "connect", "%.2f/s"), - ] - for what, col, format in columns: - write_line(what, format % data["status_average_%s_rate" % col]) - - # Hit rate - try: - write_line(_("Rule hit ratio"), "%.2f %%" % ( - data["status_average_rule_hit_rate"] / - data["status_average_rule_trie_rate"] * 100)) - except: # division by zero - write_line(_("Rule hit ratio"), _("-.-- %")) - pass - - # Time columns - time_columns = [ - (_("Processing time per message"), "processing"), - (_("Time per client request"), "request"), - (_("Replication synchronization"), "sync"), - ] - for title, name in time_columns: - value = data.get("status_average_%s_time" % name) - if value: - write_line(title, "%.2f ms" % (value * 1000)) - else: - write_line(title, _("-.-- ms")) - html.write("
    \n") - -if mkeventd_enabled: - sidebar_snapins["mkeventd_performance"] = { - "title" : _("Event Console Performance"), - "description" : _("Monitor the performance of the Event Console"), - "refresh" : 15, - "render" : render_mkeventd_performance, - "allowed" : [ "admin", ], - "styles" : """ - table.mkeventd_performance { - width: %dpx; - -moz-border-radius: 5px; - background-color: #589; - /* background-color: #6da1b8;*/ - border-style: solid; - border-color: #444 #bbb #eee #666; - /* The border needs to be substracted from the width */ - border-width: 1px; - } - table.mkeventd_performance td { - padding: 0px 2px; - font-size: 8pt; - } - table.mkeventd_performance td.right { - text-align: right; - padding: 0px; - padding-right: 1px; - white-space: nowrap; - } - - """ % (snapin_width - 2) - } diff -Nru check-mk-1.2.2p3/=unpacked-tar7=/plugins/views/mkeventd.py check-mk-1.2.6p12/=unpacked-tar7=/plugins/views/mkeventd.py --- check-mk-1.2.2p3/=unpacked-tar7=/plugins/views/mkeventd.py 2013-11-05 09:22:37.000000000 +0000 +++ check-mk-1.2.6p12/=unpacked-tar7=/plugins/views/mkeventd.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,1145 +0,0 @@ -#!/usr/bin/python -# -*- encoding: utf-8; py-indent-offset: 4 -*- -# +------------------------------------------------------------------+ -# | ____ _ _ __ __ _ __ | -# | / ___| |__ ___ ___| | __ | \/ | |/ / | -# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | -# | | |___| | | | __/ (__| < | | | | . \ | -# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | -# | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | -# +------------------------------------------------------------------+ -# -# This file is part of Check_MK. -# The official homepage is at http://mathias-kettner.de/check_mk. -# -# check_mk is free software; you can redistribute it and/or modify it -# under the terms of the GNU General Public License as published by -# the Free Software Foundation in version 2. check_mk is distributed -# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- -# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A -# PARTICULAR PURPOSE. See the GNU General Public License for more de- -# ails. You should have received a copy of the GNU General Public -# License along with GNU Make; see the file COPYING. If not, write -# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, -# Boston, MA 02110-1301 USA. - -import mkeventd -from valuespec import * - -try: - mkeventd_enabled = config.mkeventd_enabled -except: - mkeventd_enabled = False - -# .--Datasources---------------------------------------------------------. -# | ____ _ | -# | | _ \ __ _| |_ __ _ ___ ___ _ _ _ __ ___ ___ ___ | -# | | | | |/ _` | __/ _` / __|/ _ \| | | | '__/ __/ _ \/ __| | -# | | |_| | (_| | || (_| \__ \ (_) | |_| | | | (_| __/\__ \ | -# | |____/ \__,_|\__\__,_|___/\___/ \__,_|_| \___\___||___/ | -# | | -# '----------------------------------------------------------------------' - -def table_events(what, columns, add_headers, only_sites, limit, filters): - # First we wetch the list of all events from mkeventd - either current - # or historic ones. We ignore any filters for host_ here. Note: - # event_host and host_name needn't be compatible. They might differ - # in case. Also in the events table instead of the host name there - # might be the IP address of the host - while in the monitoring we - # name. We will join later. - rows = get_all_events(what, filters, limit) - - # Now we join the stuff with the host information. 
Therefore we
-    # get the information about all hosts that are referred to in
-    # any of the events.
-    required_hosts = set()
-    for row in rows:
-        host = row.get("event_host")
-        if host:
-            required_hosts.add(host.lower())
-
-    # Get information about these hosts via Livestatus. We
-    # allow event_host to match either the host_name or
-    # the host_address.
-    host_filters = ""
-    for host in required_hosts:
-        host_filters += "Filter: host_name =~ %s\n" \
-                        "Filter: host_address = %s\n" % (host.encode("utf-8"), host.encode("utf-8"))
-    if len(required_hosts) > 0:
-        host_filters += "Or: %d\n" % (len(required_hosts) * 2)
-
-    # Make sure that the host name is fetched. We need it for
-    # joining. All event columns are always fetched; the event
-    # daemon currently does not implement any Columns: header.
-    if "host_name" not in columns:
-        columns.append("host_name")
-    if "host_address" not in columns:
-        columns.append("host_address")
-
-    # Fetch list of hosts. Here is much room for optimization.
-    # If no host filter is set, then the data of all hosts would
-    # be fetched before we even know if there are any events
-    # for those hosts. Better would be to first fetch all events
-    # and then fetch the data of only the relevant hosts.
-    hostrows = event_hostrows(columns, only_sites, filters, host_filters)
-
-    # Create lookup dict from hostname/address to the dataset of the host.
-    # This speeds up the mapping to the events.
-    hostdict = {}
-    for row in hostrows:
-        hostdict[row["host_name"].lower()] = row
-        hostdict[row["host_address"]]      = row
-
-    # If there is at least one host filter, then we do not show event
-    # entries with empty host information
-    have_host_filter = False
-    for filt in filters:
-        if filt.info == "host":
-            filter_code = filt.filter('event')
-            if filter_code:
-                have_host_filter = True
-                break
-
-    if not have_host_filter:
-        # Create empty host for outer join on host table
-        empty_host = dict([ (c, "") for c in columns if c.startswith("host_") ])
-        empty_host["site"] = ''
-        empty_host["host_state"] = 0
-        empty_host["host_has_been_checked"] = 0
-
-
-    # We're ready to join the host data with the event data now. The question
-    # is what to do with events that cannot be mapped to a host...
-    new_rows = []
-    for event in rows:
-        host = event["event_host"].lower()
-
-        if host in hostdict:
-            event.update(hostdict[host])
-            new_rows.append(event)
-        elif not have_host_filter:
-            # This event does not belong to any host known by
-            # the monitoring. We need to create the columns nevertheless.
-            # TODO: If there are any host filters, these events should
-            # be dropped.
-            # We could fill in empty data here; that way one could
-            # also see events that are not assigned to any host.
-            # If we do nothing, columns are missing and the
-            # painters fall flat on their faces.
- event.update(empty_host) - new_rows.append(event) - - return new_rows - - -def event_hostrows(columns, only_sites, filters, host_filters): - filter_code = "" - for filt in filters: - header = filt.filter("event") - if not header.startswith("Sites:"): - filter_code += header - filter_code += host_filters - - host_columns = filter(lambda c: c.startswith("host_"), columns) - return get_host_table(filter_code, only_sites, host_columns) - - -def get_host_table(filter_header, only_sites, add_columns): - columns = [ "host_name" ] + add_columns - - html.live.set_only_sites(only_sites) - html.live.set_prepend_site(True) - data = html.live.query( - "GET hosts\n" + - "Columns: " + (" ".join(columns)) + "\n" + - filter_header) - html.live.set_prepend_site(False) - html.live.set_only_sites(None) - - headers = [ "site" ] + columns - rows = [ dict(zip(headers, row)) for row in data ] - return rows - -def get_all_events(what, filters, limit): - headers = "" - for f in filters: - try: - headers += f.event_headers() - except: - pass - if limit: - headers += "Limit: %d\n" % limit - - query = "GET %s\n%s" % (what, headers) - try: - debug = config.debug_mkeventd_queries - except: - debug = False - if debug \ - and html.output_format == "html" and 'W' in html.display_options: - html.write('
    ' - '%s
\n' % (query.replace('\n', '<br>
    \n'))) - response = mkeventd.query(query) - - # First line of the response is the list of column names. - headers = response[0] - rows = [] - for r in response[1:]: - rows.append(dict(zip(headers, r))) - return rows - - -# Declare datasource only if the event console is activated. We do -# not want to irritate users that do not know anything about the EC. -if mkeventd_enabled: - multisite_datasources["mkeventd_events"] = { - "title" : _("Event Console: Current Events"), - "table" : lambda *args: table_events('events', *args), - "infos" : [ "event", "host" ], - "keys" : [], - "idkeys" : [ 'site', 'host_name', 'event_id' ], - } - - multisite_datasources["mkeventd_history"] = { - "title" : _("Event Console: Event History"), - "table" : lambda *args: table_events('history', *args), - "infos" : [ "history", "event", "host" ], - "keys" : [], - "idkeys" : [ 'site', 'host_name', 'event_id', 'history_line' ], - } - - #. - # .--Filters-------------------------------------------------------------. - # | _____ _ _ _ | - # | | ___(_) | |_ ___ _ __ ___ | - # | | |_ | | | __/ _ \ '__/ __| | - # | | _| | | | || __/ | \__ \ | - # | |_| |_|_|\__\___|_| |___/ | - # | | - # '----------------------------------------------------------------------' - - # All filters for events define a function event_headers, that - # returns header lines for the event daemon, if the filter is in - # use. - class EventFilterText(FilterText): - def __init__(self, table, filter_name, column, title, op): - FilterText.__init__(self, filter_name, title, table, column, filter_name, op) - self._table = table - - # Disable Livestatus filter - def filter(self, infoname): - return "" - - def event_headers(self): - return FilterText.filter(self, self._table) - - declare_filter(200, EventFilterText("event", "event_id", "event_id", _("Event ID"), "=")) - declare_filter(200, EventFilterText("event", "event_rule_id", "event_rule_id", _("ID of rule"), "=")) - declare_filter(201, EventFilterText("event", "event_text", "event_text", _("Message/Text of event"), "~~")) - declare_filter(201, EventFilterText("event", "event_application","event_application", _("Application / Syslog-Tag"), "~~")) - declare_filter(201, EventFilterText("event", "event_contact", "event_contact", _("Contact Person"), "~~")) - declare_filter(201, EventFilterText("event", "event_comment", "event_comment", _("Comment to the event"), "~~")) - declare_filter(201, EventFilterText("event", "event_host_regex", "event_host", _("Hostname/IP-Address of original event"), "~~")) - declare_filter(201, EventFilterText("event", "event_host", "event_host", _("Hostname/IP-Address of event, exact match"), "=")) - declare_filter(201, EventFilterText("event", "event_owner", "event_owner", _("Owner of event"), "~~")) - declare_filter(221, EventFilterText("history", "history_who", "history_who", _("User that performed action"), "~~")) - declare_filter(222, EventFilterText("history", "history_line", "history_line", _("Line number in history logfile"), "=")) - - - class EventFilterCount(Filter): - def __init__(self, name, title): - Filter.__init__(self, name, title, "event", [name + "_from", name + "_to"], [name]) - self._name = name - - def display(self): - html.write("from: ") - html.number_input(self._name + "_from", "") - html.write(" to: ") - html.number_input(self._name + "_to", "") - - def filter(self, infoname): - return "" - - def event_headers(self): - try: - f = "" - if html.var(self._name + "_from"): - f += "Filter: event_count >= %d\n" % int(html.var(self._name + "_from")) - if 
html.var(self._name + "_to"): - f += "Filter: event_count <= %d\n" % int(html.var(self._name + "_to")) - return f - except: - return "" - - - declare_filter(205, EventFilterCount("event_count", _("Message count"))) - - class EventFilterState(Filter): - def __init__(self, table, name, title, choices): - varnames = [ name + "_" + str(c[0]) for c in choices ] - Filter.__init__(self, name, title, table, varnames, [name]) - self._name = name - self._choices = choices - - def double_height(self): - return len(self._choices) >= 5 - - def display(self): - html.begin_checkbox_group() - c = 0 - for name, title in self._choices: - c += 1 - html.checkbox(self._name + "_" + str(name), True, label=title) - if c == 3: - html.write("
    ") - c = 0 - html.end_checkbox_group() - - def filter(self, infoname): - return "" - - def event_headers(self): - sel = [] - for name, title in self._choices: - if html.get_checkbox(self._name + "_" + str(name)): - sel.append(str(name)) - if len(sel) > 0 and len(sel) < len(self._choices): - return "Filter: %s in %s\n" % (self._name, " ".join(sel)) - - - - declare_filter(206, EventFilterState("event", "event_state", _("State classification"), [ (0, _("OK")), (1, _("WARN")), (2, _("CRIT")), (3,_("UNKNOWN")) ])) - declare_filter(207, EventFilterState("event", "event_phase", _("Phase"), mkeventd.phase_names.items())) - declare_filter(209, EventFilterState("event", "event_priority", _("Syslog Priority"), mkeventd.syslog_priorities)) - declare_filter(225, EventFilterState("history", "history_what", _("History action type"), [(k,k) for k in mkeventd.action_whats.keys()])) - - - class EventFilterTime(FilterTime): - def __init__(self, table, name, title): - FilterTime.__init__(self, table, name, title, name) - self._table = table - - def filter(self, infoname): - return "" - - def event_headers(self): - return FilterTime.filter(self, self._table) - - declare_filter(220, EventFilterTime("event", "event_first", _("First occurrance of event"))) - declare_filter(221, EventFilterTime("event", "event_last", _("Last occurrance of event"))) - declare_filter(222, EventFilterTime("history", "history_time", _("Time of entry in event history"))) - - - class EventFilterDropdown(Filter): - def __init__(self, name, title, choices, operator = '='): - Filter.__init__(self, "event_" + name, title, "event", [ "event_" + name ], [ "event_" + name ]) - self._choices = choices - self._varname = "event_" + name - self._operator = operator - - def display(self): - if type(self._choices) == list: - choices = self._choices - else: - choices = self._choices() - html.select(self._varname, [ ("", "") ] + [(str(n),t) for (n,t) in choices]) - - def filter(self, infoname): - return "" - - def event_headers(self): - val = html.var(self._varname) - if val: - return "Filter: %s %s %s\n" % (self._varname, self._operator, val) - - - declare_filter(210, EventFilterDropdown("facility", _("Syslog Facility"), mkeventd.syslog_facilities)) - declare_filter(211, EventFilterDropdown("sl", _("Service Level at least"), mkeventd.service_levels, operator='>=')) - - #. - # .--Painters------------------------------------------------------------. 
- # | ____ _ _ | - # | | _ \ __ _(_)_ __ | |_ ___ _ __ ___ | - # | | |_) / _` | | '_ \| __/ _ \ '__/ __| | - # | | __/ (_| | | | | | || __/ | \__ \ | - # | |_| \__,_|_|_| |_|\__\___|_| |___/ | - # | | - # '----------------------------------------------------------------------' - - def paint_event_host(row): - if row["host_name"]: - return "", row["host_name"] - else: - return "", row["event_host"] - - multisite_painters["event_id"] = { - "title" : _("ID of the event"), - "short" : _("ID"), - "columns" : ["event_id"], - "paint" : lambda row: ("number", row["event_id"]), - } - - multisite_painters["event_count"] = { - "title" : _("Count (number of recent occurrances)"), - "short" : _("Cnt."), - "columns" : ["event_count"], - "paint" : lambda row: ("number", row["event_count"]), - } - - multisite_painters["event_text"] = { - "title" : _("Text/Message of the event"), - "short" : _("Message"), - "columns" : ["event_text"], - "paint" : lambda row: ("", row["event_text"]), - } - - multisite_painters["event_first"] = { - "title" : _("Time of first occurrance of this serial"), - "short" : _("First"), - "columns" : ["event_first"], - "options" : [ "ts_format", "ts_date" ], - "paint" : lambda row: paint_age(row["event_first"], True, True), - } - - multisite_painters["event_last"] = { - "title" : _("Time of last occurrance"), - "short" : _("Last"), - "columns" : ["event_last"], - "options" : [ "ts_format", "ts_date" ], - "paint" : lambda row: paint_age(row["event_last"], True, True), - } - - multisite_painters["event_comment"] = { - "title" : _("Comment to the event"), - "short" : _("Comment"), - "columns" : ["event_comment"], - "paint" : lambda row: ("", row["event_comment"]), - } - - def mkeventd_paint_sl(row): - try: - return "", dict(config.mkeventd_service_levels)[row["event_sl"]] - except: - return "", row["event_sl"] - - multisite_painters["event_sl"] = { - "title" : _("Service-Level"), - "short" : _("Level"), - "columns" : ["event_sl"], - "paint" : mkeventd_paint_sl, - } - - multisite_painters["event_host"] = { - "title" : _("Hostname/IP-Address"), - "short" : _("Host"), - "columns" : ["event_host", "host_name"], - "paint" : paint_event_host, - } - - multisite_painters["event_owner"] = { - "title" : _("Owner of event"), - "short" : _("owner"), - "columns" : ["event_owner"], - "paint" : lambda row: ("", row["event_owner"]), - } - - multisite_painters["event_contact"] = { - "title" : _("Contact Person"), - "short" : _("Contact"), - "columns" : ["event_contact" ], - "paint" : lambda row: ("", row["event_contact"]), - } - - multisite_painters["event_application"] = { - "title" : _("Application / Syslog-Tag"), - "short" : _("Application"), - "columns" : ["event_application" ], - "paint" : lambda row: ("", row["event_application"]), - } - - multisite_painters["event_pid"] = { - "title" : _("Process ID"), - "short" : _("PID"), - "columns" : ["event_pid" ], - "paint" : lambda row: ("", row["event_pid"]), - } - - multisite_painters["event_priority"] = { - "title" : _("Syslog-Priority"), - "short" : _("Prio"), - "columns" : ["event_priority" ], - "paint" : lambda row: ("", dict(mkeventd.syslog_priorities)[row["event_priority"]]), - } - - multisite_painters["event_facility"] = { - "title" : _("Syslog-Facility"), - "short" : _("Facility"), - "columns" : ["event_facility" ], - "paint" : lambda row: ("", dict(mkeventd.syslog_facilities)[row["event_facility"]]), - } - - def paint_rule_id(row): - rule_id = row["event_rule_id"] - if config.may("mkeventd.edit"): - urlvars = htmllib.urlencode_vars([("mode", 
"mkeventd_edit_rule"), ("rule_id", rule_id)]) - return "", '%s' % (urlvars, rule_id) - else: - return "", rule_id - - multisite_painters["event_rule_id"] = { - "title" : _("Rule-ID"), - "short" : _("Rule"), - "columns" : ["event_rule_id" ], - "paint" : paint_rule_id, - } - - def paint_event_state(row): - state = row["event_state"] - name = nagios_short_state_names[row["event_state"]] - return "state svcstate state%s" % state, name - - multisite_painters["event_state"] = { - "title" : _("State (severity) of event"), - "short" : _("State"), - "columns" : ["event_state"], - "paint" : paint_event_state, - } - - multisite_painters["event_phase"] = { - "title" : _("Phase of event (open, counting, etc.)"), - "short" : _("Phase"), - "columns" : ["event_phase" ], - "paint" : lambda row: ("", mkeventd.phase_names.get(row["event_phase"], '')) - } - - def paint_event_icons(row): - phase = row["event_phase"] - if phase == "ack": - title = _("This event has been acknowledged.") - elif phase == "counting": - title = _("This event has not reached the target count yet.") - elif phase == "delayed": - title = _("The action of this event is still delayed in the hope of a cancelling event.") - else: - return "", "" - return 'icons', '' % (title, phase) - - multisite_painters["event_icons"] = { - "title" : _("Event Icons"), - "short" : _("Icons"), - "columns" : [ "event_phase" ], - "paint" : paint_event_icons, - } - - # Event History - - multisite_painters["history_line"] = { - "title" : _("Line number in log file"), - "short" : _("Line"), - "columns" : ["history_line" ], - "paint" : lambda row: ("number", row["history_line"]), - } - - multisite_painters["history_time"] = { - "title" : _("Time of entry in logfile"), - "short" : _("Time"), - "columns" : ["history_time" ], - "options" : [ "ts_format", "ts_date" ], - "paint" : lambda row: paint_age(row["history_time"], True, True), - } - - multisite_painters["history_what"] = { - "title" : _("Type of event action"), - "short" : _("Action"), - "columns" : ["history_what" ], - "paint" : lambda row: ("", row["history_what"]), - } - - multisite_painters["history_what_explained"] = { - "title" : _("Explanation for event action"), - "columns" : ["history_what" ], - "paint" : lambda row: ("", mkeventd.action_whats[row["history_what"]]), - } - - - multisite_painters["history_who"] = { - "title" : _("User who performed action"), - "short" : _("Who"), - "columns" : ["history_who" ], - "paint" : lambda row: ("", row["history_who"]), - } - - multisite_painters["history_addinfo"] = { - "title" : _("Additional Information"), - "short" : _("Info"), - "columns" : ["history_addinfo" ], - "paint" : lambda row: ("", row["history_addinfo"]), - } - - #. - # .--Commands------------------------------------------------------------. 
- # | ____ _ | - # | / ___|___ _ __ ___ _ __ ___ __ _ _ __ __| |___ | - # | | | / _ \| '_ ` _ \| '_ ` _ \ / _` | '_ \ / _` / __| | - # | | |__| (_) | | | | | | | | | | | (_| | | | | (_| \__ \ | - # | \____\___/|_| |_| |_|_| |_| |_|\__,_|_| |_|\__,_|___/ | - # | | - # '----------------------------------------------------------------------' - - def command_executor_mkeventd(command, site): - response = mkeventd.query("COMMAND %s" % command) - - - # Acknowledge and update comment and contact - config.declare_permission("mkeventd.update", - _("Update an event"), - _("Needed for acknowledging and changing the comment and contact of an event"), - [ "user", "admin" ]) - - # Sub-Permissions for Changing Comment, Contact and Acknowledgement - config.declare_permission("mkeventd.update_comment", - _("Update an event: change comment"), - _("Needed for changing a comment when updating an event"), - [ "user", "admin" ]) - config.declare_permission("mkeventd.update_contact", - _("Update an event: change contact"), - _("Needed for changing a contact when updating an event"), - [ "user", "admin" ]) - - def render_mkeventd_update(): - html.write('') - if config.may("mkeventd.update_comment"): - html.write('') - if config.may("mkeventd.update_contact"): - html.write('') - html.write('') - html.write('
    %s' % _("Change comment:")) - html.text_input('_mkeventd_comment', size=50) - html.write('
    %s' % _("Change contact:")) - html.text_input('_mkeventd_contact', size=50) - html.write('
    ') - html.checkbox('_mkeventd_acknowledge', True, label=_("Set event to acknowledged")) - html.write('
    ') - html.button('_mkeventd_update', _("Update")) - - def command_mkeventd_update(cmdtag, spec, row): - if html.var('_mkeventd_update'): - if config.may("mkeventd.update_comment"): - comment = html.var_utf8("_mkeventd_comment").strip().replace(";",",") - else: - comment = "" - if config.may("mkeventd.update_contact"): - contact = html.var_utf8("_mkeventd_contact").strip().replace(":",",") - else: - contact = "" - ack = html.get_checkbox("_mkeventd_acknowledge") - return "UPDATE;%s;%s;%s;%s;%s" % \ - (row["event_id"], config.user_id, ack and 1 or 0, comment, contact), \ - _("update") - - multisite_commands.append({ - "tables" : [ "event" ], - "permission" : "mkeventd.update", - "title" : _("Update & Acknowledge"), - "render" : render_mkeventd_update, - "action" : command_mkeventd_update, - "executor" : command_executor_mkeventd, - }) - - # Change event state - config.declare_permission("mkeventd.changestate", - _("Change event state"), - _("This permission allows to change the state classification of an event " - "(e.g. from CRIT to WARN)."), - [ "user", "admin" ]) - - def render_mkeventd_changestate(): - html.button('_mkeventd_changestate', _("Change Event state to:")) - html.write(" ") - MonitoringState().render_input("_mkeventd_state", 2) - - def command_mkeventd_changestate(cmdtag, spec, row): - if html.var('_mkeventd_changestate'): - state = MonitoringState().from_html_vars("_mkeventd_state") - return "CHANGESTATE;%s;%s;%s" % \ - (row["event_id"], config.user_id, state), \ - _("change the state") - - multisite_commands.append({ - "tables" : [ "event" ], - "permission" : "mkeventd.changestate", - "title" : _("Change State"), - "render" : render_mkeventd_changestate, - "action" : command_mkeventd_changestate, - "executor" : command_executor_mkeventd, - }) - - - # Perform custom actions - config.declare_permission("mkeventd.actions", - _("Perform custom action"), - _("This permission is needed for performing the configured actions " - "(execution of scripts and sending emails)."), - [ "user", "admin" ]) - - def render_mkeventd_actions(): - for action_id, title in mkeventd.action_choices(omit_hidden = True): - html.button("_action_" + action_id, title) - html.write("
    ") - - def command_mkeventd_action(cmdtag, spec, row): - for action_id, title in mkeventd.action_choices(omit_hidden = True): - if html.var("_action_" + action_id): - return "ACTION;%s;%s;%s" % (row["event_id"], config.user_id, action_id), \ - (_("execute that action "%s"") % title) - - multisite_commands.append({ - "tables" : [ "event" ], - "permission" : "mkeventd.actions", - "title" : _("Custom Action"), - "render" : render_mkeventd_actions, - "action" : command_mkeventd_action, - "executor" : command_executor_mkeventd, - }) - - - # Delete events - config.declare_permission("mkeventd.delete", - _("Archive an event"), - _("Finally archive an event without any further action"), - [ "user", "admin" ]) - - - def command_mkeventd_delete(cmdtag, spec, row): - if html.var("_delete_event"): - command = "DELETE;%s;%s" % (row["event_id"], config.user_id) - title = _("delete") - return command, title - - - multisite_commands.append({ - "tables" : [ "event" ], - "permission" : "mkeventd.delete", - "title" : _("Archive Event"), - "render" : lambda: \ - html.button("_delete_event", _("Archive Event")), - "action" : command_mkeventd_delete, - "executor" : command_executor_mkeventd, - }) - - #. - # .--Sorters-------------------------------------------------------------. - # | ____ _ | - # | / ___| ___ _ __| |_ ___ _ __ ___ | - # | \___ \ / _ \| '__| __/ _ \ '__/ __| | - # | ___) | (_) | | | || __/ | \__ \ | - # | |____/ \___/|_| \__\___|_| |___/ | - # | | - # '----------------------------------------------------------------------' - - def cmp_simple_state(column, ra, rb): - a = ra.get(column, -1) - b = rb.get(column, -1) - if a == 3: - a = 1.5 - if b == 3: - b = 1.5 - return cmp(a, b) - - - declare_1to1_sorter("event_id", cmp_simple_number) - declare_1to1_sorter("event_count", cmp_simple_number) - declare_1to1_sorter("event_text", cmp_simple_string) - declare_1to1_sorter("event_first", cmp_simple_number) - declare_1to1_sorter("event_last", cmp_simple_number) - declare_1to1_sorter("event_comment", cmp_simple_string) - declare_1to1_sorter("event_sl", cmp_simple_number) - declare_1to1_sorter("event_host", cmp_simple_string) - declare_1to1_sorter("event_contact", cmp_simple_string) - declare_1to1_sorter("event_application", cmp_simple_string) - declare_1to1_sorter("event_pid", cmp_simple_number) - declare_1to1_sorter("event_priority", cmp_simple_number) - declare_1to1_sorter("event_facility", cmp_simple_number) # maybe convert to text - declare_1to1_sorter("event_rule_id", cmp_simple_string) - declare_1to1_sorter("event_state", cmp_simple_state) - declare_1to1_sorter("event_phase", cmp_simple_string) - declare_1to1_sorter("event_owner", cmp_simple_string) - - declare_1to1_sorter("history_line", cmp_simple_number) - declare_1to1_sorter("history_time", cmp_simple_number) - declare_1to1_sorter("history_what", cmp_simple_string) - declare_1to1_sorter("history_who", cmp_simple_string) - declare_1to1_sorter("history_addinfo", cmp_simple_string) - - #. - # .--Views---------------------------------------------------------------. 
- # | __ ___ | - # | \ \ / (_) _____ _____ | - # | \ \ / /| |/ _ \ \ /\ / / __| | - # | \ V / | | __/\ V V /\__ \ | - # | \_/ |_|\___| \_/\_/ |___/ | - # | | - # '----------------------------------------------------------------------' - - def mkeventd_view(d): - x = { - 'topic': u'Event Console', - 'browser_reload': 60, - 'column_headers': 'pergroup', - 'icon': 'mkeventd', - 'mobile': False, - 'hidden': False, - 'mustsearch': False, - 'group_painters': [], - 'num_columns': 1, - 'hidebutton': False, - 'play_sounds': False, - 'public': True, - 'sorters': [], - 'user_sortable': 'on', - 'show_filters': [], - 'hard_filters': [], - 'hide_filters': [], - 'hard_filtervars': [], - } - x.update(d) - return x - - # Table of all open events - multisite_builtin_views['ec_events'] = mkeventd_view({ - 'title': u'Events', - 'description': u'Table of all currently open events (handled and unhandled)', - 'datasource': 'mkeventd_events', - 'layout': 'table', - 'painters': [ - ('event_id', 'ec_event', ''), - ('event_icons', None, ''), - ('event_state', None, ''), - ('event_sl', None, ''), - ('event_host', 'ec_events_of_host', ''), - ('event_rule_id', None, ''), - ('event_application', None, ''), - ('event_text', None, ''), - ('event_last', None, ''), - ('event_count', None, ''), - ], - 'show_filters': [ - 'event_id', - 'event_rule_id', - 'event_text', - 'event_application', - 'event_contact', - 'event_comment', - 'event_host_regex', - 'event_count', - 'event_phase', - 'event_state', - 'event_first', - 'event_last', - 'event_priority', - 'event_facility', - 'event_sl', - 'hostregex', - ], - 'hard_filtervars': [ - ( 'event_phase_open', "on" ), - ( 'event_phase_ack', "on" ), - ( 'event_phase_counting', "" ), - ( 'event_phase_delayed', "" ), - ], - }) - - multisite_builtin_views['ec_events_of_monhost'] = mkeventd_view({ - 'title': u'Events of Monitored Host', - 'description': u'Currently open events of a host that is monitored', - 'datasource': 'mkeventd_events', - 'layout': 'table', - 'hidden': True, - 'painters': [ - ('event_id', 'ec_event', ''), - ('event_icons', None, ''), - ('event_state', None, ''), - ('event_sl', None, ''), - ('event_rule_id', None, ''), - ('event_application', None, ''), - ('event_text', None, ''), - ('event_last', None, ''), - ('event_count', None, ''), - ], - 'show_filters': [ - 'event_id', - 'event_rule_id', - 'event_text', - 'event_application', - 'event_contact', - 'event_comment', - 'event_count', - 'event_phase', - 'event_state', - 'event_first', - 'event_last', - 'event_priority', - 'event_facility', - 'event_sl', - ], - 'hide_filters': [ - 'site', - 'host', - ], - }) - multisite_builtin_views['ec_events_of_host'] = mkeventd_view({ - 'title': u'Events of Host', - 'description': u'Currently open events of one specific host', - 'datasource': 'mkeventd_events', - 'layout': 'table', - 'hidden': True, - 'painters': [ - ('event_id', 'ec_event', ''), - ('event_icons', None, ''), - ('event_state', None, ''), - ('event_sl', None, ''), - ('event_rule_id', None, ''), - ('event_application', None, ''), - ('event_text', None, ''), - ('event_last', None, ''), - ('event_count', None, ''), - ], - 'show_filters': [ - 'event_id', - 'event_rule_id', - 'event_text', - 'event_application', - 'event_contact', - 'event_comment', - 'event_count', - 'event_phase', - 'event_state', - 'event_first', - 'event_last', - 'event_priority', - 'event_facility', - 'event_sl', - ], - 'hide_filters': [ - 'site', - 'event_host', - ], - }) - - multisite_builtin_views['ec_event'] = mkeventd_view({ - 'title': u'Event 
Details', - 'description': u'Details about one event', - 'linktitle': 'Event Details', - 'datasource': 'mkeventd_events', - 'layout': 'dataset', - - 'hidden': True, - 'browser_reload': 0, - 'hide_filters': [ - 'event_id', - ], - 'painters': [ - ('event_state', None, ''), - ('event_host', None, ''), - ('host_address', 'hoststatus', ''), - ('host_contacts', None, ''), - ('host_icons', None, ''), - ('event_text', None, ''), - ('event_comment', None, ''), - ('event_owner', None, ''), - ('event_first', None, ''), - ('event_last', None, ''), - ('event_id', None, ''), - ('event_icons', None, ''), - ('event_count', None, ''), - ('event_sl', None, ''), - ('event_contact', None, ''), - ('event_application', None, ''), - ('event_pid', None, ''), - ('event_priority', None, ''), - ('event_facility', None, ''), - ('event_rule_id', None, ''), - ('event_phase', None, ''), - ('host_services', None, ''), - ], - }) - - multisite_builtin_views['ec_history_recent'] = mkeventd_view({ - 'title': u'Recent Event History', - 'description': u'Information about events and actions on events during the ' - u'recent 24 hours.', - 'datasource': 'mkeventd_history', - 'layout': 'table', - - 'painters': [ - ('history_time', None, ''), - ('event_id', 'ec_historyentry', ''), - ('history_who', None, ''), - ('history_what', None, ''), - ('event_icons', None, ''), - ('event_state', None, ''), - ('event_phase', None, ''), - ('event_sl', None, ''), - ('event_host', 'ec_history_of_host', ''), - ('event_rule_id', None, ''), - ('event_application', None, ''), - ('event_text', None, ''), - ('event_last', None, ''), - ('event_count', None, ''), - ], - 'show_filters': [ - 'event_id', - 'event_rule_id', - 'event_text', - 'event_application', - 'event_contact', - 'event_comment', - 'event_host_regex', - 'event_count', - 'event_phase', - 'event_state', - 'event_first', - 'event_last', - 'event_priority', - 'event_facility', - 'event_sl', - 'history_time', - 'history_who', - 'history_what', - ], - 'hard_filtervars': [ - ('history_time_from', '1'), - ('history_time_from_range', '86400'), - ], - 'sorters': [ - ('history_time', False), - ], - }) - - multisite_builtin_views['ec_historyentry'] = mkeventd_view({ - 'title': u'Event History Entry', - 'description': u'Details about a historical event history entry', - 'datasource': 'mkeventd_history', - 'layout': 'dataset', - - 'hidden': True, - 'browser_reload': 0, - 'hide_filters': [ - 'event_id', - 'history_line', - ], - 'painters': [ - ('history_time', None, ''), - ('history_line', None, ''), - ('history_what', None, ''), - ('history_what_explained', None, ''), - ('history_who', None, ''), - ('history_addinfo', None, ''), - ('event_state', None, ''), - ('event_host', 'ec_history_of_host', ''), - ('event_text', None, ''), - ('event_comment', None, ''), - ('event_owner', None, ''), - ('event_first', None, ''), - ('event_last', None, ''), - ('event_id', 'ec_history_of_event', ''), - ('event_icons', None, ''), - ('event_count', None, ''), - ('event_sl', None, ''), - ('event_contact', None, ''), - ('event_application', None, ''), - ('event_pid', None, ''), - ('event_priority', None, ''), - ('event_facility', None, ''), - ('event_rule_id', None, ''), - ('event_phase', None, ''), - ], - }) - - multisite_builtin_views['ec_history_of_event'] = mkeventd_view({ - 'title': u'History of Event', - 'description': u'History entries of one specific event', - 'datasource': 'mkeventd_history', - 'layout': 'table', - 'columns': 1, - - 'hidden': True, - 'browser_reload': 0, - 'hide_filters': [ - 'event_id', - ], - 
'painters': [ - ('history_time', None, ''), - ('history_line', 'ec_historyentry', ''), - ('history_what', None, ''), - ('history_what_explained', None, ''), - ('history_who', None, ''), - ('event_state', None, ''), - ('event_host', None, ''), - ('event_application', None, ''), - ('event_text', None, ''), - ('event_sl', None, ''), - ('event_priority', None, ''), - ('event_facility', None, ''), - ('event_phase', None, ''), - ('event_count', None, ''), - ], - 'sorters': [ - ('history_time', False), - ], - }) - - multisite_builtin_views['ec_history_of_host'] = mkeventd_view({ - 'title': u'Event History of Host', - 'description': u'History entries of one specific host', - 'datasource': 'mkeventd_history', - 'layout': 'table', - 'columns': 1, - - 'hidden': True, - 'browser_reload': 0, - 'hide_filters': [ - 'event_host', - ], - 'show_filters': [ - 'event_id', - 'event_rule_id', - 'event_text', - 'event_application', - 'event_contact', - 'event_comment', - 'event_count', - 'event_phase', - 'event_state', - 'event_first', - 'event_last', - 'event_priority', - 'event_facility', - 'event_sl', - 'history_time', - 'history_who', - 'history_what', - ], - 'painters': [ - ('history_time', None, ''), - ('event_id', 'ec_history_of_event', ''), - ('history_line', 'ec_historyentry', ''), - ('history_what', None, ''), - ('history_what_explained', None, ''), - ('history_who', None, ''), - ('event_state', None, ''), - ('event_host', None, ''), - ('event_application', None, ''), - ('event_text', None, ''), - ('event_sl', None, ''), - ('event_priority', None, ''), - ('event_facility', None, ''), - ('event_phase', None, ''), - ('event_count', None, ''), - ], - 'sorters': [ - ('history_time', False), - ], - }) diff -Nru check-mk-1.2.2p3/=unpacked-tar7=/plugins/wato/mkeventd.py check-mk-1.2.6p12/=unpacked-tar7=/plugins/wato/mkeventd.py --- check-mk-1.2.2p3/=unpacked-tar7=/plugins/wato/mkeventd.py 2013-11-05 09:22:37.000000000 +0000 +++ check-mk-1.2.6p12/=unpacked-tar7=/plugins/wato/mkeventd.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,1899 +0,0 @@ -#!/usr/bin/python -# -*- encoding: utf-8; py-indent-offset: 4 -*- -# +------------------------------------------------------------------+ -# | ____ _ _ __ __ _ __ | -# | / ___| |__ ___ ___| | __ | \/ | |/ / | -# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | -# | | |___| | | | __/ (__| < | | | | . \ | -# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | -# | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | -# +------------------------------------------------------------------+ -# -# This file is part of Check_MK. -# The official homepage is at http://mathias-kettner.de/check_mk. -# -# check_mk is free software; you can redistribute it and/or modify it -# under the terms of the GNU General Public License as published by -# the Free Software Foundation in version 2. check_mk is distributed -# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- -# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A -# PARTICULAR PURPOSE. See the GNU General Public License for more de- -# ails. You should have received a copy of the GNU General Public -# License along with GNU Make; see the file COPYING. If not, write -# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, -# Boston, MA 02110-1301 USA. 
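The builtin views above are all constructed by mkeventd_view(), which overlays each view's specific keys on one shared defaults dictionary via dict.update(). A minimal illustration of that merge pattern (with_defaults is a hypothetical name):

def with_defaults(overrides):
    view = {'topic': u'Event Console', 'browser_reload': 60, 'hidden': False}
    view.update(overrides)          # view-specific keys win over the defaults
    return view

detail_view = with_defaults({'hidden': True, 'browser_reload': 0})
assert detail_view == {'topic': u'Event Console', 'browser_reload': 0, 'hidden': True}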
- -import mkeventd - -try: - mkeventd_enabled = config.mkeventd_enabled -except: - mkeventd_enabled = False - -# main_config_file = defaults.check_mk_configdir + "/mkeventd.mk" -config_dir = defaults.default_config_dir + "/mkeventd.d/wato/" -if defaults.omd_root: - status_file = defaults.omd_root + "/var/mkeventd/status" - -# Include rule configuration into backup/restore/replication. Current -# status is not backed up. -if mkeventd_enabled: - replication_paths.append(( "dir", "mkeventd", config_dir )) - backup_paths.append(( "dir", "mkeventd", config_dir )) - -#. -# .--ValueSpecs----------------------------------------------------------. -# | __ __ _ ____ | -# | \ \ / /_ _| |_ _ ___/ ___| _ __ ___ ___ ___ | -# | \ \ / / _` | | | | |/ _ \___ \| '_ \ / _ \/ __/ __| | -# | \ V / (_| | | |_| | __/___) | |_) | __/ (__\__ \ | -# | \_/ \__,_|_|\__,_|\___|____/| .__/ \___|\___|___/ | -# | |_| | -# +----------------------------------------------------------------------+ -# | Declarations of the structure of rules and actions | -# '----------------------------------------------------------------------' -substitute_help = _(""" -The following placeholdes will be substituted by value from the actual event: - - - - - - - - - - - - - - - - - - - - - - -
-<tr><td>$ID$</td><td>Event ID</td></tr>
-<tr><td>$COUNT$</td><td>Number of occurrences</td></tr>
-<tr><td>$TEXT$</td><td>Message text</td></tr>
-<tr><td>$FIRST$</td><td>Time of the first occurrence (time stamp)</td></tr>
-<tr><td>$LAST$</td><td>Time of the most recent occurrence</td></tr>
-<tr><td>$COMMENT$</td><td>Event comment</td></tr>
-<tr><td>$SL$</td><td>Service Level</td></tr>
-<tr><td>$HOST$</td><td>Host name (as sent by syslog)</td></tr>
-<tr><td>$CONTACT$</td><td>Contact information</td></tr>
-<tr><td>$APPLICATION$</td><td>Syslog tag / Application</td></tr>
-<tr><td>$PID$</td><td>Process ID of the origin process</td></tr>
-<tr><td>$PRIORITY$</td><td>Syslog Priority</td></tr>
-<tr><td>$FACILITY$</td><td>Syslog Facility</td></tr>
-<tr><td>$RULE_ID$</td><td>ID of the rule</td></tr>
-<tr><td>$STATE$</td><td>State of the event (0/1/2/3)</td></tr>
-<tr><td>$PHASE$</td><td>Phase of the event (always open)</td></tr>
-<tr><td>$OWNER$</td><td>Owner of the event</td></tr>
-<tr><td>$MATCH_GROUPS$</td><td>Text groups from regular expression match, separated by spaces</td></tr>
-<tr><td>$MATCH_GROUP_1$</td><td>Text of the first match group from expression match</td></tr>
-<tr><td>$MATCH_GROUP_2$</td><td>Text of the second match group from expression match</td></tr>
-<tr><td>$MATCH_GROUP_3$</td><td>Text of the third match group from expression match (and so on...)</td></tr>
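The $NAME$ macros tabulated above are plain string substitutions against fields of the event. A sketch of how such an expansion could look (substitute_placeholders is a hypothetical helper; the daemon's actual substitution code is not part of this file):

import re

def substitute_placeholders(template, event):
    # replace $ID$, $HOST$, ... with the corresponding event fields
    def repl(match):
        return str(event.get(match.group(1).lower(), ""))
    return re.sub(r"\$([A-Z_0-9]+)\$", repl, template)

event = {"id": 17, "host": "srv01", "text": "link down"}
print(substitute_placeholders("Event $ID$ on $HOST$: $TEXT$", event))
# -> Event 17 on srv01: link down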
    -""" -) - -class ActionList(ListOf): - def __init__(self, vs, **kwargs): - ListOf.__init__(self, vs, **kwargs) - - def validate_value(self, value, varprefix): - ListOf.validate_value(self, value, varprefix) - action_ids = [ v["id"] for v in value ] - rules = load_mkeventd_rules() - for rule in rules: - for action_id in rule.get("actions", []): - if action_id not in action_ids: - raise MKUserError(varprefix, _("You are missing the action with the ID %s, " - "which is still used in some rules.") % action_id) - - -vs_mkeventd_actions = \ - ActionList( - Foldable( - Dictionary( - title = _("Action"), - optional_keys = False, - elements = [ - ( "id", - ID( - title = _("Action ID"), - help = _("A unique ID of this action that is used as an internal " - "reference in the configuration. Changing the ID is not " - "possible if still rules refer to this ID."), - allow_empty = False, - size = 12, - ) - ), - ( "title", - TextUnicode( - title = _("Title"), - help = _("A descriptive title of this action."), - allow_empty = False, - size = 64, - ) - ), - ( "disabled", - Checkbox( - title = _("Disable"), - label = _("Current disable execution of this action"), - ) - ), - ( "hidden", - Checkbox( - title = _("Hide from Status GUI"), - label = _("Do not offer this action as a command on open events"), - help = _("If you enabled this option, then this action will not " - "be available as an interactive user command. It is usable " - "as an ad-hoc action when a rule fires, nevertheless."), - ), - ), - ( "action", - CascadingDropdown( - title = _("Type of Action"), - help = _("Choose the type of action to perform"), - choices = [ - ( "email", - _("Send Email"), - Dictionary( - optional_keys = False, - elements = [ - ( "to", - TextAscii( - title = _("Recipient Email address"), - allow_empty = False, - ), - ), - ( "subject", - TextUnicode( - title = _("Subject"), - allow_empty = False, - size = 64, - ), - ), - ( "body", - TextAreaUnicode( - title = _("Body"), - help = _("Text-body of the email to send. ") + substitute_help, - cols = 64, - rows = 10, - ), - ), - ] - ) - ), - ( "script", - _("Execute Shell Script"), - Dictionary( - optional_keys = False, - elements = [ - ( "script", - TextAreaUnicode( - title = _("Script body"), - help = _("This script will be executed using the BASH shell. ") + substitute_help, - cols = 64, - rows = 10, - ) - ), - ] - ) - ), - ] - ), - ), - ], - ), - title_function = lambda value: not value["id"] and _("New Action") or (value["id"] + " - " + value["title"]), - ), - title = _("Actions (Emails & Scripts)"), - help = _("Configure that possible actions that can be performed when a " - "rule triggers and also manually by a user."), - totext = _("%d actions"), - ) - - -class RuleState(MonitoringState): - def __init__(self, **kwargs): - MonitoringState.__init__(self, **kwargs) - self._choices.append((-1, _("(set by syslog)"))) - -vs_mkeventd_rule = Dictionary( - title = _("Rule Properties"), - elements = [ - ( "id", - ID( - title = _("Rule ID"), - help = _("A unique ID of this rule. Each event will remember the rule " - "it was classified with by its rule ID."), - allow_empty = False, - size = 12, - )), - ( "description", - TextUnicode( - title = _("Description"), - help = _("You can use this description for commenting your rules. 
It " - "will not be attached to the event this rule classifies."), - size = 64, - )), - ( "disabled", - Checkbox( - title = _("Rule activation"), - help = _("Disabled rules are kept in the configuration but are not applied."), - label = _("do not apply this rule"), - ) - ), - ( "drop", - Checkbox( - title = _("Drop Message"), - help = _("With this option all messages matching this rule will be silently dropped."), - label = _("Silently drop messages, do no actions"), - ) - ), - ( "state", - RuleState( - title = _("State"), - help = _("The monitoring state that this event will trigger."), - default_value = -1, - )), - ( "sl", - DropdownChoice( - title = _("Service Level"), - choices = mkeventd.service_levels, - prefix_values = True, - ), - ), - ( "actions", - ListChoice( - title = _("Actions"), - help = _("Actions to automatically perform when this event occurs"), - choices = mkeventd.action_choices, - ) - ), - ( "count", - Dictionary( - title = _("Count messages in defined interval"), - help = _("With this option you can make the rule being executed not before " - "the matching message is seen a couple of times in a defined " - "time interval. Also counting activates the aggregation of messages " - "that result from the same rule into one event, even if count is " - "set to 1."), - optional_keys = False, - columns = 2, - elements = [ - ( "count", - Integer( - title = _("Count until triggered"), - help = _("That many times the message must occur until an event is created"), - minvalue = 1, - ), - ), - ( "period", - Age( - title = _("Time period for counting"), - help = _("If in this time range the configured number of time the rule is " - "triggered, an event is being created. If the required count is not reached " - "then the count is reset to zero."), - default_value = 86400, - ), - ), - ( "algorithm", - DropdownChoice( - title = _("Algorithm"), - help = _("Select how the count is computed. The algorithm Interval will count the " - "number of messages from the first occurrance and reset this counter as soon as " - "the interval is elapsed or the maximum count has reached. The token bucket algorithm " - "does not work with intervals but simply decreases the current count by one for " - "each partial time interval. 
Please refer to the online documentation for more details."), - choices = [ - ( "interval", _("Interval")), - ( "tokenbucket", _("Token Bucket")), - ( "dynabucket", _("Dynamic Token Bucket")), - ], - default_value = "interval") - ), - ( "count_ack", - Checkbox( - label = _("Continue counting when event is acknowledged"), - help = _("Otherwise counting will start from one with a new event for " - "the next rule match."), - default_value = False, - ) - ), - ( "separate_host", - Checkbox( - label = _("Force separate events for different hosts"), - help = _("When aggregation is turned on and the rule matches for " - "two different hosts then these two events will be kept " - "separate if you check this box."), - default_value = True, - ), - ), - ( "separate_application", - Checkbox( - label = _("Force separate events for different applications"), - help = _("When aggregation is turned on and the rule matches for " - "two different applications then these two events will be kept " - "separate if you check this box."), - default_value = True, - ), - ), - ( "separate_match_groups", - Checkbox( - label = _("Force separate events for different match groups"), - help = _("When you use subgroups in the regular expression of your " - "match text then you can have different values for the matching " - "groups be reflected in different events."), - default_value = True, - ), - ), - ], - ) - ), - ( "expect", - Dictionary( - title = _("Expect regular messages"), - help = _("With this option activated you can make the Event Console monitor " - "that a certain number of messages are at least seen within " - "each regular time interval. Otherwise an event will be created. " - "The options week, two days and day refer to " - "periodic intervals aligned at 00:00:00 on the 1st of January 1970. 
" - "You can specify a relative offset in hours in order to re-align this " - "to any other point of time."), - optional_keys = False, - columns = 2, - elements = [ - ( "interval", - CascadingDropdown( - title = _("Interval"), - html_separator = " ", - choices = [ - ( 7*86400, _("week"), - Integer( - label = _("Timezone offset"), - unit = _("hours"), - default_value = 0, - minvalue = - 167, - maxvalue = 167, - ) - ), - ( 2*86400, _("two days"), - Integer( - label = _("Timezone offset"), - unit = _("hours"), - default_value = 0, - minvalue = - 47, - maxvalue = 47, - ) - ), - ( 86400, _("day"), - DropdownChoice( - label = _("in timezone"), - choices = [ - ( -12, _("UTC -12 hours") ), - ( -11, _("UTC -11 hours") ), - ( -10, _("UTC -10 hours") ), - ( -9, _("UTC -9 hours") ), - ( -8, _("UTC -8 hours") ), - ( -7, _("UTC -7 hours") ), - ( -6, _("UTC -6 hours") ), - ( -5, _("UTC -5 hours") ), - ( -4, _("UTC -4 hours") ), - ( -3, _("UTC -3 hours") ), - ( -2, _("UTC -2 hours") ), - ( -1, _("UTC -1 hour") ), - ( 0, _("UTC") ), - ( 1, _("UTC +1 hour") ), - ( 2, _("UTC +2 hours") ), - ( 3, _("UTC +3 hours") ), - ( 4, _("UTC +4 hours") ), - ( 5, _("UTC +5 hours") ), - ( 6, _("UTC +8 hours") ), - ( 7, _("UTC +7 hours") ), - ( 8, _("UTC +8 hours") ), - ( 9, _("UTC +9 hours") ), - ( 10, _("UTC +10 hours") ), - ( 11, _("UTC +11 hours") ), - ( 12, _("UTC +12 hours") ), - ], - default_value = 0, - ) - ), - ( 3600, _("hour") ), - ( 900, _("15 minutes") ), - ( 300, _("5 minutes") ), - ( 60, _("minute") ), - ( 10, _("10 seconds") ), - ], - default_value = 3600, - ) - ), - ( "count", - Integer( - title = _("Number of expected messages in each interval"), - minvalue = 1, - ) - ), - ( "merge", - DropdownChoice( - title = _("Merge with open event"), - help = _("If there already exists an open event because of absent " - "messages according to this rule, you can optionally merge " - "the new incident with the exising event or create a new " - "event for each interval with absent messages."), - choices = [ - ( "open", _("Merge if there is an open un-acknowledged event") ), - ( "acked", _("Merge even if there is an acknowledged event") ), - ( "never", _("Create a new event for each incident - never merge") ), - ], - default_value = "open", - ) - ), - ]) - ), - ( "delay", - Age( - title = _("Delay event creation"), - help = _("The creation of an event will be delayed by this time period. This " - "does only make sense for events that can be cancelled by a negative " - "rule.")) - ), - ( "livetime", - Tuple( - title = _("Limit event livetime"), - help = _("If you set a livetime of an event, then it will automatically be " - "deleted after that time if, even if no action has taken by the user. You can " - "decide whether to expire open, acknowledged or both types of events. The lifetime " - "always starts when the event is entering the open state."), - elements = [ - Age(), - ListChoice( - choices = [ - ( "open", _("Expire events that are in the state open") ), - ( "ack", _("Expire events that are in the state acknowledged") ), - ], - default_value = [ "open" ], - ) - ], - ), - ), - ( "match", - RegExpUnicode( - title = _("Text to match"), - help = _("The rules does only apply when the given regular expression matches " - "the message text (infix search)."), - size = 64, - ) - ), - ( "match_host", - RegExpUnicode( - title = _("Match host"), - help = _("The rules does only apply when the given regular expression matches " - "the host name the message originates from. 
Note: in some cases the " - "event might use the IP address instead of the host name."), - ) - ), - ( "match_application", - RegExpUnicode( - title = _("Match syslog application (tag)"), - help = _("Regular expression for matching the syslog tag (case insenstive)"), - ) - ), - ( "match_priority", - Tuple( - title = _("Match syslog priority"), - help = _("Define a range of syslog priorities this rule matches"), - orientation = "horizontal", - show_titles = False, - elements = [ - DropdownChoice(label = _("from:"), choices = mkeventd.syslog_priorities, default_value = 4), - DropdownChoice(label = _(" to:"), choices = mkeventd.syslog_priorities, default_value = 0), - ], - ), - ), - ( "match_facility", - DropdownChoice( - title = _("Match syslog facility"), - help = _("Make the rule match only if the message has a certain syslog facility. " - "Messages not having a facility are classified as user."), - choices = mkeventd.syslog_facilities, - ) - ), - ( "match_sl", - Tuple( - title = _("Match service level"), - help = _("This setting is only useful for events that result from monitoring notifications " - "sent by Check_MK. Those can set a service level already in the event. In such a " - "case you can make this rule match only certain service levels. Events that do not "), - orientation = "horizontal", - show_titles = False, - elements = [ - DropdownChoice(label = _("from:"), choices = mkeventd.service_levels, prefix_values = True), - DropdownChoice(label = _(" to:"), choices = mkeventd.service_levels, prefix_values = True), - ], - ), - ), - ( "match_ok", - RegExpUnicode( - title = _("Text to cancel event"), - help = _("If a matching message appears with this text, then an event created " - "by this rule will automatically be cancelled (if host, application and match groups match). "), - size = 64, - ) - ), - ( "cancel_priority", - Tuple( - title = _("Syslog priority to cancel event"), - help = _("If the priority of the event lies withing this range and either no text to cancel " - "is specified or that text also matched, then events created with this rule will " - "automatically be cancelled (if host, application and match groups match)."), - orientation = "horizontal", - show_titles = False, - elements = [ - DropdownChoice(label = _("from:"), choices = mkeventd.syslog_priorities, default_value = 7), - DropdownChoice(label = _(" to:"), choices = mkeventd.syslog_priorities, default_value = 5), - ], - ), - ), - ( "set_text", - TextUnicode( - title = _("Rewrite message text"), - help = _("Replace the message text with this text. If you have bracketed " - "groups in the text to match, then you can use the placeholders " - "\\1, \\2, etc. for inserting the first, second " - "etc matching group.") + - _("The placeholder \\0 will be replaced by the original text. " - "This allows you to add new information in front or at the end."), - size = 64, - allow_empty = False, - ) - ), - ( "set_host", - TextUnicode( - title = _("Rewrite hostname"), - help = _("Replace the host name with this text. If you have bracketed " - "groups in the text to match, then you can use the placeholders " - "\\1, \\2, etc. for inserting the first, second " - "etc matching group.") + - _("The placeholder \\0 will be replaced by the original host name. " - "This allows you to add new information in front or at the end."), - allow_empty = False, - ) - ), - ( "set_application", - TextUnicode( - title = _("Rewrite application"), - help = _("Replace the application (syslog tag) with this text. 
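The set_* rewrite fields described here use \1 ... \9 for subgroups of the match expression and \0 for the original text. A sketch of that convention (rewrite is a hypothetical helper, not the daemon's implementation):

import re

def rewrite(template, match_pattern, message):
    m = re.search(match_pattern, message)    # infix search, as described above
    if not m:
        return message
    out = template.replace("\\0", message)   # \0 -> the original text
    for i, group in enumerate(m.groups(), 1):
        out = out.replace("\\%d" % i, group or "")
    return out

print(rewrite("Interface \\1 flapped", "port (eth[0-9]+)", "port eth2 down"))
# -> Interface eth2 flapped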
If you have bracketed " - "groups in the text to match, then you can use the placeholders " - "\\1, \\2, etc. for inserting the first, second " - "etc matching group.") + - _("The placeholder \\0 will be replaced by the original text. " - "This allows you to add new information in front or at the end."), - allow_empty = False, - ) - ), - ( "set_comment", - TextUnicode( - title = _("Add comment"), - help = _("Attach a comment to the event. If you have bracketed " - "groups in the text to match, then you can use the placeholders " - "\\1, \\2, etc. for inserting the first, second " - "etc matching group.") + - _("The placeholder \\0 will be replaced by the original text. " - "This allows you to add new information in front or at the end."), - size = 64, - allow_empty = False, - ) - ), - ( "set_contact", - TextUnicode( - title = _("Add contact information"), - help = _("Attach information about a contact person. If you have bracketed " - "groups in the text to match, then you can use the placeholders " - "\\1, \\2, etc. for inserting the first, second " - "etc matching group.") + - _("The placeholder \\0 will be replaced by the original text. " - "This allows you to add new information in front or at the end."), - size = 64, - allow_empty = False, - ) - ), - ], - optional_keys = [ "delay", "livetime", "count", "expect", "match_priority", "match_priority", - "match_facility", "match_sl", "match_host", "match_application", - "set_text", "set_host", "set_application", "set_comment", - "set_contact", "cancel_priority", "match_ok" ], - headers = [ - ( _("General Properties"), [ "id", "description", "disabled" ] ), - ( _("Matching Criteria"), [ "match", "match_host", "match_application", "match_priority", "match_facility", - "match_sl", "match_ok", "cancel_priority" ]), - ( _("Outcome & Action"), [ "state", "sl", "actions", "drop" ]), - ( _("Counting & Timing"), [ "count", "expect", "delay", "livetime", ]), - ( _("Rewriting"), [ "set_text", "set_host", "set_application", "set_comment", "set_contact" ]), - ], - render = "form", - form_narrow = True, -) - -# VS for simulating an even -vs_mkeventd_event = Dictionary( - title = _("Event Simulator"), - help = _("You can simulate an event here and check out, which rules are matching."), - render = "form", - form_narrow = True, - optional_keys = False, - elements = [ - ( "text", - TextUnicode( - title = _("Message Text"), - size = 80, - allow_empty = False, - default_value = _("Still nothing happened.")), - ), - ( "application", - TextUnicode( - title = _("Application Name"), - help = _("The syslog tag"), - size = 40, - allow_empty = True) - ), - ( "host", - TextUnicode( - title = _("Host Name"), - help = _("The host name of the event"), - size = 40, - allow_empty = True) - ), - ( "priority", - DropdownChoice( - title = _("Syslog Priority"), - choices = mkeventd.syslog_priorities, - default_value = 5, - ) - ), - ( "facility", - DropdownChoice( - title = _("Syslog Facility"), - choices = mkeventd.syslog_facilities, - default_value = 1, - ) - ), - ]) - - -#. -# .--Persistence---------------------------------------------------------. 
-# | ____ _ _ | -# | | _ \ ___ _ __ ___(_)___| |_ ___ _ __ ___ ___ | -# | | |_) / _ \ '__/ __| / __| __/ _ \ '_ \ / __/ _ \ | -# | | __/ __/ | \__ \ \__ \ || __/ | | | (_| __/ | -# | |_| \___|_| |___/_|___/\__\___|_| |_|\___\___| | -# | | -# +----------------------------------------------------------------------+ -# | | -# '----------------------------------------------------------------------' - -def load_mkeventd_rules(): - filename = config_dir + "rules.mk" - if not os.path.exists(filename): - return [] - try: - vars = { "rules" : [] } - execfile(filename, vars, vars) - # If we are running on OMD then we know the path to - # the state retention file of mkeventd and can read - # the rule statistics directly from that file. - if defaults.omd_root and os.path.exists(status_file): - mkeventd_status = eval(file(status_file).read()) - rule_stats = mkeventd_status["rule_stats"] - for rule in vars["rules"]: - rule["hits"] = rule_stats.get(rule["id"], 0) - - # Convert some data fields into a new format - for rule in vars["rules"]: - if "livetime" in rule: - livetime = rule["livetime"] - if type(livetime) != tuple: - rule["livetime"] = ( livetime, ["open"] ) - - return vars["rules"] - - except Exception, e: - if config.debug: - raise MKGeneralException(_("Cannot read configuration file %s: %s" % - (filename, e))) - return [] - -def save_mkeventd_rules(rules): - make_nagios_directory(defaults.default_config_dir + "/mkeventd.d") - make_nagios_directory(config_dir) - out = create_user_file(config_dir + "rules.mk", "w") - out.write("# Written by WATO\n# encoding: utf-8\n\n") - try: - if config.mkeventd_pprint_rules: - out.write("rules += \\\n%s\n" % pprint.pformat(rules)) - return - except: - pass - - out.write("rules += \\\n%r\n" % rules) - - -#. -# .--WATO Modes----------------------------------------------------------. -# | __ ___ _____ ___ __ __ _ | -# | \ \ / / \|_ _/ _ \ | \/ | ___ __| | ___ ___ | -# | \ \ /\ / / _ \ | || | | | | |\/| |/ _ \ / _` |/ _ \/ __| | -# | \ V V / ___ \| || |_| | | | | | (_) | (_| | __/\__ \ | -# | \_/\_/_/ \_\_| \___/ |_| |_|\___/ \__,_|\___||___/ | -# | | -# +----------------------------------------------------------------------+ -# | The actual configuration modes for all rules, one rule and the | -# | activation of the changes. 
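load_mkeventd_rules() and save_mkeventd_rules() above persist the ruleset as executable Python, the usual Check_MK .mk pattern: repr() or pprint on write, execfile() on read. A minimal round trip (Python 2, matching the original; on Python 3, exec(open(path).read(), context) replaces execfile):

import pprint

def save_rules(path, rules):
    f = open(path, "w")
    f.write("# Written by WATO\n# encoding: utf-8\n\n")
    f.write("rules += \\\n%s\n" % pprint.pformat(rules))
    f.close()

def load_rules(path):
    context = {"rules": []}       # the file only appends, so predefine it
    execfile(path, context, context)
    return context["rules"]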
| -# '----------------------------------------------------------------------' - -def mode_mkeventd_rules(phase): - if phase == "title": - return _("Rules for event corelation") - - elif phase == "buttons": - home_button() - mkeventd_changes_button() - if config.may("mkeventd.edit"): - html.context_button(_("New Rule"), make_link([("mode", "mkeventd_edit_rule")]), "new") - html.context_button(_("Reset Counters"), - make_action_link([("mode", "mkeventd_rules"), ("_reset_counters", "1")]), "resetcounters") - html.context_button(_("Server Status"), make_link([("mode", "mkeventd_status")]), "status") - return - - rules = load_mkeventd_rules() - - if phase == "action": - # Validation of input for rule simulation (no further action here) - if html.var("simulate") or html.var("_generate"): - event = vs_mkeventd_event.from_html_vars("event") - vs_mkeventd_event.validate_value(event, "event") - - if html.has_var("_generate") and html.check_transaction(): - if not event.get("application"): - raise MKUserError("event_p_application", _("Please specify an application name")) - if not event.get("host"): - raise MKUserError("event_p_host", _("Please specify a host name")) - rfc = mkeventd.send_event(event) - return None, "Test event generated and sent to Event Console.
<br><pre>%s</pre>
    " % rfc - - - if html.has_var("_delete"): - nr = int(html.var("_delete")) - rule = rules[nr] - c = wato_confirm(_("Confirm rule deletion"), - _("Do you really want to delete the rule %s %s?" % - (rule["id"], rule.get("description","")))) - if c: - log_mkeventd("delete-rule", _("Deleted rule %s") % rules[nr]["id"]) - del rules[nr] - save_mkeventd_rules(rules) - elif c == False: - return "" - else: - return - - elif html.has_var("_reset_counters"): - c = wato_confirm(_("Confirm counter reset"), - _("Do you really want to reset all Hits counters to zero?")) - if c: - mkeventd.query("COMMAND RESETCOUNTERS") - log_mkeventd("counter-reset", _("Resetted all rule hit counters to zero")) - elif c == False: - return "" - else: - return - - elif html.has_var("_copy_rules"): - c = wato_confirm(_("Confirm copying rules"), - _("Do you really want to copy all event rules from the master and " - "replace your local configuration with them?")) - if c: - copy_rules_from_master() - log_mkeventd("copy-rules-from-master", _("Copied the event rules from the master " - "into the local configuration")) - return None, _("Copied rules from master") - elif c == False: - return "" - else: - return - - - if html.check_transaction(): - if html.has_var("_move"): - from_pos = int(html.var("_move")) - to_pos = int(html.var("_where")) - rule = rules[from_pos] - del rules[from_pos] # make to_pos now match! - rules[to_pos:to_pos] = [rule] - save_mkeventd_rules(rules) - log_mkeventd("move-rule", _("Changed position of rule %s") % rule["id"]) - return - - rep_mode = mkeventd.replication_mode() - if rep_mode in [ "sync", "takeover" ]: - copy_url = make_action_link([("mode", "mkeventd_rules"), ("_copy_rules", "1")]) - html.show_warning(_("WARNING: This Event Console is currently running as a replication " - "slave. The rules edited here will not be used. Instead a copy of the rules of the " - "master are being used in the case of a takeover. The same holds for the event " - "actions in the global settings.

If you want you can copy the ruleset of " - "the master into your local slave configuration: ") + \ - '<a href="%s">' % copy_url + - _("Copy Rules From Master") + '</a>') - - if len(rules) == 0: - html.write(_("You have not created any rules yet.")) - return - - # Simulator - event = config.load_user_file("simulated_event", {}) - html.begin_form("simulator") - vs_mkeventd_event.render_input("event", event) - forms.end() - html.hidden_fields() - html.button("simulate", _("Try out")) - html.button("_generate", _("Generate Event!")) - html.end_form() - html.write("
    ") - - if html.var("simulate"): - event = vs_mkeventd_event.from_html_vars("event") - config.save_user_file("simulated_event", event) - else: - event = None - - html.write('') - html.write("") - html.write("" % _("Actions")) - html.write("") - html.write("" % _("ID")) - html.write("" % _("State")) - html.write("" % _("Priority")) - html.write("" % _("Facility")) - html.write("" % _("Service Level")) - if defaults.omd_root: - html.write("" % _("Hits")) - html.write("" % _("Description")) - html.write("" % _("Text to match")) - html.write("") - - odd = "even" - have_match = False - for nr, rule in enumerate(rules): - odd = odd == "odd" and "even" or "odd" - html.write('' % odd) - delete_url = make_action_link([("mode", "mkeventd_rules"), ("_delete", nr)]) - top_url = make_action_link([("mode", "mkeventd_rules"), ("_move", nr), ("_where", 0)]) - bottom_url = make_action_link([("mode", "mkeventd_rules"), ("_move", nr), ("_where", len(rules)-1)]) - up_url = make_action_link([("mode", "mkeventd_rules"), ("_move", nr), ("_where", nr-1)]) - down_url = make_action_link([("mode", "mkeventd_rules"), ("_move", nr), ("_where", nr+1)]) - edit_url = make_link([("mode", "mkeventd_edit_rule"), ("edit", nr)]) - clone_url = make_link([("mode", "mkeventd_edit_rule"), ("clone", nr)]) - html.write('') - html.write('') - html.write('' % (edit_url, rule["id"])) - if rule.get("drop"): - html.write('' % _("DROP")) - else: - html.write('' % (rule["state"], - {0:_("OK"), 1:_("WARN"), 2:_("CRIT"), 3:_("UNKNOWN"), -1:_("(syslog)")}[rule["state"]])) - - # Syslog priority - if "match_priority" in rule: - prio_from, prio_to = rule["match_priority"] - if prio_from == prio_to: - prio_text = mkeventd.syslog_priorities[prio_from][1] - else: - prio_text = mkeventd.syslog_priorities[prio_from][1][:2] + ".." + \ - mkeventd.syslog_priorities[prio_to][1][:2] - else: - prio_text = "" - html.write("" % prio_text) - - # Syslog Facility - if "match_facility" in rule: - facnr = rule["match_facility"] - html.write("" % mkeventd.syslog_facilities[facnr][1]) - else: - html.write("") - - html.write('' % dict(mkeventd.service_levels()).get(rule["sl"], rule["sl"])) - if defaults.omd_root: - hits = rule.get('hits') - html.write('' % (hits != None and hits or '')) - html.write('' % rule.get("description")) - html.write('' % rule.get("match")) - html.write('\n') - html.write('
    %s%s%s%s%s%s%s%s%s
    ') - html.icon_button(edit_url, _("Edit this rule"), "edit") - html.icon_button(clone_url, _("Create a copy of this rule"), "clone") - html.icon_button(delete_url, _("Delete this rule"), "delete") - if not rule is rules[0]: - html.icon_button(top_url, _("Move this rule to the top"), "top") - html.icon_button(up_url, _("Move this rule one position up"), "up") - else: - html.empty_icon_button() - html.empty_icon_button() - - if not rule is rules[-1]: - html.icon_button(down_url, _("Move this rule one position down"), "down") - html.icon_button(bottom_url, _("Move this rule to the bottom"), "bottom") - else: - html.empty_icon_button() - html.empty_icon_button() - - html.write('') - if rule.get("disabled"): - html.icon(_("This rule is currently disabled and will not be applied"), "disabled") - elif event: - result = mkeventd.event_rule_matches(rule, event) - if type(result) != tuple: - html.icon(_("Rule does not match: %s") % result, "rulenmatch") - else: - cancelling, groups = result - if have_match: - msg = _("This rule matches, but is overruled by a previous match.") - icon = "rulepmatch" - else: - if cancelling: - msg = _("This rule does a cancelling match.") - else: - msg = _("This rule matches.") - icon = "rulematch" - have_match = True - if groups: - msg += _(" Match groups: %s") % ",".join(groups) - html.icon(msg, icon) - - html.write('%s%s%s%s%s%s%s%s%s
    ') - - -def copy_rules_from_master(): - answer = mkeventd.query("REPLICATE 0") - if "rules" not in answer: - raise MKGeneralException(_("Cannot get rules from local event daemon.")) - rules = answer["rules"] - save_mkeventd_rules(rules) - - -def mode_mkeventd_edit_rule(phase): - rules = load_mkeventd_rules() - # Links from status view refer to rule via the rule id - if html.var("rule_id"): - rule_id = html.var("rule_id") - for nr, rule in enumerate(rules): - if rule["id"] == rule_id: - html.set_var("edit", str(nr)) - break - - edit_nr = int(html.var("edit", -1)) # missing -> new rule - clone_nr = int(html.var("clone", -1)) # Only needed in 'new' mode - new = edit_nr < 0 - - if phase == "title": - if new: - return _("Create new rule") - else: - return _("Edit rule %s" % rules[edit_nr]["id"]) - - elif phase == "buttons": - home_button() - mkeventd_rules_button() - mkeventd_changes_button() - if clone_nr >= 0: - html.context_button(_("Clear Rule"), html.makeuri([("_clear", "1")]), "clear") - return - - if new: - if clone_nr >= 0 and not html.var("_clear"): - rule = {} - rule.update(rules[clone_nr]) - else: - rule = {} - else: - rule = rules[edit_nr] - - if phase == "action": - if not html.check_transaction(): - return "mkeventd_rules" - - if not new: - old_id = rule["id"] - rule = vs_mkeventd_rule.from_html_vars("rule") - vs_mkeventd_rule.validate_value(rule, "rule") - if not new and old_id != rule["id"]: - raise MKUserError("rule_p_id", - _("It is not allowed to change the ID of an existing rule.")) - elif new: - for r in rules: - if r["id"] == rule["id"]: - raise MKUserError("rule_p_id", _("A rule with this ID already exists.")) - - try: - num_groups = re.compile(rule["match"]).groups - except: - raise MKUserError("rule_p_match", - _("Invalid regular expression")) - if num_groups > 9: - raise MKUserError("rule_p_match", - _("You matching text has too many regular expresssion subgroups. " - "Only nine are allowed.")) - - if "count" in rule and "expect" in rule: - raise MKUserError("rule_p_expect_USE", _("You cannot use counting and expecting " - "at the same time in the same rule.")) - - if "expect" in rule and "delay" in rule: - raise MKUserError("rule_p_expect_USE", _("You cannot use expecting and delay " - "at the same time in the same rule, sorry.")) - - # Make sure that number of group replacements do not exceed number - # of groups in regex of match - num_repl = 9 - while num_repl > num_groups: - repl = "\\%d" % num_repl - for name, value in rule.items(): - if name.startswith("set_") and type(value) in [ str, unicode ]: - if repl in value: - raise MKUserError("rule_p_" + name, - _("You are using the replacment reference \%d, " - "but your match text has only %d subgroups." 
% ( - num_repl, num_groups))) - num_repl -= 1 - - - if new and clone_nr >= 0: - rules[clone_nr:clone_nr] = [ rule ] - elif new: - rules = [ rule ] + rules - else: - rules[edit_nr] = rule - - save_mkeventd_rules(rules) - if new: - log_mkeventd("new-rule", _("Created new event corelation rule with id %s" % rule["id"])) - else: - log_mkeventd("edit-rule", _("Modified event corelation rule %s" % rule["id"])) - # Reset hit counters of this rule - mkeventd.query("COMMAND RESETCOUNTERS;" + rule["id"]) - return "mkeventd_rules" - - - html.begin_form("rule") - vs_mkeventd_rule.render_input("rule", rule) - vs_mkeventd_rule.set_focus("rule") - forms.end() - html.button("save", _("Save")) - html.hidden_fields() - html.end_form() - -def mkeventd_reload(): - mkeventd.query("COMMAND RELOAD") - try: - os.remove(log_dir + "mkeventd.log") - except OSError: - pass # ignore not existing logfile - log_audit(None, "mkeventd-activate", _("Activated changes of event console configuration")) - -# This hook is executed when one applies the pending configuration changes -# related to the mkeventd via WATO on the local system. The hook is called -# without parameters. -def call_hook_mkeventd_activate_changes(): - if hooks.registered('mkeventd-activate-changes'): - hooks.call("mkeventd-activate-changes") - -def mode_mkeventd_changes(phase): - if phase == "title": - return _("Event Console - Pending Changes") - - elif phase == "buttons": - home_button() - mkeventd_rules_button() - if config.may("mkeventd.activate") and parse_audit_log("mkeventd") and mkeventd.daemon_running(): - html.context_button(_("Activate Changes!"), - html.makeactionuri([("_activate", "now")]), "apply", hot=True) - - elif phase == "action": - if html.check_transaction(): - mkeventd_reload() - call_hook_mkeventd_activate_changes() - return "mkeventd_rules", _("Changes successfully activated.") - - else: - if not mkeventd.daemon_running(): - warning = _("The Event Console Daemon is currently not running. ") - if defaults.omd_root: - warning += _("Please make sure that you have activated it with omd config set MKEVENTD on " - "before starting this site.") - html.show_warning(warning) - entries = parse_audit_log("mkeventd") - if entries: - render_audit_log(entries, "pending", hilite_others=True) - else: - html.write("
    " + _("There are no pending changes.") + "
    ") - -def log_mkeventd(what, message): - log_entry(None, what, message, "audit.log") # central WATO audit log - log_entry(None, what, message, "mkeventd.log") # pending changes for mkeventd - -def mkeventd_changes_button(): - pending = parse_audit_log("mkeventd") - if len(pending) > 0: - buttontext = "%d " % len(pending) + _("Changes") + "" - hot = True - icon = "mkeventd" - else: - buttontext = _("No Changes") - hot = False - icon = "mkeventd" - html.context_button(buttontext, make_link([("mode", "mkeventd_changes")]), icon, hot) - -def mkeventd_rules_button(): - html.context_button(_("All Rules"), make_link([("mode", "mkeventd_rules")]), "back") - -def mode_mkeventd_status(phase): - if phase == "title": - return _("Event Console - Server Status") - - elif phase == "buttons": - home_button() - mkeventd_rules_button() - return - - elif phase == "action": - if config.may("mkeventd.switchmode"): - if html.has_var("_switch_sync"): - new_mode = "sync" - else: - new_mode = "takeover" - c = wato_confirm(_("Confirm switching replication mode"), - _("Do you really want to switch the event daemon to %s mode?" % - new_mode)) - if c: - mkeventd.query("COMMAND SWITCHMODE;%s" % new_mode) - log_audit(None, "mkeventd-switchmode", _("Switched replication slave mode to %s" % new_mode)) - return None, _("Switched to %s mode") % new_mode - elif c == False: - return "" - else: - return - - return - - if not mkeventd.daemon_running(): - warning = _("The Event Console Daemon is currently not running. ") - if defaults.omd_root: - warning += _("Please make sure that you have activated it with omd config set MKEVENTD on " - "before starting this site.") - html.show_warning(warning) - return - - response = mkeventd.query("GET status") - status = dict(zip(response[0], response[1])) - repl_mode = status["status_replication_slavemode"] - html.write("

<h3>%s</h3>
    " % _("Current Server Status")) - html.write("
      ") - html.write("
<li>%s
</li>" % _("Event Daemon is running.")) - html.write("
<li>%s: %s
</li>" % (_("Current replication mode"), - { "sync" : _("synchronize"), - "takeover" : _("Takeover!"), - }.get(repl_mode, _("master / standalone")))) - if repl_mode in [ "sync", "takeover" ]: - html.write(("
<li>" + _("Status of last synchronization: %s") + "
</li>") % ( - status["status_replication_success"] and _("Success") or _("Failed!"))) - last_sync = status["status_replication_last_sync"] - if last_sync: - html.write("
<li>" + _("Last successful sync %d seconds ago.") % (time.time() - last_sync) + "
</li>") - else: - html.write(_("
<li>No successful synchronization so far.
</li>")) - - html.write("</ul>
    ") - - if config.may("mkeventd.switchmode"): - html.begin_form("switch") - if repl_mode == "sync": - html.button("_switch_takeover", _("Switch to Takeover mode!")) - elif repl_mode == "takeover": - html.button("_switch_sync", _("Switch back to sync mode!")) - html.hidden_fields() - html.end_form() - - - -if mkeventd_enabled: - modes["mkeventd_rules"] = (["mkeventd.edit"], mode_mkeventd_rules) - modes["mkeventd_edit_rule"] = (["mkeventd.edit"], mode_mkeventd_edit_rule) - modes["mkeventd_changes"] = (["mkeventd.edit"], mode_mkeventd_changes) - modes["mkeventd_status"] = ([], mode_mkeventd_status) - - - -#. -# .--Permissions---------------------------------------------------------. -# | ____ _ _ | -# | | _ \ ___ _ __ _ __ ___ (_)___ ___(_) ___ _ __ ___ | -# | | |_) / _ \ '__| '_ ` _ \| / __/ __| |/ _ \| '_ \/ __| | -# | | __/ __/ | | | | | | | \__ \__ \ | (_) | | | \__ \ | -# | |_| \___|_| |_| |_| |_|_|___/___/_|\___/|_| |_|___/ | -# | | -# +----------------------------------------------------------------------+ -# | Declaration of Event Console specific permissions for Multisite | -# '----------------------------------------------------------------------' - -if mkeventd_enabled: - config.declare_permission_section("mkeventd", _("Event Console")) - - config.declare_permission("mkeventd.edit", - _("Configuration of event rules"), - _("This permission allows the creation, modification and " - "deletion of event corelation rules."), - ["admin"]) - - config.declare_permission("mkeventd.activate", - _("Activate changes for event console"), - _("Activation of changes for the event console (rule modification, " - "global settings) is done separately from the monitoring configuration " - "and needs this permission."), - ["admin"]) - - config.declare_permission("mkeventd.switchmode", - _("Switch slave replication mode"), - _("This permission is only useful if the Event Console is setup as a replication " - "slave. It allows a manual switch between sync and takeover mode."), - ["admin"]) - - modules.append( - ( "mkeventd_rules", _("Event Console"), "mkeventd", "mkeventd.edit", - _("Manage event classification and corelation rules for the " - "event console"))) - - -#. -# .--Settings & Rules----------------------------------------------------. -# | ____ _ _ _ ____ _ | -# |/ ___| ___| |_| |_(_)_ __ __ _ ___ _ | _ \ _ _| | ___ ___ | -# |\___ \ / _ \ __| __| | '_ \ / _` / __|_| |_| |_) | | | | |/ _ \/ __| | -# | ___) | __/ |_| |_| | | | | (_| \__ \_ _| _ <| |_| | | __/\__ \ | -# ||____/ \___|\__|\__|_|_| |_|\__, |___/ |_| |_| \_\\__,_|_|\___||___/ | -# | |___/ | -# +----------------------------------------------------------------------+ -# | Declarations for global settings of EC parameters and of a rule for | -# | active checks that query the EC status of a host. 
| -# '----------------------------------------------------------------------' - - -if mkeventd_enabled: - register_configvar_domain("mkeventd", config_dir, lambda msg: log_mkeventd('config-change', msg)) - group = _("Event Console") - - register_configvar(group, - "remote_status", - Optional( - Tuple( - elements = [ - Integer( - title = _("Port number:"), - help = _("If you are running the mkeventd as a non-root (such as in an OMD site) " - "please choose port number greater than 1024."), - minvalue = 1, - maxvalue = 65535, - default_value = 6558, - ), - Checkbox( - title = _("Security"), - label = _("allow execution of commands and actions via TCP"), - help = _("Without this option the access is limited to querying the current " - "and historic event status."), - default_value = False, - true_label = _("allow commands"), - false_label = _("no commands"), - ), - Optional( - ListOfStrings( - help = _("The access to the event status via TCP will only be allowed from " - "this source IP addresses"), - - valuespec = IPv4Address(), - orientation = "horizontal", - allow_empty = False, - ), - label = _("Restrict access to the following source IP addresses"), - none_label = _("access unrestricted"), - ) - ], - ), - title = _("Access to event status via TCP"), - help = _("In Multisite setups if you want event status checks for hosts that " - "live on a remote site you need to activate remote access to the event status socket " - "via TCP. This allows to query the current event status via TCP. If you do not restrict " - "this to queries also event actions are possible from remote. This feature is not used " - "by the event status checks nor by Multisite so we propose not allowing commands via TCP."), - none_label = _("no access via TCP"), - ), - domain = "mkeventd") - - register_configvar(group, - "mkeventd_connect_timeout", - Integer( - title = _("Connect timeout to status socket"), - help = _("When the Multisite GUI connects the socket of the event daemon " - "in order to retrieve information about current and historic events " - "then this timeout will be applied."), - minvalue = 1, - maxvalue = 120, - default_value = 10, - unit = "sec", - ), - domain = "multisite" - ) - - register_configvar(group, - "replication", - Optional( - Dictionary( - optional_keys = [ "takeover", "fallback", "disabled", "logging" ], - elements = [ - ( "master", - Tuple( - title = _("Master Event Console"), - help = _("Specify the host name or IP address of the master Event Console that " - "you want to replicate from. 
The port number must be the same as set " - "in the master in Access to event status via TCP."), - elements = [ - TextAscii( - title = _("Hostname/IP address of Master Event Console:"), - allow_empty = False, - ), - Integer( - title = _("TCP Port number of status socket:"), - minvalue = 1, - maxvalue = 65535, - default_value = 6558, - ), - ], - ) - ), - ( "interval", - Integer( - title = _("Replication interval"), - help = _("The replication will be triggered each this number of seconds"), - label = _("Do a replication every"), - unit = _("sec"), - minvalue = 1, - default_value = 10, - ), - ), - ( "connect_timeout", - Integer( - title = _("Connection timeout"), - help = _("TCP connection timeout for connecting to the master"), - label = _("Try bringing up TCP connection for"), - unit = _("sec"), - minvalue = 1, - default_value = 10, - ), - ), - ( "takeover", - Integer( - title = _("Automatic takeover"), - help = _("If you enable this option then the slave will automatically " - "takeover and enable event processing if the master is for " - "the configured number of seconds unreachable."), - label = _("Takeover after a master downtime of"), - unit = _("sec"), - minvalue = 1, - default_value = 30, - ), - ), - ( "fallback", - Integer( - title = _("Automatic fallback"), - help = _("If you enable this option then the slave will automatically " - "fallback from takeover mode to slavemode if the master is " - "rechable again within the selected number of seconds since " - "the previous unreachability (not since the takeover)"), - label = _("Fallback if master comes back within"), - unit = _("sec"), - minvalue = 1, - default_value = 60, - ), - ), - ( "disabled", - FixedValue( - True, - totext = _("Replication is disabled"), - title = _("Currently disable replication"), - help = _("This allows you to disable the replication without loosing " - "your settings. If you check this box, then no replication " - "will be done and the Event Console will act as its own master."), - ), - ), - ( "logging", - FixedValue( - True, - title = _("Log replication events"), - totext = _("logging is enabled"), - help = _("Enabling this option will create detailed log entries for all " - "replication activities of the slave. If disabled only problems " - "will be logged."), - ), - ), - ] - ), - title = _("Enable replication from a master"), - ), - domain = "mkeventd" - ) - - - - register_configvar(group, - "retention_interval", - Age(title = _("State Retention Interval"), - help = _("In this interval the event daemon will save its state " - "to disk, so that you won't loose your current event " - "state in case of a crash."), - default_value = 60, - ), - domain = "mkeventd") - - register_configvar(group, - "housekeeping_interval", - Age(title = _("Housekeeping Interval"), - help = _("From time to time the eventd checks for messages that are expected to " - "be seen on a regular base, for events that time out and yet for " - "count periods that elapse. Here you can specify the regular interval " - "for that job."), - default_value = 60, - ), - domain = "mkeventd") - - register_configvar(group, - "statistics_interval", - Age(title = _("Statistics Interval"), - help = _("The event daemon keeps statistics about the rate of messages, events " - "rule hits, and other stuff. 
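The takeover and fallback options above amount to a small state machine: a slave in sync mode takes over after the master has been silent for the takeover time, and a slave in takeover mode falls back once the master is reachable again soon enough after it first became unreachable. A toy model of that decision (illustrative only, not the daemon's logic):

def next_mode(mode, master_reachable, unreachable_for, takeover=30, fallback=60):
    # unreachable_for: seconds since the master was last reachable
    if mode == "sync" and not master_reachable and unreachable_for >= takeover:
        return "takeover"
    if mode == "takeover" and master_reachable and unreachable_for <= fallback:
        return "sync"
    return mode

assert next_mode("sync", False, 45) == "takeover"
assert next_mode("takeover", True, 50) == "sync"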
These values are updated in the interval " - "configured here and are available in the sidebar snapin Event Console " - "Performance"), - default_value = 5, - ), - domain = "mkeventd") - - register_configvar(group, - "debug_rules", - Checkbox(title = _("Debug rule execution"), - label = _("enable extensive rule logging"), - help = _("This option turns on logging the execution of rules. For each message received " - "the execution details of each rule are logged. This creates an immense " - "volume of logging and should never be used in productive operation."), - default_value = False), - domain = "mkeventd") - - register_configvar(group, - "rule_optimizer", - Checkbox(title = _("Optimize rule execution"), - label = _("enable optimized rule execution"), - help = _("This option turns on a faster algorithm for matching events to rules. "), - default_value = True), - domain = "mkeventd") - - register_configvar(group, - "log_rulehits", - Checkbox(title = _("Log rule hits"), - label = _("Log hits for rules in log of mkeventd"), - help = _("If you enable this option then every time an event matches a rule " - "(by normal hit, cancelling, counting or dropping) a log entry will be written " - "into the log file of the mkeventd. Please be aware that this might lead to " - "a large number of log entries. "), - default_value = False), - domain = "mkeventd") - - - register_configvar(group, - "debug_mkeventd_queries", - Checkbox(title = _("Debug queries to mkeventd"), - label = _("enable debugging of queries"), - help = _("With this option turned on all queries made to the event daemon " - "will be displayed."), - default_value = False), - domain = "multisite") - - register_configvar(group, - "mkeventd_pprint_rules", - Checkbox(title = _("Pritty-Print rules in configuration file"), - label = _("enable pritty-printing of rules"), - help = _("When the WATO module of the Event Console saves rules to the file " - "mkeventd.d/wato/rules.mk it usually prints the Python " - "representation of the rules-list into one single line by using the " - "native Python code generator. Enabling this option switches to pprint, " - "which nicely indents everything. While this is a bit slower for large " - "rulesets it makes debugging and manual editing simpler."), - default_value = False), - domain = "multisite") - - - register_configvar(group, - "actions", - vs_mkeventd_actions, - domain = "mkeventd", - allow_reset = False) - - register_configvar(group, - "history_rotation", - DropdownChoice( - title = _("Event history logfile rotation"), - help = _("Specify at which time period a new file for the event history will be created."), - choices = [ - ( "daily", _("daily")), - ( "weekly", _("weekly")) - ], - default_value = "daily", - ), - domain = "mkeventd") - - register_configvar(group, - "history_lifetime", - Integer( - title = _("Event history lifetime"), - help = _("After this number of days old logfile of event history " - "will be deleted."), - default_value = 365, - unit = _("days"), - minvalue = 1, - ), - domain = "mkeventd") - - register_configvar(group, - "socket_queue_len", - Integer( - title = _("Max. number of pending connections to the status socket"), - help = _("When the Multisite GUI or the active check check_mkevents connects " - "to the socket of the event daemon in order to retrieve information " - "about current and historic events then its connection request might " - "be queued before being processed. 
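socket_queue_len above concerns the status socket that Multisite and check_mkevents query. A bare-bones sketch of such a query over the UNIX socket (the path is an example; "GET status" is the query used elsewhere in this file):

import socket

def query_status_socket(path, query="GET status"):
    s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
    s.connect(path)
    s.sendall((query + "\n").encode("utf-8"))
    s.shutdown(socket.SHUT_WR)       # signal that the request is complete
    chunks = []
    while True:
        data = s.recv(8192)
        if not data:
            break
        chunks.append(data)
    s.close()
    return b"".join(chunks)

# e.g. query_status_socket("/omd/sites/mysite/tmp/run/mkeventd/status")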
This setting defines the number of unaccepted " - "connections to be queued before refusing new connections."), - minvalue = 1, - default_value = 10, - label = "max.", - unit = "pending connections", - ), - domain = "mkeventd" - ) - - register_configvar(group, - "eventsocket_queue_len", - Integer( - title = _("Max. number of pending connections to the event socket"), - help = _("The event socket is an alternative way for sending events " - "to the Event Console. It is used by the Check_MK logwatch check " - "when forwarding log messages to the Event Console. " - "This setting defines the number of unaccepted " - "connections to be queued before refusing new connections."), - minvalue = 1, - default_value = 10, - label = "max.", - unit = "pending connections", - ), - domain = "mkeventd" - ) - -# Settings that should also be avaiable on distributed Sites that -# do not run an own eventd but want to query one or send notifications -# to one. -group = _("Notification") -register_configvar(group, - "mkeventd_notify_contactgroup", - GroupSelection( - "contact", - title = _("Send notifications to Event Console"), - no_selection = _("(don't send notifications to Event Console)"), - label = _("send notifications of contactgroup:"), - help = _("If you select a contact group here, then all notifications of " - "hosts and services in that contact group will be sent to the " - "event console. Note: you still need to create a rule " - "matching those messages in order to have events created."), - default_value = '', - - ), - domain = "multisite", - need_restart = True) - -register_configvar(group, - "mkeventd_notify_facility", - DropdownChoice( - title = _("Syslog facility for Event Console notifications"), - help = _("When sending notifications from the monitoring system to the event console " - "the following syslog facility will be set for these messages. Choosing " - "a unique facility makes creation of rules easier."), - choices = mkeventd.syslog_facilities, - default_value = 16, # local0 - ), - domain = "multisite", - need_restart = True) - -register_configvar(group, - "mkeventd_notify_remotehost", - Optional( - TextAscii( - title = _("Host running Event Console") - ), - title = _("Forward notifications to remote host"), - help = _("This will send the notification to a Check_MK Event Console on a remote host " - "by using syslog. Note: this setting will only be applied if no Event " - "Console is running locally in this site! That way you can use the same global " - "settings on your central and decentralized system and makes distributed WATO " - "easier. Please also make sure that Send notifications to Event Console " - "is enabled."), - label = _("Send to remote Event Console via syslog"), - none_label = _("Do not send to remote host"), - ), - domain = "multisite", - need_restart = True) - -register_configvar(group, - "mkeventd_service_levels", - ListOf( - Tuple( - elements = [ - Integer( - title = _("internal ID"), - minvalue = 0, - maxvalue = 100, - ), - TextUnicode( - title = _("Name / Description"), - allow_empty = False, - ), - ], - orientation = "horizontal", - ), - title = _("Service Levels for Event Console"), - help = _("Here you can configure the list of possible service levels for an " - "event. Each event rule selects a service level. Internally the level is " - "represented as an integer number. Note: a higher number represents " - "a higher service level. This is important when filtering views " - "after the service level.
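Service levels above are (integer, name) pairs, and a larger integer means a higher level; that is what makes "from level X upwards" view filters work. A tiny sketch with made-up example levels:

service_levels = [(0, u"(no service level)"), (10, u"Silver"), (30, u"Gold")]

def level_name(sl):
    # same dict(...).get() lookup pattern as used for display above
    return dict(service_levels).get(sl, sl)

important = [sl for sl, name in service_levels if sl >= 10]
assert level_name(30) == u"Gold" and important == [10, 30]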

    You can also attach service levels to hosts " - "and services in the monitoring. These levels will then be sent to the " - "Event Console when you forward notifications to it and will override the " - "setting of the matching rule."), - allow_empty = False, - default_value = [ (0, _("(no service level)")) ], - ), - domain = "multisite", - allow_reset = False, -) - - -register_rulegroup("eventconsole", - _("Event Console"), - _("Settings and Checks dealing with the Check_MK Event Console")) -group = "eventconsole" - - -register_rule( - group, - "active_checks:mkevents", - Dictionary( - title = _("Check event state in Event Console"), - help = _("This check is part of the Check_MK Event Console and will check " - "if there are any open events for a certain host (and maybe a certain " - "application on that host. The state of the check will reflect the status " - "of the worst open event for that host."), - elements = [ - ( "hostspec", - OptionalDropdownChoice( - title = _("Host specification"), - help = _("When quering the event status you can either use the monitoring " - "host name, the IP address or a custom host name for referring to a " - "host. This is needed in cases where the event source (syslog, snmptrapd) " - "do not send a host name that matches the monitoring host name."), - choices = [ - ( '$HOSTNAME$', _("Monitoring Host name") ), - ( '$HOSTADDRESS$', _("Host IP Address" ) ) ], - otherlabel = _("Specify explicitly"), - explicit = TextAscii(allow_empty = False), - default_value = '$HOSTNAME$', - ) - ), - ( "application", - RegExp( - title = _("Application (regular expression)"), - help = _("If you enter an application name here then only " - "events for that application name are counted. You enter " - "a regular expression here that must match a part " - "of the application name. Use anchors ^ and $ " - "if you need a complete match."), - allow_empty = False, - ) - ), - ( "ignore_acknowledged", - FixedValue( - True, - title = _("Ignore Acknowledged Events"), - help = _("If you check this box then only open events are honored when " - "determining the event state. Acknowledged events are displayed " - "(i.e. their count) but not taken into account."), - totext = _("acknowledged events will not be honored"), - ) - ), - ( "remote", - Alternative( - title = _("Access to the Event Console"), - elements = [ - Tuple( - elements = [ - TextAscii( - title = _("Hostname/IP address of Event Console:"), - allow_empty = False, - ), - Integer( - title = _("TCP Port number:"), - minvalue = 1, - maxvalue = 65535, - default_value = 6558, - ), - ], - title = _("Access via TCP"), - help = _("In a distributed setup where the Event Console is not running in the same " - "site as the host is monitored you need to access the remote Event Console " - "via TCP. Please make sure that this is activated in the global settings of " - "the event console. The default port number is 6558."), - ), - TextAscii( - title = _("Access via UNIX socket"), - allow_empty = False, - size = 64, - ), - - ], - default_value = defaults.omd_root - and defaults.omd_root + "/tmp/run/mkeventd/status" - or defaults.livestatus_unix_socket.split("/",1)[0] + "/mkeventd/status" - ) - ), - ], - optional_keys = [ "application", "remote", "ignore_acknowledged" ], - ), - match = 'all', -) - -sl_help = _("This rule set is useful if you send your monitoring notifications " - "into the Event Console. 
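The Dictionary above is the WATO valuespec for the rule set active_checks:mkevents; a saved rule ends up as a plain dictionary with exactly these keys in the generated configuration. A rough sketch of what such an entry might look like, assuming the usual (value, tag list, host list) WATO rule layout; the host name 'ec.example.com' and the regex are made-up illustrations:

    # Hypothetical rules.mk entry produced by the valuespec above:
    active_checks['mkevents'] = [
        ( {
            'hostspec'            : '$HOSTNAME$',             # query by monitoring host name
            'application'         : '^sshd',                  # count only events whose application matches
            'ignore_acknowledged' : True,                     # acknowledged events do not determine the state
            'remote'              : ('ec.example.com', 6558), # reach a remote Event Console via TCP
          }, [], ALL_HOSTS ),
    ]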
-
-sl_help = _("This rule set is useful if you send your monitoring notifications "
-            "into the Event Console. A service level set by this rule will be "
-            "used as the service level of the resulting event in the Event Console.")
-
-register_rule(
-    group,
-    "extra_host_conf:_ec_sl",
-    DropdownChoice(
-        title = _("Service Level of hosts"),
-        help = sl_help,
-        choices = mkeventd.service_levels,
-    ),
-    match = 'first',
-)
-
-register_rule(
-    group,
-    "extra_service_conf:_ec_sl",
-    DropdownChoice(
-        title = _("Service Level of services"),
-        help = sl_help + _(" Note: if no service level is configured for a service "
-                           "then that of the host will be used instead (if configured)."),
-        choices = mkeventd.service_levels,
-    ),
-    itemtype = 'service',
-    match = 'first',
-)
-
-contact_help = _("This rule set is useful if you send your monitoring notifications "
-                 "into the Event Console. The contact information that is set by this rule "
-                 "will be put into the resulting event in the Event Console.")
-contact_regex = r"^[^;'$|]*$"
-contact_regex_error = _("The contact information must not contain one of the characters ; ' | or $")
-
-register_rule(
-    group,
-    "extra_host_conf:_ec_contact",
-    TextUnicode(
-        title = _("Host contact information"),
-        help = contact_help,
-        size = 80,
-        regex = contact_regex,
-        regex_error = contact_regex_error,
-    ),
-    match = 'first',
-)
-
-register_rule(
-    group,
-    "extra_service_conf:_ec_contact",
-    TextUnicode(
-        title = _("Service contact information"),
-        help = contact_help + _(" Note: if no contact information is configured for a service "
-                                "then that of the host will be used instead (if configured)."),
-        size = 80,
-        regex = contact_regex,
-        regex_error = contact_regex_error,
-    ),
-    itemtype = 'service',
-    match = 'first',
-)
-#.
-#   .--Notifications-------------------------------------------------------.
-#   |       _   _       _   _  __ _           _   _                        |
-#   |      | \ | | ___ | |_(_)/ _(_) ___ __ _| |_(_) ___  _ __  ___        |
-#   |      |  \| |/ _ \| __| | |_| |/ __/ _` | __| |/ _ \| '_ \/ __|       |
-#   |      | |\  | (_) | |_| |  _| | (_| (_| | |_| | (_) | | | \__ \       |
-#   |      |_| \_|\___/ \__|_|_| |_|\___\__,_|\__|_|\___/|_| |_|___/       |
-#   |                                                                      |
-#   +----------------------------------------------------------------------+
-#   | Stuff for sending monitoring notifications into the event console.   |
-#   '----------------------------------------------------------------------'
-def mkeventd_update_notification_configuration(hosts):
-    try:
-        contactgroup = config.mkeventd_notify_contactgroup
-    except:
-        contactgroup = None
-
-    try:
-        facility = config.mkeventd_notify_facility
-    except:
-        facility = 16
-
-    try:
-        remote_console = config.mkeventd_notify_remotehost
-    except:
-        remote_console = None
-    if not remote_console:
-        remote_console = ""
-
-    path = defaults.nagios_conf_dir + "/mkeventd_notifications.cfg"
-    if not contactgroup and os.path.exists(path):
-        os.remove(path)
-    elif contactgroup:
-        file(path, "w").write("""# Created by Check_MK Event Console
-# This configuration will send notifications about hosts and
-# services in the contact group '%(group)s' to the Event Console.
-
-define contact {
-    contact_name                  mkeventd
-    alias                         "Notifications for Check_MK Event Console"
-    contactgroups                 %(group)s
-    host_notification_commands    mkeventd-notify-host
-    service_notification_commands mkeventd-notify-service
-    host_notification_options     d,u,r
-    service_notification_options  c,w,u,r
-    host_notification_period      24X7
-    service_notification_period   24X7
-    email                         none
-}
-
-define command {
-    command_name mkeventd-notify-host
-    command_line mkevent -n %(facility)s '%(remote)s' $HOSTSTATEID$ '$HOSTNAME$' '' '$HOSTOUTPUT$' '$_HOSTEC_SL$' '$_HOSTEC_CONTACT$'
-}
-
-define command {
-    command_name mkeventd-notify-service
-    command_line mkevent -n %(facility)s '%(remote)s' $SERVICESTATEID$ '$HOSTNAME$' '$SERVICEDESC$' '$SERVICEOUTPUT$' '$_SERVICEEC_SL$' '$_SERVICEEC_CONTACT$' '$_HOSTEC_SL$' '$_HOSTEC_CONTACT$'
-}
-""" % { "group" : contactgroup, "facility" : facility, "remote" : remote_console })
-
-api.register_hook("pre-activate-changes", mkeventd_update_notification_configuration)
-
-# Only register the reload hook when mkeventd is enabled
-if mkeventd_enabled:
-    api.register_hook("activate-changes", lambda hosts: mkeventd_reload())
-
Binary files /tmp/tmpz1J_py/xquijR6Ryw/check-mk-1.2.2p3/=unpacked-tar8=/htdocs/images/button_mkeventd_hi.png and /tmp/tmpz1J_py/GXS8RRKTxu/check-mk-1.2.6p12/=unpacked-tar8=/htdocs/images/button_mkeventd_hi.png differ
Binary files /tmp/tmpz1J_py/xquijR6Ryw/check-mk-1.2.2p3/=unpacked-tar8=/htdocs/images/button_mkeventd_lo.png and /tmp/tmpz1J_py/GXS8RRKTxu/check-mk-1.2.6p12/=unpacked-tar8=/htdocs/images/button_mkeventd_lo.png differ
Binary files /tmp/tmpz1J_py/xquijR6Ryw/check-mk-1.2.2p3/=unpacked-tar8=/htdocs/images/icon_ack.png and /tmp/tmpz1J_py/GXS8RRKTxu/check-mk-1.2.6p12/=unpacked-tar8=/htdocs/images/icon_ack.png differ
Binary files /tmp/tmpz1J_py/xquijR6Ryw/check-mk-1.2.2p3/=unpacked-tar8=/htdocs/images/icon_clear.png and /tmp/tmpz1J_py/GXS8RRKTxu/check-mk-1.2.6p12/=unpacked-tar8=/htdocs/images/icon_clear.png differ
Binary files /tmp/tmpz1J_py/xquijR6Ryw/check-mk-1.2.2p3/=unpacked-tar8=/htdocs/images/icon_counting.png and /tmp/tmpz1J_py/GXS8RRKTxu/check-mk-1.2.6p12/=unpacked-tar8=/htdocs/images/icon_counting.png differ
Binary files /tmp/tmpz1J_py/xquijR6Ryw/check-mk-1.2.2p3/=unpacked-tar8=/htdocs/images/icon_delayed.png and /tmp/tmpz1J_py/GXS8RRKTxu/check-mk-1.2.6p12/=unpacked-tar8=/htdocs/images/icon_delayed.png differ
Binary files /tmp/tmpz1J_py/xquijR6Ryw/check-mk-1.2.2p3/=unpacked-tar8=/htdocs/images/icon_mkeventd.png and /tmp/tmpz1J_py/GXS8RRKTxu/check-mk-1.2.6p12/=unpacked-tar8=/htdocs/images/icon_mkeventd.png differ
Binary files /tmp/tmpz1J_py/xquijR6Ryw/check-mk-1.2.2p3/=unpacked-tar8=/htdocs/images/icon_resetcounters.png and /tmp/tmpz1J_py/GXS8RRKTxu/check-mk-1.2.6p12/=unpacked-tar8=/htdocs/images/icon_resetcounters.png differ
diff -Nru check-mk-1.2.2p3/=unpacked-tar8=/htdocs/mkeventd.py check-mk-1.2.6p12/=unpacked-tar8=/htdocs/mkeventd.py
--- check-mk-1.2.2p3/=unpacked-tar8=/htdocs/mkeventd.py	1970-01-01 00:00:00.000000000 +0000
+++ check-mk-1.2.6p12/=unpacked-tar8=/htdocs/mkeventd.py	2015-09-21 10:59:54.000000000 +0000
@@ -0,0 +1,276 @@
+#!/usr/bin/python
+# -*- encoding: utf-8; py-indent-offset: 4 -*-
+# +------------------------------------------------------------------+
+# |             ____ _               _        __  __ _  __           |
+# |            / ___| |__   ___  ___| | __   |  \/  | |/ /           |
+# |           | |   | '_ \ / _ \/ __| |/ /   | |\/| | ' /            |
+# |           | |___| | | |  __/ (__|   <    | |  | | . \            |
+# |            \____|_| |_|\___|\___|_|\_\___|_|  |_|_|\_\           |
+# |                                                                  |
+# | Copyright Mathias Kettner 2014             mk@mathias-kettner.de |
+# +------------------------------------------------------------------+
+#
+# This file is part of Check_MK.
+# The official homepage is at http://mathias-kettner.de/check_mk.
+#
+# check_mk is free software;  you can redistribute it and/or modify it
+# under the  terms of the  GNU General Public License  as published by
+# the Free Software Foundation in version 2.  check_mk is  distributed
+# in the hope that it will be useful, but WITHOUT ANY WARRANTY;  with-
+# out even the implied warranty of  MERCHANTABILITY  or  FITNESS FOR A
+# PARTICULAR PURPOSE. See the  GNU General Public License for more de-
+# tails. You should have  received  a copy of the  GNU  General Public
+# License along with GNU Make;  see the file  COPYING.  If  not, write
+# to the Free Software Foundation, Inc., 51 Franklin St,  Fifth Floor,
+# Boston, MA 02110-1301 USA.
+
+import socket, config, defaults, re, time
+from lib import *
+
+# TODO: make this configurable and thus work for non OMD-users as
+# well.
+try:
+    socket_path = defaults.omd_root + "/tmp/run/mkeventd/status"
+    pipe_path   = defaults.omd_root + "/tmp/run/mkeventd/events"
+except:
+    run_dir     = defaults.livestatus_unix_socket.rsplit("/",1)[0]
+    socket_path = run_dir + "/mkeventd/status"
+    pipe_path   = run_dir + "/mkeventd/events"
+
+syslog_priorities = [
+    (0, "emerg" ),
+    (1, "alert" ),
+    (2, "crit" ),
+    (3, "err" ),
+    (4, "warning" ),
+    (5, "notice" ),
+    (6, "info" ),
+    (7, "debug" ),
+]
+
+syslog_facilities = [
+    (0, "kern"),
+    (1, "user"),
+    (2, "mail"),
+    (3, "daemon"),
+    (4, "auth"),
+    (5, "syslog"),
+    (6, "lpr"),
+    (7, "news"),
+    (8, "uucp"),
+    (9, "cron"),
+    (10, "authpriv"),
+    (11, "ftp"),
+    (12, "(12: unused)"),
+    (13, "(13: unused)"),
+    (14, "(14: unused)"),
+    (15, "(15: unused)"),
+    (16, "local0"),
+    (17, "local1"),
+    (18, "local2"),
+    (19, "local3"),
+    (20, "local4"),
+    (21, "local5"),
+    (22, "local6"),
+    (23, "local7"),
+    (31, "snmptrap"),
+]
+
+phase_names = {
+    'counting' : _("counting"),
+    'delayed'  : _("delayed"),
+    'open'     : _("open"),
+    'ack'      : _("acknowledged"),
+    'closed'   : _("closed"),
+}
+
+action_whats = {
+    "ORPHANED"     : _("Event deleted in counting state because rule was deleted."),
+    "NOCOUNT"      : _("Event deleted in counting state because rule does not count anymore"),
+    "DELAYOVER"    : _("Event opened because the delay time has elapsed before cancelling event arrived."),
+    "EXPIRED"      : _("Event deleted because its livetime expired"),
+    "COUNTREACHED" : _("Event deleted because required count had been reached"),
+    "COUNTFAILED"  : _("Event created because the required count was not reached in time"),
+    "UPDATE"       : _("Event information updated by user"),
+    "NEW"          : _("New event created"),
+    "DELETE"       : _("Event deleted manually by user"),
+    "EMAIL"        : _("Email sent"),
+    "SCRIPT"       : _("Script executed"),
+    "CANCELLED"    : _("The event was cancelled because the corresponding OK message was received"),
+    "ARCHIVED"     : _("Event was archived because no rule matched and archiving is activated in global settings."),
+    "AUTODELETE"   : _("Event was deleted automatically"),
+    "CHANGESTATE"  : _("State of event changed by user"),
+}
+
+def service_levels():
+    try:
+        return config.mkeventd_service_levels
+    except:
+        return [(0, "(no service level)")]
+
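The two tables above feed directly into the syslog PRI value: per RFC 3164 the PRI is facility * 8 + severity, which is what send_event() below computes with a left shift. A tiny self-contained sketch of that arithmetic:

    def encode_pri(facility, priority):
        # RFC 3164: PRI = facility * 8 + severity
        return (facility << 3) + priority

    def decode_pri(pri):
        return pri >> 3, pri & 7

    # local0 (16) at priority notice (5) yields PRI 133:
    assert encode_pri(16, 5) == 133
    assert decode_pri(133) == (16, 5)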
That + # way we have not direct access to them but need + # to load them from the configuration. + return [ ( "@NOTIFY", _("Send monitoring notification")) ] + \ + [ (a["id"], a["title"]) + for a in eventd_configuration().get("actions", []) + if not omit_hidden or not a.get("hidden") ] + +cached_config = None +def eventd_configuration(): + global cached_config + if cached_config and cached_config[0] is html: + return cached_config[1] + + config = { + "rules" : [], + "debug_rules" : False, + } + main_file = defaults.default_config_dir + "/mkeventd.mk" + list_of_files = reduce(lambda a,b: a+b, + [ [ "%s/%s" % (d, f) for f in fs if f.endswith(".mk")] + for d, sb, fs in os.walk(defaults.default_config_dir + "/mkeventd.d" ) ], []) + + list_of_files.sort() + for path in [ main_file ] + list_of_files: + execfile(path, config, config) + cached_config = (html, config) + return config + + +def daemon_running(): + return os.path.exists(socket_path) + + +def send_event(event): + # "<%PRI%>%TIMESTAMP% %HOSTNAME% %syslogtag% %msg%\n" + prio = (event["facility"] << 3) + event["priority"] + timestamp = time.strftime("%b %d %T", time.localtime()) + rfc = "<%d>%s %s %s: %s\n" % ( + prio, timestamp, event["host"], event["application"], event["text"]) + if type(rfc) == unicode: + rfc = rfc.encode("utf-8") + pipe = file(pipe_path, "w") + pipe.write(rfc + "\n") + return rfc + +def query(query): + try: + sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) + try: + timeout = config.mkeventd_connect_timeout + except: + timeout = 10 + + sock.settimeout(timeout) + sock.connect(socket_path) + sock.send(query) + + response_text = "" + while True: + chunk = sock.recv(8192) + response_text += chunk + if not chunk: + break + + return eval(response_text) + except SyntaxError, e: + raise MKGeneralException("Invalid response from event daemon:

    %s
    " % response_text) + + except Exception, e: + raise MKGeneralException("Cannot connect to event daemon via %s: %s" % (socket_path, e)) + +def replication_mode(): + try: + response = query("GET status") + status = dict(zip(response[0], response[1])) + return status["status_replication_slavemode"] + except: + return None + + +# Rule matching for simulation. Yes - there is some hateful code duplication +# here. But it does not make sense to query the live eventd here since it +# does not know anything about the currently configured but not yet activated +# rules. And also we do not want to have shared code. +def event_rule_matches(rule, event): + if False == match(rule.get("match_host"), event["host"], complete=True): + return _("The host name does not match.") + + if False == match(rule.get("match_application"), event["application"], complete=False): + return _("The application (syslog tag) does not match") + + if "match_facility" in rule and event["facility"] != rule["match_facility"]: + return _("The syslog facility does not match") + + + # First try cancelling rules + if "match_ok" in rule or "cancel_priority" in rule: + if "cancel_priority" in rule: + up, lo = rule["cancel_priority"] + cp = event["priority"] >= lo and event["priority"] <= up + else: + cp = True + + match_groups = match(rule.get("match_ok", ""), event["text"], complete = False) + if match_groups != False and cp: + if match_groups == True: + match_groups = () + return True, match_groups + + try: + match_groups = match(rule.get("match"), event["text"], complete = False) + except Exception, e: + return _("Invalid regular expression: %s" % e) + if match_groups == False: + return _("The message text does not match the required pattern.") + + if "match_priority" in rule: + prio_from, prio_to = rule["match_priority"] + if prio_from > prio_to: + prio_to, prio_from = prio_from, prio_to + p = event["priority"] + if p < prio_from or p > prio_to: + return _("The syslog priority is not in the required range.") + + if "match_timeperiod" in rule: + reason = check_timeperiod(rule["match_timeperiod"]) + if reason: + return reason + + if match_groups == True: + match_groups = () # no matching groups + return False, match_groups + +def check_timeperiod(tpname): + try: + livesock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) + livesock.connect(defaults.livestatus_unix_socket) + livesock.send("GET timeperiods\nFilter: name = %s\nColumns: in\n" % tpname) + livesock.shutdown(socket.SHUT_WR) + answer = livesock.recv(100).strip() + if answer == "": + return _("The timeperiod %s is not known to the local monitoring core") % tpname + elif int(answer) == 0: + return _("The timeperiod %s is currently not active" % tpname) + except Exception, e: + return _("Cannot update timeperiod information for %s: %s" % (tpname, e)) + if opt_debug: + raise + +def match(pattern, text, complete = True): + if pattern == None: + return True + else: + if complete: + if not pattern.endswith("$"): + pattern += '$' + m = re.compile(pattern, re.IGNORECASE).match(text) + else: + m = re.compile(pattern, re.IGNORECASE).search(text) + if m: + return m.groups() + else: + return False diff -Nru check-mk-1.2.2p3/=unpacked-tar8=/plugins/config/mkeventd.py check-mk-1.2.6p12/=unpacked-tar8=/plugins/config/mkeventd.py --- check-mk-1.2.2p3/=unpacked-tar8=/plugins/config/mkeventd.py 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/=unpacked-tar8=/plugins/config/mkeventd.py 2015-09-21 10:59:54.000000000 +0000 @@ -0,0 +1,42 @@ +#!/usr/bin/python +# -*- encoding: utf-8; 
diff -Nru check-mk-1.2.2p3/=unpacked-tar8=/plugins/config/mkeventd.py check-mk-1.2.6p12/=unpacked-tar8=/plugins/config/mkeventd.py
--- check-mk-1.2.2p3/=unpacked-tar8=/plugins/config/mkeventd.py	1970-01-01 00:00:00.000000000 +0000
+++ check-mk-1.2.6p12/=unpacked-tar8=/plugins/config/mkeventd.py	2015-09-21 10:59:54.000000000 +0000
@@ -0,0 +1,42 @@
+#!/usr/bin/python
+# -*- encoding: utf-8; py-indent-offset: 4 -*-
+# +------------------------------------------------------------------+
+# |             ____ _               _        __  __ _  __           |
+# |            / ___| |__   ___  ___| | __   |  \/  | |/ /           |
+# |           | |   | '_ \ / _ \/ __| |/ /   | |\/| | ' /            |
+# |           | |___| | | |  __/ (__|   <    | |  | | . \            |
+# |            \____|_| |_|\___|\___|_|\_\___|_|  |_|_|\_\           |
+# |                                                                  |
+# | Copyright Mathias Kettner 2014             mk@mathias-kettner.de |
+# +------------------------------------------------------------------+
+#
+# This file is part of Check_MK.
+# The official homepage is at http://mathias-kettner.de/check_mk.
+#
+# check_mk is free software;  you can redistribute it and/or modify it
+# under the  terms of the  GNU General Public License  as published by
+# the Free Software Foundation in version 2.  check_mk is  distributed
+# in the hope that it will be useful, but WITHOUT ANY WARRANTY;  with-
+# out even the implied warranty of  MERCHANTABILITY  or  FITNESS FOR A
+# PARTICULAR PURPOSE. See the  GNU General Public License for more de-
+# tails. You should have  received  a copy of the  GNU  General Public
+# License along with GNU Make;  see the file  COPYING.  If  not, write
+# to the Free Software Foundation, Inc., 51 Franklin St,  Fifth Floor,
+# Boston, MA 02110-1301 USA.
+
+mkeventd_enabled             = False
+mkeventd_pprint_rules        = False
+mkeventd_notify_contactgroup = None
+mkeventd_notify_facility     = 16
+mkeventd_notify_remotehost   = None
+mkeventd_connect_timeout     = 10
+debug_mkeventd_queries       = False
+log_rulehits                 = False
+rule_optimizer               = True
+
+mkeventd_service_levels = [
+    (0,  _("(no service level)")),
+    (10, _("Silver")),
+    (20, _("Gold")),
+    (30, _("Platinum")),
+]
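This module only declares built-in defaults; the GUI reads the effective values from config, so a site would override them in multisite.mk (or via WATO global settings). A hypothetical override using plain strings for the level names:

    # multisite.mk -- hypothetical overrides of the defaults above
    mkeventd_connect_timeout = 30     # tolerate a slower status socket
    debug_mkeventd_queries   = True   # echo every query sent to the daemon
    mkeventd_service_levels  = [
        (0,  "(no service level)"),
        (10, "Silver"),
        (20, "Gold"),
        (30, "Platinum"),
    ]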
diff -Nru check-mk-1.2.2p3/=unpacked-tar8=/plugins/icons/mkeventd.py check-mk-1.2.6p12/=unpacked-tar8=/plugins/icons/mkeventd.py
--- check-mk-1.2.2p3/=unpacked-tar8=/plugins/icons/mkeventd.py	1970-01-01 00:00:00.000000000 +0000
+++ check-mk-1.2.6p12/=unpacked-tar8=/plugins/icons/mkeventd.py	2015-09-21 10:59:54.000000000 +0000
@@ -0,0 +1,100 @@
+#!/usr/bin/python
+# -*- encoding: utf-8; py-indent-offset: 4 -*-
+# +------------------------------------------------------------------+
+# |             ____ _               _        __  __ _  __           |
+# |            / ___| |__   ___  ___| | __   |  \/  | |/ /           |
+# |           | |   | '_ \ / _ \/ __| |/ /   | |\/| | ' /            |
+# |           | |___| | | |  __/ (__|   <    | |  | | . \            |
+# |            \____|_| |_|\___|\___|_|\_\___|_|  |_|_|\_\           |
+# |                                                                  |
+# | Copyright Mathias Kettner 2014             mk@mathias-kettner.de |
+# +------------------------------------------------------------------+
+#
+# This file is part of Check_MK.
+# The official homepage is at http://mathias-kettner.de/check_mk.
+#
+# check_mk is free software;  you can redistribute it and/or modify it
+# under the  terms of the  GNU General Public License  as published by
+# the Free Software Foundation in version 2.  check_mk is  distributed
+# in the hope that it will be useful, but WITHOUT ANY WARRANTY;  with-
+# out even the implied warranty of  MERCHANTABILITY  or  FITNESS FOR A
+# PARTICULAR PURPOSE. See the  GNU General Public License for more de-
+# tails. You should have  received  a copy of the  GNU  General Public
+# License along with GNU Make;  see the file  COPYING.  If  not, write
+# to the Free Software Foundation, Inc., 51 Franklin St,  Fifth Floor,
+# Boston, MA 02110-1301 USA.
+
+import mkeventd
+
+try:
+    mkeventd_enabled = config.mkeventd_enabled
+except:
+    mkeventd_enabled = False
+
+def paint_mkeventd(what, row, tags, custom_vars):
+    # show for services based on the mkevents active check
+    command = row[what + '_check_command']
+
+    if what != 'service' or not command.startswith('check_mk_active-mkevents'):
+        return
+
+    if '!' not in command:
+        return
+
+    host = None
+    app  = None
+
+    # Extract parameters from check_command:
+    args = command.split('!')[1].split()
+    if not args:
+        return
+
+    # Handle -a and -H options. Sorry for the hack. We currently
+    # have no better idea
+    if len(args) >= 2 and args[0] == '-H':
+        args = args[2:] # skip two arguments
+    if len(args) >= 1 and args[0] == '-a':
+        args = args[1:]
+
+    if len(args) >= 1:
+        if args[0] == '$HOSTNAME$':
+            host = row['host_name']
+        elif args[0] == '$HOSTADDRESS$':
+            host = row['host_address']
+        else:
+            host = args[0]
+
+    # If we have no host then the command line from the check_command seems
+    # to be garbled. Better show nothing in this case.
+    if not host:
+        return
+
+    # It is possible to have a central event console; this is the default case.
+    # Another possible architecture is to have an event console in each site in
+    # a distributed environment. For the latter case the base URL needs to be
+    # constructed here
+    site = html.site_status[row['site']]["site"]
+    url_prefix = ''
+    if getattr(config, 'mkeventd_distributed', False):
+        url_prefix = site['url_prefix'] + 'check_mk/'
+
+    title = _('Events of Host %s') % (row["host_name"])
+    url = 'view.py?' + html.urlencode_vars([
+        ("view_name", "ec_events_of_monhost"),
+        ("site",      row["site"]),
+        ("host",      row["host_name"]),
+    ])
+
+    if len(args) >= 2:
+        app = args[1].strip('\'')
+        title = _('Events of Application "%s" on Host %s') % (app, host)
+        url += '&event_application=' + app
+
+    return '<a href="%s">%s</a>' % (url_prefix + url, html.render_icon('mkeventd', title))
+
+if mkeventd_enabled:
+    multisite_icons.append({
+        'columns':      [ 'check_command' ],
+        'host_columns': [ 'address', 'name' ],
+        'paint':        paint_mkeventd,
+    })
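paint_mkeventd() above recovers host and application from the raw Nagios check_command string. Condensed, the parsing works like this; the sample command line is made up:

    command = "check_mk_active-mkevents!-a '$HOSTNAME$' 'sshd'"
    args = command.split('!')[1].split()   # ["-a", "'$HOSTNAME$'", "'sshd'"]
    if len(args) >= 1 and args[0] == '-a':
        args = args[1:]                    # skip the flag, as the painter does
    host_spec = args[0]                    # "'$HOSTNAME$'" -> resolved via row['host_name']
    application = len(args) >= 2 and args[1].strip("'") or None
    print host_spec, application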
diff -Nru check-mk-1.2.2p3/=unpacked-tar8=/plugins/sidebar/mkeventd.py check-mk-1.2.6p12/=unpacked-tar8=/plugins/sidebar/mkeventd.py
--- check-mk-1.2.2p3/=unpacked-tar8=/plugins/sidebar/mkeventd.py	1970-01-01 00:00:00.000000000 +0000
+++ check-mk-1.2.6p12/=unpacked-tar8=/plugins/sidebar/mkeventd.py	2015-06-24 09:48:39.000000000 +0000
@@ -0,0 +1,112 @@
+#!/usr/bin/python
+# -*- encoding: utf-8; py-indent-offset: 4 -*-
+# +------------------------------------------------------------------+
+# |             ____ _               _        __  __ _  __           |
+# |            / ___| |__   ___  ___| | __   |  \/  | |/ /           |
+# |           | |   | '_ \ / _ \/ __| |/ /   | |\/| | ' /            |
+# |           | |___| | | |  __/ (__|   <    | |  | | . \            |
+# |            \____|_| |_|\___|\___|_|\_\___|_|  |_|_|\_\           |
+# |                                                                  |
+# | Copyright Mathias Kettner 2014             mk@mathias-kettner.de |
+# +------------------------------------------------------------------+
+#
+# This file is part of Check_MK.
+# The official homepage is at http://mathias-kettner.de/check_mk.
+#
+# check_mk is free software;  you can redistribute it and/or modify it
+# under the  terms of the  GNU General Public License  as published by
+# the Free Software Foundation in version 2.  check_mk is  distributed
+# in the hope that it will be useful, but WITHOUT ANY WARRANTY;  with-
+# out even the implied warranty of  MERCHANTABILITY  or  FITNESS FOR A
+# PARTICULAR PURPOSE. See the  GNU General Public License for more de-
+# tails. You should have  received  a copy of the  GNU  General Public
+# License along with GNU Make;  see the file  COPYING.  If  not, write
+# to the Free Software Foundation, Inc., 51 Franklin St,  Fifth Floor,
+# Boston, MA 02110-1301 USA.
+
+import mkeventd
+
+try:
+    mkeventd_enabled = config.mkeventd_enabled
+except:
+    mkeventd_enabled = False
+
+def render_mkeventd_performance():
+    def write_line(left, right):
+        html.write("<tr><td class=left>%s:</td>"
+                   "<td class=right>%s</td></tr>" % (left, right))
+
+    try:
+        raw_data = mkeventd.query("GET status")
+    except:
+        html.write(_("Event Console is not running."))
+        return
+
+    html.write("<table class=mkeventd_performance>\n")
+
+    data = dict(zip(raw_data[0], raw_data[1]))
+    columns = [
+        (_("Received messages"), "message",   "%.2f/s"),
+        (_("Rule hits"),         "rule_hit",  "%.2f/s"),
+        (_("Rule tries"),        "rule_trie", "%.2f/s"),
+        (_("Created events"),    "event",     "%.2f/s"),
+        (_("Client connects"),   "connect",   "%.2f/s"),
+    ]
+    for what, col, format in columns:
+        write_line(what, format % data["status_average_%s_rate" % col])
+
+    # Hit rate
+    try:
+        write_line(_("Rule hit ratio"), "%.2f %%" % (
+            data["status_average_rule_hit_rate"] /
+            data["status_average_rule_trie_rate"] * 100))
+    except: # division by zero
+        write_line(_("Rule hit ratio"), _("-.-- %"))
+
+    # Time columns
+    time_columns = [
+        (_("Processing time per message"), "processing"),
+        (_("Time per client request"),     "request"),
+        (_("Replication synchronization"), "sync"),
+    ]
+    for title, name in time_columns:
+        value = data.get("status_average_%s_time" % name)
+        if value:
+            write_line(title, "%.2f ms" % (value * 1000))
+        else:
+            write_line(title, _("-.-- ms"))
+    html.write("</table>\n")
+
+if mkeventd_enabled:
+    sidebar_snapins["mkeventd_performance"] = {
+        "title"       : _("Event Console Performance"),
+        "description" : _("Monitor the performance of the Event Console"),
+        "refresh"     : 15,
+        "render"      : render_mkeventd_performance,
+        "allowed"     : [ "admin", ],
+        "styles"      : """
+    table.mkeventd_performance {
+        width: %dpx;
+        -moz-border-radius: 5px;
+        background-color: #589;
+        /* background-color: #6da1b8;*/
+        border-style: solid;
+        border-color: #444 #bbb #eee #666;
+        /* The border needs to be subtracted from the width */
+        border-width: 1px;
+    }
+    table.mkeventd_performance td {
+        padding: 0px 2px;
+        font-size: 8pt;
+    }
+    table.mkeventd_performance td.right {
+        text-align: right;
+        padding: 0px;
+        padding-right: 1px;
+        white-space: nowrap;
+    }
+
+""" % (snapin_width - 2)
+    }
diff -Nru check-mk-1.2.2p3/=unpacked-tar8=/plugins/views/mkeventd.py check-mk-1.2.6p12/=unpacked-tar8=/plugins/views/mkeventd.py
--- check-mk-1.2.2p3/=unpacked-tar8=/plugins/views/mkeventd.py	1970-01-01 00:00:00.000000000 +0000
+++ check-mk-1.2.6p12/=unpacked-tar8=/plugins/views/mkeventd.py	2015-09-21 10:59:54.000000000 +0000
@@ -0,0 +1,1240 @@
+#!/usr/bin/python
+# -*- encoding: utf-8; py-indent-offset: 4 -*-
+# +------------------------------------------------------------------+
+# |             ____ _               _        __  __ _  __           |
+# |            / ___| |__   ___  ___| | __   |  \/  | |/ /           |
+# |           | |   | '_ \ / _ \/ __| |/ /   | |\/| | ' /            |
+# |           | |___| | | |  __/ (__|   <    | |  | | . \            |
+# |            \____|_| |_|\___|\___|_|\_\___|_|  |_|_|\_\           |
+# |                                                                  |
+# | Copyright Mathias Kettner 2014             mk@mathias-kettner.de |
+# +------------------------------------------------------------------+
+#
+# This file is part of Check_MK.
+# The official homepage is at http://mathias-kettner.de/check_mk.
+#
+# check_mk is free software;  you can redistribute it and/or modify it
+# under the  terms of the  GNU General Public License  as published by
+# the Free Software Foundation in version 2.  check_mk is  distributed
+# in the hope that it will be useful, but WITHOUT ANY WARRANTY;  with-
+# out even the implied warranty of  MERCHANTABILITY  or  FITNESS FOR A
+# PARTICULAR PURPOSE. See the  GNU General Public License for more de-
+# tails. You should have  received  a copy of the  GNU  General Public
+# License along with GNU Make;  see the file  COPYING.  If  not, write
+# to the Free Software Foundation, Inc., 51 Franklin St,  Fifth Floor,
+# Boston, MA 02110-1301 USA.
+
+import mkeventd
+from valuespec import *
+
+try:
+    mkeventd_enabled = config.mkeventd_enabled
+except:
+    mkeventd_enabled = False
+
+#   .--Datasources---------------------------------------------------------.
+#   |       ____        _                                                   |
+#   |      |  _ \  __ _| |_ __ _ ___  ___  _   _ _ __ ___ ___  ___          |
+#   |      | | | |/ _` | __/ _` / __|/ _ \| | | | '__/ __/ _ \/ __|         |
+#   |      | |_| | (_| | || (_| \__ \ (_) | |_| | | | (_|  __/\__ \         |
+#   |      |____/ \__,_|\__\__,_|___/\___/ \__,_|_|  \___\___||___/         |
+#   |                                                                       |
+#   '----------------------------------------------------------------------'
+
+def table_events(what, columns, add_headers, only_sites, limit, filters):
+    # First we fetch the list of all events from mkeventd - either current
+    # or historic ones. We ignore any filters for host_ here. Note:
+    # event_host and host_name needn't be compatible. They might differ
+    # in case. Also in the events table instead of the host name there
+    # might be the IP address of the host - while in the monitoring we
+    # use the host name. We will join later.
+
+    # If, due to limited visibility, we do post-filtering, we cannot
+    # impose a limit when fetching the data. This is dangerous, but we
+    # have no other choice currently.
+    if not config.may("mkeventd.seeall"):
+        use_limit = None
+    else:
+        use_limit = limit
+    rows = get_all_events(what, filters, use_limit)
+
+    # Now we join the stuff with the host information. Therefore we
+    # get the information about all hosts that are referred to in
+    # any of the events.
+    required_hosts = set()
+    for row in rows:
+        host = row.get("event_host")
+        if host:
+            required_hosts.add(host.lower())
+
+    # Get information about these hosts via Livestatus. We
+    # allow event_host to match either the host_name or
+    # the host_address.
+    host_filters = ""
+    for host in required_hosts:
+        host_filters += "Filter: host_name =~ %s\n" \
+                        "Filter: host_address = %s\n" % (host.encode("utf-8"), host.encode("utf-8"))
+    if len(required_hosts) > 0:
+        host_filters += "Or: %d\n" % (len(required_hosts) * 2)
+
+    # Make sure that the host name is fetched. We need it for
+    # joining. The event columns are always fetched all. The event
+    # daemon currently does not implement any Columns: header.
+    if "host_name" not in columns:
+        columns.append("host_name")
+    if "host_address" not in columns:
+        columns.append("host_address")
+
+    # Fetch list of hosts. There is much room for optimization here.
+    # If no host filter is set, then the data of all hosts would
+    # be fetched before we even know if there are any events
+    # for those hosts. Better would be first fetching all events
+    # and later fetch the data of the relevant hosts.
+    hostrows = event_hostrows(columns, only_sites, filters, host_filters)
+
+    # Visibility: If the user does *not* have the permission to see all
+    # events, we have to run the query twice: once with AuthUser:
+    # (as usual) and additionally once without AuthUser. For the second
+    # query we no longer need all columns, only the list of
+    # contact groups.
+    # 1. If a host is missing in the first query but present in the second,
+    #    the user is not allowed to see this host and the event is
+    #    not displayed.
+    # 2. If a host is missing in both queries, the host does not exist in
+    #    the monitoring. Then there are two cases:
+    #    a) If the event carries a list of contact groups (set by a rule),
+    #       the user may see the event if he is a member of one of those
+    #       contact groups. We can get this directly from the user profile;
+    #       for such events we do not need the result of the query.
+    #    b) If the event lacks this option, the user may always see the
+    #       event. This can additionally be controlled globally via a
+    #       permission.
+
+    if not config.may("mkeventd.seeall"):
+        host_contact_groups = {}
+        query = "GET hosts\nColumns: name address contact_groups\n" + host_filters
+        html.live.set_only_sites(only_sites)
+        html.live.set_auth_domain('mkeventd')
+        data = html.live.query(query)
+        html.live.set_auth_domain('read')
+        html.live.set_only_sites(None)
+        for host, address, groups in data:
+            host_contact_groups[host.lower()] = groups
+            host_contact_groups[address]      = groups
+
+    else:
+        host_contact_groups = None
+
+    # Create lookup dict from hostname/address to the dataset of the host.
+    # This speeds up the mapping to the events.
+    hostdict = {}
+    for row in hostrows:
+        hostdict[row["host_name"].lower()] = row
+        hostdict[row["host_address"]]      = row
+
+    # If there is at least one host filter, then we do not show event
+    # entries with an empty host information
+    have_host_filter = False
+    for filt in filters:
+        if filt.info == "host":
+            filter_code = filt.filter('event')
+            if filter_code:
+                have_host_filter = True
+                break
+
+    if not have_host_filter:
+        # Create empty host for outer join on host table
+        empty_host = dict([ (c, "") for c in columns if c.startswith("host_") ])
+        empty_host["site"]                  = ''
+        empty_host["host_state"]            = 0
+        empty_host["host_has_been_checked"] = 0
+
+    # We're ready to join the host-data with the event data now. The question
+    # is what to do with events that cannot be mapped to a host...
+    new_rows = []
+    user_contact_groups = None
+    for event in rows:
+        host = event["event_host"].lower()
+
+        # Users without the mkeventd.seeall permission may only see the host if
+        # they are a contact via the monitoring. In case the host is not known
+        # to the monitoring the permission mkeventd.seeunrelated is necessary
+        # as well.
+        if host_contact_groups != None:
+            if host in host_contact_groups:
+                if host not in hostdict:
+                    continue # Host known to monitoring, but user is not allowed
+            else: # Host not known to monitoring
+                # Has the event explicit contact groups assigned? Use them!
+                cgs = event.get("event_contact_groups")
+                if cgs == None:
+                    if not config.may("mkeventd.seeunrelated"):
+                        continue
+                else:
+                    if user_contact_groups == None:
+                        user_contact_groups = get_user_contact_groups()
+
+                    allowed = False
+                    for g in cgs:
+                        if g in user_contact_groups:
+                            allowed = True
+                    if not allowed:
+                        continue
+
+        if host in hostdict:
+            event.update(hostdict[host])
+            new_rows.append(event)
+        elif not have_host_filter:
+            # This event does not belong to any host known by
+            # the monitoring. We need to create the columns nevertheless.
+            # TODO: If there are any host filters, these events should
+            # be dropped.
+            # Here we could fill in empty data, so that events
+            # that are not assigned to any host would also be
+            # visible. If we do nothing, columns are missing
+            # and the painters fall flat on their face.
+            event.update(empty_host)
+            new_rows.append(event)
+
+    return new_rows
+
+
+def event_hostrows(columns, only_sites, filters, host_filters):
+    filter_code = ""
+    for filt in filters:
+        header = filt.filter("event")
+        if not header.startswith("Sites:"):
+            filter_code += header
+    filter_code += host_filters
+
+    host_columns = filter(lambda c: c.startswith("host_"), columns)
+    return get_host_table(filter_code, only_sites, host_columns)
+
+
+def get_user_contact_groups():
+    query = "GET contactgroups\nFilter: members >= %s\nColumns: name\nCache: reload" % (config.user_id)
+    contacts = html.live.query_column(query)
+    return set(contacts)
+
+def get_host_table(filter_header, only_sites, add_columns):
+    columns = [ "host_name" ] + add_columns
+
+    html.live.set_only_sites(only_sites)
+    html.live.set_prepend_site(True)
+    data = html.live.query(
+            "GET hosts\n" +
+            "Columns: " + (" ".join(columns)) + "\n" +
+            filter_header)
+    html.live.set_prepend_site(False)
+    html.live.set_only_sites(None)
+
+    headers = [ "site" ] + columns
+    rows = [ dict(zip(headers, row)) for row in data ]
+    return rows
+
+def get_all_events(what, filters, limit):
+    headers = ""
+    for f in filters:
+        try:
+            headers += f.event_headers()
+        except:
+            pass
+    if limit:
+        headers += "Limit: %d\n" % limit
+
+    query = "GET %s\n%s" % (what, headers)
+    try:
+        debug = config.debug_mkeventd_queries
+    except:
+        debug = False
+    if debug \
+       and html.output_format == "html" and 'W' in html.display_options:
+        html.write('<div class="message">'
+                   '<tt>%s</tt></div>\n' % (query.replace('\n', '<br>\n')))
+    response = mkeventd.query(query)
+
+    # First line of the response is the list of column names.
+    headers = response[0]
+    rows = []
+    for r in response[1:]:
+        rows.append(dict(zip(headers, r)))
+    return rows
+
+
+# Declare datasource only if the event console is activated. We do
+# not want to irritate users that do not know anything about the EC.
+if mkeventd_enabled:
+    config.declare_permission("mkeventd.seeall",
+        _("See all events"),
+        _("If a user lacks this permission then he/she can see only those events that "
+          "originate from a host that he/she is a contact for."),
+        [ "user", "admin", "guest" ])
+
+    config.declare_permission("mkeventd.seeunrelated",
+        _("See events not related to a known host"),
+        _("If a user does not have the permission See all events then this permission "
+          "controls whether he/she can see events that are not related to a host in the monitoring "
+          "and that have not been assigned specific contact groups via the event rule."),
+        [ "user", "admin", "guest" ])
+
+    multisite_datasources["mkeventd_events"] = {
+        "title"        : _("Event Console: Current Events"),
+        "table"        : lambda *args: table_events('events', *args),
+        "infos"        : [ "event", "host" ],
+        "keys"         : [],
+        "idkeys"       : [ 'site', 'host_name', 'event_id' ],
+        "time_filters" : [ "event_first" ],
+    }
+
+    multisite_datasources["mkeventd_history"] = {
+        "title"        : _("Event Console: Event History"),
+        "table"        : lambda *args: table_events('history', *args),
+        "infos"        : [ "history", "event", "host" ],
+        "keys"         : [],
+        "idkeys"       : [ 'site', 'host_name', 'event_id', 'history_line' ],
+        "time_filters" : [ "history_time" ],
+    }
+
+
+    #.
+    #   .--Painters------------------------------------------------------------.
+    #   |                 ____       _       _                                  |
+    #   |                |  _ \ __ _(_)_ __ | |_ ___ _ __ ___                   |
+    #   |                | |_) / _` | | '_ \| __/ _ \ '__/ __|                  |
+    #   |                |  __/ (_| | | | | | ||  __/ |  \__ \                  |
+    #   |                |_|   \__,_|_|_| |_|\__\___|_|  |___/                  |
+    #   |                                                                       |
+    #   '----------------------------------------------------------------------'
+
+    def paint_event_host(row):
+        if row["host_name"]:
+            return "", row["host_name"]
+        else:
+            return "", row["event_host"]
+
+    multisite_painters["event_id"] = {
+        "title"   : _("ID of the event"),
+        "short"   : _("ID"),
+        "columns" : ["event_id"],
+        "paint"   : lambda row: ("number", str(row["event_id"])),
+    }
+
+    multisite_painters["event_count"] = {
+        "title"   : _("Count (number of recent occurrences)"),
+        "short"   : _("Cnt."),
+        "columns" : ["event_count"],
+        "paint"   : lambda row: ("number", str(row["event_count"])),
+    }
+
+    multisite_painters["event_text"] = {
+        "title"   : _("Text/Message of the event"),
+        "short"   : _("Message"),
+        "columns" : ["event_text"],
+        "paint"   : lambda row: ("", html.attrencode(row["event_text"]).replace("\x01","<br>")),
+    }
+
+    def paint_ec_match_groups(row):
+        groups = row["event_match_groups"]
+        if groups:
+            code = ""
+            for text in groups:
+                code += '<span>%s</span>' % text
+            return "matchgroups", code
+        else:
+            return "", ""
+
+    multisite_painters["event_match_groups"] = {
+        "title"   : _("Match Groups"),
+        "short"   : _("Match"),
+        "columns" : ["event_match_groups"],
+        "paint"   : paint_ec_match_groups,
+    }
+
+    multisite_painters["event_first"] = {
+        "title"   : _("Time of first occurrence of this serial"),
+        "short"   : _("First"),
+        "columns" : ["event_first"],
+        "options" : [ "ts_format", "ts_date" ],
+        "paint"   : lambda row: paint_age(row["event_first"], True, True),
+    }
+
+    multisite_painters["event_last"] = {
+        "title"   : _("Time of last occurrence"),
+        "short"   : _("Last"),
+        "columns" : ["event_last"],
+        "options" : [ "ts_format", "ts_date" ],
+        "paint"   : lambda row: paint_age(row["event_last"], True, True),
+    }
+
+    multisite_painters["event_comment"] = {
+        "title"   : _("Comment on the event"),
+        "short"   : _("Comment"),
+        "columns" : ["event_comment"],
+        "paint"   : lambda row: ("", row["event_comment"]),
+    }
+
+    def mkeventd_paint_sl(row):
+        try:
+            return "", dict(config.mkeventd_service_levels)[row["event_sl"]]
+        except:
+            return "", str(row["event_sl"])
+
+    multisite_painters["event_sl"] = {
+        "title"   : _("Service-Level"),
+        "short"   : _("Level"),
+        "columns" : ["event_sl"],
+        "paint"   : mkeventd_paint_sl,
+    }
+
+    multisite_painters["event_host"] = {
+        "title"   : _("Hostname/IP-Address"),
+        "short"   : _("Host"),
+        "columns" : ["event_host", "host_name"],
+        "paint"   : paint_event_host,
+    }
+
+    multisite_painters["event_owner"] = {
+        "title"   : _("Owner of event"),
+        "short"   : _("owner"),
+        "columns" : ["event_owner"],
+        "paint"   : lambda row: ("", row["event_owner"]),
+    }
+
+    multisite_painters["event_contact"] = {
+        "title"   : _("Contact Person"),
+        "short"   : _("Contact"),
+        "columns" : ["event_contact" ],
+        "paint"   : lambda row: ("", row["event_contact"]),
+    }
+
+    multisite_painters["event_application"] = {
+        "title"   : _("Application / Syslog-Tag"),
+        "short"   : _("Application"),
+        "columns" : ["event_application" ],
+        "paint"   : lambda row: ("", row["event_application"]),
+    }
+
+    multisite_painters["event_pid"] = {
+        "title"   : _("Process ID"),
+        "short"   : _("PID"),
+        "columns" : ["event_pid" ],
+        "paint"   : lambda row: ("", row["event_pid"]),
+    }
+
+    multisite_painters["event_priority"] = {
+        "title"   : _("Syslog-Priority"),
+        "short"   : _("Prio"),
+        "columns" : ["event_priority" ],
+        "paint"   : lambda row: ("", dict(mkeventd.syslog_priorities)[row["event_priority"]]),
+    }
+
+    multisite_painters["event_facility"] = {
+        "title"   : _("Syslog-Facility"),
+        "short"   : _("Facility"),
+        "columns" : ["event_facility" ],
+        "paint"   : lambda row: ("", dict(mkeventd.syslog_facilities)[row["event_facility"]]),
+    }
+
+    def paint_rule_id(row):
+        rule_id = row["event_rule_id"]
+        if config.may("mkeventd.edit"):
+            urlvars = html.urlencode_vars([("mode", "mkeventd_edit_rule"), ("rule_id", rule_id)])
+            return "", '<a href="wato.py?%s">%s</a>' % (urlvars, rule_id)
+        else:
+            return "", rule_id
+
+    multisite_painters["event_rule_id"] = {
+        "title"   : _("Rule-ID"),
+        "short"   : _("Rule"),
+        "columns" : ["event_rule_id" ],
+        "paint"   : paint_rule_id,
+    }
+
+    def paint_event_state(row):
+        state = row["event_state"]
+        name = nagios_short_state_names[row["event_state"]]
+        return "state svcstate state%s" % state, name
+
+    multisite_painters["event_state"] = {
+        "title"   : _("State (severity) of event"),
+        "short"   : _("State"),
+        "columns" : ["event_state"],
+        "paint"   : paint_event_state,
+    }
+
+    multisite_painters["event_phase"] = {
+        "title"   : _("Phase of event (open, counting, etc.)"),
+        "short"   : _("Phase"),
+        "columns" : ["event_phase" ],
+        "paint"   : lambda row: ("", mkeventd.phase_names.get(row["event_phase"], ''))
+    }
+
+    def paint_event_icons(row):
+        phase = row["event_phase"]
+        if phase == "ack":
+            title = _("This event has been acknowledged.")
+        elif phase == "counting":
+            title = _("This event has not reached the target count yet.")
+        elif phase == "delayed":
+            title = _("The action of this event is still delayed in the hope of a cancelling event.")
+        else:
+            return "", ""
+        return 'icons', '<img class=icon title="%s" src="images/icon_%s.png">' % (title, phase)
+
+    multisite_painters["event_icons"] = {
+        "title"     : _("Event Icons"),
+        "short"     : _("Icons"),
+        "printable" : False,
+        "columns"   : [ "event_phase" ],
+        "paint"     : paint_event_icons,
+    }
+
+    def paint_event_contact_groups(row):
+        cgs = row.get("event_contact_groups")
+        if cgs == None:
+            return "", ""
+        elif cgs:
+            return "", ", ".join(cgs)
+        else:
+            return "", "<i>" + _("none") + "</i>"
+
+    multisite_painters["event_contact_groups"] = {
+        "title"   : _("Fallback Contact Groups"),
+        "short"   : _("Contact Groups"),
+        "columns" : [ "event_contact_groups" ],
+        "paint"   : paint_event_contact_groups,
+    }
+
+    # Event History
+
+    multisite_painters["history_line"] = {
+        "title"   : _("Line number in log file"),
+        "short"   : _("Line"),
+        "columns" : ["history_line" ],
+        "paint"   : lambda row: ("number", row["history_line"]),
+    }
+
+    multisite_painters["history_time"] = {
+        "title"   : _("Time of entry in logfile"),
+        "short"   : _("Time"),
+        "columns" : ["history_time" ],
+        "options" : [ "ts_format", "ts_date" ],
+        "paint"   : lambda row: paint_age(row["history_time"], True, True),
+    }
+
+    multisite_painters["history_what"] = {
+        "title"   : _("Type of event action"),
+        "short"   : _("Action"),
+        "columns" : ["history_what" ],
+        "paint"   : lambda row: ("", row["history_what"]),
+    }
+
+    multisite_painters["history_what_explained"] = {
+        "title"   : _("Explanation for event action"),
+        "columns" : ["history_what" ],
+        "paint"   : lambda row: ("", mkeventd.action_whats[row["history_what"]]),
+    }
+
+
+    multisite_painters["history_who"] = {
+        "title"   : _("User who performed action"),
+        "short"   : _("Who"),
+        "columns" : ["history_who" ],
+        "paint"   : lambda row: ("", row["history_who"]),
+    }
+
+    multisite_painters["history_addinfo"] = {
+        "title"   : _("Additional Information"),
+        "short"   : _("Info"),
+        "columns" : ["history_addinfo" ],
+        "paint"   : lambda row: ("", row["history_addinfo"]),
+    }
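All painters above share one contract: 'columns' names the event columns the datasource must deliver and 'paint' maps a row to a (css_class, content) pair. A hypothetical additional painter, not part of the shipped set, would plug into the same plugin context like this:

    import time

    def paint_event_age(row):
        # Age of the event, based on the time of its first occurrence.
        return "number", "%d s" % (time.time() - row["event_first"])

    multisite_painters["event_age"] = {
        "title"   : _("Age of the event"),
        "short"   : _("Age"),
        "columns" : [ "event_first" ],
        "paint"   : paint_event_age,
    }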
+
+    #.
+    #   .--Commands------------------------------------------------------------.
+    #   |         ____                                          _               |
+    #   |        / ___|___  _ __ ___  _ __ ___   __ _ _ __   __| |___           |
+    #   |       | |   / _ \| '_ ` _ \| '_ ` _ \ / _` | '_ \ / _` / __|          |
+    #   |       | |__| (_) | | | | | | | | | | | (_| | | | | (_| \__ \          |
+    #   |        \____\___/|_| |_| |_|_| |_| |_|\__,_|_| |_|\__,_|___/          |
+    #   |                                                                       |
+    #   '----------------------------------------------------------------------'
+
+    def command_executor_mkeventd(command, site):
+        response = mkeventd.query("COMMAND %s" % command)
+
+
+    # Acknowledge and update comment and contact
+    config.declare_permission("mkeventd.update",
+        _("Update an event"),
+        _("Needed for acknowledging and changing the comment and contact of an event"),
+        [ "user", "admin" ])
+
+    # Sub-Permissions for Changing Comment, Contact and Acknowledgement
+    config.declare_permission("mkeventd.update_comment",
+        _("Update an event: change comment"),
+        _("Needed for changing a comment when updating an event"),
+        [ "user", "admin" ])
+    config.declare_permission("mkeventd.update_contact",
+        _("Update an event: change contact"),
+        _("Needed for changing a contact when updating an event"),
+        [ "user", "admin" ])
+
+    def render_mkeventd_update():
+        html.write('<table border=0 cellspacing=3 cellpadding=0>')
+        if config.may("mkeventd.update_comment"):
+            html.write('<tr><td>%s</td><td>' % _("Change comment:"))
+            html.text_input('_mkeventd_comment', size=50)
+            html.write('</td></tr>')
+        if config.may("mkeventd.update_contact"):
+            html.write('<tr><td>%s</td><td>' % _("Change contact:"))
+            html.text_input('_mkeventd_contact', size=50)
+            html.write('</td></tr>')
+        html.write('<tr><td></td><td>')
+        html.checkbox('_mkeventd_acknowledge', True, label=_("Set event to acknowledged"))
+        html.write('</td></tr>')
+        html.write('</table>')
+        html.button('_mkeventd_update', _("Update"))
+
+    def command_mkeventd_update(cmdtag, spec, row):
+        if html.var('_mkeventd_update'):
+            if config.may("mkeventd.update_comment"):
+                comment = html.var_utf8("_mkeventd_comment").strip().replace(";",",")
+            else:
+                comment = ""
+            if config.may("mkeventd.update_contact"):
+                contact = html.var_utf8("_mkeventd_contact").strip().replace(":",",")
+            else:
+                contact = ""
+            ack = html.get_checkbox("_mkeventd_acknowledge")
+            return "UPDATE;%s;%s;%s;%s;%s" % \
+                (row["event_id"], config.user_id, ack and 1 or 0, comment, contact), \
+                _("update")
+
+    multisite_commands.append({
+        "tables"     : [ "event" ],
+        "permission" : "mkeventd.update",
+        "title"      : _("Update & Acknowledge"),
+        "render"     : render_mkeventd_update,
+        "action"     : command_mkeventd_update,
+        "executor"   : command_executor_mkeventd,
+    })
+
+    # Change event state
+    config.declare_permission("mkeventd.changestate",
+        _("Change event state"),
+        _("This permission allows to change the state classification of an event "
+          "(e.g. from CRIT to WARN)."),
+        [ "user", "admin" ])
+
+    def render_mkeventd_changestate():
+        html.button('_mkeventd_changestate', _("Change Event state to:"))
+        html.write(" ")
+        MonitoringState().render_input("_mkeventd_state", 2)
+
+    def command_mkeventd_changestate(cmdtag, spec, row):
+        if html.var('_mkeventd_changestate'):
+            state = MonitoringState().from_html_vars("_mkeventd_state")
+            return "CHANGESTATE;%s;%s;%s" % \
+                (row["event_id"], config.user_id, state), \
+                _("change the state")
+
+    multisite_commands.append({
+        "tables"     : [ "event" ],
+        "permission" : "mkeventd.changestate",
+        "title"      : _("Change State"),
+        "render"     : render_mkeventd_changestate,
+        "action"     : command_mkeventd_changestate,
+        "executor"   : command_executor_mkeventd,
+    })
+
+
+    # Perform custom actions
+    config.declare_permission("mkeventd.actions",
+        _("Perform custom action"),
+        _("This permission is needed for performing the configured actions "
+          "(execution of scripts and sending emails)."),
+        [ "user", "admin" ])
+
+    def render_mkeventd_actions():
+        for action_id, title in mkeventd.action_choices(omit_hidden = True):
+            html.button("_action_" + action_id, title)
+            html.write("<br>")
+
+    def command_mkeventd_action(cmdtag, spec, row):
+        for action_id, title in mkeventd.action_choices(omit_hidden = True):
+            if html.var("_action_" + action_id):
+                return "ACTION;%s;%s;%s" % (row["event_id"], config.user_id, action_id), \
+                  (_("execute that action &quot;%s&quot;") % title)
+
+    multisite_commands.append({
+        "tables"     : [ "event" ],
+        "permission" : "mkeventd.actions",
+        "title"      : _("Custom Action"),
+        "render"     : render_mkeventd_actions,
+        "action"     : command_mkeventd_action,
+        "executor"   : command_executor_mkeventd,
+    })
+
+
+    # Delete events
+    config.declare_permission("mkeventd.delete",
+        _("Archive an event"),
+        _("Archive an event permanently without any further action"),
+        [ "user", "admin" ])
+
+
+    def command_mkeventd_delete(cmdtag, spec, row):
+        if html.var("_delete_event"):
+            command = "DELETE;%s;%s" % (row["event_id"], config.user_id)
+            title = _("archive")
+            return command, title
+
+
+    multisite_commands.append({
+        "tables"     : [ "event" ],
+        "permission" : "mkeventd.delete",
+        "title"      : _("Archive Event"),
+        "render"     : lambda: \
+            html.button("_delete_event", _("Archive Event")),
+        "action"     : command_mkeventd_delete,
+        "executor"   : command_executor_mkeventd,
+    })
+
+    #.
+    #   .--Sorters-------------------------------------------------------------.
+    #   |                  ____             _                                   |
+    #   |                 / ___|  ___  _ __| |_ ___ _ __ ___                    |
+    #   |                 \___ \ / _ \| '__| __/ _ \ '__/ __|                   |
+    #   |                  ___) | (_) | |  | ||  __/ |  \__ \                   |
+    #   |                 |____/ \___/|_|   \__\___|_|  |___/                   |
+    #   |                                                                       |
+    #   '----------------------------------------------------------------------'
+
+    def cmp_simple_state(column, ra, rb):
+        a = ra.get(column, -1)
+        b = rb.get(column, -1)
+        if a == 3:
+            a = 1.5
+        if b == 3:
+            b = 1.5
+        return cmp(a, b)
+
+
+    declare_1to1_sorter("event_id", cmp_simple_number)
+    declare_1to1_sorter("event_count", cmp_simple_number)
+    declare_1to1_sorter("event_text", cmp_simple_string)
+    declare_1to1_sorter("event_first", cmp_simple_number)
+    declare_1to1_sorter("event_last", cmp_simple_number)
+    declare_1to1_sorter("event_comment", cmp_simple_string)
+    declare_1to1_sorter("event_sl", cmp_simple_number)
+    declare_1to1_sorter("event_host", cmp_simple_string)
+    declare_1to1_sorter("event_contact", cmp_simple_string)
+    declare_1to1_sorter("event_application", cmp_simple_string)
+    declare_1to1_sorter("event_pid", cmp_simple_number)
+    declare_1to1_sorter("event_priority", cmp_simple_number)
+    declare_1to1_sorter("event_facility", cmp_simple_number) # maybe convert to text
+    declare_1to1_sorter("event_rule_id", cmp_simple_string)
+    declare_1to1_sorter("event_state", cmp_simple_state)
+    declare_1to1_sorter("event_phase", cmp_simple_string)
+    declare_1to1_sorter("event_owner", cmp_simple_string)
+
+    declare_1to1_sorter("history_line", cmp_simple_number)
+    declare_1to1_sorter("history_time", cmp_simple_number)
+    declare_1to1_sorter("history_what", cmp_simple_string)
+    declare_1to1_sorter("history_who", cmp_simple_string)
+    declare_1to1_sorter("history_addinfo", cmp_simple_string)
+
+    #.
+    #   .--Views---------------------------------------------------------------.
+    #   |                     __     ___                                        |
+    #   |                     \ \   / (_) _____      _____                      |
+    #   |                      \ \ / /| |/ _ \ \ /\ / / __|                     |
+    #   |                       \ V / | |  __/\ V  V /\__ \                     |
+    #   |                        \_/  |_|\___| \_/\_/ |___/                     |
+    #   |                                                                       |
+    #   '----------------------------------------------------------------------'
+
+    def mkeventd_view(d):
+        x = {
+            'topic':           _('Event Console'),
+            'browser_reload':  60,
+            'column_headers':  'pergroup',
+            'icon':            'mkeventd',
+            'mobile':          False,
+            'hidden':          False,
+            'mustsearch':      False,
+            'group_painters':  [],
+            'num_columns':     1,
+            'hidebutton':      False,
+            'play_sounds':     False,
+            'public':          True,
+            'sorters':         [],
+            'user_sortable':   'on',
+            'show_filters':    [],
+            'hard_filters':    [],
+            'hide_filters':    [],
+            'hard_filtervars': [],
+        }
+        x.update(d)
+        return x
+
+    # Table of all open events
+    multisite_builtin_views['ec_events'] = mkeventd_view({
+        'title':       _('Events'),
+        'description': _('Table of all currently open events (handled and unhandled)'),
+        'datasource':  'mkeventd_events',
+        'layout':      'table',
+        'painters': [
+            ('event_id', 'ec_event', ''), ('event_icons', None, ''),
+            ('event_state', None, ''), ('event_sl', None, ''),
+            ('event_host', 'ec_events_of_host', ''), ('event_rule_id', None, ''),
+            ('event_application', None, ''), ('event_text', None, ''),
+            ('event_last', None, ''), ('event_count', None, ''),
+        ],
+        'show_filters': [
+            'event_id', 'event_rule_id', 'event_text', 'event_application',
+            'event_contact', 'event_comment', 'event_host_regex', 'event_count',
+            'event_phase', 'event_state', 'event_first', 'event_last',
+            'event_priority', 'event_facility', 'event_sl', 'event_sl_max',
+            'hostregex',
+        ],
+        'hard_filtervars': [
+            ( 'event_phase_open',     "on" ),
+            ( 'event_phase_ack',      "on" ),
+            ( 'event_phase_counting', ""   ),
+            ( 'event_phase_delayed',  ""   ),
+        ],
+    })
+
+    multisite_builtin_views['ec_events_of_monhost'] = mkeventd_view({
+        'title':       _('Events of Monitored Host'),
+        'description': _('Currently open events of a host that is monitored'),
+        'datasource':  'mkeventd_events',
+        'layout':      'table',
+        'hidden':      True,
+        'painters': [
+            ('event_id', 'ec_event', ''), ('event_icons', None, ''),
+            ('event_state', None, ''), ('event_sl', None, ''),
+            ('event_rule_id', None, ''), ('event_application', None, ''),
+            ('event_text', None, ''), ('event_last', None, ''),
+            ('event_count', None, ''),
+        ],
+        'show_filters': [
+            'event_id', 'event_rule_id', 'event_text', 'event_application',
+            'event_contact', 'event_comment', 'event_count', 'event_phase',
+            'event_state', 'event_first', 'event_last', 'event_priority',
+            'event_facility', 'event_sl', 'event_sl_max',
+        ],
+        'hide_filters': [
+            'siteopt',
+            'host',
+        ],
+    })
+    multisite_builtin_views['ec_events_of_host'] = mkeventd_view({
+        'title':       _('Events of Host'),
+        'description': _('Currently open events of one specific host'),
+        'datasource':  'mkeventd_events',
+        'layout':      'table',
+        'hidden':      True,
+        'painters': [
+            ('event_id', 'ec_event', ''), ('event_icons', None, ''),
+            ('event_state', None, ''), ('event_sl', None, ''),
+            ('event_rule_id', None, ''), ('event_application', None, ''),
+            ('event_text', None, ''), ('event_last', None, ''),
+            ('event_count', None, ''),
+        ],
+        'show_filters': [
+            'event_id', 'event_rule_id', 'event_text', 'event_application',
+            'event_contact', 'event_comment', 'event_count', 'event_phase',
+            'event_state', 'event_first', 'event_last', 'event_priority',
+            'event_facility', 'event_sl', 'event_sl_max',
+        ],
+        'hide_filters': [
+            'siteopt',
+            'event_host',
+        ],
+    })
+
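mkeventd_view() above supplies the shared defaults, so a view definition only has to list the keys that differ. A hypothetical extra view built the same way inside this plugin context:

    # Hypothetical view of unacknowledged open events (not shipped):
    multisite_builtin_views['ec_events_unack'] = mkeventd_view({
        'title':       _('Unacknowledged Events'),
        'description': _('All open events that have not been acknowledged yet'),
        'datasource':  'mkeventd_events',
        'layout':      'table',
        'painters': [
            ('event_id', 'ec_event', ''), ('event_state', None, ''),
            ('event_host', 'ec_events_of_host', ''), ('event_text', None, ''),
            ('event_last', None, ''),
        ],
        'hard_filtervars': [
            ('event_phase_open', "on"),
            ('event_phase_ack',  ""),
        ],
    })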
multisite_builtin_views['ec_event'] = mkeventd_view({ + 'title': _('Event Details'), + 'description': _('Details about one event'), + 'linktitle': 'Event Details', + 'datasource': 'mkeventd_events', + 'layout': 'dataset', + + 'hidden': True, + 'browser_reload': 0, + 'hide_filters': [ + 'event_id', + ], + 'painters': [ + ('event_state', None, ''), + ('event_host', None, ''), + ('host_address', 'hoststatus', ''), + ('host_contacts', None, ''), + ('host_icons', None, ''), + ('event_text', None, ''), + ('event_match_groups', None, ''), + ('event_comment', None, ''), + ('event_owner', None, ''), + ('event_first', None, ''), + ('event_last', None, ''), + ('event_id', None, ''), + ('event_icons', None, ''), + ('event_count', None, ''), + ('event_sl', None, ''), + ('event_contact', None, ''), + ('event_contact_groups', None, ''), + ('event_application', None, ''), + ('event_pid', None, ''), + ('event_priority', None, ''), + ('event_facility', None, ''), + ('event_rule_id', None, ''), + ('event_phase', None, ''), + ('host_services', None, ''), + ], + }) + + multisite_builtin_views['ec_history_recent'] = mkeventd_view({ + 'title': _('Recent Event History'), + 'description': _('Information about events and actions on events during the recent 24 hours.'), + 'datasource': 'mkeventd_history', + 'layout': 'table', + + 'painters': [ + ('history_time', None, ''), + ('event_id', 'ec_historyentry', ''), + ('history_who', None, ''), + ('history_what', None, ''), + ('event_icons', None, ''), + ('event_state', None, ''), + ('event_phase', None, ''), + ('event_sl', None, ''), + ('event_host', 'ec_history_of_host', ''), + ('event_rule_id', None, ''), + ('event_application', None, ''), + ('event_text', None, ''), + ('event_last', None, ''), + ('event_count', None, ''), + ], + 'show_filters': [ + 'event_id', + 'event_rule_id', + 'event_text', + 'event_application', + 'event_contact', + 'event_comment', + 'event_host_regex', + 'event_count', + 'event_phase', + 'event_state', + 'event_first', + 'event_last', + 'event_priority', + 'event_facility', + 'event_sl', + 'event_sl_max', + 'history_time', + 'history_who', + 'history_what', + 'host_state_type', + ], + 'hard_filtervars': [ + ('history_time_from', '1'), + ('history_time_from_range', '86400'), + ], + 'sorters': [ + ('history_time', True), + ('history_line', True), + ], + }) + + multisite_builtin_views['ec_historyentry'] = mkeventd_view({ + 'title': _('Event History Entry'), + 'description': _('Details about a historical event history entry'), + 'datasource': 'mkeventd_history', + 'layout': 'dataset', + + 'hidden': True, + 'browser_reload': 0, + 'hide_filters': [ + 'event_id', + 'history_line', + ], + 'painters': [ + ('history_time', None, ''), + ('history_line', None, ''), + ('history_what', None, ''), + ('history_what_explained', None, ''), + ('history_who', None, ''), + ('history_addinfo', None, ''), + ('event_state', None, ''), + ('event_host', 'ec_history_of_host', ''), + ('event_text', None, ''), + ('event_match_groups', None, ''), + ('event_comment', None, ''), + ('event_owner', None, ''), + ('event_first', None, ''), + ('event_last', None, ''), + ('event_id', 'ec_history_of_event', ''), + ('event_icons', None, ''), + ('event_count', None, ''), + ('event_sl', None, ''), + ('event_contact', None, ''), + ('event_contact_groups', None, ''), + ('event_application', None, ''), + ('event_pid', None, ''), + ('event_priority', None, ''), + ('event_facility', None, ''), + ('event_rule_id', None, ''), + ('event_phase', None, ''), + ], + }) + + 
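Note how `ec_history_recent` above preselects the `history_time` filter through `hard_filtervars`: `history_time_from = '1'` combined with `history_time_from_range = '86400'` reads as "from 1 x 86400 seconds (one day) ago". A local variant with a wider window only needs to copy the view and override those two variables. The following sketch is illustrative, not part of the patch; the name `ec_history_recent_week` is hypothetical and it assumes `ec_history_recent` has already been registered:

    # Hypothetical sketch: same view as ec_history_recent, but covering
    # the last seven days. dict() takes a shallow copy, so the builtin
    # view itself stays unchanged.
    week_view = dict(multisite_builtin_views['ec_history_recent'])
    week_view.update({
        'title':       _('Event History of the Last 7 Days'),
        'description': _('Events and actions on events during the last seven days.'),
        'hard_filtervars': [
            ('history_time_from',       '7'),     # 7 units ...
            ('history_time_from_range', '86400'), # ... of 86400 seconds (days)
        ],
    })
    multisite_builtin_views['ec_history_recent_week'] = week_view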
multisite_builtin_views['ec_history_of_event'] = mkeventd_view({ + 'title': _('History of Event'), + 'description': _('History entries of one specific event'), + 'datasource': 'mkeventd_history', + 'layout': 'table', + 'columns': 1, + + 'hidden': True, + 'browser_reload': 0, + 'hide_filters': [ + 'event_id', + ], + 'painters': [ + ('history_time', None, ''), + ('history_line', 'ec_historyentry', ''), + ('history_what', None, ''), + ('history_what_explained', None, ''), + ('history_who', None, ''), + ('event_state', None, ''), + ('event_host', None, ''), + ('event_application', None, ''), + ('event_text', None, ''), + ('event_sl', None, ''), + ('event_priority', None, ''), + ('event_facility', None, ''), + ('event_phase', None, ''), + ('event_count', None, ''), + ], + 'sorters': [ + ('history_time', True), + ('history_line', True), + ], + }) + + multisite_builtin_views['ec_history_of_host'] = mkeventd_view({ + 'title': _('Event History of Host'), + 'description': _('History entries of one specific host'), + 'datasource': 'mkeventd_history', + 'layout': 'table', + 'columns': 1, + + 'hidden': True, + 'browser_reload': 0, + 'hide_filters': [ + 'event_host', + ], + 'show_filters': [ + 'event_id', + 'event_rule_id', + 'event_text', + 'event_application', + 'event_contact', + 'event_comment', + 'event_count', + 'event_phase', + 'event_state', + 'event_first', + 'event_last', + 'event_priority', + 'event_facility', + 'event_sl', + 'event_sl_max', + 'history_time', + 'history_who', + 'history_what', + ], + 'painters': [ + ('history_time', None, ''), + ('event_id', 'ec_history_of_event', ''), + ('history_line', 'ec_historyentry', ''), + ('history_what', None, ''), + ('history_what_explained', None, ''), + ('history_who', None, ''), + ('event_state', None, ''), + ('event_host', None, ''), + ('event_application', None, ''), + ('event_text', None, ''), + ('event_sl', None, ''), + ('event_priority', None, ''), + ('event_facility', None, ''), + ('event_phase', None, ''), + ('event_count', None, ''), + ], + 'sorters': [ + ('history_time', True), + ('history_line', True), + ], + }) + + multisite_builtin_views['ec_event_mobile'] = \ + {'browser_reload': 0, + 'column_headers': 'pergroup', + 'context': {}, + 'datasource': 'mkeventd_events', + 'description': u'Details about one event\n', + 'group_painters': [], + 'hidden': True, + 'hidebutton': False, + 'icon': 'mkeventd', + 'layout': 'mobiledataset', + 'linktitle': u'Event Details', + 'mobile': True, + 'name': 'ec_event_mobile', + 'num_columns': 1, + 'painters': [('event_state', None, None), + ('event_host', None, None), + ('host_address', 'hoststatus', None), + ('host_contacts', None, None), + ('host_icons', None, None), + ('event_text', None, None), + ('event_comment', None, None), + ('event_owner', None, None), + ('event_first', None, None), + ('event_last', None, None), + ('event_id', None, None), + ('event_icons', None, None), + ('event_count', None, None), + ('event_sl', None, None), + ('event_contact', None, None), + ('event_contact_groups', None, None), + ('event_application', None, None), + ('event_pid', None, None), + ('event_priority', None, None), + ('event_facility', None, None), + ('event_rule_id', None, None), + ('event_phase', None, None), + ('host_services', None, None)], + 'public': True, + 'single_infos': ['event'], + 'sorters': [], + 'title': u'Event Details', + 'topic': u'Event Console', + 'user_sortable': True} + + multisite_builtin_views['ec_events_mobile'] = \ + {'browser_reload': 60, + 'column_headers': 'pergroup', + 'context': 
{'event_application': {'event_application': ''}, + 'event_comment': {'event_comment': ''}, + 'event_contact': {'event_contact': ''}, + 'event_count': {'event_count_from': '', + 'event_count_to': ''}, + 'event_facility': {'event_facility': ''}, + 'event_first': {'event_first_from': '', + 'event_first_from_range': '3600', + 'event_first_until': '', + 'event_first_until_range': '3600'}, + 'event_host_regex': {'event_host_regex': ''}, + 'event_id': {'event_id': ''}, + 'event_last': {'event_last_from': '', + 'event_last_from_range': '3600', + 'event_last_until': '', + 'event_last_until_range': '3600'}, + 'event_phase': {'event_phase_ack': 'on', + 'event_phase_closed': 'on', + 'event_phase_counting': '', + 'event_phase_delayed': '', + 'event_phase_open': 'on'}, + 'event_priority': {'event_priority_0': 'on', + 'event_priority_1': 'on', + 'event_priority_2': 'on', + 'event_priority_3': 'on', + 'event_priority_4': 'on', + 'event_priority_5': 'on', + 'event_priority_6': 'on', + 'event_priority_7': 'on'}, + 'event_rule_id': {'event_rule_id': ''}, + 'event_sl': {'event_sl': ''}, + 'event_sl_max': {'event_sl_max': ''}, + 'event_state': {'event_state_0': 'on', + 'event_state_1': 'on', + 'event_state_2': 'on', + 'event_state_3': 'on'}, + 'event_text': {'event_text': ''}, + 'hostregex': {'host_regex': ''}}, + 'datasource': 'mkeventd_events', + 'description': u'Table of all currently open events (handled and unhandled)\n', + 'group_painters': [], + 'hidden': False, + 'hidebutton': False, + 'icon': 'mkeventd', + 'layout': 'mobilelist', + 'linktitle': u'Events', + 'mobile': True, + 'name': 'ec_events_mobile', + 'num_columns': 1, + 'owner': 'omdadmin', + 'painters': [('event_id', 'ec_event_mobile', None), + ('event_state', None, None), + ('event_host', 'ec_events_of_host', None), + ('event_application', None, None), + ('event_text', None, None), + ('event_last', None, None)], + 'public': True, + 'single_infos': [], + 'sorters': [], + 'title': u'Events', + 'topic': u'Event Console', + 'user_sortable': True} diff -Nru check-mk-1.2.2p3/=unpacked-tar8=/plugins/visuals/mkeventd.py check-mk-1.2.6p12/=unpacked-tar8=/plugins/visuals/mkeventd.py --- check-mk-1.2.2p3/=unpacked-tar8=/plugins/visuals/mkeventd.py 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/=unpacked-tar8=/plugins/visuals/mkeventd.py 2015-09-21 10:59:54.000000000 +0000 @@ -0,0 +1,222 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. 
If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. + +import mkeventd + +try: + mkeventd_enabled = config.mkeventd_enabled +except: + mkeventd_enabled = False + +# Declare datasource only if the event console is activated. We do +# not want to irritate users that do not know anything about the EC. +if mkeventd_enabled: + + # .--Infos---------------------------------------------------------------. + # | ___ __ | + # | |_ _|_ __ / _| ___ ___ | + # | | || '_ \| |_ / _ \/ __| | + # | | || | | | _| (_) \__ \ | + # | |___|_| |_|_| \___/|___/ | + # | | + # +----------------------------------------------------------------------+ + # | | + # '----------------------------------------------------------------------' + + infos['event'] = { + 'title' : _('Event Console Event'), + 'single_spec' : [ + ('event_id', Integer( + title = _('Event ID'), + )), + ] + } + + infos['history'] = { + 'title' : _('Historic Event Console Event'), + 'single_spec' : [ + ('event_id', Integer( + title = _('Event ID'), + )), + ('history_line', Integer( + title = _('History Line Number'), + )), + ] + } + + #. + # .--Filters-------------------------------------------------------------. + # | _____ _ _ _ | + # | | ___(_) | |_ ___ _ __ ___ | + # | | |_ | | | __/ _ \ '__/ __| | + # | | _| | | | || __/ | \__ \ | + # | |_| |_|_|\__\___|_| |___/ | + # | | + # '----------------------------------------------------------------------' + + # All filters for events define a function event_headers, that + # returns header lines for the event daemon, if the filter is in + # use. + class EventFilterText(FilterText): + def __init__(self, table, filter_name, column, title, op): + FilterText.__init__(self, filter_name, title, table, column, filter_name, op) + self._table = table + + # Disable Livestatus filter + def filter(self, infoname): + return "" + + def event_headers(self): + return FilterText.filter(self, self._table) + + declare_filter(200, EventFilterText("event", "event_id", "event_id", _("Event ID"), "=")) + declare_filter(200, EventFilterText("event", "event_rule_id", "event_rule_id", _("ID of rule"), "=")) + declare_filter(201, EventFilterText("event", "event_text", "event_text", _("Message/Text of event"), "~~")) + declare_filter(201, EventFilterText("event", "event_application","event_application", _("Application / Syslog-Tag"), "~~")) + declare_filter(201, EventFilterText("event", "event_contact", "event_contact", _("Contact Person"), "~~")) + declare_filter(201, EventFilterText("event", "event_comment", "event_comment", _("Comment to the event"), "~~")) + declare_filter(201, EventFilterText("event", "event_host_regex", "event_host", _("Hostname/IP-Address of original event"), "~~")) + declare_filter(201, EventFilterText("event", "event_host", "event_host", _("Hostname/IP-Address of event, exact match"), "=")) + declare_filter(201, EventFilterText("event", "event_owner", "event_owner", _("Owner of event"), "~~")) + declare_filter(221, EventFilterText("history", "history_who", "history_who", _("User that performed action"), "~~")) + declare_filter(222, EventFilterText("history", "history_line", "history_line", _("Line number in history logfile"), "=")) + + + class EventFilterCount(Filter): + def __init__(self, name, title): + Filter.__init__(self, name, title, "event", [name + "_from", name + "_to"], [name]) + self._name = name + + def display(self): + html.write("from: ") + html.number_input(self._name + "_from", "") + html.write(" to: ") + 
html.number_input(self._name + "_to", "") + + def filter(self, infoname): + return "" + + def event_headers(self): + try: + f = "" + if html.var(self._name + "_from"): + f += "Filter: event_count >= %d\n" % int(html.var(self._name + "_from")) + if html.var(self._name + "_to"): + f += "Filter: event_count <= %d\n" % int(html.var(self._name + "_to")) + return f + except: + return "" + + + declare_filter(205, EventFilterCount("event_count", _("Message count"))) + + class EventFilterState(Filter): + def __init__(self, table, name, title, choices): + varnames = [ name + "_" + str(c[0]) for c in choices ] + Filter.__init__(self, name, title, table, varnames, [name]) + self._name = name + self._choices = choices + + def double_height(self): + return len(self._choices) >= 5 + + def display(self): + html.begin_checkbox_group() + chars = 0 + for name, title in self._choices: + chars += len(title) + 2 + html.checkbox(self._name + "_" + str(name), True, label=title) + if (title[0].isupper() and chars > 24) or \ + (title[0].islower() and chars > 36): + html.write("
    ") + chars = 0 + html.end_checkbox_group() + + def filter(self, infoname): + return "" + + def event_headers(self): + sel = [] + for name, title in self._choices: + if html.get_checkbox(self._name + "_" + str(name)): + sel.append(str(name)) + if len(sel) > 0 and len(sel) < len(self._choices): + return "Filter: %s in %s\n" % (self._name, " ".join(sel)) + + + + declare_filter(206, EventFilterState("event", "event_state", _("State classification"), [ (0, _("OK")), (1, _("WARN")), (2, _("CRIT")), (3,_("UNKNOWN")) ])) + declare_filter(207, EventFilterState("event", "event_phase", _("Phase"), mkeventd.phase_names.items())) + declare_filter(209, EventFilterState("event", "event_priority", _("Syslog Priority"), mkeventd.syslog_priorities)) + declare_filter(225, EventFilterState("history", "history_what", _("History action type"), [(k,k) for k in mkeventd.action_whats.keys()])) + + + class EventFilterTime(FilterTime): + def __init__(self, table, name, title): + FilterTime.__init__(self, table, name, title, name) + self._table = table + + def filter(self, infoname): + return "" + + def event_headers(self): + return FilterTime.filter(self, self._table) + + declare_filter(220, EventFilterTime("event", "event_first", _("First occurrance of event"))) + declare_filter(221, EventFilterTime("event", "event_last", _("Last occurrance of event"))) + declare_filter(222, EventFilterTime("history", "history_time", _("Time of entry in event history"))) + + + class EventFilterDropdown(Filter): + def __init__(self, name, title, choices, operator = '=', column=None): + if column == None: + column = name + self._varname = "event_" + name + Filter.__init__(self, "event_" + name, title, "event", [ self._varname ], [ "event_" + column ]) + self._choices = choices + self._column = column + self._operator = operator + + def display(self): + if type(self._choices) == list: + choices = self._choices + else: + choices = self._choices() + html.select(self._varname, [ ("", "") ] + [(str(n),t) for (n,t) in choices]) + + def filter(self, infoname): + return "" + + def event_headers(self): + val = html.var(self._varname) + if val: + return "Filter: event_%s %s %s\n" % (self._column, self._operator, val) + + + + declare_filter(210, EventFilterDropdown("facility", _("Syslog Facility"), mkeventd.syslog_facilities)) + declare_filter(211, EventFilterDropdown("sl", _("Service Level at least"), mkeventd.service_levels, operator='>=')) + declare_filter(211, EventFilterDropdown("sl_max", _("Service Level at most"), mkeventd.service_levels, operator='<=', column="sl")) diff -Nru check-mk-1.2.2p3/=unpacked-tar8=/plugins/wato/mkeventd.py check-mk-1.2.6p12/=unpacked-tar8=/plugins/wato/mkeventd.py --- check-mk-1.2.2p3/=unpacked-tar8=/plugins/wato/mkeventd.py 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/=unpacked-tar8=/plugins/wato/mkeventd.py 2015-09-21 10:59:54.000000000 +0000 @@ -0,0 +1,2188 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. 
+# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. + +import mkeventd + +mkeventd_enabled = config.mkeventd_enabled + +# main_config_file = defaults.check_mk_configdir + "/mkeventd.mk" +mkeventd_config_dir = defaults.default_config_dir + "/mkeventd.d/wato/" +if defaults.omd_root: + mkeventd_status_file = defaults.omd_root + "/var/mkeventd/status" + +# Include rule configuration into backup/restore/replication. Current +# status is not backed up. +if mkeventd_enabled: + replication_paths.append(( "dir", "mkeventd", mkeventd_config_dir )) + backup_paths.append(( "dir", "mkeventd", mkeventd_config_dir )) + +#. +# .--ValueSpecs----------------------------------------------------------. +# | __ __ _ ____ | +# | \ \ / /_ _| |_ _ ___/ ___| _ __ ___ ___ ___ | +# | \ \ / / _` | | | | |/ _ \___ \| '_ \ / _ \/ __/ __| | +# | \ V / (_| | | |_| | __/___) | |_) | __/ (__\__ \ | +# | \_/ \__,_|_|\__,_|\___|____/| .__/ \___|\___|___/ | +# | |_| | +# +----------------------------------------------------------------------+ +# | Declarations of the structure of rules and actions | +# '----------------------------------------------------------------------' +substitute_help = _(""" +The following placeholdes will be substituted by value from the actual event: + + + + + + + + + + + + + + + + + + + + + + +
+<tr><td>$ID$</td><td>Event ID</td></tr>
+<tr><td>$COUNT$</td><td>Number of occurrences</td></tr>
+<tr><td>$TEXT$</td><td>Message text</td></tr>
+<tr><td>$FIRST$</td><td>Time of the first occurrence (time stamp)</td></tr>
+<tr><td>$LAST$</td><td>Time of the most recent occurrence</td></tr>
+<tr><td>$COMMENT$</td><td>Event comment</td></tr>
+<tr><td>$SL$</td><td>Service Level</td></tr>
+<tr><td>$HOST$</td><td>Host name (as sent by syslog)</td></tr>
+<tr><td>$CONTACT$</td><td>Contact information</td></tr>
+<tr><td>$APPLICATION$</td><td>Syslog tag / Application</td></tr>
+<tr><td>$PID$</td><td>Process ID of the origin process</td></tr>
+<tr><td>$PRIORITY$</td><td>Syslog Priority</td></tr>
+<tr><td>$FACILITY$</td><td>Syslog Facility</td></tr>
+<tr><td>$RULE_ID$</td><td>ID of the rule</td></tr>
+<tr><td>$STATE$</td><td>State of the event (0/1/2/3)</td></tr>
+<tr><td>$PHASE$</td><td>Phase of the event (open in normal situations, closed when cancelling)</td></tr>
+<tr><td>$OWNER$</td><td>Owner of the event</td></tr>
+<tr><td>$MATCH_GROUPS$</td><td>Text groups from regular expression match, separated by spaces</td></tr>
+<tr><td>$MATCH_GROUP_1$</td><td>Text of the first match group from expression match</td></tr>
+<tr><td>$MATCH_GROUP_2$</td><td>Text of the second match group from expression match</td></tr>
+<tr><td>$MATCH_GROUP_3$</td><td>Text of the third match group from expression match (and so on...)</td></tr>
    +""" +) + +class ActionList(ListOf): + def __init__(self, vs, **kwargs): + ListOf.__init__(self, vs, **kwargs) + + def validate_value(self, value, varprefix): + ListOf.validate_value(self, value, varprefix) + action_ids = [ v["id"] for v in value ] + rules = load_mkeventd_rules() + for rule in rules: + for action_id in rule.get("actions", []): + if action_id not in action_ids + ["@NOTIFY"]: + raise MKUserError(varprefix, _("You are missing the action with the ID %s, " + "which is still used in some rules.") % action_id) + + +vs_mkeventd_actions = \ + ActionList( + Foldable( + Dictionary( + title = _("Action"), + optional_keys = False, + elements = [ + ( "id", + ID( + title = _("Action ID"), + help = _("A unique ID of this action that is used as an internal " + "reference in the configuration. Changing the ID is not " + "possible if still rules refer to this ID."), + allow_empty = False, + size = 12, + ) + ), + ( "title", + TextUnicode( + title = _("Title"), + help = _("A descriptive title of this action."), + allow_empty = False, + size = 64, + attrencode = True, + ) + ), + ( "disabled", + Checkbox( + title = _("Disable"), + label = _("Currently disable execution of this action"), + ) + ), + ( "hidden", + Checkbox( + title = _("Hide from Status GUI"), + label = _("Do not offer this action as a command on open events"), + help = _("If you enabled this option, then this action will not " + "be available as an interactive user command. It is usable " + "as an ad-hoc action when a rule fires, nevertheless."), + ), + ), + ( "action", + CascadingDropdown( + title = _("Type of Action"), + help = _("Choose the type of action to perform"), + choices = [ + ( "email", + _("Send Email"), + Dictionary( + optional_keys = False, + elements = [ + ( "to", + TextAscii( + title = _("Recipient Email address"), + allow_empty = False, + attrencode = True, + ), + ), + ( "subject", + TextUnicode( + title = _("Subject"), + allow_empty = False, + size = 64, + attrencode = True, + ), + ), + ( "body", + TextAreaUnicode( + title = _("Body"), + help = _("Text-body of the email to send. ") + substitute_help, + cols = 64, + rows = 10, + attrencode = True, + ), + ), + ] + ) + ), + ( "script", + _("Execute Shell Script"), + Dictionary( + optional_keys = False, + elements = [ + ( "script", + TextAreaUnicode( + title = _("Script body"), + help = _("This script will be executed using the BASH shell. 
") + substitute_help, + cols = 64, + rows = 10, + attrencode = True, + ) + ), + ] + ) + ), + ] + ), + ), + ], + ), + title_function = lambda value: not value["id"] and _("New Action") or (value["id"] + " - " + value["title"]), + ), + title = _("Actions (Emails & Scripts)"), + help = _("Configure that possible actions that can be performed when a " + "rule triggers and also manually by a user."), + totext = _("%d actions"), + ) + + +class RuleState(CascadingDropdown): + def __init__(self, **kwargs): + choices = [ + ( 0, _("OK")), + ( 1, _("WARN")), + ( 2, _("CRIT")), + ( 3, _("UNKNOWN")), + (-1, _("(set by syslog)")), + ('text_pattern', _('(set by message text)'), + Dictionary( + elements = [ + ('2', RegExpUnicode( + title = _("CRIT Pattern"), + help = _("When the given regular expression (infix search) matches " + "the events state is set to CRITICAL."), + size = 64, + )), + ('1', RegExpUnicode( + title = _("WARN Pattern"), + help = _("When the given regular expression (infix search) matches " + "the events state is set to WARNING."), + size = 64, + )), + ('0', RegExpUnicode( + title = _("OK Pattern"), + help = _("When the given regular expression (infix search) matches " + "the events state is set to OK."), + size = 64, + )), + ], + help = _('Individual patterns matching the text (which must have been matched by ' + 'the generic "text to match pattern" before) which set the state of the ' + 'generated event depending on the match.

    ' + 'First the CRITICAL pattern is tested, then WARNING and OK at last. ' + 'If none of the patterns matches, the events state is set to UNKNOWN.'), + ) + ), + ] + CascadingDropdown.__init__(self, choices = choices, **kwargs) + +vs_mkeventd_rule = Dictionary( + title = _("Rule Properties"), + elements = [ + ( "id", + ID( + title = _("Rule ID"), + help = _("A unique ID of this rule. Each event will remember the rule " + "it was classified with by its rule ID."), + allow_empty = False, + size = 12, + )), + ( "description", + TextUnicode( + title = _("Description"), + help = _("You can use this description for commenting your rules. It " + "will not be attached to the event this rule classifies."), + size = 64, + attrencode = True, + )), + ( "disabled", + Checkbox( + title = _("Rule activation"), + help = _("Disabled rules are kept in the configuration but are not applied."), + label = _("do not apply this rule"), + ) + ), + ( "drop", + Checkbox( + title = _("Drop Message"), + help = _("With this option all messages matching this rule will be silently dropped."), + label = _("Silently drop message, do no actions"), + ) + ), + ( "state", + RuleState( + title = _("State"), + help = _("The monitoring state that this event will trigger."), + default_value = -1, + )), + ( "sl", + DropdownChoice( + title = _("Service Level"), + choices = mkeventd.service_levels, + prefix_values = True, + ), + ), + ( "contact_groups", + ListOf( + GroupSelection("contact"), + title = _("Fallback Contact Groups"), + help = _("When displaying events in the Check_MK GUI, you can make a user see only events " + "for hosts he is a contact for. When you expect this rule to receive events from " + "hosts that are not known to the monitoring, you can specify contact groups " + "for visibility here. Note: If you activate this option and do not specify " + "any group, then users with restricted permissions can never see these events."), + movable = False, + ) + ), + ( "actions", + ListChoice( + title = _("Actions"), + help = _("Actions to automatically perform when this event occurs"), + choices = mkeventd.action_choices, + ) + ), + ( "cancel_actions", + ListChoice( + title = _("Actions when cancelling"), + help = _("Actions to automatically perform when an event is being cancelled."), + choices = mkeventd.action_choices, + ) + ), + ( "autodelete", + Checkbox( + title = _("Automatic Deletion"), + label = _("Delete event immediately after the actions"), + help = _("Incoming messages might trigger actions (when configured above), " + "afterwards only an entry in the event history will be left. There " + "will be no \"open event\" to be handled by the administrators."), + ) + ), + ( "count", + Dictionary( + title = _("Count messages in defined interval"), + help = _("With this option you can make the rule being executed not before " + "the matching message is seen a couple of times in a defined " + "time interval. Also counting activates the aggregation of messages " + "that result from the same rule into one event, even if count is " + "set to 1."), + optional_keys = False, + columns = 2, + elements = [ + ( "count", + Integer( + title = _("Count until triggered"), + help = _("That many times the message must occur until an event is created"), + minvalue = 1, + ), + ), + ( "period", + Age( + title = _("Time period for counting"), + help = _("If in this time range the configured number of time the rule is " + "triggered, an event is being created. 
If the required count is not reached " + "then the count is reset to zero."), + default_value = 86400, + ), + ), + ( "algorithm", + DropdownChoice( + title = _("Algorithm"), + help = _("Select how the count is computed. The algorithm Interval will count the " + "number of messages from the first occurrance and reset this counter as soon as " + "the interval is elapsed or the maximum count has reached. The token bucket algorithm " + "does not work with intervals but simply decreases the current count by one for " + "each partial time interval. Please refer to the online documentation for more details."), + choices = [ + ( "interval", _("Interval")), + ( "tokenbucket", _("Token Bucket")), + ( "dynabucket", _("Dynamic Token Bucket")), + ], + default_value = "interval") + ), + ( "count_ack", + Checkbox( + label = _("Continue counting when event is acknowledged"), + help = _("Otherwise counting will start from one with a new event for " + "the next rule match."), + default_value = False, + ) + ), + ( "separate_host", + Checkbox( + label = _("Force separate events for different hosts"), + help = _("When aggregation is turned on and the rule matches for " + "two different hosts then these two events will be kept " + "separate if you check this box."), + default_value = True, + ), + ), + ( "separate_application", + Checkbox( + label = _("Force separate events for different applications"), + help = _("When aggregation is turned on and the rule matches for " + "two different applications then these two events will be kept " + "separate if you check this box."), + default_value = True, + ), + ), + ( "separate_match_groups", + Checkbox( + label = _("Force separate events for different match groups"), + help = _("When you use subgroups in the regular expression of your " + "match text then you can have different values for the matching " + "groups be reflected in different events."), + default_value = True, + ), + ), + ], + ) + ), + ( "expect", + Dictionary( + title = _("Expect regular messages"), + help = _("With this option activated you can make the Event Console monitor " + "that a certain number of messages are at least seen within " + "each regular time interval. Otherwise an event will be created. " + "The options week, two days and day refer to " + "periodic intervals aligned at 00:00:00 on the 1st of January 1970. 
" + "You can specify a relative offset in hours in order to re-align this " + "to any other point of time."), + optional_keys = False, + columns = 2, + elements = [ + ( "interval", + CascadingDropdown( + title = _("Interval"), + html_separator = " ", + choices = [ + ( 7*86400, _("week"), + Integer( + label = _("Timezone offset"), + unit = _("hours"), + default_value = 0, + minvalue = - 167, + maxvalue = 167, + ) + ), + ( 2*86400, _("two days"), + Integer( + label = _("Timezone offset"), + unit = _("hours"), + default_value = 0, + minvalue = - 47, + maxvalue = 47, + ) + ), + ( 86400, _("day"), + DropdownChoice( + label = _("in timezone"), + choices = [ + ( -12, _("UTC -12 hours") ), + ( -11, _("UTC -11 hours") ), + ( -10, _("UTC -10 hours") ), + ( -9, _("UTC -9 hours") ), + ( -8, _("UTC -8 hours") ), + ( -7, _("UTC -7 hours") ), + ( -6, _("UTC -6 hours") ), + ( -5, _("UTC -5 hours") ), + ( -4, _("UTC -4 hours") ), + ( -3, _("UTC -3 hours") ), + ( -2, _("UTC -2 hours") ), + ( -1, _("UTC -1 hour") ), + ( 0, _("UTC") ), + ( 1, _("UTC +1 hour") ), + ( 2, _("UTC +2 hours") ), + ( 3, _("UTC +3 hours") ), + ( 4, _("UTC +4 hours") ), + ( 5, _("UTC +5 hours") ), + ( 6, _("UTC +8 hours") ), + ( 7, _("UTC +7 hours") ), + ( 8, _("UTC +8 hours") ), + ( 9, _("UTC +9 hours") ), + ( 10, _("UTC +10 hours") ), + ( 11, _("UTC +11 hours") ), + ( 12, _("UTC +12 hours") ), + ], + default_value = 0, + ) + ), + ( 3600, _("hour") ), + ( 900, _("15 minutes") ), + ( 300, _("5 minutes") ), + ( 60, _("minute") ), + ( 10, _("10 seconds") ), + ], + default_value = 3600, + ) + ), + ( "count", + Integer( + title = _("Number of expected messages in each interval"), + minvalue = 1, + ) + ), + ( "merge", + DropdownChoice( + title = _("Merge with open event"), + help = _("If there already exists an open event because of absent " + "messages according to this rule, you can optionally merge " + "the new incident with the exising event or create a new " + "event for each interval with absent messages."), + choices = [ + ( "open", _("Merge if there is an open un-acknowledged event") ), + ( "acked", _("Merge even if there is an acknowledged event") ), + ( "never", _("Create a new event for each incident - never merge") ), + ], + default_value = "open", + ) + ), + ]) + ), + ( "delay", + Age( + title = _("Delay event creation"), + help = _("The creation of an event will be delayed by this time period. This " + "does only make sense for events that can be cancelled by a negative " + "rule.")) + ), + ( "livetime", + Tuple( + title = _("Limit event livetime"), + help = _("If you set a livetime of an event, then it will automatically be " + "deleted after that time if, even if no action has taken by the user. You can " + "decide whether to expire open, acknowledged or both types of events. The lifetime " + "always starts when the event is entering the open state."), + elements = [ + Age(), + ListChoice( + choices = [ + ( "open", _("Expire events that are in the state open") ), + ( "ack", _("Expire events that are in the state acknowledged") ), + ], + default_value = [ "open" ], + ) + ], + ), + ), + ( "match", + RegExpUnicode( + title = _("Text to match"), + help = _("The rules does only apply when the given regular expression matches " + "the message text (infix search)."), + size = 64, + ) + ), + ( "match_host", + RegExpUnicode( + title = _("Match host"), + help = _("The rules does only apply when the given regular expression matches " + "the host name the message originates from. 
Note: in some cases the " + "event might use the IP address instead of the host name."), + ) + ), + ( "match_application", + RegExpUnicode( + title = _("Match syslog application (tag)"), + help = _("Regular expression for matching the syslog tag (case insenstive)"), + ) + ), + ( "match_priority", + Tuple( + title = _("Match syslog priority"), + help = _("Define a range of syslog priorities this rule matches"), + orientation = "horizontal", + show_titles = False, + elements = [ + DropdownChoice(label = _("from:"), choices = mkeventd.syslog_priorities, default_value = 4), + DropdownChoice(label = _(" to:"), choices = mkeventd.syslog_priorities, default_value = 0), + ], + ), + ), + ( "match_facility", + DropdownChoice( + title = _("Match syslog facility"), + help = _("Make the rule match only if the message has a certain syslog facility. " + "Messages not having a facility are classified as user."), + choices = mkeventd.syslog_facilities, + ) + ), + ( "match_sl", + Tuple( + title = _("Match service level"), + help = _("This setting is only useful for events that result from monitoring notifications " + "sent by Check_MK. Those can set a service level already in the event. In such a " + "case you can make this rule match only certain service levels. Events that do not "), + orientation = "horizontal", + show_titles = False, + elements = [ + DropdownChoice(label = _("from:"), choices = mkeventd.service_levels, prefix_values = True), + DropdownChoice(label = _(" to:"), choices = mkeventd.service_levels, prefix_values = True), + ], + ), + ), + ( "match_timeperiod", + TimeperiodSelection( + title = _("Match only during timeperiod"), + help = _("Match this rule only during times where the selected timeperiod from the monitoring " + "system is active. The Timeperiod definitions are taken from the monitoring core that " + "is running on the same host or OMD site as the event daemon. Please note, that this " + "selection only offers timeperiods that are defined with WATO."), + ), + ), + ( "match_ok", + RegExpUnicode( + title = _("Text to cancel event"), + help = _("If a matching message appears with this text, then an event created " + "by this rule will automatically be cancelled (if host, application and match groups match). "), + size = 64, + ) + ), + ( "cancel_priority", + Tuple( + title = _("Syslog priority to cancel event"), + help = _("If the priority of the event lies withing this range and either no text to cancel " + "is specified or that text also matched, then events created with this rule will " + "automatically be cancelled (if host, application and match groups match)."), + orientation = "horizontal", + show_titles = False, + elements = [ + DropdownChoice(label = _("from:"), choices = mkeventd.syslog_priorities, default_value = 7), + DropdownChoice(label = _(" to:"), choices = mkeventd.syslog_priorities, default_value = 5), + ], + ), + ), + ( "set_text", + TextUnicode( + title = _("Rewrite message text"), + help = _("Replace the message text with this text. If you have bracketed " + "groups in the text to match, then you can use the placeholders " + "\\1, \\2, etc. for inserting the first, second " + "etc matching group.") + + _("The placeholder \\0 will be replaced by the original text. " + "This allows you to add new information in front or at the end."), + size = 64, + allow_empty = False, + attrencode = True, + ) + ), + ( "set_host", + TextUnicode( + title = _("Rewrite hostname"), + help = _("Replace the host name with this text. 
If you have bracketed " + "groups in the text to match, then you can use the placeholders " + "\\1, \\2, etc. for inserting the first, second " + "etc matching group.") + + _("The placeholder \\0 will be replaced by the original host name. " + "This allows you to add new information in front or at the end."), + allow_empty = False, + attrencode = True, + ) + ), + ( "set_application", + TextUnicode( + title = _("Rewrite application"), + help = _("Replace the application (syslog tag) with this text. If you have bracketed " + "groups in the text to match, then you can use the placeholders " + "\\1, \\2, etc. for inserting the first, second " + "etc matching group.") + + _("The placeholder \\0 will be replaced by the original text. " + "This allows you to add new information in front or at the end."), + allow_empty = False, + attrencode = True, + ) + ), + ( "set_comment", + TextUnicode( + title = _("Add comment"), + help = _("Attach a comment to the event. If you have bracketed " + "groups in the text to match, then you can use the placeholders " + "\\1, \\2, etc. for inserting the first, second " + "etc matching group.") + + _("The placeholder \\0 will be replaced by the original text. " + "This allows you to add new information in front or at the end."), + size = 64, + allow_empty = False, + attrencode = True, + ) + ), + ( "set_contact", + TextUnicode( + title = _("Add contact information"), + help = _("Attach information about a contact person. If you have bracketed " + "groups in the text to match, then you can use the placeholders " + "\\1, \\2, etc. for inserting the first, second " + "etc matching group.") + + _("The placeholder \\0 will be replaced by the original text. " + "This allows you to add new information in front or at the end."), + size = 64, + allow_empty = False, + attrencode = True, + ) + ), + ], + optional_keys = [ "delay", "livetime", "count", "expect", "match_priority", "match_priority", + "match_facility", "match_sl", "match_host", "match_application", "match_timeperiod", + "set_text", "set_host", "set_application", "set_comment", + "set_contact", "cancel_priority", "match_ok", "contact_groups" ], + headers = [ + ( _("General Properties"), [ "id", "description", "disabled" ] ), + ( _("Matching Criteria"), [ "match", "match_host", "match_application", "match_priority", "match_facility", + "match_sl", "match_ok", "cancel_priority", "match_timeperiod" ]), + ( _("Outcome & Action"), [ "state", "sl", "contact_groups", "actions", "cancel_actions", "drop", "autodelete" ]), + ( _("Counting & Timing"), [ "count", "expect", "delay", "livetime", ]), + ( _("Rewriting"), [ "set_text", "set_host", "set_application", "set_comment", "set_contact" ]), + ], + render = "form", + form_narrow = True, +) + +# VS for simulating an even +vs_mkeventd_event = Dictionary( + title = _("Event Simulator"), + help = _("You can simulate an event here and check out, which rules are matching."), + render = "form", + form_narrow = True, + optional_keys = False, + elements = [ + ( "text", + TextUnicode( + title = _("Message Text"), + size = 80, + allow_empty = False, + default_value = _("Still nothing happened."), + attrencode = True), + ), + ( "application", + TextUnicode( + title = _("Application Name"), + help = _("The syslog tag"), + size = 40, + default_value = _("Foobar-Daemon"), + allow_empty = True, + attrencode = True), + ), + ( "host", + TextUnicode( + title = _("Host Name"), + help = _("The host name of the event"), + size = 40, + default_value = _("myhost089"), + allow_empty = True, + 
attrencode = True, + regex = "^\\S*$", + regex_error = _("The host name may not contain spaces."), + ) + ), + ( "priority", + DropdownChoice( + title = _("Syslog Priority"), + choices = mkeventd.syslog_priorities, + default_value = 5, + ) + ), + ( "facility", + DropdownChoice( + title = _("Syslog Facility"), + choices = mkeventd.syslog_facilities, + default_value = 1, + ) + ), + ]) + + +#. +# .--Persistence---------------------------------------------------------. +# | ____ _ _ | +# | | _ \ ___ _ __ ___(_)___| |_ ___ _ __ ___ ___ | +# | | |_) / _ \ '__/ __| / __| __/ _ \ '_ \ / __/ _ \ | +# | | __/ __/ | \__ \ \__ \ || __/ | | | (_| __/ | +# | |_| \___|_| |___/_|___/\__\___|_| |_|\___\___| | +# | | +# +----------------------------------------------------------------------+ +# | | +# '----------------------------------------------------------------------' + +def load_mkeventd_rules(): + filename = mkeventd_config_dir + "rules.mk" + if not os.path.exists(filename): + return [] + try: + vars = { "rules" : [] } + execfile(filename, vars, vars) + # If we are running on OMD then we know the path to + # the state retention file of mkeventd and can read + # the rule statistics directly from that file. + if defaults.omd_root and os.path.exists(mkeventd_status_file): + mkeventd_status = eval(file(mkeventd_status_file).read()) + rule_stats = mkeventd_status["rule_stats"] + for rule in vars["rules"]: + rule["hits"] = rule_stats.get(rule["id"], 0) + + # Convert some data fields into a new format + for rule in vars["rules"]: + if "livetime" in rule: + livetime = rule["livetime"] + if type(livetime) != tuple: + rule["livetime"] = ( livetime, ["open"] ) + + return vars["rules"] + + except Exception, e: + if config.debug: + raise MKGeneralException(_("Cannot read configuration file %s: %s" % + (filename, e))) + return [] + +def save_mkeventd_rules(rules): + make_nagios_directory(defaults.default_config_dir + "/mkeventd.d") + make_nagios_directory(mkeventd_config_dir) + out = create_user_file(mkeventd_config_dir + "rules.mk", "w") + out.write("# Written by WATO\n# encoding: utf-8\n\n") + try: + if config.mkeventd_pprint_rules: + out.write("rules += \\\n%s\n" % pprint.pformat(rules)) + return + except: + pass + + out.write("rules += \\\n%r\n" % rules) + + +#. +# .--WATO Modes----------------------------------------------------------. +# | __ ___ _____ ___ __ __ _ | +# | \ \ / / \|_ _/ _ \ | \/ | ___ __| | ___ ___ | +# | \ \ /\ / / _ \ | || | | | | |\/| |/ _ \ / _` |/ _ \/ __| | +# | \ V V / ___ \| || |_| | | | | | (_) | (_| | __/\__ \ | +# | \_/\_/_/ \_\_| \___/ |_| |_|\___/ \__,_|\___||___/ | +# | | +# +----------------------------------------------------------------------+ +# | The actual configuration modes for all rules, one rule and the | +# | activation of the changes. 
| +# '----------------------------------------------------------------------' + +def mode_mkeventd_rules(phase): + if phase == "title": + return _("Rules for event correlation") + + elif phase == "buttons": + home_button() + mkeventd_changes_button() + if config.may("mkeventd.edit"): + html.context_button(_("New Rule"), make_link([("mode", "mkeventd_edit_rule")]), "new") + html.context_button(_("Reset Counters"), + make_action_link([("mode", "mkeventd_rules"), ("_reset_counters", "1")]), "resetcounters") + html.context_button(_("Server Status"), make_link([("mode", "mkeventd_status")]), "status") + mkeventd_config_button() + return + + rules = load_mkeventd_rules() + + if phase == "action": + # Validation of input for rule simulation (no further action here) + if html.var("simulate") or html.var("_generate"): + event = vs_mkeventd_event.from_html_vars("event") + vs_mkeventd_event.validate_value(event, "event") + config.save_user_file("simulated_event", event) + + if html.has_var("_generate") and html.check_transaction(): + if not event.get("application"): + raise MKUserError("event_p_application", _("Please specify an application name")) + if not event.get("host"): + raise MKUserError("event_p_host", _("Please specify a host name")) + rfc = mkeventd.send_event(event) + return None, "Test event generated and sent to Event Console.
<br><pre>%s</pre>
    " % rfc + + + if html.has_var("_delete"): + nr = int(html.var("_delete")) + rule = rules[nr] + c = wato_confirm(_("Confirm rule deletion"), + _("Do you really want to delete the rule %s %s?" % + (rule["id"], rule.get("description","")))) + if c: + log_mkeventd("delete-rule", _("Deleted rule %s") % rules[nr]["id"]) + del rules[nr] + save_mkeventd_rules(rules) + elif c == False: + return "" + else: + return + + elif html.has_var("_reset_counters"): + c = wato_confirm(_("Confirm counter reset"), + _("Do you really want to reset all Hits counters to zero?")) + if c: + mkeventd.query("COMMAND RESETCOUNTERS") + log_mkeventd("counter-reset", _("Resetted all rule hit counters to zero")) + elif c == False: + return "" + else: + return + + elif html.has_var("_copy_rules"): + c = wato_confirm(_("Confirm copying rules"), + _("Do you really want to copy all event rules from the master and " + "replace your local configuration with them?")) + if c: + copy_rules_from_master() + log_mkeventd("copy-rules-from-master", _("Copied the event rules from the master " + "into the local configuration")) + return None, _("Copied rules from master") + elif c == False: + return "" + else: + return + + + if html.check_transaction(): + if html.has_var("_move"): + from_pos = int(html.var("_move")) + to_pos = int(html.var("_where")) + rule = rules[from_pos] + del rules[from_pos] # make to_pos now match! + rules[to_pos:to_pos] = [rule] + save_mkeventd_rules(rules) + log_mkeventd("move-rule", _("Changed position of rule %s") % rule["id"]) + return + + rep_mode = mkeventd.replication_mode() + if rep_mode in [ "sync", "takeover" ]: + copy_url = make_action_link([("mode", "mkeventd_rules"), ("_copy_rules", "1")]) + html.show_warning(_("WARNING: This Event Console is currently running as a replication " + "slave. The rules edited here will not be used. Instead a copy of the rules of the " + "master are being used in the case of a takeover. The same holds for the event " + "actions in the global settings.

    If you want you can copy the ruleset of " + "the master into your local slave configuration: ") + \ + '' % copy_url + + _("Copy Rules From Master") + '') + + if not rules: + html.message(_("You have not created any rules yet. The Event Console is useless unless " + "you have activated Force message archiving in the global settings.")) + + # Simulator + event = config.load_user_file("simulated_event", {}) + html.begin_form("simulator") + vs_mkeventd_event.render_input("event", event) + forms.end() + html.hidden_fields() + html.button("simulate", _("Try out")) + html.button("_generate", _("Generate Event!")) + html.end_form() + html.write("
    ") + + if html.var("simulate"): + event = vs_mkeventd_event.from_html_vars("event") + else: + event = None + + if rules: + table.begin(limit=None, sortable=False) + + have_match = False + for nr, rule in enumerate(rules): + table.row() + delete_url = make_action_link([("mode", "mkeventd_rules"), ("_delete", nr)]) + top_url = make_action_link([("mode", "mkeventd_rules"), ("_move", nr), ("_where", 0)]) + bottom_url = make_action_link([("mode", "mkeventd_rules"), ("_move", nr), ("_where", len(rules)-1)]) + up_url = make_action_link([("mode", "mkeventd_rules"), ("_move", nr), ("_where", nr-1)]) + down_url = make_action_link([("mode", "mkeventd_rules"), ("_move", nr), ("_where", nr+1)]) + edit_url = make_link([("mode", "mkeventd_edit_rule"), ("edit", nr)]) + clone_url = make_link([("mode", "mkeventd_edit_rule"), ("clone", nr)]) + + table.cell(_("Actions"), css="buttons") + html.icon_button(edit_url, _("Edit this rule"), "edit") + html.icon_button(clone_url, _("Create a copy of this rule"), "clone") + html.icon_button(delete_url, _("Delete this rule"), "delete") + if not rule is rules[0]: + html.icon_button(top_url, _("Move this rule to the top"), "top") + html.icon_button(up_url, _("Move this rule one position up"), "up") + else: + html.empty_icon_button() + html.empty_icon_button() + + if not rule is rules[-1]: + html.icon_button(down_url, _("Move this rule one position down"), "down") + html.icon_button(bottom_url, _("Move this rule to the bottom"), "bottom") + else: + html.empty_icon_button() + html.empty_icon_button() + + table.cell("", css="buttons") + if rule.get("disabled"): + html.icon(_("This rule is currently disabled and will not be applied"), "disabled") + elif event: + result = mkeventd.event_rule_matches(rule, event) + if type(result) != tuple: + html.icon(_("Rule does not match: %s") % result, "rulenmatch") + else: + cancelling, groups = result + if have_match: + msg = _("This rule matches, but is overruled by a previous match.") + icon = "rulepmatch" + else: + if cancelling: + msg = _("This rule does a cancelling match.") + else: + msg = _("This rule matches.") + icon = "rulematch" + have_match = True + if groups: + msg += _(" Match groups: %s") % ",".join([ g or _('<None>') for g in groups ]) + html.icon(msg, icon) + + if rule.get("contact_groups") != None: + html.icon(_("This rule attaches contact group(s) to the events: %s") % + (", ".join(rule["contact_groups"]) or _("(none)")), + "contactgroups") + + table.cell(_("ID"), '%s' % (edit_url, rule["id"])) + + if rule.get("drop"): + table.cell(_("State"), _("DROP"), css="state statep") + else: + if type(rule['state']) == tuple: + stateval = rule["state"][0] + else: + stateval = rule["state"] + txt = { 0: _("OK"), 1:_("WARN"), + 2: _("CRIT"), 3:_("UNKNOWN"), + -1: _("(syslog)"), + 'text_pattern':_("(set by message text)") }[stateval] + table.cell(_("State"), txt, css="state state%s" % stateval) + + # Syslog priority + if "match_priority" in rule: + prio_from, prio_to = rule["match_priority"] + if prio_from == prio_to: + prio_text = mkeventd.syslog_priorities[prio_from][1] + else: + prio_text = mkeventd.syslog_priorities[prio_from][1][:2] + ".." 
+ \ + mkeventd.syslog_priorities[prio_to][1][:2] + else: + prio_text = "" + table.cell(_("Priority"), prio_text) + + # Syslog Facility + table.cell(_("Facility")) + if "match_facility" in rule: + facnr = rule["match_facility"] + html.write("%s" % dict(mkeventd.syslog_facilities)[facnr]) + + table.cell(_("Service Level"), + dict(mkeventd.service_levels()).get(rule["sl"], rule["sl"])) + if defaults.omd_root: + hits = rule.get('hits') + table.cell(_("Hits"), hits != None and hits or '', css="number") + table.cell(_("Description"), rule.get("description")) + table.cell(_("Text to match"), rule.get("match")) + table.end() + + +def copy_rules_from_master(): + answer = mkeventd.query("REPLICATE 0") + if "rules" not in answer: + raise MKGeneralException(_("Cannot get rules from local event daemon.")) + rules = answer["rules"] + save_mkeventd_rules(rules) + + +def mode_mkeventd_edit_rule(phase): + rules = load_mkeventd_rules() + # Links from status view refer to rule via the rule id + if html.var("rule_id"): + rule_id = html.var("rule_id") + for nr, rule in enumerate(rules): + if rule["id"] == rule_id: + html.set_var("edit", str(nr)) + break + + edit_nr = int(html.var("edit", -1)) # missing -> new rule + clone_nr = int(html.var("clone", -1)) # Only needed in 'new' mode + new = edit_nr < 0 + + if phase == "title": + if new: + return _("Create new rule") + else: + return _("Edit rule %s" % rules[edit_nr]["id"]) + + elif phase == "buttons": + home_button() + mkeventd_rules_button() + mkeventd_changes_button() + if clone_nr >= 0: + html.context_button(_("Clear Rule"), html.makeuri([("_clear", "1")]), "clear") + return + + if new: + if clone_nr >= 0 and not html.var("_clear"): + rule = {} + rule.update(rules[clone_nr]) + else: + rule = {} + else: + rule = rules[edit_nr] + + if phase == "action": + if not html.check_transaction(): + return "mkeventd_rules" + + if not new: + old_id = rule["id"] + rule = vs_mkeventd_rule.from_html_vars("rule") + vs_mkeventd_rule.validate_value(rule, "rule") + if not new and old_id != rule["id"]: + raise MKUserError("rule_p_id", + _("It is not allowed to change the ID of an existing rule.")) + elif new: + for r in rules: + if r["id"] == rule["id"]: + raise MKUserError("rule_p_id", _("A rule with this ID already exists.")) + + try: + num_groups = re.compile(rule["match"]).groups + except: + raise MKUserError("rule_p_match", + _("Invalid regular expression")) + if num_groups > 9: + raise MKUserError("rule_p_match", + _("You matching text has too many regular expresssion subgroups. " + "Only nine are allowed.")) + + if "count" in rule and "expect" in rule: + raise MKUserError("rule_p_expect_USE", _("You cannot use counting and expecting " + "at the same time in the same rule.")) + + if "expect" in rule and "delay" in rule: + raise MKUserError("rule_p_expect_USE", _("You cannot use expecting and delay " + "at the same time in the same rule, sorry.")) + + # Make sure that number of group replacements do not exceed number + # of groups in regex of match + num_repl = 9 + while num_repl > num_groups: + repl = "\\%d" % num_repl + for name, value in rule.items(): + if name.startswith("set_") and type(value) in [ str, unicode ]: + if repl in value: + raise MKUserError("rule_p_" + name, + _("You are using the replacment reference \%d, " + "but your match text has only %d subgroups." 
% ( + num_repl, num_groups))) + num_repl -= 1 + + + if new and clone_nr >= 0: + rules[clone_nr:clone_nr] = [ rule ] + elif new: + rules = [ rule ] + rules + else: + rules[edit_nr] = rule + + save_mkeventd_rules(rules) + if new: + log_mkeventd("new-rule", _("Created new event correlation rule with id %s" % rule["id"])) + else: + log_mkeventd("edit-rule", _("Modified event correlation rule %s" % rule["id"])) + # Reset hit counters of this rule + mkeventd.query("COMMAND RESETCOUNTERS;" + rule["id"]) + return "mkeventd_rules" + + + html.begin_form("rule") + vs_mkeventd_rule.render_input("rule", rule) + vs_mkeventd_rule.set_focus("rule") + forms.end() + html.button("save", _("Save")) + html.hidden_fields() + html.end_form() + +def mkeventd_reload(): + mkeventd.query("COMMAND RELOAD") + try: + os.remove(log_dir + "mkeventd.log") + except OSError: + pass # ignore non-existing logfile + log_audit(None, "mkeventd-activate", _("Activated changes of event console configuration")) + +# This hook is executed when one applies the pending configuration changes +# related to the mkeventd via WATO on the local system. The hook is called +# without parameters. +def call_hook_mkeventd_activate_changes(): + if hooks.registered('mkeventd-activate-changes'): + hooks.call("mkeventd-activate-changes") + +def mode_mkeventd_changes(phase): + if phase == "title": + return _("Event Console - Pending Changes") + + elif phase == "buttons": + home_button() + mkeventd_rules_button() + if config.may("mkeventd.activate") and parse_audit_log("mkeventd") and mkeventd.daemon_running(): + html.context_button(_("Reload Config!"), + html.makeactionuri([("_activate", "now")]), "apply", hot=True) + mkeventd_config_button() + + elif phase == "action": + if html.check_transaction(): + mkeventd_reload() + call_hook_mkeventd_activate_changes() + return "mkeventd_rules", _("The new configuration has successfully been activated.") + + else: + if not mkeventd.daemon_running(): + warning = _("The Event Console Daemon is currently not running. ") + if defaults.omd_root: + warning += _("Please make sure that you have activated it with <tt>omd config set MKEVENTD on</tt> " + "before starting this site.") + html.show_warning(warning) + entries = parse_audit_log("mkeventd") + if entries: + render_audit_log(entries, "pending", hilite_others=True) + else: + html.write("<div class=info>" + _("There are no pending changes.") + "</div>") + +def log_mkeventd(what, message): + log_entry(None, what, message, "audit.log") # central WATO audit log + log_entry(None, what, message, "mkeventd.log") # pending changes for mkeventd + +def mkeventd_changes_button(): + pending = parse_audit_log("mkeventd") + if len(pending) > 0: + buttontext = "%d " % len(pending) + _("Changes") + hot = True + icon = "mkeventd" + else: + buttontext = _("No Changes") + hot = False + icon = "mkeventd" + html.context_button(buttontext, make_link([("mode", "mkeventd_changes")]), icon, hot) + +def mkeventd_rules_button(): + html.context_button(_("All Rules"), make_link([("mode", "mkeventd_rules")]), "back") + +def mkeventd_config_button(): + if config.may("mkeventd.config"): + html.context_button(_("Settings"), make_link([("mode", "mkeventd_config")]), "configuration") + +def mode_mkeventd_status(phase): + if phase == "title": + return _("Event Console - Server Status") + + elif phase == "buttons": + home_button() + mkeventd_rules_button() + mkeventd_config_button() + return + + elif phase == "action": + if config.may("mkeventd.switchmode"): + if html.has_var("_switch_sync"): + new_mode = "sync" + else: + new_mode = "takeover" + c = wato_confirm(_("Confirm switching replication mode"), + _("Do you really want to switch the event daemon to %s mode?" % + new_mode)) + if c: + mkeventd.query("COMMAND SWITCHMODE;%s" % new_mode) + log_audit(None, "mkeventd-switchmode", _("Switched replication slave mode to %s" % new_mode)) + return None, _("Switched to %s mode") % new_mode + elif c == False: + return "" + else: + return + + return + + if not mkeventd.daemon_running(): + warning = _("The Event Console Daemon is currently not running. ") + if defaults.omd_root: + warning += _("Please make sure that you have activated it with <tt>omd config set MKEVENTD on</tt> " + "before starting this site.") + html.show_warning(warning) + return + + response = mkeventd.query("GET status") + status = dict(zip(response[0], response[1])) + repl_mode = status["status_replication_slavemode"] + html.write("<h3>%s</h3>" % _("Current Server Status"))
      ") + html.write("
    • %s
    • " % _("Event Daemon is running.")) + html.write("
    • %s: %s
    • " % (_("Current replication mode"), + { "sync" : _("synchronize"), + "takeover" : _("Takeover!"), + }.get(repl_mode, _("master / standalone")))) + if repl_mode in [ "sync", "takeover" ]: + html.write(("
    • " + _("Status of last synchronization: %s") + "
    • ") % ( + status["status_replication_success"] and _("Success") or _("Failed!"))) + last_sync = status["status_replication_last_sync"] + if last_sync: + html.write("
    • " + _("Last successful sync %d seconds ago.") % (time.time() - last_sync) + "
    • ") + else: + html.write(_("
    • No successful synchronization so far.
    • ")) + + html.write("
    ") + + if config.may("mkeventd.switchmode"): + html.begin_form("switch") + if repl_mode == "sync": + html.button("_switch_takeover", _("Switch to Takeover mode!")) + elif repl_mode == "takeover": + html.button("_switch_sync", _("Switch back to sync mode!")) + html.hidden_fields() + html.end_form() + +def mode_mkeventd_edit_configvar(phasee): + if phase == 'title': + return _('Event Console Configuration') + + elif phase == 'buttons': + home_button() + mkeventd_rules_button() + mkeventd_changes_button() + html.context_button(_("Server Status"), make_link([("mode", "mkeventd_status")]), "status") + return + + vs = [ (v[1], v[2]) for v in g_configvar_groups[_("Event Console")] ] + pending_func = g_configvar_domains['mkeventd']['pending'] + current_settings = load_configuration_settings() + + if phase == 'action': + if not html.check_transaction(): + return + + for (varname, valuespec) in vs: + valuespec = dict(vs)[varname] + new_value = valuespec.from_html_vars(varname) + valuespec.validate_value(new_value, varname) + if current_settings.get(varname) != new_value: + msg = _("Changed configuration of %s to %s.") \ + % (varname, valuespec.value_to_text(new_value)) + pending_func(msg) + current_settings[varname] = new_value + + save_configuration_settings(current_settings) + config.load_config() # make new configuration active + return + + html.begin_form('mkeventd_config', method = "POST", action = 'wato.py?mode=mkeventd_config') + + html.button("_save", _("Save")) + html.hidden_fields() + html.end_form() + +def mode_mkeventd_config(phase): + if phase == 'title': + return _('Event Console Configuration') + + elif phase == 'buttons': + home_button() + mkeventd_rules_button() + mkeventd_changes_button() + html.context_button(_("Server Status"), make_link([("mode", "mkeventd_status")]), "status") + return + + vs = [ (v[1], v[2]) for v in g_configvar_groups[_("Event Console")] ] + current_settings = load_configuration_settings() + pending_func = g_configvar_domains['mkeventd']['pending'] + + if phase == "action": + varname = html.var("_varname") + action = html.var("_action") + if not varname: + return + domain, valuespec, need_restart, allow_reset, in_global_settings = g_configvars[varname] + def_value = valuespec.default_value() + + if action == "reset" and not isinstance(valuespec, Checkbox): + c = wato_confirm( + _("Resetting configuration variable"), + _("Do you really want to reset the configuration variable %s " + "back to the default value of %s?") % + (varname, valuespec.value_to_text(def_value))) + else: + if not html.check_transaction(): + return + c = True # no confirmation for direct toggle + + if c: + if varname in current_settings: + current_settings[varname] = not current_settings[varname] + else: + current_settings[varname] = not def_value + msg = _("Changed Configuration variable %s to %s." % (varname, + current_settings[varname] and "on" or "off")) + save_configuration_settings(current_settings) + pending_func(msg) + if action == "_reset": + return "mkeventd_config", msg + else: + return "mkeventd_config" + elif c == False: + return "" + else: + return None + + html.write('
+ forms.header(_('Event Console Settings')) + for (varname, valuespec) in vs: + defaultvalue = valuespec.default_value() + value = current_settings.get(varname, valuespec.default_value()) + + edit_url = make_link([("mode", "mkeventd_edit_configvar"), + ("varname", varname), ("site", html.var("site", ""))]) + help_text = type(valuespec.help()) == unicode and valuespec.help().encode("utf-8") or valuespec.help() or '' + title_text = type(valuespec.title()) == unicode and valuespec.title().encode("utf-8") or valuespec.title() + title = '<a href="%s" class=%s title="%s">%s</a>' % \ + (edit_url, varname in current_settings and "modified" or "", + html.strip_tags(help_text), title_text) + + to_text = valuespec.value_to_text(value) + + # Is this a simple (single) value or not? change styling in these cases... + simple = True + if '\n' in to_text or '<td>' in to_text: + simple = False + forms.section(title, simple=simple) + + + if isinstance(valuespec, Checkbox): + toggle_url = html.makeactionuri([("_action", "toggle"), ("_varname", varname)]) + toggle_value = current_settings.get(varname, defaultvalue) + html.icon_button(toggle_url, _("Immediately toggle this setting"), + "snapin_switch_" + (toggle_value and "on" or "off")) + else: + html.write('<a href="%s">%s</a>' % (edit_url, to_text)) + + forms.end() + html.write('</div>')
+ + +if mkeventd_enabled: + modes["mkeventd_rules"] = (["mkeventd.edit"], mode_mkeventd_rules) + modes["mkeventd_edit_rule"] = (["mkeventd.edit"], mode_mkeventd_edit_rule) + modes["mkeventd_changes"] = (["mkeventd.edit"], mode_mkeventd_changes) + modes["mkeventd_status"] = ([], mode_mkeventd_status) + modes["mkeventd_config"] = (['mkeventd.config'], mode_mkeventd_config) + modes["mkeventd_edit_configvar"] = (['mkeventd.config'], lambda p: mode_edit_configvar(p, 'mkeventd')) + + + +#. +# .--Permissions---------------------------------------------------------. +# | ____ _ _ | +# | | _ \ ___ _ __ _ __ ___ (_)___ ___(_) ___ _ __ ___ | +# | | |_) / _ \ '__| '_ ` _ \| / __/ __| |/ _ \| '_ \/ __| | +# | | __/ __/ | | | | | | | \__ \__ \ | (_) | | | \__ \ | +# | |_| \___|_| |_| |_| |_|_|___/___/_|\___/|_| |_|___/ | +# | | +# +----------------------------------------------------------------------+ +# | Declaration of Event Console specific permissions for Multisite | +# '----------------------------------------------------------------------' + +if mkeventd_enabled: + config.declare_permission_section("mkeventd", _("Event Console")) + + config.declare_permission("mkeventd.config", + _("Configuration of Event Console "), + _("This permission allows to configure the global settings " + "of the event console."), + ["admin"]) + + config.declare_permission("mkeventd.edit", + _("Configuration of event rules"), + _("This permission allows the creation, modification and " + "deletion of event correlation rules."), + ["admin"]) + + config.declare_permission("mkeventd.activate", + _("Activate changes for event console"), + _("Activation of changes for the event console (rule modification, " + "global settings) is done separately from the monitoring configuration " + "and needs this permission."), + ["admin"]) + + config.declare_permission("mkeventd.switchmode", + _("Switch slave replication mode"), + _("This permission is only useful if the Event Console is set up as a replication " + "slave. It allows a manual switch between sync and takeover mode."), + ["admin"]) + + modules.append( + ( "mkeventd_rules", _("Event Console"), "mkeventd", "mkeventd.edit", + _("Manage event classification and correlation rules for the " + "event console"))) + + +#. +# .--Settings & Rules----------------------------------------------------. +# | ____ _ _ _ ____ _ | +# |/ ___| ___| |_| |_(_)_ __ __ _ ___ _ | _ \ _ _| | ___ ___ | +# |\___ \ / _ \ __| __| | '_ \ / _` / __|_| |_| |_) | | | | |/ _ \/ __| | +# | ___) | __/ |_| |_| | | | | (_| \__ \_ _| _ <| |_| | | __/\__ \ | +# ||____/ \___|\__|\__|_|_| |_|\__, |___/ |_| |_| \_\\__,_|_|\___||___/ | +# | |___/ | +# +----------------------------------------------------------------------+ +# | Declarations for global settings of EC parameters and of a rule for | +# | active checks that query the EC status of a host. 
| +# '----------------------------------------------------------------------' + + +if mkeventd_enabled: + register_configvar_domain("mkeventd", mkeventd_config_dir, + pending = lambda msg: log_mkeventd('config-change', msg), in_global_settings = False) + group = _("Event Console") + + register_configvar(group, + "remote_status", + Optional( + Tuple( + elements = [ + Integer( + title = _("Port number:"), + help = _("If you are running the mkeventd as a non-root user (such as in an OMD site) " + "please choose a port number greater than 1024."), + minvalue = 1, + maxvalue = 65535, + default_value = 6558, + ), + Checkbox( + title = _("Security"), + label = _("allow execution of commands and actions via TCP"), + help = _("Without this option the access is limited to querying the current " + "and historic event status."), + default_value = False, + true_label = _("allow commands"), + false_label = _("no commands"), + ), + Optional( + ListOfStrings( + help = _("The access to the event status via TCP will only be allowed from " + "these source IP addresses"), + + valuespec = IPv4Address(), + orientation = "horizontal", + allow_empty = False, + ), + label = _("Restrict access to the following source IP addresses"), + none_label = _("access unrestricted"), + ) + ], + ), + title = _("Access to event status via TCP"), + help = _("In Multisite setups, if you want event status checks for hosts that " + "live on a remote site, you need to activate remote access to the event status socket " + "via TCP. This allows to query the current event status via TCP. If you do not restrict " + "this to queries, event actions are also possible from remote. This feature is not used " + "by the event status checks nor by Multisite, so we propose not allowing commands via TCP."), + none_label = _("no access via TCP"), + ), + domain = "mkeventd", + ) + + register_configvar(group, + "replication", + Optional( + Dictionary( + optional_keys = [ "takeover", "fallback", "disabled", "logging" ], + elements = [ + ( "master", + Tuple( + title = _("Master Event Console"), + help = _("Specify the host name or IP address of the master Event Console that " + "you want to replicate from. 
The port number must be the same as set " + "in the master in Access to event status via TCP."), + elements = [ + TextAscii( + title = _("Hostname/IP address of Master Event Console:"), + allow_empty = False, + attrencode = True, + ), + Integer( + title = _("TCP Port number of status socket:"), + minvalue = 1, + maxvalue = 65535, + default_value = 6558, + ), + ], + ) + ), + ( "interval", + Integer( + title = _("Replication interval"), + help = _("The replication will be triggered at this interval, in seconds."), + label = _("Do a replication every"), + unit = _("sec"), + minvalue = 1, + default_value = 10, + ), + ), + ( "connect_timeout", + Integer( + title = _("Connect Timeout"), + help = _("TCP connect timeout for connecting to the master"), + label = _("Try bringing up TCP connection for"), + unit = _("sec"), + minvalue = 1, + default_value = 10, + ), + ), + ( "takeover", + Integer( + title = _("Automatic takeover"), + help = _("If you enable this option then the slave will automatically " + "take over and enable event processing if the master is " + "unreachable for the configured number of seconds."), + label = _("Takeover after a master downtime of"), + unit = _("sec"), + minvalue = 1, + default_value = 30, + ), + ), + ( "fallback", + Integer( + title = _("Automatic fallback"), + help = _("If you enable this option then the slave will automatically " + "fall back from takeover mode to slave mode if the master is " + "reachable again within the selected number of seconds since " + "the previous unreachability (not since the takeover)."), + label = _("Fallback if master comes back within"), + unit = _("sec"), + minvalue = 1, + default_value = 60, + ), + ), + ( "disabled", + FixedValue( + True, + totext = _("Replication is disabled"), + title = _("Currently disable replication"), + help = _("This allows you to disable the replication without losing " + "your settings. If you check this box, then no replication " + "will be done and the Event Console will act as its own master."), + ), + ), + ( "logging", + FixedValue( + True, + title = _("Log replication events"), + totext = _("logging is enabled"), + help = _("Enabling this option will create detailed log entries for all " + "replication activities of the slave. If disabled only problems " + "will be logged."), + ), + ), + ] + ), + title = _("Enable replication from a master"), + ), + domain = "mkeventd", + ) + + + + register_configvar(group, + "retention_interval", + Age(title = _("State Retention Interval"), + help = _("In this interval the event daemon will save its state " + "to disk, so that you won't lose your current event " + "state in case of a crash."), + default_value = 60, + ), + domain = "mkeventd", + ) + + register_configvar(group, + "housekeeping_interval", + Age(title = _("Housekeeping Interval"), + help = _("From time to time the eventd checks for messages that are expected to " + "be seen on a regular basis, for events that time out and for " + "count periods that elapse. Here you can specify the regular interval " + "for that job."), + default_value = 60, + ), + domain = "mkeventd", + ) + + register_configvar(group, + "statistics_interval", + Age(title = _("Statistics Interval"), + help = _("The event daemon keeps statistics about the rate of messages, events, " + "rule hits, and other stuff. 
These values are updated in the interval " + "configured here and are available in the sidebar snapin Event Console " + "Performance"), + default_value = 5, + ), + domain = "mkeventd", + ) + + register_configvar(group, + "debug_rules", + Checkbox(title = _("Debug rule execution"), + label = _("enable extensive rule logging"), + help = _("This option turns on logging the execution of rules. For each message received " + "the execution details of each rule are logged. This creates an immense " + "volume of logging and should never be used in production."), + default_value = False), + domain = "mkeventd", + ) + + register_configvar(group, + "log_messages", + Checkbox(title = _("Syslog-like message logging"), + label = _("Log all messages into syslog-like logfiles"), + help = _("When this option is enabled, then every incoming message is being " + "logged into the directory messages in the Event Console's state " + "directory. The logfile rotation is analogous to that of the history logfiles. " + "Please note that if you have lots of incoming messages then these " + "files can get very large."), + default_value = False), + domain = "mkeventd", + ) + + register_configvar(group, + "rule_optimizer", + Checkbox(title = _("Optimize rule execution"), + label = _("enable optimized rule execution"), + help = _("This option turns on a faster algorithm for matching events to rules. "), + default_value = True), + domain = "mkeventd", + ) + + register_configvar(group, + "log_rulehits", + Checkbox(title = _("Log rule hits"), + label = _("Log hits for rules in log of mkeventd"), + help = _("If you enable this option then every time an event matches a rule " + "(by normal hit, cancelling, counting or dropping) a log entry will be written " + "into the log file of the mkeventd. Please be aware that this might lead to " + "a large number of log entries. "), + default_value = False), + domain = "mkeventd", + ) + + register_configvar(group, + "debug_mkeventd_queries", + Checkbox(title = _("Debug queries to mkeventd"), + label = _("enable debugging of queries"), + help = _("With this option turned on all queries made to the event daemon " + "will be displayed."), + default_value = False), + domain = "mkeventd", + ) + + register_configvar(group, + "actions", + vs_mkeventd_actions, + allow_reset = False, + domain = "mkeventd", + ) + + register_configvar(group, + "archive_orphans", + Checkbox(title = _("Force message archiving"), + label = _("Archive messages that do not match any rule"), + help = _("When this option is enabled then messages that do not match " + "a rule will be archived into the event history anyway (Messages " + "that do match a rule will be archived always, as long as they are not " + "explicitly dropped or aggregated by counting.)"), + default_value = False), + domain = "mkeventd", + ) + + register_configvar(group, + "hostname_translation", + HostnameTranslation( + title = _("Hostname translation for incoming messages"), + help = _("When the Event Console receives a message, then the host name " + "that is contained in that message will be translated using " + "this configuration. This can be used for unifying host names " + "from messages with those of actively monitored hosts. 
Note: this translation " + "happens before any rule is applied.") + ), + domain = "mkeventd", + ) + + register_configvar(group, + "history_rotation", + DropdownChoice( + title = _("Event history logfile rotation"), + help = _("Specify how often a new file for the event history is created."), + choices = [ + ( "daily", _("daily")), + ( "weekly", _("weekly")) + ], + default_value = "daily", + ), + domain = "mkeventd", + ) + + register_configvar(group, + "history_lifetime", + Integer( + title = _("Event history lifetime"), + help = _("After this number of days, old logfiles of the event history " + "will be deleted."), + default_value = 365, + unit = _("days"), + minvalue = 1, + ), + domain = "mkeventd", + ) + + register_configvar(group, + "socket_queue_len", + Integer( + title = _("Max. number of pending connections to the status socket"), + help = _("When the Multisite GUI or the active check check_mkevents connects " + "to the socket of the event daemon in order to retrieve information " + "about current and historic events then its connection request might " + "be queued before being processed. This setting defines the number of unaccepted " + "connections to be queued before refusing new connections."), + minvalue = 1, + default_value = 10, + label = "max.", + unit = _("pending connections"), + ), + domain = "mkeventd", + ) + + register_configvar(group, + "eventsocket_queue_len", + Integer( + title = _("Max. number of pending connections to the event socket"), + help = _("The event socket is an alternative way for sending events " + "to the Event Console. It is used by the Check_MK logwatch check " + "when forwarding log messages to the Event Console. " + "This setting defines the number of unaccepted " + "connections to be queued before refusing new connections."), + minvalue = 1, + default_value = 10, + label = "max.", + unit = _("pending connections"), + ), + domain = "mkeventd", + ) + + # A few settings for Multisite and WATO + register_configvar(_("Status GUI (Multisite)"), + "mkeventd_connect_timeout", + Integer( + title = _("Connect timeout to status socket of Event Console"), + help = _("When the Multisite GUI connects to the socket of the event daemon " + "in order to retrieve information about current and historic events " + "then this timeout will be applied."), + minvalue = 1, + maxvalue = 120, + default_value = 10, + unit = "sec", + ), + domain = "multisite", + ) + + register_configvar(_("Configuration GUI (WATO)"), + "mkeventd_pprint_rules", + Checkbox(title = _("Pretty-Print rules in config file of Event Console"), + label = _("enable pretty-printing of rules"), + help = _("When the WATO module of the Event Console saves rules to the file " + "mkeventd.d/wato/rules.mk it usually prints the Python " + "representation of the rules-list into one single line by using the " + "native Python code generator. Enabling this option switches to pprint, " + "which nicely indents everything. While this is a bit slower for large " + "rulesets it makes debugging and manual editing simpler."), + default_value = False), + domain = "multisite", + ) + + + +# Settings that should also be available on distributed sites that +# do not run their own eventd but want to query one or send notifications +# to one. 
+group = _("Notification") +register_configvar(group, + "mkeventd_notify_contactgroup", + GroupSelection( + "contact", + title = _("Send notifications to Event Console"), + no_selection = _("(don't send notifications to Event Console)"), + label = _("send notifications of contactgroup:"), + help = _("If you select a contact group here, then all notifications of " + "hosts and services in that contact group will be sent to the " + "event console. Note: you still need to create a rule " + "matching those messages in order to have events created. Note (2): " + "If you are using the Check_MK Micro Core then this setting is deprecated. " + "Please use the notification plugin Forward Notification to Event Console instead."), + default_value = '', + + ), + domain = "multisite", + need_restart = True) + +register_configvar(group, + "mkeventd_notify_remotehost", + Optional( + TextAscii( + title = _("Host running Event Console"), + attrencode = True, + ), + title = _("Send notifications to remote Event Console"), + help = _("This will send the notification to a Check_MK Event Console on a remote host " + "by using syslog. Note: this setting will only be applied if no Event " + "Console is running locally in this site! That way you can use the same global " + "settings on your central and decentralized systems, which makes distributed WATO " + "easier. Please also make sure that Send notifications to Event Console " + "is enabled."), + label = _("Send to remote Event Console via syslog"), + none_label = _("Do not send to remote host"), + ), + domain = "multisite", + need_restart = True) + +register_configvar(group, + "mkeventd_notify_facility", + DropdownChoice( + title = _("Syslog facility for Event Console notifications"), + help = _("When sending notifications from the monitoring system to the event console " + "the following syslog facility will be set for these messages. Choosing " + "a unique facility makes creation of rules easier."), + choices = mkeventd.syslog_facilities, + default_value = 16, # local0 + ), + domain = "multisite", + need_restart = True) + + +register_rulegroup("eventconsole", + _("Event Console"), + _("Settings and Checks dealing with the Check_MK Event Console")) +group = "eventconsole" + + +register_rule( + group, + "active_checks:mkevents", + Dictionary( + title = _("Check event state in Event Console"), + help = _("This check is part of the Check_MK Event Console and will check " + "if there are any open events for a certain host (and maybe a certain " + "application on that host). The state of the check will reflect the status " + "of the worst open event for that host."), + elements = [ + ( "hostspec", + OptionalDropdownChoice( + title = _("Host specification"), + help = _("When querying the event status you can either use the monitoring " + "host name, the IP address or a custom host name for referring to a " + "host. 
This is needed in cases where the event source (syslog, snmptrapd) " + "does not send a host name that matches the monitoring host name."), + choices = [ + ( '$HOSTNAME$', _("Monitoring host name") ), + ( '$HOSTADDRESS$', _("Host IP address" ) ), + ( '$HOSTNAME$/$HOSTADDRESS$', _("Try both host name and IP address" ) ), + ], + otherlabel = _("Specify explicitly"), + explicit = TextAscii(allow_empty = False, attrencode = True), + default_value = '$HOSTNAME$/$HOSTADDRESS$', + ) + ), + ( "item", + TextAscii( + title = _("Item (Used in service description)"), + help = _("If you enter an item name here, this will be used as " + "part of the service description after the prefix \"Events \". " + "The prefix plus the configured item must result in a unique " + "service description per host. If you leave this empty, either the " + "string provided in \"Application\" is used as the item or the service " + "gets no item if the \"Application\" field is also not configured."), + allow_empty = False, + ) + ), + ( "application", + RegExp( + title = _("Application (regular expression)"), + help = _("If you enter an application name here then only " + "events for that application name are counted. You enter " + "a regular expression here that must match a part " + "of the application name. Use anchors ^ and $ " + "if you need a complete match."), + allow_empty = False, + ) + ), + ( "ignore_acknowledged", + FixedValue( + True, + title = _("Ignore Acknowledged Events"), + help = _("If you check this box then only open events are honored when " + "determining the event state. Acknowledged events are displayed " + "(i.e. their count) but not taken into account."), + totext = _("acknowledged events will not be honored"), + ) + ), + ( "less_verbose", + FixedValue( + True, + title = _("Less Verbose Output"), + help = _("If enabled, the check reports less information in its output. " + "You will see no information regarding the worst state or unacknowledged events. " + " For example, a default output without this option is " + "WARN - 1 events (1 unacknowledged), worst state is WARN (Last line: Incomplete Content). " + "Output with less verbosity: " + "WARN - 1 events (Worst line: Incomplete Content)
    " + ), + ) + ), + ( "remote", + Alternative( + title = _("Access to the Event Console"), + style = "dropdown", + elements = [ + FixedValue( + None, + title = _("Connect to the local Event Console"), + totext = _("local connect"), + ), + Tuple( + elements = [ + TextAscii( + title = _("Hostname/IP address of Event Console:"), + allow_empty = False, + attrencode = True, + ), + Integer( + title = _("TCP Port number:"), + minvalue = 1, + maxvalue = 65535, + default_value = 6558, + ), + ], + title = _("Access via TCP"), + help = _("In a distributed setup where the Event Console is not running in the same " + "site as the host is monitored you need to access the remote Event Console " + "via TCP. Please make sure that this is activated in the global settings of " + "the event console. The default port number is 6558."), + ), + TextAscii( + title = _("Access via UNIX socket"), + allow_empty = False, + size = 64, + attrencode = True, + ), + + ], + default_value = defaults.omd_root + and defaults.omd_root + "/tmp/run/mkeventd/status" + or defaults.livestatus_unix_socket.split("/",1)[0] + "/mkeventd/status" + ) + ), + ], + optional_keys = [ "application", "remote", "ignore_acknowledged", "less_verbose", "item" ], + ), + match = 'all', +) + +sl_help = _("A service level is a number that describes the business impact of a host or " + "service. This level can be used in rules for notifications, as a filter in " + "views or as a criteria in rules for the Event Console. A higher service level " + "is assumed to be more business critical. This ruleset allows to assign service " + "levels to hosts and/or services. Note: if you assign a service level to " + "a host with the ruleset Service Level of hosts, then this level is " + "inherited to all services that do not have explicitely assigned a service " + "with the ruleset Service Level of services. Assigning no service level " + "is equal to defining a level of 0.

    The list of available service " + "levels is configured via a global option." % + "wato.py?varname=mkeventd_service_levels&mode=edit_configvar") + +register_rule( + "grouping", + "extra_host_conf:_ec_sl", + DropdownChoice( + title = _("Service Level of hosts"), + help = sl_help, + choices = mkeventd.service_levels, + ), + match = 'first', +) + +register_rule( + "grouping", + "extra_service_conf:_ec_sl", + DropdownChoice( + title = _("Service Level of services"), + help = sl_help + _(" Note: if no service level is configured for a service " + "then that of the host will be used instead (if configured)."), + choices = mkeventd.service_levels, + ), + itemtype = 'service', + match = 'first', +) + +contact_help = _("This rule set is useful if you send your monitoring notifications " + "into the Event Console. The contact information that is set by this rule " + "will be put into the resulting event in the Event Console.") +contact_regex = r"^[^;'$|]*$" +contact_regex_error = _("The contact information must not contain one of the characters ; ' | or $") + +register_rule( + group, + "extra_host_conf:_ec_contact", + TextUnicode( + title = _("Host contact information"), + help = contact_help, + size = 80, + regex = contact_regex, + regex_error = contact_regex_error, + attrencode = True, + ), + match = 'first', +) + +register_rule( + group, + "extra_service_conf:_ec_contact", + TextUnicode( + title = _("Service contact information"), + help = contact_help + _(" Note: if no contact information is configured for a service " + "then that of the host will be used instead (if configured)."), + size = 80, + regex = contact_regex, + regex_error = contact_regex_error, + attrencode = True, + ), + itemtype = 'service', + match = 'first', +) +#. +# .--Notifications-------------------------------------------------------. +# | _ _ _ _ __ _ _ _ | +# | | \ | | ___ | |_(_)/ _(_) ___ __ _| |_(_) ___ _ __ ___ | +# | | \| |/ _ \| __| | |_| |/ __/ _` | __| |/ _ \| '_ \/ __| | +# | | |\ | (_) | |_| | _| | (_| (_| | |_| | (_) | | | \__ \ | +# | |_| \_|\___/ \__|_|_| |_|\___\__,_|\__|_|\___/|_| |_|___/ | +# | | +# +----------------------------------------------------------------------+ +# | Stuff for sending monitoring notifications into the event console. | +# '----------------------------------------------------------------------' +def mkeventd_update_notifiation_configuration(hosts): + # Setup notification into the Event Console. Note: If + # the event console is not activated then also the global + # default settings are missing and we must skip this code. + # This can happen in a D-WATO setup where the master has + # enabled the EC and the slave not. + try: + contactgroup = config.mkeventd_notify_contactgroup + remote_console = config.mkeventd_notify_remotehost + except: + return + + if not remote_console: + remote_console = "" + + path = defaults.nagios_conf_dir + "/mkeventd_notifications.cfg" + if not contactgroup and os.path.exists(path): + os.remove(path) + elif contactgroup: + file(path, "w").write("""# Created by Check_MK Event Console +# This configuration will send notifications about hosts and +# services in the contact group '%(group)s' to the Event Console. 
+ +define contact { + contact_name mkeventd + alias "Notifications for Check_MK Event Console" + contactgroups %(group)s + host_notification_commands mkeventd-notify-host + service_notification_commands mkeventd-notify-service + host_notification_options d,u,r + service_notification_options c,w,u,r + host_notification_period 24X7 + service_notification_period 24X7 + email none +} + +define command { + command_name mkeventd-notify-host + command_line mkevent -n %(facility)s '%(remote)s' $HOSTSTATEID$ '$HOSTNAME$' '' '$HOSTOUTPUT$' '$_HOSTEC_SL$' '$_HOSTEC_CONTACT$' +} + +define command { + command_name mkeventd-notify-service + command_line mkevent -n %(facility)s '%(remote)s' $SERVICESTATEID$ '$HOSTNAME$' '$SERVICEDESC$' '$SERVICEOUTPUT$' '$_SERVICEEC_SL$' '$_SERVICEEC_CONTACT$' '$_HOSTEC_SL$' '$_HOSTEC_CONTACT$' +} +""" % { "group" : contactgroup, "facility" : config.mkeventd_notify_facility, "remote" : remote_console }) + +register_hook("pre-activate-changes", mkeventd_update_notifiation_configuration) + +# Only register the reload hook when mkeventd is enabled +if mkeventd_enabled: + register_hook("activate-changes", lambda hosts: mkeventd_reload()) + diff -Nru check-mk-1.2.2p3/ups_bat_temp check-mk-1.2.6p12/ups_bat_temp --- check-mk-1.2.2p3/ups_bat_temp 2013-11-05 09:23:08.000000000 +0000 +++ check-mk-1.2.6p12/ups_bat_temp 2015-09-21 10:59:54.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. @@ -28,36 +28,31 @@ ups_bat_temp_default = (40, 50) # warning / critical def inventory_ups_bat_temp(info): - if len(info) > 0: + # 2nd condition is needed to catch some UPS devices which do not have + # any temperature sensor but report a 0 as upsBatteryTemperature. 
Skip those lines + if len(info) > 0 and saveint(info[0][1]) != 0: return [ ( x[0], "ups_bat_temp_default") for x in info ] def check_ups_bat_temp(item, params, info): - warn, crit = params for line in info: if line[0] == item: - power = int(line[1]) - perfdata = [ ( "temp", power, warn, crit, 80 ) ] - infotext = " - current: %d°C , (warn/crit at %d°C/%d°C) " % \ - (power, warn, crit) - - if power >= crit: - return (2, "CRIT" + infotext, perfdata) - elif power >= warn: - return (1, "WARN" + infotext, perfdata) - else: - return (0, "OK" + infotext, perfdata) + status, infotext, perfdata = check_temperature(int(line[1]), params) + perfdatanew = [ perfdata[0] + (80,) ] + return status, infotext, perfdatanew - return (3, "UNKNOWN - Temperatur %s not found in SNMP output" % item) check_info['ups_bat_temp'] = { "inventory_function" : inventory_ups_bat_temp, "check_function" : check_ups_bat_temp, - "service_description" : "Battery Temp", + "service_description" : "Temperature Battery %s", "has_perfdata" : True, "group" : "hw_temperature", - "snmp_info" : ( ".1.3.6.1.2.1.33.1", ["1.5", "2.7" ] ), + "snmp_info" : ( ".1.3.6.1.2.1.33.1", [ + "1.5", # upsIdentName + "2.7", # upsBatteryTemperature + ] ), "snmp_scan_function" : lambda oid: oid(".1.3.6.1.2.1.1.2.0") in \ - [ ".1.3.6.1.4.1.818.1.100.1.1", ".1.3.6.1.2.1.33", ".1.3.6.1.2.1.33.2" ] \ + [ ".1.3.6.1.4.1.818.1.100.1.1", ".1.3.6.1.4.1.705.1.2", ".1.3.6.1.2.1.33", ".1.3.6.1.2.1.33.2", ".1.3.6.1.4.1.818.1.100.1.2" ] \ or oid('.1.3.6.1.2.1.33.1.1.1.0').startswith('RPS'), - + "includes" : [ "temperature.include" ], } diff -Nru check-mk-1.2.2p3/ups_capacity check-mk-1.2.6p12/ups_capacity --- check-mk-1.2.2p3/ups_capacity 2013-11-05 09:23:08.000000000 +0000 +++ check-mk-1.2.6p12/ups_capacity 2015-09-21 10:59:54.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. @@ -42,39 +42,23 @@ # SNMPv2-SMI::mib-2.33.1.3.1.0 = Counter32: 13 # SNMPv2-SMI::mib-2.33.1.3.2.0 = INTEGER: 3 -ups_capacity_defaul_levels = (0, 0) +#ups_capacity_defaul_levels = (0, 0) +ups_capacity_default_levels = { 'battime': (0, 0), 'capacity': (95, 90) } def inventory_ups_capacity(info): if len(info) > 0: - return [(None, 'ups_capacity_defaul_levels')] - -def check_ups_capacity(item, params, info): - #To support inventorys with the old version - if params != None: - warn, crit = params - else: - warn, crit = (0, 0) - - time_on_bat, minutes_left, percent_fuel = map(int, info[0]) - perf = [ - ('capacity', minutes_left, warn, crit), - ('percent', percent_fuel), - ] - - info_text = "remaining capacity %d minutes (%d%%)" % (minutes_left, percent_fuel) - if time_on_bat > crit: - return(2, "CRIT - On battery since %ds(!!) " % time_on_bat + info_text, perf) - elif time_on_bat > warn: - return(1, "WARN - On battery since %ds(!) 
" % time_on_bat + info_text, perf) - else: - return(0, "OK - " + info_text, perf) + return [(None, 'ups_capacity_default_levels')] check_info["ups_capacity"] = { "check_function" : check_ups_capacity, "inventory_function" : inventory_ups_capacity, "service_description" : "Battery capacity", "has_perfdata" : True, + "group" : "ups_capacity", + "includes" : [ "ups_capacity.include" ], "snmp_info" : ( ".1.3.6.1.2.1.33.1.2", [ "2.0" , "3.0" ,"4.0" ] ), "snmp_scan_function" : lambda oid: oid(".1.3.6.1.2.1.1.2.0") in \ - [ ".1.3.6.1.4.1.534.1", ".1.3.6.1.4.1.705.1", ".1.3.6.1.4.1.818.1.100.1.1", ".1.3.6.1.2.1.33", ".1.3.6.1.2.1.33.2" ] + [ ".1.3.6.1.4.1.534.1", ".1.3.6.1.4.1.705.1", ".1.3.6.1.4.1.818.1.100.1.1", + ".1.3.6.1.2.1.33", ".1.3.6.1.4.1.705.1.2", ".1.3.6.1.2.1.33.2", ".1.3.6.1.4.1.5491.6", + ".1.3.6.1.4.1.476.1.42", ".1.3.6.1.4.1.818.1.100.1.2" ] } diff -Nru check-mk-1.2.2p3/ups_capacity.include check-mk-1.2.6p12/ups_capacity.include --- check-mk-1.2.2p3/ups_capacity.include 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/ups_capacity.include 2015-09-21 10:59:54.000000000 +0000 @@ -0,0 +1,70 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. 
+ +def check_ups_capacity(item, params, info, socomec=False): + # To support inventories with the old version + if type(params) is tuple: # old format with 2 params in tuple + warn, crit = params + cap_warn, cap_crit = (95, 90) + elif type(params) is dict: # new dict format + warn, crit = params.get('battime', (0, 0)) + cap_warn, cap_crit = params.get('capacity', (95, 90)) + else: + warn, crit = (0, 0) + cap_warn, cap_crit = (95, 90) + + time_on_bat, minutes_left, percent_fuel = map(int, info[0]) + + # Check time left on battery + if not (socomec and minutes_left == -1): + levelsinfo = "" + if minutes_left <= crit: + state = 2 + levelsinfo = " (crit at %d min)" % crit + elif minutes_left < warn: + state = 1 + levelsinfo = " (warn at %d min)" % warn + else: + state = 0 + yield state, "%d min left on battery" % minutes_left + levelsinfo, [ ('capacity', minutes_left, warn, crit) ] + + # Check percentage capacity + levelsinfo = "" + if percent_fuel <= cap_crit: + state = 2 + levelsinfo = " (crit at %d%%)" % cap_crit + elif percent_fuel < cap_warn: + state = 1 + levelsinfo = " (warn at %d%%)" % cap_warn + else: + state = 0 + yield state, "capacity: %d%%" % percent_fuel + levelsinfo, [ ('percent', percent_fuel, cap_warn, cap_crit) ] + + # Output time on battery + if time_on_bat > 0: + yield 0, "On battery for %d min" % time_on_bat + + diff -Nru check-mk-1.2.2p3/ups_eaton_enviroment check-mk-1.2.6p12/ups_eaton_enviroment --- check-mk-1.2.2p3/ups_eaton_enviroment 2013-11-05 09:23:08.000000000 +0000 +++ check-mk-1.2.6p12/ups_eaton_enviroment 2015-06-24 09:48:37.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. 
@@ -29,7 +29,7 @@ def inventory_ups_in_freq(info): if len(info) > 0: - return [ ( x[0], "ups_in_freq_default_levels") for x in info ] + return [ ( x[0], "ups_in_freq_default_levels") for x in info if int(x[1]) > 0 ] def check_ups_in_freq(item, params, info): warn, crit = params @@ -37,25 +37,28 @@ if line[0] == item: power = int(line[1]) / 10 perfdata = [ ( "in_freq", power, warn, crit, 30, 70 ) ] - infotext = " - current in freq: %dHz (warn/crit at %d/%d Hz)" % \ + infotext = "in frequency: %dHz (warn/crit at %d/%d Hz)" % \ (power, warn, crit) if power <= crit: - return (2, "CRIT" + infotext, perfdata) + return (2, infotext, perfdata) elif power <= warn: - return (1, "WARN" + infotext, perfdata) + return (1, infotext, perfdata) else: - return (0, "OK" + infotext, perfdata) + return (0, infotext, perfdata) - return (3, "UNKNOWN - Phase %s not found in SNMP output" % item) + return (3, "Phase %s not found in SNMP output" % item) check_info['ups_in_freq'] = { "inventory_function" : inventory_ups_in_freq, "check_function" : check_ups_in_freq, - "service_description" : "IN frequenz phase %s", + "service_description" : "IN frequency phase %s", "has_perfdata" : True, + "group" : "efreq", "snmp_info" : ( ".1.3.6.1.2.1.33.1.3.3.1", [ 0, 2 ] ), "snmp_scan_function" : lambda oid: oid(".1.3.6.1.2.1.1.2.0") in \ - [ ".1.3.6.1.4.1.534.1", ".1.3.6.1.4.1.705.1", ".1.3.6.1.4.1.818.1.100.1.1", ".1.3.6.1.2.1.33", ".1.3.6.1.2.1.33.2" ] \ + [ ".1.3.6.1.4.1.534.1", ".1.3.6.1.4.1.705.1", ".1.3.6.1.4.1.818.1.100.1.1", + ".1.3.6.1.2.1.33", ".1.3.6.1.4.1.705.1.2", ".1.3.6.1.2.1.33.2", ".1.3.6.1.4.1.5491.6", + ".1.3.6.1.4.1.476.1.42", ".1.3.6.1.4.1.818.1.100.1.2" ] } diff -Nru check-mk-1.2.2p3/ups_in_voltage check-mk-1.2.6p12/ups_in_voltage --- check-mk-1.2.2p3/ups_in_voltage 2013-11-05 09:23:08.000000000 +0000 +++ check-mk-1.2.6p12/ups_in_voltage 2015-09-21 10:59:54.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. 
@@ -29,32 +29,18 @@ def inventory_ups_in_voltage(info): if len(info) > 0: - return [ ( x[0], "ups_in_voltage_default_levels") for x in info ] - -def check_ups_in_voltage(item, params, info): - warn, crit = params - for line in info: - if line[0] == item: - power = int(line[1]) - perfdata = [ ( "in_voltage", power, warn, crit, 150 ) ] - infotext = " - current in voltage: %dV, (warn/crit at %dV/%dV)" % \ - (power, warn, crit) - - if power <= crit: - return (2, "CRIT" + infotext, perfdata) - elif power <= warn: - return (1, "WARN" + infotext, perfdata) - else: - return (0, "OK" + infotext, perfdata) - - return (3, "UNKNOWN - Phase %s not found in SNMP output" % item) + return [ ( x[0], "ups_in_voltage_default_levels") for x in info if int(x[1]) > 0 ] check_info['ups_in_voltage'] = { "inventory_function" : inventory_ups_in_voltage, "check_function" : check_ups_in_voltage, "service_description" : "IN voltage phase %s", "has_perfdata" : True, + "group" : "evolt", + "includes" : [ 'ups_in_voltage.include' ], "snmp_info" : ( ".1.3.6.1.2.1.33.1.3.3.1", [ 0, 3 ] ), "snmp_scan_function" : lambda oid: oid(".1.3.6.1.2.1.1.2.0") in \ - [ ".1.3.6.1.4.1.534.1", ".1.3.6.1.4.1.705.1", ".1.3.6.1.4.1.818.1.100.1.1", ".1.3.6.1.2.1.33", ".1.3.6.1.2.1.33.2" ] + [ ".1.3.6.1.4.1.534.1", ".1.3.6.1.4.1.705.1",".1.3.6.1.4.1.705.1.2", ".1.3.6.1.4.1.818.1.100.1.1", + ".1.3.6.1.2.1.33", ".1.3.6.1.2.1.33.2", ".1.3.6.1.4.1.5491.6", + ".1.3.6.1.4.1.476.1.42", ".1.3.6.1.4.1.818.1.100.1.2" ] } diff -Nru check-mk-1.2.2p3/ups_in_voltage.include check-mk-1.2.6p12/ups_in_voltage.include --- check-mk-1.2.2p3/ups_in_voltage.include 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/ups_in_voltage.include 2015-06-24 09:48:37.000000000 +0000 @@ -0,0 +1,43 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. 
+ +def check_ups_in_voltage(item, params, info): + warn, crit = params + for line in info: + if line[0] == item: + power = int(line[1]) + perfdata = [ ( "in_voltage", power, warn, crit, 150 ) ] + infotext = "in voltage: %dV, (warn/crit at %dV/%dV)" % \ + (power, warn, crit) + + if power <= crit: + return (2, infotext, perfdata) + elif power <= warn: + return (1, infotext, perfdata) + else: + return (0, infotext, perfdata) + + return (3, "Phase %s not found in SNMP output" % item) diff -Nru check-mk-1.2.2p3/ups_out_load check-mk-1.2.6p12/ups_out_load --- check-mk-1.2.2p3/ups_out_load 2013-11-05 09:23:08.000000000 +0000 +++ check-mk-1.2.6p12/ups_out_load 2015-09-21 10:59:54.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. @@ -29,7 +29,7 @@ def inventory_ups_out_load(info): if len(info) > 0: - return [ ( x[0], "ups_out_load_default_levels") for x in info ] + return [ ( x[0], "ups_out_load_default_levels") for x in info if int(x[1]) > 0] def check_ups_out_load(item, params, info): warn, crit = params @@ -37,17 +37,17 @@ if line[0] == item: power = int(line[1]) perfdata = [ ( "out_load", power, warn, crit, 100 ) ] - infotext = " - current load: %d (warn/crit at %d/%d) " % \ + infotext = "load: %d (warn/crit at %d/%d) " % \ (power, warn, crit) if power >= crit: - return (2, "CRIT" + infotext, perfdata) + return (2, infotext, perfdata) elif power >= warn: - return (1, "WARN" + infotext, perfdata) + return (1, infotext, perfdata) else: - return (0, "OK" + infotext, perfdata) + return (0, infotext, perfdata) - return (3, "UNKNOWN - Phase %s not found in SNMP output" % item) + return (3, "Phase %s not found in SNMP output" % item) check_info['ups_out_load'] = { "inventory_function" : inventory_ups_out_load, @@ -56,5 +56,7 @@ "has_perfdata" : True, "snmp_info" : ( ".1.3.6.1.2.1.33.1.4.4.1", [ 0, 5 ] ), "snmp_scan_function" : lambda oid: oid(".1.3.6.1.2.1.1.2.0") in \ - [ ".1.3.6.1.4.1.534.1", ".1.3.6.1.4.1.705.1", ".1.3.6.1.4.1.818.1.100.1.1", ".1.3.6.1.2.1.33", ".1.3.6.1.2.1.33.2" ] + [ ".1.3.6.1.4.1.534.1", ".1.3.6.1.4.1.705.1",".1.3.6.1.4.1.705.1.2", ".1.3.6.1.4.1.818.1.100.1.1", + ".1.3.6.1.2.1.33", ".1.3.6.1.2.1.33.2", ".1.3.6.1.4.1.5491.6", + ".1.3.6.1.4.1.476.1.42", ".1.3.6.1.4.1.818.1.100.1.2" ] } diff -Nru check-mk-1.2.2p3/ups_out_voltage check-mk-1.2.6p12/ups_out_voltage --- check-mk-1.2.2p3/ups_out_voltage 2013-11-05 09:23:08.000000000 +0000 +++ check-mk-1.2.6p12/ups_out_voltage 2015-09-21 10:59:54.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. 
@@ -29,32 +29,18 @@ def inventory_ups_out_voltage(info): if len(info) > 0: - return [ ( x[0], "ups_out_voltage_default_levels") for x in info ] - -def check_ups_out_voltage(item, params, info): - warn, crit = params - for line in info: - if line[0] == item: - power = int(line[1]) - perfdata = [ ( "out_voltage", power, warn, crit, 200, 240 ) ] - infotext = " - current in voltage: %dV (warn/crit at %dV/%dV)" % \ - (power, warn, crit) - - if power <= crit: - return (2, "CRIT" + infotext, perfdata) - elif power <= warn: - return (1, "WARN" + infotext, perfdata) - else: - return (0, "OK" + infotext, perfdata) - - return (3, "UNKNOWN - Phase %s not found in SNMP output" % item) + return [ ( x[0], "ups_out_voltage_default_levels") for x in info if int(x[1]) > 0 ] check_info['ups_out_voltage'] = { "inventory_function" : inventory_ups_out_voltage, "check_function" : check_ups_out_voltage, "service_description" : "OUT voltage phase %s", "has_perfdata" : True, + "group" : "evolt", + "includes" : [ 'ups_out_voltage.include' ], "snmp_info" : ( ".1.3.6.1.2.1.33.1.4.4.1", [ 0, 2 ] ), "snmp_scan_function" : lambda oid: oid(".1.3.6.1.2.1.1.2.0") in \ - [ ".1.3.6.1.4.1.534.1", ".1.3.6.1.4.1.705.1", ".1.3.6.1.4.1.818.1.100.1.1", ".1.3.6.1.2.1.33", ".1.3.6.1.2.1.33.2" ] + [ ".1.3.6.1.4.1.534.1", ".1.3.6.1.4.1.705.1",".1.3.6.1.4.1.705.1.2", ".1.3.6.1.4.1.818.1.100.1.1", + ".1.3.6.1.2.1.33", ".1.3.6.1.2.1.33.2", ".1.3.6.1.4.1.5491.6", + ".1.3.6.1.4.1.476.1.42", ".1.3.6.1.4.1.818.1.100.1.2" ] } diff -Nru check-mk-1.2.2p3/ups_out_voltage.include check-mk-1.2.6p12/ups_out_voltage.include --- check-mk-1.2.2p3/ups_out_voltage.include 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/ups_out_voltage.include 2015-06-24 09:48:37.000000000 +0000 @@ -0,0 +1,43 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. 
+ +def check_ups_out_voltage(item, params, info): + warn, crit = params + for line in info: + if line[0] == item: + power = int(line[1]) + perfdata = [ ( "out_voltage", power, warn, crit, 200, 240 ) ] + infotext = "out voltage: %dV (warn/crit at %dV/%dV)" % \ + (power, warn, crit) + + if power <= crit: + return (2, infotext, perfdata) + elif power <= warn: + return (1, infotext, perfdata) + else: + return (0, infotext, perfdata) + + return (3, "Phase %s not found in SNMP output" % item) diff -Nru check-mk-1.2.2p3/ups_power check-mk-1.2.6p12/ups_power --- check-mk-1.2.2p3/ups_power 2013-11-05 09:23:08.000000000 +0000 +++ check-mk-1.2.6p12/ups_power 2015-09-21 10:59:54.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. @@ -28,7 +28,7 @@ def inventory_ups_power(info): if len(info) > 0: - return [ ( x[0], "ups_power_default_levels") for x in info ] + return [ ( x[0], "ups_power_default_levels") for x in info if int(x[1]) > 0 ] def check_ups_power(item, params, info): warn, crit = params @@ -40,25 +40,27 @@ if power < 0: power *= -1 perfdata = [ ( "power", power, warn, crit, 0 ) ] - infotext = " - current power: %dW (warn/crit at %dW/%dW)" % \ + infotext = "power: %dW (warn/crit at %dW/%dW)" % \ (power, warn, crit) if power <= crit: - return (2, "CRIT" + infotext, perfdata) + return (2, infotext, perfdata) elif power <= warn: - return (1, "WARN" + infotext, perfdata) + return (1, infotext, perfdata) else: - return (0, "OK" + infotext, perfdata) + return (0, infotext, perfdata) - return (3, "UNKNOWN - Phase %s not found in SNMP output" % item) + return (3, "Phase %s not found in SNMP output" % item) check_info['ups_power'] = { "inventory_function" : inventory_ups_power, "check_function" : check_ups_power, "service_description" : "Power phase %s", "has_perfdata" : True, - "group" : "hw_temperature", + "group" : "epower", "snmp_info" : ( ".1.3.6.1.2.1.33.1.4.4.1", [ 0, 4 ] ), "snmp_scan_function" : lambda oid: oid(".1.3.6.1.2.1.1.2.0") in \ - [ ".1.3.6.1.4.1.534.1", ".1.3.6.1.4.1.705.1", ".1.3.6.1.4.1.818.1.100.1.1", ".1.3.6.1.2.1.33", ".1.3.6.1.2.1.33.2" ] + [ ".1.3.6.1.4.1.534.1", ".1.3.6.1.4.1.705.1",".1.3.6.1.4.1.705.1.2", ".1.3.6.1.4.1.818.1.100.1.1", + ".1.3.6.1.2.1.33", ".1.3.6.1.2.1.33.2" , ".1.3.6.1.4.1.5491.6", + ".1.3.6.1.4.1.476.1.42", ".1.3.6.1.4.1.818.1.100.1.2" ] } diff -Nru check-mk-1.2.2p3/ups_socomec_capacity check-mk-1.2.6p12/ups_socomec_capacity --- check-mk-1.2.2p3/ups_socomec_capacity 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/ups_socomec_capacity 2015-09-21 10:59:54.000000000 +0000 @@ -0,0 +1,51 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. 
+# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. + +# upsBatteryStatus 1.3.6.1.4.1.4555.1.1.1.1.2.1 +# upsSecondsOnBattery 1.3.6.1.4.1.4555.1.1.1.1.2.2 +# upsEstimatedMinutesRemaining 1.3.6.1.4.1.4555.1.1.1.1.2.3 +# upsEstimatedChargeRemaining 1.3.6.1.4.1.4555.1.1.1.1.2.4 +# upsBatteryVoltage 1.3.6.1.4.1.4555.1.1.1.1.2.5 +# upsBatteryTemperature 1.3.6.1.4.1.4555.1.1.1.1.2.6 + + +ups_capacity_default_levels = { 'battime': (0, 0), 'capacity': (95, 90) } + +def inventory_ups_socomec_capacity(info): + if len(info) > 0: + return [(None, 'ups_capacity_default_levels')] + + +check_info["ups_socomec_capacity"] = { + "check_function" : lambda item, params, info: check_ups_capacity(item, params, info, socomec=True), + "inventory_function" : inventory_ups_socomec_capacity, + "service_description" : "Battery capacity", + "has_perfdata" : True, + "group" : "ups_capacity", + "snmp_info" : (".1.3.6.1.4.1.4555.1.1.1.1.2" , [ 2, 3, 4 ] ) , + "includes" : [ "ups_capacity.include" ], + "snmp_scan_function" : lambda oid: oid(".1.3.6.1.2.1.1.2.0") == ".1.3.6.1.4.1.4555.1.1.1", +} diff -Nru check-mk-1.2.2p3/ups_socomec_in_voltage check-mk-1.2.6p12/ups_socomec_in_voltage --- check-mk-1.2.2p3/ups_socomec_in_voltage 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/ups_socomec_in_voltage 2015-06-24 09:48:37.000000000 +0000 @@ -0,0 +1,49 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. 
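The check function of ups_socomec_capacity is only a thin lambda around check_ups_capacity() from ups_capacity.include (pulled in via 'includes'), and the inventory stores the levels by name. A rough standalone illustration, not actual Check_MK core code, of why returning the string 'ups_capacity_default_levels' works: the core resolves such names against its configuration namespace at check time, so a user can override the variable in main.mk:

ups_capacity_default_levels = {'battime': (0, 0), 'capacity': (95, 90)}

def resolve_params(params, namespace):
    # Legacy behaviour (simplified): a string is treated as the name of
    # a configuration variable and looked up when the check runs.
    if isinstance(params, str):
        return eval(params, namespace)
    return params

print(resolve_params('ups_capacity_default_levels', globals()))
# {'battime': (0, 0), 'capacity': (95, 90)}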
+ + +ups_in_voltage_default_levels = (210, 180) # warning / critical + +def inventory_socomec_ups_in_voltage(info): + if len(info) > 0: + return [ ( x[0], "ups_in_voltage_default_levels") for x in info if int(x[1]) > 0 ] + +def check_socomec_ups_in_voltage( item, params, info): + conv_info = [] + for line in info: + conv_info.append([ line[0], saveint(line[1]) / 10, line[1] ]) + return check_ups_in_voltage( item, params, conv_info) + +check_info['ups_socomec_in_voltage'] = { + "inventory_function" : inventory_socomec_ups_in_voltage, + "check_function" : check_socomec_ups_in_voltage, + "service_description" : "IN voltage phase %s", + "has_perfdata" : True, + "group" : "evolt", + "includes" : [ 'ups_in_voltage.include' ], + "snmp_info" : (".1.3.6.1.4.1.4555.1.1.1.1.3.3" , [ '1.1', '1.2' ] ) , + "snmp_scan_function" : lambda oid: oid(".1.3.6.1.2.1.1.2.0") == ".1.3.6.1.4.1.4555.1.1.1", +} diff -Nru check-mk-1.2.2p3/ups_socomec_outphase check-mk-1.2.6p12/ups_socomec_outphase --- check-mk-1.2.2p3/ups_socomec_outphase 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/ups_socomec_outphase 2015-09-21 10:59:54.000000000 +0000 @@ -0,0 +1,57 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. 
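The Socomec MIB reports voltages in tenths of a volt, which is why check_socomec_ups_in_voltage above rescales each row before delegating to the shared check_ups_in_voltage() from ups_in_voltage.include. A standalone sketch with invented rows (the shipped code runs under Python 2, where saveint(line[1]) / 10 is integer division; // is used here so the sketch behaves identically on Python 3):

def saveint(s):                 # mirrors Check_MK's fault-tolerant int()
    try:
        return int(s)
    except (ValueError, TypeError):
        return 0

info = [["1", "2305"], ["2", "2298"]]   # phase index, tenths of a volt
conv_info = [[line[0], saveint(line[1]) // 10, line[1]] for line in info]
print(conv_info)
# [['1', 230, '2305'], ['2', 229, '2298']]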
+ + +factory_settings["socomec_outphase_default_levels"] = { + "voltage" : (210, 200), + "load" : (80, 90) +} + + +def parse_ups_socomec_outphase(info): + parsed = {} + for index, rawvolt, rawcurr, rawload in info: + parsed[index] = { + "voltage" : (int(rawvolt) / 10, None), # The actual precision does not appear to + "current" : (int(rawcurr) / 10, None), # go beyond degrees, thus we drop the trailing 0 + "load" : (int(rawload), None), + } + return parsed + + +check_info['ups_socomec_outphase'] = { + "parse_function" : parse_ups_socomec_outphase, + "inventory_function" : inventory_elphase, + "check_function" : check_elphase, + "default_levels_variable" : "socomec_outphase_default_levels", + "service_description" : "Output Phase %s", + "has_perfdata" : True, + "group" : "ups_outphase", + "includes" : [ 'elphase.include' ], + # Phase Index, Voltage/dV, Current/dA, Load/% + "snmp_info" : ( ".1.3.6.1.4.1.4555.1.1.1.1.4.4.1" , [ '1', '2', '3', '4' ] ) , + "snmp_scan_function" : lambda oid: oid(".1.3.6.1.2.1.1.2.0") == ".1.3.6.1.4.1.4555.1.1.1", +} diff -Nru check-mk-1.2.2p3/ups_socomec_out_source check-mk-1.2.6p12/ups_socomec_out_source --- check-mk-1.2.2p3/ups_socomec_out_source 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/ups_socomec_out_source 2015-06-24 09:48:37.000000000 +0000 @@ -0,0 +1,69 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. 
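A worked example of parse_ups_socomec_outphase() above with one invented SNMP row (phase 1: 230.1 V, 5.2 A, 34 % load). As in the previous sketch, // stands in for Python 2's integer division:

def parse_ups_socomec_outphase(info):   # copy of the parse function above
    parsed = {}
    for index, rawvolt, rawcurr, rawload in info:
        parsed[index] = {
            "voltage": (int(rawvolt) // 10, None),
            "current": (int(rawcurr) // 10, None),
            "load":    (int(rawload), None),
        }
    return parsed

print(parse_ups_socomec_outphase([["1", "2301", "52", "34"]]))
# {'1': {'voltage': (230, None), 'current': (5, None), 'load': (34, None)}}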
+ +def inventory_ups_socomec_out_source(info): + if info: + return [ (None, None) ] + + +def check_ups_socomec_out_source(_no_item, _no_params, info): + +# This is from the old (v5.01) MIB and is incompatible with the new one below +# ups_socomec_source_states = { +# 1: (3, "Other"), +# 2: (2, "Offline"), +# 3: (0, "Normal"), +# 4: (1, "Internal Maintenance Bypass"), +# 5: (2, "On battery"), +# 6: (0, "Booster"), +# 7: (0, "Reducer"), +# 8: (0, "Standby"), +# 9: (0, "Eco mode"), +# } + +# This is from the new (v6) MIB + ups_socomec_source_states = { + 1: (3, "Unknown"), + 2: (2, "On inverter"), + 3: (0, "On mains"), + 4: (0, "Eco mode"), + 5: (1, "On bypass"), + 6: (0, "Standby"), + 7: (1, "On maintenance bypass"), + 8: (2, "UPS off"), + 9: (0, "Normal mode"), + } + + return ups_socomec_source_states[int(info[0][0])] + + +check_info['ups_socomec_out_source'] = { + "inventory_function" : inventory_ups_socomec_out_source, + "check_function" : check_ups_socomec_out_source, + "service_description" : "Output Source", + "snmp_info" : ( ".1.3.6.1.4.1.4555.1.1.1.1.4", ["1"] ), + "snmp_scan_function" : lambda oid: oid(".1.3.6.1.2.1.1.2.0") == ".1.3.6.1.4.1.4555.1.1.1", +} diff -Nru check-mk-1.2.2p3/ups_socomec_out_voltage check-mk-1.2.6p12/ups_socomec_out_voltage --- check-mk-1.2.2p3/ups_socomec_out_voltage 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/ups_socomec_out_voltage 2015-06-24 09:48:37.000000000 +0000 @@ -0,0 +1,49 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. 
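check_ups_socomec_out_source() above indexes its state map directly, so a status value outside 1..9 would raise a KeyError and abort the check. A defensive variant, offered here as a suggestion rather than the shipped code, degrades to UNKNOWN instead:

ups_socomec_source_states = {
    1: (3, "Unknown"), 2: (2, "On inverter"), 3: (0, "On mains"),
    4: (0, "Eco mode"), 5: (1, "On bypass"), 6: (0, "Standby"),
    7: (1, "On maintenance bypass"), 8: (2, "UPS off"), 9: (0, "Normal mode"),
}

def lookup_source_state(raw):
    # .get() with a fallback keeps the service alive on unexpected values.
    return ups_socomec_source_states.get(int(raw), (3, "Unexpected status %s" % raw))

print(lookup_source_state("3"))   # (0, 'On mains')
print(lookup_source_state("42"))  # (3, 'Unexpected status 42')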
+ + +ups_out_voltage_default_levels = (210, 180) # warning / critical + +def inventory_socomec_ups_out_voltage(info): + if len(info) > 0: + return [ ( x[0], "ups_out_voltage_default_levels") for x in info if int(x[1]) > 0 ] + +def check_socomec_ups_out_voltage( item, params, info): + conv_info = [] + for line in info: + conv_info.append([ line[0], saveint(line[1]) / 10, line[1] ]) + return check_ups_out_voltage( item, params, conv_info) + +check_info['ups_socomec_out_voltage'] = { + "inventory_function" : inventory_socomec_ups_out_voltage, + "check_function" : check_socomec_ups_out_voltage, + "service_description" : "OUT voltage phase %s", + "has_perfdata" : True, + "group" : "evolt", + "includes" : [ 'ups_out_voltage.include' ], + "snmp_info" : (".1.3.6.1.4.1.4555.1.1.1.1.4.4" , [ '1.1', '1.2' ] ) , + "snmp_scan_function" : lambda oid: oid(".1.3.6.1.2.1.1.2.0") == ".1.3.6.1.4.1.4555.1.1.1", +} diff -Nru check-mk-1.2.2p3/ups_test check-mk-1.2.6p12/ups_test --- check-mk-1.2.2p3/ups_test 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/ups_test 2015-09-21 10:59:54.000000000 +0000 @@ -0,0 +1,127 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. + +# Description of OIDs used from RFC 1628 +# OID: 1.3.6.1.2.1.33.1.7.3 +# upsTestResultsSummary OBJECT-TYPE +# SYNTAX INTEGER { +# donePass(1), +# doneWarning(2), +# doneError(3), +# aborted(4), +# inProgress(5), +# noTestsInitiated(6) +# } +# MAX-ACCESS read-only +# STATUS current +# DESCRIPTION +# "The results of the current or last UPS diagnostics +# test performed. The values for donePass(1), +# doneWarning(2), and doneError(3) indicate that the +# test completed either successfully, with a warning, or +# with an error, respectively. The value aborted(4) is +# returned for tests which are aborted by setting the +# value of upsTestId to upsTestAbortTestInProgress. +# Tests which have not yet concluded are indicated by +# inProgress(5). The value noTestsInitiated(6) +# indicates that no previous test results are available, +# such as is the case when no tests have been run since +# the last reinitialization of the network management +# subsystem and the system has no provision for non- +# volatile storage of test results." 
+ +# OID: 1.3.6.1.2.1.33.1.7.4 +# upsTestResultsDetail OBJECT-TYPE +# SYNTAX DisplayString (SIZE (0..255)) +# MAX-ACCESS read-only +# STATUS current +# DESCRIPTION +# "Additional information about upsTestResultsSummary. +# If no additional information available, a zero length +# string is returned." + +# OID: 1.3.6.1.2.1.33.1.7.5 +# Description: +# upsTestStartTime OBJECT-TYPE +# SYNTAX TimeStamp +# MAX-ACCESS read-only +# STATUS current +# DESCRIPTION +# "The value of sysUpTime at the time the test in +# progress was initiated, or, if no test is in progress, +# the time the previous test was initiated. If the +# value of upsTestResultsSummary is noTestsInitiated(6), +# upsTestStartTime has the value 0." + +ups_test_default = (0, 0) + +def inventory_ups_test(info): + if info[1]: + return [ (None, "ups_test_default") ] + +def check_ups_test(_no_item, params, info): + warn, crit = params + + uptime_info, bat_info = info + ResultsSummary, StartTime, upsTestResultsDetail = bat_info[0] + uptime = parse_snmp_uptime(uptime_info[0][0]) + start_time = parse_snmp_uptime(StartTime) + + # State of test + state = 0 + if ResultsSummary in [ '3', '4' ]: + state = 2 + elif ResultsSummary in [ '2' ]: + state = 1 + if upsTestResultsDetail or state: + yield state, upsTestResultsDetail + + # Elapsed time since last start of test + elapsed = uptime - start_time + state = 0 + if crit and elapsed >= crit * 86400: + state = 2 + elif warn and elapsed >= warn * 86400: + state = 1 + if start_time: + yield state, "time elapsed since start of last test: %s" % get_age_human_readable(elapsed) + else: + yield state, "no battery test since start of device (%s ago)" % get_age_human_readable(uptime) + + +check_info['ups_test'] = { + "inventory_function" : inventory_ups_test, + "check_function" : check_ups_test, + "service_description" : "Self Test", + "has_perfdata" : False, + "group" : "ups_test", + "snmp_info" : [ + ('.1.3.6.1.2.1.1', ['3.0']), # sysUptime + ( ".1.3.6.1.2.1.33.1.7", [ 3, 5, 4 ] ), + ], + "snmp_scan_function" : lambda oid: oid(".1.3.6.1.2.1.1.2.0") in [ ".1.3.6.1.4.1.534.1", ".1.3.6.1.4.1.818.1.100.1.2" ], + "includes" : [ 'uptime.include' ], +} diff -Nru check-mk-1.2.2p3/uptime check-mk-1.2.6p12/uptime --- check-mk-1.2.2p3/uptime 2013-11-05 09:23:08.000000000 +0000 +++ check-mk-1.2.6p12/uptime 2015-06-24 09:48:37.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. @@ -24,21 +24,25 @@ # to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, # Boston, MA 02110-1301 USA. 
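Before the uptime rework continues below, a standalone trace of the age logic in check_ups_test above. The rule levels are in days, while sysUpTime and upsTestStartTime arrive as SNMP timeticks (hundredths of a second); all values here are invented:

uptime_ticks, start_ticks = "864000000", "172800000"   # 100 days, 20 days
uptime, start_time = int(uptime_ticks[:-2]), int(start_ticks[:-2])
elapsed = uptime - start_time                          # 6912000 s = 80 days

warn, crit = 30, 90                                    # days; (0, 0) disables
if crit and elapsed >= crit * 86400:
    state = 2
elif warn and elapsed >= warn * 86400:
    state = 1
else:
    state = 0
print(state)  # 1: the last self test ran 80 days ago, past the 30-day warning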
+
+# Example output from agent:
+# <<<uptime>>>
+# 15876.96 187476.72
+
 def inventory_uptime(info):
-    if len(info) >= 1 and len(info[0]) >= 1:
-        return [ (None, None) ]
+    if info:
+        return [ (None, {}) ]
 
-def check_uptime(_no_item, _no_params, info):
-    uptime = int(float(info[0][0]))
-    seconds = uptime % 60
-    rem = uptime / 60
-    minutes = rem % 60
-    hours = (rem % 1440) / 60
-    days = rem / 1440
-    now = int(time.time())
-    since = time.strftime("%c", time.localtime(now - uptime))
-    return (0, "OK - up since %s (%dd %02d:%02d:%02d)" % (since, days, hours, minutes, seconds), [ ("uptime", uptime) ])
+def check_uptime(_no_item, params, info):
+    uptime_sec = float(info[0][0])
+    return check_uptime_seconds(params, uptime_sec)
 
-check_info["uptime"] = (check_uptime, "Uptime", 1, inventory_uptime)
-checkgroup_of["uptime"] = "uptime"
+check_info["uptime"] = {
+    'check_function'      : check_uptime,
+    'inventory_function'  : inventory_uptime,
+    'service_description' : 'Uptime',
+    'has_perfdata'        : True,
+    'includes'            : [ 'uptime.include' ],
+    'group'               : 'uptime',
+}
diff -Nru check-mk-1.2.2p3/uptime.include check-mk-1.2.6p12/uptime.include
--- check-mk-1.2.2p3/uptime.include 1970-01-01 00:00:00.000000000 +0000
+++ check-mk-1.2.6p12/uptime.include 2015-06-24 09:48:37.000000000 +0000
@@ -0,0 +1,81 @@
+#!/usr/bin/python
+# -*- encoding: utf-8; py-indent-offset: 4 -*-
+# +------------------------------------------------------------------+
+# |             ____ _               _        __  __ _  __           |
+# |            / ___| |__   ___  ___| | __   |  \/  | |/ /           |
+# |           | |   | '_ \ / _ \/ __| |/ /   | |\/| | ' /            |
+# |           | |___| | | |  __/ (__|   <    | |  | | . \            |
+# |            \____|_| |_|\___|\___|_|\_\___|_|  |_|_|\_\           |
+# |                                                                  |
+# | Copyright Mathias Kettner 2014             mk@mathias-kettner.de |
+# +------------------------------------------------------------------+
+#
+# This file is part of Check_MK.
+# The official homepage is at http://mathias-kettner.de/check_mk.
+#
+# check_mk is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by
+# the Free Software Foundation in version 2. check_mk is distributed
+# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with-
+# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE. See the GNU General Public License for more de-
+# ails. You should have received a copy of the GNU General Public
+# License along with GNU Make; see the file COPYING. If not, write
+# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
+# Boston, MA 02110-1301 USA.
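The rewritten check delegates everything to uptime.include, whose code follows. Two pieces are worth a standalone trace with invented values: the dual-format tick parser and the optional min/max levels (the function mirrors the include below; the parameter values are made up):

def parse_snmp_uptime(ticks):   # mirrors the definition in the include below
    try:
        if len(ticks) < 3:
            return 0
        return int(ticks[:-2])                # plain timeticks, 1/100 s
    except ValueError:
        days, h, m, s = ticks.split(":")      # "d:h:m:s.ss" variant
        return int(days) * 86400 + int(h) * 3600 + int(m) * 60 + int(float(s))

print(parse_snmp_uptime("8812389"))           # 88123 seconds
print(parse_snmp_uptime("4:05:59:12.00"))     # 367152 seconds

# check_uptime_seconds() bounds the uptime from both sides; an invented
# parameter set in the documented format:
uptime_params = {
    "min": (7200, 3600),                      # warn below 2 h, crit below 1 h
    "max": (86400 * 365, 86400 * 730),        # warn after 1 y, crit after 2 y
}
# uptime_sec = 1800 trips the "min" pair (state 2); an uptime of 400 days
# trips only the "max" warning (state 1).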
+ + + +def parse_snmp_uptime(ticks): + try: + if len(ticks) < 3: + return 0 + else: + return int(ticks[:-2]) + except: + days, h, m, s = ticks.split(":") + return (int(days) * 86400 ) + (int(h) * 3600) + (int(m) * 60) + int(float(s)) + +# Example for params: +# { +# "min" : ( 7200, 3600 ), # Minimum required uptime (warn, crit) +# "max" : ( 86400 * 7, 86400 * 14), # Maximum required uptime (warn, crit) +# } + +def check_uptime_seconds(params, uptime_sec): + seconds = uptime_sec % 60 + rem = uptime_sec / 60 + minutes = rem % 60 + hours = (rem % 1440) / 60 + days = rem / 1440 + since = time.strftime("%c", time.localtime(time.time() - uptime_sec)) + + state = 0 + infotext = "up since %s (%dd %02d:%02d:%02d)" % \ + (since, days, hours, minutes, seconds) + + if params == None: # legacy: support older versions of parameters + params = {} + + if "min" in params: + warn, crit = params["min"] + if uptime_sec < crit: + state = 2 + elif uptime_sec < warn: + state = max(state, 1) + + if state: + infotext += ", not up long enough!" + + if "max" in params: + warn, crit = params["max"] + if uptime_sec > crit: + state = 2 + elif uptime_sec > warn: + state = max(state, 1) + + if uptime_sec > warn: + infotext += ", up too long!" + + return (state, infotext, [ ("uptime", uptime_sec) ]) + diff -Nru check-mk-1.2.2p3/users check-mk-1.2.6p12/users --- check-mk-1.2.2p3/users 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/users 2015-06-24 09:48:36.000000000 +0000 @@ -0,0 +1,24 @@ +title: Number of Users Currently Logged In +agents: linux +catalog: os/misc +license: GPL +distribution: check_mk +description: + Monitor the number of users currently logged in. + The default levels are set to {20} and {30}. + +inventory: + A check of this type is created automatically on each Linux host as soon + as the mk_users plugin is installed on the host. + +perfdata: + On variable: the current number of users logged in + +[parameters] +warn (int): Number of users logged in at which a warning level + is triggered +crit (int): Number for a critical state + +[configuration] +users_default_levels (int, int): Levels used for all checks found + by inventory. This variable is preset to {(20, 30)}. diff -Nru check-mk-1.2.2p3/vbox_guest check-mk-1.2.6p12/vbox_guest --- check-mk-1.2.2p3/vbox_guest 2013-11-05 09:23:08.000000000 +0000 +++ check-mk-1.2.6p12/vbox_guest 2015-06-24 09:48:37.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. 
@@ -29,31 +29,36 @@ def check_vbox_guest(_no_item, _no_params, info): if len(info) == 1 and info[0][0] == "ERROR": - return (3, "UNKNOWN - Error running VBoxControl guestproperty enumerate") + return (3, "Error running VBoxControl guestproperty enumerate") try: d = vbox_guest_make_dict(info) except: d = {} if len(d) == 0: - return (2, "CRIT - No guest additions installed") + return (2, "No guest additions installed") version = d.get('GuestAdd/Version') revision = d.get('GuestAdd/Revision') if not version or not version[0].isdigit(): - return (3, "UNKNOWN - No guest addition version available") - infotext = " - version: %s, revision: %s" % (version, revision) + return (3, "No guest addition version available") + infotext = "version: %s, revision: %s" % (version, revision) host_version = d['HostInfo/VBoxVer'] host_revision = d['HostInfo/VBoxRev'] if (host_version, host_revision) != (version, revision): - return (1, "WARN" + infotext + ", Host has %s/%s" % (host_version, host_revision)) + return (1, infotext + ", Host has %s/%s" % (host_version, host_revision)) else: - return (0, "OK" + infotext) + return (0, infotext) def inventory_vbox_guest(info): if len(info) > 0: return [(None, None)] -check_info["vbox_guest"] = ( check_vbox_guest, "VBox Guest Additions", 0, inventory_vbox_guest) -checkgroup_of["vbox_guest"] = "vm_state" + +check_info["vbox_guest"] = { + 'check_function': check_vbox_guest, + 'inventory_function': inventory_vbox_guest, + 'service_description': 'VBox Guest Additions', + 'group': 'vm_state', +} diff -Nru check-mk-1.2.2p3/veeam_client check-mk-1.2.6p12/veeam_client --- check-mk-1.2.2p3/veeam_client 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/veeam_client 2015-07-28 12:55:47.000000000 +0000 @@ -0,0 +1,157 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. 
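check_vbox_guest() relies on vbox_guest_make_dict(), which is defined elsewhere in the check file and not part of this hunk. Judging from the keys used above, it maps guest-property names to values from 'VBoxControl guestproperty enumerate'; the stand-in below is an assumption for illustration only, including the invented line layout:

def vbox_guest_make_dict(info):
    # Hypothetical stand-in, not the shipped helper. Assumed line shape:
    # Name: GuestAdd/Version, value: 4.3.28, timestamp: ..., flags: ...
    return dict([(l[1].rstrip(","), l[3].rstrip(","))
                 for l in info if len(l) > 3])

info = [
    ["Name:", "GuestAdd/Version,",  "value:", "4.3.28,",  "timestamp:", "1", "flags:", ""],
    ["Name:", "GuestAdd/Revision,", "value:", "100309,",  "timestamp:", "2", "flags:", ""],
    ["Name:", "HostInfo/VBoxVer,",  "value:", "4.3.28,",  "timestamp:", "3", "flags:", ""],
    ["Name:", "HostInfo/VBoxRev,",  "value:", "100309,",  "timestamp:", "4", "flags:", ""],
]
d = vbox_guest_make_dict(info)
print(d["GuestAdd/Version"])   # '4.3.28'; host and guest match, so state OK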
+ +factory_settings["veeam_client"] = { + "age": ( 108000, 172800 ), # 30h/2d +} + +def parse_veeam_client(info): + data = {} + for line in info: + if line[0] == "Status": + if len(line) == 2: + last_status = line[1] + else: + # Prevent empty entries + last_status = False + elif line[0] == 'JobName': + if last_status: + last_found = line[1] + data[last_found] = {} + data[last_found]['Status'] = last_status + else: + if last_status and len(line) == 2: + data[last_found][line[0]] = line[1] + return data + + +def inventory_veeam_client(parsed): + for job in parsed.keys(): + yield job, {} + +def check_veeam_client(item, params, parsed): + # Fallback for old None item version + #FIXME Can be remvoed in CMK 2.0 + if item == None and len(parsed) > 0: + item = parsed.keys()[0] + + try: + data = parsed[item] + except KeyError: + return 3, "Client not found in agent output" + + perfdata = [] + infotexts = [] + + state = 0 + # Append current Status to Output + if data['Status'] == 'Warning': + state = 1 + if data['Status'] == 'Failed': + state = 2 + infotexts.append("Status: %s" % data['Status'] ) + + # Only output the Job name + if data.get('JobName'): + infotexts.append("Job: %s" % data['JobName'] ) + + + + size_info = [] + size_legend = [] + + TotalSizeByte = int(data['TotalSizeByte']) + perfdata.append(('totalsize', TotalSizeByte)) + size_info.append(get_bytes_human_readable(TotalSizeByte)) + size_legend.append("total") + + # Output ReadSize and TransferedSize if available + if "ReadSizeByte" in data: + ReadSizeByte = int(data['ReadSizeByte']) + perfdata.append(('readsize', ReadSizeByte)) + size_info.append(get_bytes_human_readable(ReadSizeByte)) + size_legend.append("read") + + if "TransferedSizeByte" in data: + TransferedSizeByte = int(data['TransferedSizeByte']) + perfdata.append(('transferredsize', TransferedSizeByte)) + size_info.append(get_bytes_human_readable(TransferedSizeByte)) + size_legend.append("transferred") + + infotexts.append("Size (%s): %s" % ("/".join(size_legend), "/ ".join(size_info))) + + # Check Stop time in any case, that we can catch hanging backups + if "StopTime" not in data: + state = 2 + infotexts.append("No complete Backup(!!)") + # If the Backup currently is running, the stop time is strange. 
+ elif data['StopTime'] != "01.01.1900 00:00:00": + stop_time = time.mktime(time.strptime( data['StopTime'], "%d.%m.%Y %H:%M:%S")) + now = time.time() + age = now - stop_time + warn, crit = params['age'] + levels = "" + label = "" + if age >= crit: + state = 2 + label = "(!!)" + levels = " (Warn/Crit: %s/%s)" % ( get_age_human_readable(warn), get_age_human_readable(crit)) + elif age >= warn: + state = max(state, 1) + label = "(!)" + levels = " (Warn/Crit: %s/%s)" % ( get_age_human_readable(warn), get_age_human_readable(crit)) + infotexts.append("Last backup: %s ago%s%s" % ( get_age_human_readable(age), label, levels )) + + # Check duration only if currently not running + if data['Status'] not in [ 'InProgress', 'Pending' ]: + # Information may missing + if data.get('DurationDDHHMMSS'): + duration = 0 + days, hours, minutes, seconds = map(int, data['DurationDDHHMMSS'].split(':')) + duration += seconds + duration += minutes * 60 + duration += hours * 60 * 60 + duration += days * 60 * 60 * 24 + infotexts.append("Duration: %s" % get_age_human_readable(duration)) + perfdata.append(('duration', duration )) + + AvgSpeedBps = int(data['AvgSpeedBps']) + perfdata.append(('avgspeed', AvgSpeedBps)) + infotexts.append(("Average Speed: %s/s" % get_bytes_human_readable(AvgSpeedBps))) + + + return state, ", ".join(infotexts), perfdata + +check_info["veeam_client"] = { + 'parse_function': parse_veeam_client, + 'check_function': check_veeam_client, + 'inventory_function': inventory_veeam_client, + 'service_description': 'VEEAM Client', + 'group': 'veeam_backup', + 'default_levels_variable' : 'veeam_client', + 'has_perfdata': True, +} + diff -Nru check-mk-1.2.2p3/veeam_jobs check-mk-1.2.6p12/veeam_jobs --- check-mk-1.2.2p3/veeam_jobs 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/veeam_jobs 2015-06-24 09:48:37.000000000 +0000 @@ -0,0 +1,73 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. 
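A worked example of parse_veeam_client() from the hunk above, with an invented agent section. The condensed copy keeps the grouping semantics: a 'Status' line opens a job record, 'JobName' names it, and every other two-field line is attached to the current job:

def parse_veeam_client(info):           # condensed copy of the parse function
    data = {}
    last_status, last_found = False, None   # initialised to survive bad input
    for line in info:
        if line[0] == "Status":
            last_status = line[1] if len(line) == 2 else False
        elif line[0] == "JobName":
            if last_status:
                last_found = line[1]
                data[last_found] = {"Status": last_status}
        elif last_status and len(line) == 2:
            data[last_found][line[0]] = line[1]
    return data

agent_lines = [
    ["Status", "Success"],
    ["JobName", "web01"],
    ["TotalSizeByte", "1073741824"],
    ["StopTime", "28.10.2013 05:32:15"],
]
print(parse_veeam_client(agent_lines))
# {'web01': {'Status': 'Success', 'TotalSizeByte': '1073741824',
#            'StopTime': '28.10.2013 05:32:15'}}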
+ +# <<>> +# BACKUP_RIS Backup Stopped Success 27.10.2013 22:00:17 27.10.2013 22:06:12 +# BACKUP_R43-local_HXWH44 Backup Stopped Success 26.10.2013 18:00:20 26.10.2013 18:46:03 +# BACKUP_R43-Pool4_HXWH44 Backup Stopped Failed 26.10.2013 23:13:13 27.10.2013 00:51:17 +# BACKUP_R43-Pool3_HXWH44 Backup Stopped Failed 27.10.2013 02:59:29 27.10.2013 08:59:51 +# REPL_KNESXIDMZ Replica Stopped Success 27.10.2013 44:00:01 27.10.2013 44:44:26 +# BACKUP_KNESXI Backup Stopped Success 28.10.2013 05:00:04 28.10.2013 05:32:15 +# BACKUP_KNESXit Backup Stopped Success 26.10.2013 22:30:02 27.10.2013 02:37:30 +# BACKUP_R43-Pool5_HXWH44 Backup Stopped Success 27.10.2013 23:00:00 27.10.2013 23:04:53 +# BACKUP_R43-Pool2_HXWH44 Backup Stopped Failed 27.10.2013 02:37:45 27.10.2013 02:45:35 + + +def inventory_veeam_jobs(info): + return [ (x[0], None) for x in info ] + +def check_veeam_jobs(item, _no_params, info): + for line in info: + if line[0] == item: + backup_type = line[1] + backup_status = line[3] + backup_current = line[2] + + if backup_status == "Success": + state = 0 + elif backup_current == 'Idle' and backup_type == "BackupSync": + # A sync job is always idle + state = 0 + elif backup_current == "Working": + state = 0 + line = line[:6] + elif backup_status == "Failed": + state = 2 + elif backup_current == "Stopped" and backup_status == "Warning": + state = 1 + else: + state = 3 + infotxt = "Result: %s, " % line[3] + infotxt += ", ".join(line[1:3] + line[4:]) + return (state, infotxt) + + return 3, "No such job found" + +check_info["veeam_jobs"] = { + 'check_function': check_veeam_jobs, + 'inventory_function': inventory_veeam_jobs, + 'service_description': 'VEEAM Job %s', +} diff -Nru check-mk-1.2.2p3/VERSION check-mk-1.2.6p12/VERSION --- check-mk-1.2.2p3/VERSION 2013-11-05 09:42:58.000000000 +0000 +++ check-mk-1.2.6p12/VERSION 2015-09-21 11:01:35.000000000 +0000 @@ -1 +1 @@ -1.2.2p3 +1.2.6p12 diff -Nru check-mk-1.2.2p3/viprinet_firmware check-mk-1.2.6p12/viprinet_firmware --- check-mk-1.2.2p3/viprinet_firmware 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/viprinet_firmware 2015-06-24 09:48:37.000000000 +0000 @@ -0,0 +1,49 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. 
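The decision ladder in check_veeam_jobs above, traced with one invented agent line (column order: job name, type, current state, last result, start, end):

line = ["BACKUP_R43-Pool4_HXWH44", "Backup", "Stopped", "Failed",
        "26.10.2013 23:13:13", "27.10.2013 00:51:17"]
backup_type, backup_current, backup_status = line[1], line[2], line[3]
if backup_status == "Success":
    state = 0
elif backup_current == "Idle" and backup_type == "BackupSync":
    state = 0            # a sync job is always idle
elif backup_current == "Working":
    state = 0            # still running; the check drops the timestamps
elif backup_status == "Failed":
    state = 2
elif backup_current == "Stopped" and backup_status == "Warning":
    state = 1
else:
    state = 3
print("state %d, Result: %s, %s" % (state, backup_status,
                                    ", ".join(line[1:3] + line[4:])))
# state 2, Result: Failed, Backup, Stopped, 26.10.2013 23:13:13, 27.10.2013 00:51:17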
+
+def check_viprinet_firmware(_no_item, _no_params, info):
+    fw_status_map = {
+        "0": "No new firmware available",
+        "1": "Update Available",
+        "2": "Checking for Updates",
+        "3": "Downloading Update",
+        "4": "Installing Update"
+    }
+    fw_status = fw_status_map.get(info[0][1])
+    if fw_status:
+        return (0, "%s, %s" % (info[0][0], fw_status))
+    else:
+        return (3, "%s, no firmware status available" % info[0][0])
+
+
+check_info["viprinet_firmware"] = {
+    'check_function'      : check_viprinet_firmware,
+    'inventory_function'  : lambda info: len(info) > 0 and [(None, None)] or [],
+    'service_description' : "Firmware Version",
+    'has_perfdata'        : False,
+    'snmp_info'           : (".1.3.6.1.4.1.35424.1.1", [4, 7]),
+    'snmp_scan_function'  : lambda oid: oid(".1.3.6.1.2.1.1.2.0") in [".1.3.6.1.4.1.35424"],
+}
diff -Nru check-mk-1.2.2p3/viprinet_mem check-mk-1.2.6p12/viprinet_mem
--- check-mk-1.2.2p3/viprinet_mem 1970-01-01 00:00:00.000000000 +0000
+++ check-mk-1.2.6p12/viprinet_mem 2015-06-24 09:48:37.000000000 +0000
@@ -0,0 +1,34 @@
+#!/usr/bin/python
+# -*- encoding: utf-8; py-indent-offset: 4 -*-
+# +------------------------------------------------------------------+
+# |             ____ _               _        __  __ _  __           |
+# |            / ___| |__   ___  ___| | __   |  \/  | |/ /           |
+# |           | |   | '_ \ / _ \/ __| |/ /   | |\/| | ' /            |
+# |           | |___| | | |  __/ (__|   <    | |  | | . \            |
+# |            \____|_| |_|\___|\___|_|\_\___|_|  |_|_|\_\           |
+# |                                                                  |
+# | Copyright Mathias Kettner 2014             mk@mathias-kettner.de |
+# +------------------------------------------------------------------+
+#
+# This file is part of Check_MK.
+# The official homepage is at http://mathias-kettner.de/check_mk.
+#
+# check_mk is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by
+# the Free Software Foundation in version 2. check_mk is distributed
+# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with-
+# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE. See the GNU General Public License for more de-
+# ails. You should have received a copy of the GNU General Public
+# License along with GNU Make; see the file COPYING. If not, write
+# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
+# Boston, MA 02110-1301 USA.
+
+check_info["viprinet_mem"] = {
+    'check_function'      : lambda _no_item, _no_params, info: (0, "Memory used: %s" % get_bytes_human_readable(saveint(info[0][0]))),
+    'inventory_function'  : lambda info: len(info) > 0 and [(None, None)] or [],
+    'service_description' : "Memory",
+    'has_perfdata'        : False,
+    'snmp_info'           : (".1.3.6.1.4.1.35424.1.2", [2]),
+    'snmp_scan_function'  : lambda oid: oid(".1.3.6.1.2.1.1.2.0") in [".1.3.6.1.4.1.35424"],
+}
diff -Nru check-mk-1.2.2p3/viprinet_power check-mk-1.2.6p12/viprinet_power
--- check-mk-1.2.2p3/viprinet_power 1970-01-01 00:00:00.000000000 +0000
+++ check-mk-1.2.6p12/viprinet_power 2015-06-24 09:48:37.000000000 +0000
@@ -0,0 +1,45 @@
+#!/usr/bin/python
+# -*- encoding: utf-8; py-indent-offset: 4 -*-
+# +------------------------------------------------------------------+
+# |             ____ _               _        __  __ _  __           |
+# |            / ___| |__   ___  ___| | __   |  \/  | |/ /           |
+# |           | |   | '_ \ / _ \/ __| |/ /   | |\/| | ' /            |
+# |           | |___| | | |  __/ (__|   <    | |  | | . \            |
+# |            \____|_| |_|\___|\___|_|\_\___|_|  |_|_|\_\           |
+# |                                                                  |
+# | Copyright Mathias Kettner 2014             mk@mathias-kettner.de |
+# +------------------------------------------------------------------+
+#
+# This file is part of Check_MK.
+# The official homepage is at http://mathias-kettner.de/check_mk.
+# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. + +def check_viprinet_power(_no_item, params, info): + power_map = { + "0" : "no failure", + "1" : "a single PSU is out of order" + } + power_info = power_map.get(info[0][0]) + if power_info: + return (0, power_info) + else: + return (3, "Invalid power status") + +check_info["viprinet_power"] = { + 'check_function' : check_viprinet_power, + 'inventory_function' : lambda info: len(info) > 0 and [(None, None)] or [], + 'service_description' : "Power-Supply", + 'has_perfdata' : False, + 'snmp_info' : (".1.3.6.1.4.1.35424.1.2", [5]), + 'snmp_scan_function' : lambda oid: oid(".1.3.6.1.2.1.1.2.0") in [".1.3.6.1.4.1.35424"], +} diff -Nru check-mk-1.2.2p3/viprinet_router check-mk-1.2.6p12/viprinet_router --- check-mk-1.2.2p3/viprinet_router 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/viprinet_router 2015-06-24 09:48:37.000000000 +0000 @@ -0,0 +1,61 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. 
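All the viprinet checks use the Python 2 'cond and A or B' idiom for their inventory lambdas, the pre-2.5 spelling of a conditional expression. It yields A when the condition holds (and A is truthy), otherwise B:

inventory = lambda info: len(info) > 0 and [(None, None)] or []
print(inventory([["0"]]))   # [(None, None)]: one service without an item
print(inventory([]))        # []: nothing to inventorize

The idiom misfires when A is falsy; that cannot happen here, since [(None, None)] is always a non-empty list.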
+ +def check_viprinet_router(_no_item, params, info): + router_mode_map = { + "0" : "Node", + "1" : "Hub", + "2" : "Hub running as HotSpare", + "3" : "Hotspare-Hub replacing another router" + } + current_mode = info[0][0][0] + mode = router_mode_map.get(current_mode) + + expect_mode = params.get("expect_mode") + if expect_mode: + # Requires mode found on inventory + if expect_mode == "inv": + expect_mode = params.get("mode_inv") + if expect_mode in router_mode_map: + if expect_mode != current_mode: + return (2, "Mode '%s' differs from expected mode '%s'" % + ( mode, router_mode_map.get(expect_mode) )) + + if mode: + return (0, mode) + else: + return (3, "Undefined Mode") + +check_info["viprinet_router"] = { + 'check_function' : check_viprinet_router, + 'inventory_function' : lambda info: len(info) > 0 and [(None, {"mode_inv": info[0][0][0]})] or [], + 'service_description' : "Router Mode", + 'has_perfdata' : False, + 'group' : "viprinet_router", + 'snmp_info' : (".1.3.6.1.4.1.35424.1.1", [5]), + 'snmp_scan_function' : lambda oid: oid(".1.3.6.1.2.1.1.2.0") in [".1.3.6.1.4.1.35424"], +} + diff -Nru check-mk-1.2.2p3/viprinet_serial check-mk-1.2.6p12/viprinet_serial --- check-mk-1.2.2p3/viprinet_serial 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/viprinet_serial 2015-06-24 09:48:37.000000000 +0000 @@ -0,0 +1,42 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. 
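Parameter handling in check_viprinet_router above: with "expect_mode": "inv", the mode stored at inventory time ("mode_inv") becomes the reference, and a later mode change trips a CRIT. A standalone trace with invented values:

router_mode_map = {"0": "Node", "1": "Hub",
                   "2": "Hub running as HotSpare",
                   "3": "Hotspare-Hub replacing another router"}
params = {"expect_mode": "inv", "mode_inv": "1"}   # inventorized as a Hub
current_mode = "2"                                  # now a hot-spare hub
expect_mode = params["expect_mode"]
if expect_mode == "inv":
    expect_mode = params.get("mode_inv")
if expect_mode in router_mode_map and expect_mode != current_mode:
    print("CRIT: Mode '%s' differs from expected mode '%s'"
          % (router_mode_map[current_mode], router_mode_map[expect_mode]))
# CRIT: Mode 'Hub running as HotSpare' differs from expected mode 'Hub'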
+ + +def inventory_viprinet_serial(info): + if info: + return [(None, None)] + +def check_viprinet_serial(_no_item, _no_params, info): + return 0, info[0][0] + + +check_info["viprinet_serial"] = { + 'check_function' : check_viprinet_serial, + 'inventory_function' : inventory_viprinet_serial, + 'service_description' : "Serial Number", + 'snmp_info' : (".1.3.6.1.4.1.35424.1.1", [2]), + 'snmp_scan_function' : lambda oid: oid(".1.3.6.1.2.1.1.2.0") == ".1.3.6.1.4.1.35424", +} diff -Nru check-mk-1.2.2p3/viprinet_temp check-mk-1.2.6p12/viprinet_temp --- check-mk-1.2.2p3/viprinet_temp 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/viprinet_temp 2015-09-21 10:59:54.000000000 +0000 @@ -0,0 +1,56 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. + +def check_viprinet_temp(item, params, info): + state = 0 + value = saveint(info[0][item == "System"]) + state_info = "" + + warn = "" + crit = "" + if params: + warn, crit = params + if value > crit: + state_info = "(crit at %d°C)" % crit + state = 2 + elif value > warn: + state_info = "(warn at %d°C)" % warn + state = 1 + + infotext = "%s: %s°C %s" % (item, value, state_info) + return state, infotext, [("temp", value, warn, crit)] + + +check_info["viprinet_temp"] = { + 'check_function' : check_viprinet_temp, + 'inventory_function' : lambda info: len(info) > 0 and [("CPU", None), ("System", None)] or [], + 'service_description' : "Temperature %s", + 'group' : "room_temperature", + 'has_perfdata' : True, + 'snmp_info' : (".1.3.6.1.4.1.35424.1.2", [3, 4]), + 'snmp_scan_function' : lambda oid: oid(".1.3.6.1.2.1.1.2.0") in [".1.3.6.1.4.1.35424"], +} + diff -Nru check-mk-1.2.2p3/vms_cpu check-mk-1.2.6p12/vms_cpu --- check-mk-1.2.2p3/vms_cpu 2013-11-05 09:23:08.000000000 +0000 +++ check-mk-1.2.6p12/vms_cpu 2015-06-24 09:48:37.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. 
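check_viprinet_temp() above selects the SNMP column with a boolean subscript: info[0][item == "System"] relies on False == 0 and True == 1, so the "CPU" item reads column 0 and "System" column 1:

info = [["47", "39"]]                 # CPU temp, system temp (invented)
for item in ("CPU", "System"):
    print("%s: %s°C" % (item, info[0][item == "System"]))
# CPU: 47°C
# System: 39°C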
@@ -40,7 +40,7 @@ idle, user, wait_interrupt, wait_npsync = map(lambda x: float(x) / num_cpus, info[0][1:]) wait = wait_interrupt + wait_npsync system = 100.0 - idle - user - wait - infotext = " - user: %2.1f%%, system: %2.1f%%, wait: %2.1f%%" % (user, system, wait) + infotext = "user: %2.1f%%, system: %2.1f%%, wait: %2.1f%%" % (user, system, wait) state = 0 perfdata = [ ( "user", user ), @@ -59,7 +59,7 @@ state = 1 infotext += '(!)' - return (state, nagios_state_names[state] + infotext, perfdata) + return (state, infotext, perfdata) check_info['vms_cpu'] = { "check_function" : check_vms_cpu, diff -Nru check-mk-1.2.2p3/vms_df check-mk-1.2.6p12/vms_df --- check-mk-1.2.2p3/vms_df 2013-11-05 09:23:08.000000000 +0000 +++ check-mk-1.2.6p12/vms_df 1970-01-01 00:00:00.000000000 +0000 @@ -1,57 +0,0 @@ -#!/usr/bin/python -# -*- encoding: utf-8; py-indent-offset: 4 -*- -# +------------------------------------------------------------------+ -# | ____ _ _ __ __ _ __ | -# | / ___| |__ ___ ___| | __ | \/ | |/ / | -# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | -# | | |___| | | | __/ (__| < | | | | . \ | -# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | -# | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | -# +------------------------------------------------------------------+ -# -# This file is part of Check_MK. -# The official homepage is at http://mathias-kettner.de/check_mk. -# -# check_mk is free software; you can redistribute it and/or modify it -# under the terms of the GNU General Public License as published by -# the Free Software Foundation in version 2. check_mk is distributed -# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- -# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A -# PARTICULAR PURPOSE. See the GNU General Public License for more de- -# ails. You should have received a copy of the GNU General Public -# License along with GNU Make; see the file COPYING. If not, write -# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, -# Boston, MA 02110-1301 USA. - -check_includes['vms_df'] = [ "df.include" ] - -# DSA101 0.77 400.00 0.07 0.93 16271539.00 35556389 -# DSA102 0.00 400.00 0.00 0.00 86651840.00 106669167 -# DSA103 0.00 400.00 0.00 0.00 97962784.00 106669167 -# DSA104 0.30 400.00 0.00 0.00 75934488.00 106669167 -def inventory_vms_df(info): - return df_inventory([ line[0] for line in info ]) - -def check_vms_df(item, params, info): - for line in info: - if line[0] == item: - io_ops_total_per_sec = float(line[1]) - read_perc = float(line[2]) - disk_util = float(line[3]) - response_time_ms = float(line[4]) - blocks_free = float(line[5]) - blocks_total = float(line[6]) # one block is 512 bytes - - free_mb = blocks_free / 2048 - size_mb = blocks_total / 2048 - status, text, perfdata = df_check_filesystem(g_hostname, item, size_mb, free_mb, params) - - perfdata.append( ('iops', "%.2f" % io_ops_total_per_sec ) ) - return status, text, perfdata - - return (3, "Disk %s not found" % (item,)) - -check_info['vms_df'] = ( check_vms_df, "fs_%s", 1, inventory_vms_df ) -checkgroup_of["vms_df"] = "filesystem" -check_default_levels['vms_df'] = "filesystem_default_levels" diff -Nru check-mk-1.2.2p3/vms_diskstat check-mk-1.2.6p12/vms_diskstat --- check-mk-1.2.2p3/vms_diskstat 2013-11-05 09:23:08.000000000 +0000 +++ check-mk-1.2.6p12/vms_diskstat 2015-09-21 10:59:54.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . 
\ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. @@ -47,14 +47,15 @@ avail_mb = int(used) * 512 / (1024.0 * 1024.0) return df_check_filesystem(g_hostname, item, size_mb, avail_mb, params) - return (3, "UNKNOWN - no such disk") + return (3, "no such disk") check_info['vms_diskstat.df'] = { "check_function" : check_vms_diskstat_fs, "inventory_function" : inventory_vms_diskstat_fs, - "service_description" : "fs_%s", + "service_description" : "Filesystem %s", "has_perfdata" : True, "group" : "filesystem", "includes" : [ "df.include" ], + "default_levels_variable" : "filesystem_default_levels", } diff -Nru check-mk-1.2.2p3/vms_diskstat.df check-mk-1.2.6p12/vms_diskstat.df --- check-mk-1.2.2p3/vms_diskstat.df 2013-11-05 09:23:07.000000000 +0000 +++ check-mk-1.2.6p12/vms_diskstat.df 2015-06-24 09:48:36.000000000 +0000 @@ -1,6 +1,6 @@ -title: Check used disk space on OpenVMS +title: Disk space on OpenVMS agents: openvms -author: Mathias Kettner +catalog: os/storage license: GPL distribution: check_mk description: diff -Nru check-mk-1.2.2p3/vms_if check-mk-1.2.6p12/vms_if --- check-mk-1.2.2p3/vms_if 2013-11-05 09:23:08.000000000 +0000 +++ check-mk-1.2.6p12/vms_if 2015-06-24 09:48:37.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. diff -Nru check-mk-1.2.2p3/vms_md check-mk-1.2.6p12/vms_md --- check-mk-1.2.2p3/vms_md 2013-11-05 09:23:08.000000000 +0000 +++ check-mk-1.2.6p12/vms_md 1970-01-01 00:00:00.000000000 +0000 @@ -1,78 +0,0 @@ -#!/usr/bin/python -# -*- encoding: utf-8; py-indent-offset: 4 -*- -# +------------------------------------------------------------------+ -# | ____ _ _ __ __ _ __ | -# | / ___| |__ ___ ___| | __ | \/ | |/ / | -# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | -# | | |___| | | | __/ (__| < | | | | . \ | -# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | -# | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | -# +------------------------------------------------------------------+ -# -# This file is part of Check_MK. -# The official homepage is at http://mathias-kettner.de/check_mk. -# -# check_mk is free software; you can redistribute it and/or modify it -# under the terms of the GNU General Public License as published by -# the Free Software Foundation in version 2. check_mk is distributed -# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- -# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A -# PARTICULAR PURPOSE. See the GNU General Public License for more de- -# ails. You should have received a copy of the GNU General Public -# License along with GNU Make; see the file COPYING. If not, write -# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, -# Boston, MA 02110-1301 USA. - -# <<>> -# MD DSA25 2 2 -# MD DSA101 2 2 -# MD DSA102 2 2 -# MD DSA103 2 2 -# MD DSA104 2 2 - -# Bei der Inventur holen wir die Liste aller Shadowsets und monitoren -# in Zukunft, ob alle noch vorhanden sind, keine zuviel ist (Warning) -# alle die richtige Anzahl von Platten aktiv haben. 
-def inventory_vms_md(info): - shadowsets = [] - for line in info: - shadowsets.append(line[1]) - if len(shadowsets) == 0: - return [] - else: - return [(None, '""', shadowsets)] - -def check_vms_md(item, params, info): - shadowsets = [] - errorlevel = 0 - errors = [] - try: - for line in info: - name = line[1] - disks = int(line[2]) - online = int(line[3]) - if online < disks: - errorlevel = 2 - errors.append("%s: only %d out of %d disks online" % (name, online, disks) ) - if name not in params: - errorlevel = max(errorlevel, 1) - errors.append("%s: unknown" % name) - shadowsets.append(name) - except: - return (3, "UNKNOWN - invalid output from plugin") - - # check for missing shadowsets - for name in params: - if name not in shadowsets: - errorlevel = 2 - errors.append("%s: missing" % name) - if errorlevel == 0: - return (0, "OK - %d shadowsets in normal state" % len(shadowsets)) - else: - return (errorlevel, "%s - %s" % ( - { 1: "WARNING", 2: "CRITICAL" }[errorlevel], - ", ".join(errors))) - - -check_info['vms_md'] = ( check_vms_md, "Shadowsets", 0, inventory_vms_md ) diff -Nru check-mk-1.2.2p3/vms_netif check-mk-1.2.6p12/vms_netif --- check-mk-1.2.2p3/vms_netif 2013-11-05 09:23:08.000000000 +0000 +++ check-mk-1.2.6p12/vms_netif 1970-01-01 00:00:00.000000000 +0000 @@ -1,51 +0,0 @@ -#!/usr/bin/python -# -*- encoding: utf-8; py-indent-offset: 4 -*- -# +------------------------------------------------------------------+ -# | ____ _ _ __ __ _ __ | -# | / ___| |__ ___ ___| | __ | \/ | |/ / | -# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | -# | | |___| | | | __/ (__| < | | | | . \ | -# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | -# | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | -# +------------------------------------------------------------------+ -# -# This file is part of Check_MK. -# The official homepage is at http://mathias-kettner.de/check_mk. -# -# check_mk is free software; you can redistribute it and/or modify it -# under the terms of the GNU General Public License as published by -# the Free Software Foundation in version 2. check_mk is distributed -# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- -# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A -# PARTICULAR PURPOSE. See the GNU General Public License for more de- -# ails. You should have received a copy of the GNU General Public -# License along with GNU Make; see the file COPYING. If not, write -# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, -# Boston, MA 02110-1301 USA. 
- -# <<>> -# WE0 661236727 169337595 -def inventory_vms_netif(info): - return [ (line[0], '""', None) for line in info ] - -def check_vms_netif(item, params, info): - for line in info: - if item == line[0]: - try: - global g_counters - perfdata = [] - rates = [] - this_time = time.time() - for dir, value in [ ('ipackets', int(line[1])), ('opackets', int(line[2])) ]: - countername = "netctr.%s.%s" % (item, dir) - timedif, items_per_sec = get_counter(countername, this_time, value) - perfdata.append( (dir, "%dc" % value) ) - rates.append(items_per_sec) - return (0, "OK - packets/s: %.2f in / %.2f out " % (rates[0], rates[1]), perfdata) - except: - return (3, "UNKNOWN - invalid output from plugin") - - return (3, "UNKNOWN - network inteface not present") - -check_info['vms_netif'] = (check_vms_netif, "NIC %s", 1, inventory_vms_netif) diff -Nru check-mk-1.2.2p3/vms_queuejobs check-mk-1.2.6p12/vms_queuejobs --- check-mk-1.2.2p3/vms_queuejobs 2013-11-05 09:23:08.000000000 +0000 +++ check-mk-1.2.6p12/vms_queuejobs 2015-06-24 09:48:37.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. @@ -44,7 +44,7 @@ max_cpu_secs = cpu_secs max_cpu_job = name - infotext = " - %d jobs" % len(info) + infotext = "%d jobs" % len(info) if max_cpu_job: minutes, seconds = divmod(max_cpu_secs, 60) hours, minutes = divmod(minutes, 60) @@ -52,7 +52,7 @@ infotext += ', most CPU used by %s (%d days, %02d:%02d:%02d.%02d)' % \ (max_cpu_job, days, hours, minutes, int(seconds), int(seconds * 100)) - return 0, "OK" + infotext + return 0, infotext check_info['vms_queuejobs'] = { diff -Nru check-mk-1.2.2p3/vms_sys check-mk-1.2.6p12/vms_sys --- check-mk-1.2.2p3/vms_sys 2013-11-05 09:23:08.000000000 +0000 +++ check-mk-1.2.6p12/vms_sys 1970-01-01 00:00:00.000000000 +0000 @@ -1,131 +0,0 @@ -#!/usr/bin/python -# -*- encoding: utf-8; py-indent-offset: 4 -*- -# +------------------------------------------------------------------+ -# | ____ _ _ __ __ _ __ | -# | / ___| |__ ___ ___| | __ | \/ | |/ / | -# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | -# | | |___| | | | __/ (__| < | | | | . \ | -# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | -# | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | -# +------------------------------------------------------------------+ -# -# This file is part of Check_MK. -# The official homepage is at http://mathias-kettner.de/check_mk. -# -# check_mk is free software; you can redistribute it and/or modify it -# under the terms of the GNU General Public License as published by -# the Free Software Foundation in version 2. check_mk is distributed -# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- -# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A -# PARTICULAR PURPOSE. See the GNU General Public License for more de- -# ails. You should have received a copy of the GNU General Public -# License along with GNU Make; see the file COPYING. If not, write -# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, -# Boston, MA 02110-1301 USA. 
- -# vms_memused_default_levels = (50.0, 80.0) - -vms_memused_default_levels = (150.0, 200.0) - -def inventory_vms_mem(info): - for line in info: - if line[0] == "MEM": - return [(None, "", "vms_memused_default_levels")] - - -# MEM 524288.00 20106.00 157124.00 26635.00 107394.00 168.00 7.00 9.00 3.00 0.00 0.00 8192.00 79.52 -def check_vms_mem(item, params, info): - warn, crit = params - try: - for line in info: - if line[0].startswith("MEM"): - physical_size_in_pages = (float(line[1])) # total memory - vms_alloc_pages = (float(line[2])) # reserved for the kernel - virtual_io_cache_pages = (float(line[3])) - modified_list_pages = (float(line[4])) # dirty pages - free_list_size_pages = (float(line[5])) # effectively a kind of cache - number_of_processes = (float(line[6])) - interactive_processes = (float(line[7])) - network_processes = (float(line[8])) - batch_processes = (float(line[9])) - proc_in_swapped_persec = (float(line[10])) - proc_out_swapped_persec = (float(line[11])) - pagesize = (float(line[12])) - memory_used_perc = (float(line[13])) - - mem_total_mb = float(physical_size_in_pages * pagesize) / (1024*1024) - mem_used_mb = mem_total_mb * memory_used_perc / 100.0 - warn_mb = warn * mem_total_mb / 100 - crit_mb = crit * mem_total_mb / 100 - perfdata = [ ('ramused', "%.2fMB" % mem_used_mb, warn_mb, crit_mb, 0, mem_total_mb) ] - - used_txt = "%.1f%% of RAM (%.2f of %.2f GB) used by processes" % (memory_used_perc, mem_used_mb/1024, mem_total_mb/1024) - if memory_used_perc >= crit: - return (2, 'CRIT - %s (critical at %.1f%%)' % (used_txt, crit), perfdata) - elif memory_used_perc >= warn: - return (1, 'WARN - %s (warning at %.1f%%)' % (used_txt, warn), perfdata) - else: - return (0, 'OK - %s' % used_txt, perfdata) - - except: - return (3, "UNKNOWN - invalid output from plugin") - -check_info['vms_sys.mem'] = (check_vms_mem, "Memory used", 1, inventory_vms_mem) - -# <<<vms_sys>>> -# sum us sy wa sy sy sy -# CPU0 3.30 1.18 0.99 0.10 0.68 0.32 0.46 -# CPU1 6.32 3.08 1.62 0.16 1.25 0.32 0.46 -# CPU2 6.77 3.00 2.37 0.24 0.75 0.32 0.46 -# CPU3 12.22 5.11 4.35 0.19 1.27 0.32 0.46 -# MEM 524288.00 20106.00 157124.00 26635.00 107394.00 168.00 7.00 9.00 3.00 0.00 0.00 8192.00 79.52 -# PAG 12.35 2.03 0.00 2.03 10.32 -# GIO 18.56 284.52 0.10 12.64 0.00 1.15 1765.00 -# XQP 2.23 0.02 0.07 0.00 0.15 0.00 0.00 0.00 -# LCK 19624.00 22.16 37.58 22.18 6.66 0.40 0.00 0.00 0.00 -def inventory_vms_cpu_utilization(info): - for line in info: - if line[0].startswith("CPU"): - return [(None, "", None)] - return [] - -def check_vms_cpu_utilization(item, param, info): - # It looks as if the percentage values we get from the - # agent are already divided by the number of CPUs, so that - # the sum over all CPUs peaks at 100% rather than at 100% - # times the number of CPUs. The agent should really report - # the values in a way that lets each individual CPU reach - # 100% on its own. - # For now we compensate for this here in the check, but the - # agent should be reviewed accordingly.
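To make the (translated) remark above concrete, this is the arithmetic the check performs on the sample section shown: it sums one column over all CPU lines and divides by the CPU count. Illustration with the 'us' column (line[2]) of the example output (values taken from the sample, helper names hypothetical):

cpu_user = [1.18, 3.08, 3.00, 5.11]     # 'us' of CPU0..CPU3 above
count = len(cpu_user)
user_perc = sum(cpu_user) / count        # what the check reports as "user"
print("user: %2.0f%% (%d CPUs)" % (user_perc, count))  # -> user:  3% (4 CPUs)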
- us = 0.0 - sy = 0.0 - wa = 0.0 - count = 0 - for line in info: - if line[0].startswith("CPU"): - us += float(line[2]) - sy += float(line[3]) + float(line[5]) + float(line[6]) + float(line[7]) - wa += float(line[4]) - count += 1 - if count == 0: - return (3, "UNKNOWN - incomplete data from host") - - if count == 1: - cputext = "1 CPU" - else: - cputext = "%d CPUS" % count - - user_perc = us / count - system_perc = sy / count - wait_perc = wa / count - - perfdata = [ - ( "user", "%.3f" % user_perc ), - ( "system", "%.3f" % system_perc ), - ( "wait", "%.3f" % wait_perc ) ] - return (0, "OK - user: %2.0f%%, system: %2.0f%%, wait: %2.0f%% (%s)" - % (user_perc, system_perc, wait_perc, cputext), perfdata) - -check_info['vms_sys.util'] = (check_vms_cpu_utilization, "CPU utilization", 1, inventory_vms_cpu_utilization) diff -Nru check-mk-1.2.2p3/vms_system check-mk-1.2.6p12/vms_system --- check-mk-1.2.2p3/vms_system 2013-11-05 09:23:08.000000000 +0000 +++ check-mk-1.2.6p12/vms_system 2015-06-24 09:48:37.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. @@ -39,7 +39,7 @@ def check_vms_system_ios(_no_item, _no_params, info): direct_ios, buffered_ios = map(float, info[0][:2]) - return (0, "OK - Direct IOs: %.2f/sec, Buffered IOs: %.2f/sec" % ( + return (0, "Direct IOs: %.2f/sec, Buffered IOs: %.2f/sec" % ( direct_ios, buffered_ios), [ ("direct", direct_ios), ("buffered", buffered_ios)]) check_info["vms_system.ios"] = { @@ -57,11 +57,11 @@ warn, crit = params perfdata = [('procs', procs, warn, crit, 0 )] if procs >= crit: - return (2, "CRIT - %d processes (critical at %d)" % (procs, crit), perfdata) + return (2, "%d processes (critical at %d)" % (procs, crit), perfdata) elif procs >= warn: - return (1, "WARN - %d processes (warning at %d)" % (procs, warn), perfdata) + return (1, "%d processes (warning at %d)" % (procs, warn), perfdata) - return (0, "OK - %d processes" % (procs,), perfdata) + return (0, "%d processes" % (procs,), perfdata) check_info["vms_system.procs"] = { diff -Nru check-mk-1.2.2p3/vms_system.ios check-mk-1.2.6p12/vms_system.ios --- check-mk-1.2.2p3/vms_system.ios 2013-11-05 09:23:07.000000000 +0000 +++ check-mk-1.2.6p12/vms_system.ios 2015-06-24 09:48:36.000000000 +0000 @@ -1,6 +1,6 @@ -title: Monitor number total IOs of a node on OpenVMS +title: Number of total IOs on OpenVMS agents: openvms -author: Mathias Kettner +catalog: os/storage license: GPL distribution: check_mk description: diff -Nru check-mk-1.2.2p3/vms_system.procs check-mk-1.2.6p12/vms_system.procs --- check-mk-1.2.2p3/vms_system.procs 2013-11-05 09:23:07.000000000 +0000 +++ check-mk-1.2.6p12/vms_system.procs 2015-06-24 09:48:36.000000000 +0000 @@ -1,6 +1,6 @@ -title: Check number of processes on OpenVMS +title: Number of processes on OpenVMS agents: openvms -author: Mathias Kettner +catalog: os/ps license: GPL distribution: check_mk description: diff -Nru check-mk-1.2.2p3/vmstat_aix check-mk-1.2.6p12/vmstat_aix --- check-mk-1.2.2p3/vmstat_aix 2013-11-05 09:23:08.000000000 +0000 +++ check-mk-1.2.6p12/vmstat_aix 2015-06-24 09:48:37.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . 
\ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. @@ -42,11 +42,16 @@ ( 'sy', "%d%%" % sy, '', '', '0', '100' ), ( 'id', "%d%%" % id, '', '', '0', '100' ), ( 'wa', "%d%%" % wa, '', '', '0', '100' ) ] - return (0, "OK - %d%%(us) %d%%(sy) %d%%(id) %d%%(wa)" % (us, sy, id, wa), perfdata) + return (0, "%d%%(us) %d%%(sy) %d%%(id) %d%%(wa)" % (us, sy, id, wa), perfdata) else: - return (3, "UNKNOWN - item %s not defined" % item) + return (3, "item %s not defined" % item) except: - return (3, "UNKNOWN - invalid output from plugin") + return (3, "invalid output from plugin") -check_info['vmstat_aix'] = (check_vmstat_aix, "vmstat %s", 1, no_inventory_possible) + +check_info["vmstat_aix"] = { + 'check_function': check_vmstat_aix, + 'service_description': 'vmstat %s', + 'has_perfdata': True, +} diff -Nru check-mk-1.2.2p3/vms_users check-mk-1.2.6p12/vms_users --- check-mk-1.2.2p3/vms_users 2013-11-05 09:23:08.000000000 +0000 +++ check-mk-1.2.6p12/vms_users 2015-06-24 09:48:37.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. @@ -50,9 +50,9 @@ perfdata = [("sessions", num_sessions)] if num_sessions: - return (0, "OK - Interactive users: " + ", ".join(infos), perfdata) + return (0, "Interactive users: " + ", ".join(infos), perfdata) else: - return (0, "OK - No interactive users", perfdata) + return (0, "No interactive users", perfdata) check_info['vms_users'] = { diff -Nru check-mk-1.2.2p3/vmware_state check-mk-1.2.6p12/vmware_state --- check-mk-1.2.2p3/vmware_state 2013-11-05 09:23:08.000000000 +0000 +++ check-mk-1.2.6p12/vmware_state 1970-01-01 00:00:00.000000000 +0000 @@ -1,104 +0,0 @@ -#!/usr/bin/python -# -*- encoding: utf-8; py-indent-offset: 4 -*- -# +------------------------------------------------------------------+ -# | ____ _ _ __ __ _ __ | -# | / ___| |__ ___ ___| | __ | \/ | |/ / | -# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | -# | | |___| | | | __/ (__| < | | | | . \ | -# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | -# | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | -# +------------------------------------------------------------------+ -# -# This file is part of Check_MK. -# The official homepage is at http://mathias-kettner.de/check_mk. -# -# check_mk is free software; you can redistribute it and/or modify it -# under the terms of the GNU General Public License as published by -# the Free Software Foundation in version 2. check_mk is distributed -# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- -# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A -# PARTICULAR PURPOSE. See the GNU General Public License for more de- -# ails. You should have received a copy of the GNU General Public -# License along with GNU Make; see the file COPYING. If not, write -# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, -# Boston, MA 02110-1301 USA. 
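A side note on the perfdata tuples visible in vmstat_aix and vms_users above: checks return them as (name, value[, warn, crit, min, max]), where empty strings leave a field unset. Roughly how such tuples end up in the classic name=value;warn;crit;min;max performance-data syntax (render_perfdata is a hypothetical helper for illustration; the real formatting happens in the monitoring core):

def render_perfdata(entries):
    parts = []
    for entry in entries:
        name, value = entry[0], entry[1]
        rest = ";".join(str(x) for x in entry[2:])
        parts.append("%s=%s%s" % (name, value, ";" + rest if rest else ""))
    return " ".join(parts)

print(render_perfdata([('us', '42%', '', '', '0', '100'), ('sessions', 3)]))
# -> us=42%;;;0;100 sessions=3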
- -# Example output from agent: -# <<>> -# [2009-11-12 10:40:30.086 'App' 3076453184 info] Current working directory: /usr/lib/check_mk_agent/plugins -# Found VM: -# moref:32 -# name:name_of_vm -# uuid:xxxxxxxxxxxx -# ipaddr:192.168.1.5 -# Found VM: -# moref:48 -# name:abcdef -# uuid:xxxxxxxxx -# ipaddr:12.34.56.78 -# [2009-11-12 10:40:30.215 'vcbVmName' 3076453184 warning] IP address not set. -# Found VM: -# moref:80 -# name:name_of_vm -# uuid:xxxxxxxxx -# ipaddr: - -def inventory_vmware_state(info): - inventory = [] - for line in info: - # print "LINE IS %s" % line[0] - if line[0].startswith("name:"): - vm_name = line[0][5:] - if line[0].startswith("ipaddr:"): - vm_ipaddr = line[0][7:] - # add machine to inventory (if IP address is not empty) - if vm_ipaddr != '': - inventory.append((vm_name, None)) - return inventory - -# 1. Variant: loop over all machines and -# remember the ip addresses of all machines. -# Then pick out the ip address of the machine -# we are looking for. -# -#def check_vmware_state(item, params, info): -# vm_ipaddr = {} -# for line in info: -# if line[0].startswith("name:"): -# vm_name = line[0][5:] -# -# if line[0].startswith("ipaddr:"): -# vm_ipaddr[vm_name] = line[0][7:] -# -# ip_addr = vm_ipaddr.get(item) -# if ip_addr == "": -# return(2, "CRIT - The Machine is DOWN") -# -# elif ip_addr == None: -# return (3, "UNKNOWN - no such machine") -# -# else: -# return(0, "OK - The Machine is UP (%s)" % ip_addr) - -# 2. Variant: loop over all machines. If -# we reach the machine we are looking for -# we do the check and return immediately. -# If we go through the loop without finding -# the machine, we return an UNKNOWN state. -def check_vmware_state(item, _no_params, info): - # item is the name of the machine. - for line in info: - if line[0].startswith("name:"): - vm_name = line[0][5:] - elif line[0].startswith("ipaddr:"): - if vm_name == item: - ip_addr = line[0][7:] - if ip_addr == "": - return (2, "CRIT - the machine is down") - else: - return (0, "OK - machine is up (%s)" % ip_addr) - return (3, "UNKNOWN - no such machine") - -check_info['vmware_state'] = (check_vmware_state, "VM %s", 0, inventory_vmware_state) -checkgroup_of['vmware_state'] = 'vm_state' diff -Nru check-mk-1.2.2p3/vxvm_enclosures check-mk-1.2.6p12/vxvm_enclosures --- check-mk-1.2.2p3/vxvm_enclosures 2013-11-05 09:23:08.000000000 +0000 +++ check-mk-1.2.6p12/vxvm_enclosures 2015-06-24 09:48:37.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. @@ -25,17 +25,17 @@ # Boston, MA 02110-1301 USA. 
# Example output: -#<<<vxvm_enclosures>>> -#LIO-Sechs aluadisk ALUAdisk CONNECTED ALUA 3 +# <<<vxvm_enclosures>>> +# LIO-Sechs aluadisk ALUAdisk CONNECTED ALUA 3 def check_vxvm_enclosures(item, params, info): for line in info: if line[0] == item: if line[3] != "CONNECTED": - return(2, "CRIT - Status is %s" % line[3]) + return(2, "Status is %s" % line[3]) else: - return(0, "OK - Status is CONNECTED") - return(3, "UKN - Item not found") + return(0, "Status is CONNECTED") + return(3, "Item not found") check_info["vxvm_enclosures"] = { diff -Nru check-mk-1.2.2p3/vxvm_multipath check-mk-1.2.6p12/vxvm_multipath --- check-mk-1.2.2p3/vxvm_multipath 2013-11-05 09:23:08.000000000 +0000 +++ check-mk-1.2.6p12/vxvm_multipath 2015-06-24 09:48:37.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. @@ -25,11 +25,11 @@ # Boston, MA 02110-1301 USA. # Example output: -#<<<vxvm_multipath>>> -#sda ENABLED OTHER_DISKS 1 1 0 other_disks -#LIO-Sechs_0 ENABLED aluadisk 1 1 0 LIO-Sechs -#LIO-Sechs_1 ENABLED aluadisk 1 1 0 LIO-Sechs -#LIO-Sechs_2 ENABLED aluadisk 1 1 0 LIO-Sechs +# <<<vxvm_multipath>>> +# sda ENABLED OTHER_DISKS 1 1 0 other_disks +# LIO-Sechs_0 ENABLED aluadisk 1 1 0 LIO-Sechs +# LIO-Sechs_1 ENABLED aluadisk 1 1 0 LIO-Sechs +# LIO-Sechs_2 ENABLED aluadisk 1 1 0 LIO-Sechs # def check_vxvm_multipath(item, params, info): @@ -40,9 +40,9 @@ state = 1 elif int(paths_inact) > 0 and float(paths_inact) > float(paths) /2: state = 2 - return(state, nagios_state_names[state] + " - " + "Status: %s, (%d/%d) Paths to enclosure %s enabled" % (status_txt, int(paths_act), int(paths), enclosure )) + return(state, "Status: %s, (%d/%d) Paths to enclosure %s enabled" % (status_txt, int(paths_act), int(paths), enclosure )) - return(3, "UKN - Item not found") + return(3, "Item not found") check_info["vxvm_multipath"] = { diff -Nru check-mk-1.2.2p3/vxvm_objstatus check-mk-1.2.6p12/vxvm_objstatus --- check-mk-1.2.2p3/vxvm_objstatus 2013-11-05 09:23:08.000000000 +0000 +++ check-mk-1.2.6p12/vxvm_objstatus 2015-06-24 09:48:37.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. @@ -68,9 +68,9 @@ if error == False: text = ["%s: OK" % volume] messages.append(", ".join(text)) - return(state, nagios_state_names[state] + ' - ' + ', '.join(messages)) + return(state, ', '.join(messages)) - return(2, "CRIT - Group not found") + return(2, "Group not found") check_info["vxvm_objstatus"] = { diff -Nru check-mk-1.2.2p3/wagner_titanus_topsense check-mk-1.2.6p12/wagner_titanus_topsense --- check-mk-1.2.2p3/wagner_titanus_topsense 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/wagner_titanus_topsense 2015-06-24 09:48:37.000000000 +0000 @@ -0,0 +1,336 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | .
\ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. + +wagner_titanus_topsense_info = [(".1.3.6.1.2.1.1", [1, # sysDescr 0 + 3, # sysUpTimeInstance 1 + 4, # sysContact 2 + 5, # sysName 3 + 6]), # sysLocation 4 + (".1.3.6.1.4.1.34187.21501.1.1", [1, # company 0 + 2, # model 1 + 3, # revision 2 + 1000, # pswMainAlarmD1 3 + 1001, # pswPreAlarmD1 4 + 1002, # pswInfAlarmD1 5 + 1003, # pswMainAlarmD2 6 + 1004, # pswPreAlarmD2 7 + 1005, # pswInfAlarmD2 8 + 1006]), # pswFailureDisplayOn 9 + # off(0), on(1) + + (".1.3.6.1.4.1.34187.21501.2.1", [24584008, # tsLSNbusOnline 0 + # offline(0),online(1) + 245810000, # tsBarGraph1String + 245820000, # tsBarGraph2String + 245950000, # tsD1ChamberValueString + 246090000, # tsD2ChamberValueString + 245960000, # tsD1AirFlowDeviationString + 246100000, # tsD2AirFlowDeviationString + 245970000, # tsD1AirTemperatureString + 246110000]), # tsD2AirTemperatureString + ] + +def wagner_titanus_topsense_scan(oid): + return oid(".1.3.6.1.2.1.1.2.0").startswith(".1.3.6.1.4.1.34187.21501") + +# .--titanus info--------------------------------------------------------. +# | _ _ _ _ __ | +# | | |_(_) |_ __ _ _ __ _ _ ___ (_)_ __ / _| ___ | +# | | __| | __/ _` | '_ \| | | / __| | | '_ \| |_ / _ \ | +# | | |_| | || (_| | | | | |_| \__ \ | | | | | _| (_) | | +# | \__|_|\__\__,_|_| |_|\__,_|___/ |_|_| |_|_| \___/ | +# | | +# '----------------------------------------------------------------------' + +def inventory_wagner_titanus_topsense_info(info): + return [ (None, None) ] + +def check_wagner_titanus_topsense_info(item, _no_params, info): + ts_lsn_bus = info[2][0][0] + if ts_lsn_bus == "0": + ts_lsn_bus = "offline" + elif ts_lsn_bus == "1": + ts_lsn_bus = "online" + else: + ts_lsn_bus = "unknown" + + message = "System: " + info[0][0][0] + message += ", Uptime: " + get_age_human_readable(int(info[0][0][1]) / 100) + message += ", System Name: " + info[0][0][3] + message += ", System Contact: " + info[0][0][2] + message += ", System Location: " + info[0][0][4] + message += ", Company: " + info[1][0][0] + message += ", Model: " + info[1][0][1] + message += ", Revision: " + info[1][0][2] + message += ", LSNi bus: " + ts_lsn_bus + return 0, message + +check_info["wagner_titanus_topsense.info"] = { + "check_function" : check_wagner_titanus_topsense_info, + "inventory_function" : inventory_wagner_titanus_topsense_info, + "service_description" : "Topsense Info", + "has_perfdata" : False, + "snmp_info" : wagner_titanus_topsense_info, + "snmp_scan_function" : wagner_titanus_topsense_scan +} + +#. +# .--overall status------------------------------------------------------. 
+# | _ _ _ _ | +# | _____ _____ _ __ __ _| | | ___| |_ __ _| |_ _ _ ___ | +# | / _ \ \ / / _ \ '__/ _` | | | / __| __/ _` | __| | | / __| | +# | | (_) \ V / __/ | | (_| | | | \__ \ || (_| | |_| |_| \__ \ | +# | \___/ \_/ \___|_| \__,_|_|_| |___/\__\__,_|\__|\__,_|___/ | +# | | +# '----------------------------------------------------------------------' + + +def inventory_wagner_titanus_topsense_overall_status(info): + return [ (None, None) ] + +def check_wagner_titanus_topsense_overall_status(item, _no_params, info): + psw_failure = info[1][0][9] + status = 3 + if psw_failure == "0": + message = "Overall Status reports OK" + status = 0 + else: + message = "Overall Status reports a problem" + status = 2 + return status, message + +check_info["wagner_titanus_topsense.overall_status"] = { + "check_function" : check_wagner_titanus_topsense_overall_status, + "inventory_function" : inventory_wagner_titanus_topsense_overall_status, + "service_description" : "Overall Status", + "has_perfdata" : False, + "snmp_info" : wagner_titanus_topsense_info, + "snmp_scan_function" : wagner_titanus_topsense_scan +} + +#. +# .--alarm---------------------------------------------------------------. +# | _ | +# | __ _| | __ _ _ __ _ __ ___ | +# | / _` | |/ _` | '__| '_ ` _ \ | +# | | (_| | | (_| | | | | | | | | | +# | \__,_|_|\__,_|_| |_| |_| |_| | +# | | +# '----------------------------------------------------------------------' + +def inventory_wagner_titanus_topsense_alarm(info): + return [ ("1", None), ("2", None) ] + +def check_wagner_titanus_topsense_alarm(item, _no_params, info): + if item == "1": + main_alarm = info[1][0][3] + pre_alarm = info[1][0][4] + info_alarm = info[1][0][5] + elif item == "2": + main_alarm = info[1][0][6] + pre_alarm = info[1][0][7] + info_alarm = info[1][0][8] + else: + return 3, "Alarm Detector %s not found in SNMP" % item + + status = 0 + message = "No Alarm" + if info_alarm != "0": + message = "Info Alarm" + status = 1 + if pre_alarm != "0": + message = "Pre Alarm" + status = 1 + if main_alarm != "0": + message = "Main Alarm: Fire" + status = 2 + + return status, message + +check_info["wagner_titanus_topsense.alarm"] = { + "check_function" : check_wagner_titanus_topsense_alarm, + "inventory_function" : inventory_wagner_titanus_topsense_alarm, + "service_description" : "Alarm Detector %s", + "has_perfdata" : False, + "snmp_info" : wagner_titanus_topsense_info, + "snmp_scan_function" : wagner_titanus_topsense_scan +} + +#. +# .--smoke percent-------------------------------------------------------. 
+# | _ _ | +# | ___ _ __ ___ ___ | | _____ _ __ ___ _ __ ___ ___ _ __ | |_ | +# | / __| '_ ` _ \ / _ \| |/ / _ \ | '_ \ / _ \ '__/ __/ _ \ '_ \| __| | +# | \__ \ | | | | | (_) | < __/ | |_) | __/ | | (_| __/ | | | |_ | +# | |___/_| |_| |_|\___/|_|\_\___| | .__/ \___|_| \___\___|_| |_|\__| | +# | |_| | +# '----------------------------------------------------------------------' + +def inventory_wagner_titanus_topsense_smoke(info): + return [ ("1", None), ("2", None) ] + +def check_wagner_titanus_topsense_smoke(item, _no_params, info): + if item == "1": + smoke_perc = float(info[2][0][1]) + elif item == "2": + smoke_perc = float(info[2][0][2]) + else: + return 3, "Smoke Detector %s not found in SNMP" % item + + perfdata = [("smoke_perc", smoke_perc)] + if smoke_perc > 0: + status = 2 + else: + status = 0 + + return status, "%0.6f%% smoke detected" % smoke_perc, perfdata + +check_info["wagner_titanus_topsense.smoke"] = { + "check_function" : check_wagner_titanus_topsense_smoke, + "inventory_function" : inventory_wagner_titanus_topsense_smoke, + "service_description" : "Smoke Detector %s", + "has_perfdata" : True, + "snmp_info" : wagner_titanus_topsense_info, + "snmp_scan_function" : wagner_titanus_topsense_scan +} + +#. +# .--chamber deviation---------------------------------------------------. +# | _ _ _ | +# | ___| |__ __ _ _ __ ___ | |__ ___ _ __ __| | _____ __ | +# | / __| '_ \ / _` | '_ ` _ \| '_ \ / _ \ '__| / _` |/ _ \ \ / / | +# | | (__| | | | (_| | | | | | | |_) | __/ | | (_| | __/\ V / | +# | \___|_| |_|\__,_|_| |_| |_|_.__/ \___|_| \__,_|\___| \_/ | +# | | +# '----------------------------------------------------------------------' + +def inventory_wagner_titanus_topsense_chamber_deviation(info): + return [ ("1", None), ("2", None) ] + +def check_wagner_titanus_topsense_chamber_deviation(item, _no_params, info): + if item == "1": + chamber_deviation = float(info[2][0][3]) + elif item == "2": + chamber_deviation = float(info[2][0][4]) + else: + return 3, "Chamber Deviation Detector %s not found in SNMP" % item + + perfdata = [("chamber_deviation", chamber_deviation)] + + return 0, "%0.6f%% Chamber Deviation" % chamber_deviation, perfdata + +check_info["wagner_titanus_topsense.chamber_deviation"] = { + "check_function" : check_wagner_titanus_topsense_chamber_deviation, + "inventory_function" : inventory_wagner_titanus_topsense_chamber_deviation, + "service_description" : "Chamber Deviation Detector %s", + "has_perfdata" : True, + "snmp_info" : wagner_titanus_topsense_info, + "snmp_scan_function" : wagner_titanus_topsense_scan +} + +#. +# .--air flow deviation--------------------------------------------------. 
+# | _ __ _ _ | +# | __ _(_)_ __ / _| | _____ __ __| | _____ __ | +# | / _` | | '__| | |_| |/ _ \ \ /\ / / / _` |/ _ \ \ / / | +# | | (_| | | | | _| | (_) \ V V / | (_| | __/\ V / | +# | \__,_|_|_| |_| |_|\___/ \_/\_/ \__,_|\___| \_/ | +# | | +# '----------------------------------------------------------------------' + +wagner_titanus_topsense_airflow_deviation_default_values = (-20.0, -20.0, 20.0, 20.0) + +def inventory_wagner_titanus_topsense_airflow_deviation(info): + return [ ("1", "wagner_titanus_topsense_airflow_deviation_default_values"), + ("2", "wagner_titanus_topsense_airflow_deviation_default_values") ] + +def check_wagner_titanus_topsense_airflow_deviation(item, params, info): + lower_crit, lower_warn, upper_warn, upper_crit = params + status=0 + if item == "1": + airflow_deviation = float(info[2][0][5]) + elif item == "2": + airflow_deviation = float(info[2][0][6]) + else: + return 3, "Airflow Deviation Detector %s not found in SNMP" % item + + if airflow_deviation >= upper_warn or airflow_deviation <= lower_warn: + status = 1 + if airflow_deviation >= upper_crit or airflow_deviation <= lower_crit: + status = 2 + + perfdata = [("airflow_deviation", airflow_deviation, upper_warn, upper_crit, 0)] + + return status, "Airflow Deviation is %0.6f%%" % airflow_deviation, perfdata + +check_info["wagner_titanus_topsense.airflow_deviation"] = { + "check_function" : check_wagner_titanus_topsense_airflow_deviation, + "inventory_function" : inventory_wagner_titanus_topsense_airflow_deviation, + "service_description" : "Airflow Deviation Detector %s", + "has_perfdata" : True, + "group" : "airflow_deviation", + "snmp_info" : wagner_titanus_topsense_info, + "snmp_scan_function" : wagner_titanus_topsense_scan +} + +#. +# .--air temp------------------------------------------------------------. +# | _ _ | +# | __ _(_)_ __ | |_ ___ _ __ ___ _ __ | +# | / _` | | '__| | __/ _ \ '_ ` _ \| '_ \ | +# | | (_| | | | | || __/ | | | | | |_) | | +# | \__,_|_|_| \__\___|_| |_| |_| .__/ | +# | |_| | +# '----------------------------------------------------------------------' + +wagner_titanus_topsense_temperature_default_values = (30, 35) + +def inventory_wagner_titanus_topsense_temp(info): + return [ ("1", "wagner_titanus_topsense_temperature_default_values"), + ("2", "wagner_titanus_topsense_temperature_default_values") ] + +def check_wagner_titanus_topsense_temp(item, params, info): + if item == "1": + temp = float(info[2][0][7]) + elif item == "2": + temp = float(info[2][0][8]) + else: + return + + return check_temperature(temp, params) + +check_info["wagner_titanus_topsense.temp"] = { + "check_function" : check_wagner_titanus_topsense_temp, + "inventory_function" : inventory_wagner_titanus_topsense_temp, + "service_description" : "Temperature %s", + "has_perfdata" : True, + "group" : "room_temperature", + "snmp_info" : wagner_titanus_topsense_info, + "snmp_scan_function" : wagner_titanus_topsense_scan, + "includes" : [ "temperature.include" ], +} + +#. 
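A note on the snmp_scan_function shared by all wagner_titanus_topsense subchecks above: it receives a callable that fetches single OIDs from the device and decides whether inventory should consider the device at all. A quick illustration with a faked OID lookup (the device answer below is made up; this assumes the definitions from the file above are loaded):

fake_device = {
    # sysObjectID of a Titanus device starts with the Wagner enterprise OID
    ".1.3.6.1.2.1.1.2.0": ".1.3.6.1.4.1.34187.21501.1.1",
}
print(wagner_titanus_topsense_scan(fake_device.get))  # -> True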
diff -Nru check-mk-1.2.2p3/wagner_titanus_topsense.airflow_deviation check-mk-1.2.6p12/wagner_titanus_topsense.airflow_deviation --- check-mk-1.2.2p3/wagner_titanus_topsense.airflow_deviation 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/wagner_titanus_topsense.airflow_deviation 2015-06-24 09:48:36.000000000 +0000 @@ -0,0 +1,53 @@ +title: Wagner Titanus Top Sens: Airflow Deviation from Set Point +agents: snmp +catalog: hw/environment/wagner +license: GPL +distribution: check_mk +description: + Checks the Airflow Deviation from Set Point of Detectors in + Wagner Titanus Top Sens devices. The value is in percent of the chosen + air flow limits within the device at which an air flow fault condition + occurs. + + Returns {WARN} or {CRIT} if the value is out of the given ranges and + {OK} otherwise. + +inventory: + Creates two checks per device, one for every Detector. + +item: + The ID of the detector. + +perfdata: + One value: The airflow deviation in percent, together with its upper + levels for warning and critical. + +examples: + # set other default levels + wagner_titanus_topsense_airflow_deviation_default_values = (-30.0, -10.0, 10.0, 30.0) + + # check detector 1 with default levels + checks += [ + ("ras", "wagner_titanus_topsense.airflow_deviation", '1', wagner_titanus_topsense_airflow_deviation_default_values) + ] + + # check detector 1 with individual levels + checks += [ + ( "ras", "wagner_titanus_topsense.airflow_deviation", '1', (-40.0, -30.0, 30.0, 40.0) ) + ] + +[parameters] +parameters (tuple): Containing the four numbers lower_crit, lower_warn, upper_warn, upper_crit. + If the airflow deviation in percent from the set point + is lower than or equal to lower_crit, the check returns {CRIT}. + If the airflow deviation in percent from the set point + is lower than or equal to lower_warn, the check returns {WARN}. + If the airflow deviation in percent from the set point + is higher than or equal to upper_warn, the check returns {WARN}. + If the airflow deviation in percent from the set point + is higher than or equal to upper_crit, the check returns {CRIT}. + +[configuration] +wagner_titanus_topsense_airflow_deviation_default_values(float, float, float, float): + Default values for lower_crit, lower_warn, upper_warn, upper_crit, + preset to (-20.0, -20.0, 20.0, 20.0) diff -Nru check-mk-1.2.2p3/wagner_titanus_topsense.alarm check-mk-1.2.6p12/wagner_titanus_topsense.alarm --- check-mk-1.2.2p3/wagner_titanus_topsense.alarm 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/wagner_titanus_topsense.alarm 2015-06-24 09:48:36.000000000 +0000 @@ -0,0 +1,17 @@ +title: Wagner Titanus Top Sens: Alarms Triggered +agents: snmp +catalog: hw/environment/wagner +license: GPL +distribution: check_mk +description: + Checks if alarms have been triggered on a Wagner Titanus Top Sens device. + + If pswMainAlarm is triggered, the check returns {CRIT}, if pswPreAlarm or + pswInfAlarm is triggered, it returns {WARN}, and if none of these is triggered + it returns {OK}. + +item: + The ID of the detector. + +inventory: + Creates two checks on every Top Sense device, one for each detector.
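The four-level semantics described under [parameters] above can be restated compactly; airflow_state is a hypothetical helper mirroring the check's logic, not part of the shipped code:

def airflow_state(value, params):
    lower_crit, lower_warn, upper_warn, upper_crit = params
    if value <= lower_crit or value >= upper_crit:
        return 2  # CRIT
    if value <= lower_warn or value >= upper_warn:
        return 1  # WARN
    return 0      # OK

print(airflow_state(-25.0, (-30.0, -10.0, 10.0, 30.0)))  # -> 1 (WARN)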
diff -Nru check-mk-1.2.2p3/wagner_titanus_topsense.chamber_deviation check-mk-1.2.6p12/wagner_titanus_topsense.chamber_deviation --- check-mk-1.2.2p3/wagner_titanus_topsense.chamber_deviation 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/wagner_titanus_topsense.chamber_deviation 2015-06-24 09:48:36.000000000 +0000 @@ -0,0 +1,21 @@ +title: Wagner Titanus Top Sens: Chamber Deviation from Calibration Point +agents: snmp +catalog: hw/environment/wagner +license: GPL +distribution: check_mk +description: + Reports the Chamber Deviation from Calibration Point of Detectors in + Wagner Titanus Top Sens devices. The value is in percent of the chamber + deviation limits at which a chamber fault condition occurs. + + The check is informational only and always returns {OK}. + +inventory: + Creates two checks, one for every Detector. + +item: + The ID of the detector. + +perfdata: + One value: The chamber deviation in percent from the chamber deviation + limits. diff -Nru check-mk-1.2.2p3/wagner_titanus_topsense.info check-mk-1.2.6p12/wagner_titanus_topsense.info --- check-mk-1.2.2p3/wagner_titanus_topsense.info 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/wagner_titanus_topsense.info 2015-06-24 09:48:36.000000000 +0000 @@ -0,0 +1,16 @@ +title: Wagner Titanus Top Sens: System Infos +agents: snmp +catalog: hw/environment/wagner +license: GPL +distribution: check_mk +description: + Gathers some system information from Wagner Titanus Top Sens devices + and displays it. In detail these are: + Uptime, System Name, System Contact, System Location, Company, Model, + Revision and LSNi bus status. + + This check is for information only and always returns {OK}. + +inventory: + Creates exactly one check for each Top Sense device called + Topsense Info. diff -Nru check-mk-1.2.2p3/wagner_titanus_topsense.overall_status check-mk-1.2.6p12/wagner_titanus_topsense.overall_status --- check-mk-1.2.2p3/wagner_titanus_topsense.overall_status 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/wagner_titanus_topsense.overall_status 2015-06-24 09:48:36.000000000 +0000 @@ -0,0 +1,14 @@ +title: Wagner Titanus Top Sens: Overall Status +agents: snmp +catalog: hw/environment/wagner +license: GPL +distribution: check_mk +description: + Checks the Overall Status of Wagner Titanus Top Sens devices. + + If pswFailureDisplay reports that it is switched off, the check returns {OK}, + and {CRIT} otherwise. + +inventory: + Creates exactly one check for each Top Sense device called + Overall Status. diff -Nru check-mk-1.2.2p3/wagner_titanus_topsense.smoke check-mk-1.2.6p12/wagner_titanus_topsense.smoke --- check-mk-1.2.2p3/wagner_titanus_topsense.smoke 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/wagner_titanus_topsense.smoke 2015-06-24 09:48:36.000000000 +0000 @@ -0,0 +1,18 @@ +title: Wagner Titanus Top Sens: Smoke Detectors +agents: snmp +catalog: hw/environment/wagner +license: GPL +distribution: check_mk +description: + Checks the Levels reported by Smoke Detectors of Wagner Titanus Top Sens devices. + + The check returns {OK} if the level is 0.000000 % and {CRIT} otherwise. + +inventory: + Creates two checks, one for every Smoke Detector. + +item: + The ID of the detector. + +perfdata: + One value: The level of smoke in percent.
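The smoke rule stated above is deliberately strict: any measurable smoke level is {CRIT}. In code form, equivalent to the check function earlier in this diff (the loop is purely illustrative):

for smoke_perc in (0.0, 0.000001):
    print("%f%% -> state %d" % (smoke_perc, 2 if smoke_perc > 0 else 0))
# -> 0.000000% -> state 0, then 0.000001% -> state 2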
diff -Nru check-mk-1.2.2p3/wagner_titanus_topsense.temp check-mk-1.2.6p12/wagner_titanus_topsense.temp --- check-mk-1.2.2p3/wagner_titanus_topsense.temp 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/wagner_titanus_topsense.temp 2015-06-24 09:48:36.000000000 +0000 @@ -0,0 +1,43 @@ +title: Wagner Titanus Top Sens: Temperature +agents: snmp +catalog: hw/environment/wagner +license: GPL +distribution: check_mk +description: + Checks the temperature measured by Detectors in Wagner Titanus Top Sens devices. + + Returns {WARN} or {CRIT} if the value exceeds the given levels and {OK} otherwise. + +inventory: + Creates two checks per device, one for every Detector. + +item: + The ID of the detector. + +perfdata: + One value: The temperature in degrees Celsius, together with levels for warning + and critical. + +examples: + # set other default levels + wagner_titanus_topsense_temperature_default_values = (25, 40) + + # check detector 1 with default levels + checks += [ + ("ras", "wagner_titanus_topsense.temp", '1', wagner_titanus_topsense_temperature_default_values) + ] + + # check detector 1 with individual levels + checks += [ + ( "ras", "wagner_titanus_topsense.temp", '1', (20, 50) ) + ] + +[parameters] +parameters (tuple): A pair of numbers for the warn and crit level. + If the temperature in degrees Celsius is higher than or equal to warn, + the check returns {WARN}. If the temperature in degrees Celsius is higher + than or equal to crit, the check returns {CRIT}. + +[configuration] +wagner_titanus_topsense_temperature_default_values(int, int): + Default levels for warning and critical, preset to (30, 35) diff -Nru check-mk-1.2.2p3/waitmax.c check-mk-1.2.6p12/waitmax.c --- check-mk-1.2.2p3/waitmax.c 2013-10-12 17:49:41.000000000 +0000 +++ check-mk-1.2.6p12/waitmax.c 1970-01-01 00:00:00.000000000 +0000 @@ -1,157 +0,0 @@ -// +------------------------------------------------------------------+ -// | ____ _ _ __ __ _ __ | -// | / ___| |__ ___ ___| | __ | \/ | |/ / | -// | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | -// | | |___| | | | __/ (__| < | | | | . \ | -// | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | -// | | -// | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | -// +------------------------------------------------------------------+ -// -// This file is part of Check_MK. -// The official homepage is at http://mathias-kettner.de/check_mk. -// -// check_mk is free software; you can redistribute it and/or modify it -// under the terms of the GNU General Public License as published by -// the Free Software Foundation in version 2. check_mk is distributed -// in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- -// out even the implied warranty of MERCHANTABILITY or FITNESS FOR A -// PARTICULAR PURPOSE. See the GNU General Public License for more de- -// ails. You should have received a copy of the GNU General Public -// License along with GNU Make; see the file COPYING. If not, write -// to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, -// Boston, MA 02110-1301 USA.
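The deleted waitmax.c whose source follows implements a run-with-timeout wrapper: execute a program, signal it if it exceeds MAXTIME, and encode the outcome in the exit code (255 on timeout, 128 plus the signal number if it died of another signal, 253 if the program cannot be started). Roughly the same semantics as a Python sketch, using Python 3's subprocess timeout instead of the C original's SIGALRM handling (an illustration, not a drop-in replacement):

import signal
import subprocess
import sys

def waitmax(maxtime, argv, signum=signal.SIGTERM):
    try:
        proc = subprocess.Popen(argv)
    except OSError:
        return 253                       # could not execute the program
    try:
        proc.wait(timeout=maxtime)
    except subprocess.TimeoutExpired:
        proc.send_signal(signum)         # timeout: kill and report 255
        proc.wait()
        return 255
    if proc.returncode < 0:              # child died of some other signal
        return 128 - proc.returncode
    return proc.returncode

if __name__ == "__main__":
    sys.exit(waitmax(int(sys.argv[1]), sys.argv[2:]))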
- -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -/* macros for using write(2) instead of fprintf(stderr, ) */ -#define out(text) write(2, text, strlen(text)); - -int g_pid; -int g_timeout = 0; -int g_signum = 15; - -struct option long_options[] = { - { "version" , no_argument, 0, 'V' }, - { "help" , no_argument, 0, 'h' }, - { "signal" , required_argument, 0, 's' }, - { 0, 0, 0, 0 } }; - -void version() -{ - out("waitmax version 1.1\n" - "Copyright Mathias Kettner 2008\n" - "This is free software; see the source for copying conditions. There is NO\n" - "warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n"); - exit(0); -} - - -void usage() -{ - out("Usage: waitmax [-s SIGNUM] MAXTIME PROGRAM [ARGS...]\n" - "\n" - "Execute PROGRAM as a subprocess. If PROGRAM does not exit before MAXTIME\n" - "seconds, it will be killed with SIGTERM or an alternative signal.\n" - "\n" - " -s, --signal SIGNUM kill with SIGNUM on timeout\n" - " -h, --help this help\n" - " -V, --version show version and exit\n\n"); - exit(1); -} - - -void signalhandler(int signum) -{ - if (0 == kill(g_pid, g_signum)) - g_timeout = 1; -} - - -int main(int argc, char **argv) -{ - int indexptr=0; - int ret; - setenv("POSIXLY_CORRECT", "true", 0); - while (0 <= (ret = getopt_long(argc, argv, "Vhs:", long_options, &indexptr))) { - switch (ret) - { - case 'V': - version(); - - case 'h': - usage(); - - case 's': - g_signum = strtoul(optarg, 0, 10); - if (g_signum < 1 || g_signum > 32) { - out("Signal number must be between 1 and 32.\n"); - exit(1); - } - break; - - default: - usage(argv[0]); - exit(1); - break; - } - } - - if (optind + 1 >= argc) usage(); - - int maxtime = atoi(argv[optind]); - if (maxtime <= 0) usage(); - - g_pid = fork(); - if (g_pid == 0) { - signal(SIGALRM, signalhandler); - execvp(argv[optind + 1], argv + optind + 1); - out("Cannot execute "); - out(argv[optind + 1]); - out(": "); - out(strerror(errno)); - out("\n"); - exit(253); - } - - signal(SIGALRM, signalhandler); - alarm(maxtime); - int status; - while (1) { - int pid = waitpid(g_pid, &status, 0); - if (pid <= 0) { - if (errno == EINTR) continue; // interrupted by alarm - else - out("Strange: waitpid() fails: "); - out(strerror(errno)); - out("\n"); - exit(1); - } - else break; - } - - if (WIFEXITED(status)) { - int exitcode = WEXITSTATUS(status); - return exitcode; - } - else if (WIFSIGNALED(status)) { - int signum = WTERMSIG(status); - if (g_timeout) - return 255; - else - return 128 + signum; - } - else { - out("Strange: program neither exited nor was signalled.\n"); - return 254; - } -} diff -Nru check-mk-1.2.2p3/websphere_mq_channels check-mk-1.2.6p12/websphere_mq_channels --- check-mk-1.2.2p3/websphere_mq_channels 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/websphere_mq_channels 2015-09-21 10:59:54.000000000 +0000 @@ -0,0 +1,74 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk.
+# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. + +# <<<websphere_mq_channels>>> +# 0 CHANNEL(C000052.C000051) 5000 Unknown +# 0 CHANNEL(C000052.CATSOS.03) 5000 RUNNING +# 0 CHANNEL(C000052.DXUZ001) 5000 RUNNING +# 0 CHANNEL(C000052.N000011) 5000 RUNNING +# 0 CHANNEL(C000052.SI0227450.T1) 10000 RUNNING +# 0 CHANNEL(C000052.SOX10.T1) 10000 STOPPED +# 0 CHANNEL(C000052.SV1348520.T1) 5000 RUNNING +# 0 CHANNEL(C000052.SV2098742.T1) 5000 Unknown + +# Columns: number of messages, channel name, max possible messages, status + +websphere_mq_channels_default_levels = ( 900, 1000 ) +def inventory_websphere_mq_channels(info): + return [ ( x[1], 'websphere_mq_channels_default_levels' ) for x in info ] + +def check_websphere_mq_channels(item, params, info): + for messages, channel, max_messages, status in info: + if channel == item: + warn, crit = params + messages = saveint(messages) + max_messages = saveint(max_messages) + state = 0 + message = "%d/%d Messages" % ( messages, max_messages) + if messages >= crit: + state = 2 + message += "(!!)" + elif messages >= warn: + state = 1 + message += "(!)" + + message += ", Channel State: %s" % status + if status not in [ 'RUNNING' ]: + state = 2 + message += "(!!)" + perf = [ ('messages', messages, warn, crit, 0, max_messages) ] + return state, message, perf + return 3, "Channel not found in output" + + +check_info["websphere_mq_channels"] = { + "group" : "websphere_mq", + "check_function" : check_websphere_mq_channels, + "inventory_function" : inventory_websphere_mq_channels, + "service_description" : "MQ Channel %s", + "has_perfdata" : True, +} + diff -Nru check-mk-1.2.2p3/websphere_mq_queues check-mk-1.2.6p12/websphere_mq_queues --- check-mk-1.2.2p3/websphere_mq_queues 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/websphere_mq_queues 2015-09-21 10:59:54.000000000 +0000 @@ -0,0 +1,76 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails.
You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. + +# <<<websphere_mq_queues>>> +# 0 CD.ISS.CATSOS.REPLY.C000052 5000 +# 0 CD.ISS.COBA.REPLY.C000052 5000 +# 0 CD.ISS.DEUBA.REPLY.C000052 5000 +# 0 CD.ISS.TIQS.REPLY.C000052 5000 +# 0 CD.ISS.VWD.REPLY.C000052 5000 + +# Old output +# <<<websphere_mq_queues>>> +# 0 CD.ISS.CATSOS.REPLY.C000052 +# 0 CD.ISS.COBA.REPLY.C000052 +# 0 CD.ISS.DEUBA.REPLY.C000052 +# 0 CD.ISS.TIQS.REPLY.C000052 +# 0 CD.ISS.VWD.REPLY.C000052 + +websphere_mq_queues_default_levels = ( 1000, 1200 ) + +def inventory_websphere_mq_queues(info): + return [ ( x[1], 'websphere_mq_queues_default_levels' ) for x in info ] + +def check_websphere_mq_queues(item, params, info): + for line in info: + queue = line[1] + if queue == item: + messages = int(line[0]) + if len(line) >= 3: + queue_length = int(line[2]) + length_info = "/%d" % queue_length + else: + length_info = "" + message = "%d%s messages in queue" % ( messages, length_info ) + + warn, crit = params + perf = [ ( "queue", messages, warn, crit ) ] + if messages >= crit: + return 2, message, perf + if messages >= warn: + return 1, message, perf + return 0, message, perf + + return 3, "No message queue named %s in agent output" % item + +check_info["websphere_mq_queues"] = { + "group" : "websphere_mq", + "check_function" : check_websphere_mq_queues, + "inventory_function" : inventory_websphere_mq_queues, + "service_description" : "MQ Queue", + "has_perfdata" : True, +} + Binary files /tmp/tmpz1J_py/xquijR6Ryw/check-mk-1.2.2p3/web.tar.gz and /tmp/tmpz1J_py/GXS8RRKTxu/check-mk-1.2.6p12/web.tar.gz differ diff -Nru check-mk-1.2.2p3/win_bios check-mk-1.2.6p12/win_bios --- check-mk-1.2.2p3/win_bios 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/win_bios 2015-09-21 10:59:54.000000000 +0000 @@ -0,0 +1,79 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA.
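Running one of the example lines above through the new websphere_mq_queues check shows the whole path from agent line to check result (this assumes the definitions from this diff are loaded; info arrives pre-split on whitespace):

info = [["0", "CD.ISS.CATSOS.REPLY.C000052", "5000"]]
print(check_websphere_mq_queues("CD.ISS.CATSOS.REPLY.C000052",
                                websphere_mq_queues_default_levels, info))
# -> (0, '0/5000 messages in queue', [('queue', 0, 1000, 1200)])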
+ +# Example output: +# <<<win_bios:sep(58)>>> +# Manufacturer : innotek GmbH +# Name : Default System BIOS +# BIOSVersion : {VBOX - 1} +# ListOfLanguages : +# PrimaryBIOS : True +# ReleaseDate : 20061201000000.000000+000 +# SMBIOSBIOSVersion : VirtualBox +# SMBIOSMajorVersion : 2 +# SMBIOSMinorVersion : 5 + +def inv_win_bios(info): + node = inv_tree("hardware.bios.") + smbios_version = "" + bios_version = "" + for line in info: + # Separator : seems not ideal. Some systems have : in the BIOS version + if len(line) > 2: + line = [ line[0], ":".join(line[1:]) ] + varname, value = line + varname = re.sub(" *","", varname) + value = re.sub("^ ", "", value) + if varname == "BIOSVersion": + bios_version = value + elif varname == "SMBIOSBIOSVersion": + smbios_version = value + elif varname == "SMBIOSMajorVersion": + major_version = value + elif varname == "SMBIOSMinorVersion": + minor_version = value + elif varname == "ReleaseDate": + # The ReleaseDate property indicates the release date of the + # Win32 BIOS in the Coordinated Universal Time (UTC) format + # of YYYYMMDDHHMMSS.MMMMMM(+-)OOO. + value = value.replace("*", "0") + node["date"] = int(time.mktime(time.strptime(value.split(".")[0], "%Y%m%d%H%M%S"))) + elif varname == "Manufacturer": + node["vendor"] = value + elif varname == "Name": + node["model"] = value + + if smbios_version: + node["version"] = smbios_version + " " + major_version + \ + "." + minor_version + else: + node["version"] = bios_version + +inv_info['win_bios'] = { + "inv_function" : inv_win_bios, + "unicode" : True, +} + diff -Nru check-mk-1.2.2p3/win_cpuinfo check-mk-1.2.6p12/win_cpuinfo --- check-mk-1.2.2p3/win_cpuinfo 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/win_cpuinfo 2015-09-21 10:59:54.000000000 +0000 @@ -0,0 +1,129 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA.
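The ReleaseDate handling in inv_win_bios above relies on the WMI datetime layout YYYYMMDDHHMMSS.MMMMMM(+-)OOO; only the part before the dot is parsed. Isolated, the conversion looks like this (sample value taken from the example output above; note that time.mktime interprets the value in the local timezone):

import time

value = "20061201000000.000000+000"
stamp = int(time.mktime(time.strptime(value.split(".")[0], "%Y%m%d%H%M%S")))
print("%d (%s)" % (stamp, time.strftime("%Y-%m-%d", time.localtime(stamp))))
# -> e.g. 1164927600 (2006-12-01), depending on the local timezone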
+ +# Example output: +# <<<win_cpuinfo:sep(58)>>> +# Name : Intel(R) Core(TM)2 Duo CPU T9600 @ 2.80GHz +# Manufacturer : GenuineIntel +# Caption : Intel64 Family 6 Model 23 Stepping 10 +# DeviceID : CPU0 +# MaxClockSpeed : 2783 +# DataWidth : 64 +# L2CacheSize : +# L3CacheSize : 0 +# NumberOfCores : 1 +# NumberOfLogicalProcessors : 1 +# Status : OK + + +def win_cpuinfo_parse_speed(v): # into Hz (float) + if v == "Unknown" or v == "": + return None + parts = v.split() + if len(parts) == 1: + return float(parts[0]) * 1000000.0 # seems to be in MHz as default + elif parts[1] == "GHz": + return float(parts[0]) * 1000000000.0 + elif parts[1] == "MHz": + return float(parts[0]) * 1000000.0 + elif parts[1] == "kHz": + return float(parts[0]) * 1000.0 + elif parts[1] == "Hz": + return float(parts[0]) + +def win_cpuinfo_parse_voltage(v): + if v == "Unknown" or v == "": + return None + parts = v.split() + return float(parts[0]) + +def inv_win_cpuinfo(info): + node = inv_tree("hardware.cpu.") + num_threads_total = 0 + num_procs = 0 + for varname, value in info: + varname = re.sub(" *","", varname) + value = re.sub("^ ", "", value) + + if varname == "NumberOfCores": + if value != "": + node["cores_per_cpu"] = int(value) + else: + node["cores_per_cpu"] = 1 # missing on Windows 2003 + + elif varname == "NumberOfLogicalProcessors": + if value != "": + node["threads_per_cpu"] = int(value) + else: + node["threads_per_cpu"] = 1 # missing on Windows 2003 + + elif varname == "Manufacturer": + node["vendor"] = { + "GenuineIntel" : "intel", + "AuthenticAMD" : "amd", + }.get(value, value) + + # there is also the L3CacheSize + elif varname == "L2CacheSize" and value != "": + # normalized to bytes! + node["cache_size"] = saveint(value) * 1024 + elif varname == "Name": + node["model"] = value + # For the following two entries we assume that all + # entries are numbered in increasing order in the agent output. + elif varname == "DeviceID": + num_procs += 1 + elif varname == "CurrentVoltage": + node["voltage"] = win_cpuinfo_parse_voltage(value) + elif varname == "MaxClockSpeed": + node["max_speed"] = win_cpuinfo_parse_speed(value) + #elif varname == "AddressWidth": + # if value == "64": + # node["arch"] = "x86_64" + # else: + # node["arch"] = "i386" + elif varname == "Architecture": + node["arch"] = { + "0" : "i386", + "1" : "MIPS", + "2" : "Alpha", + "3" : "PowerPC", + "6" : "Itanium", + "9" : "x86_64", + }.get(value,value) + + if num_procs: + node.setdefault("cores_per_cpu", 1) + node.setdefault("threads_per_cpu", 1) + node["cpus"] = num_procs + node["cores"] = num_procs * node["cores_per_cpu"] + node["threads"] = num_procs * node["threads_per_cpu"] + +inv_info['win_cpuinfo'] = { + "inv_function" : inv_win_cpuinfo, + "unicode" : True, +} + diff -Nru check-mk-1.2.2p3/win_dhcp_pools check-mk-1.2.6p12/win_dhcp_pools --- check-mk-1.2.2p3/win_dhcp_pools 2013-11-05 09:23:08.000000000 +0000 +++ check-mk-1.2.6p12/win_dhcp_pools 2015-09-21 10:59:54.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK.
@@ -121,26 +121,36 @@ # Catch unused pools if size == 0: - return (3, "UNKNOWN - DHCP Pool contains no IP addresses / is deactivated") + return (3, "DHCP Pool contains no IP addresses / is deactivated") - if free > 0: - perc_free = 100 * size / free - else: - perc_free = 0 + warn_perc, crit_perc = params + warn_abs = size * (warn_perc / 100.0) + crit_abs = size * (crit_perc / 100.0) - if perc_free < params[1]: + if free < crit_abs: status = 2 - elif perc_free < params[0]: + elif free < warn_abs: status = 1 - perfdata = [ ('free', free, params[0], params[1], 0, size), - ('used', used, None, None, 0, size), - ('pending', pending, None, None, 0, size) ] - - return (status, '%s - Addresses Free: %d, Used: %d, Pending: %d' % - (nagios_state_names[status], free, used, pending), perfdata) + perfdata = [ ('free', free, warn_abs, crit_abs, 0, size), + ('used', used, None, None, 0, size), + ('pending', pending, None, None, 0, size) ] + + infotext = 'Addresses Free: %d, Used: %d, Pending: %d' % (free, used, pending) + if status: + infotext += " (levels at %d/%d)" % (warn_abs, crit_abs) + return status, infotext, perfdata else: - return (3, 'UNKNOWN - Pool information not found') + return (3, 'Pool information not found') + +check_info["win_dhcp_pools"] = { + 'check_function': check_win_dhcp_pools, + 'inventory_function': inventory_win_dhcp_pools, + 'service_description': 'DHCP Pool %s', + 'has_perfdata': True, + 'group': 'win_dhcp_pools', +} + def inventory_win_dhcp_pools_stats(info): return [ (None, None) for line in info if line[0] != '' ] @@ -149,7 +159,6 @@ output = '' perfdata = [] this_time = int(time.time()) - timedif = 0 for line in parse_win_dhcp_pools(info): if len(line) > 0: @@ -157,21 +166,19 @@ if key in [ 'Discovers', 'Offers', 'Requests', 'Acks', 'Nacks', 'Declines', 'Releases', 'Scopes' ]: value = saveint(line[1]) - try: - timedif, per_sec = get_counter("win_dhcp_stats.%s" % key, this_time, value) - except MKCounterWrapped: - per_sec = 0 - pass + per_sec = get_rate("win_dhcp_stats.%s" % key, this_time, value) output += '%s: %.0f/s, ' % (key, per_sec) perfdata.append((key, per_sec)) if output == '': - return (3, "UNKNOWN - Information not available") + return (3, "Information not available") else: - if timedif != 0: - output = 'In last %d secs: %s' % (timedif, output) - return (0, "OK - %s" % (output.rstrip(', ')), perfdata) - -check_info['win_dhcp_pools'] = (check_win_dhcp_pools, "DHCP Pool %s", 1, inventory_win_dhcp_pools) -check_info['win_dhcp_pools.stats'] = (check_win_dhcp_pools_stats, "DHCP Stats", 1, inventory_win_dhcp_pools_stats) -checkgroup_of['win_dhcp_pools'] = "win_dhcp_pools" + return 0, output.rstrip(', '), perfdata + + +check_info["win_dhcp_pools.stats"] = { + 'check_function': check_win_dhcp_pools_stats, + 'inventory_function': inventory_win_dhcp_pools_stats, + 'service_description': 'DHCP Stats', + 'has_perfdata': True, +} diff -Nru check-mk-1.2.2p3/win_dhcp_pools.stats check-mk-1.2.6p12/win_dhcp_pools.stats --- check-mk-1.2.2p3/win_dhcp_pools.stats 2013-11-05 09:23:07.000000000 +0000 +++ check-mk-1.2.6p12/win_dhcp_pools.stats 2015-06-24 09:48:36.000000000 +0000 @@ -1,6 +1,6 @@ -title: Monitors the number of transactions since startup of the Windows DHCP -agents: linux -author: Lars Michelsen +title: Number of transactions since startup of the Windows DHCP +agents: windows +catalog: os/services license: GPL distribution: check_mk description: diff -Nru check-mk-1.2.2p3/win_disks check-mk-1.2.6p12/win_disks --- check-mk-1.2.2p3/win_disks 1970-01-01 00:00:00.000000000 
+0000 +++ check-mk-1.2.6p12/win_disks 2015-09-21 10:59:54.000000000 +0000 @@ -0,0 +1,95 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. + +# Example output: +# <<>> +# DeviceID : \\.\PHYSICALDRIVE0 +# Partitions : 2 +# InterfaceType : IDE +# Size : 32210196480 +# Caption : VBOX HARDDISK ATA Device +# Description : Laufwerk +# Manufacturer : (Standardlaufwerke) +# MediaType : Fixed hard disk media +# Model : VBOX HARDDISK ATA Device +# Name : \\.\PHYSICALDRIVE0 +# SerialNumber : 42566539323537333930652d3836636263352065 + +# CapabilityDescriptions : {Random Access, Supports Writing} +# BytesPerSector : 512 +# Index : 0 +# FirmwareRevision : 1.0 +# MediaLoaded : True +# Status : OK +# SectorsPerTrack : 63 +# TotalCylinders : 3916 +# TotalHeads : 255 +# TotalSectors : 62910540 +# TotalTracks : 998580 +# TracksPerCylinder : 255 +# Capabilities : {3, 4} +# Signature : 645875120 +# SCSIBus : 0 +# SCSILogicalUnit : 0 +# SCSIPort : 2 +# SCSITargetId : 0 + +def inv_win_disks(info): + node = inv_tree("hardware.storage.disks:") + array = {} + for line in info: + if len(line) > 2: + line = [ line[0], ":".join(line[1:]) ] + varname, value = line + varname = re.sub(" *","", varname) + value = re.sub("^ ", "", value) + if varname == "Manufacturer": + array["vendor"] = value + elif varname == "InterfaceType": + array["bus"] = value + elif varname == "Model": + array["product"] = value + elif varname == "Name": + array["fsnode"] = value + elif varname == "SerialNumber": + array["serial"] = value + elif varname == "Size": + array["size"] = int(value) + elif varname == "MediaType": + array["type"] = value + elif varname == "Signature": + if value != "": + array["signature"] = int(value) + else: + array["signature"] = 0 + array["local"] = True + node.append(array) + + +inv_info['win_disks'] = { + "inv_function" : inv_win_disks, + "unicode" : True, +} Binary files /tmp/tmpz1J_py/xquijR6Ryw/check-mk-1.2.2p3/windows/check_mk_agent-64.exe and /tmp/tmpz1J_py/GXS8RRKTxu/check-mk-1.2.6p12/windows/check_mk_agent-64.exe differ diff -Nru check-mk-1.2.2p3/windows/check_mk_agent.cc check-mk-1.2.6p12/windows/check_mk_agent.cc --- check-mk-1.2.2p3/windows/check_mk_agent.cc 2013-11-05 09:22:41.000000000 +0000 +++ check-mk-1.2.6p12/windows/check_mk_agent.cc 1970-01-01 00:00:00.000000000 +0000 @@ -1,3304 +0,0 @@ -// 
+------------------------------------------------------------------+ -// | ____ _ _ __ __ _ __ | -// | / ___| |__ ___ ___| | __ | \/ | |/ / | -// | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | -// | | |___| | | | __/ (__| < | | | | . \ | -// | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | -// | | -// | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | -// +------------------------------------------------------------------+ -// -// This file is part of Check_MK. -// The official homepage is at http://mathias-kettner.de/check_mk. -// -// check_mk is free software; you can redistribute it and/or modify it -// under the terms of the GNU General Public License as published by -// the Free Software Foundation in version 2. check_mk is distributed -// in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- -// out even the implied warranty of MERCHANTABILITY or FITNESS FOR A -// PARTICULAR PURPOSE. See the GNU General Public License for more de- -// ails. You should have received a copy of the GNU General Public -// License along with GNU Make; see the file COPYING. If not, write -// to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, -// Boston, MA 02110-1301 USA. - - -// Looking for documentation on Win32-API? Here are some of the -// documents that I used: - -// Registry: -// http://msdn.microsoft.com/en-us/library/ms724897.aspx - -// Performance-Counters: -// http://msdn.microsoft.com/en-us/library/aa373178(VS.85).aspx - -// Eventlogs: -// http://msdn.microsoft.com/en-us/library/aa363672(VS.85).aspx -// http://msdn.microsoft.com/en-us/library/bb427356(VS.85).aspx - -// System Error Codes: -// http://msdn.microsoft.com/en-us/library/ms681381(VS.85).aspx - -// This program needs at least windows version 0x0500 -// (Window 2000 / Windows XP) -#define WINVER 0x0500 - -#include -#include -#include -#include -#include // performance counters from registry -#include // list of processes -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include // isspace() -#include // stat() -#include // gettimeofday() - -// .----------------------------------------------------------------------. 
-// | ____ _ _ _ | -// | | _ \ ___ ___| | __ _ _ __ __ _| |_(_) ___ _ __ ___ | -// | | | | |/ _ \/ __| |/ _` | '__/ _` | __| |/ _ \| '_ \/ __| | -// | | |_| | __/ (__| | (_| | | | (_| | |_| | (_) | | | \__ \ | -// | |____/ \___|\___|_|\__,_|_| \__,_|\__|_|\___/|_| |_|___/ | -// | | -// +----------------------------------------------------------------------+ -// | Declarations of macrosk, structs and function prototypes | -// '----------------------------------------------------------------------' - -#define CHECK_MK_VERSION "1.2.2p3" -#define CHECK_MK_AGENT_PORT 6556 -#define SERVICE_NAME "Check_MK_Agent" -#define KiloByte 1024 - -#define SECTION_CHECK_MK 0x00000001 -#define SECTION_UPTIME 0x00000002 -#define SECTION_DF 0x00000004 -#define SECTION_PS 0x00000008 -#define SECTION_MEM 0x00000010 -#define SECTION_SERVICES 0x00000020 -#define SECTION_WINPERF 0x00000040 -#define SECTION_LOGWATCH 0x00000080 -#define SECTION_SYSTEMTIME 0x00000100 -#define SECTION_PLUGINS 0x00000200 -#define SECTION_LOCAL 0x00000400 -#define SECTION_MRPE 0x00000800 -#define SECTION_FILEINFO 0x00001000 -#define SECTION_LOGFILES 0x00002000 - -// Limits for static global arrays -#define MAX_EVENTLOGS 128 -#define MAX_ONLY_FROM 32 -#define MAX_WINPERF_COUNTERS 64 -#define MAX_MRPE_COMMANDS 64 -#define MAX_EXECUTE_SUFFIXES 64 -#define MAX_FILEINFO_ENTRIES 128 - -// Default buffer size for reading performance counters -#define DEFAULT_BUFFER_SIZE 40960L - -// Needed for only_from -struct ipspec { - uint32_t address; - uint32_t netmask; - int bits; -}; - -// Configuration for section [winperf] -struct winperf_counter { - int id; - char *name; -}; - -// Configuration entries from [logwatch] for individual logfiles -struct eventlog_config_entry { - char name[256]; - int level; -}; - -// Command definitions for MRPE -struct mrpe_entry { - char command_line[256]; - char plugin_name[64]; - char service_description[256]; -}; - -// Forward declarations of functions -void listen_tcp_loop(); -void output(SOCKET &out, const char *format, ...); -char *ipv4_to_text(uint32_t ip); -void output_data(SOCKET &out); -double file_time(const FILETIME *filetime); -void open_crash_log(); -void close_crash_log(); -void crash_log(const char *format, ...); - -// .----------------------------------------------------------------------. -// | ____ _ _ _ | -// | / ___| | ___ | |__ __ _| |___ | -// | | | _| |/ _ \| '_ \ / _` | / __| | -// | | |_| | | (_) | |_) | (_| | \__ \ | -// | \____|_|\___/|_.__/ \__,_|_|___/ | -// | | -// +----------------------------------------------------------------------+ -// | Global variables | -// '----------------------------------------------------------------------' - -bool verbose_mode = false; -bool g_crash_debug = false; -bool do_tcp = false; -bool should_terminate = false; -char g_hostname[256]; - -// sections enabled (configurable in check_mk.ini) -unsigned long enabled_sections = 0xffffffff; - -// Variables for section <<>> -bool logwatch_send_initial_entries = false; -bool logwatch_suppress_info = true; - -// dynamic buffer for event log entries. Grows with the -// time as needed. Never shrinked. -char *eventlog_buffer = 0; -int eventlog_buffer_size = 0; - -// Our memory of what event logs we know and up to -// which record entry we have seen its messages so -// far. We do not want to make use of C++ features -// here so sorry for the mess... 
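Stripped of the fixed-size-array plumbing the comment above apologizes for, the state kept per event log is tiny: the last record number already forwarded, plus a flag saying whether the log was still present during the most recent registry scan (register_eventlog() further down sets it; logs whose flag stays false are reported as missing). A rough Python equivalent of that bookkeeping, not the agent's actual code:

    known_logs = {}   # log name -> {"last_record": int, "newly_found": bool}

    def begin_scan():
        for state in known_logs.values():
            state["newly_found"] = False      # cleared before every registry scan

    def register_eventlog(name):
        state = known_logs.setdefault(name, {"last_record": 0, "newly_found": False})
        state["newly_found"] = True           # still (or newly) present

    begin_scan()
    for name in ("Application", "System"):
        register_eventlog(name)
    print([n for n, s in known_logs.items() if not s["newly_found"]])  # vanished: []
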
-unsigned num_eventlogs = 0; -DWORD known_record_numbers[MAX_EVENTLOGS]; -char *eventlog_names[MAX_EVENTLOGS]; -bool newly_found[MAX_EVENTLOGS]; - -// Directories -char g_agent_directory[256]; -char g_current_directory[256]; -char g_plugins_dir[256]; -char g_local_dir[256]; -char g_config_file[256]; -char g_crash_log[256]; -char g_connection_log[256]; -char g_success_log[256]; -char g_logwatch_statefile[256]; - -// Configuration of eventlog monitoring (see config parser) -int num_eventlog_configs = 0; -eventlog_config_entry eventlog_config[MAX_EVENTLOGS]; - -// Parsing of only_from -struct ipspec g_only_from[MAX_ONLY_FROM]; -unsigned int g_num_only_from = 0; - -// Configuration of winperf counters -struct winperf_counter g_winperf_counters[MAX_WINPERF_COUNTERS]; -unsigned int g_num_winperf_counters = 0; - -struct mrpe_entry g_mrpe_entries[MAX_MRPE_COMMANDS]; -unsigned int g_num_mrpe_entries = 0; - -// Configuration of execution suffixed -unsigned g_num_execute_suffixes = 0; -char *g_execute_suffixes[MAX_EXECUTE_SUFFIXES]; - -// Array of file patterns for fileinfo -unsigned g_num_fileinfo_paths = 0; -char *g_fileinfo_path[MAX_FILEINFO_ENTRIES]; - -// Pointer to open crash log file, if crash_debug = on -FILE *g_connectionlog_file = 0; -struct timeval g_crashlog_start; -bool g_found_crash = false; - -// .----------------------------------------------------------------------. -// | _ _ _ | -// | | | | | ___| |_ __ ___ _ __ ___ | -// | | |_| |/ _ \ | '_ \ / _ \ '__/ __| | -// | | _ | __/ | |_) | __/ | \__ \ | -// | |_| |_|\___|_| .__/ \___|_| |___/ | -// | |_| | -// +----------------------------------------------------------------------+ -// | Global helper functions | -// '----------------------------------------------------------------------' - -#ifdef DEBUG -void debug(char *text) -{ - FILE *debugout = fopen("C:\\check_mk_agent.log", "a"); - if (debugout) { - fprintf(debugout, "%s\n", text); - fflush(debugout); - fclose(debugout); - } -} -#else -#define debug(C) -#endif - -void verbose(const char *format, ...) -{ - if (!verbose_mode) - return; - - va_list ap; - va_start(ap, format); - printf("DEBUG: "); - vprintf(format, ap); - printf("\n"); - fflush(stdout); -} - - -char *llu_to_string(unsigned long long value) -{ - static char buffer[64]; - - if (value == 0) { - strcpy(buffer, "0"); - return buffer; - } - - buffer[63] = 0; - - char *write = buffer + 63; - while (value > 0) { - if (write <= buffer) { - strcpy(buffer, "(invalid)"); - return buffer; - } - char digit = (value % 10) + '0'; - *--write = digit; - value = value / 10; - } - return write; -} - -unsigned long long string_to_llu(char *s) -{ - unsigned long long value = 0; - unsigned long long mult = 1; - char *e = s + strlen(s); - while (e > s) { - --e; - value += mult * (*e - '0'); - mult *= 10; - } - return value; -} - - -// determine system root by reading the environment variable -// %SystemRoot%. This variable is used in the registry entries -// that describe eventlog messages. 
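The %SystemRoot% lookup described above matters again in the eventlog section: message-DLL paths read from the registry typically contain the literal token %SystemRoot%. A sketch of that expansion in Python, using the same hard-coded fallback as system_root() below; the netmsg.dll path is an invented example:

    import os

    def expand_system_root(path):
        root = os.environ.get("SystemRoot", r"C:\WINDOWS")   # fallback as in system_root()
        return path.replace("%SystemRoot%", root)

    print(expand_system_root(r"%SystemRoot%\System32\netmsg.dll"))
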
-const char *system_root() -{ - static char root[128]; - if (0 < GetWindowsDirectory(root, sizeof(root))) - return root; - else - return "C:\\WINDOWS"; -} - -double current_time() -{ - SYSTEMTIME systime; - FILETIME filetime; - GetSystemTime(&systime); - SystemTimeToFileTime(&systime, &filetime); - return file_time(&filetime); -} - -#define WINDOWS_TICK 10000000 -#define SEC_TO_UNIX_EPOCH 11644473600LL -double file_time(const FILETIME *filetime) -{ - _ULARGE_INTEGER uli; - uli.LowPart = filetime->dwLowDateTime; - uli.HighPart = filetime->dwHighDateTime; - return double(uli.QuadPart / (double)WINDOWS_TICK - SEC_TO_UNIX_EPOCH); -} - -char *lstrip(char *s) -{ - while (isspace(*s)) - s++; - return s; -} - - -void rstrip(char *s) -{ - char *end = s + strlen(s); // point one beyond last character - while (end > s && isspace(*(end - 1))) { - end--; - } - *end = 0; -} - -char *strip(char *s) -{ - rstrip(s); - return lstrip(s); -} - -void char_replace(char what, char into, char *in) -{ - while (*in) { - if (*in == what) - *in = into; - in++; - } -} - -// .----------------------------------------------------------------------. -// | ______ _ _ _ ______ | -// | / / / /___ _ _ ___| |_ ___ _ __ ___ | |_(_)_ __ ___ ___\ \ \ \ | -// |/ / / // __| | | / __| __/ _ \ '_ ` _ \| __| | '_ ` _ \ / _ \\ \ \ \ | -// |\ \ \ \\__ \ |_| \__ \ || __/ | | | | | |_| | | | | | | __// / / / | -// | \_\_\_\___/\__, |___/\__\___|_| |_| |_|\__|_|_| |_| |_|\___/_/_/_/ | -// | |___/ | -// '----------------------------------------------------------------------' - -void section_systemtime(SOCKET &out) -{ - crash_log("<<>>"); - output(out, "<<>>\n"); - output(out, "%.0f\n", current_time()); -} - -// .----------------------------------------------------------------------. -// | ______ _ _ ______ | -// | / / / / _ _ _ __ | |_(_)_ __ ___ ___ \ \ \ \ | -// | / / / / | | | | '_ \| __| | '_ ` _ \ / _ \ \ \ \ \ | -// | \ \ \ \ | |_| | |_) | |_| | | | | | | __/ / / / / | -// | \_\_\_\ \__,_| .__/ \__|_|_| |_| |_|\___| /_/_/_/ | -// | |_| | -// '----------------------------------------------------------------------' - -void section_uptime(SOCKET &out) -{ - crash_log("<<>>"); - output(out, "<<>>\n"); - static LARGE_INTEGER Frequency,Ticks; - QueryPerformanceFrequency (&Frequency); - QueryPerformanceCounter (&Ticks); - Ticks.QuadPart = Ticks.QuadPart - Frequency.QuadPart; - unsigned int uptime = (double)Ticks.QuadPart / Frequency.QuadPart; - output(out, "%s\n", llu_to_string(uptime)); -} - - - -// .----------------------------------------------------------------------. 
-// | ______ _ __ ______ | -// | / / / / __| |/ _| \ \ \ \ | -// | / / / / / _` | |_ \ \ \ \ | -// | \ \ \ \ | (_| | _| / / / / | -// | \_\_\_\ \__,_|_| /_/_/_/ | -// | | -// '----------------------------------------------------------------------' - -void df_output_filesystem(SOCKET &out, char *volid) -{ - TCHAR fsname[128]; - TCHAR volume[512]; - DWORD dwSysFlags = 0; - if (!GetVolumeInformation(volid, volume, sizeof(volume), 0, 0, &dwSysFlags, fsname, sizeof(fsname))) - fsname[0] = 0; - - ULARGE_INTEGER free_avail, total, free; - free_avail.QuadPart = 0; - total.QuadPart = 0; - free.QuadPart = 0; - int returnvalue = GetDiskFreeSpaceEx(volid, &free_avail, &total, &free); - if (returnvalue > 0) { - double perc_used = 0; - if (total.QuadPart > 0) - perc_used = 100 - (100 * free_avail.QuadPart / total.QuadPart); - - if (volume[0]) // have a volume name - char_replace(' ', '_', volume); - else - strncpy(volume, volid, sizeof(volume)); - - output(out, "%s %s ", volume, fsname); - output(out, "%s ", llu_to_string(total.QuadPart / KiloByte)); - output(out, "%s ", llu_to_string((total.QuadPart - free_avail.QuadPart) / KiloByte)); - output(out, "%s ", llu_to_string(free_avail.QuadPart / KiloByte)); - output(out, "%3.0f%% ", perc_used); - output(out, "%s\n", volid); - } -} - -void df_output_mountpoints(SOCKET &out, char *volid) -{ - char mountpoint[512]; - HANDLE hPt = FindFirstVolumeMountPoint(volid, mountpoint, sizeof(mountpoint)); - if (hPt != INVALID_HANDLE_VALUE) { - while (true) { - TCHAR combined_path[1024]; - snprintf(combined_path, sizeof(combined_path), "%s%s", volid, mountpoint); - df_output_filesystem(out, combined_path); - if (!FindNextVolumeMountPoint(hPt, mountpoint, sizeof(mountpoint))) - break; - } - FindVolumeMountPointClose(hPt); - } -} - -void section_df(SOCKET &out) -{ - crash_log("<<>>"); - output(out, "<<>>\n"); - TCHAR buffer[4096]; - DWORD len = GetLogicalDriveStrings(sizeof(buffer), buffer); - - TCHAR *end = buffer + len; - TCHAR *drive = buffer; - while (drive < end) { - UINT drvType = GetDriveType(drive); - if (drvType == DRIVE_FIXED) // only process local harddisks - { - df_output_filesystem(out, drive); - df_output_mountpoints(out, drive); - } - drive += strlen(drive) + 1; - } - - // Output volumes, that have no drive letter. The following code - // works, but then we have no information about the drive letters. - // And if we run both, then volumes are printed twice. So currently - // we output only fixed drives and mount points below those fixed - // drives. - - // HANDLE hVolume; - // char volid[512]; - // hVolume = FindFirstVolume(volid, sizeof(volid)); - // if (hVolume != INVALID_HANDLE_VALUE) { - // df_output_filesystem(out, volid); - // while (true) { - // // df_output_mountpoints(out, volid); - // if (!FindNextVolume(hVolume, volid, sizeof(volid))) - // break; - // } - // FindVolumeClose(hVolume); - // } -} - -// .----------------------------------------------------------------------. 
-// | ______ ______ |
-// | / / / / _ __ ___ \ \ \ \ |
-// | / / / / | '_ \/ __| \ \ \ \ |
-// | \ \ \ \ | |_) \__ \ / / / / |
-// | \_\_\_\ | .__/|___/ /_/_/_/ |
-// | |_| |
-// '----------------------------------------------------------------------'
-
-void section_ps(SOCKET &out)
-{
-    crash_log("<<<ps>>>");
-    output(out, "<<<ps>>>\n");
-    HANDLE hProcessSnap;
-    PROCESSENTRY32 pe32;
-
-    hProcessSnap = CreateToolhelp32Snapshot(TH32CS_SNAPPROCESS, 0);
-    if (hProcessSnap != INVALID_HANDLE_VALUE)
-    {
-        pe32.dwSize = sizeof(PROCESSENTRY32);
-        if (Process32First(hProcessSnap, &pe32)) {
-            do {
-                output(out, "%s\n", pe32.szExeFile);
-            } while (Process32Next(hProcessSnap, &pe32));
-        }
-        CloseHandle(hProcessSnap);
-    }
-}
-
-// .----------------------------------------------------------------------.
-// | ______ _ ______ |
-// | / / / / ___ ___ _ ____ _(_) ___ ___ ___ \ \ \ \ |
-// | / / / / / __|/ _ \ '__\ \ / / |/ __/ _ \/ __| \ \ \ \ |
-// | \ \ \ \ \__ \ __/ | \ V /| | (_| __/\__ \ / / / / |
-// | \_\_\_\ |___/\___|_| \_/ |_|\___\___||___/ /_/_/_/ |
-// | |
-// '----------------------------------------------------------------------'
-
-
-// Determine the start type of a service. Unbelievable how much
-// code is needed for that...
-const char *service_start_type(SC_HANDLE scm, LPCTSTR service_name)
-{
-    // Query the start type of the service
-    const char *start_type = "invalid1";
-    SC_HANDLE schService;
-    LPQUERY_SERVICE_CONFIG lpsc;
-    schService = OpenService(scm, service_name, SERVICE_QUERY_CONFIG);
-    if (schService) {
-        start_type = "invalid2";
-        DWORD dwBytesNeeded, cbBufSize;
-        if (!QueryServiceConfig(schService, NULL, 0, &dwBytesNeeded)) {
-            start_type = "invalid3";
-            DWORD dwError = GetLastError();
-            if (dwError == ERROR_INSUFFICIENT_BUFFER) {
-                start_type = "invalid4";
-                cbBufSize = dwBytesNeeded;
-                lpsc = (LPQUERY_SERVICE_CONFIG) LocalAlloc(LMEM_FIXED, cbBufSize);
-                if (QueryServiceConfig(schService, lpsc, cbBufSize, &dwBytesNeeded)) {
-                    switch (lpsc->dwStartType) {
-                        case SERVICE_AUTO_START:   start_type = "auto";     break;
-                        case SERVICE_BOOT_START:   start_type = "boot";     break;
-                        case SERVICE_DEMAND_START: start_type = "demand";   break;
-                        case SERVICE_DISABLED:     start_type = "disabled"; break;
-                        case SERVICE_SYSTEM_START: start_type = "system";   break;
-                        default:                   start_type = "other";
-                    }
-                }
-                LocalFree(lpsc);
-            }
-        }
-        CloseServiceHandle(schService);
-    }
-    return start_type;
-}
-
-
-void section_services(SOCKET &out)
-{
-    crash_log("<<<services>>>");
-    output(out, "<<<services>>>\n");
-    SC_HANDLE scm = OpenSCManager(0, 0, SC_MANAGER_CONNECT | SC_MANAGER_ENUMERATE_SERVICE);
-    if (scm != INVALID_HANDLE_VALUE) {
-        DWORD bytes_needed = 0;
-        DWORD num_services = 0;
-        // first determine number of bytes needed
-        EnumServicesStatusEx(scm, SC_ENUM_PROCESS_INFO, SERVICE_WIN32, SERVICE_STATE_ALL,
-                             NULL, 0, &bytes_needed, &num_services, 0, 0);
-        if (GetLastError() == ERROR_MORE_DATA && bytes_needed > 0) {
-            BYTE *buffer = (BYTE *)malloc(bytes_needed);
-            if (buffer) {
-                if (EnumServicesStatusEx(scm, SC_ENUM_PROCESS_INFO, SERVICE_WIN32, SERVICE_STATE_ALL,
-                                         buffer, bytes_needed,
-                                         &bytes_needed, &num_services, 0, 0))
-                {
-                    ENUM_SERVICE_STATUS_PROCESS *service = (ENUM_SERVICE_STATUS_PROCESS *)buffer;
-                    for (unsigned i=0; i<num_services; i++) {
-                        DWORD state = service->ServiceStatusProcess.dwCurrentState;
-                        const char *state_name = "unknown";
-                        switch (state) {
-                            case SERVICE_CONTINUE_PENDING: state_name = "continuing"; break;
-                            case SERVICE_PAUSE_PENDING:    state_name = "pausing";    break;
-                            case SERVICE_PAUSED:           state_name = "paused";     break;
-                            case SERVICE_RUNNING:          state_name = "running";    break;
-                            case SERVICE_START_PENDING:    state_name = "starting";   break;
-                            case SERVICE_STOP_PENDING:     state_name = "stopping";   break;
-                            case SERVICE_STOPPED:          state_name = "stopped";    break;
-                        }
-
-                        const char *start_type = service_start_type(scm, service->lpServiceName);
-
-                        // The service name usually does not contain spaces. But
-                        // in some cases it does. We replace them with _ in order
-                        // to keep it in one space-separated column. Since we own
-                        // the buffer, we can simply change the name inplace.
-                        for (char *w=(char *)(service->lpServiceName); *w; w++) {
-                            if (*w == ' ')
-                                *w = '_';
-                        }
-
-                        output(out, "%s %s/%s %s\n",
-                               service->lpServiceName, state_name, start_type,
-                               service->lpDisplayName);
-                        service ++;
-                    }
-                }
-                free(buffer);
-            }
-        }
-        CloseServiceHandle(scm);
-    }
-}
-
-
-// .----------------------------------------------------------------------.
-// | ______ _ __ ______ |
-// | / / / / __ _(_)_ __ _ __ ___ _ __ / _| \ \ \ \ |
-// | / / / / \ \ /\ / / | '_ \| '_ \ / _ \ '__| |_ \ \ \ \ |
-// | \ \ \ \ \ V V /| | | | | |_) | __/ | | _| _ _ _ / / / / |
-// | \_\_\_\ \_/\_/ |_|_| |_| .__/ \___|_| |_|___(_|_|_) /_/_/_/ |
-// | |_| |_____| |
-// '----------------------------------------------------------------------'
-
-// Helper functions for navigating the performance counter binary data
-PERF_OBJECT_TYPE *FirstObject(PERF_DATA_BLOCK *dataBlock) {
-    return (PERF_OBJECT_TYPE *) ((BYTE *)dataBlock + dataBlock->HeaderLength);
-}
-PERF_OBJECT_TYPE *NextObject(PERF_OBJECT_TYPE *act) {
-    return (PERF_OBJECT_TYPE *) ((BYTE *)act + act->TotalByteLength);
-}
-PERF_COUNTER_DEFINITION *FirstCounter(PERF_OBJECT_TYPE *perfObject) {
-    return (PERF_COUNTER_DEFINITION *) ((BYTE *) perfObject + perfObject->HeaderLength);
-}
-PERF_COUNTER_DEFINITION *NextCounter(PERF_COUNTER_DEFINITION *perfCounter) {
-    return (PERF_COUNTER_DEFINITION *) ((BYTE *) perfCounter + perfCounter->ByteLength);
-}
-PERF_COUNTER_BLOCK *GetCounterBlock(PERF_INSTANCE_DEFINITION *pInstance) {
-    return (PERF_COUNTER_BLOCK *) ((BYTE *)pInstance + pInstance->ByteLength);
-}
-PERF_INSTANCE_DEFINITION *FirstInstance (PERF_OBJECT_TYPE *pObject) {
-    return (PERF_INSTANCE_DEFINITION *) ((BYTE *) pObject + pObject->DefinitionLength);
-}
-PERF_INSTANCE_DEFINITION *NextInstance (PERF_INSTANCE_DEFINITION *pInstance) {
-    return (PERF_INSTANCE_DEFINITION *) ((BYTE *)pInstance + pInstance->ByteLength + GetCounterBlock(pInstance)->ByteLength);
-}
-
-void outputCounter(SOCKET &out, BYTE *datablock, int counter,
-                   PERF_OBJECT_TYPE *objectPtr, PERF_COUNTER_DEFINITION *counterPtr);
-void outputCounterValue(SOCKET &out, PERF_COUNTER_DEFINITION *counterPtr, PERF_COUNTER_BLOCK *counterBlockPtr);
-
-
-void dump_performance_counters(SOCKET &out, unsigned counter_base_number, const char *countername)
-{
-    crash_log("<<<winperf_%s>>>", countername);
-    output(out, "<<<winperf_%s>>>\n", countername);
-    output(out, "%.2f %u\n", current_time(), counter_base_number);
-
-    // registry entry is ascii representation of counter index
-    char counter_index_name[8];
-    snprintf(counter_index_name, sizeof(counter_index_name), "%u", counter_base_number);
-
-    // allocate block to store counter data block
-    DWORD size = DEFAULT_BUFFER_SIZE;
-    BYTE *data = new BYTE[DEFAULT_BUFFER_SIZE];
-    DWORD type;
-    DWORD ret;
-
-    // Fetches the complete binary block for a given counter from the
-    // registry. Since there is no way to know the required buffer size
-    // in advance, we can only start with some size and keep enlarging
-    // the buffer as long as it is still too small. >:-P
-    while ((ret = RegQueryValueEx(HKEY_PERFORMANCE_DATA, counter_index_name,
-                                  0, &type, data, &size)) != ERROR_SUCCESS)
-    {
-        if (ret == ERROR_MORE_DATA) // WIN32 API sucks...
-        {
-            // The buffer was too small. Great. So enlarge the buffer
-            // and try the whole thing again.
-            size += DEFAULT_BUFFER_SIZE;
-            verbose("Buffer for RegQueryValueEx too small. Resizing...");
-            delete [] data;
-            data = new BYTE [size];
-        } else {
-            // Some other error occurred. Abort.
-            delete [] data;
-            return;
-        }
-    }
-    crash_log(" - read performance data, buffer size %d", size);
-
-    PERF_DATA_BLOCK *dataBlockPtr = (PERF_DATA_BLOCK *)data;
-
-    // Determine first object in list of objects
-    PERF_OBJECT_TYPE *objectPtr = FirstObject(dataBlockPtr);
-
-    // Now walk through the list of objects. The bad news is:
-    // even if we expect only one object, windows might send
-    // us more than one object. We need to scan a list of objects
-    // in order to find the one we have asked for. >:-P
-    for (unsigned int a=0 ; a < dataBlockPtr->NumObjectTypes ; a++)
-    {
-        // Have we found the object we seek?
-        if (objectPtr->ObjectNameTitleIndex == counter_base_number)
-        {
-            // Yes. Great. Now: each object consists of a lot of counters.
-            // We walk through the list of counters in this object:
-
-            // get pointer to first counter
-            PERF_COUNTER_DEFINITION *counterPtr = FirstCounter(objectPtr);
-
-            // Now we make a first quick walk through all counters, only in order
-            // to find the beginning of the data block (which comes after the
-            // counter definitions)
-            PERF_COUNTER_DEFINITION *last_counter = FirstCounter(objectPtr);
-            for (unsigned int b=0 ; b < objectPtr->NumCounters ; b++)
-                last_counter = NextCounter(last_counter);
-            BYTE *datablock = (BYTE *)last_counter;
-
-            // In case of multi-instance objects, output a list of all instance names
-            int num_instances = objectPtr->NumInstances;
-            if (num_instances >= 0)
-            {
-                output(out, "%d instances:", num_instances);
-                char name[512];
-                PERF_INSTANCE_DEFINITION *instancePtr = FirstInstance(objectPtr);
-                for (int b=0 ; b < objectPtr->NumInstances ; b++)
-                {
-                    WCHAR *name_start = (WCHAR *)((char *)(instancePtr) + instancePtr->NameOffset);
-                    memcpy(name, name_start, instancePtr->NameLength);
-                    WideCharToMultiByte(CP_UTF8, 0, name_start, instancePtr->NameLength, name, sizeof(name), NULL, NULL);
-                    // replace spaces with '_'
-                    for (char *s = name; *s; s++)
-                        if (*s == ' ') *s = '_';
-
-                    output(out, " %s", name);
-                    instancePtr = NextInstance(instancePtr);
-                }
-                output(out, "\n");
-            }
-
-            // Now walk through the counter list a second time and output all counters
-            for (unsigned int b=0 ; b < objectPtr->NumCounters ; b++)
-            {
-                outputCounter(out, datablock, counter_base_number, objectPtr, counterPtr);
-                counterPtr = NextCounter(counterPtr);
-            }
-        }
-        // next object in the list
-        objectPtr = NextObject(objectPtr);
-    }
-    delete [] data;
-}
-
-
-void outputCounter(SOCKET &out, BYTE *datablock, int counter_base_number,
-                   PERF_OBJECT_TYPE *objectPtr, PERF_COUNTER_DEFINITION *counterPtr)
-{
-
-    // determine the type of the counter (for verbose output)
-    const char *countertypename = 0;
-    switch (counterPtr->CounterType) {
-        case PERF_COUNTER_COUNTER:                countertypename = "counter";                 break;
-        case PERF_COUNTER_TIMER:                  countertypename = "timer";                   break;
-        case PERF_COUNTER_QUEUELEN_TYPE:          countertypename = "queuelen_type";           break;
-        case PERF_COUNTER_BULK_COUNT:             countertypename = "bulk_count";              break;
-        case PERF_COUNTER_TEXT:                   countertypename = "text";                    break;
-        case PERF_COUNTER_RAWCOUNT:               countertypename = "rawcount";                break;
-        case PERF_COUNTER_LARGE_RAWCOUNT:         countertypename = "large_rawcount";          break;
-        case PERF_COUNTER_RAWCOUNT_HEX:           countertypename = "rawcount_hex";            break;
-        case PERF_COUNTER_LARGE_RAWCOUNT_HEX:     countertypename = "large_rawcount_HEX";      break;
-        case PERF_SAMPLE_FRACTION:                countertypename = "sample_fraction";         break;
-        case PERF_SAMPLE_COUNTER:                 countertypename = "sample_counter";          break;
-        case PERF_COUNTER_NODATA:                 countertypename = "nodata";                  break;
-        case PERF_COUNTER_TIMER_INV:              countertypename = "timer_inv";               break;
-        case PERF_SAMPLE_BASE:                    countertypename = "sample_base";             break;
-        case PERF_AVERAGE_TIMER:                  countertypename = "average_timer";           break;
-        case PERF_AVERAGE_BASE:                   countertypename = "average_base";            break;
-        case PERF_AVERAGE_BULK:                   countertypename = "average_bulk";            break;
-        case PERF_100NSEC_TIMER:                  countertypename = "100nsec_timer";           break;
-        case PERF_100NSEC_TIMER_INV:              countertypename = "100nsec_timer_inv";       break;
-        case PERF_COUNTER_MULTI_TIMER:            countertypename = "multi_timer";             break;
-        case PERF_COUNTER_MULTI_TIMER_INV:        countertypename = "multi_timer_inV";         break;
-        case PERF_COUNTER_MULTI_BASE:             countertypename = "multi_base";              break;
-        case PERF_100NSEC_MULTI_TIMER:            countertypename = "100nsec_multi_timer";     break;
-        case PERF_100NSEC_MULTI_TIMER_INV:        countertypename = "100nsec_multi_timer_inV"; break;
-        case PERF_RAW_FRACTION:                   countertypename = "raw_fraction";            break;
-        case PERF_RAW_BASE:                       countertypename = "raw_base";                break;
-        case PERF_ELAPSED_TIME:                   countertypename = "elapsed_time";            break;
-    }
-
-    // Output index of counter object and counter, and timestamp
-    output(out, "%d", counterPtr->CounterNameTitleIndex - counter_base_number);
-
-    // If this is a multi-instance-counter, loop over the instances
-    int num_instances = objectPtr->NumInstances;
-    if (num_instances >= 0)
-    {
-        // get pointer to first instance
-        PERF_INSTANCE_DEFINITION *instancePtr = FirstInstance(objectPtr);
-
-        for (int b=0 ; b < objectPtr->NumInstances ; b++)
-        {
-            // Determine the PERF_COUNTER_BLOCK of this instance.
-            PERF_COUNTER_BLOCK *counterBlockPtr = GetCounterBlock(instancePtr);
-            outputCounterValue(out, counterPtr, counterBlockPtr);
-            instancePtr = NextInstance(instancePtr);
-        }
-
-    }
-    else { // instanceless counter
-        PERF_COUNTER_BLOCK *counterBlockPtr = (PERF_COUNTER_BLOCK *) datablock;
-        outputCounterValue(out, counterPtr, counterBlockPtr);
-    }
-    if (countertypename)
-        output(out, " %s\n", countertypename);
-    else
-        output(out, " type(%lx)\n", counterPtr->CounterType);
-}
-
-
-void outputCounterValue(SOCKET &out, PERF_COUNTER_DEFINITION *counterPtr, PERF_COUNTER_BLOCK *counterBlockPtr)
-{
-    unsigned offset = counterPtr->CounterOffset;
-    int size = counterPtr->CounterSize;
-    BYTE *pData = ((BYTE *)counterBlockPtr) + offset;
-
-    // Note: only the size bits (mask 0x300) of the counter type may be
-    // tested here; a plain bitwise OR would always take the first branch.
-    if ((counterPtr->CounterType & 0x00000300) == PERF_SIZE_DWORD)
-        output(out, " %llu", (ULONGLONG)(*(DWORD*)pData));
-
-    else if ((counterPtr->CounterType & 0x00000300) == PERF_SIZE_LARGE)
-        output(out, " %llu", *(UNALIGNED ULONGLONG*)pData);
-
-    // handle other data generically. This is wrong in some situation.
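All of the First*/Next* helpers above are variations on one trick: a record announces its own length, and the next record starts that many bytes further on. A toy Python illustration of that traversal pattern over an invented length-prefixed layout (deliberately not the real PERF_DATA_BLOCK format, which carries many more header fields):

    import struct

    # Toy layout: each record is <total_length><value>, two little-endian uint32s,
    # packed back to back.
    blob = b"".join(struct.pack("<II", 8, v) for v in (10, 20, 30))

    def walk(buf):
        off = 0
        while off < len(buf):
            length, value = struct.unpack_from("<II", buf, off)
            yield value
            off += length            # NextObject(): hop by the record's own length

    print(list(walk(blob)))          # [10, 20, 30]
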
- // Once upon a time in future we might implement a conversion as - // described in http://msdn.microsoft.com/en-us/library/aa373178%28v=vs.85%29.aspx - else if (size == 4) { - DWORD value = *((DWORD *)pData); - output(out, " %lu", value); - } - else if (size == 8) { - DWORD *data_at = (DWORD *)pData; - DWORDLONG value = (DWORDLONG)*data_at + ((DWORDLONG)*(data_at + 1) << 32); - output(out, " %s", llu_to_string(value)); - } - else - output(out, " unknown"); -} - -void section_winperf(SOCKET &out) -{ - dump_performance_counters(out, 234, "phydisk"); - dump_performance_counters(out, 238, "processor"); - - // also output additionally configured counters - for (unsigned i=0; i failed to load %s", dll_realpath); - return false; - } - } - else - dll = NULL; - - WCHAR wmsgbuffer[2048]; - DWORD dwFlags = FORMAT_MESSAGE_ARGUMENT_ARRAY | FORMAT_MESSAGE_FROM_SYSTEM; - if (dll) - dwFlags |= FORMAT_MESSAGE_FROM_HMODULE; - - DWORD len = FormatMessageW( - // DWORD len = FormatMessage( - dwFlags, - dll, - event->EventID, - 0, // accept any language - wmsgbuffer, - // msgbuffer, - 2048, - (char **)strings); - - if (dll) - FreeLibrary(dll); - - if (len) - { - // convert message to UTF-8 - len = WideCharToMultiByte(CP_UTF8, 0, wmsgbuffer, -1, msgbuffer, sizeof(msgbuffer), NULL, NULL); - } - - if (len == 0) // message could not be converted - { - // if conversion was not successfull while trying to load a DLL, we return a - // failure. Our parent function will then retry later without a DLL path. - if (dllpath) - return false; - - // if message cannot be converted, then at least output the text strings. - // We render all messages one after the other into msgbuffer, separated - // by spaces. - memset(msgbuffer, 0, sizeof(msgbuffer)); // avoids problems with 0-termination - char *w = msgbuffer; - int sizeleft = sizeof(msgbuffer) - 1; // leave one byte for termination - int n = 0; - while (strings[n]) // string array is zero terminated - { - WCHAR *s = strings[n]; - DWORD len = WideCharToMultiByte(CP_UTF8, 0, s, -1, w, sizeleft, NULL, NULL); - if (!len) - break; - sizeleft -= len; - w += len; - if (sizeleft <= 0) - break; - n++; - if (strings[n]) - *w++ = ' '; - } - } - - // replace newlines with spaces. check_mk expects one message each line. 
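Once the message has been rendered and flattened as described above, each event becomes a single logwatch-style line: state character, timestamp, the 32-bit EventID split into its "qualifiers" high word and the actual event id, source name, message text. A sketch of just that formatting step in Python (all sample values invented):

    import time

    def format_event(state_char, event_id32, source, message, ts):
        stamp = time.strftime("%b %d %H:%M:%S", time.localtime(ts))
        flat = message.replace("\n", " ").replace("\r", " ")  # one message per line
        qualifiers, event_id = event_id32 // 65536, event_id32 % 65536
        return "%s %s %d.%d %s %s" % (state_char, stamp, qualifiers,
                                      event_id, source.replace(" ", "_"), flat)

    print(format_event("W", 0x80000003, "Service Control Manager",
                       "The service did not start\r\ndue to a logon failure.",
                       1443000000))
    # -> W Sep 23 ... 32768.3 Service_Control_Manager The service did not start ...
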
- char *w = msgbuffer; - while (*w) { - if (*w == '\n' || *w == '\r') *w = ' '; - w++; - } - - // convert UNIX timestamp to local time - time_t time_generated = (time_t)event->TimeGenerated; - struct tm *t = localtime(&time_generated); - char timestamp[64]; - strftime(timestamp, sizeof(timestamp), "%b %d %H:%M:%S", t); - - output(out, "%c %s %lu.%lu %s %s\n", type_char, timestamp, - event->EventID / 65536, // "Qualifiers": no idea what *that* is - event->EventID % 65536, // the actual event id - source_name, msgbuffer); - return true; -} - - -void process_eventlog_entries(SOCKET &out, const char *logname, char *buffer, - DWORD bytesread, DWORD *record_number, bool just_find_end, - int *worst_state, int level) -{ - WCHAR *strings[64]; - char regpath[128]; - BYTE dllpath[128]; - char source_name[128]; - - EVENTLOGRECORD *event = (EVENTLOGRECORD *)buffer; - while (bytesread > 0) - { - crash_log(" - record %d: process_eventlog_entries bytesread %d, event->Length %d", *record_number, bytesread, event->Length); - *record_number = event->RecordNumber; - - char type_char; - int this_state; - switch (event->EventType) { - case EVENTLOG_ERROR_TYPE: - type_char = 'C'; - this_state = 2; - break; - case EVENTLOG_WARNING_TYPE: - type_char = 'W'; - this_state = 1; - break; - case EVENTLOG_INFORMATION_TYPE: - case EVENTLOG_AUDIT_SUCCESS: - type_char = level == 0 ? 'I' : '.'; - this_state = 0; - break; - case EVENTLOG_AUDIT_FAILURE: - type_char = 'C'; - this_state = 2; - break; - default: - type_char = 'u'; - this_state = 1; - break; - } - if (*worst_state < this_state) - *worst_state = this_state; - - // If we are not just scanning for the current end and the worst state, - // we output the event message - if (!just_find_end) - { - // The source name is the name of the application that produced the event - // It is UTF-16 encoded - WCHAR *lpSourceName = (WCHAR *) ((LPBYTE) event + sizeof(EVENTLOGRECORD)); - WideCharToMultiByte(CP_UTF8, 0, lpSourceName, -1, source_name, sizeof(source_name), NULL, NULL); - - char *w = source_name; - while (*w) { - if (*w == ' ') *w = '_'; - w++; - } - - // prepare array of zero terminated strings to be inserted - // into message template. - DWORD num_strings = event->NumStrings; - WCHAR *s = (WCHAR *)(((char *)event) + event->StringOffset); - unsigned ns; - for (ns = 0; ns < num_strings; ns++) { - if (ns >= 63) break; - strings[ns] = s; - s += wcslen(s) + 1; - } - strings[ns] = 0; // end marker in array - - // Windows eventlog entries refer to texts stored in a DLL >:-P - // We need to load this DLL. First we need to look up which - // DLL to load in the registry. Hard to image how one could - // have contrieved this more complicated... - snprintf(regpath, sizeof(regpath), - "SYSTEM\\CurrentControlSet\\Services\\Eventlog\\%s\\%S", - logname, lpSourceName); - - HKEY key; - DWORD ret = RegOpenKeyEx(HKEY_LOCAL_MACHINE, regpath, 0, KEY_READ, &key); - - bool success = false; - if (ret == ERROR_SUCCESS) // could open registry key - { - DWORD size = sizeof(dllpath) - 1; // leave space for 0 termination - memset(dllpath, 0, sizeof(dllpath)); - if (ERROR_SUCCESS == RegQueryValueEx(key, "EventMessageFile", NULL, NULL, dllpath, &size)) - { - crash_log(" - record %d: DLLs to load: %s", *record_number, dllpath); - // Answer may contain more than one DLL. They are separated - // by semicola. Not knowing which one is the correct one, I have to try - // all... 
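In outline, the registry dance above is: open SYSTEM\CurrentControlSet\Services\Eventlog\<log>\<source>, read EventMessageFile, split it on semicolons, and try each DLL until one can format the event. A hedged sketch using Python's standard winreg module (Windows only; try_format stands in for the FormatMessageW call the agent makes in C):

    import winreg

    def message_dlls(log, source):
        path = r"SYSTEM\CurrentControlSet\Services\Eventlog\%s\%s" % (log, source)
        with winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, path) as key:
            value, _ = winreg.QueryValueEx(key, "EventMessageFile")
        # The value may list several DLLs separated by ';' and usually starts
        # with %SystemRoot%, which still needs expanding (cf. system_root()).
        return value.split(";")

    def render_message(log, source, try_format):
        for dll in message_dlls(log, source):
            text = try_format(dll)   # hypothetical formatter, e.g. ctypes wrapper
            if text is not None:
                return text          # first DLL that can format the event wins
        return None                  # caller falls back to the raw insertion strings
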
- char *token = strtok((char *)dllpath, ";"); - while (token) { - if (output_eventlog_entry(out, token, event, type_char, logname, source_name, strings)) { - success = true; - break; - } - token = strtok(NULL, ";"); - } - } - RegCloseKey(key); - } - else { - crash_log(" - record %d: no DLLs listed in registry", *record_number); - } - - // No text conversion succeeded. Output without text anyway - if (!success) { - crash_log(" - record %d: translation failed", *record_number); - output_eventlog_entry(out, NULL, event, type_char, logname, source_name, strings); - } - - } // type_char != '.' - - bytesread -= event->Length; - crash_log(" - record %d: event_processed, bytesread %d, event->Length %d", *record_number, bytesread, event->Length); - event = (EVENTLOGRECORD *) ((LPBYTE) event + event->Length); - } -} - - -void output_eventlog(SOCKET &out, const char *logname, - DWORD *record_number, bool just_find_end, int level) -{ - crash_log(" - event log \"%s\":", logname); - - if (eventlog_buffer_size == 0) { - const int initial_size = 65536; - eventlog_buffer = new char[initial_size]; - eventlog_buffer_size = initial_size; - } - - HANDLE hEventlog = OpenEventLog(NULL, logname); - DWORD bytesread = 0; - DWORD bytesneeded = 0; - if (hEventlog) { - crash_log(" . successfully opened event log"); - output(out, "[[[%s]]]\n", logname); - int worst_state = 0; - DWORD old_record_number = *record_number; - - // we scan all new entries twice. At the first run we check if - // at least one warning/error message is present. Only if this - // is the case we make a second run where we output *all* messages, - // even the informational ones. - for (int t=0; t<2; t++) - { - *record_number = old_record_number; - verbose("Starting from entry number %u", old_record_number); - while (true) { - DWORD flags; - if (*record_number == 0) { - if (t == 1) { - verbose("Need to reopen Logfile in order to find start again."); - CloseEventLog(hEventlog); - hEventlog = OpenEventLog(NULL, logname); - if (!hEventlog) { - verbose("Failed to reopen event log. Bailing out."); - return; - } - crash_log(" . reopened log"); - } - flags = EVENTLOG_SEQUENTIAL_READ | EVENTLOG_FORWARDS_READ; - } - else { - verbose("Previous record number was %d. Doing seek read.", *record_number); - flags = EVENTLOG_SEEK_READ | EVENTLOG_FORWARDS_READ; - } - - if (ReadEventLogW(hEventlog, - flags, - *record_number + 1, - eventlog_buffer, - eventlog_buffer_size, - &bytesread, - &bytesneeded)) - { - crash_log(" . got entries starting at %d (%d bytes)", *record_number + 1, bytesread); - process_eventlog_entries(out, logname, eventlog_buffer, - bytesread, record_number, just_find_end || t==0, &worst_state, level); - } - else { - DWORD error = GetLastError(); - if (error == ERROR_INSUFFICIENT_BUFFER) { - grow_eventlog_buffer(bytesneeded); - crash_log(" . needed to grow buffer to %d bytes", bytesneeded); - } - // found current end of log - else if (error == ERROR_HANDLE_EOF) { - verbose("End of logfile reached at entry %u. Worst state is %d", - *record_number, worst_state); - break; - } - // invalid parameter can also mean end of log - else if (error == ERROR_INVALID_PARAMETER) { - verbose("Invalid parameter at entry %u (could mean end of logfile). Worst state is %d", - *record_number, worst_state); - break; - } - else { - output(out, "ERROR: Cannot read eventlog '%s': error %u\n", logname, error); - break; - } - } - } - if (worst_state < level && logwatch_suppress_info) { - break; // nothing important found. 
Skip second run - } - } - CloseEventLog(hEventlog); - } - else { - output(out, "[[[%s:missing]]]\n", logname); - } -} - -// Keeps memory of an event log we have found. It -// might already be known and will not be stored twice. -void register_eventlog(char *logname) -{ - if (num_eventlogs >= MAX_EVENTLOGS) - return; // veeery unlikely - - // check if we already know this one... - for (unsigned i=0; i < num_eventlogs; i++) { - if (!strcmp(logname, eventlog_names[i])) { - newly_found[i] = true; // remember its still here - return; - } - } - - // yet unknown. register it. - known_record_numbers[num_eventlogs] = 0; - eventlog_names[num_eventlogs] = strdup(logname); - newly_found[num_eventlogs] = true; - num_eventlogs ++; -} - -void unregister_all_eventlogs() -{ - for (unsigned i=0; i < num_eventlogs; i++) - free(eventlog_names[i]); - num_eventlogs = 0; -} - -/* Look into the registry in order to find out, which - event logs are available. */ -bool find_eventlogs(SOCKET &out) -{ - for (unsigned i=0; imissing) { - // llu_to_string is not reentrant, so do this in three steps - fprintf(file, "%s|%s", tf->path, llu_to_string(tf->file_id)); - fprintf(file, "|%s", llu_to_string(tf->file_size)); - fprintf(file, "|%s\r\n", llu_to_string(tf->offset)); - } - } - fclose(file); -} - -void parse_logwatch_state_line(char *line) -{ - if (g_num_logwatch_hints >= MAX_LOGWATCH_TEXTFILES) { - verbose("Too many entries in logwatch state file."); - return; - } - - /* Example: line = "M://log1.log|98374598374|0|16"; */ - rstrip(line); - char *p = line; - while (*p && *p != '|') p++; - *p = 0; - char *path = line; - p++; - char *token = strtok(p, "|"); - unsigned long long file_id = string_to_llu(token); - token = strtok(NULL, "|"); - unsigned long long file_size = string_to_llu(token); - token = strtok(NULL, "|"); - unsigned long long offset = string_to_llu(token); - - logwatch_textfile *tf = new logwatch_textfile(); - tf->path = strdup(path); - tf->file_id = file_id; - tf->file_size = file_size; - tf->offset = offset; - tf->missing = false; - tf->patterns = 0; - g_logwatch_hints[g_num_logwatch_hints++] = tf; -} - -void load_logwatch_offsets() -{ - static bool offsets_loaded = false; - if (!offsets_loaded) { - FILE *file = fopen(g_logwatch_statefile, "r"); - if (file) { - char line[256]; - while (NULL != fgets(line, sizeof(line), file)) { - parse_logwatch_state_line(line); - } - fclose(file); - } - offsets_loaded = true; - } -} - - - - -// debug output -void print_logwatch_config() -{ - printf("\nLOGWATCH CONFIG\n=================\nFILES\n"); - for (unsigned int i = 0; i < g_num_logwatch_textfiles ; i++) { - printf(" %s %u %x missing %d\n", g_logwatch_textfiles[i]->path, - (unsigned int)g_logwatch_textfiles[i]->offset, - (unsigned int) g_logwatch_textfiles[i]->patterns, - g_logwatch_textfiles[i]->missing); - } - printf("\n"); - - printf("GLOBS\n"); - for (unsigned int i = 0; i < g_num_logwatch_globlines ; i++) { - printf("Globline Container %x\n", (unsigned int)g_logwatch_globlines[i]->patterns); - for (int j = 0; j < g_logwatch_globlines[i]->num_tokens ; j++) - printf(" %s\n", g_logwatch_globlines[i]->token[j]->pattern); - printf("Pattern Container\n"); - for (int j = 0; j < g_logwatch_globlines[i]->patterns->num_patterns; j++) - printf(" %c %s\n", g_logwatch_globlines[i]->patterns->patterns[j]->state, - g_logwatch_globlines[i]->patterns->patterns[j]->glob_pattern); - } - printf("\n"); -} - -// Add a new state pattern to the current pattern container -void add_condition_pattern(char state, char *value) -{ - if 
(g_current_globline_container == NULL) { - fprintf(stderr, "You need to set a textfile, before specifying a condition pattern\n"); - return; - } - - - if (g_current_globline_container - && g_current_globline_container->patterns->num_patterns + 1 >= MAX_LOGWATCH_CONDITIONS) - { - fprintf(stderr, "Maximum number of conditions for a globline exceeded %d.\n", MAX_LOGWATCH_CONDITIONS); - } - - condition_pattern *new_pattern = new condition_pattern(); - new_pattern->state = state; - new_pattern->glob_pattern = strdup(value); - g_current_globline_container->patterns->patterns[g_current_globline_container->patterns->num_patterns++] - = new_pattern; -} - - -logwatch_textfile* get_logwatch_textfile(const char *filename) -{ - for (unsigned int i = 0; i < g_num_logwatch_textfiles; i++) - if (strcmp(filename, g_logwatch_textfiles[i]->path) == 0) - return g_logwatch_textfiles[i]; - return 0; -} - -// Add a new textfile and to the global textfile list -// and determine some initial values -bool add_new_logwatch_textfile(const char *full_filename, pattern_container *patterns) -{ - if (g_num_logwatch_textfiles + 1 >= MAX_LOGWATCH_TEXTFILES) { - fprintf(stderr, "Maximum number of textfiles exceeded %d.\n", MAX_LOGWATCH_TEXTFILES); - return false; - } - - logwatch_textfile *new_textfile = new logwatch_textfile(); - - HANDLE hFile = CreateFile(full_filename,// file to open - GENERIC_READ, // open for reading - FILE_SHARE_READ|FILE_SHARE_WRITE|FILE_SHARE_DELETE, - NULL, // default security - OPEN_EXISTING, // existing file only - FILE_ATTRIBUTE_NORMAL, // normal file - NULL); // no attr. template - - BY_HANDLE_FILE_INFORMATION fileinfo; - GetFileInformationByHandle(hFile, &fileinfo); - CloseHandle(hFile); - - new_textfile->path = strdup(full_filename); - new_textfile->missing = false; - new_textfile->patterns = patterns; - - // Hier aus den gespeicherten Hints was holen.... - bool found_hint = false; - for (unsigned i=0; ipath, full_filename)) { - new_textfile->file_size = hint->file_size; - new_textfile->file_id = hint->file_id; - new_textfile->offset = hint->offset; - found_hint = true; - break; - } - } - - if (!found_hint) { - new_textfile->file_size = (unsigned long long)fileinfo.nFileSizeLow + - (((unsigned long long)fileinfo.nFileSizeHigh) << 32); - new_textfile->file_id = (unsigned long long)fileinfo.nFileIndexLow + - (((unsigned long long)fileinfo.nFileIndexHigh) << 32); - new_textfile->offset = new_textfile->file_size; - } - - g_logwatch_textfiles[g_num_logwatch_textfiles++] = new_textfile; - return true; -} - - -// Check if the given full_filename already exists. If so, do some basic file integrity checks -// Otherwise create a new textfile instance -void update_or_create_logwatch_textfile(const char *full_filename, pattern_container *patterns) -{ - logwatch_textfile *textfile; - if ((textfile = get_logwatch_textfile(full_filename)) != NULL) - { - HANDLE hFile = CreateFile(textfile->path,// file to open - GENERIC_READ, // open for reading - FILE_SHARE_READ|FILE_SHARE_WRITE|FILE_SHARE_DELETE, - NULL, // default security - OPEN_EXISTING, // existing file only - FILE_ATTRIBUTE_NORMAL, // normal file - NULL); // no attr. 
template - - BY_HANDLE_FILE_INFORMATION fileinfo; - // Do some basic checks to ensure its still the same file - // try to fill the structure with info regarding the file - if (hFile != INVALID_HANDLE_VALUE) - { - if (GetFileInformationByHandle(hFile, &fileinfo)) - { - unsigned long long file_id = (unsigned long long)fileinfo.nFileIndexLow + - (((unsigned long long)fileinfo.nFileIndexHigh) << 32); - textfile->file_size = (unsigned long long)fileinfo.nFileSizeLow + - (((unsigned long long)fileinfo.nFileSizeHigh) << 32); - - if (file_id != textfile->file_id) { // file has been changed - verbose("File %s: id has changed from %s", - full_filename, llu_to_string(textfile->file_id)); - verbose(" to %s\n", llu_to_string(file_id)); - textfile->offset = 0; - textfile->file_id = file_id; - } else if (textfile->file_size < textfile->offset) { // file has been truncated - verbose("File %s: file has been truncated\n", full_filename); - textfile->offset = 0; - } - - textfile->missing = false; - } - CloseHandle(hFile); - } else { - verbose("Cant open file with CreateFile %s\n", full_filename); - } - - } - else - add_new_logwatch_textfile(full_filename, patterns); // Add new file -} - -// Process a single expression (token) of a globline and try to find matching files -void process_glob_expression(glob_token *glob_token, pattern_container *patterns) -{ - WIN32_FIND_DATA data; - char full_filename[512]; - glob_token->found_match = false; - HANDLE h = FindFirstFileEx(glob_token->pattern, FindExInfoStandard, &data, FindExSearchNameMatch, NULL, 0); - if (h != INVALID_HANDLE_VALUE) { - glob_token->found_match = true; - const char *basename = ""; - char *end = strrchr(glob_token->pattern, '\\'); - if (end) { - *end = 0; - basename = glob_token->pattern; - } - snprintf(full_filename,sizeof(full_filename), "%s\\%s", basename, data.cFileName); - update_or_create_logwatch_textfile(full_filename, patterns); - - while (FindNextFile(h, &data)){ - snprintf(full_filename,sizeof(full_filename), "%s\\%s", basename, data.cFileName); - update_or_create_logwatch_textfile(full_filename, patterns); - } - - if (end) - *end = '\\'; // repair string - FindClose(h); - } -} - -// Add a new globline from the config file: -// C:/Testfile D:/var/log/data.log D:/tmp/art*.log -// This globline is split into tokens which are processed by process_glob_expression -void add_globline(char *value) -{ - if ( g_num_logwatch_globlines + 1 >= MAX_LOGWATCH_GLOBLINES) { - fprintf(stderr, "Maximum number of globlines exceeded %d.\n", MAX_LOGWATCH_GLOBLINES); - exit(1); - } - - // Each globline receives its own pattern container - // In case new files matching the glob pattern are we - // we already have all state,regex patterns available - globline_container *new_globline = new globline_container(); - new_globline->patterns = new pattern_container(); - new_globline->num_tokens = 0; - - g_logwatch_globlines[g_num_logwatch_globlines++] = new_globline; - g_current_globline_container = new_globline; - - // Split globline into tokens - if (value != 0) { - char *copy = strdup(value); - char *token = strtok(copy, "|"); - while (token) { - token = lstrip(token); - new_globline->token[new_globline->num_tokens] = new glob_token(); - new_globline->token[new_globline->num_tokens]->pattern = strdup(token); - process_glob_expression(new_globline->token[new_globline->num_tokens], new_globline->patterns); - token = strtok(NULL, "|"); - new_globline->num_tokens++; - } - free(copy); - } -} - - -// Revalidate the existance of logfiles and check if the files attribute 
(id / size) indicate a change -void revalidate_logwatch_textfiles() -{ - // First of all invalidate all textfiles - for (unsigned int i = 0; i < g_num_logwatch_textfiles ; i++) - g_logwatch_textfiles[i]->missing = true; - - for (unsigned int i = 0; i < g_num_logwatch_globlines; i++) { - globline_container *current_globline = g_logwatch_globlines[i]; - for (int j = 0; j < current_globline->num_tokens; j++) { - process_glob_expression(current_globline->token[j], current_globline->patterns); - } - } -} - - -bool globmatch(const char *pattern, char *astring); - -// Remove missing files from list -void cleanup_logwatch_textfiles() -{ - for (unsigned int i=0; i < g_num_logwatch_textfiles; i++) { - if (g_logwatch_textfiles[i]->missing) { - // remove this file from the list - free(g_logwatch_textfiles[i]->path); - delete g_logwatch_textfiles[i]; - - // One entry less in our list.. - g_num_logwatch_textfiles--; - i--; - - // Check if this was not the last entry in the list - // In this case take the last entry and fill the gap - if (i != g_num_logwatch_textfiles) - g_logwatch_textfiles[i] = g_logwatch_textfiles[g_num_logwatch_textfiles]; - } - } -} - -// Called on program exit -void cleanup_logwatch() -{ - // cleanup textfiles - for (unsigned int i = 0; i < g_num_logwatch_textfiles ; i++) - g_logwatch_textfiles[i]->missing = true; - cleanup_logwatch_textfiles(); - - // cleanup globlines and textpatterns - for (unsigned int i = 0; i < g_num_logwatch_globlines ; i++) { - for (int j = 0; j < g_logwatch_globlines[i]->num_tokens ; j++) { - free(g_logwatch_globlines[i]->token[j]->pattern); - delete g_logwatch_globlines[i]->token[j]; - } - for (int j = 0; j < g_logwatch_globlines[i]->patterns->num_patterns; j++) { - free(g_logwatch_globlines[i]->patterns->patterns[j]->glob_pattern); - delete g_logwatch_globlines[i]->patterns->patterns[j]; - } - delete g_logwatch_globlines[i]->patterns; - delete g_logwatch_globlines[i]; - } -} - - -// Process content of the given textfile -// Can be called in dry-run mode (write_output = false). 
This tries to detect CRIT or WARN patterns -// If write_output is set to true any data found is written to the out socket -bool process_textfile(FILE *file, logwatch_textfile* textfile, SOCKET &out, bool write_output) { - char line[4096]; - condition_pattern *pattern = 0; - verbose("Checking file %s\n", textfile->path); - while (!feof(file)) { - if (!fgets(line, sizeof(line), file)) - break; - - if (line[strlen(line)-1] == '\n') - line[strlen(line)-1] = 0; - - char state = '.'; - for (int j=0; j < textfile->patterns->num_patterns; j++) { - pattern = textfile->patterns->patterns[j]; - if (globmatch(pattern->glob_pattern, line)){ - if (!write_output && (pattern->state == 'C' || pattern->state == 'W' || pattern->state == 'O')) - return true; - state = pattern->state; - break; - } - } - if (write_output && strlen(line) > 0) - output(out, "%c %s\n", state, line); - } - - return false; -} - - -// The output of this section is compatible with -// the logwatch agent for Linux and UNIX -void section_logfiles(SOCKET &out) -{ - crash_log("<<>>"); - output(out, "<<>>\n"); - revalidate_logwatch_textfiles(); - - logwatch_textfile *textfile; - - // Missing glob patterns - for (unsigned int i = 0; i < g_num_logwatch_globlines; i++) { - globline_container *current_globline = g_logwatch_globlines[i]; - for(int j = 0; j < current_globline->num_tokens; j++) { - if (!current_globline->token[j]->found_match) - output(out, "[[[%s:missing]]]\n", current_globline->token[j]->pattern); - } - } - - for (unsigned int i = 0; i < g_num_logwatch_textfiles ; i++) { - textfile = g_logwatch_textfiles[i]; - if (textfile->missing){ - output(out, "[[[%s:missing]]]\n", textfile->path); - continue; - } - - - FILE *file = fopen(textfile->path, "r"); - if (!file) { - output(out, "[[[%s:cannotopen]]]\n", textfile->path); - continue; - } - - output(out, "[[[%s]]]\n", textfile->path); - - if (textfile->offset == textfile->file_size) {// no new data - fclose(file); - continue; - } - - fseek(file, textfile->offset, SEEK_SET); - - // try to find WARN / CRIT match - bool found_match = process_textfile(file, textfile, out, false); - - if (found_match) { - fseek(file, textfile->offset, SEEK_SET); - process_textfile(file, textfile, out, true); - } - - fclose(file); - textfile->offset = textfile->file_size; - } - - cleanup_logwatch_textfiles(); - save_logwatch_offsets(); -} - - -// The output of this section is compatible with -// the logwatch agent for Linux and UNIX -void section_eventlog(SOCKET &out) -{ - crash_log("<<>>"); - - // This agent remembers the record numbers - // of the event logs up to which messages have - // been processed. When started, the eventlog - // is skipped to the end. Historic messages are - // not been processed. - static bool first_run = true; - output(out, "<<>>\n"); - - if (find_eventlogs(out)) - { - for (unsigned i=0; i < num_eventlogs; i++) { - if (!newly_found[i]) // not here any more! - output(out, "[[[%s:missing]]]\n", eventlog_names[i]); - else { - // Get the configuration of that log file (which messages to send) - int level = 1; - for (int j=0; j>>. 
-// MemTotal: 514104 kB -// MemFree: 19068 kB -// SwapTotal: 1048568 kB -// SwapFree: 1043732 kB - -void section_mem(SOCKET &out) -{ - crash_log("<<>>"); - output(out, "<<>>\n"); - - MEMORYSTATUSEX statex; - statex.dwLength = sizeof (statex); - GlobalMemoryStatusEx (&statex); - - output(out, "MemTotal: %11d kB\n", statex.ullTotalPhys / 1024); - output(out, "MemFree: %11d kB\n", statex.ullAvailPhys / 1024); - output(out, "SwapTotal: %11d kB\n", (statex.ullTotalPageFile - statex.ullTotalPhys) / 1024); - output(out, "SwapFree: %11d kB\n", (statex.ullAvailPageFile - statex.ullAvailPhys) / 1024); - output(out, "PageTotal: %11d kB\n", statex.ullTotalPageFile / 1024); - output(out, "PageFree: %11d kB\n", statex.ullAvailPageFile / 1024); - output(out, "VirtualTotal: %11d kB\n", statex.ullTotalVirtual / 1024); - output(out, "VirtualFree: %11d kB\n", statex.ullAvailVirtual / 1024); -} - -// .-----------------------------------------------------------------------. -// | ______ __ _ _ _ __ ______ | -// | / / / // _(_) | ___(_)_ __ / _| ___\ \ \ \ | -// | / / / /| |_| | |/ _ \ | '_ \| |_ / _ \\ \ \ \ | -// | \ \ \ \| _| | | __/ | | | | _| (_) / / / / | -// | \_\_\_\_| |_|_|\___|_|_| |_|_| \___/_/_/_/ | -// | | -// '-----------------------------------------------------------------------' - -void output_fileinfos(SOCKET &out, const char *path); -void output_fileinfo(SOCKET &out, const char *basename, WIN32_FIND_DATA *data); - -void section_fileinfo(SOCKET &out) -{ - crash_log("<<>>"); - output(out, "<<>>\n"); - output(out, "%.0f\n", current_time()); - for (unsigned i=0; inFileSizeLow - + (((unsigned long long)data->nFileSizeHigh) << 32); - - if (0 == (data->dwFileAttributes & FILE_ATTRIBUTE_DIRECTORY)) { - output(out, "%s\\%s|%llu|%.0f\n", basename, - data->cFileName, size, file_time(&data->ftLastWriteTime)); - } -} - - -bool handle_fileinfo_config_variable(char *var, char *value) -{ - if (!strcmp(var, "path")) { - if (g_num_fileinfo_paths >= MAX_FILEINFO_ENTRIES) { - fprintf(stderr, "Sorry, only %d entries in [fileinfo] are allowed.\r\n", - MAX_FILEINFO_ENTRIES); - return false; - } - g_fileinfo_path[g_num_fileinfo_paths++] = strdup(value); - return true; - } - return false; -} - - -// .----------------------------------------------------------------------. -// | ____ _ | -// | | _ \ _ _ _ __ _ __ (_)_ __ __ _ _ __ _ __ __ _ ___ | -// | | |_) | | | | '_ \| '_ \| | '_ \ / _` | | '_ \| '__/ _` / __| | -// | | _ <| |_| | | | | | | | | | | | (_| | | |_) | | | (_| \__ \ | -// | |_| \_\\__,_|_| |_|_| |_|_|_| |_|\__, | | .__/|_| \__, |___/ | -// | |___/ |_| |___/ | -// +----------------------------------------------------------------------+ -// | Functions for dealing with running external programs. | -// '----------------------------------------------------------------------' - -char *add_interpreter(char *path, char *newpath) -{ - if (!strcmp(path + strlen(path) - 4, ".vbs")) { - // If this is a vbscript don't rely on the default handler for this - // file extensions. This might be notepad or some other editor by - // default on a lot of systems. So better add cscript as interpreter. - snprintf(newpath, 256, "cscript.exe //Nologo \"%s\"", path); - return newpath; - } - else if (!strcmp(path + strlen(path) - 4, ".ps1")) { - // Same for the powershell scripts. Add the powershell interpreter. - // To make this work properly two things are needed: - // 1.) The powershell interpreter needs to be in PATH - // 2.) 
-
-// .----------------------------------------------------------------------.
-// | ____ _ |
-// | | _ \ _ _ _ __ _ __ (_)_ __ __ _ _ __ _ __ __ _ ___ |
-// | | |_) | | | | '_ \| '_ \| | '_ \ / _` | | '_ \| '__/ _` / __| |
-// | | _ <| |_| | | | | | | | | | | | (_| | | |_) | | | (_| \__ \ |
-// | |_| \_\\__,_|_| |_|_| |_|_|_| |_|\__, | | .__/|_| \__, |___/ |
-// | |___/ |_| |___/ |
-// +----------------------------------------------------------------------+
-// | Functions for dealing with running external programs. |
-// '----------------------------------------------------------------------'
-
-char *add_interpreter(char *path, char *newpath)
-{
-    if (!strcmp(path + strlen(path) - 4, ".vbs")) {
-        // If this is a vbscript don't rely on the default handler for this
-        // file extensions. This might be notepad or some other editor by
-        // default on a lot of systems. So better add cscript as interpreter.
-        snprintf(newpath, 256, "cscript.exe //Nologo \"%s\"", path);
-        return newpath;
-    }
-    else if (!strcmp(path + strlen(path) - 4, ".ps1")) {
-        // Same for the powershell scripts. Add the powershell interpreter.
-        // To make this work properly two things are needed:
-        // 1.) The powershell interpreter needs to be in PATH
-        // 2.) The execution policy needs to allow the script execution
-        //     -> Get-ExecutionPolicy / Set-ExecutionPolicy
-        snprintf(newpath, 256, "powershell.exe -NoLogo -ExecutionPolicy RemoteSigned \"& \'%s\'\"", path);
-        return newpath;
-    }
-    else {
-        snprintf(newpath, 256, "\"%s\"", path);
-        return newpath;
-    }
-}
-
-bool banned_exec_name(char *name)
-{
-    if (strlen(name) < 5)
-        return false;
-
-    char *extension = name + strlen(name) - 4;
-    if (g_num_execute_suffixes) {
-        if (extension[0] != '.')
-            return true;
-        extension ++;
-        unsigned i;
-        for (i=0; id_name;
-            if (name[0] != '.' && !banned_exec_name(name)) {
-                snprintf(path, sizeof(path), "%s\\%s", dirname, name);
-                run_plugin(out, path);
-            }
-        }
-        closedir(dir);
-    }
-}
-
-// .----------------------------------------------------------------------.
-// | ______ ______ |
-// | / / / / _ __ ___ _ __ _ __ ___ \ \ \ \ |
-// | / / / / | '_ ` _ \| '__| '_ \ / _ \ \ \ \ \ |
-// | \ \ \ \ | | | | | | | | |_) | __/ / / / / |
-// | \_\_\_\ |_| |_| |_|_| | .__/ \___| /_/_/_/ |
-// | |_| |
-// '----------------------------------------------------------------------'
-
-void section_mrpe(SOCKET &out)
-{
-    crash_log("<<<mrpe>>>");
-    output(out, "<<<mrpe>>>\n");
-    for (unsigned i=0; iplugin_name, entry->service_description);
-
-        FILE *f = _popen(entry->command_line, "r");
-        if (!f) {
-            output(out, "3 Unable to execute - plugin may be missing.\n");
-            continue;
-        }
-
-        if (f) {
-            char buffer[8192];
-            int bytes = fread(buffer, 1, sizeof(buffer) - 1, f);
-            buffer[bytes] = 0;
-            rstrip(buffer);
-            char *plugin_output = lstrip(buffer);
-            // Replace \n with Ascii 1 and \r with spaces
-            for (char *x = plugin_output; *x; x++) {
-                if (*x == '\n')
-                    *x = (char)1;
-                else if (*x == '\r')
-                    *x = ' ';
-            }
-            int status = _pclose(f);
-            int nagios_code = status;
-            output(out, "%d %s\n", nagios_code, plugin_output);
-        }
-    }
-}
-
-
-// .----------------------------------------------------------------------.
-// | ______ _ _ ______ |
-// | / / / / | | ___ ___ __ _| | \ \ \ \ |
-// | / / / / | |/ _ \ / __/ _` | | \ \ \ \ |
-// | \ \ \ \ | | (_) | (_| (_| | | / / / / |
-// | \_\_\_\ |_|\___/ \___\__,_|_| /_/_/_/ |
-// | |
-// '----------------------------------------------------------------------'
-
-void section_local(SOCKET &out)
-{
-    crash_log("<<<local>>>");
-    output(out, "<<<local>>>\n");
-    run_external_programs(out, g_local_dir);
-}
-
-// .----------------------------------------------------------------------.
-// | ____ _ |
-// | | _ \| |_ _ __ _(_)_ __ ___ |
-// | | |_) | | | | |/ _` | | '_ \/ __| |
-// | | __/| | |_| | (_| | | | | \__ \ |
-// | |_| |_|\__,_|\__, |_|_| |_|___/ |
-// | |___/ |
-// '----------------------------------------------------------------------'
-
-void section_plugins(SOCKET &out)
-{
-    run_external_programs(out, g_plugins_dir);
-}
-
-
-
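Each MRPE result must travel as a single agent line of the form "<state> <output>", which is why the loop in section_mrpe() above replaces '\n' by ASCII 1 and '\r' by a space before printing. The same rewrite in isolation, run on a made-up plugin output:

#include <stdio.h>

// Same transformation as the loop in section_mrpe(): line breaks survive
// as a marker byte, carriage returns become plain spaces, so the whole
// plugin output fits on one protocol line.
static void flatten(char *s) {
    for (; *s; s++) {
        if (*s == '\n')
            *s = '\x01';
        else if (*s == '\r')
            *s = ' ';
    }
}

int main() {
    char buf[] = "DISK OK\r\nC: 42% used";   // hypothetical plugin output
    flatten(buf);
    printf("2 %s\n", buf);                   // one agent line: "<state> <output>"
}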
-// .----------------------------------------------------------------------.
-// | ______ ____ _ _ __ __ _ __ ______ |
-// | / / / / / ___| |__ ___ ___| | __ | \/ | |/ / \ \ \ \ |
-// | / / / / | | | '_ \ / _ \/ __| |/ / | |\/| | ' / \ \ \ \ |
-// | \ \ \ \ | |___| | | | __/ (__| < | | | | . \ / / / / |
-// | \_\_\_\ \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ /_/_/_/ |
-// | |_____| |
-// +----------------------------------------------------------------------+
-// | The section <<<check_mk>>> |
-// '----------------------------------------------------------------------'
-
-void section_check_mk(SOCKET &out)
-{
-    crash_log("<<<check_mk>>>");
-    output(out, "<<<check_mk>>>\n");
-    output(out, "Version: %s\n", CHECK_MK_VERSION);
-    output(out, "AgentOS: windows\n");
-    output(out, "Hostname: %s\n", g_hostname);
-    output(out, "WorkingDirectory: %s\n", g_current_directory);
-    output(out, "ConfigFile: %s\n", g_config_file);
-    output(out, "AgentDirectory: %s\n", g_agent_directory);
-    output(out, "PluginsDirectory: %s\n", g_plugins_dir);
-    output(out, "LocalDirectory: %s\n", g_local_dir);
-    if (g_crash_debug) {
-        output(out, "ConnectionLog: %s\n", g_connection_log);
-        output(out, "CrashLog: %s\n", g_crash_log);
-        output(out, "SuccessLog: %s\n", g_success_log);
-    }
-
-    output(out, "OnlyFrom:");
-    if (g_num_only_from == 0)
-        output(out, " 0.0.0.0/0\n");
-    else {
-        for (unsigned i=0; i < g_num_only_from; i++) {
-            ipspec *is = &g_only_from[i];
-            output(out, " %d.%d.%d.%d/%d",
-                   is->address & 0xff,
-                   is->address >> 8 & 0xff,
-                   is->address >> 16 & 0xff,
-                   is->address >> 24 & 0xff,
-                   is->bits);
-        }
-        output(out, "\n");
-    }
-}
-
-
-
-// .----------------------------------------------------------------------.
-// | ____ _ |
-// | / ___| ___ _ ____ _(_) ___ ___ |
-// | \___ \ / _ \ '__\ \ / / |/ __/ _ \ |
-// | ___) | __/ | \ V /| | (_| __/ |
-// | |____/ \___|_| \_/ |_|\___\___| |
-// | |
-// +----------------------------------------------------------------------+
-// | Stuff dealing with the Windows service management. |
-// '----------------------------------------------------------------------'
-
-TCHAR* gszServiceName = (TCHAR *)TEXT(SERVICE_NAME);
-SERVICE_STATUS serviceStatus;
-SERVICE_STATUS_HANDLE serviceStatusHandle = 0;
-
-
-void WINAPI ServiceControlHandler( DWORD controlCode )
-{
-    switch ( controlCode )
-    {
-    case SERVICE_CONTROL_INTERROGATE:
-        break;
-
-    case SERVICE_CONTROL_SHUTDOWN:
-    case SERVICE_CONTROL_STOP:
-        should_terminate = true;
-        serviceStatus.dwCurrentState = SERVICE_STOP_PENDING;
-        SetServiceStatus( serviceStatusHandle, &serviceStatus );
-        return;
-
-    case SERVICE_CONTROL_PAUSE:
-        break;
-
-    case SERVICE_CONTROL_CONTINUE:
-        break;
-
-    default:
-        if ( controlCode >= 128 && controlCode <= 255 )
-            // user defined control code
-            break;
-        else
-            // unrecognised control code
-            break;
-    }
-
-    SetServiceStatus( serviceStatusHandle, &serviceStatus );
-}
-
-void WINAPI ServiceMain(DWORD, TCHAR* [] )
-{
-    // initialise service status
-    serviceStatus.dwServiceType = SERVICE_WIN32_OWN_PROCESS;
-    serviceStatus.dwCurrentState = SERVICE_STOPPED;
-    serviceStatus.dwControlsAccepted = 0;
-    serviceStatus.dwWin32ExitCode = NO_ERROR;
-    serviceStatus.dwServiceSpecificExitCode = NO_ERROR;
-    serviceStatus.dwCheckPoint = 0;
-    serviceStatus.dwWaitHint = 0;
-
-    serviceStatusHandle = RegisterServiceCtrlHandler( gszServiceName,
-                                                      ServiceControlHandler );
-
-    if ( serviceStatusHandle )
-    {
-        // service is starting
-        serviceStatus.dwCurrentState = SERVICE_START_PENDING;
-        SetServiceStatus( serviceStatusHandle, &serviceStatus );
-
-        // Service running
-        serviceStatus.dwControlsAccepted |= (SERVICE_ACCEPT_STOP |
-                                             SERVICE_ACCEPT_SHUTDOWN);
-        serviceStatus.dwCurrentState = SERVICE_RUNNING;
-        SetServiceStatus( serviceStatusHandle, &serviceStatus );
-
-        do_tcp = true;
-        listen_tcp_loop();
-
-        // service is now stopped
-
serviceStatus.dwControlsAccepted &= ~(SERVICE_ACCEPT_STOP | - SERVICE_ACCEPT_SHUTDOWN); - serviceStatus.dwCurrentState = SERVICE_STOPPED; - SetServiceStatus( serviceStatusHandle, &serviceStatus ); - } -} - -void RunService() -{ - SERVICE_TABLE_ENTRY serviceTable[] = - { - { gszServiceName, ServiceMain }, - { 0, 0 } - }; - - StartServiceCtrlDispatcher( serviceTable ); -} - -void InstallService() -{ - SC_HANDLE serviceControlManager = OpenSCManager( 0, 0, - SC_MANAGER_CREATE_SERVICE ); - - if ( serviceControlManager ) - { - char path[ _MAX_PATH + 1 ]; - if ( GetModuleFileName( 0, path, sizeof(path)/sizeof(path[0]) ) > 0 ) - { - SC_HANDLE service = CreateService( serviceControlManager, - gszServiceName, gszServiceName, - SERVICE_ALL_ACCESS, SERVICE_WIN32_OWN_PROCESS, - SERVICE_AUTO_START, SERVICE_ERROR_IGNORE, path, - 0, 0, 0, 0, 0 ); - if ( service ) - { - CloseServiceHandle( service ); - printf(SERVICE_NAME " Installed Successfully\n"); - } - else - { - if(GetLastError() == ERROR_SERVICE_EXISTS) - printf(SERVICE_NAME " Already Exists.\n"); - else - printf(SERVICE_NAME " Was not Installed Successfully. Error Code %d\n", (int)GetLastError()); - } - } - - CloseServiceHandle( serviceControlManager ); - } -} - -void UninstallService() -{ - SC_HANDLE serviceControlManager = OpenSCManager( 0, 0, - SC_MANAGER_CONNECT ); - - if ( serviceControlManager ) - { - SC_HANDLE service = OpenService( serviceControlManager, - gszServiceName, SERVICE_QUERY_STATUS | DELETE ); - if ( service ) - { - SERVICE_STATUS serviceStatus; - if ( QueryServiceStatus( service, &serviceStatus ) ) - { - if ( serviceStatus.dwCurrentState == SERVICE_STOPPED ) - { - if(DeleteService( service )) - printf(SERVICE_NAME " Removed Successfully\n"); - else - { - DWORD dwError; - dwError = GetLastError(); - if(dwError == ERROR_ACCESS_DENIED) - printf("Access Denied While trying to Remove " SERVICE_NAME " \n"); - else if(dwError == ERROR_INVALID_HANDLE) - printf("Handle invalid while trying to Remove " SERVICE_NAME " \n"); - else if(dwError == ERROR_SERVICE_MARKED_FOR_DELETE) - printf(SERVICE_NAME " already marked for deletion\n"); - } - } - else - { - printf(SERVICE_NAME " is still Running.\n"); - } - } - CloseServiceHandle( service ); - } - CloseServiceHandle( serviceControlManager ); - } -} -void do_install() -{ - InstallService(); -} - -void do_remove() -{ - UninstallService(); -} - -// .-----------------------------------------------------------------------. 
-// | ____ _ ____ _ | -// | / ___|_ __ __ _ ___| |__ | _ \ ___| |__ _ _ __ _ | -// | | | | '__/ _` / __| '_ \ | | | |/ _ \ '_ \| | | |/ _` | | -// | | |___| | | (_| \__ \ | | | | |_| | __/ |_) | |_| | (_| | | -// | \____|_| \__,_|___/_| |_| |____/ \___|_.__/ \__,_|\__, | | -// | |___/ | -// '-----------------------------------------------------------------------' - -void open_crash_log() -{ - struct stat buf; - - if (g_crash_debug) { - snprintf(g_crash_log, sizeof(g_crash_log), "%s\\crash.log", g_agent_directory); - snprintf(g_connection_log, sizeof(g_connection_log), "%s\\connection.log", g_agent_directory); - snprintf(g_success_log, sizeof(g_success_log), "%s\\success.log", g_agent_directory); - - // rename left over log if exists (means crash found) - if (0 == stat(g_connection_log, &buf)) { - // rotate to up to 9 crash log files - char rotate_path_from[256]; - char rotate_path_to[256]; - for (int i=9; i>=1; i--) { - snprintf(rotate_path_to, sizeof(rotate_path_to), - "%s\\crash-%d.log", g_agent_directory, i); - if (i>1) - snprintf(rotate_path_from, sizeof(rotate_path_from), - "%s\\crash-%d.log", g_agent_directory, i-1); - else - snprintf(rotate_path_from, sizeof(rotate_path_from), - "%s\\crash.log", g_agent_directory); - unlink(rotate_path_to); - rename(rotate_path_from, rotate_path_to); - } - rename(g_connection_log, g_crash_log); - g_found_crash = true; - } - - g_connectionlog_file = fopen(g_connection_log, "w"); - gettimeofday(&g_crashlog_start, 0); - time_t now = time(0); - struct tm *t = localtime(&now); - char timestamp[64]; - strftime(timestamp, sizeof(timestamp), "%b %d %H:%M:%S", t); - crash_log("Opened crash log at %s.", timestamp); - } -} - -void close_crash_log() -{ - if (g_crash_debug) { - crash_log("Closing crash log (no crash this time)"); - fclose(g_connectionlog_file); - unlink(g_success_log); - rename(g_connection_log, g_success_log); - } -} - -void crash_log(const char *format, ...) -{ - struct timeval tv; - - if (g_connectionlog_file) { - gettimeofday(&tv, 0); - long int ellapsed_usec = tv.tv_usec - g_crashlog_start.tv_usec; - long int ellapsed_sec = tv.tv_sec - g_crashlog_start.tv_sec; - if (ellapsed_usec < 0) { - ellapsed_usec += 1000000; - ellapsed_sec --; - } - - va_list ap; - va_start(ap, format); - fprintf(g_connectionlog_file, "%ld.%06ld ", ellapsed_sec, ellapsed_usec); - vfprintf(g_connectionlog_file, format, ap); - fputs("\n", g_connectionlog_file); - fflush(g_connectionlog_file); - } -} - -void output_crash_log(SOCKET &out) -{ - output(out, "<<>>\n"); - output(out, "[[[Check_MK Agent]]]\n"); - if (g_found_crash) { - output(out, "C Check_MK Agent crashed\n"); - FILE *f = fopen(g_crash_log, "r"); - char line[1024]; - while (0 != fgets(line, sizeof(line), f)) { - output(out, "W "); - output(out, line); - } - fclose(f); - g_found_crash = false; - } -} - - - -// .----------------------------------------------------------------------. -// | ____ __ _ _ _ | -// | / ___|___ _ __ / _(_) __ _ _ _ _ __ __ _| |_(_) ___ _ __ | -// | | | / _ \| '_ \| |_| |/ _` | | | | '__/ _` | __| |/ _ \| '_ \ | -// | | |__| (_) | | | | _| | (_| | |_| | | | (_| | |_| | (_) | | | | | -// | \____\___/|_| |_|_| |_|\__, |\__,_|_| \__,_|\__|_|\___/|_| |_| | -// | |___/ | -// '----------------------------------------------------------------------' - -int parse_boolean(char *value) -{ - if (!strcmp(value, "yes")) - return 1; - else if (!strcmp(value, "no")) - return 0; - else - fprintf(stderr, "Invalid boolean value. 
Only yes and no are allowed.\r\n"); - return -1; -} - -void lowercase(char *s) -{ - while (*s) { - *s = tolower(*s); - s++; - } -} -// Do a simple pattern matching with the jokers * and ?. -// This is case insensitive (windows-like). -bool globmatch(const char *pattern, char *astring) -{ - const char *p = pattern; - char *s = astring; - while (*s) { - if (!*p) - return false; // pattern too short - - // normal character-wise match - if (tolower(*p) == tolower(*s) || *p == '?') { - p++; - s++; - } - - // non-matching charactetr - else if (*p != '*') - return false; - - else { // check * - // If there is more than one asterisk in the pattern, - // we need to try out several variants. We do this - // by backtracking (smart, eh?) - int maxlength = strlen(s); - // replace * by a sequence of ?, at most the rest length of s - char *subpattern = (char *)malloc(strlen(p) + maxlength + 1); - bool match = false; - for (int i=0; i<=maxlength; i++) { - for (int x=0; x= MAX_ONLY_FROM) { - fprintf(stderr, "Cannot handle more the %d entries for only_from\r\n", MAX_ONLY_FROM); - exit(1); - } - - unsigned a, b, c, d; - int bits = 32; - - if (strchr(value, '/')) { - if (5 != sscanf(value, "%u.%u.%u.%u/%u", &a, &b, &c, &d, &bits)) { - fprintf(stderr, "Invalid value %s for only_hosts\n", value); - exit(1); - } - } - else { - if (4 != sscanf(value, "%u.%u.%u.%u", &a, &b, &c, &d)) { - fprintf(stderr, "Invalid value %s for only_hosts\n", value); - exit(1); - } - } - - uint32_t ip = a + b * 0x100 + c * 0x10000 + d * 0x1000000; - uint32_t mask_swapped = 0; - for (int bit = 0; bit < bits; bit ++) - mask_swapped |= 0x80000000 >> bit; - uint32_t mask; - unsigned char *s = (unsigned char *)&mask_swapped; - unsigned char *t = (unsigned char *)&mask; - t[3] = s[0]; - t[2] = s[1]; - t[1] = s[2]; - t[0] = s[3]; - g_only_from[g_num_only_from].address = ip; - g_only_from[g_num_only_from].netmask = mask; - g_only_from[g_num_only_from].bits = bits; - - if ((ip & mask) != ip) { - fprintf(stderr, "Invalid only_hosts entry: host part not 0: %s/%u", - ipv4_to_text(ip), bits); - exit(1); - } - g_num_only_from ++; -} - -char *next_word(char **line) -{ - if (*line == 0) // allow subsequent calls without checking - return 0; - - char *end = *line + strlen(*line); - char *value = *line; - while (value < end) { - value = lstrip(value); - char *s = value; - while (*s && !isspace(*s)) - s++; - *s = 0; - *line = s + 1; - rstrip(value); - if (strlen(value) > 0) - return value; - else - return 0; - } - return 0; -} - - -void parse_only_from(char *value) -{ - char *word; - while (0 != (word = next_word(&value))) - add_only_from(word); -} - -void parse_execute(char *value) -{ - // clean array if this options has been parsed already - while (g_num_execute_suffixes) - free(g_execute_suffixes[--g_num_execute_suffixes]); - - char *suffix; - while (0 != (suffix = next_word(&value))) { - if (g_num_execute_suffixes < MAX_EXECUTE_SUFFIXES) { - g_execute_suffixes[g_num_execute_suffixes++] = strdup(suffix); - } - } -} - - -bool parse_crash_debug(char *value) -{ - int s = parse_boolean(value); - if (s == -1) - return false; - g_crash_debug = s; - return true; -} - - -bool handle_global_config_variable(char *var, char *value) -{ -if (!strcmp(var, "only_from")) { - parse_only_from(value); - return true; - } - else if (!strcmp(var, "execute")) { - parse_execute(value); - return true; - } - else if (!strcmp(var, "crash_debug")) { - return parse_crash_debug(value); - } - else if (!strcmp(var, "sections")) { - enabled_sections = 0; - char *word; - while ((word 
= next_word(&value))) { - if (!strcmp(word, "check_mk")) - enabled_sections |= SECTION_CHECK_MK; - else if (!strcmp(word, "uptime")) - enabled_sections |= SECTION_UPTIME; - else if (!strcmp(word, "df")) - enabled_sections |= SECTION_DF; - else if (!strcmp(word, "ps")) - enabled_sections |= SECTION_PS; - else if (!strcmp(word, "mem")) - enabled_sections |= SECTION_MEM; - else if (!strcmp(word, "services")) - enabled_sections |= SECTION_SERVICES; - else if (!strcmp(word, "winperf")) - enabled_sections |= SECTION_WINPERF; - else if (!strcmp(word, "logwatch")) - enabled_sections |= SECTION_LOGWATCH; - else if (!strcmp(word, "logfiles")) - enabled_sections |= SECTION_LOGFILES; - else if (!strcmp(word, "systemtime")) - enabled_sections |= SECTION_SYSTEMTIME; - else if (!strcmp(word, "plugins")) - enabled_sections |= SECTION_PLUGINS; - else if (!strcmp(word, "local")) - enabled_sections |= SECTION_LOCAL; - else if (!strcmp(word, "mrpe")) - enabled_sections |= SECTION_MRPE; - else if (!strcmp(word, "fileinfo")) - enabled_sections |= SECTION_FILEINFO; - else { - fprintf(stderr, "Invalid section '%s'.\r\n", word); - return false; - } - } - return true; - } - - return false; -} - -bool handle_winperf_config_variable(char *var, char *value) -{ - if (!strcmp(var, "counters")) { - char *word; - while (0 != (word = next_word(&value))) { - if (g_num_winperf_counters >= MAX_WINPERF_COUNTERS) { - fprintf(stderr, "Defined too many counters in [winperf]:counters.\r\n"); - exit(1); - } - char *colon = strchr(word, ':'); - if (!colon) { - fprintf(stderr, "Invalid counter '%s' in section [winperf]: need number and colon, e.g. 238:processor.\n", word); - exit(1); - } - *colon = 0; - g_winperf_counters[g_num_winperf_counters].name = strdup(colon + 1); - g_winperf_counters[g_num_winperf_counters].id = atoi(word); - g_num_winperf_counters ++; - } - return true; - } - return false; -} - -bool handle_logfiles_config_variable(char *var, char *value) -{ - load_logwatch_offsets(); - if (!strcmp(var, "textfile")) { - if (value != 0) - add_globline(value); - return true; - }else if (!strcmp(var, "warn")) { - if (value != 0) - add_condition_pattern('W', value); - return true; - }else if (!strcmp(var, "crit")) { - if (value != 0) - add_condition_pattern('C', value); - return true; - }else if (!strcmp(var, "ignore")) { - if (value != 0) - add_condition_pattern('I', value); - return true; - }else if (!strcmp(var, "ok")) { - if (value != 0) - add_condition_pattern('O', value); - return true; - } - return false; -} - -bool handle_logwatch_config_variable(char *var, char *value) -{ - if (!strncmp(var, "logfile ", 8)) { - int level; - char *logfilename = lstrip(var + 8); - lowercase(logfilename); - if (!strcmp(value, "off")) - level = -1; - else if (!strcmp(value, "all")) - level = 0; - else if (!strcmp(value, "warn")) - level = 1; - else if (!strcmp(value, "crit")) - level = 2; - else { - fprintf(stderr, "Invalid log level '%s'.\r\n" - "Allowed are off, all, warn and crit.\r\n", value); - return false; - } - if (num_eventlog_configs < MAX_EVENTLOGS) { - eventlog_config[num_eventlog_configs].level = level; - strncpy(eventlog_config[num_eventlog_configs].name, logfilename, 256); - num_eventlog_configs++; - } - return true; - } - else if (!strcmp(var, "sendall")) { - int s = parse_boolean(value); - if (s == -1) - return false; - logwatch_send_initial_entries = s; - return true; - } - return false; -} - -bool check_host_restriction(char *patterns) -{ - char *word; - while ((word = next_word(&patterns))) { - if (globmatch(word, 
g_hostname)) { - return true; - } - } - return false; -} - - -bool handle_mrpe_config_variable(char *var, char *value) -{ - if (!strcmp(var, "check")) { - if (g_num_mrpe_entries >= MAX_MRPE_COMMANDS) { - fprintf(stderr, "Sorry, we are limited to %u MRPE commands\r\n", MAX_MRPE_COMMANDS); - return false; - } - - // First word: service description - // Rest: command line - fprintf(stderr, "VALUE: [%s]\r\n", value); - char *service_description = next_word(&value); - char *command_line = value; - if (!command_line || !command_line[0]) { - fprintf(stderr, "Invalid command specification for mrpe:\r\n" - "Format: SERVICEDESC COMMANDLINE\r\n"); - return false; - } - fprintf(stderr, "CMD: [%s]\r\n", command_line); - - strncpy(g_mrpe_entries[g_num_mrpe_entries].command_line, command_line, - sizeof(g_mrpe_entries[g_num_mrpe_entries].command_line)); - strncpy(g_mrpe_entries[g_num_mrpe_entries].service_description, service_description, - sizeof(g_mrpe_entries[g_num_mrpe_entries].service_description)); - - // compute plugin name, drop directory part - char *plugin_name = next_word(&value); - char *p = strrchr(plugin_name, '/'); - if (!p) - p = strrchr(plugin_name, '\\'); - if (p) - plugin_name = p + 1; - strncpy(g_mrpe_entries[g_num_mrpe_entries].plugin_name, plugin_name, - sizeof(g_mrpe_entries[g_num_mrpe_entries].plugin_name)); - - g_num_mrpe_entries++; - return true; - } - return false; -} - - -/* Example configuration file: - -[global] - # Process this logfile only on the following hosts - only_on = zhamzr12 - - # Restrict access to certain IP addresses - only_from = 127.0.0.1 192.168.56.0/24 - - # Enable crash debugging - crash_debug = on - - -[winperf] - # Select counters to extract. The following counters - # are needed by checks shipped with check_mk. - counters = 10332:msx_queues - -[logwatch] - # Select which messages are to be sent in which - # event log - logfile system = off - logfile application = info - logfile * = off - -[mrpe] - check = DISK_C: mrpe/check_disk -w C: - check = MEM mrpe/check_mem -w 10 -c 20 -*/ - -void read_config_file() -{ - snprintf(g_config_file, sizeof(g_config_file), "%s\\check_mk.ini", g_agent_directory); - FILE *file = fopen(g_config_file, "r"); - if (!file) { - g_config_file[0] = 0; - return; - } - - char line[512]; - int lineno = 0; - bool (*variable_handler)(char *var, char *value) = 0; - bool is_active = true; // false in sections with host restrictions - - while (!feof(file)) { - if (!fgets(line, sizeof(line), file)){ - fclose(file); - return; - } - lineno ++; - char *l = strip(line); - if (l[0] == 0 || l[0] == '#' || l[0] == ';') - continue; // skip empty lines and comments - int len = strlen(l); - if (l[0] == '[' && l[len-1] == ']') { - // found section header - l[len-1] = 0; - char *section = l + 1; - if (!strcmp(section, "global")) - variable_handler = handle_global_config_variable; - else if (!strcmp(section, "winperf")) - variable_handler = handle_winperf_config_variable; - else if (!strcmp(section, "logwatch")) - variable_handler = handle_logwatch_config_variable; - else if (!strcmp(section, "logfiles")) - variable_handler = handle_logfiles_config_variable; - else if (!strcmp(section, "mrpe")) - variable_handler = handle_mrpe_config_variable; - else if (!strcmp(section, "fileinfo")) - variable_handler = handle_fileinfo_config_variable; - else { - fprintf(stderr, "Invalid section [%s] in %s in line %d.\r\n", - section, g_config_file, lineno); - exit(1); - } - // forget host-restrictions if new section begins - is_active = true; - } - else if 
(!variable_handler) { - fprintf(stderr, "Line %d is outside of any section.\r\n", lineno); - exit(1); - } - else { - // split up line at = sign - char *s = l; - while (*s && *s != '=') - s++; - if (*s != '=') { - fprintf(stderr, "Invalid line %d in %s.\r\n", - lineno, g_config_file); - exit(1); - } - *s = 0; - char *value = s + 1; - char *variable = l; - rstrip(variable); - lowercase(variable); - value = strip(value); - - // handle host restriction - if (!strcmp(variable, "host")) - is_active = check_host_restriction(value); - - // skip all other variables for non-relevant hosts - else if (!is_active) - continue; - - // Useful for debugging host restrictions - else if (!strcmp(variable, "print")) - fprintf(stderr, "%s\r\n", value); - - - else if (!variable_handler(variable, value)) { - fprintf(stderr, "Invalid entry in %s line %d.\r\n", g_config_file, lineno); - exit(1); - } - } - } - fclose(file); -} - - - - - -// .----------------------------------------------------------------------. -// | _____ ____ ____ ____ _ _ | -// | |_ _/ ___| _ \ / ___| ___ ___| | _____| |_ | -// | | || | | |_) | \___ \ / _ \ / __| |/ / _ \ __| | -// | | || |___| __/ ___) | (_) | (__| < __/ |_ | -// | |_| \____|_| |____/ \___/ \___|_|\_\___|\__| | -// | | -// +----------------------------------------------------------------------+ -// | Stuff dealing with the handling of the TCP socket | -// '----------------------------------------------------------------------' - -void wsa_startup() -{ - WSADATA wsa; - if (0 != WSAStartup(MAKEWORD(2, 0), &wsa)) { - fprintf(stderr, "Cannot initialize winsock.\n"); - exit(1); - } - if (0 != (gethostname(g_hostname, sizeof(g_hostname)))) { - strcpy(g_hostname, ""); - } - -} - -char *ipv4_to_text(uint32_t ip) -{ - static char text[32]; - snprintf(text, 32, "%u.%u.%u.%u", - ip & 255, - ip >> 8 & 255, - ip >> 16 & 255, - ip >> 24); - return text; -} - -bool check_only_from(uint32_t ip) -{ - if (g_num_only_from == 0) - return true; // no restriction set - - for (unsigned i=0; i> 8) & 0xff, (ip >> 16) & 0xff, (ip >> 24) & 0xff); - output_data(connection); - close_crash_log(); - } - closesocket(connection); - } - } - else if (!should_terminate) { - Sleep(1); // should never happen - } - } - closesocket(s); - WSACleanup(); -} - - -void output(SOCKET &out, const char *format, ...) -{ - static char outbuffer[4096]; - - va_list ap; - va_start(ap, format); - int len = vsnprintf(outbuffer, sizeof(outbuffer), format, ap); - if (do_tcp) { - while (!should_terminate) { - int result = send(out, outbuffer, len, 0); - if (result == SOCKET_ERROR) { - debug("send() failed"); - int error = WSAGetLastError(); - if (error == WSAEINTR) { - debug("INTR. Nochmal..."); - continue; - } - else if (error == WSAEINPROGRESS) { - debug("INPROGRESS. Nochmal..."); - continue; - } - else if (error == WSAEWOULDBLOCK) { - debug("WOULDBLOCK. Komisch. Breche ab..."); - break; - } - else { - debug("Anderer Fehler. Gebe auf\n"); - break; - } - } - else if (result == 0) - debug("send() returned 0"); - else if (result != len) - debug("send() sent too few bytes"); - break; - } - } - else - fwrite(outbuffer, len, 1, stdout); -} - - -// .----------------------------------------------------------------------. 
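check_only_from() above boils down to a network-part comparison. A simplified sketch with plain host-order integers: the agent stores the octets byte-swapped, as built in add_only_from(), but the masking test is identical. The struct name and the hex addresses below are hand-made examples, not the agent's definitions:

#include <stdint.h>
#include <stdio.h>

struct ipspec_ { uint32_t address; uint32_t netmask; };

// An address is accepted when its network part equals a configured entry;
// add_only_from() ensures the entry's host part is zero, so comparing
// against the stored address directly is enough.
static bool allowed(uint32_t ip, const ipspec_ *list, int n) {
    if (n == 0)
        return true;                       // no restriction configured
    for (int i = 0; i < n; i++)
        if ((ip & list[i].netmask) == list[i].address)
            return true;
    return false;
}

int main() {
    ipspec_ only_from[] = { { 0xC0A83800u, 0xFFFFFF00u } };  // 192.168.56.0/24
    printf("%d\n", allowed(0xC0A8382Au, only_from, 1));      // 192.168.56.42 -> 1
    printf("%d\n", allowed(0x0A000001u, only_from, 1));      // 10.0.0.1      -> 0
}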
-// | __ __ _ | -// | | \/ | __ _(_)_ __ | -// | | |\/| |/ _` | | '_ \ | -// | | | | | (_| | | | | | | -// | |_| |_|\__,_|_|_| |_| | -// | | -// '----------------------------------------------------------------------' - -void usage() -{ - fprintf(stderr, "Usage: \n" - "check_mk_agent version -- show version " CHECK_MK_VERSION " and exit\n" - "check_mk_agent install -- install as Windows NT service Check_Mk_Agent\n" - "check_mk_agent remove -- remove Windows NT service\n" - "check_mk_agent adhoc -- open TCP port %d and answer request until killed\n" - "check_mk_agent test -- test output of plugin, do not open TCP port\n" - "check_mk_agent debug -- similar to test, but with lots of debug output\n", CHECK_MK_AGENT_PORT); - exit(1); -} - - -void do_debug() -{ - verbose_mode = true; - do_tcp = false; - // logwatch_send_initial_entries = true; - // logwatch_suppress_info = false; - SOCKET dummy; - output_data(dummy); -} - -void do_test() -{ - do_tcp = false; - SOCKET dummy; - output_data(dummy); -} - -void do_adhoc() -{ - do_tcp = true; - printf("Listening for TCP connections on port %d\n", CHECK_MK_AGENT_PORT); - printf("Close window or press Ctrl-C to exit\n"); - fflush(stdout); - - should_terminate = false; - listen_tcp_loop(); // runs for ever or until Ctrl-C -} - - -void output_data(SOCKET &out) -{ - // make sure, output of numbers is not localized - setlocale(LC_ALL, "C"); - - if (g_crash_debug) - output_crash_log(out); - if (enabled_sections & SECTION_CHECK_MK) - section_check_mk(out); - if (enabled_sections & SECTION_UPTIME) - section_uptime(out); - if (enabled_sections & SECTION_DF) - section_df(out); - if (enabled_sections & SECTION_PS) - section_ps(out); - if (enabled_sections & SECTION_MEM) - section_mem(out); - if (enabled_sections & SECTION_FILEINFO) - section_fileinfo(out); - if (enabled_sections & SECTION_SERVICES) - section_services(out); - if (enabled_sections & SECTION_WINPERF) - section_winperf(out); - if (enabled_sections & SECTION_LOGWATCH) - section_eventlog(out); - if (enabled_sections & SECTION_LOGFILES) - section_logfiles(out); - if (enabled_sections & SECTION_PLUGINS) - section_plugins(out); - if (enabled_sections & SECTION_LOCAL) - section_local(out); - if (enabled_sections & SECTION_MRPE) - section_mrpe(out); - if (enabled_sections & SECTION_SYSTEMTIME) - section_systemtime(out); -} - - -void cleanup() -{ - if (eventlog_buffer_size > 0) - delete [] eventlog_buffer; - - unregister_all_eventlogs(); // frees a few bytes - - while (g_num_execute_suffixes) - free(g_execute_suffixes[--g_num_execute_suffixes]); - - while (g_num_fileinfo_paths) - free(g_fileinfo_path[--g_num_fileinfo_paths]); - - cleanup_logwatch(); -} - -void show_version() -{ - printf("Check_MK_Agent version %s\n", CHECK_MK_VERSION); -} - -void get_agent_dir(char *buffer, int size) -{ - buffer[0] = 0; - - HKEY key; - DWORD ret = RegOpenKeyEx(HKEY_LOCAL_MACHINE, - "SYSTEM\\CurrentControlSet\\Services\\check_mk_agent", 0, KEY_READ, &key); - if (ret == ERROR_SUCCESS) - { - DWORD dsize = size; - if (ERROR_SUCCESS == RegQueryValueEx(key, "ImagePath", NULL, NULL, (BYTE *)buffer, &dsize)) - { - char *end = buffer + strlen(buffer); - // search backwards for backslash - while (end > buffer && *end != '\\') - end--; - *end = 0; // replace \ with string end => get directory of executable - - // Handle case where name is quoted with double quotes. - // This is reported to happen on some 64 Bit systems when spaces - // are in the directory name. 
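// Standalone sketch of the quote handling described above, run on a
// hypothetical ImagePath value instead of the registry: shifting the
// string left by one drops the leading quote, and cutting at the last
// backslash removes the executable name together with the closing quote.
#include <stdio.h>
#include <string.h>

int main() {
    char buffer[] = "\"C:\\Program Files (x86)\\check_mk\\check_mk_agent.exe\"";
    if (*buffer == '"')
        memmove(buffer, buffer + 1, strlen(buffer));  // drop leading quote
    char *end = buffer + strlen(buffer);
    while (end > buffer && *end != '\\')              // search backwards for '\'
        end--;
    *end = 0;                     // cut off the exe name (and trailing quote)
    printf("%s\n", buffer);       // C:\Program Files (x86)\check_mk
}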
- if (*buffer == '"') { - memmove(buffer, buffer + 1, strlen(buffer)); - } - } - RegCloseKey(key); - } - else { - // If the agent is not installed as service, simply - // assume the current directory to be the agent - // directory (for test and adhoc mode) - strncpy(buffer, g_current_directory, size); - if (buffer[strlen(buffer)-1] == '\\') // Remove trailing backslash - buffer[strlen(buffer)-1] = 0; - } - -} - -void determine_directories() -{ - // Determine directories once and forever - getcwd(g_current_directory, sizeof(g_current_directory)); - get_agent_dir(g_agent_directory, sizeof(g_agent_directory)); - snprintf(g_plugins_dir, sizeof(g_plugins_dir), "%s\\plugins", g_agent_directory); - snprintf(g_local_dir, sizeof(g_local_dir), "%s\\local", g_agent_directory); - snprintf(g_logwatch_statefile, sizeof(g_logwatch_statefile), "%s\\logstate.txt", g_agent_directory); -} - -int main(int argc, char **argv) -{ - wsa_startup(); - determine_directories(); - read_config_file(); - - if (argc > 2) - usage(); - else if (argc <= 1) - RunService(); - else if (!strcmp(argv[1], "test")) - do_test(); - else if (!strcmp(argv[1], "adhoc")) - do_adhoc(); - else if (!strcmp(argv[1], "install")) - do_install(); - else if (!strcmp(argv[1], "remove")) - do_remove(); - else if (!strcmp(argv[1], "debug")) - do_debug(); - else if (!strcmp(argv[1], "version")) - show_version(); - else - usage(); - - cleanup(); -} - Binary files /tmp/tmpz1J_py/xquijR6Ryw/check-mk-1.2.2p3/windows/check_mk_agent.exe and /tmp/tmpz1J_py/GXS8RRKTxu/check-mk-1.2.6p12/windows/check_mk_agent.exe differ Binary files /tmp/tmpz1J_py/xquijR6Ryw/check-mk-1.2.2p3/windows/check_mk_agent.msi and /tmp/tmpz1J_py/GXS8RRKTxu/check-mk-1.2.6p12/windows/check_mk_agent.msi differ diff -Nru check-mk-1.2.2p3/windows/check_mk.example.ini check-mk-1.2.6p12/windows/check_mk.example.ini --- check-mk-1.2.2p3/windows/check_mk.example.ini 2013-11-05 09:22:37.000000000 +0000 +++ check-mk-1.2.6p12/windows/check_mk.example.ini 2015-09-18 13:35:20.000000000 +0000 @@ -1,64 +1,104 @@ -[global] - # Restrict access to certain IP addresses - # only_from = 127.0.0.1 192.168.56.0/24 - - # Do only execute programs and scripts with - # the following suffixes (in local and plugins). - # Per default all extensions except txt and dir - # are being executed. - # execute = exe bat vbs - - # Restrict the following configuration variables - # in [global] to the following hosts - # host = winsrv* zab???ad - - # Just output certain sections (e.g. for upper hosts) - # sections = check_mk winperf - - -[winperf] - # Select counters to extract. The following counters - # are needed by checks shipped with check_mk. - # counters = 10332:msx_queues - - -[logfiles] - # # Define textfiles to be monitored, separated by | - # textfile = C:\tmp logfiles\message_*.log|D:\log\sample.txt - # # Set patterns for defined textfiles - # ok = Successfully logged in* - # crit = Error in* - # warn = Unable to contact* - # ignore = Backup * saved - - # # Define additional textfiles with different patterns - # textfile = C:\tmp\memo.udf - # # Set patterns for defined textfile - # warn = *overdue* - # ok = *mail sent* - -[logwatch] - # Testing: output *all* messages from the eventlogs - # sendall = yes - - # From application log send only critical messages - # logfile application = crit - - # From the security log send all messages - # logfile security = all - - # Switch all other logfiles off. 
Default is warn: - # send messages of type warn or crit - # logfile * = off - -[mrpe] - # Run classical Nagios plugins. The word before the command - # line is the service description for Nagios. Use backslashes - # in Windows-paths. - # check = Dummy mrpe\check_crit - # check = IP_Configuration mrpe\check_ipconfig 1.2.3.4 - # check = Whatever c:\myplugins\check_whatever -w 10 -c 20 - -[fileinfo] - # path = C:\Programs\Foo\*.log - # path = M:\Bar Test\*.* +[global] + # Restrict access to certain IP addresses + # only_from = 127.0.0.1 192.168.56.0/24 + + # Change port where the agent is listening ( default 6556 ) + # port = 6556 + + # Do only execute programs and scripts with + # the following suffixes (in local and plugins). + # Per default all extensions except txt and dir + # are being executed. + # execute = exe bat vbs + + # Restrict the following configuration variables + # in [global] to the following hosts + # host = winsrv* zab???ad + + # Just output certain sections (e.g. for upper hosts) + # sections = check_mk winperf + + # Write a logfile for tackling down crashes of the agent + # crash_debug = yes + + +[winperf] + # Select counters to extract. The following counters + # are needed by checks shipped with check_mk. + # counters = 10332:msx_queues + # counters = 638:tcp_conn + + +[logfiles] + # # Define textfiles to be monitored, separated by | + # textfile = C:\tmp logfiles\message_*.log|D:\log\sample.txt + # # Set patterns for defined textfiles + # ok = Successfully logged in* + # crit = Error in* + # warn = Unable to contact* + # ignore = Backup * saved + + # # Define additional textfiles with different patterns + # textfile = C:\tmp\memo.udf + # # Set patterns for defined textfile + # warn = *overdue* + # ok = *mail sent* + +[logwatch] + # Testing: output *all* messages from the eventlogs + # sendall = yes + + # From application log send only critical messages + # logfile application = crit + + # From system log send only warning/critical messages, + # but suppress any context messages + # logfile system = nocontext warn + + # From the security log send all messages + # logfile security = all + + # Switch all other logfiles off. Default is warn: + # send messages of type warn or crit + # logfile * = off + +[mrpe] + # Run classical monitoring plugins. The word before the command + # line is the service description for the monitoring. Use backslashes + # in Windows-paths. + # check = Dummy mrpe\check_crit + # check = IP_Configuration mrpe\check_ipconfig 1.2.3.4 + # check = Whatever c:\myplugins\check_whatever -w 10 -c 20 + +[fileinfo] + # path = C:\Programs\Foo\*.log + # path = M:\Bar Test\*.* + +[local] + # define timeouts for local scripts matching + # specific patterns - first match wins + # timeout *.vbs = 20 + # timeout *.bat = 10 + # timeout * = 30 + +[plugins] + # example: the windows_updates.vbs + # plugin is executed asynchronous + # and is only updated every 3600 seconds + # it may fail (timeout / error) up to 3 times before the last known data is discarded + # execution windows_updates.vbs = async + # timeout windows_updates.vbs = 120 + # cache_age windows_updates.vbs = 3600 + # retry_count windows_udpates.vbs = 3 + + # define timeouts for plugin scripts matching + # specific patterns - first match wins + # timeout ps_perf.ps1 = 20 + # timeout *.ps1 = 10 + # timeout * = 30 + + # When using the Check_MK Inventory plugin, it is a good idea to make the + # plugin being executed asynchronous to prevent it from hooking up the + # whole agent processing. 
Additionally it should have a execution timeout. + # execution mk_inventory.ps1 = async + # timeout mk_inventory.ps1 = 240 diff -Nru check-mk-1.2.2p3/windows/crash.cc check-mk-1.2.6p12/windows/crash.cc --- check-mk-1.2.2p3/windows/crash.cc 2013-03-04 11:48:41.000000000 +0000 +++ check-mk-1.2.6p12/windows/crash.cc 1970-01-01 00:00:00.000000000 +0000 @@ -1,33 +0,0 @@ -// +------------------------------------------------------------------+ -// | ____ _ _ __ __ _ __ | -// | / ___| |__ ___ ___| | __ | \/ | |/ / | -// | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | -// | | |___| | | | __/ (__| < | | | | . \ | -// | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | -// | | -// | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | -// +------------------------------------------------------------------+ -// -// This file is part of Check_MK. -// The official homepage is at http://mathias-kettner.de/check_mk. -// -// check_mk is free software; you can redistribute it and/or modify it -// under the terms of the GNU General Public License as published by -// the Free Software Foundation in version 2. check_mk is distributed -// in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- -// out even the implied warranty of MERCHANTABILITY or FITNESS FOR A -// PARTICULAR PURPOSE. See the GNU General Public License for more de- -// ails. You should have received a copy of the GNU General Public -// License along with GNU Make; see the file COPYING. If not, write -// to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, -// Boston, MA 02110-1301 USA. - -// This program causes itself to crash. You can use it to provoke an entry -// into the "Application" eventlog (at your own risk). Compile it with -// 'make crash' - -int main(int argc, char **argv) -{ - char *p = 0x00000000; - *p = 17; -} Binary files /tmp/tmpz1J_py/xquijR6Ryw/check-mk-1.2.2p3/windows/crash.exe and /tmp/tmpz1J_py/GXS8RRKTxu/check-mk-1.2.6p12/windows/crash.exe differ diff -Nru check-mk-1.2.2p3/windows/endless.bat check-mk-1.2.6p12/windows/endless.bat --- check-mk-1.2.2p3/windows/endless.bat 2012-10-04 13:12:37.000000000 +0000 +++ check-mk-1.2.6p12/windows/endless.bat 1970-01-01 00:00:00.000000000 +0000 @@ -1,2 +0,0 @@ -:start -@goto start Binary files /tmp/tmpz1J_py/xquijR6Ryw/check-mk-1.2.2p3/windows/install_agent-64.exe and /tmp/tmpz1J_py/GXS8RRKTxu/check-mk-1.2.6p12/windows/install_agent-64.exe differ Binary files /tmp/tmpz1J_py/xquijR6Ryw/check-mk-1.2.2p3/windows/install_agent.exe and /tmp/tmpz1J_py/GXS8RRKTxu/check-mk-1.2.6p12/windows/install_agent.exe differ Binary files /tmp/tmpz1J_py/xquijR6Ryw/check-mk-1.2.2p3/windows/installer.ico and /tmp/tmpz1J_py/GXS8RRKTxu/check-mk-1.2.6p12/windows/installer.ico differ diff -Nru check-mk-1.2.2p3/windows/installer.nsi check-mk-1.2.6p12/windows/installer.nsi --- check-mk-1.2.2p3/windows/installer.nsi 2013-11-05 09:22:41.000000000 +0000 +++ check-mk-1.2.6p12/windows/installer.nsi 1970-01-01 00:00:00.000000000 +0000 @@ -1,110 +0,0 @@ -; This is the NSIS configuration file for the Check_MK windows agent. 
This is -; the spec file how to build the installer -;-------------------------------- -; Useful sources: -; http://nsis.sourceforge.net/Reusable_installer_script - -!define CHECK_MK_VERSION "1.2.2p3" -!define NAME "Check_MK Agent ${CHECK_MK_VERSION}" - -XPStyle on -Icon "installer.ico" - -; The name of the installer -Name "${NAME}" - -; The file to write -OutFile "install_agent.exe" - -SetDateSave on -SetDatablockOptimize on -CRCCheck on -SilentInstall normal - -; The default installation directory -InstallDir "$PROGRAMFILES\check_mk" - -; Registry key to check for directory (so if you install again, it will -; overwrite the old one automatically) -InstallDirRegKey HKLM "Software\check_mk_agent" "Install_Dir" - -; Request application privileges for Windows >Vista -RequestExecutionLevel admin - -ShowInstDetails show - -;-------------------------------- -; Pages - -Page directory -Page components -Page instfiles - -UninstPage uninstConfirm -UninstPage instfiles - -;-------------------------------- - -Section "Check_MK_Agent" - ; Can not be disabled - SectionIn RO - - !include LogicLib.nsh - ExpandEnvStrings $0 "%comspec%" - nsExec::ExecToStack '"$0" /k "net start | FIND /C /I "check_mk_agent""' - Pop $0 - Pop $1 - StrCpy $1 $1 1 - Var /GLOBAL stopped - ${If} "$0$1" == "01" - DetailPrint "Stop running check_mk_agent..." - StrCpy $stopped "1" - nsExec::Exec 'cmd /C "net stop check_mk_agent"' - ${Else} - StrCpy $stopped "0" - ${EndIf} - - SetOutPath "$INSTDIR" - File check_mk_agent.exe - File check_mk.example.ini - CreateDirectory "$INSTDIR\local" - CreateDirectory "$INSTDIR\plugins" - - ; Write the installation path into the registry - WriteRegStr HKLM SOFTWARE\check_mk_agent "Install_Dir" "$INSTDIR" - - ; Write the uninstall keys for Windows - WriteRegStr HKLM "Software\Microsoft\Windows\CurrentVersion\Uninstall\check_mk_agent" "DisplayName" "${NAME}" - WriteRegStr HKLM "Software\Microsoft\Windows\CurrentVersion\Uninstall\check_mk_agent" "UninstallString" '"$INSTDIR\uninstall.exe"' - WriteRegDWORD HKLM "Software\Microsoft\Windows\CurrentVersion\Uninstall\check_mk_agent" "NoModify" 1 - WriteRegDWORD HKLM "Software\Microsoft\Windows\CurrentVersion\Uninstall\check_mk_agent" "NoRepair" 1 - WriteUninstaller "uninstall.exe" -SectionEnd - -Section "Install & start service" - DetailPrint "Installing and starting the check_mk_agent service..." - nsExec::Exec 'cmd /C "$INSTDIR\check_mk_agent.exe" install' - nsExec::Exec 'cmd /C "net start check_mk_agent"' -SectionEnd - -Section "Uninstall" - ; Remove the service - DetailPrint "Stopping service..." - nsExec::Exec 'cmd /C "net stop check_mk_agent"' - DetailPrint "Removing service..." 
- nsExec::Exec 'cmd /C "$INSTDIR\check_mk_agent.exe" remove' - - ; Remove registry keys - DeleteRegKey HKLM "Software\Microsoft\Windows\CurrentVersion\Uninstall\check_mk_agent" - DeleteRegKey HKLM SOFTWARE\check_mk_agent - - ; Remove files and uninstaller - Delete "$INSTDIR\check_mk_agent.exe" - Delete "$INSTDIR\check_mk.example.ini" - Delete "$INSTDIR\uninstall.exe" - RMDir "$INSTDIR\local" - RMDir "$INSTDIR\plugins" - - ; Remove directories used - RMDir "$INSTDIR" -SectionEnd diff -Nru check-mk-1.2.2p3/windows/Makefile check-mk-1.2.6p12/windows/Makefile --- check-mk-1.2.2p3/windows/Makefile 2013-11-05 09:23:08.000000000 +0000 +++ check-mk-1.2.6p12/windows/Makefile 1970-01-01 00:00:00.000000000 +0000 @@ -1,53 +0,0 @@ -# +------------------------------------------------------------------+ -# | ____ _ _ __ __ _ __ | -# | / ___| |__ ___ ___| | __ | \/ | |/ / | -# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | -# | | |___| | | | __/ (__| < | | | | . \ | -# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | -# | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | -# +------------------------------------------------------------------+ -# -# This file is part of Check_MK. -# The official homepage is at http://mathias-kettner.de/check_mk. -# -# check_mk is free software; you can redistribute it and/or modify it -# under the terms of the GNU General Public License as published by -# the Free Software Foundation in version 2. check_mk is distributed -# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- -# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A -# PARTICULAR PURPOSE. See the GNU General Public License for more de- -# ails. You should have received a copy of the GNU General Public -# License along with GNU Make; see the file COPYING. If not, write -# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, -# Boston, MA 02110-1301 USA. - -ifeq ($(shell uname),Linux) - CXX = echo -e "\n\nWARNING: MinGW under Linux is broken! Use at your own risk!\n\n" ; i586-mingw32msvc-g++ -else - CXX = mingw32-g++ -endif - -install_agent.exe: check_mk_agent.exe crash.exe nowin.exe - if ! which makensis; then \ - echo "ERROR: makensis missing. Please install the nsis package." ; \ - exit 1 ; \ - fi - makensis installer.nsi - chmod +x install_agent.exe - -check_mk_agent.exe: check_mk_agent.cc Makefile - $(CXX) -s -O2 -Wall -Werror -o $@ $< -lwsock32 - -crash.exe: crash.cc Makefile - $(CXX) -o $@ $< - -nowin.exe: nowin.cc Makefile - $(CXX) -o $@ $< - -clean: - rm -f *.o *~ svn-*.tmp - -mrproper: clean - rm -f *.exe - diff -Nru check-mk-1.2.2p3/windows/mrpe/check_crit.bat check-mk-1.2.6p12/windows/mrpe/check_crit.bat --- check-mk-1.2.2p3/windows/mrpe/check_crit.bat 2012-10-04 13:12:37.000000000 +0000 +++ check-mk-1.2.6p12/windows/mrpe/check_crit.bat 2015-09-18 13:35:20.000000000 +0000 @@ -1,2 +1,2 @@ -@echo CRIT - This check is always critical -@exit 2 +@echo CRIT - This check is always critical +@exit 2 diff -Nru check-mk-1.2.2p3/windows/nowin.cc check-mk-1.2.6p12/windows/nowin.cc --- check-mk-1.2.2p3/windows/nowin.cc 2013-03-04 11:48:41.000000000 +0000 +++ check-mk-1.2.6p12/windows/nowin.cc 1970-01-01 00:00:00.000000000 +0000 @@ -1,48 +0,0 @@ -// +------------------------------------------------------------------+ -// | ____ _ _ __ __ _ __ | -// | / ___| |__ ___ ___| | __ | \/ | |/ / | -// | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | -// | | |___| | | | __/ (__| < | | | | . 
\ | -// | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | -// | | -// | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | -// +------------------------------------------------------------------+ -// -// This file is part of Check_MK. -// The official homepage is at http://mathias-kettner.de/check_mk. -// -// check_mk is free software; you can redistribute it and/or modify it -// under the terms of the GNU General Public License as published by -// the Free Software Foundation in version 2. check_mk is distributed -// in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- -// out even the implied warranty of MERCHANTABILITY or FITNESS FOR A -// PARTICULAR PURPOSE. See the GNU General Public License for more de- -// ails. You should have received a copy of the GNU General Public -// License along with GNU Make; see the file COPYING. If not, write -// to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, -// Boston, MA 02110-1301 USA. - -#include -#include -#include - -int main(int argc, char **argv) -{ - if (argc != 2) { - fprintf(stderr, "Usage: %s MINUTES\n", argv[0]); - exit(1); - } - int minutes = atoi(argv[1]); - time_t now = time(0); - time_t then = now + minutes * 60; - - struct tm *t; - char out[64]; - t = localtime(&now); - strftime(out, sizeof(out), "%Y-%m-%d %H:%M", t); - printf("%s ", out); - t = localtime(&then); - strftime(out, sizeof(out), "%Y-%m-%d %H:%M", t); - printf("%s\n", out); -} - Binary files /tmp/tmpz1J_py/xquijR6Ryw/check-mk-1.2.2p3/windows/nowin.exe and /tmp/tmpz1J_py/GXS8RRKTxu/check-mk-1.2.6p12/windows/nowin.exe differ diff -Nru check-mk-1.2.2p3/windows/plugins/ad_replication.bat check-mk-1.2.6p12/windows/plugins/ad_replication.bat --- check-mk-1.2.2p3/windows/plugins/ad_replication.bat 2013-06-10 12:49:02.000000000 +0000 +++ check-mk-1.2.6p12/windows/plugins/ad_replication.bat 2015-09-18 13:35:20.000000000 +0000 @@ -1,7 +1,7 @@ @echo off REM *** -REM * To be able to run this check you need apprpriate credentials +REM * To be able to run this check you need appropriate credentials REM * in the target domain. REM * REM * Normally the Check_MK agent runs as sevice with local system @@ -9,11 +9,13 @@ REM * REM * To solve this problem you can do e.g. the following: REM * -REM * - Change the account the service is being started with to a +REM * - Change the account the service is being started with to a REM * domain user account with enough permissions on the DC. -REM * +REM * REM *** echo ^<^<^^>^> dsquery server | find /I "CN=%COMPUTERNAME%," > nul -if ERRORLEVEL 0 repadmin /showrepl /csv +if ERRORLEVEL 1 goto SERVER_NOT_IN_DC_LIST +repadmin /showrepl /csv +:SERVER_NOT_IN_DC_LIST diff -Nru check-mk-1.2.2p3/windows/plugins/arcserve_backup.ps1 check-mk-1.2.6p12/windows/plugins/arcserve_backup.ps1 --- check-mk-1.2.2p3/windows/plugins/arcserve_backup.ps1 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/windows/plugins/arcserve_backup.ps1 2015-09-18 13:35:20.000000000 +0000 @@ -0,0 +1,176 @@ +# ArcServe.ps1 +# Created by Ben Backx +# modified by Hans-Christian Scherzer +# Email: bbackx AT icorda.be +# Version: 0.6de +# Created: 10/12/2009 +# Last modification: 03/02/2014 + +# Function: +# --------- +# This script connects to the ArcServe logging database (available +# for version 12.0 and up) and processes the relevant logs. 
+# works only with german version of ArcServe +# + +# SQL Database to connect to +$sqlServer = "SATURN\ARCSERVE_DB" + + +################## +# GetLatestJobId # +################## +function GetLatestJobId($sqlCmd) { + # Put the command in our sqlCmd + # Please adapt description if english translation is used + $sqlCmd.CommandText = "SELECT top 1 jobid FROM dbo.aslogw WHERE msgtext LIKE '%Ausfhrung von Job Sichern%' ORDER BY jobid DESC" + + # Create an adapter to put the data we get from SQL and get the data + $sqlAdapter = New-Object System.Data.SqlClient.SqlDataAdapter + $sqlAdapter.SelectCommand = $sqlCmd + $dataSet = New-Object System.Data.DataSet + $sqlAdapter.Fill($dataSet) + + return $dataSet.Tables[0].Rows[0][0] +} + +##################### +# GetPreLatestJobId # +##################### +function GetPreLatestJobId($sqlCmd, $jobId) { + # Put the command in our sqlCmd + # Please adapt description if english translation is used + $sqlCmd.CommandText = "SELECT top 1 jobid FROM dbo.aslogw WHERE msgtext LIKE '%Ausfhrung von Job Sichern%' AND jobid < " + $jobId + " ORDER BY jobid DESC" + + # Create an adapter to put the data we get from SQL and get the data + $sqlAdapter = New-Object System.Data.SqlClient.SqlDataAdapter + $sqlAdapter.SelectCommand = $sqlCmd + $dataSet = New-Object System.Data.DataSet + $sqlAdapter.Fill($dataSet) + + return $dataSet.Tables[0].Rows[0][0] +} + +############# +# GetStatus # +############# +function GetStatus($sqlCmd, $jobId) { + + # Put the command in our sqlCmd + # Please adapt description if english translation is used + $sqlCmd.CommandText = "SELECT top 1 msgtext FROM dbo.aslogw WHERE msgtext LIKE '%Vorgang Sichern%' AND jobid = " + $jobid + " ORDER BY id DESC" + + # Create an adapter to put the data we get from SQL and get the data + $sqlAdapter = New-Object System.Data.SqlClient.SqlDataAdapter + $sqlAdapter.SelectCommand = $sqlCmd + $dataSet = New-Object System.Data.DataSet + $sqlAdapter.Fill($dataSet) + + $temp = $dataSet.Tables[0].Rows[0][0] + + return $temp +} + +################## +# GetBackupFiles # +################## +function GetBackupFiles($sqlCmd, $jobId) { + + # Put the command in our sqlCmd + # Please adapt description if english translation is used + $sqlCmd.CommandText = "SELECT msgtext FROM dbo.aslogw WHERE msgtext LIKE '%Verzeichnis(se)%' AND jobid = " + $jobId + " ORDER BY id DESC" + + # Create an adapter to put the data we get from SQL and get the data + $sqlAdapter = New-Object System.Data.SqlClient.SqlDataAdapter + $sqlAdapter.SelectCommand = $sqlCmd + $dataSet = New-Object System.Data.DataSet + $sqlAdapter.Fill($dataSet) + + $temp = $dataSet.Tables[0].Rows[0][0] + + return $temp +} + +################## +# GetDescription # +################## +function GetDescription($sqlCmd, $jobId) { + + # Put the command in our sqlCmd + # Please adapt description if english translation is used + $sqlCmd.CommandText = "SELECT msgtext + ' (' + convert(varchar(10), logtime, 104) + ')' FROM dbo.aslogw WHERE msgtext LIKE '%Beschreibung:%' AND jobid = " + $jobId + " ORDER BY id DESC" + + # Create an adapter to put the data we get from SQL and get the data + $sqlAdapter = New-Object System.Data.SqlClient.SqlDataAdapter + $sqlAdapter.SelectCommand = $sqlCmd + $dataSet = New-Object System.Data.DataSet + $sqlAdapter.Fill($dataSet) + + $temp = $dataSet.Tables[0].Rows[0][0] + + return $temp +} + + +###################### +# 'Main' starts here # +###################### + +# We need no arguments + +# Make a connection with the SQL-server +# Please adapt Server and 
Database name +$sqlConnection = New-Object System.Data.SqlClient.SqlConnection +$sqlConnection.ConnectionString = "Server=$sqlServer;Integrated Security=True;Database=aslog" +$sqlConnection.Open() + +# Create a command object +$sqlCmd = New-Object System.Data.SqlClient.SqlCommand +$sqlCmd.Connection = $sqlConnection + +$temp = GetLatestJobId($sqlCmd) +$j = $temp[1] + +$temp = GetDescription $sqlCmd $j +$desc = $temp[1] +write-output "<<>>" +write-output "Job: " $j $desc + +$temp = GetBackupFiles $sqlCmd $j +write-output $temp[1] + +$temp = GetStatus $sqlCmd $j +write-output $temp[1] + +write-output "" + +# Please adapt job description +if ( $desc.contains("Wochensicherung") ) { + + $temp = GetPreLatestJobId $sqlCmd $j + $j = $temp[1] + $temp = GetDescription $sqlCmd $j + $desc = $temp[1] +} +else { + while ( ! $desc.contains("Wochensicherung") ) { + $temp = GetPreLatestJobId $sqlCmd $j + $j = $temp[1] + $temp = GetDescription $sqlCmd $j + $desc = $temp[1] + } +} + +write-output "Job: " $j $desc + +$temp = GetBackupFiles $sqlCmd $j +write-output $temp[1] + +$temp = GetStatus $sqlCmd $j +write-output $temp[1] + +write-output "" + + +# Close the SQL-connection +$sqlConnection.Close() diff -Nru check-mk-1.2.2p3/windows/plugins/citrix_licenses.vbs check-mk-1.2.6p12/windows/plugins/citrix_licenses.vbs --- check-mk-1.2.2p3/windows/plugins/citrix_licenses.vbs 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/windows/plugins/citrix_licenses.vbs 2015-09-18 13:35:20.000000000 +0000 @@ -0,0 +1,11 @@ +' Check for citrix licenses +' This is an almost unmodified version of ctx_licensecheck.vbs from Stefan Senftleben. +On Error Resume Next +Dim objWMI : Set objWMI = GetObject("winmgmts:\\" & strComputer) +Dim strComputer : strComputer = "." +Set objService = GetObject("winmgmts:\\" & strComputer & "\root\CitrixLicensing") +Set colItems = objService.ExecQuery("SELECT * FROM Citrix_GT_License_Pool",,48) +Wscript.Echo "<<>>" +For Each objItem in colItems + WScript.Echo objItem.PLD & vbTab & objItem.Count & vbTab & objItem.InUseCount +Next diff -Nru check-mk-1.2.2p3/windows/plugins/citrix_xenapp.ps1 check-mk-1.2.6p12/windows/plugins/citrix_xenapp.ps1 --- check-mk-1.2.2p3/windows/plugins/citrix_xenapp.ps1 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/windows/plugins/citrix_xenapp.ps1 2015-09-18 13:35:20.000000000 +0000 @@ -0,0 +1,14 @@ +$computer = "localhost" + +### Citrix XenApp Serverload +$loadObject = Get-WmiObject -Namespace 'Root\Citrix' -class 'MetaFrame_Server_LoadLevel' -ComputerName $computer -ErrorAction Stop +"<<>>" +$loadObject.LoadLevel +$computer = "localhost" + +### Citrix XenApp Sessions +$serverObject = Get-WmiObject -Namespace root\citrix -Class Metaframe_Server -ComputerName $computer +"<<>>" +"sessions {0}" -f $serverObject.NumberOfSessions +"active_sessions {0}" -f $serverObject.NumberOfActiveSessions +"inactive_sessions {0}" -f $serverObject.NumberOfDisconnectedSessions diff -Nru check-mk-1.2.2p3/windows/plugins/deprecated/citrix_serverload.ps1 check-mk-1.2.6p12/windows/plugins/deprecated/citrix_serverload.ps1 --- check-mk-1.2.2p3/windows/plugins/deprecated/citrix_serverload.ps1 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/windows/plugins/deprecated/citrix_serverload.ps1 2015-02-25 15:25:57.000000000 +0000 @@ -0,0 +1,6 @@ +$computer = "localhost" + +### Citrix XenApp Serverload +$loadObject = Get-WmiObject -Namespace 'Root\Citrix' -class 'MetaFrame_Server_LoadLevel' -ComputerName $computer -ErrorAction Stop +"<<>>" +$loadObject.LoadLevel diff -Nru 
check-mk-1.2.2p3/windows/plugins/deprecated/citrix_sessions.ps1 check-mk-1.2.6p12/windows/plugins/deprecated/citrix_sessions.ps1 --- check-mk-1.2.2p3/windows/plugins/deprecated/citrix_sessions.ps1 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/windows/plugins/deprecated/citrix_sessions.ps1 2015-02-25 15:25:57.000000000 +0000 @@ -0,0 +1,8 @@ +$computer = "localhost" + +### Citrix XenApp Sessions +$serverObject = Get-WmiObject -Namespace root\citrix -Class Metaframe_Server -ComputerName $computer +"<<>>" +"sessions {0}" -f $serverObject.NumberOfSessions +"active_sessions {0}" -f $serverObject.NumberOfActiveSessions +"inactive_sessions {0}" -f $serverObject.NumberOfDisconnectedSessions diff -Nru check-mk-1.2.2p3/windows/plugins/deprecated/dmi_sysinfo.bat check-mk-1.2.6p12/windows/plugins/deprecated/dmi_sysinfo.bat --- check-mk-1.2.2p3/windows/plugins/deprecated/dmi_sysinfo.bat 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/windows/plugins/deprecated/dmi_sysinfo.bat 2015-02-25 15:25:57.000000000 +0000 @@ -0,0 +1,15 @@ +@echo off + +REM *** +REM * To be able to run this check you need to install dmidecode +REM * on your windows client. +REM * +REM * You can download dmidecode for windows from +REM * +REM * +REM * This plugin should work out of the box if you install dmidecode +REM * to the default location. +REM *** + +echo ^<^<^^>^> +C:\Programme\GnuWin32\sbin\dmidecode.exe -t 1 -q diff -Nru check-mk-1.2.2p3/windows/plugins/deprecated/psperf.bat check-mk-1.2.6p12/windows/plugins/deprecated/psperf.bat --- check-mk-1.2.2p3/windows/plugins/deprecated/psperf.bat 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/windows/plugins/deprecated/psperf.bat 2014-07-04 17:50:26.000000000 +0000 @@ -0,0 +1,8 @@ +@echo off +rem This plugin obsoletes wmicchecks.bat. It is better because it is +rem directly supported by the normal ps check. + +echo ^<^<^^>^> +echo [wmic process] +wmic process get ProcessId,name,pagefileusage,virtualsize,workingsetsize,usermodetime,kernelmodetime,ThreadCount,HandleCount /format:csv +echo [wmic process end] \ No newline at end of file diff -Nru check-mk-1.2.2p3/windows/plugins/deprecated/windows_time.bat check-mk-1.2.6p12/windows/plugins/deprecated/windows_time.bat --- check-mk-1.2.2p3/windows/plugins/deprecated/windows_time.bat 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/windows/plugins/deprecated/windows_time.bat 2013-11-05 09:57:59.000000000 +0000 @@ -0,0 +1,3 @@ +@echo off +echo ^<^<^^>^> +w32tm /query /status diff -Nru check-mk-1.2.2p3/windows/plugins/deprecated/wmicchecks.bat check-mk-1.2.6p12/windows/plugins/deprecated/wmicchecks.bat --- check-mk-1.2.2p3/windows/plugins/deprecated/wmicchecks.bat 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/windows/plugins/deprecated/wmicchecks.bat 2013-11-05 09:57:59.000000000 +0000 @@ -0,0 +1,3 @@ +@echo off +echo ^<^<^^>^> +wmic process get name,pagefileusage,virtualsize,workingsetsize,usermodetime,kernelmodetime,ThreadCount /format:csv diff -Nru check-mk-1.2.2p3/windows/plugins/dmi_sysinfo.bat check-mk-1.2.6p12/windows/plugins/dmi_sysinfo.bat --- check-mk-1.2.2p3/windows/plugins/dmi_sysinfo.bat 2012-10-04 13:12:37.000000000 +0000 +++ check-mk-1.2.6p12/windows/plugins/dmi_sysinfo.bat 1970-01-01 00:00:00.000000000 +0000 @@ -1,15 +0,0 @@ -@echo off - -REM *** -REM * To be able to run this check you need to install dmidecode -REM * on your windows client. 
-REM * -REM * You can download dmidecode for windows from -REM * -REM * -REM * This plugin should work out of the box if you install dmidecode -REM * to the default location. -REM *** - -echo ^<^<^^>^> -C:\Programme\GnuWin32\sbin\dmidecode.exe -t 1 -q diff -Nru check-mk-1.2.2p3/windows/plugins/hyperv_vms.ps1 check-mk-1.2.6p12/windows/plugins/hyperv_vms.ps1 --- check-mk-1.2.2p3/windows/plugins/hyperv_vms.ps1 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/windows/plugins/hyperv_vms.ps1 2015-09-18 13:35:20.000000000 +0000 @@ -0,0 +1,3 @@ +Write-Host "<<>>" + +Get-VM | select Name, State, Uptime, Status | ConvertTo-Csv -Delimiter "`t" -NoTypeInformation diff -Nru check-mk-1.2.2p3/windows/plugins/megaraid.bat check-mk-1.2.6p12/windows/plugins/megaraid.bat --- check-mk-1.2.2p3/windows/plugins/megaraid.bat 2012-10-04 13:12:37.000000000 +0000 +++ check-mk-1.2.6p12/windows/plugins/megaraid.bat 2015-09-18 13:35:20.000000000 +0000 @@ -9,10 +9,10 @@ REM * REM * To be able to run this check you need to install MegaCli.exe on your windows client. REM * -REM * You can download MegaCli.exe for windows from +REM * You can download MegaCli.exe for windows from REM * http://www.lsi.com/ REM * -REM * after install MegaCli.exe modify the Path in the MegaCli Variable +REM * after install MegaCli.exe modify the Path in the MegaCli Variable REM * REM ********************************************************************** @@ -42,17 +42,17 @@ ECHO ^<^<^^>^> "!MegaCli!" -EncInfo -aALL -NoLog | FINDSTR /g:!enclist! > !tmpfile! FOR /F "tokens=1,2,3,4,5,6* delims=:+ " %%h in (!tmpfile!) do ( - + IF "%%h" == "Enclosure" ( ECHO %%i|FINDSTR /r "[^0-9]" > NUL IF ERRORLEVEL 1 ( SET part_a=%%h %%i ) - ) + ) IF "%%h" == "Device" ( ECHO dev2enc !part_a! %%h %%i %%j SET part_a= - ) + ) ) "!MegaCli!" -PDList -aALL -NoLog | FINDSTR /g:!pdlist! @@ -65,4 +65,4 @@ DEL "!enclist!" DEL "!pdlist!" -:END \ No newline at end of file +:END diff -Nru check-mk-1.2.2p3/windows/plugins/mk_inventory.ps1 check-mk-1.2.6p12/windows/plugins/mk_inventory.ps1 --- check-mk-1.2.2p3/windows/plugins/mk_inventory.ps1 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/windows/plugins/mk_inventory.ps1 2015-09-21 10:59:54.000000000 +0000 @@ -0,0 +1,101 @@ +# Configuration +$delay = 14400 # execute agent only every $delay seconds +$exe_paths = @("c:\Program Files (x86)") + +[System.Threading.Thread]::CurrentThread.CurrentCulture = [Globalization.CultureInfo]::InvariantCulture +[System.Threading.Thread]::CurrentThread.CurrentUICulture = [Globalization.CultureInfo]::InvariantCulture +[Console]::OutputEncoding = [System.Text.Encoding]::UTF8 +write-output "" # workaround to prevent the byte order mark to be at the beginning of the first section +$name = (Get-Item env:\Computername).Value +$separator = "|" +# filename for timestamp +$remote_host = $env:REMOTE_HOST +$agent_dir = $env:MK_CONFDIR + +# Fallback if the (old) agent does not provide the MK_CONFDIR +if (!$agent_dir) { + $agent_dir = "c:\Program Files (x86)\check_mk" +} + +$timestamp = $agent_dir + "\timestamp."+ $remote_host + +# does $timestamp exist? 
+If (Test-Path $timestamp){ + $filedate = (ls $timestamp).LastWriteTime + $now = Get-Date + $earlier = $now.AddSeconds(-$delay) + # exit if timestamp to young + if ( $filedate -gt $earlier ) { exit } +} +# create new timestamp file +New-Item $timestamp -type file -force | Out-Null + +# calculate unix timestamp +$epoch=[int][double]::Parse($(Get-Date -date (Get-Date).ToUniversalTime()-uformat %s)) + +# convert it to integer and add $delay seconds plus 5 minutes +$until = [int]($epoch -replace ",.*", "") + $delay + 600 + +# Processor +write-host "<<>>" +$cpu = Get-WmiObject Win32_Processor -ComputerName $name +$cpu_vars = @( "Name","Manufacturer","Caption","DeviceID","MaxClockSpeed","AddressWidth","L2CacheSize","L3CacheSize","Architecture","NumberOfCores","NumberOfLogicalProcessors","CurrentVoltage","Status" ) +foreach ( $entry in $cpu ) { foreach ( $item in $cpu_vars) { write-host $item ":" $entry.$item } } + +# OS Version +write-host "<<>>" +Get-WmiObject Win32_OperatingSystem -ComputerName $name | foreach-object { write-host -separator $separator $_.csname, $_.caption, $_.version, $_.OSArchitecture, $_.servicepackmajorversion, $_.ServicePackMinorVersion, $_.InstallDate } + +# Memory +#Get-WmiObject Win32_PhysicalMemory -ComputerName $name | select BankLabel,DeviceLocator,Capacity,Manufacturer,PartNumber,SerialNumber,Speed + +# BIOS +write-host "<<>>" +$bios = Get-WmiObject win32_bios -ComputerName $name +$bios_vars= @( "Manufacturer","Name","SerialNumber","InstallDate","BIOSVersion","ListOfLanguages","PrimaryBIOS","ReleaseDate","SMBIOSBIOSVersion","SMBIOSMajorVersion","SMBIOSMinorVersion" ) +foreach ( $entry in $bios ) { foreach ( $item in $bios_vars) { write-host $item ":" $entry.$item } } + +# System +write-host "<<>>" +$system = Get-WmiObject Win32_SystemEnclosure -ComputerName $name +$system_vars = @( "Manufacturer","Name","Model","HotSwappable","InstallDate","PartNumber","SerialNumber" ) +foreach ( $entry in $system ) { foreach ( $item in $system_vars) { write-host $item ":" $entry.$item } } + +# Hard-Disk +write-host "<<>>" +$disk = Get-WmiObject win32_diskDrive -ComputerName $name +$disk_vars = @( "Manufacturer","InterfaceType","Model","Name","SerialNumber","Size","MediaType","Signature" ) +foreach ( $entry in $disk ) { foreach ( $item in $disk_vars) { write-host $item ":" $entry.$item } } + +# Graphics Adapter +write-host "<<>>" +$adapters=Get-WmiObject Win32_VideoController -ComputerName $name +$adapter_vars = @( "Name", "Description", "Caption", "AdapterCompatibility", "VideoModeDescription", "VideoProcessor", "DriverVersion", "DriverDate", "MaxMemorySupported") +foreach ( $entry in $adapters ) { foreach ( $item in $adapter_vars) { write-host $item ":" $entry.$item } } + +# Installed Software +write-host "<<>>" +Get-WmiObject Win32_Product -ComputerName $name | foreach-object { write-host -separator $separator $_.Name, $_.Vendor, $_.Version, $_.InstallDate } + +# Search Registry +write-host "<<>>" +$paths = @("HKLM:\Software\Microsoft\Windows\CurrentVersion\Uninstall") +foreach ($path in $paths) { + Get-ChildItem $path -Recurse | foreach-object { $path2 = $path+"\"+$_.PSChildName; get-ItemProperty -path $path2 | + + foreach-object { + $Publisher = $_.Publisher -replace "`0", "" + write-host -separator $separator $_.DisplayName, $Publisher , $_.InstallLocation, $_.PSChildName, $_.DisplayVersion, $_.EstimatedSize, $_.InstallDate }} +} + +# Search exes +write-host "<<>>" +foreach ($item in $exe_paths) +{ + if ((Test-Path $item -pathType container)) + { + Get-ChildItem -Path $item 
-include *.exe -Recurse | foreach-object { write-host -separator $separator $_.Fullname, $_.LastWriteTime, $_.Length, $_.VersionInfo.FileDescription, $_.VersionInfo.ProduktVersion, $_.VersionInfo.ProduktName } + } +} + + diff -Nru check-mk-1.2.2p3/windows/plugins/mssql.vbs check-mk-1.2.6p12/windows/plugins/mssql.vbs --- check-mk-1.2.2p3/windows/plugins/mssql.vbs 2013-11-05 09:22:37.000000000 +0000 +++ check-mk-1.2.6p12/windows/plugins/mssql.vbs 2015-09-21 10:59:54.000000000 +0000 @@ -21,7 +21,7 @@ Option Explicit -Dim WMI, prop, instId, instVersion, instIds, instName, output +Dim WMI, prop, instId, instIdx, instVersion, instIds, instName, output, WMIservice, colRunningServices, objService WScript.Timeout = 10 @@ -34,7 +34,6 @@ output = output & text & vbLf End Sub - ' Dummy empty output. ' Contains timeout error if this scripts runtime exceeds the timeout WScript.echo "<<>>" @@ -42,40 +41,52 @@ ' Loop all found local MSSQL server instances ' Try different trees to handle different versions of MSSQL On Error Resume Next -' MSSQL >= 10 -' try SQL Server 2012: -Set WMI = GetObject("WINMGMTS:\\.\root\Microsoft\SqlServer\ComputerManagement11") +' try SQL Server 2014: +Set WMI = GetObject("WINMGMTS:\\.\root\Microsoft\SqlServer\ComputerManagement12") If Err.Number <> 0 Then Err.Clear() - - ' try SQL Server 2008 - Set WMI = GetObject("WINMGMTS:\\.\root\Microsoft\SqlServer\ComputerManagement10") + ' try SQL Server 2012: + Set WMI = GetObject("WINMGMTS:\\.\root\Microsoft\SqlServer\ComputerManagement11") If Err.Number <> 0 Then Err.Clear() - - ' try MSSQL < 10 - Set WMI = GetObject("WINMGMTS:\\.\root\Microsoft\SqlServer\ComputerManagement") + + ' try SQL Server 2008 + Set WMI = GetObject("WINMGMTS:\\.\root\Microsoft\SqlServer\ComputerManagement10") If Err.Number <> 0 Then - addOutput( "Error: " & Err.Number & " " & Err.Description ) Err.Clear() - wscript.quit() + + ' try MSSQL < 10 + Set WMI = GetObject("WINMGMTS:\\.\root\Microsoft\SqlServer\ComputerManagement") + If Err.Number <> 0 Then + addOutput( "Error: " & Err.Number & " " & Err.Description ) + Err.Clear() + wscript.quit() + End If End If End If End If On Error Goto 0 +Set WMIservice = GetObject("winmgmts:{impersonationLevel=impersonate}!\\.\root\cimv2") + For Each prop In WMI.ExecQuery("SELECT * FROM SqlServiceAdvancedProperty WHERE " & _ "SQLServiceType = 1 AND PropertyName = 'VERSION'") - instId = Replace(prop.ServiceName, "$", "_") + + Set colRunningServices = WMIservice.ExecQuery("SELECT State FROM Win32_Service WHERE Name = '" & prop.ServiceName & "'") + instId = Replace(prop.ServiceName, "$", "__") instVersion = prop.PropertyStrValue - + instIdx = Replace(instId, "__", "_") addOutput( "<<>>" ) - addOutput( instId & " " & instVersion ) + addOutput( instIdx & " " & instVersion ) ' Now query the server instance for the databases ' Use name as key and always empty value for the moment - instIds.add instId, "" + For Each objService In colRunningServices + If objService.State = "Running" Then + instIds.add instId, "" + End If + Next Next Set WMI = nothing @@ -98,9 +109,10 @@ ' At this place one could implement to use other authentication mechanism CONN.Properties("Integrated Security").Value = "SSPI" - If InStr(instId, "_") <> 0 Then - instName = Split(instId, "_")(1) - Else + If InStr(instId, "__") <> 0 Then + instName = Split(instId, "__")(1) + instId = Replace(instId, "__", "_") + Else instName = instId End If @@ -109,7 +121,8 @@ CONN.Properties("Data Source").Value = "(local)" Else CONN.Properties("Data Source").Value = hostname & "\" & 
instName - End If + End If + 'WScript.echo (CONN) CONN.Open @@ -209,4 +222,3 @@ ' finally output collected data WScript.echo output - diff -Nru check-mk-1.2.2p3/windows/plugins/netstat_an.bat check-mk-1.2.6p12/windows/plugins/netstat_an.bat --- check-mk-1.2.2p3/windows/plugins/netstat_an.bat 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/windows/plugins/netstat_an.bat 2015-09-18 13:35:20.000000000 +0000 @@ -0,0 +1,3 @@ +@echo off +echo ^<^<^^>^> +netstat -an diff -Nru check-mk-1.2.2p3/windows/plugins/tsm_checks.bat check-mk-1.2.6p12/windows/plugins/tsm_checks.bat --- check-mk-1.2.2p3/windows/plugins/tsm_checks.bat 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/windows/plugins/tsm_checks.bat 2015-09-18 13:35:20.000000000 +0000 @@ -0,0 +1,21 @@ +@echo off +cd C:\Progra~1\Tivoli\TSM\baclient\ +SET COMMAND=dsmadmc -dataonly=YES -id=admin -password=password -displaymode=table + +echo ^<^<^^>^> +%COMMAND% "select 'default', library_name, drive_name, drive_state, online, drive_serial from drives" + +echo ^<^<^^>^> +%COMMAND% "select source_name, destination_name, online from paths" + +echo ^<^<^^>^> +%COMMAND% "select session_id, client_name, state, wait_seconds from sessions" + +echo ^<^<^^>^> +%COMMAND% "select 'default', count(library_name), library_name from libvolumes where status='Scratch' group by library_name" + +echo ^<^<^^>^> +%COMMAND% "select 'default', type, stgpool_name, sum(logical_mb) from occupancy group by type, stgpool_name" + +echo ^<^<^^>^> +%COMMAND% "select 'default', stgpool_name, pct_utilized from volumes where access='READWRITE' and devclass_name<>'DISK'" diff -Nru check-mk-1.2.2p3/windows/plugins/veeam_backup_status.bat check-mk-1.2.6p12/windows/plugins/veeam_backup_status.bat --- check-mk-1.2.2p3/windows/plugins/veeam_backup_status.bat 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/windows/plugins/veeam_backup_status.bat 2015-09-18 13:35:20.000000000 +0000 @@ -0,0 +1,8 @@ +@ECHO OFF +REM version 0.9 +REM Put this file in cmk Plugins-Folder *only* if you need to run +REM the veeam_backup_status.ps1 powershell script and you +REM have no 64 bit check_mk agent available +REM In this case the powershell script needs to be put somewhere else +REM (see example here) and is called from this .bat script with the 64 bit powershell +%systemroot%\sysnative\WindowsPowerShell\v1.0\powershell.exe -ExecutionPolicy Unrestricted " & ""C:\skripte\veeam_backup_status.ps1""" diff -Nru check-mk-1.2.2p3/windows/plugins/veeam_backup_status.ps1_ check-mk-1.2.6p12/windows/plugins/veeam_backup_status.ps1_ --- check-mk-1.2.2p3/windows/plugins/veeam_backup_status.ps1_ 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/windows/plugins/veeam_backup_status.ps1_ 2015-09-18 13:35:20.000000000 +0000 @@ -0,0 +1,150 @@ +# This powershell script needs to be run with the 64bit powershell +# and thus from a 64bit check_mk agent +# If a 64 bit check_mk agent is available it just needs to be renamed with +# the extension .ps1 +# If only a 32bit check_mk agent is available it needs to be relocated to a +# directory given in veeam_backup_status.bat and the .bat file needs to be +# started by the check_mk agent instead. 
+ +$pshost = get-host +$pswindow = $pshost.ui.rawui + +$newsize = $pswindow.buffersize +$newsize.height = 300 +$newsize.width = 150 +$pswindow.buffersize = $newsize + +# Get Information from veeam backup and replication in cmk-friendly format +# V0.9 +# Load Veeam Backup and Replication Powershell Snapin +Add-PSSnapin VeeamPSSnapIn -ErrorAction SilentlyContinue + + +# No real error handling in the whole script, just using this try ... catch for totally unexpected errors. +# If any error occurs during check, a field might just remain blank + +try +{ +# Create new text string for backup job section. Initialize it with header +$myJobsText = "<<>>`n" +# Create new text string for backup tasks section. +$myTaskText = "" + +# List all planned backup AND replication jobs which are ENABLED +$myBackupJobs = Get-VBRJob | where {$_.IsScheduleEnabled -eq $true } +# to check only for Backups or Replicas: "$_.IsBackup -eq $true" "$_.IsReplica -eq $true" + +# Iterate through all backup jobs +foreach ($myJob in $myBackupJobs) +{ + $myJobName = "" + $myJobName = $myJob.Name -replace "\'","_" -replace " ","_" + + $myJobType = "" + $myJobType = $myjob.JobType + + $myJobLastState = "" + $myJobLastState = $myJob.GetLastState() + + $myJobLastResult = "" + $myJobLastResult = $myJob.GetLastResult() + + $myJobLastSession = $myJob.FindLastSession() + + $myJobCreationTime = "" + $myJobCreationTime = $myJobLastSession.CreationTime | get-date -Format "dd.MM.yyyy HH:mm:ss" -ErrorAction SilentlyContinue + + $myJobEndTime = "" + $myJobEndTime = $myJobLastSession.EndTime | get-date -Format "dd.MM.yyyy HH:mm:ss" -ErrorAction SilentlyContinue + + $myJobsText = "$myJobsText" + "$myJobName" + "`t" + "$myJobType" + "`t" + "$myJobLastState" + "`t" + "$myJobLastResult" + "`t" + "$myJobCreationTime" + "`t" + "$myJobEndTime" + "`n" + + # Each backup job has a number of tasks which were executed (VMs which were processed) + # Get all Tasks of the L A S T backup session + # Caution: Each backup job MAY have run SEVERAL times for retries + $myJobLastSessionTasks = $myJobLastSession | Get-VBRTaskSession -ErrorAction SilentlyContinue + + # Iterate through all tasks in the last backup job + $myTask = "" + foreach ($myTask in $myJobLastSessionTasks) + { + $myTaskName = "" + $myTaskName = $myTask.Name + + $myTaskText = "$myTaskText" + "<<<<" + "$myTaskName" + ">>>>" + "`n" + + $myTaskText = "$myTaskText" + "<<<"+ "veeam_client:sep(9)" +">>>" +"`n" + + $myTaskStatus = "" + $myTaskStatus = $myTask.Status + + $myTaskText = "$myTaskText" + "Status" + "`t" + "$myTaskStatus" + "`n" + + $myTaskText = "$myTaskText" + "JobName" + "`t" + "$myJobName" + "`n" + + $myTaskTotalSize = "" + $myTaskTotalSize = $myTask.Progress.TotalSize + + $myTaskText = "$myTaskText" + "TotalSizeByte" + "`t" + "$myTaskTotalSize" + "`n" + + $myTaskReadSize = $myTask.Progress.ReadSize + + $myTaskText = "$myTaskText" + "ReadSizeByte" + "`t" + "$myTaskReadSize" + "`n" + + $myTaskTransferedSize = $myTask.Progress.TransferedSize + + $myTaskText = "$myTaskText" + "TransferedSizeByte" + "`t" + "$myTaskTransferedSize" + "`n" + + $myTaskStartTime = "" + $myTaskStartTime = $myTask.Progress.StartTime | get-date -Format "dd.MM.yyyy HH:mm:ss" -ErrorAction SilentlyContinue + + $myTaskText = "$myTaskText" + "StartTime" + "`t" + "$myTaskStartTime" + "`n" + + $myTaskStopTime = "" + $myTaskStopTime = $myTask.Progress.StopTime | get-date -Format "dd.MM.yyyy HH:mm:ss" -ErrorAction SilentlyContinue + + $myTaskText = "$myTaskText" + "StopTime" + "`t" + "$myTaskStopTime" + "`n" + + # Result is 
a value of type System.TimeStamp. I'm sure there is a more elegant way of formatting the output: + $myTaskDuration = "" + $myTaskDuration = "" + "{0:D2}" -f $myTask.Progress.duration.Days + ":" + "{0:D2}" -f $myTask.Progress.duration.Hours + ":" + "{0:D2}" -f $myTask.Progress.duration.Minutes + ":" + "{0:D2}" -f $myTask.Progress.duration.Seconds + + $myTaskText = "$myTaskText" + "DurationDDHHMMSS" + "`t" + "$myTaskDuration" + "`n" + + $myTaskAvgSpeed = "" + $myTaskAvgSpeed = $myTask.Progress.AvgSpeed + + $myTaskText = "$myTaskText" + "AvgSpeedBps" + "`t" + "$myTaskAvgSpeed" + "`n" + + $myTaskDisplayName = "" + $myTaskDisplayName = $myTask.Progress.DisplayName + + $myTaskText = "$myTaskText" + "DisplayName" + "`t" + "$myTaskDisplayName" + "`n" + + # End of section <<>> + $myTaskText = "$myTaskText" + "<<<<" + ">>>>" +"`n" + + # END OF LOOP foreach ($myTask in $myJobLastSessionTasks) + } + +# END OF LOOP foreach ($myJob in $myBackupJobs) +} + +# Final output +write-host $myJobsText +write-host $myTaskText +# "$myJobsText" + "$myTaskText" + + +# END OF TRY +} + +# CATCH only totally impossible catastrophic errors +catch +{ +$errMsg = $_.Exception.Message +$errItem = $_.Exception.ItemName +Write-Error "Totally unexpected and unhandled error occured:`n Item: $errItem`n Error Message: $errMsg" +Break +} + diff -Nru check-mk-1.2.2p3/windows/plugins/win_dmidecode.bat check-mk-1.2.6p12/windows/plugins/win_dmidecode.bat --- check-mk-1.2.2p3/windows/plugins/win_dmidecode.bat 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/windows/plugins/win_dmidecode.bat 2015-09-18 13:35:20.000000000 +0000 @@ -0,0 +1,15 @@ +@echo off + +REM *** +REM * To be able to run this plugin you need to install dmidecode +REM * on your windows client. +REM * +REM * You can download dmidecode for windows from +REM * +REM * +REM * This plugin should work out of the box if you install dmidecode +REM * to the default location. 
+REM *** + +echo ^<^<^^>^> +C:\Programme\GnuWin32\sbin\dmidecode.exe -q diff -Nru check-mk-1.2.2p3/windows/plugins/windows_broadcom_bonding.bat check-mk-1.2.6p12/windows/plugins/windows_broadcom_bonding.bat --- check-mk-1.2.2p3/windows/plugins/windows_broadcom_bonding.bat 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/windows/plugins/windows_broadcom_bonding.bat 2015-09-18 13:35:20.000000000 +0000 @@ -0,0 +1,5 @@ +@echo off +echo ^<^<^^>^> + +rem Tested with BroadCom BASP v1.6.3 +wmic /namespace:\\root\BrcmBnxNS path brcm_redundancyset get caption,redundancystatus diff -Nru check-mk-1.2.2p3/windows/plugins/windows_intel_bonding.bat check-mk-1.2.6p12/windows/plugins/windows_intel_bonding.bat --- check-mk-1.2.2p3/windows/plugins/windows_intel_bonding.bat 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/windows/plugins/windows_intel_bonding.bat 2015-09-18 13:35:20.000000000 +0000 @@ -0,0 +1,14 @@ +@echo off +echo ^<^<^^>^> + + +wmic /namespace:\\root\IntelNCS2 path IANET_TeamOfAdapters get Caption,Name,RedundancyStatus + +echo ### + +wmic /namespace:\\root\IntelNCS2 path IANET_TeamedMemberAdapter get AdapterFunction,AdapterStatus,GroupComponent,PartComponent + +echo ### + +wmic /namespace:\\root\IntelNCS2 path IANET_PhysicalEthernetAdapter get AdapterStatus,Caption,DeviceID + diff -Nru check-mk-1.2.2p3/windows/plugins/windows_multipath.vbs check-mk-1.2.6p12/windows/plugins/windows_multipath.vbs --- check-mk-1.2.2p3/windows/plugins/windows_multipath.vbs 2012-10-04 13:12:37.000000000 +0000 +++ check-mk-1.2.6p12/windows/plugins/windows_multipath.vbs 2015-09-18 13:35:20.000000000 +0000 @@ -1,24 +1,24 @@ -' MPIO_PATH_INFORMATION.vbs -' VBS WMI MPIO -' Author: Torsten Haake -' Date: 2011-02-15 -' -----------------------------------------------' -Option Explicit -Dim objWMIService, objItem, colItems, strComputer - -' On Error Resume Next -strComputer = "." - -' added for check_mk parsing (fh@mathias-ketter.de) -Wscript.Echo "<<>>" - -' WMI connection to Root WMI -Set objWMIService = GetObject("winmgmts:\\" & strComputer & "\root\WMI") -Set colItems = objWMIService.ExecQuery("Select * from MPIO_PATH_INFORMATION") - -For Each objItem in colItems - Wscript.Echo objItem.NumberPaths -Next - -WSCript.Quit - +' MPIO_PATH_INFORMATION.vbs +' VBS WMI MPIO +' Author: Torsten Haake +' Date: 2011-02-15 +' -----------------------------------------------' +Option Explicit +Dim objWMIService, objItem, colItems, strComputer + +' On Error Resume Next +strComputer = "." 
+ +' added for check_mk parsing (fh@mathias-ketter.de) +Wscript.Echo "<<>>" + +' WMI connection to Root WMI +Set objWMIService = GetObject("winmgmts:\\" & strComputer & "\root\WMI") +Set colItems = objWMIService.ExecQuery("Select * from MPIO_PATH_INFORMATION") + +For Each objItem in colItems + Wscript.Echo objItem.NumberPaths +Next + +WSCript.Quit + diff -Nru check-mk-1.2.2p3/windows/plugins/windows_os_bonding.ps1 check-mk-1.2.6p12/windows/plugins/windows_os_bonding.ps1 --- check-mk-1.2.2p3/windows/plugins/windows_os_bonding.ps1 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/windows/plugins/windows_os_bonding.ps1 2015-09-18 13:35:20.000000000 +0000 @@ -0,0 +1,25 @@ +# you need this agent plugin if you want to monitor bonding interfaces +# on windows configured on operating system level + +try { + $teams = Get-NetLbfoTeam +} catch {} +if ($teams) { + Write-Host "<<>>" + foreach ($team in $teams){ + Write-Host Team Name: $team.Name + Write-Host Bonding Mode: $team.LoadBalancingAlgorithm + Write-Host Status: $team.Status + $bondspeed = (Get-NetAdapter | where {$_.InterfaceDescription -match "Multiplex"}).LinkSpeed + Write-Host Speed: $bondspeed `n + foreach ($slave in $team.members){ + Write-Host Slave Name: $slave + $net = Get-Netadapter $slave + Write-Host Slave Interface: $net.ifName + Write-Host Slave Description: $net.interfaceDescription + Write-Host Slave Status: $net.Status + Write-Host Slave Speed: $net.LinkSpeed + Write-Host Slave MAC address: $net.MacAddress `n + } + } +} diff -Nru check-mk-1.2.2p3/windows/plugins/windows_tasks.ps1 check-mk-1.2.6p12/windows/plugins/windows_tasks.ps1 --- check-mk-1.2.2p3/windows/plugins/windows_tasks.ps1 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/windows/plugins/windows_tasks.ps1 2015-09-21 10:59:54.000000000 +0000 @@ -0,0 +1,2 @@ +echo "<<>>" +schtasks /query /fo csv -v | ConvertFrom-Csv | ? {$_.HostName -match "^$($Env:Computername)$" -and $_.TaskName -notlike '\Microsoft*' -and $_.TaskName -notlike '*zachteHRM*'} | fl taskname,"last run time","next run time","last result","scheduled task state" \ No newline at end of file diff -Nru check-mk-1.2.2p3/windows/plugins/windows_time.bat check-mk-1.2.6p12/windows/plugins/windows_time.bat --- check-mk-1.2.2p3/windows/plugins/windows_time.bat 2013-11-05 09:22:37.000000000 +0000 +++ check-mk-1.2.6p12/windows/plugins/windows_time.bat 1970-01-01 00:00:00.000000000 +0000 @@ -1,3 +0,0 @@ -@echo off -echo ^<^<^^>^> -w32tm /query /status diff -Nru check-mk-1.2.2p3/windows/plugins/windows_updates.vbs check-mk-1.2.6p12/windows/plugins/windows_updates.vbs --- check-mk-1.2.2p3/windows/plugins/windows_updates.vbs 2013-03-04 11:48:41.000000000 +0000 +++ check-mk-1.2.6p12/windows/plugins/windows_updates.vbs 2015-09-21 10:59:54.000000000 +0000 @@ -1,111 +1,107 @@ -' ----------------------------------------------------------------------------------------- -' windows_updates.vbs - check_mk agent plugin to monitor pending windows updates indirectly -' -' To use this just place it in the plugins/ directory below the path of the -' check_mk_agent. After that an inventory run on the Nagios host should lead -' to a new inventorized service. 
-' -' Author: Lars Michelsen , 2011-03-21 -' Editor: Patrick Schlter , 2011-08-21 -' -' Updated by Phil Randal, 2012-09-21, to cache results using a randomised check interval -' of 16 to 24 hours -' Based on code here: http://www.monitoring-portal.org/wbb/index.php?page=Thread&threadID=23509 -' Spawning a separate process to produce cached result (as in above forum discussion) caused me -' some issues, so I went for a simpler solution using only one script -' ----------------------------------------------------------------------------------------- - -Option Explicit - -Dim result, reboot, numImp, numOpt, important, opti -Dim updtSearcher, colDownloads, objEntry - -Dim objFSO, objFile -Set objFSO = WScript.CreateObject("Scripting.FileSystemObject") - -Dim lastModificationDate -Dim updateNeeded -updateNeeded = True - -Dim WSHShell -Set WSHShell = CreateObject("WScript.Shell") - -Dim scriptname, scriptpath, strFolder - -scriptname = Wscript.ScriptFullName -scriptpath = objFSO.getparentfoldername(scriptname) - -strFolder = scriptpath & "\windows-update" -set objFSO = createobject("Scripting.FileSystemObject") - -if objFSO.FolderExists(strFolder) = False then - objFSO.CreateFolder strFolder -end if - -Dim ts, TextLine -Dim rndFudge - -Randomize -rndFudge = Int(8 * 60 * Rnd) ' random fudge factor for test (0 to 8 hrs) - -If objFSO.FileExists(scriptpath &"\windows-update\windows_updates-log.txt") Then - lastModificationDate = objFSO.GetFile(scriptpath &"\windows-update\windows_updates-log.txt").DateLastModified - If DateDiff("n", lastModificationDate, now) < ((60*24)-rndFudge) Then ' 1 day minus 0 to 8 hours - updateNeeded = False - End If -End If - -If updateNeeded Then - Set objFile = objFSO.CreateTextFile(scriptpath &"\windows-update\windows_updates-log.txt") - - If CreateObject("Microsoft.Update.AutoUpdate").DetectNow <> 0 Then - objFile.WriteLine("<<>>") - WScript.Quit() - End If - - Set updtSearcher = CreateObject("Microsoft.Update.Session").CreateUpdateSearcher - - reboot = 0 - numImp = 0 - numOpt = 0 - - If CreateObject("Microsoft.Update.SystemInfo").RebootRequired Then - reboot = 1 - End If - - Set result = updtSearcher.Search("IsInstalled = 0 and IsHidden = 0") - Set colDownloads = result.Updates - - For Each objEntry in colDownloads - if objEntry.AutoSelectOnWebSites Then - if numImp = 0 Then - important = objEntry.Title - else - important = important & "; " & objEntry.Title - End If - numImp = numImp + 1 - Else - If numOpt = 0 Then - opti = objEntry.Title - Else - opti = opti & "; " & objEntry.Title - End If - numOpt = numOpt + 1 - End If - Next - - objFile.WriteLine("<<>>") - objFile.WriteLine(reboot & " " & numImp & " " & numOpt) - objFile.WriteLine(important) - objFile.WriteLine(opti) - objFile.Close - -End If - -Set ts = objFSO.GetFile(scriptpath &"\windows-update\windows_updates-log.txt").OpenAsTextStream(1, -2) -Do While ts.AtEndOfStream <> True - WScript.Echo ts.ReadLine -Loop -ts.Close - -WScript.Quit() +' ----------------------------------------------------------------------------------------- +' windows_updates.vbs - check_mk agent plugin to monitor pending windows updates indirectly +' +' To use this just place it in the plugins/ directory below the path of the +' check_mk_agent. After that an inventory run on the Nagios host should lead +' to a new inventorized service. 
+' +' Author: Lars Michelsen , 2011-03-21 +' Editor: Patrick Schlüter , 2011-08-21 +' +' Updated by Phil Randal, 2012-09-21, to cache results using a randomised check interval +' of 16 to 24 hours +' Based on code here: http://www.monitoring-portal.org/wbb/index.php?page=Thread&threadID=23509 +' Spawning a separate process to produce cached result (as in above forum discussion) caused me +' some issues, so I went for a simpler solution using only one script +' +' Updated by Bastian Kuhn, 2014-03-03: Removed all caching functions cause the current agent +' has a native caching support. Make sure that you activate caching for this script in check_mk.ini +' +' 2014-04-17: Fix by Stefan Kick to handle errors. Payed by Adaptron. +' ----------------------------------------------------------------------------------------- + +Option Explicit + +function readFromRegistry (strRegistryKey, strDefault ) + Dim WSHShell, value + + On Error Resume Next + Set WSHShell = CreateObject("WScript.Shell") + value = WSHShell.RegRead( strRegistryKey ) + + if err.number <> 0 then + readFromRegistry=strDefault + else + readFromRegistry=value + end if + + set WSHShell = nothing +end function + +Dim result, reboot, numImp, numOpt, important, opti +Dim updtSearcher, colDownloads, objEntry + + +Dim WSHShell +Set WSHShell = CreateObject("WScript.Shell") + +Dim RebootTime +Dim RegPath + +If CreateObject("Microsoft.Update.AutoUpdate").DetectNow <> 0 Then + WScript.Echo "<<>>" + WScript.Quit() +End If + +Set updtSearcher = CreateObject("Microsoft.Update.Session").CreateUpdateSearcher + +RegPath = "HKEY_LOCAL_MACHINE\SOFTWARE\MICROSOFT\Windows\CurrentVersion\WindowsUpdate\Auto Update\" +RebootTime = ReadFromRegistry(RegPath & "NextFeaturedUpdatesNotificationTime","no_key") + +reboot = 0 +numImp = 0 +numOpt = 0 + +If CreateObject("Microsoft.Update.SystemInfo").RebootRequired Then + reboot = 1 +End If + +On Error Resume Next + +Set result = updtSearcher.Search("IsInstalled = 0 and IsHidden = 0") + +If Err.Number <> 0 then + WScript.Echo "<<>>" + Wscript.Echo "x x x" + Wscript.Echo "There was an error getting update information. Maybe Windows update is not activated. 
Error Number: " & Err.Number + WScript.Quit() +End If + + +Set colDownloads = result.Updates +For Each objEntry in colDownloads + + if objEntry.AutoSelectOnWebSites Then + if numImp = 0 Then + important = objEntry.Title + else + important = important & "; " & objEntry.Title + End If + numImp = numImp + 1 + Else + If numOpt = 0 Then + opti = objEntry.Title + Else + opti = opti & "; " & objEntry.Title + End If + numOpt = numOpt + 1 + End If + +Next + +WScript.Echo "<<>>" +WScript.Echo reboot & " " & numImp & " " & numOpt +WScript.Echo important +WScript.Echo opti +WScript.Echo RebootTime +WScript.Quit() diff -Nru check-mk-1.2.2p3/windows/plugins/win_printers.ps1 check-mk-1.2.6p12/windows/plugins/win_printers.ps1 --- check-mk-1.2.2p3/windows/plugins/win_printers.ps1 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/windows/plugins/win_printers.ps1 2015-09-18 13:35:20.000000000 +0000 @@ -0,0 +1,254 @@ +#### +# +# http://blogs.msdn.com/b/powershell/archive/2012/07/13/join-object.aspx +# +#### + +### +## http://blogs.technet.com/b/heyscriptingguy/archive/2006/12/04/how-can-i-expand-the-width-of-the-windows-powershell-console.aspx + +$pshost = get-host +$pswindow = $pshost.ui.rawui + +$newsize = $pswindow.buffersize +$newsize.height = 300 +$newsize.width = 150 +$pswindow.buffersize = $newsize + +### + +Write-Host -NoNewLine "<<>>" +$Data_Set1 = Get-WMIObject Win32_PerfFormattedData_Spooler_PrintQueue | Select Name, @{Expression={$_.jobs};Label="CurrentJobs"} +$Data_Set2 = Get-WmiObject win32_printer | select name, printerstatus, detectederrorstate +$Data_Set2 = Get-WmiObject win32_printer | ?{$_.PortName -notmatch '^TS'} | Select name, printerstatus, detectederrorstate + + +function AddItemProperties($item, $properties, $output) +{ + if($item -ne $null) + { + foreach($property in $properties) + { + $propertyHash =$property -as [hashtable] + if($propertyHash -ne $null) + { + $hashName=$propertyHash["name"] -as [string] + if($hashName -eq $null) + { + throw "there should be a string Name" + } + + $expression=$propertyHash["expression"] -as [scriptblock] + if($expression -eq $null) + { + throw "there should be a ScriptBlock Expression" + } + + $_=$item + $expressionValue=& $expression + + $output | add-member -MemberType "NoteProperty" -Name $hashName -Value $expressionValue + } + else + { + # .psobject.Properties allows you to list the properties of any object, also known as "reflection" + foreach($itemProperty in $item.psobject.Properties) + { + if ($itemProperty.Name -like $property) + { + $output | add-member -MemberType "NoteProperty" -Name $itemProperty.Name -Value $itemProperty.Value + } + } + } + } + } +} + + +function WriteJoinObjectOutput($leftItem, $rightItem, $leftProperties, $rightProperties, $Type) +{ + $output = new-object psobject + + if($Type -eq "AllInRight") + { + # This mix of rightItem with LeftProperties and vice versa is due to + # the switch of Left and Right arguments for AllInRight + AddItemProperties $rightItem $leftProperties $output + AddItemProperties $leftItem $rightProperties $output + } + else + { + AddItemProperties $leftItem $leftProperties $output + AddItemProperties $rightItem $rightProperties $output + } + $output +} + +<# +.Synopsis + Joins two lists of objects +.DESCRIPTION + Joins two lists of objects +.EXAMPLE + Join-Object $a $b "Id" ("Name","Salary") +#> +function Join-Object +{ + [CmdletBinding()] + [OutputType([int])] + Param + ( + # List to join with $Right + [Parameter(Mandatory=$true, + Position=0)] + [object[]] + $Left, + + # List to join 
with $Left + [Parameter(Mandatory=$true, + Position=1)] + [object[]] + $Right, + + # Condition in which an item in the left matches an item in the right + # typically something like: {$args[0].Id -eq $args[1].Id} + [Parameter(Mandatory=$true, + Position=2)] + [scriptblock] + $Where, + + # Properties from $Left we want in the output. + # Each property can: + # - Be a plain property name like "Name" + # - Contain wildcards like "*" + # - Be a hashtable like @{Name="Product Name";Expression={$_.Name}}. Name is the output property name + # and Expression is the property value. The same syntax is available in select-object and it is + # important for join-object because joined lists could have a property with the same name + [Parameter(Mandatory=$true, + Position=3)] + [object[]] + $LeftProperties, + + # Properties from $Right we want in the output. + # Like LeftProperties, each can be a plain name, wildcard or hashtable. See the LeftProperties comments. + [Parameter(Mandatory=$true, + Position=4)] + [object[]] + $RightProperties, + + # Type of join. + # AllInLeft will have all elements from Left at least once in the output, and might appear more than once + # if the where clause is true for more than one element in right, Left elements with matches in Right are + # preceded by elements with no matches. This is equivalent to an outer left join (or simply left join) + # SQL statement. + # AllInRight is similar to AllInLeft. + # OnlyIfInBoth will cause all elements from Left to be placed in the output, only if there is at least one + # match in Right. This is equivalent to a SQL inner join (or simply join) statement. + # AllInBoth will have all entries in right and left in the output. Specifically, it will have all entries + # in right with at least one match in left, followed by all entries in Right with no matches in left, + # followed by all entries in Left with no matches in Right.This is equivallent to a SQL full join. 
+ [Parameter(Mandatory=$false, + Position=5)] + [ValidateSet("AllInLeft","OnlyIfInBoth","AllInBoth", "AllInRight")] + [string] + $Type="OnlyIfInBoth" + ) + + Begin + { + # a list of the matches in right for each object in left + $leftMatchesInRight = new-object System.Collections.ArrayList + + # the count for all matches + $rightMatchesCount = New-Object "object[]" $Right.Count + + for($i=0;$i -lt $Right.Count;$i++) + { + $rightMatchesCount[$i]=0 + } + } + + Process + { + if($Type -eq "AllInRight") + { + # for AllInRight we just switch Left and Right + $aux = $Left + $Left = $Right + $Right = $aux + } + + # go over items in $Left and produce the list of matches + foreach($leftItem in $Left) + { + $leftItemMatchesInRight = new-object System.Collections.ArrayList + $null = $leftMatchesInRight.Add($leftItemMatchesInRight) + + for($i=0; $i -lt $right.Count;$i++) + { + $rightItem=$right[$i] + + if($Type -eq "AllInRight") + { + # For AllInRight, we want $args[0] to refer to the left and $args[1] to refer to right, + # but since we switched left and right, we have to switch the where arguments + $whereLeft = $rightItem + $whereRight = $leftItem + } + else + { + $whereLeft = $leftItem + $whereRight = $rightItem + } + + if(Invoke-Command -ScriptBlock $where -ArgumentList $whereLeft,$whereRight) + { + $null = $leftItemMatchesInRight.Add($rightItem) + $rightMatchesCount[$i]++ + } + + } + } + + # go over the list of matches and produce output + for($i=0; $i -lt $left.Count;$i++) + { + $leftItemMatchesInRight=$leftMatchesInRight[$i] + $leftItem=$left[$i] + + if($leftItemMatchesInRight.Count -eq 0) + { + if($Type -ne "OnlyIfInBoth") + { + WriteJoinObjectOutput $leftItem $null $LeftProperties $RightProperties $Type + } + + continue + } + + foreach($leftItemMatchInRight in $leftItemMatchesInRight) + { + WriteJoinObjectOutput $leftItem $leftItemMatchInRight $LeftProperties $RightProperties $Type + } + } + } + + End + { + #produce final output for members of right with no matches for the AllInBoth option + if($Type -eq "AllInBoth") + { + for($i=0; $i -lt $right.Count;$i++) + { + $rightMatchCount=$rightMatchesCount[$i] + if($rightMatchCount -eq 0) + { + $rightItem=$Right[$i] + WriteJoinObjectOutput $null $rightItem $LeftProperties $RightProperties $Type + } + } + } + } +} + +Join-Object -Left $Data_Set1 -Right $Data_Set2 -Where {$args[0].Name -eq $args[1].Name} LeftProperties "Name","CurrentJobs" RightProperties "printerstatus","detectederrorstate" -Type OnlyIfInBoth | format-table -HideTableHeaders diff -Nru check-mk-1.2.2p3/windows/plugins/wmicchecks.bat check-mk-1.2.6p12/windows/plugins/wmicchecks.bat --- check-mk-1.2.2p3/windows/plugins/wmicchecks.bat 2013-11-05 09:22:37.000000000 +0000 +++ check-mk-1.2.6p12/windows/plugins/wmicchecks.bat 1970-01-01 00:00:00.000000000 +0000 @@ -1,3 +0,0 @@ -@echo off -echo ^<^<^^>^> -wmic process get name,pagefileusage,virtualsize,workingsetsize,usermodetime,kernelmodetime,ThreadCount /format:csv diff -Nru check-mk-1.2.2p3/windows/plugins/wmic_if.bat check-mk-1.2.6p12/windows/plugins/wmic_if.bat --- check-mk-1.2.2p3/windows/plugins/wmic_if.bat 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/windows/plugins/wmic_if.bat 2015-09-18 13:35:20.000000000 +0000 @@ -0,0 +1,3 @@ +@echo off +echo ^<^<^^>^> +wmic path Win32_NetworkAdapter get speed,macaddress,name,netconnectionid,netconnectionstatus /format:csv Binary files /tmp/tmpz1J_py/xquijR6Ryw/check-mk-1.2.2p3/windows/plugins/wmic_if.ps1 and /tmp/tmpz1J_py/GXS8RRKTxu/check-mk-1.2.6p12/windows/plugins/wmic_if.ps1 
differ diff -Nru check-mk-1.2.2p3/windows_broadcom_bonding check-mk-1.2.6p12/windows_broadcom_bonding --- check-mk-1.2.2p3/windows_broadcom_bonding 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/windows_broadcom_bonding 2015-06-24 09:48:37.000000000 +0000 @@ -0,0 +1,58 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. + + +# <<>> +# Caption RedundancyStatus +# BOND_10.3 2 +# BOND_HeartbeatMS 2 +# + +def inventory_windows_broadcom_bonding(info): + inventory = [] + for line in info[1:]: + inventory.append( (" ".join(line[:-1]), "", None) ) + return inventory + +def check_windows_broadcom_bonding(item, params, info): + for line in info: + if " ".join(line[:-1]) == item: + status = int(line[-1]) + if status == 5: + return (2, "Bond not working") + elif status == 4: + return (1, "Bond partly working") + elif status == 2: + return (0, "Bond fully working") + else: + return (3, "Bond status cannot be recognized") + return (3, "Bond %s not found in agent output" % item) + +check_info['windows_broadcom_bonding'] = { + "check_function" : check_windows_broadcom_bonding, + "inventory_function" : inventory_windows_broadcom_bonding, + "service_description" : "Bonding Interface %s", +} diff -Nru check-mk-1.2.2p3/windows_intel_bonding check-mk-1.2.6p12/windows_intel_bonding --- check-mk-1.2.2p3/windows_intel_bonding 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/windows_intel_bonding 2015-09-21 10:59:54.000000000 +0000 @@ -0,0 +1,122 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. 
check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. + + +#<<>> +#Caption Name RedundancyStatus +#Bond_10.4 {714F579F-D17A-40DC-B684-083C561EE352} 2 +# +#### +#AdapterFunction AdapterStatus GroupComponent PartComponent +#1 1 IANet_TeamOfAdapters.CreationClassName="IANet_TeamOfAdapters",Name="{714F579F-D17A-40DC-B684-083C561EE352}" IANet_PhysicalEthernetAdapter.CreationClassName="IANet_PhysicalEthernetAdapter",DeviceID="{18EC3002-F03B-4B69-AD88-BFEB700460DC}",SystemCreationClassName="Win32_ComputerSystem",SystemName="Z3061021" +#2 2 IANet_TeamOfAdapters.CreationClassName="IANet_TeamOfAdapters",Name="{714F579F-D17A-40DC-B684-083C561EE352}" IANet_PhysicalEthernetAdapter.CreationClassName="IANet_PhysicalEthernetAdapter",DeviceID="{1EDEBE50-005F-4533-BAFC-E863617F1030}",SystemCreationClassName="Win32_ComputerSystem",SystemName="Z3061021" +# +#### +#AdapterStatus Caption DeviceID +#51 TEAM : Bond_10.4 - Intel(R) Gigabit ET Dual Port Server Adapter {18EC3002-F03B-4B69-AD88-BFEB700460DC} +#51 TEAM : Bond_10.4 - Intel(R) Gigabit ET Dual Port Server Adapter #2 {1EDEBE50-005F-4533-BAFC-E863617F1030} +#35 Broadcom BCM5709C NetXtreme II GigE (NDIS VBD Client) #43 {55799336-A84B-4DA5-8EB9-B7426AA1AB75} +#35 Broadcom BCM5709C NetXtreme II GigE (NDIS VBD Client) #35 {7DB9B461-FAC0-4763-9AF9-9A6CA6648188} +#35 Broadcom BCM5709C NetXtreme II GigE (NDIS VBD Client) #40 {82AE1F27-BF28-4E30-AC3D-809DF5FF0D39} +#35 Broadcom BCM5709C NetXtreme II GigE (NDIS VBD Client) #38 {DC918766-F61C-4801-92F8-E5532907EA0D} + +def get_real_adapter_name(bond, name): + prefix = "TEAM : %s - " % bond + return name[len(prefix):] + +def parse_windows_intel_bonding(info): + lines = iter(info) + bonds = {} + adapters = {} + adapter_names = {} + + try: + # Get bond info + lines.next() # Skip header + while True: + line = lines.next() + if line[0] == "###": + break + bond_caption = " ".join(line[:-2]) + bond_name, bond_mode = line[-2], line[-1] + bonds[bond_name] = { "caption": bond_caption, "mode": bond_mode} + + # Get adapter info + lines.next() # Skip header + while True: + line = lines.next() + if line[0] == "###": + break + adapter_function, adapter_status = line[0], line[1] + adapter_bond = line[2].split(",")[-1].split("=")[1][1:-1] + adapter = line[3].split(",")[1].split("=")[1][1:-1] + adapters[adapter] = { "function": adapter_function, "status": adapter_status, "bond": adapter_bond } + + # Get adapter names + lines.next() # Skip header + while True: + line = lines.next() + adapter_names[line[-1]] = " ".join(line[1:-1]) + + except StopIteration: + pass + + + # Now convert to generic dict, also used by other bonding checks + converted = {} + map_adapter_status = { "0": "Unknown", "1": "up", "2": "up", "3": "down"} + for bond, status in bonds.items(): + interfaces = {} + bond_status = "down" + converted[status["caption"]] = {} + for adapter, adapter_info in adapters.items(): + if bond == adapter_info["bond"]: + real_adapter_name = get_real_adapter_name(status["caption"], adapter_names[adapter]) + if adapter_info["function"] == "1": + converted[status["caption"]]["primary"] = real_adapter_name + if 
adapter_info["status"] == "1": + converted[status["caption"]]["active"] = real_adapter_name + bond_status = "up" + interfaces[real_adapter_name] = { + "status" : map_adapter_status.get(adapter_info["status"], "down"), + } + + converted[status["caption"]].update({ + "status" : bond_status, + "mode" : status["mode"], + "interfaces" : interfaces, + }) + + return converted + + +check_info['windows_intel_bonding'] = { + "check_function" : lambda item,params,info: check_bonding(item, params, parse_windows_intel_bonding(info)), + "inventory_function" : lambda info: inventory_bonding(parse_windows_intel_bonding(info)), + "service_description" : "Bonding interface %s", + "group" : "bonding", + "includes" : [ "bonding.include" ], +} diff -Nru check-mk-1.2.2p3/windows_multipath check-mk-1.2.6p12/windows_multipath --- check-mk-1.2.2p3/windows_multipath 2013-11-05 09:23:08.000000000 +0000 +++ check-mk-1.2.6p12/windows_multipath 2015-06-24 09:48:37.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. @@ -44,20 +44,14 @@ all_paths = params # Anything less than 51% of paths is considered crit - if num_paths == all_paths: + if num_paths == all_paths: state = 0 elif num_paths <= all_paths / 2: state = 2 - elif num_paths < all_paths: + else: state = 1 - - return (state, nagios_state_names[state] + " - paths reported %d, expected paths %d" % (num_paths, all_paths) + state * "!") - - - - return (3, "UNKNOWN - Data not in agent output") - + return (state, "paths reported %d, expected paths %d" % (num_paths, all_paths) + state * "!") check_info["windows_multipath"] = { "check_function" : check_windows_multipath, diff -Nru check-mk-1.2.2p3/windows_os_bonding check-mk-1.2.6p12/windows_os_bonding --- check-mk-1.2.2p3/windows_os_bonding 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/windows_os_bonding 2015-09-21 10:59:54.000000000 +0000 @@ -0,0 +1,77 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. 
+ +# <<>> +# Team Name: LAN +# Bonding Mode: Dynamic +# Status: Up +# Speed: 20 Gbps +# +# Slave Name: NIC1 +# Slave Interface: Ethernet_14 +# Slave Description: Intel(R) Ethernet 10G 2P X520-k bNDC #2 +# Slave Status: Up +# Slave Speed: 10 Gbps +# Slave MAC address: 18-A9-9B-9F-AD-28 +# +# Slave Name: NIC2 +# Slave Interface: Ethernet_10 +# Slave Description: Intel(R) Ethernet 10G 2P X520-k bNDC +# Slave Status: Up +# Slave Speed: 10 Gbps +# Slave MAC address: 18-A9-9B-9F-AD-2A + +def parse_windows_os_bonding(info): + bonds = {} + + for line in info: + if len(line)>1: + line[1] = re.sub("^ +", "", line[1]) + if line[0] == "Team Name": + bond = line[1] + bonds[bond] = {} + bonds[bond]["interfaces"] = {} + elif line[0] == "Bonding Mode": + bonds[bond]["mode"] = line[1] + elif line[0] == "Status": + bonds[bond]["status"] = line[1].lower() + elif line[0] == "Slave Name": + slave = line[1] + bonds[bond]["interfaces"][slave] = {} + elif line[0] == "Slave Status": + bonds[bond]["interfaces"][slave]["status"] = line[1].lower() + elif line[0] == "Slave MAC address": + bonds[bond]["interfaces"][slave]["hwaddr"] = line[1].lower().replace("-", ":") + return bonds + + +check_info['windows_os_bonding'] = { + "check_function" : lambda item,params,info: check_bonding(item, params, parse_windows_os_bonding(info)), + "inventory_function" : lambda info: inventory_bonding(parse_windows_os_bonding(info)), + "service_description" : "Bonding Interface %s", + "group" : "bonding", + "includes" : [ "bonding.include" ], +} diff -Nru check-mk-1.2.2p3/windows_tasks check-mk-1.2.6p12/windows_tasks --- check-mk-1.2.2p3/windows_tasks 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/windows_tasks 2015-06-24 09:48:37.000000000 +0000 @@ -0,0 +1,99 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. 
+ +# Example output from agent: +# <<>> +# TaskName : \WebShopPictureUpload +# Last Run Time : 17.10.2013 23:00:00 +# Next Run Time : 18.10.2013 23:00:00 +# Last Result : 0 +# Scheduled Task State : Enabled +# +# TaskName : \OfficeSoftwareProtectionPlatform\SvcRestartTask +# Last Run Time : N/A +# Next Run Time : Disabled +# Last Result : 1 +# Scheduled Task State : Disabled + +# A list of all task state can be found here: +# http://msdn.microsoft.com/en-us/library/aa383604%28VS.85%29.aspx + +def windows_tasks_convert(info): + data = {} + last_task = False + for line in info: + name = line[0].strip() + value = ":".join(line[1:]).strip() + if last_task and name != "TaskName": + data[last_task][name] = value + + if name == 'TaskName': + last_task = value + data[last_task] = {} + return data + +def inventory_windows_tasks(info): + info = windows_tasks_convert(info) + return [ (n, None) for n, v in info.items() if v['Scheduled Task State'] == "Enabled"] + +def check_windows_tasks(item, _no_params, info): + info = windows_tasks_convert(info) + for name, values in info.items(): + if name == item: + last_result = saveint(values['Last Result']) + state = 0 + label = "" + msg = [] + if last_result not in [ 0, 0x00041301, 0x00041325, 0x00041306 ]: # RUNNING, QUEUED, CANCELD + state = 2 + label = "(!!)" + msg.append("Service in state: %s%s" % ( last_result, label ) ) + if last_result == 0x00041301: + msg[-1] += " (currently running)" + elif last_result == 0x00041325: + msg[-1] += " (queued)" + + if values['Scheduled Task State'] != 'Enabled': + msg.append("Task not Enabled(!!)") + state = 2 + + if "Last Run Time" in values: + msg.append("last run time: %s" % values["Last Run Time"]) + + if "Next Run Time" in values: + msg.append("next run time: %s" % values["Next Run Time"]) + + return state, ", ".join(msg) + + return 3, "Task not found on server" + +check_info["windows_tasks"] = { + "check_function" : check_windows_tasks, + "inventory_function" : inventory_windows_tasks, + "service_description" : "Task %s", + "has_perfdata" : False, +} + diff -Nru check-mk-1.2.2p3/windows_updates check-mk-1.2.6p12/windows_updates --- check-mk-1.2.2p3/windows_updates 2013-11-05 09:23:08.000000000 +0000 +++ check-mk-1.2.6p12/windows_updates 2015-06-24 09:48:37.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. @@ -24,60 +24,105 @@ # to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, # Boston, MA 02110-1301 USA. 
-# Author: Lars Michelsen - # <<>> # 0 2 5 # Windows XP Service Pack 3 (KB936929); Windows-Tool zum Entfernen sch�dlicher Software - M�rz 2011 (KB890830) # Update f�r WMDRM-f�hige Medienplayer (KB891122); Windows Media Player 11; Windows Search 4.0 f�r Windows XP (KB940157); Microsoft Base Smartcard-Kryptografiedienstanbieter-Paket: x86 (KB909520); Update f�r die Microsoft .NET Framework 3.5 Service Pack 1- und .NET Framework 3.5-Produktfamilie (KB951847) x86 # First row: Reboot_required, num_important, num_optional -# Second row: List of all important updates -# Thirt row: List of all optional updates +# Second row: List of all important updates (optional) +# Third row: List of all optional updates (optional) +# Last row: Date and time of forced update (optional) -windows_updates_default_params = (None, None, None, None) +windows_updates_default_params = (0, 0, 0, 0, 604800, 172800, True) def inventory_windows_updates(info): if info and len(info[0]) == 3: return [(None, "windows_updates_default_params")] -def check_windows_updates(_unused, params, info): +def check_windows_updates(_no_item, params, info): if info and len(info[0]) == 3: status = 0 + # Workarround to return errors from the plugin + if info[0][0] == 'x': + return 2, ' '.join(info[1]) reboot_required, num_imp, num_opt = map(saveint, info[0]) - imp_warn, imp_crit, opt_warn, opt_crit = params + imp_warn, imp_crit, opt_warn, opt_crit = params[0:4] + if len(params) == 7: + force_warn, force_crit, verbose = params[4:7] + else: + force_warn = 604800 + force_crit = 172800 + verbose = True important = '' - if len(info) >= 2: - important = ' '.join(info[1]) optional = '' - if len(info) >= 3: + + last = 1 + if num_imp != 0: + important = ' '.join(info[1]) + last += 1 + if num_opt != 0 and num_imp != 0: + last += 1 optional = ' '.join(info[2]) + elif num_opt != 0: + last += 1 + optional = ' '.join(info[1]) + + # the last element may be the forced_reboot time + forced_reboot = "" + if len(info) - 1 == last and len(info[last]) == 2: + forced_reboot = info[last] txt = [] perfdata = [] for label, updates, cur, warn, crit in [ ('important', important, num_imp, imp_warn, imp_crit), ('optional', optional, num_opt, opt_warn, opt_crit) ]: - this_txt = '%d %s' % (cur, label) - if label == 'important' and cur > 0: - this_txt += ' (%s)' % updates + this_txt = '%d %s updates' % (cur, label) if crit and cur >= crit: - this_txt += ' (CRIT: >=%d)' % crit + this_txt += ' >=%d (!!)' % crit if status < 2: status = 2 elif warn and cur >= warn: - this_txt += ' (WARN: >=%d)' % warn + this_txt += ' >=%d (!)' % warn if status < 1: status = 1 + if label == 'important' and cur > 0 and verbose: + this_txt += ', (%s) --- ' % updates txt.append(this_txt) perfdata.append((label, cur, warn, crit)) if reboot_required == 1: if status < 1: status = 1 - txt.append('WARN: A reboot is required to finish update installations') + txt.append('Reboot required to finish updates(!)') + + if forced_reboot != "": + parsed = time.strptime(" ".join(forced_reboot), "%Y-%m-%d %H:%M:%S") + now = int(time.time()) + delta = time.mktime(parsed) - now + + # check if force_date is in the future + if delta >= 0: + sym = "" + if force_crit and delta <= force_crit: + sym = "(!!)" + status = 2 + elif force_warn and delta <= force_warn: + sym = "(!)" + status = max(status, 1) + + boot_txt = 'Reboot enforced in %s to finish updates%s' % (get_age_human_readable(delta), sym) + txt.append(boot_txt) + + return (status, ', '.join(txt), perfdata) - return (status, '%s - %s' % 
(nagios_state_names[status], ', '.join(txt)), perfdata) + return (3, 'No windows update information provided') - return (3, 'UNKNOWN - No windows update information provided') -check_info['windows_updates'] = (check_windows_updates, "System Updates", 1, inventory_windows_updates) +check_info["windows_updates"] = { + 'check_function': check_windows_updates, + 'inventory_function': inventory_windows_updates, + 'service_description': 'System Updates', + 'group': 'windows_updates', + 'has_perfdata': True, +} diff -Nru check-mk-1.2.2p3/win_exefiles check-mk-1.2.6p12/win_exefiles --- check-mk-1.2.2p3/win_exefiles 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/win_exefiles 2015-09-21 10:59:54.000000000 +0000 @@ -0,0 +1,72 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. + +# Example output +# <<>> +# C:\Program Files\Common Files\Microsoft Shared\MSInfo\msinfo32.exe|378880 +# C:\Program Files\Common Files\Microsoft Shared\OFFICE15\CMigrate.exe|6847704 +# C:\Program Files\Common Files\Microsoft Shared\OFFICE15\MSOXMLED.EXE|217200 +# C:\Program Files\Common Files\Microsoft Shared\OfficeSoftwareProtectionPlatform\OSPPSVC.EXE|5132888 + +def inv_win_exefiles(info): + paclist = inv_tree("software.packages:") + for line in info: + if len(line) != 6: + continue # ignore broken lines containing parse errors + full_name, write_time, size, description, product_version, product_name = line + parts = full_name.split('\\') + # Since 1.2.6p1 the agent always provides a date format of "04/18/2003 18:06:32". + # Old agent versions provided localized date formats which lead to problems here + # when none of the implemented parsers matches. We keep the existing parsers for + # compatibility, all users with yet unhandled formats should update the agent to + # solve the problems. 
+ if re.match("^\d{2}\.\d{2}\.20\d{2} \d{2}:\d{2}:\d{2}", write_time): + install_date = int(time.mktime(time.strptime(write_time, "%d.%m.%Y %H:%M:%S"))) + elif re.match("^\d{1,2}/\d{1,2}/20\d{2} \d{1,2}:\d{2}:\d{2} (AM|PM)", write_time): + install_date = int(time.mktime(time.strptime(write_time, "%m/%d/%Y %H:%M:%S %p"))) + elif re.match("^\d{1,2}/\d{1,2}/20\d{2} \d{1,2}:\d{2}:\d{2}", write_time): + # This is the 1.2.6p1 new default date + install_date = int(time.mktime(time.strptime(write_time, "%m/%d/%Y %H:%M:%S"))) + else: + install_date = None # need to return 0 to not break the painter which assumes an int + + entry = { + "name" : parts[-1], + "path" : "\\".join(parts[:-1]), + "package_type" : "exe", + "install_date" : install_date, + "size" : saveint(size), + "version" : product_version, + "summary" : description, + "vendor" : product_name, + } + paclist.append(entry) + + +inv_info['win_exefiles'] = { + "inv_function" : inv_win_exefiles, + "unicode" : True, +} diff -Nru check-mk-1.2.2p3/win_netstat check-mk-1.2.6p12/win_netstat --- check-mk-1.2.2p3/win_netstat 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/win_netstat 2015-06-24 09:48:37.000000000 +0000 @@ -0,0 +1,90 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. 
+ + +# Example output from agent (German Windows XP) +# <<>> +# +# Aktive Verbindungen +# +# Proto Lokale Adresse Remoteadresse Status +# TCP 0.0.0.0:135 0.0.0.0:0 ABH™REN +# TCP 0.0.0.0:445 0.0.0.0:0 ABH™REN +# TCP 0.0.0.0:2869 0.0.0.0:0 ABH™REN +# TCP 0.0.0.0:6556 0.0.0.0:0 ABH™REN +# TCP 10.1.1.99:139 0.0.0.0:0 ABH™REN +# TCP 10.1.1.99:445 10.1.1.123:52820 HERGESTELLT +# TCP 10.1.1.99:6556 10.1.1.50:43257 WARTEND +# TCP 10.1.1.99:6556 10.1.1.50:43288 WARTEND +# TCP 10.1.1.99:6556 10.1.1.50:43309 WARTEND +# TCP 127.0.0.1:1029 127.0.0.1:5354 HERGESTELLT +# TCP 127.0.0.1:1030 0.0.0.0:0 ABH™REN +# TCP 127.0.0.1:1040 127.0.0.1:27015 HERGESTELLT +# TCP 127.0.0.1:5354 0.0.0.0:0 ABH™REN +# TCP 127.0.0.1:5354 127.0.0.1:1029 HERGESTELLT +# TCP 127.0.0.1:27015 0.0.0.0:0 ABH™REN +# TCP 127.0.0.1:27015 127.0.0.1:1040 HERGESTELLT +# UDP 0.0.0.0:445 *:* +# UDP 0.0.0.0:500 *:* +# UDP 127.0.0.1:1042 *:* +# UDP 127.0.0.1:1900 *:* + +win_netstat_states = { + # German + "ABH\x99REN" : "LISTENING", + "HERGESTELLT" : "ESTABLISHED", + "WARTEND" : "TIME_WAIT", + # Add further states in any required language here. Sorry, Windows + # has no "unset LANG" ;-) +} + + +def parse_win_netstat(info): + connections = [] + for line in info: + if line[0] == "TCP": + proto, local, remote, connstate = line + elif line[0] == "UDP": + proto, local, remote = line + connstate = "LISTEN" + else: + continue + connections.append( (proto, local.rsplit(":", 1), remote.rsplit(":", 1), + win_netstat_states.get(connstate, connstate)) ) + return connections + + +def check_win_netstat(item, params, info): + connections = parse_win_netstat(info) + return check_netstat_generic(item, params, connections) + + +check_info["win_netstat"] = { + 'check_function' : check_win_netstat, + 'service_description' : "TCP Connection %s", + 'group' : "tcp_connections", + 'includes' : [ "netstat.include" ], +} diff -Nru check-mk-1.2.2p3/win_os check-mk-1.2.6p12/win_os --- check-mk-1.2.2p3/win_os 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/win_os 2015-09-21 10:59:54.000000000 +0000 @@ -0,0 +1,58 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. 
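To make the win_netstat normalization above concrete, here is a minimal standalone sketch of how one localized agent line is converted (sample taken from the German output shown above):

# One TCP line from a German agent, already split on whitespace:
line = ["TCP", "10.1.1.99:445", "10.1.1.123:52820", "HERGESTELLT"]

proto, local, remote, connstate = line
connection = (proto,
              local.rsplit(":", 1),   # ["10.1.1.99", "445"]; rsplit keeps IPv6 addresses intact
              remote.rsplit(":", 1),
              {"HERGESTELLT": "ESTABLISHED"}.get(connstate, connstate))

assert connection[3] == "ESTABLISHED"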
+ +# Example output +# <<>> +# WIN2003|Microsoft(R) Windows(R) Server 2003 Standard Edition|5.2.3790||2|0|20100620230628.000000+120 + +def inv_win_os(info): + node = inv_tree("software.os.") + line = info[0] + if len(line) == 7: + node["name"] = line[1] + node["kernel_version"] = line[2] + if line[3]: + if line[3].lower() == "64-bit": + node["arch"] = "x86_64" + else: + node["arch"] = "i386" + node["service_pack"] = line[4]+"."+line[5] + + if '+' in line[6]: + datestr, tz = line[6].split('+') + tz = int(tz) + elif '-' in line[6]: + datestr, tz = line[6].split('-') + tz = int(tz)*-1 + else: + datestr, tz = line[6], 0 + node["install_date"] = int(time.mktime(time.strptime(datestr, "%Y%m%d%H%M%S.%f"))) - tz*60 + + +inv_info['win_os'] = { + "inv_function" : inv_win_os, + "unicode" : True, +} diff -Nru check-mk-1.2.2p3/winperf check-mk-1.2.6p12/winperf --- check-mk-1.2.2p3/winperf 2013-11-05 09:23:08.000000000 +0000 +++ check-mk-1.2.6p12/winperf 2015-06-24 09:48:37.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. @@ -46,10 +46,6 @@ else: warn, crit = params['levels'] - if params == None: - params = winperf_cpu_default_levels - warn, crit = params - for line in info: if line[0] == '238:6': this_time = int(float(line[1])) @@ -60,17 +56,10 @@ counter_wrapped = False for cpu in range(0, num_cpus): ticks = int(line[2 + cpu]) - try: - timedif, ticks_per_sec = get_counter("cpuusage.%d" % cpu, this_time, ticks) - except MKCounterWrapped, e: - counter_wrapped = e - timedif, ticks_per_sec = 60, 0 + ticks_per_sec = get_rate("cpuusage.%d" % cpu, this_time, ticks) secs_per_sec = ticks_per_sec / 10000000.0; used_perc = 100 * (1 - secs_per_sec) overall_perc += used_perc - # At least one counter wrapped or started. 
This check is useless
-        if counter_wrapped:
-            raise counter_wrapped
 
     used_perc = overall_perc / num_cpus
@@ -84,16 +73,16 @@
     else:
         num_txt = " / %d CPUs" % num_cpus
-    infotext = " - %d%% used%s (in last %d secs)" % (int(used_perc), num_txt, timedif)
+    infotext = "%d%% used%s" % (int(used_perc), num_txt)
 
     if used_perc >= crit:
-        return (2, "CRIT" + infotext, perfdata)
+        return (2, infotext, perfdata)
     elif used_perc >= warn:
-        return (1, "WARN" + infotext, perfdata)
+        return (1, infotext, perfdata)
     else:
-        return (0, "OK" + infotext, perfdata)
+        return (0, infotext, perfdata)
 
-    return (3, "UNKNOWN - counter for cpu (238:6) not found")
+    return (3, "counter for cpu (238:6) not found")
 
 def inventory_win_diskstat(info):
     for line in info:
@@ -117,20 +106,32 @@
         this_time = int(float(line[1]))
 
         try:
-            read_timedif, read_per_sec   = get_counter("diskstat.read", this_time, read_bytes_ctr)
-            write_timedif, write_per_sec = get_counter("diskstat.write", this_time, write_bytes_ctr)
+            read_per_sec  = get_rate("diskstat.read", this_time, read_bytes_ctr)
+            write_per_sec = get_rate("diskstat.write", this_time, write_bytes_ctr)
         except MKCounterWrapped, e:
             # make sure that initial check does not need three cycles for all counters
             # to be initialized
-            get_counter("diskstat.write", this_time, write_bytes_ctr)
+            get_rate("diskstat.write", this_time, write_bytes_ctr)
             raise e
 
         perfdata = [ ("read",  "%dc" % read_bytes_ctr),
                      ("write", "%dc" % write_bytes_ctr) ]
 
-        return (0, "OK - reading %.1f MB/s, writing %.1f MB/s (in last %d secs)" %
-                (read_per_sec / 1048576, write_per_sec / 1048576, read_timedif), perfdata)
+        return (0, "reading %.1f MB/s, writing %.1f MB/s" %
+                (read_per_sec / 1048576, write_per_sec / 1048576), perfdata)
 
-check_info['winperf.diskstat'] = (check_win_diskstat, "Disk IO", 1, inventory_win_diskstat)
-check_info['winperf.cpuusage'] = (check_win_cpuusage, "CPU Usage", 1, inventory_win_cpuusage)
 check_config_variables.append("winperf_cpu_default_levels")
+
+check_info["winperf.cpuusage"] = {
+    'check_function':      check_win_cpuusage,
+    'inventory_function':  inventory_win_cpuusage,
+    'service_description': 'CPU Usage',
+    'has_perfdata':        True,
+}
+
+check_info["winperf.diskstat"] = {
+    'check_function':      check_win_diskstat,
+    'inventory_function':  inventory_win_diskstat,
+    'service_description': 'Disk IO',
+    'has_perfdata':        True,
+}
diff -Nru check-mk-1.2.2p3/winperf.cpuusage check-mk-1.2.6p12/winperf.cpuusage
--- check-mk-1.2.2p3/winperf.cpuusage	2013-11-05 09:23:07.000000000 +0000
+++ check-mk-1.2.6p12/winperf.cpuusage	2015-06-24 09:48:36.000000000 +0000
@@ -1,18 +1,21 @@
-title: Measure CPU usage on Windows systems
+title: CPU usage on Windows systems (deprecated)
 agents: windows
-author: Mathias Kettner
+catalog: os/kernel
 license: GPL
 distribution: check_mk
 description:
  This check monitors the CPU usage on a Windows system using the
- windows performance counters (agent section {<<>>}).
+ windows performance counters (agent section {winperf}).
 
  In its current implementation this check is always OK. Setting a level
  for warning or a critical state is possible, but might
- not be very usefull, since the computed value is an average over
- just one check cycle. A combination with a larger value of {max_check_attempts}
+ not be very useful, since the computed value is an average over just one
+ check cycle. A combination with a larger value of {max_check_attempts}
  or {notification_delay} might be useful here.
 
+ Note: this check is deprecated and not used by modern Check_MK agents.
+ It is replaced by {winperf_processor.util}. + perfdata: The averaged percentage of CPU utilization during the last check cycle. diff -Nru check-mk-1.2.2p3/winperf.diskstat check-mk-1.2.6p12/winperf.diskstat --- check-mk-1.2.2p3/winperf.diskstat 2013-11-05 09:23:07.000000000 +0000 +++ check-mk-1.2.6p12/winperf.diskstat 2015-06-24 09:48:36.000000000 +0000 @@ -1,6 +1,6 @@ -title: Measure Disk IO on Windows systems +title: Disk IO on Windows systems agents: windows -author: Mathias Kettner +catalog: os/storage license: GPL distribution: check_mk description: diff -Nru check-mk-1.2.2p3/winperf_if check-mk-1.2.6p12/winperf_if --- check-mk-1.2.2p3/winperf_if 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/winperf_if 2015-09-21 10:59:54.000000000 +0000 @@ -0,0 +1,207 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. 
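Several hunks in this patch replace get_counter() with get_rate(): the old helper returned a (timedif, rate) pair, the new one returns only the rate. A simplified stand-in illustrating the semantics; the real helper is part of the Check_MK core and persists its state on disk per host:

class MKCounterWrapped(Exception):
    pass

_counters = {}  # countername -> (last_time, last_value)

def get_rate(countername, this_time, this_value):
    last = _counters.get(countername)
    _counters[countername] = (this_time, this_value)
    if last is None:
        raise MKCounterWrapped("counter %s not yet initialized" % countername)
    last_time, last_value = last
    if this_time <= last_time or this_value < last_value:
        raise MKCounterWrapped("counter %s wrapped" % countername)
    return (this_value - last_value) / float(this_time - last_time)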
+
+
+# Example output from agent
+# <<<winperf_if>>>
+# 1366721523.71 510
+# 3 instances: Ethernetadapter_der_AMD-PCNET-Familie__2_-_Paketplaner-Miniport Ethernetadapter_der_AMD-PCNET-Familie_-_Paketplaner-Miniport MS_TCP_Loopback_interface
+# -122 43364 1085829 41602 bulk_count
+# -110 293 4174 932 counter
+# -244 138 3560 466 counter
+# -58 155 614 466 counter
+# 10 100000000 100000000 10000000 rawcount
+# -246 21219 780491 20801 counter
+# 14 0 383 466 counter
+# 16 138 3176 0 counter
+# 18 0 0 0 rawcount
+# 20 0 0 0 rawcount
+# 22 0 1 0 rawcount
+# -4 22145 305338 20801 counter
+# 26 0 428 466 counter
+# 28 155 186 0 counter
+# 30 0 0 0 rawcount
+# 32 0 0 0 rawcount
+# 34 0 0 0 rawcount
+# <<<winperf_if:sep(44)>>>
+# Node,MACAddress,Name,NetConnectionID,NetConnectionStatus
+# WINDOWSXP,08:00:27:8D:47:A4,Ethernetadapter der AMD-PCNET-Familie,LAN-Verbindung,2
+# WINDOWSXP,,Asynchroner RAS-Adapter,,
+# WINDOWSXP,08:00:27:8D:47:A4,Paketplaner-Miniport,,
+# WINDOWSXP,,WAN-Miniport (L2TP),,
+# WINDOWSXP,50:50:54:50:30:30,WAN-Miniport (PPTP),,
+# WINDOWSXP,33:50:6F:45:30:30,WAN-Miniport (PPPOE),,
+# WINDOWSXP,,Parallelanschluss (direkt),,
+# WINDOWSXP,,WAN-Miniport (IP),,
+# WINDOWSXP,00:E5:20:52:41:53,Paketplaner-Miniport,,
+# WINDOWSXP,08:00:27:35:20:4D,Ethernetadapter der AMD-PCNET-Familie,LAN-Verbindung 2,2
+# WINDOWSXP,08:00:27:35:20:4D,Paketplaner-Miniport,,
+
+def convert_winperf_if(info):
+    def canonize_nic_name(n):
+        return n.replace("_", " ").replace("  ", " ")
+
+    lines = iter(info)
+    lines.next() # skip line with timestamp and counter number
+    nic_names = map(canonize_nic_name, lines.next()[2:])
+    nics = dict([ (n, {}) for n in nic_names ])
+
+    # Scan lines with counters
+    try:
+        while True:
+            line = lines.next()
+            counter = saveint(line[0])
+            if counter:
+                for nr, value in enumerate(line[1:len(nic_names)+1]):
+                    nics[nic_names[nr]][counter] = int(value)
+            # Not an integer: then this must be the line with the additional
+            # information from wmic (which is optional!)
+            else:
+                headers = line
+                while True:
+                    line = lines.next()
+                    as_dict = dict(zip(headers, line))
+                    nic_name = canonize_nic_name(as_dict["Name"])
+                    try:
+                        conn_id = int(as_dict["NetConnectionID"].split()[-1])
+                        nic_name += " " + str(conn_id)
+                    except:
+                        pass
+
+                    def transform_name(name):
+                        # Intel[R] PRO 1000 MT-Desktopadapter__3   (perf counter)
+                        # Intel(R) PRO/1000 MT-Desktopadapter 3    (wmic name)
+                        # Intel(R) PRO/1000 MT-Desktopadapter #3   (wmic InterfaceDescription)
+                        mod_nic_name = name
+                        for from_token, to_token in [ ("/", " "), ("(", "["), (")", "]"), ("#", " ") ]:
+                            for n in nic_names:
+                                if from_token in n:
+                                    # we do not modify it if this character is in any of the counter names
+                                    break
+                            else:
+                                mod_nic_name = mod_nic_name.replace(from_token, to_token).replace("  ", " ")
+                        return mod_nic_name
+
+                    found_match = False
+
+                    # Exact match
+                    if nic_name in nic_names:
+                        found_match = True
+
+                    # In the perf counters the nics have strange suffixes, e.g.
+                    # Ethernetadapter der AMD-PCNET-Familie 2 - Paketplaner-Miniport, while
+                    # in wmic it's only named "Ethernetadapter der AMD-PCNET-Familie 2".
+                    if not found_match:
+                        mod_nic_name = transform_name(nic_name)
+
+                        if mod_nic_name not in nic_names:
+                            for n in nic_names:
+                                if n.startswith(mod_nic_name + " "):
+                                    l = len(mod_nic_name)
+                                    if not (n[l:].strip()[0]).isdigit():
+                                        nic_name = n
+                                        found_match = True
+                                        break
+                        else:
+                            nic_name = mod_nic_name
+
+                    # The last straw: try to find the name as an exact match in other fields
+                    if not found_match:
+                        for entry in [ "NetConnectionID", "InterfaceDescription" ]:
+                            if as_dict.get(entry):
+                                mod_nic_name = transform_name(as_dict.get(entry))
+                                if mod_nic_name in nic_names:
+                                    nic_name = mod_nic_name
+                                    found_match = True
+                                    break
+
+                    if not found_match:
+                        # Ignore interfaces that do not have counters
+                        continue
+
+                    nics[nic_name].update(as_dict)
+    except StopIteration:
+        pass
+
+    # Now convert the dicts into the format that is needed by if.include
+    converted = []
+
+    # Sort NIC names and create an artificial index
+    nic_index = dict(map(lambda x: (x[1], x[0] + 1), enumerate(nic_names)))
+    nic_names.sort(reverse=True)
+
+    for nic_name in nic_names:
+        nic = nics[nic_name]
+        mac_txt = nic.get('MACAddress')
+        bandwidth = saveint(nic.get('Speed'))
+        if mac_txt:
+            mac = "".join(map(lambda x: chr(int(x, 16)), mac_txt.split(':')))
+        else:
+            mac = ''
+        converted.append((
+            str(nic_index[nic_name]),
+            nic_name,
+            "loopback" in nic_name.lower() and '24' or '6',
+            bandwidth or nic[10], # current bandwidth
+            # NetConnectionStatus: 2 is up, 7 is 'not connected'. If the plugin
+            # wmic_if is missing and we have link information we need to assume 'up':
+            nic.get('NetConnectionStatus', '2') == '2' and '1' or '2',
+            nic[-246], # ifInOctets
+            nic[14],   # inucast
+            0,         # inmcast
+            nic[16],   # non-unicast received
+            nic[18],   # ifInDiscards
+            nic[20],   # ifInErrors
+            nic[-4],   # ifOutOctets (bytes sent)
+            nic[26],   # outucast
+            0,
+            nic[28],   # outnonucast
+            nic[30],   # ifOutDiscards
+            nic[32],   # ifOutErrors
+            nic[34],   # ifOutQLen
+            nic_name,
+            mac,
+        ))
+
+    return converted
+
+def inventory_winperf_if(info):
+    return inventory_if_common(convert_winperf_if(info))
+
+def check_winperf_if(item, params, info):
+    return check_if_common(item, params, convert_winperf_if(info))
+
+
+check_info["winperf_if"] = {
+    'check_function':          check_winperf_if,
+    'inventory_function':      inventory_winperf_if,
+    'service_description':     'Interface %s',
+    'has_perfdata':            True,
+    'includes':                [ 'if.include' ],
+    'group':                   'if',
+    'default_levels_variable': 'if_default_levels',
+}
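The wmic-to-perf-counter name matching in convert_winperf_if above is heuristic. A trimmed, standalone sketch of the character substitution it performs (sample names taken from the comment in transform_name):

def transform_name(name, counter_names):
    # Replace characters that the perf counter interface mangles, but only
    # if the character does not occur in any real counter name.
    for from_token, to_token in [("/", " "), ("(", "["), (")", "]"), ("#", " ")]:
        if not any(from_token in n for n in counter_names):
            name = name.replace(from_token, to_token).replace("  ", " ")
    return name

counters = ["Intel[R] PRO 1000 MT-Desktopadapter"]
assert transform_name("Intel(R) PRO/1000 MT-Desktopadapter", counters) \
        == "Intel[R] PRO 1000 MT-Desktopadapter"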
diff -Nru check-mk-1.2.2p3/winperf_msx_queues check-mk-1.2.6p12/winperf_msx_queues
--- check-mk-1.2.2p3/winperf_msx_queues	2013-11-05 09:23:08.000000000 +0000
+++ check-mk-1.2.6p12/winperf_msx_queues	2015-09-16 14:25:30.000000000 +0000
@@ -7,7 +7,7 @@
 # |           | |___| | | |  __/ (__|   <    | |  | | . \            |
 # |            \____|_| |_|\___|\___|_|\_\___|_|  |_|_|\_\           |
 # |                                                                  |
-# | Copyright Mathias Kettner 2013             mk@mathias-kettner.de |
+# | Copyright Mathias Kettner 2014             mk@mathias-kettner.de |
 # +------------------------------------------------------------------+
 #
 # This file is part of Check_MK.
@@ -45,9 +45,23 @@
 # 10362 0 rawcount
 # 10364 811 rawcount
 
-# Default warn/crit levels for length of queues
+# Example output from an Exchange 2013 server:
+# <<<winperf_msx_queues>>>
+# 1385554029.05 12048
+# 4 instances: niedrige_priorität normale_priorität hohe_priorität _total
+# 2 0 0 0 0 rawcount
+# 4 0 0 0 0 rawcount
+# 6 0 0 0 0 rawcount
+
+# For legacy reasons we still need this var.
 msx_queues_default_levels = (500, 2000)
 
+# Default warn/crit levels for length of queues
+factory_settings['winperf_msx_queues_factory'] = {
+    'levels': ( 500, 2000 ),
+}
+
+
 # Queues to be inventorized (numbers are relative to counter base)
 winperf_msx_queues = { "Active Remote Delivery"  : "2",
@@ -56,31 +70,63 @@
                        "Poison Queue Length"     : "44",
 }
 
+winperf_msx_queues_inventory = []
+
 def inventory_winperf_msx_queues(info):
-    num_instances = int(info[1][0])
-    if num_instances > 0:
-        return [ (name, "msx_queues_default_levels") for name in winperf_msx_queues ]
+    if len(info) > 1:
+        num_instances = int(info[1][0])
+        if num_instances > 0:
+            # It's possible to set the wanted queues via WATO
+            inventory_rules = {}
+            for ruleset in host_extra_conf(g_hostname, winperf_msx_queues_inventory):
+                inventory_rules.update(dict(ruleset))
+            # In case rules are set for this host,
+            # only these rules are used
+            if inventory_rules:
+                queues = inventory_rules
+            else:
+                queues = winperf_msx_queues
+            return [ (name, { "offset" : offset } ) for name, offset in queues.items() ]
 
 def check_winperf_msx_queues(item, params, info):
-    num_instances = int(info[1][0])
-    if num_instances < 1:
-        return (3, "UNKNOWN - no counters available, transport service running?")
+    # current windows agents should not produce winperf sections with no data after the header but
+    # this ensures compatibility with older agents
+    if len(info) < 2 or int(info[1][0]) < 1:
+        return 3, "no counters available, transport service running?"
+
+    # Old default case:
+    if type(params) == tuple:
+        warn, crit = params
+        offset = winperf_msx_queues.get(item)
+    else:
+        warn, crit = params['levels']
+        if params.get('offset'):
+            offset = str(params['offset'])
+        # If no offset is set, we assume that still the default counters are used
+        else:
+            offset = winperf_msx_queues.get(item)
 
     for line in info[2:]:
-        if line[0] == winperf_msx_queues.get(item):
-            length = int(line[1])
-            warn, crit = params
+        if line[0] == offset:
+            length = int(line[-2])
             perfdata = [("length", length, warn, crit)]
-            infotext = " - %d entries" % length
+            infotext = "%d entries" % length
             if length >= crit:
-                return (2, "CRIT" + infotext, perfdata)
+                return (2, infotext, perfdata)
             elif length >= warn:
-                return (1, "WARN" + infotext, perfdata)
-            return (0, "OK" + infotext, perfdata)
+                return (1, infotext, perfdata)
+            return (0, infotext, perfdata)
 
-    return (3, "UNKNOWN - counter not found")
+    return (3, "counter not found")
 
-check_info['winperf_msx_queues'] = (check_winperf_msx_queues, "Queue %s", 1, inventory_winperf_msx_queues)
 check_config_variables.append("winperf_msx_queues")
-checkgroup_of["winperf_msx_queues"] = "msx_queues"
+
+check_info["winperf_msx_queues"] = {
+    'check_function':           check_winperf_msx_queues,
+    'inventory_function':       inventory_winperf_msx_queues,
+    'service_description':      'Queue %s',
+    'has_perfdata':             True,
+    "default_levels_variable" : "winperf_msx_queues_factory",
+    'group':                    'msx_queues',
+}
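Because winperf_msx_queues parameters may now arrive as a legacy tuple or as a WATO dictionary, the check resolves levels and counter offset in two ways. The branch, extracted into a standalone sketch (the helper name is illustrative):

winperf_msx_queues = { "Active Remote Delivery": "2", "Poison Queue Length": "44" }

def resolve_levels_and_offset(item, params):
    if type(params) == tuple:                  # legacy: plain (warn, crit)
        warn, crit = params
        offset = winperf_msx_queues.get(item)
    else:                                      # new: dict from a WATO rule
        warn, crit = params['levels']
        if params.get('offset'):
            offset = str(params['offset'])
        else:                                  # fall back to the default counters
            offset = winperf_msx_queues.get(item)
    return warn, crit, offset

assert resolve_levels_and_offset("Poison Queue Length", (500, 2000)) == (500, 2000, "44")
assert resolve_levels_and_offset("Foo", {"levels": (1, 2), "offset": 6}) == (1, 2, "6")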
diff -Nru check-mk-1.2.2p3/winperf_phydisk check-mk-1.2.6p12/winperf_phydisk
--- check-mk-1.2.2p3/winperf_phydisk	2013-11-05 09:23:08.000000000 +0000
+++ check-mk-1.2.6p12/winperf_phydisk	2015-09-21 10:59:54.000000000 +0000
@@ -7,7 +7,7 @@
 # |           | |___| | | |  __/ (__|   <    | |  | | . \            |
 # |            \____|_| |_|\___|\___|_|\_\___|_|  |_|_|\_\           |
 # |                                                                  |
-# | Copyright Mathias Kettner 2013             mk@mathias-kettner.de |
+# | Copyright Mathias Kettner 2014             mk@mathias-kettner.de |
 # +------------------------------------------------------------------+
 #
 # This file is part of Check_MK.
@@ -63,33 +63,47 @@
 check_includes['winperf_phydisk'] = [ "diskstat.include" ]
 
 def winperf_phydisk_convert(info):
-    disks = [ d.split('_')[-1] for d in info[1][2:-1] ]
+    # node_info has been activated. This check simply ignores this
+    # for now.
+    # In case disk performance counters are not enabled, the agent sends
+    # an almost empty section, where the second line is missing completely
+    if len(info) == 1:
+        return []
+    disks = [ d.split('_')[-1] for d in info[1][3:-1] ]
     for line in info[2:]:
-        if line[0] == '-14':
-            disk_read_bytes = [ int(x) / 512 for x in line[1:-2] ]
-        elif line[0] == '-12':
-            disk_write_bytes = [ int(x) / 512 for x in line[1:-2] ]
-        elif line[0] == '-20':
-            disk_reads = [ int(x) for x in line[1:-2] ]
-        elif line[0] == '-18':
-            disk_writes = [ int(x) for x in line[1:-2] ]
-        elif line[0] == '1168': # Average Disk Read Queue Length
-            disk_readq_ctrs = [ int(x) for x in line[1:-2] ]
-        elif line[0] == '1170': # Average Disk Read Queue Length
-            disk_writeq_ctrs = [ int(x) for x in line[1:-2] ]
+        if line[1] == '-14':
+            disk_read_bytes = [ int(x) / 512 for x in line[2:-2] ]
+        elif line[1] == '-12':
+            disk_write_bytes = [ int(x) / 512 for x in line[2:-2] ]
+        elif line[1] == '-20':
+            disk_reads = [ int(x) for x in line[2:-2] ]
+        elif line[1] == '-18':
+            disk_writes = [ int(x) for x in line[2:-2] ]
+        elif line[1] == '1168': # Average Disk Read Queue Length
+            disk_readq_ctrs = [ int(x) for x in line[2:-2] ]
+        elif line[1] == '1170': # Average Disk Write Queue Length
+            disk_writeq_ctrs = [ int(x) for x in line[2:-2] ]
 
     # Missing columns are denoted by negative values (Linux sends latency
     # information here)
     empty = [ -1 for x in disks ]
+    none  = [ None for x in disks ] # Used as dummy node info
 
-    return zip(disks, disk_read_bytes, disk_write_bytes, disk_reads, disk_writes, empty, disk_readq_ctrs, disk_writeq_ctrs)
+    return zip(none, disks, disk_read_bytes, disk_write_bytes, disk_reads, disk_writes, empty, disk_readq_ctrs, disk_writeq_ctrs)
 
 def inventory_winperf_phydisk(info):
     return inventory_diskstat_generic(winperf_phydisk_convert(info))
 
 def check_winperf_phydisk(item, params, info):
-    this_time = int(float(info[0][0]))
+    this_time = int(time.time())
     return check_diskstat_generic(item, params, this_time, winperf_phydisk_convert(info))
 
-check_info['winperf_phydisk'] = (check_winperf_phydisk, "Disk IO %s", 1, inventory_winperf_phydisk)
-checkgroup_of["winperf_phydisk"] = "disk_io"
+
+check_info["winperf_phydisk"] = {
+    'check_function':      check_winperf_phydisk,
+    'inventory_function':  inventory_winperf_phydisk,
+    'service_description': 'Disk IO %s',
+    'node_info':           True,
+    'has_perfdata':        True,
+    'group':               'disk_io',
+}
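The rows produced by winperf_phydisk_convert above gained a leading None column because node_info is now enabled. A sketch of the row shape handed to check_diskstat_generic (field meanings per the comments above, values invented):

# (node, disk, read_sectors_ctr, write_sectors_ctr, reads_ctr, writes_ctr,
#  latency_placeholder, read_queue_ctr, write_queue_ctr)
row = (None, "C:", 812736, 554112, 10327, 8293, -1, 118, 97)

assert row[0] is None  # single host: no cluster node name
assert row[6] == -1    # Windows sends no latency; -1 marks the column as missing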
diff -Nru check-mk-1.2.2p3/winperf_processor check-mk-1.2.6p12/winperf_processor
--- check-mk-1.2.2p3/winperf_processor	2013-11-05 09:23:08.000000000 +0000
+++ check-mk-1.2.6p12/winperf_processor	2015-09-21 10:59:54.000000000 +0000
@@ -7,7 +7,7 @@
 # |           | |___| | | |  __/ (__|   <    | |  | | . \            |
 # |            \____|_| |_|\___|\___|_|\_\___|_|  |_|_|\_\           |
 # |                                                                  |
-# | Copyright Mathias Kettner 2013             mk@mathias-kettner.de |
+# | Copyright Mathias Kettner 2014             mk@mathias-kettner.de |
 # +------------------------------------------------------------------+
 #
 # This file is part of Check_MK.
@@ -25,7 +25,7 @@
 # Boston, MA 02110-1301 USA.
 
-winperf_cpu_default_levels = ( 101.0, 101.0 )
+winperf_cpu_default_levels = { "levels": ( 101.0, 101.0 ) }
 
 def inventory_winperf_util(info):
     if len(info) <= 1:
@@ -40,7 +40,9 @@
 
 # params: levels for warn / crit in percentage
-def check_winperf_util(item, params, info):
+def check_winperf_util(_no_item, params, info):
+    if not info:
+        return 3, "Got no information from agent"
     this_time = int(float(info[0][0]))
 
     for line in info[1:]:
@@ -49,7 +51,7 @@
         # sums up to total (called _Total). We only need that last value.
         ticks = int(line[-2])
         num_cpus = len(line) - 3
-        timedif, ticks_per_sec = get_counter("winperf_util", this_time, ticks)
+        ticks_per_sec = get_rate("winperf_util", this_time, ticks)
 
         # We get the value of the PERF_100NSEC_TIMER_INV here.
         # This counter type shows the average percentage of active time observed
         # during the sample interval. This is an inverse counter. Counters of this
@@ -60,30 +62,26 @@
         cpusecs_per_sec = ticks_per_sec / 10000000.0
         used_perc = 100.0 * (1 - cpusecs_per_sec)
 
+        # Due to timing inaccuracies the measured level can become > 100%.
+        # This makes users unhappy, so cut it off.
         if used_perc < 0:
             used_perc = 0
         elif used_perc > 100:
             used_perc = 100
 
-        if params != None:
-            warn, crit = params
-        else:
-            warn, crit = None, None
-        perfdata = [ ("util", "%.2f" % used_perc, warn, crit, 0, num_cpus) ]
-        if num_cpus == 1:
-            num_txt = ""
-        else:
-            num_txt = " / %d CPUs" % num_cpus
-
-        infotext = " - %0.2f%% used%s (in last %d secs)" % (used_perc, num_txt, timedif)
-
-        if crit != None and used_perc >= crit:
-            return (2, "CRIT" + infotext, perfdata)
-        elif warn != None and used_perc >= warn:
-            return (1, "WARN" + infotext, perfdata)
-        else:
-            return (0, "OK" + infotext, perfdata)
-    return (3, "UNKNOWN - counter for CPU (6) not found")
-
-check_info['winperf_processor.util'] = (check_winperf_util, "CPU utilization", 1, inventory_winperf_util)
-checkgroup_of['winperf_processor.util'] = "cpu_utilization"
+        state, infotext, perfdata = check_cpu_util(used_perc, params, this_time)
+        perfdata[0] = perfdata[0][:5] + (num_cpus,)
+        infotext += ", %d CPUs" % num_cpus
+        return state, infotext, perfdata
+
+    return (3, "counter for CPU (6) not found")
+
+
+check_info["winperf_processor.util"] = {
+    'check_function':      check_winperf_util,
+    'inventory_function':  inventory_winperf_util,
+    'service_description': 'CPU utilization',
+    'has_perfdata':        True,
+    'group':               'cpu_utilization_os',
+    'includes':            [ "cpu_util.include" ],
+}
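The PERF_100NSEC_TIMER_INV arithmetic above is easy to misread, since the counter accumulates idle time. A worked, standalone example of the two-sample computation for a single CPU (numbers invented):

# Two samples of the inverse idle-time counter, 60 seconds apart.
# The counter advances by 100ns units of *idle* time.
t1, ticks1 = 1442827000, 5000000000
t2, ticks2 = 1442827060, 5480000000   # 480e6 ticks = 48s idle within 60s

ticks_per_sec   = (ticks2 - ticks1) / float(t2 - t1)  # 8.0e6
cpusecs_per_sec = ticks_per_sec / 10000000.0          # 0.8 idle CPU seconds per second
used_perc       = 100.0 * (1 - cpusecs_per_sec)       # 20% utilization
assert round(used_perc, 1) == 20.0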
diff -Nru check-mk-1.2.2p3/winperf_processor.util check-mk-1.2.6p12/winperf_processor.util
--- check-mk-1.2.2p3/winperf_processor.util	2013-11-05 09:23:07.000000000 +0000
+++ check-mk-1.2.6p12/winperf_processor.util	2015-06-24 09:48:36.000000000 +0000
@@ -1,31 +1,38 @@
-title: Measure CPU utilization on Windows systems
+title: CPU utilization on Windows systems
 agents: windows
-author: Mathias Kettner
+catalog: os/kernel
 license: GPL
 distribution: check_mk
 description:
  This check monitors the CPU utilization on a Windows system using the
- windows performance counters (agent section {<<>>}).
+ windows performance counters (agent section {winperf_processor}).
 This check replaces {winperf.cpuusage} which is outdated since version {1.1.11i1}
 
  Setting a level for warning or a critical state is possible, but might
 not be very useful, since the computed value is an average over
- just one check cycle. A combination with a larger value of {max_check_attempts}
- or {notification_delay} might be useful here.
+ just one check cycle. Consider setting the {average} option for this purpose.
+ A combination with a larger value of {max_check_attempts}
+ or {notification_delay} might also be useful here.
 
 perfdata:
- The averaged percentage of CPU utilization during the last check cycle.
+ One or two values: the first value is current usage in percent - ranging from
+ 0 to 100. The "maximum" value is not 100, but the number of CPU threads. This
+ can be used for scaling the graph in terms of the number of used CPU threads.
+
+ If averaging is enabled then a second value is sent: the averaged CPU utilization
+ ranging from 0 to 100.
 
 inventory:
  A check of this type is automatically created for all Windows hosts.
 
 [parameters]
-warning (int): The percentage of CPU usage that triggers
- a WARNING state
-critical (int): The percentage of CPU usage that triggers
- a CRITICAL state
+parameters(dict): A dictionary with the following keys:
 
-[configuration]
-winperf_cpu_default_levels (int, int): Default levels warning and critical
+ {"levels"}: Either {None} for no levels, a tuple of warn and crit (in percent) or
+ a dictionary with predictive levels settings.
 
+ {"average"}: A number of minutes for enabling averaging.
+
+[configuration]
+winperf_cpu_default_levels(dict): Default levels, preset to { "levels": (101.0, 101.0) }
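A configuration sketch matching the dictionary format documented above (threshold and averaging values are illustrative, not shipped defaults):

# main.mk: static thresholds plus 15-minute averaging for
# winperf_processor.util
winperf_cpu_default_levels = {
    "levels"  : (90.0, 95.0),  # warn/crit in percent
    "average" : 15,            # minutes; enables the second perfdata value
}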
diff -Nru check-mk-1.2.2p3/winperf_tcp_conn check-mk-1.2.6p12/winperf_tcp_conn
--- check-mk-1.2.2p3/winperf_tcp_conn	1970-01-01 00:00:00.000000000 +0000
+++ check-mk-1.2.6p12/winperf_tcp_conn	2015-06-24 09:48:37.000000000 +0000
@@ -0,0 +1,74 @@
+#!/usr/bin/python
+# -*- encoding: utf-8; py-indent-offset: 4 -*-
+# +------------------------------------------------------------------+
+# |             ____ _               _        __  __ _  __           |
+# |            / ___| |__   ___  ___| | __   |  \/  | |/ /           |
+# |           | |   | '_ \ / _ \/ __| |/ /   | |\/| | ' /            |
+# |           | |___| | | |  __/ (__|   <    | |  | | . \            |
+# |            \____|_| |_|\___|\___|_|\_\___|_|  |_|_|\_\           |
+# |                                                                  |
+# | Copyright Mathias Kettner 2014             mk@mathias-kettner.de |
+# +------------------------------------------------------------------+
+#
+# This file is part of Check_MK.
+# The official homepage is at http://mathias-kettner.de/check_mk.
+#
+# check_mk is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by
+# the Free Software Foundation in version 2.  check_mk is  distributed
+# in the hope that it will be useful, but WITHOUT ANY WARRANTY;  with-
+# out even the implied warranty of  MERCHANTABILITY  or  FITNESS FOR A
+# PARTICULAR PURPOSE. See the  GNU General Public License for more de-
+# tails. You should have  received  a copy of the  GNU  General Public
+# License along with GNU Make; see the file  COPYING.  If  not,  write
+# to the Free Software Foundation, Inc., 51 Franklin St,  Fifth Floor,
+# Boston, MA 02110-1301 USA.
+
+# Example output from agent:
+# <<<winperf_tcp_conn>>>
+# 1368619819.06 638
+# 2 53267 counter
+# 4 3 rawcount
+# 6 23 rawcount
+# 8 1 rawcount
+# 10 1 rawcount
+# 12 12 rawcount
+# 14 34830 counter
+# 16 18437 counter
+
+def inventory_winperf_tcp_conn(info):
+    return [ (None, {}) ]
+
+def check_winperf_tcp_conn(item, params, info):
+    fields = [
+        ( 2, "Established", "ESTABLISHED" ),
+    ]
+    infotext = ""
+    worst_state = 0
+    perfdata = []
+    for offset, name, param_key in fields:
+        value = saveint(info[offset][1])
+        infotext += "%s: %s" % (name, value)
+        warn, crit = "", ""
+        if params.get(param_key):
+            warn, crit = params.get(param_key)
+            if value >= crit:
+                worst_state = 2
+                infotext += "(!!) (critical at %d)" % crit
+            elif value >= warn:
+                worst_state = max(1, worst_state)
+                infotext += "(!) (warning at %d)" % warn
+        infotext += ", "
+        perfdata.append( (name, value, warn, crit) )
+
+    infotext = infotext[:-2]
+    return worst_state, infotext, perfdata
+
+check_info["winperf_tcp_conn"] = {
+    'check_function':      check_winperf_tcp_conn,
+    'inventory_function':  inventory_winperf_tcp_conn,
+    'service_description': 'TCP connections',
+    'has_perfdata':        True,
+    'group':               'tcp_conn_stats',
+}
+
diff -Nru check-mk-1.2.2p3/winperf_ts_sessions check-mk-1.2.6p12/winperf_ts_sessions
--- check-mk-1.2.2p3/winperf_ts_sessions	1970-01-01 00:00:00.000000000 +0000
+++ check-mk-1.2.6p12/winperf_ts_sessions	2015-06-24 09:48:37.000000000 +0000
@@ -0,0 +1,77 @@
+#!/usr/bin/python
+# -*- encoding: utf-8; py-indent-offset: 4 -*-
+# +------------------------------------------------------------------+
+# |             ____ _               _        __  __ _  __           |
+# |            / ___| |__   ___  ___| | __   |  \/  | |/ /           |
+# |           | |   | '_ \ / _ \/ __| |/ /   | |\/| | ' /            |
+# |           | |___| | | |  __/ (__|   <    | |  | | . \            |
+# |            \____|_| |_|\___|\___|_|\_\___|_|  |_|_|\_\           |
+# |                                                                  |
+# | Copyright Mathias Kettner 2014             mk@mathias-kettner.de |
+# +------------------------------------------------------------------+
+#
+# This file is part of Check_MK.
+# The official homepage is at http://mathias-kettner.de/check_mk.
+#
+# check_mk is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by
+# the Free Software Foundation in version 2.  check_mk is  distributed
+# in the hope that it will be useful, but WITHOUT ANY WARRANTY;  with-
+# out even the implied warranty of  MERCHANTABILITY  or  FITNESS FOR A
+# PARTICULAR PURPOSE. See the  GNU General Public License for more de-
+# tails. You should have  received  a copy of the  GNU  General Public
+# License along with GNU Make; see the file  COPYING.  If  not,  write
+# to the Free Software Foundation, Inc., 51 Franklin St,  Fifth Floor,
+# Boston, MA 02110-1301 USA.
+
+# Example output from agent:
+# <<<winperf_ts_sessions>>>
+# 1385714515.93 2102
+# 2 20 rawcount
+# 4 18 rawcount
+# 6 2 rawcount
+
+# Counters, relative to the base ID (e.g. 2102)
+# 2 Total number of Terminal Services sessions.
+# 4 Number of active Terminal Services sessions.
+# 6 Number of inactive Terminal Services sessions.
+
+def inventory_winperf_ts_sessions(info):
+    if len(info) > 1:
+        return [ (None, {}) ]
+
+def check_winperf_ts_sessions(_unused, params, info):
+    if not info or len(info) == 1:
+        return 3, "Performance counters not available"
+    total, active, inactive = [ int(l[1]) for l in info[1:4] ]
+
+    # Tom Moore said, that the order of the columns has recently changed
+    # in newer Windows versions (hooray!) and is now active, inactive, total.
+    # We try to accommodate for that.
+ if active + inactive != total: + active, inactive, total = total, active, inactive + + state = 0 + state_txt = [] + for val, key, title in [ (active, 'active', 'Active'), + (inactive, 'inactive', 'Inactive') ]: + txt = '%d %s' % (val, title) + if key in params: + if val > params[key][0]: + state = 2 + txt += '(!!)' + elif val > params[key][1]: + state = max(state, 1) + txt += '(!)' + state_txt.append(txt) + + perfdata = [ ('active', active, ), ('inactive', inactive) ] + return state, ", ".join(state_txt), perfdata + +check_info["winperf_ts_sessions"] = { + 'check_function': check_winperf_ts_sessions, + 'inventory_function': inventory_winperf_ts_sessions, + 'service_description': 'Sessions', + 'has_perfdata': True, + 'group': 'winperf_ts_sessions', +} diff -Nru check-mk-1.2.2p3/win_printers check-mk-1.2.6p12/win_printers --- check-mk-1.2.2p3/win_printers 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/win_printers 2015-09-21 10:59:54.000000000 +0000 @@ -0,0 +1,98 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. + +# Example output from agent: +# Put here the example output from your TCP-Based agent. 
If the
+# check is SNMP-Based, then remove this section
+
+def inventory_win_printers(info):
+    return [ (" ".join(x[:-3]), None) for x in info ]
+
+def check_win_printers(item, params, info):
+    status_map = {
+        1 : "Other",
+        2 : "Unknown",
+        3 : "Idle",
+        4 : "Printing",
+        5 : "Warming Up",
+        6 : "Stopped Printing",
+        7 : "Offline",
+    }
+    error_map = {
+        0  : "Unknown",
+        1  : "Other",
+        2  : "No Error",
+        3  : "Low Paper",
+        4  : "No Paper",
+        5  : "Low Toner",
+        6  : "No Toner",
+        7  : "Door Open",
+        8  : "Jammed",
+        9  : "Offline",
+        10 : "Service Requested",
+        11 : "Output Bin Full"
+    }
+
+    if params == None:
+        warn, crit = None, None
+    else:
+        warn, crit = params
+
+    for line in info:
+        name = " ".join(line[:-3])
+        if name == item:
+            state = 0
+            current_jobs, status, error = map(saveint, line[-3:])
+
+            error_text = ""
+            if error in [ 9, 10 ]:
+                state = 2
+                error_text = "Error State: %s(!!)" % error_map[error]
+            elif error in [ 8, 11 ]:
+                state = 1
+                error_text = "Error State: %s(!)" % error_map[error]
+
+            queue_label = ""
+            if crit != None and current_jobs >= crit:
+                state = 2
+                queue_label = "(!!)"
+            elif warn != None and current_jobs >= warn:
+                state = max(1, state)
+                queue_label = "(!)"
+
+            return state, "%s jobs current%s, State: %s, %s" %\
+                ( current_jobs, queue_label, status_map[status], error_text )
+
+    return 3, "Printer not found in agent output"
+
+check_info["win_printers"] = {
+    "check_function"      : check_win_printers,
+    "group"               : "windows_printer_queues",
+    "inventory_function"  : inventory_win_printers,
+    "service_description" : "Printer %s",
+    "has_perfdata"        : False,
+}
+
diff -Nru check-mk-1.2.2p3/win_reg_uninstall check-mk-1.2.6p12/win_reg_uninstall
--- check-mk-1.2.2p3/win_reg_uninstall	1970-01-01 00:00:00.000000000 +0000
+++ check-mk-1.2.6p12/win_reg_uninstall	2015-09-21 10:59:54.000000000 +0000
@@ -0,0 +1,60 @@
+#!/usr/bin/python
+# -*- encoding: utf-8; py-indent-offset: 4 -*-
+# +------------------------------------------------------------------+
+# |             ____ _               _        __  __ _  __           |
+# |            / ___| |__   ___  ___| | __   |  \/  | |/ /           |
+# |           | |   | '_ \ / _ \/ __| |/ /   | |\/| | ' /            |
+# |           | |___| | | |  __/ (__|   <    | |  | | . \            |
+# |            \____|_| |_|\___|\___|_|\_\___|_|  |_|_|\_\           |
+# |                                                                  |
+# | Copyright Mathias Kettner 2013             mk@mathias-kettner.de |
+# +------------------------------------------------------------------+
+#
+# This file is part of Check_MK.
+# The official homepage is at http://mathias-kettner.de/check_mk.
+#
+# check_mk is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by
+# the Free Software Foundation in version 2.  check_mk is  distributed
+# in the hope that it will be useful, but WITHOUT ANY WARRANTY;  with-
+# out even the implied warranty of  MERCHANTABILITY  or  FITNESS FOR A
+# PARTICULAR PURPOSE. See the  GNU General Public License for more de-
+# tails. You should have  received  a copy of the  GNU  General Public
+# License along with GNU Make; see the file  COPYING.  If  not,  write
+# to the Free Software Foundation, Inc., 51 Franklin St,  Fifth Floor,
+# Boston, MA 02110-1301 USA.
+
+# agent output
+# <<<win_reg_uninstall:sep(124)>>>
+# ...
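The inventory function below converts the registry's date field (a bare YYYYMMDD string, when present) into a Unix timestamp. Just that conversion, as a standalone sketch:

import re, time

def parse_install_date(date):
    # win_reg_uninstall only trusts dates that look like 20YYMMDD;
    # anything else is kept as an empty string
    if re.match("^20\d{6}", date):
        return int(time.mktime(time.strptime(date, "%Y%m%d")))
    return ""

assert parse_install_date("20140326") > 0
assert parse_install_date("") == ""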
+ +def inv_win_reg_uninstall(info): + paclist = inv_tree("software.packages:") + for line in info: + if len(line) != 7: + continue + + display_name, publisher, path, pacname, version, estimated_size, date = line + install_date = "" + if re.match("^20\d{6}", date): + install_date = int(time.mktime(time.strptime(date, "%Y%m%d"))) + size = saveint(estimated_size) + if size == 0: + size = None + + entry = { + "name" : pacname, + "version" : version, + "vendor" : publisher, + "summary" : display_name, + "install_date" : install_date, + "size" : size, + "path" : path, + "package_type" : "registry", + } + paclist.append(entry) + +inv_info['win_reg_uninstall'] = { + "inv_function" : inv_win_reg_uninstall, + "unicode" : True, +} diff -Nru check-mk-1.2.2p3/win_system check-mk-1.2.6p12/win_system --- check-mk-1.2.2p3/win_system 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/win_system 2015-09-21 10:59:54.000000000 +0000 @@ -0,0 +1,58 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. + +# Example output: +# <<>> +# Manufacturer : Oracle Corporation +# Name : Computergehäuse +# Model : +# HotSwappable : +# InstallDate : +# PartNumber : +# SerialNumber : + + +def inv_win_system(info): + node = inv_tree("hardware.system.") + for line in info: + if len(line) > 2: + line = [ line[0], ":".join(line[1:]) ] + varname, value = line + varname = re.sub(" *","", varname) + value = re.sub("^ ", "", value) + if varname == "SerialNumber": + node["serial"] = value + elif varname == "Manufacturer": + node["vendor"] = value + elif varname == "Name": + node["product"] = value + elif varname == "Model": + node["family"] = value + +inv_info['win_system'] = { + "inv_function" : inv_win_system, + "unicode" : True, +} diff -Nru check-mk-1.2.2p3/win_video check-mk-1.2.6p12/win_video --- check-mk-1.2.2p3/win_video 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/win_video 2015-09-21 10:59:54.000000000 +0000 @@ -0,0 +1,60 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . 
\ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. + +# Example output: +# <<>> +# Name : VirtualBox Graphics Adapter +# Description : VirtualBox Graphics Adapter +# Caption : VirtualBox Graphics Adapter +# AdapterCompatibility : Oracle Corporation +# VideoProcessor : +# DriverVersion : 4.3.10.0 +# DriverDate : 20140326000000.000000-000 +# MaxMemorySupported : + + +def inv_win_video(info): + node = inv_tree("hardware.video:") + array = {} + for line in info: + if len(line) > 2: + line = [ line[0], ":".join(line[1:]) ] + varname, value = line + varname = re.sub(" *","", varname) + value = re.sub("^ ", "", value) + if varname == "Name": + array["name"] = value + elif varname == "DriverVersion": + array["driver_version"] = value + elif varname == "DriverDate": + array["driver_date"] = int(time.mktime(time.strptime(value.split(".")[0], "%Y%m%d%H%M%S"))) + node.append(array) + + +inv_info['win_video'] = { + "inv_function" : inv_win_video, + "unicode" : True, +} diff -Nru check-mk-1.2.2p3/win_wmi_software check-mk-1.2.6p12/win_wmi_software --- check-mk-1.2.2p3/win_wmi_software 1970-01-01 00:00:00.000000000 +0000 +++ check-mk-1.2.6p12/win_wmi_software 2015-09-21 10:59:54.000000000 +0000 @@ -0,0 +1,59 @@ +#!/usr/bin/python +# -*- encoding: utf-8; py-indent-offset: 4 -*- +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. 
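The DriverDate conversion in inv_win_video above cuts the WMI datetime at the dot, discarding the fractional seconds and UTC offset suffix. The same step, standalone:

import time

driver_date = "20140326000000.000000-000"   # WMI datetime as shown above
timestamp = int(time.mktime(time.strptime(driver_date.split(".")[0], "%Y%m%d%H%M%S")))
assert timestamp > 0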
+ +# Example output +# <<>> +# 64 Bit HP CIO Components Installer|Hewlett-Packard|15.2.1 +# Adobe Flash Player 12 ActiveX|Adobe Systems Incorporated|12.0.0.70 +# Microsoft Visio 2010 Interactive Guide DEU|Microsoft|1.2.1 +# Microsoft Outlook 2010 Interactive Guide DEU|Microsoft|1.2.1 +# VMware vSphere Client 4.1|VMware, Inc.|4.1.0.17435 +# Microsoft Office Professional Plus 2010|Microsoft Corporation|14.0.7015.1000 + + +def inv_win_wmi_software(info): + paclist = inv_tree("software.packages:") + for line in info: + pacname, vendor, version = line[0:3] + dat = line[3] + install_date = "" + if len(dat) == 8 and re.match("^20", dat): + install_date = int(time.mktime(time.strptime(dat, "%Y%m%d"))) + + entry = { + "name" : pacname, + "version" : version, + "vendor" : vendor.replace('\x00', ''), # Can happen, reason unclear + "install_date" : install_date, + "package_type" : "wmi", + } + paclist.append(entry) + + +inv_info['win_wmi_software'] = { + "inv_function" : inv_win_wmi_software, + "unicode" : True, +} diff -Nru check-mk-1.2.2p3/wmic_process check-mk-1.2.6p12/wmic_process --- check-mk-1.2.2p3/wmic_process 2013-11-05 09:23:08.000000000 +0000 +++ check-mk-1.2.6p12/wmic_process 2015-06-24 09:48:37.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. @@ -30,7 +30,7 @@ count, mem, page, userc, kernelc = 0, 0, 0, 0, 0 cpucores = 1 if len(info) == 0: - return (3, "UNKNOWN - No output from agent in section wmic_process") + return (3, "No output from agent in section wmic_process") legend = info[0] for line in info[1:]: psinfo = dict(zip(legend, line)) @@ -45,8 +45,8 @@ mem_mb = mem / 1048576.0 page_mb = page / 1048576.0 - timedif, user_per_sec = get_counter("wmic_process.user.%s.%d" % (name, count), time.time(), userc) - timedif, kernel_per_sec = get_counter("wmic_process.kernel.%s.%d" % (name, count), time.time(), kernelc) + user_per_sec = get_rate("wmic_process.user.%s.%d" % (name, count), time.time(), userc) + kernel_per_sec = get_rate("wmic_process.kernel.%s.%d" % (name, count), time.time(), kernelc) user_perc = user_per_sec / 100000.0 / cpucores kernel_perc = kernel_per_sec / 100000.0 / cpucores cpu_perc = user_perc + kernel_perc @@ -56,45 +56,42 @@ ("user", user_perc, cpuwarn, cpucrit, 0, 100), ("kernel", kernel_perc, cpuwarn, cpucrit, 0, 100) ] - infos = [ - " - %d processes" % count, - "", # 1 - ", %.1fMB RAM" % mem_mb, - "", # 3 - ", %1.fMB Page" % page_mb, - "", # 5 - ", %.0f%%/%.0f%% User/Kernel" % (user_perc, kernel_perc), - "", # 7 - ] + messages = [] + messages.append("%d processes" % count) + state = 0 + msg = "%.0f%%/%.0f%% User/Kernel" % (user_perc, kernel_perc) if cpu_perc >= cpucrit: state = 2 - infos[7] = "(!!) - critical at %d%%" % cpucrit - - elif page_mb >= pagecrit: - state = 2 - infos[5] = "(!!) critical at %d MB" % pagecrit - - elif mem_mb >= memcrit: - state = 2 - infos[3] = "(!!) critical at %d MB" % memcrit - + msg += "(!!) (critical at %d%%)" % cpucrit elif cpu_perc >= cpuwarn: state = 1 - infos[7] = "(!) warning at %d%%" % cpuwarn + msg += "(!) (warning at %d%%)" % cpuwarn + messages.append(msg) - elif mem_mb >= memwarn: - state = 1 - infos[3] = "(!) 
warning at %d MB" % memwarn + msg = "%.1fMB RAM" % mem_mb + if mem_mb >= memcrit and memcrit > 0: + state = 2 + msg += "(!!) (critical at %d MB)" % memcrit + elif mem_mb >= memwarn and memwarn > 0: + state = max(1, state) + msg += "(!) (warning at %d MB)" % memwarn + messages.append(msg) + msg = "%1.fMB Page" % page_mb + if page_mb >= pagecrit: + state = 2 + msg += "(!!) (critical at %d MB)" % pagecrit elif page_mb >= pagewarn: - state = 1 - infos[5] = "(!) warning at %d MB" % pagewarn - - else: - state = 0 - - infotext = "".join(infos) - return (state, nagios_state_names[state] + infotext, perfdata) - -check_info['wmic_process'] = (check_wmic_process, "proc_%s", 1, no_inventory_possible) + state = max(state, 1) + msg += "(!) (warning at %d MB)" % pagewarn + messages.append(msg) + + return (state, ", ".join(messages), perfdata) + +check_info["wmic_process"] = { + 'check_function': check_wmic_process, + 'service_description': 'Process %s', + 'has_perfdata': True, + 'group': 'wmic_process' +} diff -Nru check-mk-1.2.2p3/wut_webio_io check-mk-1.2.6p12/wut_webio_io --- check-mk-1.2.2p3/wut_webio_io 2013-11-05 09:23:08.000000000 +0000 +++ check-mk-1.2.6p12/wut_webio_io 2015-09-21 10:59:54.000000000 +0000 @@ -7,7 +7,7 @@ # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | -# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. @@ -84,14 +84,14 @@ # Compare to the state of the IO port at inventory. if state != params: - return (2, "CRITICAL - state should be %s but is %s (!!)" % ( + return (2, "state should be %s but is %s (!!)" % ( webio_state_names[params], webio_state_names[state])) else: - return (0, "OK - state is %s" % webio_state_names[state]) + return (0, "state is %s" % webio_state_names[state]) - return (3, "UNKNOWN - Item not found in agent output") + return (3, "Item not found in agent output") check_info['wut_webio_io.inputs'] = { @@ -99,15 +99,13 @@ "inventory_function" : inventory_wut_webio_io_inputs, "service_description": "INPUT %s", "has_perfdata" : False, - # "group" : "", - # "default_levels_variable" : "services_default_levels", - # first check we have a vendor mib from W&T, then check for the model in their MIB. - "snmp_scan_function" : lambda oid: \ - ".1.3.6.1.4.1.5040" in oid("1.3.6.1.2.1.1.2.0") and - oid(".1.3.6.1.4.1.5040.1.2.4.3.3.5.0").lower().startswith("web-io"), "snmp_info" : (".1.3.6.1.4.1.5040.1.2.4", [ "3.1.5.2.1.1", # io port index. (bugged) "3.2.1.1.1", # user defined description. "1.3.1.4", # the low/high state. - ]), + ]), + # first check we have a vendor mib from W&T, then check for the model in their MIB. 
diff -Nru check-mk-1.2.2p3/wut_webio_io.inputs check-mk-1.2.6p12/wut_webio_io.inputs
--- check-mk-1.2.2p3/wut_webio_io.inputs	1970-01-01 00:00:00.000000000 +0000
+++ check-mk-1.2.6p12/wut_webio_io.inputs	2015-06-24 09:48:36.000000000 +0000
@@ -0,0 +1,19 @@
+title: WuT Web-IO: Analogue Input Channels
+agents: snmp
+catalog: hw/environment/wut
+license: GPL
+distribution: check_mk
+description:
+ This check monitors the analogue input channels of
+ a WuT (Wiesemann & Theis) Web-IO.
+
+item:
+ The item is the description of the channel as a string: either
+ "IO INPUT %d" or "IO INPUT" plus the user-defined string describing
+ the channel.
+
+perfdata:
+ This check currently has no performance data.
+
+inventory:
+ All channels are automatically inventorized with their state.
diff -Nru check-mk-1.2.2p3/wut_webtherm check-mk-1.2.6p12/wut_webtherm
--- check-mk-1.2.2p3/wut_webtherm	2013-11-05 09:23:08.000000000 +0000
+++ check-mk-1.2.6p12/wut_webtherm	2015-09-21 10:59:54.000000000 +0000
@@ -7,7 +7,7 @@
 # |           | |___| | | |  __/ (__|   <    | |  | | . \           |
 # |            \____|_| |_|\___|\___|_|\_\___|_|  |_|_|\_\          |
 # |                                                                  |
-# | Copyright Mathias Kettner 2013             mk@mathias-kettner.de |
+# | Copyright Mathias Kettner 2014             mk@mathias-kettner.de |
 # +------------------------------------------------------------------+
 #
 # This file is part of Check_MK.
@@ -27,7 +27,7 @@
 wut_webtherm_defaultlevels = (30, 35)
 
 def inventory_webtherm(info):
-    # line[0] is 1.1, 2.1, 3.1, 6.1, 8.1 or 16.1 depending on MIB variant. We chop
+    # line[0] is 1.1, 2.1, 3.1, 6.1, 8.1, 16.1 or 18.1 depending on MIB variant. We chop
     # off the first part in order to get independent of that.
     return [ (line[0].split(".")[1], "wut_webtherm_defaultlevels") for line in info if len(line) == 2 ]
@@ -38,14 +38,14 @@
         temp = float(line[1].replace(",", "."))
         warn, crit = params
         perfdata = [ ("temp", temp, warn, crit) ]
-        infotext = " - %.1f C (warn/crit at %.1f/%.1f C)" % (temp, warn, crit)
+        infotext = "%.1f C (warn/crit at %.1f/%.1f C)" % (temp, warn, crit)
         if temp >= crit:
-            return (2, "CRIT" + infotext, perfdata)
+            return (2, infotext, perfdata)
         elif temp >= warn:
-            return (1, "WARN" + infotext, perfdata)
+            return (1, infotext, perfdata)
         else:
-            return (0, "OK" + infotext, perfdata)
-    return (3, "UNKNOWN - No sensor found")
+            return (0, infotext, perfdata)
+    return (3, "No sensor found")
 
 def detect_webtherm(oid):
     return oid(".1.3.6.1.2.1.1.2.0") in \
@@ -53,14 +53,21 @@
           ".1.3.6.1.4.1.5040.1.2.2",
           ".1.3.6.1.4.1.5040.1.2.3",
           ".1.3.6.1.4.1.5040.1.2.6",
+          ".1.3.6.1.4.1.5040.1.2.7",
           ".1.3.6.1.4.1.5040.1.2.8",
-          ".1.3.6.1.4.1.5040.1.2.16" ]
-
-
-check_info['wut_webtherm'] = ( check_webtherm, "Thermograph Sensor %s", 1, inventory_webtherm)
-
-snmp_info['wut_webtherm'] = (
-    ".1.3.6.1.4.1.5040.1.2", ["1", "2", "3", "6", "8", "16"], [ "1.2.1.1", "1.3.1.1" ])
-
-snmp_scan_functions['wut_webtherm'] = detect_webtherm
-checkgroup_of["wut_webtherm"] = "room_temperature"
+          ".1.3.6.1.4.1.5040.1.2.9",
+          ".1.3.6.1.4.1.5040.1.2.16",
+          ".1.3.6.1.4.1.5040.1.2.18" ]
+
+check_info["wut_webtherm"] = {
+    'check_function':      check_webtherm,
+    'inventory_function':  inventory_webtherm,
+    'service_description': 'Thermograph Sensor %s',
+    'has_perfdata':        True,
+    'snmp_info':           ('.1.3.6.1.4.1.5040.1.2',
+                            ['1', '2', '3', '6', '7', '8', '9', '16', '18'],
+                            ['1.2.1.1', '1.3.1.1']
+                           ),
+    'snmp_scan_function':  detect_webtherm,
+    'group':               'room_temperature',
+}
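Since every MIB variant prefixes the sensor index differently, the inventory function keeps only the part after the first dot, and the check emits the usual Check_MK perfdata tuple of (name, value, warn, crit). A small sketch of both conventions; the temperature value is hypothetical:

    # item derivation: the MIB variant prefix ("1" ... "18") is chopped off
    for index in ["1.1", "6.1", "18.1"]:
        print(index.split(".")[1])          # -> "1" in all three cases

    # perfdata tuple as emitted above: metric name, value, warn level, crit level
    temp, (warn, crit) = 28.5, (30, 35)
    perfdata = [("temp", temp, warn, crit)]
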
diff -Nru check-mk-1.2.2p3/xinetd_caching.conf check-mk-1.2.6p12/xinetd_caching.conf
--- check-mk-1.2.2p3/xinetd_caching.conf	2013-10-12 17:49:41.000000000 +0000
+++ check-mk-1.2.6p12/xinetd_caching.conf	2014-12-11 10:41:49.000000000 +0000
@@ -5,7 +5,7 @@
 # |           | |___| | | |  __/ (__|   <    | |  | | . \           |
 # |            \____|_| |_|\___|\___|_|\_\___|_|  |_|_|\_\          |
 # |                                                                  |
-# | Copyright Mathias Kettner 2013             mk@mathias-kettner.de |
+# | Copyright Mathias Kettner 2014             mk@mathias-kettner.de |
 # +------------------------------------------------------------------+
 #
 # This file is part of Check_MK.
diff -Nru check-mk-1.2.2p3/xinetd.conf check-mk-1.2.6p12/xinetd.conf
--- check-mk-1.2.2p3/xinetd.conf	2013-10-12 17:49:41.000000000 +0000
+++ check-mk-1.2.6p12/xinetd.conf	2014-12-11 10:41:49.000000000 +0000
@@ -5,7 +5,7 @@
 # |           | |___| | | |  __/ (__|   <    | |  | | . \           |
 # |            \____|_| |_|\___|\___|_|\_\___|_|  |_|_|\_\          |
 # |                                                                  |
-# | Copyright Mathias Kettner 2013             mk@mathias-kettner.de |
+# | Copyright Mathias Kettner 2014             mk@mathias-kettner.de |
 # +------------------------------------------------------------------+
 #
 # This file is part of Check_MK.
diff -Nru check-mk-1.2.2p3/zfs_arc_cache check-mk-1.2.6p12/zfs_arc_cache
--- check-mk-1.2.2p3/zfs_arc_cache	1970-01-01 00:00:00.000000000 +0000
+++ check-mk-1.2.6p12/zfs_arc_cache	2015-06-24 09:48:37.000000000 +0000
@@ -0,0 +1,231 @@
+#!/usr/bin/python
+# -*- encoding: utf-8; py-indent-offset: 4 -*-
+# +------------------------------------------------------------------+
+# |             ____ _               _        __  __ _  __           |
+# |            / ___| |__   ___  ___| | __   |  \/  | |/ /           |
+# |           | |   | '_ \ / _ \/ __| |/ /   | |\/| | ' /            |
+# |           | |___| | | |  __/ (__|   <    | |  | | . \            |
+# |            \____|_| |_|\___|\___|_|\_\___|_|  |_|_|\_\           |
+# |                                                                  |
+# | Copyright Mathias Kettner 2014             mk@mathias-kettner.de |
+# +------------------------------------------------------------------+
+#
+# This file is part of Check_MK.
+# The official homepage is at http://mathias-kettner.de/check_mk.
+#
+# check_mk is free software;  you can redistribute it and/or modify it
+# under the  terms of the  GNU General Public License  as published by
+# the Free Software Foundation in version 2.  check_mk is  distributed
+# in the hope that it will be useful, but WITHOUT ANY WARRANTY;  with-
+# out even the implied warranty of  MERCHANTABILITY  or  FITNESS FOR A
+# PARTICULAR PURPOSE. See the  GNU General Public License for more de-
+# tails. You should have  received  a copy of the  GNU  General Public
+# License along with GNU Make; see the file  COPYING.  If  not,  write
+# to the Free Software Foundation, Inc., 51 Franklin St,  Fifth Floor,
+# Boston, MA 02110-1301 USA.
+
+# Example output from agent:
+# <<<zfs_arc_cache>>>
+# hits = 106259988004
+# misses = 27664604758
+# demand_data_hits = 23694052185
+# demand_data_misses = 2806853416
+# demand_metadata_hits = 73187550363
+# demand_metadata_misses = 1557349557
+# prefetch_data_hits = 3100882779
+# prefetch_data_misses = 21062611239
+# prefetch_metadata_hits = 6277502677
+# prefetch_metadata_misses = 2237790546
+# mru_hits = 44007947284
+# mru_ghost_hits = 2418664836
+# mfu_hits = 52875478045
+# mfu_ghost_hits = 1458768458
+# deleted = 25139978315
+# recycle_miss = 3965481664
+# mutex_miss = 323199589
+# evict_skip = 2543918629307
+# evict_l2_cached = 0
+# evict_l2_eligible = 253548767148544
+# evict_l2_ineligible = 36185885241856
+# hash_elements = 182514
+# hash_elements_max = 388216
+# hash_collisions = 6825894732
+# hash_chains = 14194
+# hash_chain_max = 8
+# p = 914 MB
+# c = 2010 MB
+# c_min = 2010 MB
+# c_max = 320 MB
+# size = 1554 MB
+# hdr_size = 36128904
+# data_size = 951095808
+# other_size = 642656472
+# l2_hits = 0
+# l2_misses = 0
+# l2_feeds = 0
+# l2_rw_clash = 0
+# l2_read_bytes = 0
+# l2_write_bytes = 0
+# l2_writes_sent = 0
+# l2_writes_done = 0
+# l2_writes_error = 0
+# l2_writes_hdr_miss = 0
+# l2_evict_lock_retry = 0
+# l2_evict_reading = 0
+# l2_free_on_write = 0
+# l2_abort_lowmem = 0
+# l2_cksum_bad = 0
+# l2_io_error = 0
+# l2_size = 0
+# l2_hdr_size = 0
+# memory_throttle_count = 439874
+# arc_no_grow = 1
+# arc_tempreserve = 0 MB
+# arc_meta_used = 1322 MB
+# arc_meta_limit = 80 MB
+# arc_meta_max = 2077 MB
+
+
+# parses agent output in a structure like
+# {'arc_meta_limit': 80,
+#  'arc_meta_max': 2077,
+#  'arc_meta_used': 1322,
+#  [...]
+# }
+
+def parse_zfs_arc_cache(info):
+    parsed = {}
+    for line in info:
+        if len(line) > 2 and line[1] == "=":
+            parsed[line[0]] = int(line[2])
+    return parsed
+
+# .--cache---------------------------------------------------------------.
+# |                                     _                                 |
+# |                       ___ __ _  ___| |__   ___                        |
+# |                      / __/ _` |/ __| '_ \ / _ \                       |
+# |                     | (_| (_| | (__| | | |  __/                       |
+# |                      \___\__,_|\___|_| |_|\___|                       |
+# |                                                                       |
+# '----------------------------------------------------------------------'
+
+def inventory_zfs_arc_cache(parsed):
+    if parsed.get("hits") and parsed.get("misses"):
+        return [ (None, None) ]
+    else:
+        return []
+
+def check_zfs_arc_cache(_no_item, _no_params, parsed):
+    status = 0
+    perfdata = []
+    message = "ZFS arc cache:"
+
+    # hit ratio
+    if "hits" in parsed.keys() and "misses" in parsed.keys():
+        hit_ratio = float(parsed["hits"]) / (parsed["hits"] + parsed["misses"]) * 100
+        message += " hit ratio: %0.2f %%" % hit_ratio
+        perfdata.append(("hit_ratio", "%0.2f" % hit_ratio, '', '', 0, 100))
+    else:
+        message += " no info about hit ratio available"
+        perfdata.append(("hit_ratio", 0, '', '', 0, 100))
+        status = 3
+
+    # prefetch data hit ratio
+    if "prefetch_data_hits" in parsed.keys() and "prefetch_data_misses" in parsed.keys():
+        prefetch_data_hit_ratio = float(parsed["prefetch_data_hits"]) / (parsed["prefetch_data_hits"] + parsed["prefetch_data_misses"]) * 100
+        message += ", prefetch data hit ratio: %0.2f %%" % prefetch_data_hit_ratio
+        perfdata.append(("prefetch_data_hit_ratio", "%0.2f" % prefetch_data_hit_ratio, '', '', 0, 100))
+    else:
+        message += ", no info about prefetch data hit ratio available"
+        perfdata.append(("prefetch_data_hit_ratio", 0, '', '', 0, 100))
+        status = 3
+
+    # prefetch metadata hit ratio
+    if "prefetch_metadata_hits" in parsed.keys() and "prefetch_metadata_misses" in parsed.keys():
+        prefetch_metadata_hit_ratio = float(parsed["prefetch_metadata_hits"]) / (parsed["prefetch_metadata_hits"] + parsed["prefetch_metadata_misses"]) * 100
+        message += ", prefetch metadata hit ratio: %0.2f %%" % prefetch_metadata_hit_ratio
+        perfdata.append(("prefetch_metadata_hit_ratio", "%0.2f" % prefetch_metadata_hit_ratio, '', '', 0, 100))
+    else:
+        message += ", no info about prefetch metadata hit ratio available"
+        perfdata.append(("prefetch_metadata_hit_ratio", 0, '', '', 0, 100))
+        status = 3
+
+    # size
+    if "size" in parsed.keys():
+        message += ", cache size: %d MB" % parsed["size"]
+        perfdata.append(("size", parsed["size"] * 1024 * 1024, '', '', 0))
+    else:
+        message += ", no info about cache size available"
+        perfdata.append(("size", 0, '', '', 0))
+        status = 3
+
+    # arc_meta
+    # these values may be missing, this is ok too
+    # in this case just do not report these values
+    if "arc_meta_used" in parsed.keys() and "arc_meta_limit" in parsed.keys() and "arc_meta_max" in parsed.keys():
+        message += ", arc meta %d MB used, limit %d MB, max %d MB" \
+                   % (parsed["arc_meta_used"], parsed["arc_meta_limit"], parsed["arc_meta_max"])
+        perfdata.append(("arc_meta_used", parsed["arc_meta_used"] * 1024 * 1024, '', '', 0))
+        perfdata.append(("arc_meta_limit", parsed["arc_meta_limit"] * 1024 * 1024, '', '', 0))
+        perfdata.append(("arc_meta_max", parsed["arc_meta_max"] * 1024 * 1024, '', '', 0))
+
+    return status, message, perfdata
+
+check_info["zfs_arc_cache"] = {
+    "parse_function"      : parse_zfs_arc_cache,
+    "check_function"      : check_zfs_arc_cache,
+    "inventory_function"  : inventory_zfs_arc_cache,
+    "service_description" : "ZFS arc cache",
+    "has_perfdata"        : True,
+}
+
+#.
+# .--L2 cache------------------------------------------------------------.
+# |                 _     ____                  _                         |
+# |                | |   |___ \    ___ __ _  ___| |__   ___               |
+# |                | |     __) |  / __/ _` |/ __| '_ \ / _ \              |
+# |                | |___ / __/  | (_| (_| | (__| | | |  __/              |
+# |                |_____|_____|  \___\__,_|\___|_| |_|\___|              |
+# |                                                                       |
+# '----------------------------------------------------------------------'
+
+def inventory_zfs_arc_cache_l2(parsed):
+    # if l2_size == 0 there is no l2 cache available at all
+    if "l2_size" in parsed.keys() and parsed["l2_size"] > 0:
+        return [ (None, None) ]
+    else:
+        return []
+
+def check_zfs_arc_cache_l2(_no_item, _no_params, parsed):
+    status = 0
+    perfdata = []
+    message = "ZFS arc cache L2:"
+
+    # hit ratio
+    if "l2_hits" in parsed.keys() and "l2_misses" in parsed.keys():
+        l2_hit_ratio = float(parsed["l2_hits"]) / (parsed["l2_hits"] + parsed["l2_misses"]) * 100
+        message += " L2 hit ratio: %0.2f %%" % l2_hit_ratio
+        perfdata.append(("l2_hit_ratio", "%0.2f" % l2_hit_ratio, '', '', 0, 100))
+    else:
+        message += " no info about L2 hit ratio available"
+        perfdata.append(("l2_hit_ratio", 0, '', '', 0, 100))
+        status = 3
+
+    # size
+    if "l2_size" in parsed.keys():
+        message += ", L2 size: %s" % get_bytes_human_readable(parsed["l2_size"])
+        perfdata.append(("l2_size", parsed["l2_size"], '', '', 0))
+    else:
+        message += ", no info about L2 size available"
+        perfdata.append(("l2_size", 0, '', '', 0))
+        status = 3
+
+    return status, message, perfdata
+
+check_info["zfs_arc_cache.l2"] = {
+    "check_function"      : check_zfs_arc_cache_l2,
+    "inventory_function"  : inventory_zfs_arc_cache_l2,
+    "service_description" : "ZFS arc cache L2",
+    "has_perfdata"        : True,
+}
+
+#.
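Taken together, the parse function and the hit-ratio formula in this new check behave as follows on the example agent output above (Check_MK hands each line in pre-split into words); a sketch assuming the functions as defined in this file:

    info = [
        ["hits", "=", "106259988004"],
        ["misses", "=", "27664604758"],
        ["arc_meta_limit", "=", "80", "MB"],   # trailing units are ignored
    ]
    parsed = parse_zfs_arc_cache(info)
    # -> {'hits': 106259988004, 'misses': 27664604758, 'arc_meta_limit': 80}

    hit_ratio = float(parsed["hits"]) / (parsed["hits"] + parsed["misses"]) * 100
    print("%0.2f %%" % hit_ratio)  # -> 79.34 %
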
diff -Nru check-mk-1.2.2p3/zfs_arc_cache.l2 check-mk-1.2.6p12/zfs_arc_cache.l2
--- check-mk-1.2.2p3/zfs_arc_cache.l2	1970-01-01 00:00:00.000000000 +0000
+++ check-mk-1.2.6p12/zfs_arc_cache.l2	2015-06-24 09:48:36.000000000 +0000
@@ -0,0 +1,17 @@
+title: ZFS arc L2 Cache: Hit Ratio and Size
+agents: solaris
+catalog: os/storage
+license: GPL
+distribution: check_mk
+description:
+ Reports the L2 cache hit ratio and the L2 cache size of ZFS.
+
+ This check is only for reporting and always returns {OK}.
+
+perfdata:
+ 2 values are reported:
+ l2_hit_ratio in percent and l2_size in bytes
+
+inventory:
+ Creates exactly one check on every machine having an L2 cache,
+ that means reporting an l2_size greater than 0.
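Unlike the main cache, whose size the agent already reports in MB, l2_size arrives in bytes and is rendered with get_bytes_human_readable(). A rough stand-in for that helper, assuming plain base-1024 steps (the real formatting may differ in detail); the sample value is evict_l2_eligible from the example output above:

    def bytes_human_readable(b):
        # walk up the base-1024 units until the value fits
        for unit in ("B", "kB", "MB", "GB"):
            if abs(b) < 1024.0:
                return "%.2f %s" % (b, unit)
            b /= 1024.0
        return "%.2f TB" % b

    print(bytes_human_readable(253548767148544))  # -> 230.60 TB
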
diff -Nru check-mk-1.2.2p3/zfsget check-mk-1.2.6p12/zfsget
--- check-mk-1.2.2p3/zfsget	2013-11-05 09:23:08.000000000 +0000
+++ check-mk-1.2.6p12/zfsget	2015-09-21 10:59:54.000000000 +0000
@@ -7,7 +7,7 @@
 # |           | |___| | | |  __/ (__|   <    | |  | | . \           |
 # |            \____|_| |_|\___|\___|_|\_\___|_|  |_|_|\_\          |
 # |                                                                  |
-# | Copyright Mathias Kettner 2013             mk@mathias-kettner.de |
+# | Copyright Mathias Kettner 2014             mk@mathias-kettner.de |
 # +------------------------------------------------------------------+
 #
 # This file is part of Check_MK.
@@ -59,27 +59,11 @@
 # tsrdb10exp  5128704       21  4982717     1%  /tsrdb10exp
 # tsrdb10dat 30707712 19914358 10789464    65%  /u01
 
-
-def parse_zfsget(info):
+def parse_zfs_entry(info):
     def mb(x):
         return saveint(x) / (1024.0 * 1024)
-
-    entries = []
-    entry = None
-    lineno = 0
-    last_name = None
-    start_of_df = None
-    for line in info:
-        lineno += 1
-        if line == ['[df]']:
-            start_of_df = lineno
-            break
-
-        name, what, value = line[:3]
-        if last_name != name:
-            if entry:
-                entries.append(entry)
-            entry = {}
-            last_name = name
+    entry = {}
+    for name, what, value in info:
         if what in ["used", "available"]:
             entry[what] = mb(value)
         elif what == "quota":
@@ -88,62 +72,101 @@
         elif what in [ 'mountpoint', 'type', 'name' ]:
             entry[what] = value
 
-    if entry:
-        entries.append(entry)
+    return entry
 
-    parsed = {}
-    for entry in entries:
-        if entry["mountpoint"] != '-':
-            entry["is_pool"] = '/' not in name
-            parsed[entry["mountpoint"]] = entry
-
-    if start_of_df != None:
-        zfsget_parse_df_info(parsed, info[start_of_df:])
-
-    return parsed
-
-
-def zfsget_parse_df_info(entries, info):
-    new_entries = {}
-
-    for device, kbytes, used, avail, percent, mountpoint in info:
-        # ignore entries already contained in zfsget and also
-        # entries for virtual filesystems (like swap)
-        if mountpoint.startswith("/") and mountpoint not in entries:
-            entry = {}
-            total = int(kbytes) / 1024.0
-            entry["total"] = total
-            entry["used"] = int(used) / 1024.0
-            entry["available"] = total - entry["used"]
-            entry["mountpoint"] = mountpoint
-            new_entries[mountpoint] = entry
+def parse_zfsget(info):
+    run_zfs = True
+    run_df = False
+    last_name = None
+    zfs_agent_data = []
+    zfs_converted = []
+    df_parsed = {}
+    for line in info:
+        if line == ['[zfs]']:
+            run_zfs = True
+            run_df = False
+            continue
+        if line == ['[df]']:
+            run_df = True
+            run_zfs = False
+            continue
+
+        if run_zfs:
+            name = line[0]
+            # New block, so parse everything and go on collecting more blocks
+            if last_name != name:
+                last_name = name
+                if zfs_agent_data:
+                    new_entry = parse_zfs_entry(zfs_agent_data)
+                    if new_entry:
+                        zfs_converted.append(new_entry)
+                    zfs_agent_data = []
+            zfs_agent_data.append(line[:3])
+
+        if run_df:
+            if len(line) == 6:
+                device, kbytes, used, avail, percent, mountpoint = line
+            else:
+                device, fs_type, kbytes, used, avail, percent, mountpoint = line
+            if mountpoint.startswith("/"):
+                entry = {}
+                total = int(kbytes) / 1024.0
+                entry["name"] = device
+                entry["total"] = total
+                entry["used"] = int(used) / 1024.0
+                entry["available"] = total - entry["used"]
+                entry["mountpoint"] = mountpoint
+                df_parsed[mountpoint] = entry
 
     # Now remove duplicate entries for the root filesystem, such
     # as /dev/ or /lib/libc.so.1. We do this if size, used and
     # avail is equal. I hope that it will not happen too often
     # that this is per chance the case for different passed-through
     # filesystems
-    root_entry = new_entries.get("/")
+    root_entry = df_parsed.get("/")
     if root_entry:
         t_u_a = (root_entry["total"], root_entry["used"], root_entry["available"])
         drop = []
-        for mountpoint, entry in new_entries.items():
+        for mountpoint, entry in df_parsed.items():
             if mountpoint != "/" and \
                 t_u_a == (entry["total"], entry["used"], entry["available"]):
                 drop.append(mountpoint)
         for mp in drop:
-            del new_entries[mp]
-    entries.update(new_entries)
+            del df_parsed[mp]
 
+    # parsed has the device name as key, because there may exist
+    # several device names per mount point, and we know only
+    # later which one to take
+    zfs_parsed = {}
+    for entry in zfs_converted:
+        if entry["mountpoint"].startswith("/"):
+            entry["is_pool"] = '/' not in entry["name"]
+            if entry['available'] != 0 and entry['type'] == 'filesystem':
+                zfs_parsed[entry["name"]] = entry
+
+    # parsed_df and parsed_final have the mount point as key
+    parsed_final = {}
+    for mountpoint, entry_df in df_parsed.items():
+        found = False
+        # for every mount point in the df section, if the device name
+        # is also present in the "parsed" variable, we take those data
+        for name, entry in zfs_parsed.items():
+            if entry_df["name"] == name:
+                parsed_final[mountpoint] = entry
+                found = True
+        # if a mount point in the df section is not present in the
+        # parsed variable, we take the data from the df section
+        if not found:
+            parsed_final[mountpoint] = entry_df
+    return parsed_final
 
 def inventory_zfsget(info):
     mplist = []
     parsed = parse_zfsget(info)
     for mountpoint, properties in parsed.items():
         if mountpoint not in inventory_df_exclude_mountpoints:
-            if properties["available"] != 0:
-                mplist.append(mountpoint)
+            mplist.append(mountpoint)
     return df_inventory(mplist)
 
@@ -178,11 +201,10 @@
         return df_check_filesystem_list(item, params, fslist)
 
-
 check_info['zfsget'] = {
     "check_function"          : check_zfsget,
     "inventory_function"      : inventory_zfsget,
-    "service_description"     : "fs_%s",
+    "service_description"     : "Filesystem %s",
     "has_perfdata"            : True,
     "group"                   : "filesystem",
     "default_levels_variable" : "filesystem_default_levels",
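The merge at the end of the new parse_zfsget() prefers zfsget data over df data whenever the df device name matches a known zfs dataset, and falls back to the plain df entry otherwise. A condensed sketch of that rule, assuming inputs keyed as in the code above (zfs entries by device name, df entries by mount point, each df entry carrying a "name" field):

    def merge_zfs_df(zfs_by_name, df_by_mountpoint):
        final = {}
        for mountpoint, df_entry in df_by_mountpoint.items():
            # prefer the zfsget entry when the df device is a known zfs dataset,
            # otherwise keep the plain df data
            final[mountpoint] = zfs_by_name.get(df_entry["name"], df_entry)
        return final
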
Binary files /tmp/tmpz1J_py/xquijR6Ryw/check-mk-1.2.2p3/z_os/waitmax and /tmp/tmpz1J_py/GXS8RRKTxu/check-mk-1.2.6p12/z_os/waitmax differ
diff -Nru check-mk-1.2.2p3/zpool_status check-mk-1.2.6p12/zpool_status
--- check-mk-1.2.2p3/zpool_status	2013-11-05 09:23:08.000000000 +0000
+++ check-mk-1.2.6p12/zpool_status	2015-06-24 09:48:37.000000000 +0000
@@ -7,7 +7,7 @@
 # |           | |___| | | |  __/ (__|   <    | |  | | . \           |
 # |            \____|_| |_|\___|\___|_|\_\___|_|  |_|_|\_\          |
 # |                                                                  |
-# | Copyright Mathias Kettner 2013             mk@mathias-kettner.de |
+# | Copyright Mathias Kettner 2014             mk@mathias-kettner.de |
 # +------------------------------------------------------------------+
 #
 # This file is part of Check_MK.
@@ -69,7 +69,7 @@
 
 def check_zpool_status(_no_item, _no_params, info):
     if " ".join(info[0]) == "all pools are healthy":
-        return(0, "OK - All pools are healthy")
+        return(0, "All pools are healthy")
 
     start_pool = False
     last_pool = None
@@ -77,10 +77,34 @@
     warning_pools = {}
     pool_messages = {}
     state = 0
+    message = []
+
     for line in info:
         if line[0] == "pool:":
             last_pool = line[1]
 
+        if line[0] == "state:":
+            if line[1] == "ONLINE":
+                state = 0
+            elif line[1] == "DEGRADED":
+                state = 1
+                message.append("DEGRADED State")
+            elif line[1] == "FAULTED":
+                state = 2
+                message.append("FAULTED State")
+            elif line[1] == "UNAVAIL":
+                state = 2
+                message.append("UNAVAIL State")
+            elif line[1] == "REMOVED":
+                state = 2
+                message.append("REMOVED State")
+            elif line[1] == "OFFLINE":
+                state = 0
+            else:
+                message.append("Unknown State")
+                state = 1
+            continue
+
         if line[0] == "NAME":
             start_pool = True
             continue
@@ -92,7 +116,7 @@
             pool_messages[last_pool] = msg
             continue
 
-        if line[0] == "spares":
+        if line[0] in ["spares", "logs", "cache"]:
             start_pool = False
             continue
 
@@ -103,7 +127,7 @@
         if saveint(line[4]) != 0:
             warning_pools[line[0]] = tuple(line[1:])
 
-    message = []
+
     for pool in pool_messages.keys():
         state = 1
         message.append("%s: %s" % (pool, pool_messages[pool]))
@@ -115,8 +139,13 @@
     for pool in error_pools.keys():
         state = 2
         message.append("%s State:%s(!!)" % (pool, error_pools[pool][0]))
+
     if len(message) == 0:
         message = ['No critical errors']
-    return (state, nagios_state_names[state] + " - " + ", ".join(message))
-#
-check_info['zpool_status'] = ( check_zpool_status, "zpool status", 0, inventory_zpool_status)
+
+    return (state, ", ".join(message))
+
+check_info["zpool_status"] = {
+    'check_function':      check_zpool_status,
+    'inventory_function':  inventory_zpool_status,
+    'service_description': 'zpool status',
+}
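The state handling added to check_zpool_status() is a plain if/elif chain; the same classification can also be written table-driven. A hypothetical refactor sketch, not the shipped code (ONLINE and OFFLINE map to OK without a message, matching the logic above):

    ZPOOL_STATE_MAP = {
        "ONLINE":   (0, None),
        "OFFLINE":  (0, None),
        "DEGRADED": (1, "DEGRADED State"),
        "FAULTED":  (2, "FAULTED State"),
        "UNAVAIL":  (2, "UNAVAIL State"),
        "REMOVED":  (2, "REMOVED State"),
    }

    state, text = ZPOOL_STATE_MAP.get("DEGRADED", (1, "Unknown State"))
    print("%s %s" % (state, text))  # -> 1 DEGRADED State
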
diff -Nru check-mk-1.2.2p3/zypper check-mk-1.2.6p12/zypper
--- check-mk-1.2.2p3/zypper	2013-11-05 09:23:08.000000000 +0000
+++ check-mk-1.2.6p12/zypper	2015-06-24 09:48:37.000000000 +0000
@@ -7,7 +7,7 @@
 # |           | |___| | | |  __/ (__|   <    | |  | | . \           |
 # |            \____|_| |_|\___|\___|_|\_\___|_|  |_|_|\_\          |
 # |                                                                  |
-# | Copyright Mathias Kettner 2013             mk@mathias-kettner.de |
+# | Copyright Mathias Kettner 2014             mk@mathias-kettner.de |
 # +------------------------------------------------------------------+
 #
 # This file is part of Check_MK.
@@ -37,13 +37,18 @@
 
 def inventory_zypper(info):
-    if len(info) > 0:
-        return [(None,{})]
+    # the agent section is only present when the agent has
+    # detected that zypper is installed, therefore the check
+    # can always register
+    return [(None,{})]
 
 def check_zypper(_no_item, _no_params, info):
     patch_types = {}
     updates = 0
     locks = []
+    firstline = " ".join(info[0])
+    if re.match("ERROR:", firstline):
+        return 3, firstline
     for line in info:
         # 5 patches needed (2 security patches)
         if len(line) >= 5 and line[4].lower().strip() == 'needed':
@@ -56,7 +61,7 @@
 
     state = 0
-    infotext = " - %d updates" % updates
+    infotext = "%d updates" % updates
     if updates:
         patch_items = patch_types.items()
         patch_items.sort()
@@ -77,12 +82,11 @@
         state = max(1, state)
         infotext += ", %d locks(!)" % len(locks)
 
-    return state, nagios_state_names[state] + infotext
-
+    return state, infotext
 
 check_info['zypper'] = {
     "check_function"      : check_zypper,
     "inventory_function"  : inventory_zypper,
     "service_description" : "Zypper Updates",
+    "group"               : "zypper",
 }
-